hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9c64e7e57066fc421b51839543271fcb79807bc9
| 10,087
|
py
|
Python
|
salika/views/django_migrations_views.py
|
BarisSari/django_crud
|
ce9586c10da2f865d29d9a18e9ff5582abe5e3a0
|
[
"MIT"
] | null | null | null |
salika/views/django_migrations_views.py
|
BarisSari/django_crud
|
ce9586c10da2f865d29d9a18e9ff5582abe5e3a0
|
[
"MIT"
] | null | null | null |
salika/views/django_migrations_views.py
|
BarisSari/django_crud
|
ce9586c10da2f865d29d9a18e9ff5582abe5e3a0
|
[
"MIT"
] | null | null | null |
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from ..models import DjangoMigrations
from ..forms import DjangoMigrationsForm
from django.urls import reverse_lazy
from django.urls import reverse
from django.http import Http404
class DjangoMigrationsListView(ListView):
    """Paginated list view for DjangoMigrations rows (20 per page).

    Most overrides below are pure pass-throughs kept as explicit extension
    points; only ``get_paginator`` had a real defect and is fixed.
    """
    model = DjangoMigrations
    template_name = "salika/django_migrations_list.html"
    paginate_by = 20
    context_object_name = "django_migrations_list"
    allow_empty = True
    page_kwarg = 'page'
    paginate_orphans = 0

    def __init__(self, **kwargs):
        # __init__ must return None; delegate without returning.
        super().__init__(**kwargs)

    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        return super().get(request, *args, **kwargs)

    def get_queryset(self):
        return super().get_queryset()

    def get_allow_empty(self):
        return super().get_allow_empty()

    def get_context_data(self, *args, **kwargs):
        return super().get_context_data(*args, **kwargs)

    def get_paginate_by(self, queryset):
        return super().get_paginate_by(queryset)

    def get_context_object_name(self, object_list):
        return super().get_context_object_name(object_list)

    def paginate_queryset(self, queryset, page_size):
        return super().paginate_queryset(queryset, page_size)

    def get_paginator(self, queryset, per_page, orphans=0, allow_empty_first_page=True):
        # BUG FIX: the original discarded the caller's ``orphans`` and
        # ``allow_empty_first_page`` arguments and always passed the
        # hard-coded defaults to the parent; forward them instead.
        return super().get_paginator(
            queryset, per_page,
            orphans=orphans,
            allow_empty_first_page=allow_empty_first_page,
        )

    def render_to_response(self, context, **response_kwargs):
        return super().render_to_response(context, **response_kwargs)

    def get_template_names(self):
        return super().get_template_names()
class DjangoMigrationsDetailView(DetailView):
    """Detail page for a single DjangoMigrations row, looked up by pk or slug.

    Every override simply forwards to DetailView; they exist only as hooks
    for later customisation.
    """
    model = DjangoMigrations
    template_name = "salika/django_migrations_detail.html"
    context_object_name = "django_migrations"
    slug_field = 'slug'
    slug_url_kwarg = 'slug'
    pk_url_kwarg = 'pk'

    def __init__(self, **kwargs):
        return super().__init__(**kwargs)

    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        return super().get(request, *args, **kwargs)

    def get_object(self, queryset=None):
        return super().get_object(queryset)

    def get_queryset(self):
        return super().get_queryset()

    def get_slug_field(self):
        return super().get_slug_field()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        return context

    def get_context_object_name(self, obj):
        return super().get_context_object_name(obj)

    def render_to_response(self, context, **response_kwargs):
        return super().render_to_response(context, **response_kwargs)

    def get_template_names(self):
        return super().get_template_names()
class DjangoMigrationsCreateView(CreateView):
    """Create a DjangoMigrations row via DjangoMigrationsForm.

    On success, redirects to the detail page of the new object (see
    ``get_success_url``); ``success_url`` remains as a fallback.
    """
    model = DjangoMigrations
    form_class = DjangoMigrationsForm
    # fields = ['app', 'name', 'applied']
    template_name = "salika/django_migrations_create.html"
    success_url = reverse_lazy("django_migrations_list")

    def __init__(self, **kwargs):
        # __init__ must return None; delegate without returning.
        super().__init__(**kwargs)

    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        return super().post(request, *args, **kwargs)

    def get_form_class(self):
        return super().get_form_class()

    def get_form(self, form_class=None):
        return super().get_form(form_class)

    def get_form_kwargs(self, **kwargs):
        return super().get_form_kwargs(**kwargs)

    def get_initial(self):
        return super().get_initial()

    def form_invalid(self, form):
        return super().form_invalid(form)

    def form_valid(self, form):
        # BUG FIX: the object was previously saved twice — once manually via
        # form.save(commit=False)/obj.save() and again inside
        # super().form_valid(), which calls form.save().  A single save in
        # the parent implementation is sufficient.
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        return super().get_context_data(**kwargs)

    def render_to_response(self, context, **response_kwargs):
        return super().render_to_response(context, **response_kwargs)

    def get_template_names(self):
        return super().get_template_names()

    def get_success_url(self):
        # Redirect to the detail page of the newly created object.
        return reverse("salika:django_migrations_detail", args=(self.object.pk,))
class DjangoMigrationsUpdateView(UpdateView):
    """Edit an existing DjangoMigrations row via DjangoMigrationsForm.

    The object is looked up by pk or slug; on success, redirects to its
    detail page.
    """
    model = DjangoMigrations
    form_class = DjangoMigrationsForm
    # fields = ['app', 'name', 'applied']
    template_name = "salika/django_migrations_update.html"
    initial = {}
    slug_field = 'slug'
    slug_url_kwarg = 'slug'
    pk_url_kwarg = 'pk'
    context_object_name = "django_migrations"

    def __init__(self, **kwargs):
        # __init__ must return None; delegate without returning.
        super().__init__(**kwargs)

    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        return super().post(request, *args, **kwargs)

    def get_object(self, queryset=None):
        return super().get_object(queryset)

    def get_queryset(self):
        return super().get_queryset()

    def get_slug_field(self):
        return super().get_slug_field()

    def get_form_class(self):
        return super().get_form_class()

    def get_form(self, form_class=None):
        return super().get_form(form_class)

    def get_form_kwargs(self, **kwargs):
        return super().get_form_kwargs(**kwargs)

    def get_initial(self):
        return super().get_initial()

    def form_invalid(self, form):
        return super().form_invalid(form)

    def form_valid(self, form):
        # BUG FIX: the object was previously saved twice — once manually via
        # form.save(commit=False)/obj.save() and again inside
        # super().form_valid(), which calls form.save().  A single save in
        # the parent implementation is sufficient.
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        return super().get_context_data(**kwargs)

    def get_context_object_name(self, obj):
        return super().get_context_object_name(obj)

    def render_to_response(self, context, **response_kwargs):
        return super().render_to_response(context, **response_kwargs)

    def get_template_names(self):
        return super().get_template_names()

    def get_success_url(self):
        # Redirect back to the detail page of the edited object.
        return reverse("salika:django_migrations_detail", args=(self.object.pk,))
class DjangoMigrationsDeleteView(DeleteView):
    """Delete a DjangoMigrations row.

    GET requests are rejected with 404 so deletion can only happen via POST;
    on success, redirects to the list page.
    """
    model = DjangoMigrations
    template_name = "salika/django_migrations_delete.html"
    slug_field = 'slug'
    slug_url_kwarg = 'slug'
    pk_url_kwarg = 'pk'
    context_object_name = "django_migrations"

    def __init__(self, **kwargs):
        return super().__init__(**kwargs)

    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        # Deletion is POST-only: a plain GET is treated as "not found".
        raise Http404

    def post(self, request, *args, **kwargs):
        return super().post(request, *args, **kwargs)

    def delete(self, request, *args, **kwargs):
        return super().delete(request, *args, **kwargs)

    def get_object(self, queryset=None):
        return super().get_object(queryset)

    def get_queryset(self):
        return super().get_queryset()

    def get_slug_field(self):
        return super().get_slug_field()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        return context

    def get_context_object_name(self, obj):
        return super().get_context_object_name(obj)

    def render_to_response(self, context, **response_kwargs):
        return super().render_to_response(context, **response_kwargs)

    def get_template_names(self):
        return super().get_template_names()

    def get_success_url(self):
        return reverse("salika:django_migrations_list")
| 37.779026
| 126
| 0.727669
| 1,117
| 10,087
| 6.31513
| 0.079678
| 0.090445
| 0.06025
| 0.092997
| 0.875106
| 0.744542
| 0.601077
| 0.560391
| 0.512475
| 0.512475
| 0
| 0.001312
| 0.168831
| 10,087
| 266
| 127
| 37.921053
| 0.840052
| 0.007039
| 0
| 0.475936
| 0
| 0
| 0.039748
| 0.031259
| 0
| 0
| 0
| 0
| 0
| 1
| 0.358289
| false
| 0
| 0.042781
| 0.315508
| 0.946524
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
130b52e3814bc40c0f483b1ab60b034d1374049c
| 201
|
py
|
Python
|
venv/lib/python3.7/site-packages/oauthlib/oauth2/rfc8628/clients/__init__.py
|
hamesjan/SurfForecastBot
|
83429cdbe0d569b75478f5318a7db84d993bdcb4
|
[
"MIT"
] | 7
|
2022-03-10T07:03:14.000Z
|
2022-03-24T09:42:46.000Z
|
venv/lib/python3.7/site-packages/oauthlib/oauth2/rfc8628/clients/__init__.py
|
hamesjan/SurfForecastBot
|
83429cdbe0d569b75478f5318a7db84d993bdcb4
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/oauthlib/oauth2/rfc8628/clients/__init__.py
|
hamesjan/SurfForecastBot
|
83429cdbe0d569b75478f5318a7db84d993bdcb4
|
[
"MIT"
] | 1
|
2022-03-10T07:15:54.000Z
|
2022-03-10T07:15:54.000Z
|
"""
oauthlib.oauth2.rfc8628
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming OAuth 2.0 Device Authorization RFC8628.
"""
from .device import DeviceClient
| 22.333333
| 56
| 0.706468
| 24
| 201
| 5.916667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.124378
| 201
| 8
| 57
| 25.125
| 0.744318
| 0.791045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
131743debf087b282ae7a96752a65ebe35099341
| 92
|
py
|
Python
|
hoistcrabber/__init__.py
|
reo-ar/reo-cogs
|
1424270a41891822e3a6e68bd43a92abe75fa6fc
|
[
"Unlicense"
] | 1
|
2020-10-22T21:28:55.000Z
|
2020-10-22T21:28:55.000Z
|
hoistcrabber/__init__.py
|
reo-ar/reo-cogs
|
1424270a41891822e3a6e68bd43a92abe75fa6fc
|
[
"Unlicense"
] | null | null | null |
hoistcrabber/__init__.py
|
reo-ar/reo-cogs
|
1424270a41891822e3a6e68bd43a92abe75fa6fc
|
[
"Unlicense"
] | null | null | null |
from .hoistcrabber import HoistCrabber
def setup(bot):
    """Bot entry point: build the HoistCrabber cog and register it on *bot*."""
    cog = HoistCrabber(bot)
    bot.add_cog(cog)
| 15.333333
| 38
| 0.76087
| 12
| 92
| 5.75
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141304
| 92
| 5
| 39
| 18.4
| 0.873418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
131ab55442b37780137a40571f0809cf00e4f70e
| 91
|
py
|
Python
|
catkin_ws/src/adafruit_drivers/include/Adafruit_MotorHAT/__init__.py
|
DiegoOrtegoP/Software
|
4a07dd2dab29db910ca2e26848fa6b53b7ab00cd
|
[
"CC-BY-2.0"
] | 12
|
2016-04-14T12:21:46.000Z
|
2021-06-18T07:51:40.000Z
|
catkin_ws/src/adafruit_drivers/include/Adafruit_MotorHAT/__init__.py
|
DiegoOrtegoP/Software
|
4a07dd2dab29db910ca2e26848fa6b53b7ab00cd
|
[
"CC-BY-2.0"
] | 14
|
2017-03-03T23:33:05.000Z
|
2018-04-03T18:07:53.000Z
|
catkin_ws/src/adafruit_drivers/include/Adafruit_MotorHAT/__init__.py
|
DiegoOrtegoP/Software
|
4a07dd2dab29db910ca2e26848fa6b53b7ab00cd
|
[
"CC-BY-2.0"
] | 113
|
2016-05-03T06:11:42.000Z
|
2019-06-01T14:37:38.000Z
|
from .Adafruit_MotorHAT import Adafruit_StepperMotor, Adafruit_DCMotor, Adafruit_MotorHAT
| 30.333333
| 89
| 0.89011
| 10
| 91
| 7.7
| 0.6
| 0.415584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 91
| 2
| 90
| 45.5
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
13571b57e45d3d561028a8b9680303c095fe1990
| 5,725
|
py
|
Python
|
regress/tests-disconnected.py
|
fp7-ofelia/VeRTIGO
|
11f39f819196c8352611852435dea17bc6a2292f
|
[
"BSD-3-Clause"
] | 2
|
2016-10-12T08:20:00.000Z
|
2017-05-09T13:13:18.000Z
|
regress/tests-disconnected.py
|
fp7-ofelia/VeRTIGO
|
11f39f819196c8352611852435dea17bc6a2292f
|
[
"BSD-3-Clause"
] | null | null | null |
regress/tests-disconnected.py
|
fp7-ofelia/VeRTIGO
|
11f39f819196c8352611852435dea17bc6a2292f
|
[
"BSD-3-Clause"
] | 1
|
2020-10-01T07:57:34.000Z
|
2020-10-01T07:57:34.000Z
|
#!/usr/bin/python
from fvregress import *
import string # really? you have to do this?
import sys
# start up a flowvisor with 1 switch (default) and two guests
#h= HyperTest(guests=[('localhost',54321),('localhost',54322)],
# hyperargs=["-v0", "-a", "flowvisor-conf.d-base", "ptcp:%d"% HyperTest.OFPORT],valgrind=valgrindArgs)
# Drive a FlowVisor regression run with two switches and one connected
# guest ("alice"); the second guest is deliberately absent so the tests
# below exercise FlowVisor's behaviour toward a disconnected controller.
wantPause = True
try:
    h= FvRegress()
    port=16633
    # Single guest controller "alice" on localhost:54321.
    h.addController("alice", 54321)
    if len(sys.argv) > 1 :
        # A port argument means: attach to an already-running FlowVisor.
        wantPause = False
        port=int(sys.argv[1])
        timeout=60
        h.useAlreadyRunningFlowVisor(port)
    else:
        # Otherwise spawn a private FlowVisor from the bundled config.
        wantPause = False
        timeout=5
        h.spawnFlowVisor(configFile="tests-disconnected.xml")
    h.lamePause()
    # Two emulated switches attach to the same FlowVisor listen port.
    h.addSwitch(name='switch1',port=port)
    h.addSwitch(name='switch2',port=port)
    if wantPause:
        doPause("start tests")
    #################################### Start Tests
    # Payloads are OpenFlow messages spelled as hex strings; the harness
    # parses them, so whitespace inside the blobs is not significant.
    feature_request = FvRegress.OFVERSION + '05 0008 2d47 c5eb'
    feature_request_after = FvRegress.OFVERSION + '05 0008 0000 0101'
    h.runTest(name="feature_request",timeout=timeout, events= [
        # send features_request
        TestEvent( "send","guest",'alice', feature_request),
        # make sure the XID is updated
        TestEvent( "recv","switch",'switch1', feature_request_after, strict=True),
        ])
    ############################################################
    feature_reply = FvRegress.OFVERSION + '''06 00e0 0000 0101 0000 76a9
d40d 2548 0000 0100 0200 0000 0000 001f
0000 03ff 0000 1ac1 51ff ef8a 7665 7468
3100 0000 0000 0000 0000 0000 0000 0000
0000 0000 0000 00c0 0000 0000 0000 0000
0000 0000 0001 ce2f a287 f670 7665 7468
3300 0000 0000 0000 0000 0000 0000 0000
0000 0000 0000 00c0 0000 0000 0000 0000
0000 0000 0002 ca8a 1ef3 77ef 7665 7468
3500 0000 0000 0000 0000 0000 0000 0000
0000 0000 0000 00c0 0000 0000 0000 0000
0000 0000 0003 fabc 778d 7e0b 7665 7468
3700 0000 0000 0000 0000 0000 0000 0000
0000 0000 0000 00c0 0000 0000 0000 0000
0000 0000'''
    # this reply should strip the STP bit, and trim the ports down to the allowable set
    feature_reply_after = FvRegress.OFVERSION + '''06 00 b0 2d 47 c5 eb 00 00 76 a9 d4 0d 25 48
00 00 01 00 02 00 00 00 00 00 00 1f 00 00 03 ff
00 00 1a c1 51 ff ef 8a 76 65 74 68 31 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 c0 00 00 00 00 00 00 00 00 00 00 00 00
00 02 ca 8a 1e f3 77 ef 76 65 74 68 35 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 c0 00 00 00 00 00 00 00 00 00 00 00 00
00 03 fa bc 77 8d 7e 0b 76 65 74 68 37 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 c0 00 00 00 00 00 00 00 00 00 00 00 00'''
    h.runTest(name="feature_reply", timeout=timeout, events= [
        # send features_reply (with xid from request as above)
        TestEvent( "send","switch",'switch1', feature_reply),
        # make sure the reply has pruned ports
        TestEvent( "recv","guest",'alice', feature_reply_after),
        ])
    ############################################################
    packet_to_g0_p0 = FvRegress.OFVERSION + '''0a 0052 0000 0000 0000 0101
0040 0000 0000 0000 0000 0001 0000 0000
0002 0800 4500 0032 0000 0000 40ff f72c
c0a8 0028 c0a8 0128 7a18 586b 1108 97f5
19e2 657e 07cc 31c3 11c7 c40c 8b95 5151
3354 51d5 0036'''
    packet_to_g1_p0 = FvRegress.OFVERSION + '''0a 0052 0000 0001 0000 0101
0040 0001 0000 0000 0000 0002 0000 0000
0001 0800 4500 0032 0000 0000 40ff f72c
c0a8 0028 c0a8 0128 7a18 586b 1108 97f5
19e2 657e 07cc 31c3 11c7 c40c 8b95 5151
3354 51d5 0036'''
    drop_rule = FvRegress.OFVERSION + '''0e 00 48 00 00 00 00 00 00 00 00 00 01 00 00
00 00 00 01 00 00 00 00 00 02 ff ff 00 00 08 00
00 ff 00 00 c0 a8 00 28 c0 a8 01 28 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00
00 00 00 00 00 00 00 01'''
    h.runTest(name="packet_in drop rule", timeout=timeout, events= [
        # a packet_in destined for the connected guest is forwarded ...
        TestEvent( "send","switch",'switch1', packet_to_g0_p0),
        TestEvent( "recv","guest",'alice', packet_to_g0_p0),
        # ... while one for the disconnected guest makes FlowVisor push a
        # drop rule back to the switch instead
        TestEvent( "send","switch",'switch1', packet_to_g1_p0),
        TestEvent( "recv","switch",'switch1', drop_rule),
        ])
    ################################################################
    probe = '''01 23 20 00 00 01 00 12 e2 b8 dc 4c 88 cc 02 07
04 e2 b8 dc 3b 17 95 04 03 02 00 01 06 02 00 78
00 00'''
    lldp= FvRegress.OFVERSION + \
        '''0a 00 34 00 00 00 00 00 00 01 01 00 22 00 03
00 00''' + probe
    # given an LLDP to no one (i.e., no trailer) then should go to alice but
    # not bob (bob is disconnected)
    h.runTest(name="lldp disconnected", timeout=timeout, events= [
        TestEvent( "send","switch",'switch1', lldp),
        TestEvent( "recv","guest",'alice', lldp),
        # both message queues must be fully drained at this point
        TestEvent( "clear?","guest", 'alice',packet=None),
        TestEvent( "clear?","switch", 'switch1',packet=None),
        ])
    #########################################
    # more tests for this setup HERE
    #################################### End Tests
finally:
    # Always tear the harness down, pausing first when run interactively.
    if wantPause:
        doPause("start cleanup")
    h.cleanup()
| 44.038462
| 105
| 0.557205
| 837
| 5,725
| 3.770609
| 0.299881
| 0.200253
| 0.256654
| 0.309252
| 0.357731
| 0.321926
| 0.284537
| 0.250951
| 0.250951
| 0.250951
| 0
| 0.331455
| 0.318079
| 5,725
| 129
| 106
| 44.379845
| 0.476947
| 0.113712
| 0
| 0.239583
| 0
| 0
| 0.579048
| 0.004694
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.03125
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1381085d1bbe8e724aae54ae554385ece64ab068
| 133
|
py
|
Python
|
tests/combine_strings.py
|
Gemini321/PyPy-interpreter
|
eeb835f4703ec842ddc4cb8b094a41626ae28875
|
[
"MIT"
] | null | null | null |
tests/combine_strings.py
|
Gemini321/PyPy-interpreter
|
eeb835f4703ec842ddc4cb8b094a41626ae28875
|
[
"MIT"
] | null | null | null |
tests/combine_strings.py
|
Gemini321/PyPy-interpreter
|
eeb835f4703ec842ddc4cb8b094a41626ae28875
|
[
"MIT"
] | null | null | null |
# Interactive demo: read two strings from stdin and echo them on one line.
print("Here is combine_strings test:")
first_string = input("Please enter a string:")
second_string = input("Please enter another string:")
print(first_string, second_string)
| 26.6
| 42
| 0.714286
| 21
| 133
| 4.47619
| 0.666667
| 0.234043
| 0.340426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034783
| 0.135338
| 133
| 4
| 43
| 33.25
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0.593985
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1392a63d2b57882281591d5510b1c599432a796a
| 166
|
py
|
Python
|
learn_field_type/settings.py
|
MacHu-GWU/learn_whoosh-project
|
3ffff3b2084d2bb0bd17f38be322f75fa14986b5
|
[
"MIT"
] | null | null | null |
learn_field_type/settings.py
|
MacHu-GWU/learn_whoosh-project
|
3ffff3b2084d2bb0bd17f38be322f75fa14986b5
|
[
"MIT"
] | null | null | null |
learn_field_type/settings.py
|
MacHu-GWU/learn_whoosh-project
|
3ffff3b2084d2bb0bd17f38be322f75fa14986b5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Settings module: points the learn_whoosh index directory at a sibling
# "index" directory next to this file.
from pathlib_mate import Path
from learn_whoosh.config import Config
# assumes pathlib_mate's ``change(new_basename=...)`` replaces the file name
# while keeping the parent directory — TODO confirm against pathlib_mate docs
Config.index_dir = Path(__file__).change(new_basename="index").abspath
| 23.714286
| 70
| 0.76506
| 24
| 166
| 4.958333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006757
| 0.108434
| 166
| 6
| 71
| 27.666667
| 0.797297
| 0.126506
| 0
| 0
| 0
| 0
| 0.034965
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1395ba4609e75761fde9b64badfedacbaafade99
| 156
|
py
|
Python
|
wafw00f/plugins/nevisproxy.py
|
wizard531/wafw00f
|
dce0d0616db0f970013432c520b51aeef62d387f
|
[
"BSD-3-Clause"
] | null | null | null |
wafw00f/plugins/nevisproxy.py
|
wizard531/wafw00f
|
dce0d0616db0f970013432c520b51aeef62d387f
|
[
"BSD-3-Clause"
] | null | null | null |
wafw00f/plugins/nevisproxy.py
|
wizard531/wafw00f
|
dce0d0616db0f970013432c520b51aeef62d387f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
NAME = 'AdNovum nevisProxy'


def is_waf(self):
    """Detect AdNovum nevisProxy by its characteristic session cookie."""
    # credit goes to an anonymous reporter
    navajo_cookie = '^Navajo.*?$'
    return self.matchcookie(navajo_cookie)
| 19.5
| 42
| 0.685897
| 21
| 156
| 5.047619
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173077
| 156
| 7
| 43
| 22.285714
| 0.821705
| 0.365385
| 0
| 0
| 0
| 0
| 0.298969
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
13b43f5f0444201e0302fb537b4b45c98268ec8a
| 292
|
py
|
Python
|
src/gryffin/bayesian_network/edward_interface/_edward_network.py
|
Atinary-technologies/gryffin
|
9770ffa049076fc0b82619c6f0d3fc32437aaea4
|
[
"Apache-2.0"
] | 1
|
2021-05-11T21:37:05.000Z
|
2021-05-11T21:37:05.000Z
|
src/gryffin/bayesian_network/edward_interface/_edward_network.py
|
Atinary-technologies/gryffin
|
9770ffa049076fc0b82619c6f0d3fc32437aaea4
|
[
"Apache-2.0"
] | 1
|
2022-03-10T23:16:30.000Z
|
2022-03-14T17:29:15.000Z
|
src/gryffin/bayesian_network/edward_interface/_edward_network.py
|
Atinary-technologies/gryffin
|
9770ffa049076fc0b82619c6f0d3fc32437aaea4
|
[
"Apache-2.0"
] | 1
|
2022-03-10T21:43:03.000Z
|
2022-03-10T21:43:03.000Z
|
#!/usr/bin/env
__author__ = 'Florian Hase'
#========================================================================
from Utilities import Logger
#========================================================================
class EdwardNetwork(Logger):
    """Stub for the Edward-interface Bayesian network (no behavior yet)."""

    def __init__(self):
        # NOTE(review): the base Logger.__init__ is never invoked here —
        # confirm Logger tolerates being skipped before adding state.
        pass
| 19.466667
| 73
| 0.335616
| 17
| 292
| 5.294118
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116438
| 292
| 14
| 74
| 20.857143
| 0.348837
| 0.537671
| 0
| 0
| 0
| 0
| 0.090909
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
13c81f427463ba0b5296a44c8aa81493571fb32f
| 62
|
py
|
Python
|
listingmirror/__init__.py
|
listingmirror/async-gelf-handler
|
5b2e665e229277f914db0247ac174f7090882eb7
|
[
"BSD-3-Clause"
] | null | null | null |
listingmirror/__init__.py
|
listingmirror/async-gelf-handler
|
5b2e665e229277f914db0247ac174f7090882eb7
|
[
"BSD-3-Clause"
] | null | null | null |
listingmirror/__init__.py
|
listingmirror/async-gelf-handler
|
5b2e665e229277f914db0247ac174f7090882eb7
|
[
"BSD-3-Clause"
] | null | null | null |
from listingmirror.async_gelf_handler import AsyncGELFHandler
| 31
| 61
| 0.919355
| 7
| 62
| 7.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 62
| 1
| 62
| 62
| 0.948276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
13dbe420399b35f1e927c30dfece9507ab87febd
| 21
|
py
|
Python
|
test.py
|
535809017/szpyhon
|
ff5fc137037caf42dff02de69d8bc428580194e5
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
535809017/szpyhon
|
ff5fc137037caf42dff02de69d8bc428580194e5
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
535809017/szpyhon
|
ff5fc137037caf42dff02de69d8bc428580194e5
|
[
"Apache-2.0"
] | null | null | null |
# Smoke-test script: prints a greeting mixing ASCII and Chinese text.
print("chenteng 瓜娃子")
| 21
| 21
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 21
| 1
| 21
| 21
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
13dd512e22da8454c73da8672a7d38a1489f6f5e
| 65
|
py
|
Python
|
pydrawing/modules/beautifiers/characterize/__init__.py
|
CharlesPikachu/pydrawing
|
be95378a5667ea345f2a3760f8814dff255ebe15
|
[
"MIT"
] | 93
|
2022-01-18T01:42:58.000Z
|
2022-03-18T18:42:55.000Z
|
pydrawing/modules/beautifiers/characterize/__init__.py
|
CharlesPikachu/pydrawing
|
be95378a5667ea345f2a3760f8814dff255ebe15
|
[
"MIT"
] | null | null | null |
pydrawing/modules/beautifiers/characterize/__init__.py
|
CharlesPikachu/pydrawing
|
be95378a5667ea345f2a3760f8814dff255ebe15
|
[
"MIT"
] | 1
|
2022-02-17T04:36:17.000Z
|
2022-02-17T04:36:17.000Z
|
'''initialize'''
from .characterize import CharacterizeBeautifier
| 32.5
| 48
| 0.830769
| 5
| 65
| 10.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061538
| 65
| 2
| 48
| 32.5
| 0.885246
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
13e10dbf447f9ab378da72f01901d6b5af0fe28a
| 3,630
|
py
|
Python
|
tests/integration/modules/event.py
|
inthecloud247/salt
|
4feda14159ade741bd5e6f86c7ff31d617927007
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/modules/event.py
|
inthecloud247/salt
|
4feda14159ade741bd5e6f86c7ff31d617927007
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/modules/event.py
|
inthecloud247/salt
|
4feda14159ade741bd5e6f86c7ff31d617927007
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
tests.integration.modules.event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2012 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import python libs
import time
import threading
from Queue import Queue, Empty
# Import salt libs
import integration
from salt.utils import event
class EventModuleTest(integration.ModuleCase):
    '''
    Integration tests for Salt's ``event`` execution module.

    Each test starts a background thread that listens on an event bus
    (master or minion), fires an event via ``self.run_function``, and
    asserts on what the listener delivered through a shared ``Queue``.
    '''

    def test_event_fire_master(self):
        '''
        ``event.fire_master`` must deliver an event tagged ``salttest`` to a
        listener on the master bus; an event fired with a non-matching tag
        (``salttest-miss``) must not reach a listener waiting on ``salttest``.
        '''
        events = Queue()

        def get_event(events):
            # Runs in a background thread: block (up to 10 s) for the next
            # event tagged 'salttest' on the master event bus and hand it to
            # the main thread via the queue. get_event returns None on
            # timeout, so the queue always receives exactly one item.
            me = event.MasterEvent(self.master_opts['sock_dir'])
            events.put_nowait(
                me.get_event(wait=10, tag='salttest', full=False)
            )

        threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)  # give the listener thread time to connect before firing

        ret = self.run_function(
            'event.fire_master',
            ['event.fire_master: just test it!!!!', 'salttest']
        )
        self.assertTrue(ret)

        eventfired = events.get(block=True, timeout=10)
        self.assertIsNotNone(eventfired)
        self.assertIn(
            'event.fire_master: just test it!!!!', eventfired['data']
        )

        ret = self.run_function(
            'event.fire_master',
            ['event.fire_master: just test it!!!!', 'salttest-miss']
        )
        self.assertTrue(ret)
        # The listener only waited for tag 'salttest', so the mismatched tag
        # must leave the queue empty.
        with self.assertRaises(Empty):
            eventfired = events.get(block=True, timeout=10)

    # NOTE: the double-underscore prefix keeps unittest from discovering this
    # test, i.e. it is intentionally disabled.
    def __test_event_fire(self):
        '''
        Same scenario as above, but firing on the local minion event bus via
        ``event.fire`` and listening with a MinionEvent.
        '''
        events = Queue()

        def get_event(events):
            # Background listener on the minion event bus; see
            # test_event_fire_master for the queue protocol.
            me = event.MinionEvent(**self.minion_opts)
            events.put_nowait(
                me.get_event(wait=10, tag='salttest', full=False)
            )

        threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)  # give the listener thread time to connect before firing

        ret = self.run_function(
            'event.fire', ['event.fire: just test it!!!!', 'salttest']
        )
        self.assertTrue(ret)

        eventfired = events.get(block=True, timeout=10)
        self.assertIsNotNone(eventfired)
        self.assertIn('event.fire: just test it!!!!', eventfired)

        ret = self.run_function(
            'event.fire', ['event.fire: just test it!!!!', 'salttest-miss']
        )
        self.assertTrue(ret)
        # Mismatched tag: nothing new may arrive on the queue.
        with self.assertRaises(Empty):
            eventfired = events.get(block=True, timeout=10)

    # NOTE: also disabled via the double-underscore prefix.
    def __test_event_fire_ipc_mode_tcp(self):
        '''
        Same as __test_event_fire but targeting the sub-minion, whose
        configuration (per the test name) uses ipc_mode: tcp.
        '''
        events = Queue()

        def get_event(events):
            # Background listener on the sub-minion's event bus.
            me = event.MinionEvent(**self.sub_minion_opts)
            events.put_nowait(
                me.get_event(wait=10, tag='salttest', full=False)
            )

        threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)  # give the listener thread time to connect before firing

        ret = self.run_function(
            'event.fire', ['event.fire: just test it!!!!', 'salttest'],
            minion_tgt='sub_minion'
        )
        self.assertTrue(ret)

        eventfired = events.get(block=True, timeout=10)
        self.assertIsNotNone(eventfired)
        self.assertIn('event.fire: just test it!!!!', eventfired)

        ret = self.run_function(
            'event.fire', ['event.fire: just test it!!!!', 'salttest-miss'],
            minion_tgt='sub_minion'
        )
        self.assertTrue(ret)
        # Mismatched tag: nothing new may arrive on the queue.
        with self.assertRaises(Empty):
            eventfired = events.get(block=True, timeout=10)
# Allow running this test module directly, outside the full suite runner.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(EventModuleTest)
| 30.504202
| 76
| 0.593113
| 408
| 3,630
| 5.142157
| 0.235294
| 0.077216
| 0.042898
| 0.051478
| 0.770734
| 0.770734
| 0.763108
| 0.745949
| 0.745949
| 0.727359
| 0
| 0.010626
| 0.274105
| 3,630
| 118
| 77
| 30.762712
| 0.785199
| 0.115978
| 0
| 0.592593
| 0
| 0
| 0.149244
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 1
| 0.074074
| false
| 0
| 0.074074
| 0
| 0.160494
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
13e5a07b3b96016f7972d50f5a6e23bd8b28dfbf
| 10,591
|
py
|
Python
|
rmgpy/molecule/pathfinderTest.py
|
keceli/RMG-Py
|
17c7870195a4feb6e8bf8974292f9bcdca1a1d9d
|
[
"MIT"
] | 1
|
2017-12-18T18:43:22.000Z
|
2017-12-18T18:43:22.000Z
|
rmgpy/molecule/pathfinderTest.py
|
keceli/RMG-Py
|
17c7870195a4feb6e8bf8974292f9bcdca1a1d9d
|
[
"MIT"
] | null | null | null |
rmgpy/molecule/pathfinderTest.py
|
keceli/RMG-Py
|
17c7870195a4feb6e8bf8974292f9bcdca1a1d9d
|
[
"MIT"
] | 1
|
2021-08-14T13:47:18.000Z
|
2021-08-14T13:47:18.000Z
|
import unittest
from rmgpy.molecule import Molecule
from rmgpy.molecule.pathfinder import *
class FindButadieneTest(unittest.TestCase):
    """
    Tests for find_butadiene: given two atoms, it searches for a
    1,3-butadiene-like conjugated path between them and returns the path,
    or None when no such path exists.
    """

    def test_13butadiene(self):
        # 1,3-butadiene itself: the two terminal carbons are conjugated.
        molecule = Molecule().fromSMILES("C=CC=C")
        self.assertIsNotNone(
            find_butadiene(molecule.atoms[0], molecule.atoms[3]))

    def test_acrolein(self):
        # Acrolein: the C=C-C=O backbone forms a valid butadiene-like path.
        molecule = Molecule().fromSMILES("C=CC=O")
        self.assertIsNotNone(
            find_butadiene(molecule.atoms[0], molecule.atoms[3]))
        # A hydrogen as the end atom cannot terminate the path.
        self.assertIsNone(
            find_butadiene(molecule.atoms[0], molecule.atoms[4]))
        # Nor can a hydrogen start it.
        self.assertIsNone(
            find_butadiene(molecule.atoms[-1], molecule.atoms[3]))

    def test_135hexatriene(self):
        # 1,3,5-hexatriene: the terminal carbons of the longer conjugated
        # chain are still connected by a butadiene-like path.
        molecule = Molecule().fromSMILES("C=CC=CC=C")
        self.assertIsNotNone(
            find_butadiene(molecule.atoms[0], molecule.atoms[5]))

    def test_13cyclohexadiene(self):
        # 1,3-cyclohexadiene: ring atoms 1 and 4 are conjugated.
        adjlist = """
1 C u0 p0 c0 {2,D} {6,S} {7,S}
2 C u0 p0 c0 {1,D} {3,S} {8,S}
3 C u0 p0 c0 {2,S} {4,D} {9,S}
4 C u0 p0 c0 {3,D} {5,S} {10,S}
5 C u0 p0 c0 {4,S} {6,S} {11,S} {12,S}
6 C u0 p0 c0 {1,S} {5,S} {13,S} {14,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 H u0 p0 c0 {4,S}
11 H u0 p0 c0 {5,S}
12 H u0 p0 c0 {5,S}
13 H u0 p0 c0 {6,S}
14 H u0 p0 c0 {6,S}
"""
        molecule = Molecule().fromAdjacencyList(adjlist)
        self.assertIsNotNone(
            find_butadiene(molecule.atoms[0], molecule.atoms[3]))

    def test_14cyclohexadiene(self):
        # 1,4-cyclohexadiene: the double bonds are isolated, so no
        # conjugated path exists between atoms 1 and 4.
        adjlist = """
1 C u0 p0 c0 {2,D} {6,S} {7,S}
2 C u0 p0 c0 {1,D} {3,S} {8,S}
3 C u0 p0 c0 {2,S} {4,S} {9,S} {10,S}
4 C u0 p0 c0 {3,S} {5,D} {11,S}
5 C u0 p0 c0 {4,D} {6,S} {12,S}
6 C u0 p0 c0 {1,S} {5,S} {13,S} {14,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {4,S}
12 H u0 p0 c0 {5,S}
13 H u0 p0 c0 {6,S}
14 H u0 p0 c0 {6,S}
"""
        molecule = Molecule().fromAdjacencyList(adjlist)
        self.assertIsNone(
            find_butadiene(molecule.atoms[0], molecule.atoms[3]))

    def test_Benzene(self):
        # Benzene: fully conjugated ring, path exists between atoms 1 and 6.
        molecule = Molecule().fromSMILES("C1=CC=CC=C1")
        self.assertIsNotNone(
            find_butadiene(molecule.atoms[0], molecule.atoms[5]))

    def test_C4H4(self):
        # Cumulated diene (butatriene written as C=C=C=C).
        molecule = Molecule().fromSMILES("C=C=C=C")
        self.assertIsNotNone(
            find_butadiene(molecule.atoms[0], molecule.atoms[3]))
class FindAllylEndWithChargeTest(unittest.TestCase):
    """
    Tests for find_allyl_end_with_charge: starting from a given atom it
    returns allyl-type paths (alternating atom, bond, atom, ...) that end
    at a charged atom.

    Paths are compared via ``path[0::2]`` (the atoms only, skipping the
    bonds), converted with ``mol.atoms.index(atom) + 1`` to the 1-based
    indices used in the adjacency lists.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced by ``assertEqual`` throughout.
    """

    def test_C2H2O3(self):
        adjlist = """
1 C u0 p0 c0 {5,D} {6,S} {7,S}
2 C u0 p0 c0 {3,D} {4,S} {5,S}
3 O u0 p2 c0 {2,D}
4 O u0 p3 c-1 {2,S}
5 O u0 p1 c+1 {1,D} {2,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {1,S}
"""
        mol = Molecule().fromAdjacencyList(adjlist)
        start = mol.atoms[2]
        paths = find_allyl_end_with_charge(start)
        # Two allyl paths exist from atom 3, ending at the two charged
        # oxygens (atoms 4 and 5); sort for a deterministic comparison.
        idx_path = sorted([[mol.atoms.index(atom) + 1 for atom in path[0::2]] for path in paths])
        expected_idx_path = [[3, 2, 4], [3, 2, 5]]
        self.assertEqual(idx_path, expected_idx_path)

    def test_C3H2(self):
        inchi = "InChI=1S/C3H2/c1-3-2/h1-2H"
        mol = Molecule().fromInChI(inchi)
        start = mol.atoms[0]
        path = find_allyl_end_with_charge(start)[0]
        idx_path = [mol.atoms.index(atom) + 1 for atom in path[0::2]]
        expected_idx_path = [1, 3, 2]
        self.assertEqual(idx_path, expected_idx_path)

    def test_C3H4(self):
        inchi = "InChI=1S/C3H4/c1-3-2/h1,3H,2H2"
        mol = Molecule().fromInChI(inchi)
        start = mol.atoms[0]
        path = find_allyl_end_with_charge(start)[0]
        idx_path = [mol.atoms.index(atom) + 1 for atom in path[0::2]]
        expected_idx_path = [1, 3, 2]
        self.assertEqual(idx_path, expected_idx_path)

    def test_C3H2O3(self):
        adjlist = """
1 C u0 p0 c0 {2,D} {7,S} {8,S}
2 C u0 p0 c0 {1,D} {3,D}
3 C u0 p0 c0 {2,D} {4,S} {6,S}
4 O u0 p3 c-1 {3,S}
5 O u0 p2 c0 {6,D}
6 O u0 p1 c+1 {3,S} {5,D}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {1,S}
"""
        mol = Molecule().fromAdjacencyList(adjlist)
        start = mol.atoms[1]
        paths = find_allyl_end_with_charge(start)
        # Sort once for a deterministic comparison (the original sorted
        # the list twice, redundantly).
        idx_paths = sorted([[mol.atoms.index(atom) + 1 for atom in path[0::2]] for path in paths])
        expected_idx_paths = [[2, 3, 4], [2, 3, 6]]
        self.assertEqual(idx_paths, expected_idx_paths)

    def test_C3H4O4(self):
        inchi = "InChI=1S/C3H4O4/c4-3(5)1-2-7-6/h1-3,6H"
        mol = Molecule().fromInChI(inchi)
        start = mol.atoms[6]
        path = find_allyl_end_with_charge(start)[0]
        idx_path = [mol.atoms.index(atom) + 1 for atom in path[0::2]]
        expected_idx_path = [7, 2, 1]
        self.assertEqual(idx_path, expected_idx_path)

    def test_C5H6O(self):
        inchi = "InChI=1S/C5H6O/c6-5-3-1-2-4-5/h1-3,5H,4H2"
        mol = Molecule().fromInChI(inchi)
        start = mol.atoms[1]
        path = find_allyl_end_with_charge(start)[0]
        idx_path = [mol.atoms.index(atom) + 1 for atom in path[0::2]]
        expected_idx_path = [2, 1, 3]
        self.assertEqual(idx_path, expected_idx_path)
class FindButadieneEndWithChargeTest(unittest.TestCase):
    """
    Tests for find_butadiene_end_with_charge: starting from a given atom
    it returns a butadiene-like path (alternating atom, bond, atom, ...)
    ending at a charged atom.

    Paths are compared via ``path[0::2]`` (atoms only), converted with
    ``mol.atoms.index(atom) + 1`` to the 1-based adjacency-list indices.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced by ``assertEqual`` throughout.
    """

    def test_CO(self):
        # Carbon monoxide in its charge-separated resonance form.
        adjlist = """
1 C u0 p1 c-1 {2,T}
2 O u0 p1 c+1 {1,T}
"""
        mol = Molecule().fromAdjacencyList(adjlist)
        start = mol.atoms[0]
        path = find_butadiene_end_with_charge(start)
        idx_path = [mol.atoms.index(atom) + 1 for atom in path[0::2]]
        expected_idx_path = [1, 2]
        self.assertEqual(idx_path, expected_idx_path)

    def test_C2H2O3(self):
        adjlist = """
1 C u0 p0 c0 {5,D} {6,S} {7,S}
2 C u0 p0 c0 {3,D} {4,S} {5,S}
3 O u0 p2 c0 {2,D}
4 O u0 p3 c-1 {2,S}
5 O u0 p1 c+1 {1,D} {2,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {1,S}
"""
        mol = Molecule().fromAdjacencyList(adjlist)
        start = mol.atoms[0]
        path = find_butadiene_end_with_charge(start)
        idx_path = [mol.atoms.index(atom) + 1 for atom in path[0::2]]
        expected_idx_path = [1, 5]
        self.assertEqual(idx_path, expected_idx_path)

    def test_C3H2O3(self):
        adjlist = """
1 C u0 p0 c0 {2,D} {7,S} {8,S}
2 C u0 p0 c0 {1,D} {3,D}
3 C u0 p0 c0 {2,D} {4,S} {6,S}
4 O u0 p3 c-1 {3,S}
5 O u0 p2 c0 {6,D}
6 O u0 p1 c+1 {3,S} {5,D}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {1,S}
"""
        mol = Molecule().fromAdjacencyList(adjlist)
        start = mol.atoms[4]
        path = find_butadiene_end_with_charge(start)
        idx_path = [mol.atoms.index(atom) + 1 for atom in path[0::2]]
        expected_idx_path = [5, 6]
        self.assertEqual(idx_path, expected_idx_path)

    def test_C4H6O(self):
        adjlist = """
1 C u0 p0 c0 {2,S} {6,S} {7,S} {8,S}
2 C u0 p1 c-1 {1,S} {3,S} {9,S}
3 C u0 p0 c0 {2,S} {4,S} {10,S} {11,S}
4 C u0 p0 c0 {3,S} {5,T}
5 O u0 p1 c+1 {4,T}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {1,S}
9 H u0 p0 c0 {2,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {3,S}
"""
        mol = Molecule().fromAdjacencyList(adjlist)
        start = mol.atoms[3]
        path = find_butadiene_end_with_charge(start)
        idx_path = [mol.atoms.index(atom) + 1 for atom in path[0::2]]
        expected_idx_path = [4, 5]
        self.assertEqual(idx_path, expected_idx_path)

    def test_C5H6O_2(self):
        adjlist = """
1 C u0 p1 c-1 {5,S} {7,S} {8,S}
2 C u0 p0 c0 {3,D} {4,S} {9,S}
3 C u0 p0 c0 {2,D} {5,S} {10,S}
4 C u0 p0 c0 {2,S} {6,D} {11,S}
5 C u0 p0 c0 {1,S} {3,S} {6,S} {12,S}
6 O u0 p1 c+1 {4,D} {5,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {1,S}
9 H u0 p0 c0 {2,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {4,S}
12 H u0 p0 c0 {5,S}
"""
        mol = Molecule().fromAdjacencyList(adjlist)
        start = mol.atoms[2]
        path = find_butadiene_end_with_charge(start)
        idx_path = [mol.atoms.index(atom) + 1 for atom in path[0::2]]
        expected_idx_path = [3, 2, 4, 6]
        self.assertEqual(idx_path, expected_idx_path)

    def test_C6H6O4(self):
        inchi = "InChI=1S/C6H6O4/c1-2-4-9-6(7)3-5-10-8/h2-3H,1,5H2"
        mol = Molecule().fromInChI(inchi)
        start = mol.atoms[0]
        path = find_butadiene_end_with_charge(start)
        idx_path = [mol.atoms.index(atom) + 1 for atom in path[0::2]]
        expected_idx_path = [1, 2, 4, 9]
        self.assertEqual(idx_path, expected_idx_path)

    def test_C6H6O6(self):
        inchi = "InChI=1S/C6H6O6/c7-6(2-5-12-9)10-3-1-4-11-8/h1,7H,4-5H2"
        mol = Molecule().fromInChI(inchi)
        start = mol.atoms[2]
        path = find_butadiene_end_with_charge(start)
        idx_path = [mol.atoms.index(atom) + 1 for atom in path[0::2]]
        expected_idx_path = [3, 10]
        self.assertEqual(idx_path, expected_idx_path)
class ShortestPathTest(unittest.TestCase):
    """
    Tests for find_shortest_path: the returned path between two atoms
    includes both endpoints, so a path over one intermediate atom has
    length 3.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced by ``assertEqual``.
    """

    def test_CCC(self):
        # Propane: C1-C2-C3, shortest path between the terminal carbons.
        smi = 'CCC'
        mol = Molecule().fromSMILES(smi)
        start = mol.atoms[0]
        end = mol.atoms[2]
        path = find_shortest_path(start, end)
        self.assertEqual(len(path), 3)

    def test_Cyclohexane(self):
        # Ring: both directions exist; the shorter one (2 bonds) wins.
        smi = 'C1CCCCC1'
        mol = Molecule().fromSMILES(smi)
        start = mol.atoms[0]
        end = mol.atoms[2]
        path = find_shortest_path(start, end)
        self.assertEqual(len(path), 3)

    def test_bicyclo420octane(self):
        # Fused bicyclic: the path through the shared ring edge is shortest.
        smi = 'C12CCC1CCCC2'
        mol = Molecule().fromSMILES(smi)
        start = mol.atoms[0]
        end = mol.atoms[4]
        path = find_shortest_path(start, end)
        self.assertEqual(len(path), 3)
class DistanceComputingTest(unittest.TestCase):
    """
    Tests for compute_atom_distance: given 1-based atom indices and a
    molecule, it returns a dict mapping each index pair (as a tuple) to
    the topological distance between the two atoms.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced by ``assertEqual``.
    """

    def test_2_atoms(self):
        # Propane, adjacent carbons: one pair, distance 1.
        smi = 'CCC'
        mol = Molecule().fromSMILES(smi)
        atom_indices = [1, 2]
        distances = compute_atom_distance(atom_indices, mol)
        expected = {(1, 2): 1}
        self.assertEqual(distances, expected)

    def test_3_atoms(self):
        # All three carbons of propane: every unordered pair is reported.
        smi = 'CCC'
        mol = Molecule().fromSMILES(smi)
        atom_indices = [1, 2, 3]
        distances = compute_atom_distance(atom_indices, mol)
        expected = {
            (1, 2): 1,
            (1, 3): 2,
            (2, 3): 1,
        }
        self.assertEqual(distances, expected)
| 29.33795
| 96
| 0.574261
| 1,859
| 10,591
| 3.173211
| 0.062937
| 0.044075
| 0.066113
| 0.042719
| 0.84438
| 0.814545
| 0.788947
| 0.765553
| 0.69978
| 0.679268
| 0
| 0.106644
| 0.27665
| 10,591
| 360
| 97
| 29.419444
| 0.66336
| 0.00982
| 0
| 0.675958
| 0
| 0.045296
| 0.235872
| 0.022814
| 0
| 0
| 0
| 0
| 0.094077
| 1
| 0.087108
| false
| 0
| 0.010453
| 0
| 0.114983
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
13eeb743a426ed16a780c92b4222db66ab05413b
| 109
|
py
|
Python
|
aws_lambda_builders/workflows/python_pip/__init__.py
|
ekmixon/aws-lambda-builders
|
01031a52ea314217e1c373f6ab2edc0a265ce552
|
[
"Apache-2.0"
] | 180
|
2018-11-09T04:51:19.000Z
|
2020-08-06T21:43:20.000Z
|
aws_lambda_builders/workflows/python_pip/__init__.py
|
ekmixon/aws-lambda-builders
|
01031a52ea314217e1c373f6ab2edc0a265ce552
|
[
"Apache-2.0"
] | 108
|
2018-11-08T18:34:51.000Z
|
2020-08-12T17:59:41.000Z
|
aws_lambda_builders/workflows/python_pip/__init__.py
|
ekmixon/aws-lambda-builders
|
01031a52ea314217e1c373f6ab2edc0a265ce552
|
[
"Apache-2.0"
] | 91
|
2018-11-08T22:58:00.000Z
|
2020-08-17T21:15:31.000Z
|
"""
Builds Python Lambda functions using PIP dependency manager
"""
from .workflow import PythonPipWorkflow
| 18.166667
| 59
| 0.798165
| 12
| 109
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137615
| 109
| 5
| 60
| 21.8
| 0.925532
| 0.541284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b93f0b29120945a7fc15cebb38636644a2cfbd34
| 113
|
py
|
Python
|
count-odd-numbers-in-an-interval-range/count-odd-numbers-in-an-interval-range.py
|
Atri10/Leet-code---Atri_Patel
|
49fc59b9147a44ab04a66128fbb2ef259b5f7b7c
|
[
"MIT"
] | 1
|
2021-10-10T20:21:18.000Z
|
2021-10-10T20:21:18.000Z
|
count-odd-numbers-in-an-interval-range/count-odd-numbers-in-an-interval-range.py
|
Atri10/Leet-code---Atri_Patel
|
49fc59b9147a44ab04a66128fbb2ef259b5f7b7c
|
[
"MIT"
] | null | null | null |
count-odd-numbers-in-an-interval-range/count-odd-numbers-in-an-interval-range.py
|
Atri10/Leet-code---Atri_Patel
|
49fc59b9147a44ab04a66128fbb2ef259b5f7b7c
|
[
"MIT"
] | null | null | null |
class Solution:
    def countOdds(self, low: int, high: int) -> int:
        """Return how many odd integers lie in the inclusive range [low, high]."""
        # There are (n + 1) // 2 odd numbers in [0, n]; subtracting the
        # count strictly below `low` (i.e. in [0, low - 1], which is
        # low // 2) leaves the odds inside the requested range.
        odds_up_to_high = (high + 1) // 2
        odds_below_low = low // 2
        return odds_up_to_high - odds_below_low
| 37.666667
| 52
| 0.539823
| 16
| 113
| 3.8125
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036145
| 0.265487
| 113
| 3
| 53
| 37.666667
| 0.698795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b944c1f95f4e0a470e0980c01ea145c1177148d3
| 169
|
py
|
Python
|
space_game/events/creation_events/NewObjectCreatedEvent.py
|
Iwomichu/probable-giggle
|
2af5ed83a60d65ec9d509c217cb5fcb880d5dbcc
|
[
"MIT"
] | 1
|
2020-11-30T11:21:21.000Z
|
2020-11-30T11:21:21.000Z
|
space_game/events/creation_events/NewObjectCreatedEvent.py
|
Iwomichu/probable-giggle
|
2af5ed83a60d65ec9d509c217cb5fcb880d5dbcc
|
[
"MIT"
] | 5
|
2020-11-03T16:46:49.000Z
|
2021-01-24T14:29:24.000Z
|
space_game/events/creation_events/NewObjectCreatedEvent.py
|
Iwomichu/probable-giggle
|
2af5ed83a60d65ec9d509c217cb5fcb880d5dbcc
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from typing import Any
from space_game.events.Event import Event
@dataclass
class NewObjectCreatedEvent(Event):
    """Event signalling that a new object has been created.

    Payload:
        new_object: the object that was just created. Typed ``Any`` —
            presumably any game entity may be carried; confirm against
            the emitters of this event.
    """
    # The newly created object carried by this event.
    new_object: Any
| 16.9
| 41
| 0.816568
| 22
| 169
| 6.181818
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142012
| 169
| 9
| 42
| 18.777778
| 0.937931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b96805d3b341b6ddc79ee85dfe8ce12ff3dedaaf
| 361
|
py
|
Python
|
havsim/calibration/__init__.py
|
ronan-keane/hav-sim
|
0aaf9674e987822ff2dc90c74613d5e68e8ef0ce
|
[
"Apache-2.0"
] | null | null | null |
havsim/calibration/__init__.py
|
ronan-keane/hav-sim
|
0aaf9674e987822ff2dc90c74613d5e68e8ef0ce
|
[
"Apache-2.0"
] | null | null | null |
havsim/calibration/__init__.py
|
ronan-keane/hav-sim
|
0aaf9674e987822ff2dc90c74613d5e68e8ef0ce
|
[
"Apache-2.0"
] | 2
|
2020-09-30T22:44:37.000Z
|
2021-05-09T07:36:28.000Z
|
"""
@author: rlk268@cornell.edu
"""
from havsim.calibration import calibration_models
from havsim.calibration import calibration
from havsim.calibration import platoon_formation
from havsim.calibration.calibration import make_calibration
from havsim.calibration.calibration import CalibrationVehicle
from havsim.calibration.platoon_formation import sortveh
| 25.785714
| 61
| 0.858726
| 41
| 361
| 7.463415
| 0.341463
| 0.196078
| 0.411765
| 0.264706
| 0.496732
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009119
| 0.088643
| 361
| 13
| 62
| 27.769231
| 0.920973
| 0.074792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b97c93d9eeceb24f63aea4096cebd230ce68fea2
| 103
|
py
|
Python
|
python/testData/inspections/PyCompatibilityInspection/setLiteralExpression.py
|
pavel-ryzhov/intellij-community
|
8f62306112e5e5f56e5af87b4104ead2aa831e22
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/PyCompatibilityInspection/setLiteralExpression.py
|
pavel-ryzhov/intellij-community
|
8f62306112e5e5f56e5af87b4104ead2aa831e22
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/PyCompatibilityInspection/setLiteralExpression.py
|
pavel-ryzhov/intellij-community
|
8f62306112e5e5f56e5af87b4104ead2aa831e22
|
[
"Apache-2.0"
] | 1
|
2019-07-18T16:50:52.000Z
|
2019-07-18T16:50:52.000Z
|
var = <warning descr="Python version 2.6, 3.0 do not support set literal expressions">{1, 2}</warning>
| 51.5
| 102
| 0.718447
| 18
| 103
| 4.111111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067416
| 0.135922
| 103
| 1
| 103
| 103
| 0.764045
| 0
| 0
| 0
| 0
| 0
| 0.601942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b98e48964200b8e486b3dd733b102cfa585f34bb
| 229,283
|
py
|
Python
|
Test_Generator_Module/test_generator_modul_zuordnungsfrage.py
|
jakobis95/ILIAS---Test-Generator
|
77f3672795ba9fbb37a63b9b75804146e00e8acc
|
[
"MIT"
] | null | null | null |
Test_Generator_Module/test_generator_modul_zuordnungsfrage.py
|
jakobis95/ILIAS---Test-Generator
|
77f3672795ba9fbb37a63b9b75804146e00e8acc
|
[
"MIT"
] | null | null | null |
Test_Generator_Module/test_generator_modul_zuordnungsfrage.py
|
jakobis95/ILIAS---Test-Generator
|
77f3672795ba9fbb37a63b9b75804146e00e8acc
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as ET
from tkinter import * # Stellt die Funktionen für z.B. Labels & Entrys zur Verfügung
from tkinter import ttk # Stellt die Funktionen der Comboboxen (Auswahlboxen) zur Verfügung
from tkinter import filedialog
import base64
import pathlib
import sqlite3
import os
import pprint
import pandas as pd
from datetime import datetime
from PIL import ImageTk, Image # Zur Preview von ausgewählten Bildern
import xlsxwriter # import/export von excel Dateien
import shutil # Wird verwendet um Verzeichnisse zu kopieren
### Eigene Dateien / Module
from Test_Generator_Module import test_generator_modul_datenbanken_anzeigen
from Test_Generator_Module import test_generator_modul_datenbanken_erstellen
from Test_Generator_Module import test_generator_modul_ilias_test_struktur
from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung
from Test_Generator_Module import test_generator_modul_ilias_import_test_datei
class Zuordnungsfrage:
def __init__(self, app, zuordnungsfrage_tab, project_root_path):
self.zuordnungsfrage_tab = zuordnungsfrage_tab
############## SET QUESTION_TYPE SPECIFIC NAMES FOR DATABASE AND WORBOOK/SHEET
# Name des Fragentyps
self.mq_question_type_name = "zuordnungsfrage"
# Name für Datenbank und Tabelle
self.mq_database = "ilias_zuordnungsfrage_db.db"
self.mq_database_table = "zuordnungsfrage_table"
# Name für Tabellenkalulations-Datei und Tabelle
self.mq_xlsx_workbook_name = "Zuordnungsfrage_DB_export_file"
self.mq_xlsx_worksheet_name = "Zuordnungsfrage - Database"
############## SET IMAGE VARIABLES
# Die Variablen müssen am Anfang des Programms gesetzt werden, um diese an andere Funktionen weitergeben zu können
self.mq_description_img_name_1 = ""
self.mq_description_img_name_2 = ""
self.mq_description_img_name_3 = ""
self.mq_description_img_data_1 = ""
self.mq_description_img_data_2 = ""
self.mq_description_img_data_3 = ""
self.mq_description_img_path_1 = ""
self.mq_description_img_path_2 = ""
self.mq_description_img_path_3 = ""
self.mq_mix_answers_value = 0
############## DEFINE MATCHING QUESTIONS PATHS
# Pfad des Projekts und des MQ-Moduls
self.project_root_path = project_root_path
self.zuordnungsfrage_files_path = os.path.normpath(os.path.join(self.project_root_path, "ILIAS-Zuordnungsfrage"))
self.zuordnungsfrage_files_path_pool_output = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_ilias_pool_abgabe"))
# Pfad für die Datenbank
self.database_zuordnungsfrage_path = os.path.normpath(os.path.join(self.project_root_path, "Test_Generator_Datenbanken", "ilias_zuordnungsfrage_db.db"))
# Pfad für ILIAS-Test Vorlage
self.zuordnungsfrage_test_qti_file_path_template = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__qti__.xml"))
self.zuordnungsfrage_test_tst_file_path_template = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__tst__.xml"))
# Pfad für ILIAS-Test Dateien (zum hochladen in ILIAS)
self.zuordnungsfrage_test_qti_file_path_output = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path,"mq_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__qti_2040314.xml"))
self.zuordnungsfrage_test_tst_file_path_output = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path,"mq_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__tst_2040314.xml"))
self.zuordnungsfrage_test_img_file_path = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path,"mq_ilias_test_abgabe", "1604407426__0__tst_2040314", "objects"))
# Pfad für ILIAS-Pool Vorlage
self.zuordnungsfrage_pool_qti_file_path_template = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qti__.xml"))
self.zuordnungsfrage_pool_qpl_file_path_template = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qpl__.xml"))
# Pfad für ILIAS-Pool Dateien (zum hochladen in ILIAS)
# Die Pfade für die qti.xml und qpl.xml werden erst zur Laufzeit bestimmt.
# Die Deklaration ist daher unter "class Create_Zuordnungsfrage_Pool"
self.zuordnungsfrage_pool_directory_output = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path,"mq_ilias_pool_abgabe"))
###################### "DATENBANK ENTRIES UND INDEX DICT ###################
# Dictionary aus zwei Listen erstellen
self.mq_db_find_entries = []
self.mq_db_find_indexes = []
self.mq_db_column_names_list = []
self.mq_collection_of_question_titles = []
connect = sqlite3.connect(self.database_zuordnungsfrage_path)
cursor = connect.execute('select * from ' + self.mq_database_table)
self.mq_db_column_names_list = list(map(lambda x: x[0], cursor.description))
self.db_column_names_string = ', :'.join(self.mq_db_column_names_list)
self.db_column_names_string = ":" + self.db_column_names_string
for i in range(len(self.mq_db_column_names_list)):
self.mq_db_find_indexes.append(i)
"""
# Durch list(map(lambdax: x[0])) werden die Spaltennamen aus der DB ausgelesen
cursor = conn.execute('select * from ' + self.mq_database_table)
db_column_names_list = list(map(lambda x: x[0], cursor.description))
db_column_names_string = ', :'.join(db_column_names_list)
db_column_names_string = ":" + db_column_names_string
"""
self.mq_db_entry_to_index_dict = dict(zip((self.mq_db_column_names_list), (self.mq_db_find_indexes)))
connect.commit()
connect.close()
# Combobox Entries Dict
self.assignment_pairs_definitions_to_int_dict = {"Definition 1": 0, "Definition 2": 1, "Definition 3": 2, "Definition 4": 3, "Definition 5": 4,
"Definition 6": 5, "Definition 7": 6, "Definition 8": 7, "Definition 9": 8, "Definition 10": 9,
}
self.assignment_pairs_terms_to_int_dict = {"Term 1": 0, "Term 2": 1, "Term 3": 2, "Term 4": 3, "Term 5": 4,
"Term 6": 5, "Term 7": 6, "Term 8": 7, "Term 9": 8, "Term 10": 9,
}
#################### FRAMES
self.mq_frame_ilias_test_title = LabelFrame(self.zuordnungsfrage_tab, text="Testname & Autor", padx=5, pady=5)
self.mq_frame_ilias_test_title.grid(row=0, column=0, padx=10, pady=10, sticky="NW")
self.mq_frame = LabelFrame(self.zuordnungsfrage_tab, text="Zuordnungsfrage", padx=5, pady=5)
self.mq_frame.grid(row=1, column=0, padx=10, pady=10, sticky="NW")
self.mq_frame_question_attributes = LabelFrame(self.zuordnungsfrage_tab, text="Fragen Attribute", padx=5, pady=5)
self.mq_frame_question_attributes.grid(row=2, column=0, padx=10, pady=10, sticky="NE")
self.mq_frame_database = LabelFrame(self.zuordnungsfrage_tab, text="Zuordnungsfrage-Datenbank", padx=5, pady=5)
self.mq_frame_database.grid(row=2, column=0, padx=10, pady=10, sticky="NW")
self.mq_frame_create_zuordnungsfrage_test = LabelFrame(self.zuordnungsfrage_tab, text="MQ-Test erstellen", padx=5, pady=5)
self.mq_frame_create_zuordnungsfrage_test.grid(row=2, column=0, padx=105, pady=120, sticky="NE")
self.mq_frame_taxonomy_settings = LabelFrame(self.zuordnungsfrage_tab, text="Taxonomie Einstellungen", padx=5, pady=5)
self.mq_frame_taxonomy_settings.grid(row=0, column=1, padx=10, pady=10, sticky="NW")
self.mq_frame_question_description_functions = LabelFrame(self.zuordnungsfrage_tab, text="Fragentext Funktionen", padx=5, pady=5)
self.mq_frame_question_description_functions.grid(row=1, column=1, padx=10, pady=10, sticky="NW")
self.mq_frame_excel_import_export = LabelFrame(self.zuordnungsfrage_tab, text="Excel Import/Export", padx=5, pady=5)
self.mq_frame_excel_import_export.grid(row=2, column=1, padx=10, pady=10, sticky="NW")
self.mq_frame_question_permutation = LabelFrame(self.zuordnungsfrage_tab, text="Fragen - Permutation", padx=5, pady=5)
self.mq_frame_question_permutation.grid(row=2, column=1,padx=10, pady=120, sticky="NW")
self.mq_frame_description_picture = LabelFrame(self.zuordnungsfrage_tab, text="Fragen-Text Bild", padx=5, pady=5)
self.mq_frame_description_picture.grid(row=1, column=2, padx=10, pady=10, sticky="NW")
###################### "Testname & Autor" - FRAME -------- LABELS / ENTRYS / BUTTONS ################
self.mq_ilias_test_title_label = Label(self.mq_frame_ilias_test_title, text="Name des Tests")
self.mq_ilias_test_title_label.grid(row=0, column=0, sticky=W)
self.mq_ilias_test_title_entry = Entry(self.mq_frame_ilias_test_title, width=60)
self.mq_ilias_test_title_entry.grid(row=0, column=1, sticky=W, padx=30)
self.mq_ilias_test_autor_label = Label(self.mq_frame_ilias_test_title, text="Autor")
self.mq_ilias_test_autor_label.grid(row=1, column=0, sticky=W)
self.mq_ilias_test_autor_entry = Entry(self.mq_frame_ilias_test_title, width=60)
self.mq_ilias_test_autor_entry.grid(row=1, column=1, sticky=W, padx=30)
###################### "Fragen-Text Bild" - FRAME -------- LABELS / ENTRYS / BUTTONS ################
# Hinzufügen Bild 1
self.mq_var_use_image_1 = IntVar()
self.mq_check_use_image_1_in_description = Checkbutton(self.mq_frame_question_description_functions, text="Bild 1 hochladen?", variable=self.mq_var_use_image_1, onvalue=1, offvalue=0)
self.mq_check_use_image_1_in_description.deselect()
self.mq_check_use_image_1_in_description.grid(row=5, column=0, sticky=W, padx=90, pady=(10, 0))
# Hinzufügen Bild 2
self.mq_var_use_image_2 = IntVar()
self.mq_check_use_image_2_in_description = Checkbutton(self.mq_frame_question_description_functions, text="Bild 2 hochladen?", variable=self.mq_var_use_image_2, onvalue=1, offvalue=0)
self.mq_check_use_image_2_in_description.deselect()
self.mq_check_use_image_2_in_description.grid(row=6, column=0, sticky=W, padx=90)
# Hinzufügen Bild 3
self.mq_var_use_image_3 = IntVar()
self.mq_check_use_image_3_in_description = Checkbutton(self.mq_frame_question_description_functions, text="Bild 3 hochladen?", variable=self.mq_var_use_image_3, onvalue=1, offvalue=0)
self.mq_check_use_image_3_in_description.deselect()
self.mq_check_use_image_3_in_description.grid(row=7, column=0, sticky=W, padx=90)
# Buttons - Bild hinzufügen & Bild löschen
self.mq_add_img_to_description_btn = Button(self.mq_frame_question_description_functions, text="Bild hinzufügen", command=lambda: mq_add_image_to_description_and_create_labels())
self.mq_add_img_to_description_btn.grid(row=8, column=0, sticky=W, padx = 10, pady=(20,0))
def mq_add_image_to_description_and_create_labels():
# Erstelle Labels
self.mq_question_description_img_1_filename_label = Label(self.mq_frame_description_picture, text=self.mq_description_img_name_1)
self.mq_question_description_img_2_filename_label = Label(self.mq_frame_description_picture, text=self.mq_description_img_name_2)
self.mq_question_description_img_3_filename_label = Label(self.mq_frame_description_picture, text=self.mq_description_img_name_3)
self.mq_description_img_name_1, self.mq_description_img_name_2, self.mq_description_img_name_3, self.mq_description_img_path_1, self.mq_description_img_path_2, self.mq_description_img_path_3, self.mq_question_description_img_1_filename_label, self.mq_question_description_img_2_filename_label, self.mq_question_description_img_3_filename_label = test_generator_modul_ilias_test_struktur.Additional_Funtions.add_image_to_description(
self,
self.mq_var_use_image_1.get(),
self.mq_var_use_image_2.get(),
self.mq_var_use_image_3.get(),
self.mq_frame_description_picture,
self.mq_description_img_name_1,
self.mq_description_img_name_2,
self.mq_description_img_name_3,
self.mq_description_img_path_1,
self.mq_description_img_path_2,
self.mq_description_img_path_3,
)
self.mq_remove_img_from_description_btn = Button(self.mq_frame_question_description_functions, text="Bild entfernen", command=lambda: mq_add_image_to_description_and_delete_labels())
self.mq_remove_img_from_description_btn.grid(row=8, column=0, sticky=W, padx=120, pady=(20,0))
def mq_add_image_to_description_and_delete_labels():
    """Remove attached description images and their filename labels.

    Delegates to the shared ILIAS test-structure module, which destroys the
    label widgets for the slots selected via the checkboxes and returns the
    reset image names for all three slots.
    """
    self.mq_description_img_name_1, self.mq_description_img_name_2, self.mq_description_img_name_3 = test_generator_modul_ilias_test_struktur.Additional_Funtions.delete_image_from_description(
        self,
        self.mq_var_use_image_1.get(),
        self.mq_var_use_image_2.get(),
        self.mq_var_use_image_3.get(),
        self.mq_question_description_img_1_filename_label,
        self.mq_question_description_img_2_filename_label,
        self.mq_question_description_img_3_filename_label,
        self.mq_description_img_name_1,
        self.mq_description_img_name_2,
        self.mq_description_img_name_3,
    )
###################### "Taxonomie Einstellungen" - FRAME -------- LABELS / ENTRYS / BUTTONS ################
# Opens the shared taxonomy settings dialog.
self.mq_taxonomy_settings_btn = Button(self.mq_frame_taxonomy_settings, text="Taxonomie-Einstellungen",command=lambda: test_generator_modul_taxonomie_und_textformatierung.Taxonomie.__init__(self))
self.mq_taxonomy_settings_btn.grid(row=3, column=0, columnspan = 2, padx=10, sticky="W")
###################### "Fragentext Funktionen" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
# Text-formatting helpers: each button operates on the question text widget
# (LaTeX / subscript / superscript / italic) or marks the position where
# image 1/2/3 should appear in the rendered question text.
self.add_latex_term_btn = Button(self.mq_frame_question_description_functions, text="Text \"Latex\"", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_latex(self, self.mq_question_description_main_entry))
self.add_latex_term_btn.grid(row=1, column=0, padx=10, sticky="W")
self.set_text_sub_btn = Button(self.mq_frame_question_description_functions, text="Text \"Tiefgestellt\"", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sub(self, self.mq_question_description_main_entry))
self.set_text_sub_btn .grid(row=2, column=0, padx=10, pady=(10, 0), sticky="W")
self.set_text_sup_btn = Button(self.mq_frame_question_description_functions, text="Text \"Hochgestellt\"", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sup(self, self.mq_question_description_main_entry))
self.set_text_sup_btn.grid(row=3, column=0, padx=10, sticky="W")
self.set_text_italic_btn = Button(self.mq_frame_question_description_functions, text="Text \"Kursiv\"", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_italic(self, self.mq_question_description_main_entry))
self.set_text_italic_btn.grid(row=4, column=0, padx=10, sticky="W")
self.set_postion_for_picture_1_btn = Button(self.mq_frame_question_description_functions, text="Pos. Bild 1", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_1(self, self.mq_question_description_main_entry))
self.set_postion_for_picture_1_btn.grid(row=5, column=0, padx=10, pady=(10, 0), sticky="W")
self.set_postion_for_picture_2_btn = Button(self.mq_frame_question_description_functions, text="Pos. Bild 2", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_2(self, self.mq_question_description_main_entry))
self.set_postion_for_picture_2_btn.grid(row=6, column=0, padx=10, sticky="W")
self.set_postion_for_picture_3_btn = Button(self.mq_frame_question_description_functions, text="Pos. Bild 3", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_3(self, self.mq_question_description_main_entry))
self.set_postion_for_picture_3_btn.grid(row=7, column=0, padx=10, sticky="W")
###################### "Fragen Attribute" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
# Question metadata: difficulty, category, type (pre-filled, read by the
# exporter) and pool tag.
self.mq_question_difficulty_label = Label(self.mq_frame_question_attributes, text="Schwierigkeit")
self.mq_question_difficulty_label.grid(row=0, column=0, pady=5, padx=5, sticky=W)
self.mq_question_difficulty_entry = Entry(self.mq_frame_question_attributes, width=15)
self.mq_question_difficulty_entry.grid(row=0, column=1, pady=5, padx=5, sticky=W)
self.mq_question_category_label = Label(self.mq_frame_question_attributes, text="Fragenkategorie")
self.mq_question_category_label.grid(row=1, column=0, pady=5, padx=5, sticky=W)
self.mq_question_category_entry = Entry(self.mq_frame_question_attributes, width=15)
self.mq_question_category_entry.grid(row=1, column=1, pady=5, padx=5, sticky=W)
self.mq_question_type_label = Label(self.mq_frame_question_attributes, text="Fragen-Typ")
self.mq_question_type_label.grid(row=0, column=2, pady=5, padx=5, sticky=W)
self.mq_question_type_entry = Entry(self.mq_frame_question_attributes, width=15)
self.mq_question_type_entry.grid(row=0, column=3, pady=5, padx=5, sticky=W)
# Pre-fill the question type with this module's fixed type name
self.mq_question_type_entry.insert(0, "Zuordnungsfrage")
self.mq_question_pool_tag_label = Label(self.mq_frame_question_attributes, text="Pool-Tag")
self.mq_question_pool_tag_label.grid(row=1, column=2, pady=5, padx=5, sticky=W)
self.mq_question_pool_tag_entry = Entry(self.mq_frame_question_attributes, width=15)
self.mq_question_pool_tag_entry.grid(row=1, column=3, pady=5, padx=5, sticky=W)
###################### "Zuordnungsfrage-Test erstellen" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
# NOTE(review): the commented-out block below is an older revision of the
# test/pool buttons that are created further down in this section; kept for
# reference — consider deleting.
# self.create_zuordnungsfrage_test_btn = Button(self.mq_frame_create_zuordnungsfrage_test, text="MQ-Test erstellen", command=lambda: Create_Zuordnungsfrage_Test.__init__(self, self.mq_db_entry_to_index_dict))
# self.create_zuordnungsfrage_test_btn.grid(row=2, column=0, sticky=W)
# self.create_zuordnungsfrage_test_entry = Entry(self.mq_frame_create_zuordnungsfrage_test, width=15)
# self.create_zuordnungsfrage_test_entry.grid(row=2, column=1, sticky=W, padx=20)
#
# self.create_zuordnungsfrage_pool_btn = Button(self.mq_frame_create_zuordnungsfrage_test, text="MQ-Pool erstellen", command=lambda: Create_Zuordnungsfrage_Pool.__init__(self, self.mq_db_entry_to_index_dict))
# self.create_zuordnungsfrage_pool_btn.grid(row=3, column=0, sticky=W, pady=10)
# self.create_zuordnungsfrage_pool_entry = Entry(self.mq_frame_create_zuordnungsfrage_test, width=15)
# self.create_zuordnungsfrage_pool_entry.grid(row=3, column=1, sticky=W, padx=20, pady=10)
# Button "create matching-question test"
self.create_zuordnungsfrage_test_btn = Button(self.mq_frame_create_zuordnungsfrage_test, text="MQ-Test erstellen", command=lambda: Create_Zuordnungsfrage_Test.__init__(self, self.mq_db_entry_to_index_dict))
self.create_zuordnungsfrage_test_btn.grid(row=0, column=0, sticky=W)
self.create_zuordnungsfrage_test_entry = Entry(self.mq_frame_create_zuordnungsfrage_test, width=15)
self.create_zuordnungsfrage_test_entry.grid(row=0, column=1, sticky=W, padx=0)
# Checkbox "apply test settings?"
self.create_test_settings_label = Label(self.mq_frame_create_zuordnungsfrage_test, text="Test-Einstellungen übernehmen?")
self.create_test_settings_label.grid(row=1, column=0, pady=5, padx=5, sticky=W)
self.var_test_settings = IntVar()
self.check_test_settings = Checkbutton(self.mq_frame_create_zuordnungsfrage_test, text="", variable=self.var_test_settings, onvalue=1, offvalue=0)
self.check_test_settings.deselect()
self.check_test_settings.grid(row=1, column=1, sticky=W)
# Checkbox "use LaTeX for the question text?"
self.mq_use_latex_on_text_label = Label(self.mq_frame_create_zuordnungsfrage_test, text="Latex für Fragentext nutzen?")
self.mq_use_latex_on_text_label.grid(row=2, column=0, sticky=W, padx=5)
self.mq_var_use_latex_on_text_check = IntVar()
self.mq_use_latex_on_text_check = Checkbutton(self.mq_frame_create_zuordnungsfrage_test, text="", variable=self.mq_var_use_latex_on_text_check, onvalue=1, offvalue=0)
self.mq_use_latex_on_text_check.deselect()
self.mq_use_latex_on_text_check.grid(row=2, column=1, sticky=W)
# Checkbox "generate all entries from the DB?"
self.mq_create_question_pool_all_label = Label(self.mq_frame_create_zuordnungsfrage_test, text="Alle Einträge aus der DB erzeugen?")
self.mq_create_question_pool_all_label.grid(row=4, column=0, pady=(10,0), padx=5, sticky=W)
self.mq_var_create_question_pool_all_check = IntVar()
self.mq_create_question_pool_all = Checkbutton(self.mq_frame_create_zuordnungsfrage_test, text="", variable=self.mq_var_create_question_pool_all_check, onvalue=1, offvalue=0)
# NOTE(review): unlike the other checkboxes this one is not deselect()-ed;
# the explicit reset below was commented out — presumably IntVar's default 0
# is relied upon. Confirm before re-enabling.
#self.mq_var_create_question_pool_all_check.set(0)
self.mq_create_question_pool_all.grid(row=4, column=1, sticky=W, pady=(10,0))
# Button "create matching-question pool"
self.create_zuordnungsfrage_pool_btn = Button(self.mq_frame_create_zuordnungsfrage_test, text="MQ-Pool erstellen", command=lambda: Create_Zuordnungsfrage_Pool.__init__(self, self.mq_db_entry_to_index_dict, self.mq_var_create_question_pool_all_check.get()))
self.create_zuordnungsfrage_pool_btn.grid(row=3, column=0, sticky=W, pady=(30,0))
self.create_zuordnungsfrage_pool_entry = Entry(self.mq_frame_create_zuordnungsfrage_test, width=15)
self.create_zuordnungsfrage_pool_entry.grid(row=3, column=1, sticky=W, padx=0, pady=(30,0))
###################### "Zuordnungsfrage-Datenbank" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
# Database actions: view, save, delete, clear GUI, edit and load entries.
self.mq_database_show_db_zuordnungsfrage_btn = Button(self.mq_frame_database, text="MQ - Datenbank anzeigen", command=lambda: test_generator_modul_datenbanken_anzeigen.MainGUI.__init__(self, self.database_zuordnungsfrage_path, "zuordnungsfrage_table"))
self.mq_database_show_db_zuordnungsfrage_btn.grid(row=0, column=0, sticky=W, pady=5)
self.mq_database_save_id_to_db_zuordnungsfrage_btn = Button(self.mq_frame_database, text="Speichern unter neuer ID", command=lambda: Zuordnungsfrage.mq_save_id_to_db(self))
self.mq_database_save_id_to_db_zuordnungsfrage_btn.grid(row=1, column=0, sticky=W, pady=5)
# Delete button + entry for the ID to delete (entry overlaps the button cell
# via padx offset)
self.mq_database_delete_id_from_db_btn = Button(self.mq_frame_database, text="ID Löschen", command=lambda: Zuordnungsfrage.mq_delete_id_from_db(self))
self.mq_database_delete_id_from_db_btn.grid(row=6, column=0, sticky=W, pady=5)
self.mq_delete_box = Entry(self.mq_frame_database, width=10)
self.mq_delete_box.grid(row=6, column=0, padx=80, sticky=W)
# (original note: "no functionality yet")
self.mq_database_new_question_btn = Button(self.mq_frame_database, text="GUI Einträge leeren", command=lambda: Zuordnungsfrage.mq_clear_GUI(self))
self.mq_database_new_question_btn.grid(row=8, column=0, sticky=W, pady=5)
# (original note: "no functionality yet")
self.mq_database_edit_btn = Button(self.mq_frame_database, text="Aktuellen Eintrag editieren", command=lambda: Zuordnungsfrage.mq_edit_id_from_db(self))
self.mq_database_edit_btn.grid(row=3, column=0, sticky=W, pady=5)
self.mq_database_load_id_btn = Button(self.mq_frame_database, text="ID Laden", command=lambda: Zuordnungsfrage.mq_load_id_from_db(self, self.mq_db_entry_to_index_dict))
self.mq_database_load_id_btn.grid(row=4, column=0, sticky=W, pady=(15,0))
self.mq_load_box = Entry(self.mq_frame_database, width=10)
self.mq_load_box.grid(row=4, column=0, sticky=W, padx=80, pady=(15,0))
# Checkbox - "question text with highlighting?"
self.mq_highlight_question_text_label = Label(self.mq_frame_database, text="Fragentext mit Highlighting?")
self.mq_highlight_question_text_label.grid(row=5, column=0, pady=5, padx=5)
self.mq_var_highlight_question_text = IntVar()
self.mq_check_highlight_question_text = Checkbutton(self.mq_frame_database, text="", variable=self.mq_var_highlight_question_text, onvalue=1, offvalue=0)
self.mq_check_highlight_question_text.deselect()
self.mq_check_highlight_question_text.grid(row=5, column=0, sticky=E)
# Checkbox - "delete ALL DB entries?" (safety confirmation for the delete button)
self.mq_delete_all_label = Label(self.mq_frame_database, text="Alle DB Einträge löschen?")
self.mq_delete_all_label.grid(row=7, column=0, pady=5, padx=5)
self.mq_var_delete_all = IntVar()
self.mq_check_delete_all = Checkbutton(self.mq_frame_database, text="", variable=self.mq_var_delete_all, onvalue=1, offvalue=0)
self.mq_check_delete_all.deselect()
self.mq_check_delete_all.grid(row=7, column=0, sticky=E)
###################### "Excel Import/Export" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
# excel_import_btn
self.mq_excel_import_to_db_zuordnungsfrage_btn = Button(self.mq_frame_excel_import_export, text="Excel-Datei importieren", command=lambda: test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_import_to_db(self, self.mq_question_type_name, self.mq_db_entry_to_index_dict))
self.mq_excel_import_to_db_zuordnungsfrage_btn.grid(row=0, column=1, sticky=W, pady=5, padx=10)
# excel_export_btn
self.mq_excel_export_to_xlsx_zuordnungsfrage_btn = Button(self.mq_frame_excel_import_export, text="Datenbank exportieren",command=lambda: test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_export_to_xlsx(self, self.project_root_path, self.mq_db_entry_to_index_dict, self.database_zuordnungsfrage_path, self.mq_database, self.mq_database_table, self.mq_xlsx_workbook_name, self.mq_xlsx_worksheet_name))
self.mq_excel_export_to_xlsx_zuordnungsfrage_btn.grid(row=1, column=1, sticky=W, pady=5, padx=10)
# ILIAS_testfile_import
self.mq_import_ilias_testfile_btn = Button(self.mq_frame_excel_import_export, text="ILIAS-Datei importieren",command=lambda: test_generator_modul_ilias_import_test_datei.Import_ILIAS_Datei_in_DB.__init__(self, self.project_root_path))
self.mq_import_ilias_testfile_btn.grid(row=2, column=1, sticky=W, pady=(20,0), padx=10)
###################### "Zuordnungsfrage" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
# Core question fields: author, title, description, question text.
self.mq_question_author_label = Label(self.mq_frame, text="Fragen-Autor")
self.mq_question_author_label.grid(row=0, column=0, sticky=W, padx=10, pady=(10, 0))
self.mq_question_author_entry = Entry(self.mq_frame, width=30)
self.mq_question_author_entry.grid(row=0, column=1, pady=(10, 0), sticky=W)
self.mq_question_title_label = Label(self.mq_frame, text="Fragen-Titel")
self.mq_question_title_label.grid(row=1, column=0, sticky=W, padx=10, pady=(10, 0))
self.mq_question_title_entry = Entry(self.mq_frame, width=60)
self.mq_question_title_entry.grid(row=1, column=1, pady=(10, 0), sticky=W)
self.mq_question_description_title_label = Label(self.mq_frame, text="Fragen-Beschreibung")
self.mq_question_description_title_label.grid(row=2, column=0, sticky=W, padx=10)
self.mq_question_description_title_entry = Entry(self.mq_frame, width=60)
self.mq_question_description_title_entry.grid(row=2, column=1, sticky=W)
# Multi-line question text widget with attached vertical scrollbar
self.mq_question_textfield_label = Label(self.mq_frame, text="Fragen-Text")
self.mq_question_textfield_label.grid(row=3, column=0, sticky=W, padx=10)
self.mq_bar = Scrollbar(self.mq_frame)
self.mq_question_description_main_entry = Text(self.mq_frame, height=6, width=80, font=('Helvetica', 9))
self.mq_bar.grid(row=3, column=2, sticky=W)
self.mq_question_description_main_entry.grid(row=3, column=1, pady=10, sticky=W)
self.mq_bar.config(command=self.mq_question_description_main_entry.yview)
self.mq_question_description_main_entry.config(yscrollcommand=self.mq_bar.set)
# Processing-time row: one caption plus "Std:"/"Min:"/"Sek:" markers.
# NOTE(review): the same attribute name is reassigned for all four labels, so
# only the last one stays referenced — the widgets still render, but the
# earlier labels can no longer be reconfigured.
self.mq_processing_time_label = Label(self.mq_frame, text="Bearbeitungsdauer")
self.mq_processing_time_label.grid(row=4, column=0, sticky=W, pady=(5, 0), padx=10)
self.mq_processing_time_label = Label(self.mq_frame, text="Std:")
self.mq_processing_time_label.grid(row=4, column=1, sticky=W, pady=(5, 0))
self.mq_processing_time_label = Label(self.mq_frame, text="Min:")
self.mq_processing_time_label.grid(row=4, column=1, sticky=W, padx=70, pady=(5, 0))
self.mq_processing_time_label = Label(self.mq_frame, text="Sek:")
self.mq_processing_time_label.grid(row=4, column=1, sticky=W, padx=145, pady=(5, 0))
# Hour/minute/second comboboxes for the allowed processing time
self.mq_processingtime_hours = list(range(24))
self.mq_processingtime_minutes = list(range(60))
self.mq_processingtime_seconds = list(range(60))
self.mq_proc_hours_box = ttk.Combobox(self.mq_frame, value=self.mq_processingtime_hours, width=2)
self.mq_proc_minutes_box = ttk.Combobox(self.mq_frame, value=self.mq_processingtime_minutes, width=2)
self.mq_proc_seconds_box = ttk.Combobox(self.mq_frame, value=self.mq_processingtime_seconds, width=2)
# Default: 23h 0m 0s (i.e. effectively "no limit") — TODO confirm intent
self.mq_proc_hours_box.current(23)
self.mq_proc_minutes_box.current(0)
self.mq_proc_seconds_box.current(0)
# Selection handlers for the processing-time comboboxes: cache the chosen
# value on the instance.  "event" is required by bind() but unused.
def mq_selected_hours(event):
    self.selected_hours = self.mq_proc_hours_box.get()
def mq_selected_minutes(event):
    self.selected_minutes = self.mq_proc_minutes_box.get()
def mq_selected_seconds(event):
    self.selected_seconds = self.mq_proc_seconds_box.get()
# Bind each processing-time combobox to its own selection handler.
# Bug fix: previously all three handlers were bound to the hours box; each
# bind() replaced the previous one, so only mq_selected_seconds ever fired
# and the minutes/seconds boxes were never bound at all.
self.mq_proc_hours_box.bind("<<ComboboxSelected>>", mq_selected_hours)
self.mq_proc_minutes_box.bind("<<ComboboxSelected>>", mq_selected_minutes)
self.mq_proc_seconds_box.bind("<<ComboboxSelected>>", mq_selected_seconds)
self.mq_proc_hours_box.grid(row=4, column=1, sticky=W, padx=25, pady=(5, 0))
self.mq_proc_minutes_box.grid(row=4, column=1, sticky=W, padx=100, pady=(5, 0))
self.mq_proc_seconds_box.grid(row=4, column=1, sticky=W, padx=170, pady=(5, 0))
# Image preview size (pixels), default 300
self.mq_picture_preview_pixel_label = Label(self.mq_frame, text="Bild-Vorschaugröße (in Pixel)")
self.mq_picture_preview_pixel_label.grid(row=4, column=1, sticky=E, padx=70)
self.mq_picture_preview_pixel_entry = Entry(self.mq_frame, width=10)
self.mq_picture_preview_pixel_entry.grid(row=4, column=1, sticky=E, padx=0)
self.mq_picture_preview_pixel_entry.insert(END, "300")
# "Shuffle answers" selector: none / both / terms only / definitions only
self.mq_mix_answers_label = Label(self.mq_frame, text="Antworten mischen")
self.mq_mix_answers_label.grid(row=5, column=0, sticky=W, padx=10, pady=(5, 0))
self.mq_mix_answers_options = ["Nein", "Beides (Terme und Definitionen)", "Nur Terme", "Nur Definitionen"]
self.mq_mix_answers_box = ttk.Combobox(self.mq_frame, value=self.mq_mix_answers_options, width=26)
self.mq_mix_answers_box.current(0)
def mq_selected_mix_answers_options(event):  # "event" required by bind(), unused
    """Translate the selected shuffle option into the numeric code used by
    the exporter (0 = no, 1 = both, 2 = terms only, 3 = definitions only).

    Leaves self.mq_mix_answers_value untouched for any unknown selection.
    """
    # Keys must match self.mq_mix_answers_options byte-for-byte.  Bug fix:
    # the old comparison used "Beides(Terme und Definitionen)" (missing
    # space), so option code 1 could never be assigned.
    option_codes = {
        "Nein": 0,
        "Beides (Terme und Definitionen)": 1,
        "Nur Terme": 2,
        "Nur Definitionen": 3,
    }
    selection = self.mq_mix_answers_box.get()
    if selection in option_codes:
        self.mq_mix_answers_value = option_codes[selection]
self.mq_mix_answers_box.bind("<<ComboboxSelected>>", mq_selected_mix_answers_options)
self.mq_mix_answers_box.grid(row=5, column=1, sticky=W, padx=25, pady=(5, 0))
# Matching mode: 1:1 vs n:n assignment of terms to definitions.
# NOTE(review): self.mq_mix_answers_label is reassigned here, so the
# "Antworten mischen" label created above can no longer be referenced.
self.mq_mix_answers_label = Label(self.mq_frame, text="Zuordnungsmodus")
self.mq_mix_answers_label.grid(row=6, column=0, sticky=W, padx=10, pady=(5, 0))
self.selected_matching_option = StringVar()
self.selected_matching_option.set("1:1")
self.select_question_option_1_radiobtn = Radiobutton(self.mq_frame, text="Ein Term kann einer Definition zugeordnet werden (1:1)", variable=self.selected_matching_option, value="1:1")
self.select_question_option_1_radiobtn.grid(row=6, column=1, pady=0, sticky=W)
self.select_question_option_2_radiobtn = Radiobutton(self.mq_frame, text="Ein oder mehrere Terme können einer oder mehreren Definitionen zugeordnet werden (n:n)", variable=self.selected_matching_option , value="n:n")
self.select_question_option_2_radiobtn.grid(row=7, column=1, pady=0, sticky=W)
# Section captions for the definitions / terms / assignment-pair areas
self.mq_set_definitions_label = Label(self.mq_frame, text="Definitionen")
self.mq_set_definitions_label.grid(row=10, column=0, sticky=W, padx=10, pady=(25, 0))
self.mq_set_terms_label = Label(self.mq_frame, text="Terme")
self.mq_set_terms_label.grid(row=30, column=0, sticky=W, padx=10, pady=(5, 0))
self.mq_assignment_pairs_label = Label(self.mq_frame, text="Zuordnungspaare")
self.mq_assignment_pairs_label.grid(row=50, column=0, sticky=W, padx=10, pady=(25, 0))
# Default value lists for the assignment-pair comboboxes.
# NOTE(review): the empty-list assignments are immediately overwritten and
# are redundant.
self.mq_assignment_pairs_definitions_value = []
self.mq_assignment_pairs_definitions_value = ["Definition 1", "Definition 2", "Definition 3",
                                              "Definition 4", "Definition 5",
                                              "Definition 6", "Definition 7", "Definition 8",
                                              "Definition 9", "Definition 10"]
self.mq_assignment_pairs_terms_value = []
self.mq_assignment_pairs_terms_value = ["Term 1", "Term 2", "Term 3",
                                        "Term 4", "Term 5",
                                        "Term 6", "Term 7", "Term 8",
                                        "Term 9", "Term 10"]
def mq_definitions_answer_selected(event):  # "event" required by the bind() signature, not used
    """Show exactly N definition input rows, N taken from the combobox.

    Rebuilds the assignment-pair definition comboboxes, then shows the
    widgets for definitions 2..N and hides those for N+1..10.  Definition 1
    is always visible and therefore has no show/remove pair.  Any combobox
    value other than "1".."10" is ignored.
    """
    selection = self.mq_definitions_numbers_of_answers_box.get()
    if selection not in [str(number) for number in range(1, 11)]:
        return
    visible_count = int(selection)
    mq_assignment_pairs_definitions_box_refresh()
    if visible_count == 10:
        # Quirk preserved from the original branch for "10": only this case
        # also rebuilds the term pair comboboxes.
        mq_assignment_pairs_terms_box_refresh()
    toggle_pairs = (
        (mq_definitions_var2_show, mq_definitions_var2_remove),
        (mq_definitions_var3_show, mq_definitions_var3_remove),
        (mq_definitions_var4_show, mq_definitions_var4_remove),
        (mq_definitions_var5_show, mq_definitions_var5_remove),
        (mq_definitions_var6_show, mq_definitions_var6_remove),
        (mq_definitions_var7_show, mq_definitions_var7_remove),
        (mq_definitions_var8_show, mq_definitions_var8_remove),
        (mq_definitions_var9_show, mq_definitions_var9_remove),
        (mq_definitions_var10_show, mq_definitions_var10_remove),
    )
    for widget_number, (show_widget, remove_widget) in enumerate(toggle_pairs, start=2):
        if widget_number <= visible_count:
            show_widget()
        else:
            remove_widget()
def mq_terms_answer_selected(event):  # "event" required by the bind() signature, not used
    """Show exactly N term input rows, N taken from the combobox.

    Rebuilds the assignment-pair term comboboxes, then shows the widgets
    for terms 2..N and hides those for N+1..10.  Term 1 is always visible
    and therefore has no show/remove pair.  Any combobox value other than
    "1".."10" is ignored.
    """
    selection = self.mq_terms_numbers_of_answers_box.get()
    if selection not in [str(number) for number in range(1, 11)]:
        return
    visible_count = int(selection)
    mq_assignment_pairs_terms_box_refresh()
    toggle_pairs = (
        (mq_terms_var2_show, mq_terms_var2_remove),
        (mq_terms_var3_show, mq_terms_var3_remove),
        (mq_terms_var4_show, mq_terms_var4_remove),
        (mq_terms_var5_show, mq_terms_var5_remove),
        (mq_terms_var6_show, mq_terms_var6_remove),
        (mq_terms_var7_show, mq_terms_var7_remove),
        (mq_terms_var8_show, mq_terms_var8_remove),
        (mq_terms_var9_show, mq_terms_var9_remove),
        (mq_terms_var10_show, mq_terms_var10_remove),
    )
    for widget_number, (show_widget, remove_widget) in enumerate(toggle_pairs, start=2):
        if widget_number <= visible_count:
            show_widget()
        else:
            remove_widget()
def mq_assignment_pairs_definitions_box_refresh():
    """Destroy and recreate the ten "definition" comboboxes of the
    assignment-pair section so they pick up the current value list
    (self.mq_assignment_pairs_definitions_value).

    Only box 1 is placed on the grid here (preselected to the first
    entry); boxes 2..10 are created but left unplaced.
    """
    attribute_names = ["mq_assignment_pairs_definitions_%d_box" % number for number in range(1, 11)]
    # Drop all stale widgets first ...
    for attribute_name in attribute_names:
        getattr(self, attribute_name).destroy()
    # ... then rebuild every box against the refreshed value list.
    for attribute_name in attribute_names:
        setattr(self, attribute_name, ttk.Combobox(self.mq_frame, value=self.mq_assignment_pairs_definitions_value, width=20))
    self.mq_assignment_pairs_definitions_1_box.current(0)
    self.mq_assignment_pairs_definitions_1_box.grid(row=52, column=1, sticky=W, pady=(5, 0))
def mq_assignment_pairs_terms_box_refresh():
    """Destroy and recreate the ten "term" comboboxes of the
    assignment-pair section so they pick up the current value list
    (self.mq_assignment_pairs_terms_value).

    Only box 1 is placed on the grid here; boxes 2..10 are created but
    left unplaced.  Unlike the definitions refresh, no default entry is
    preselected.
    """
    attribute_names = ["mq_assignment_pairs_terms_%d_box" % number for number in range(1, 11)]
    # Drop all stale widgets first ...
    for attribute_name in attribute_names:
        getattr(self, attribute_name).destroy()
    # ... then rebuild every box against the refreshed value list.
    for attribute_name in attribute_names:
        setattr(self, attribute_name, ttk.Combobox(self.mq_frame, value=self.mq_assignment_pairs_terms_value, width=20))
    self.mq_assignment_pairs_terms_1_box.grid(row=52, column=1, sticky=E, pady=(5, 0), padx=100)
def mq_assignment_pairs_selected(event):
    """Show or hide assignment-pair rows to match the selected count.

    Bound to ``<<ComboboxSelected>>`` on the pair-count combobox.  The
    selection is read once; pairs 2..count are shown and the remaining
    pairs up to 10 are hidden.  Pair 1 is always visible and therefore
    not toggled.  Any value outside "1".."10" is ignored, exactly like
    the original ten-branch if-chain (which matched nothing).

    The original read the combobox up to ten times and duplicated the
    show/hide pattern in ten branches; this collapses it to one read
    and one loop.
    """
    selection = self.mq_assignment_pairs_numbers_of_answers_box.get()
    if selection not in {str(n) for n in range(1, 11)}:
        return  # unknown value: original branches all fell through
    count = int(selection)
    # (index, show-callback, hide-callback) for the optional pairs 2-10.
    toggles = (
        (2, mq_assignment_pair_2_show, mq_assignment_pair_2_remove),
        (3, mq_assignment_pair_3_show, mq_assignment_pair_3_remove),
        (4, mq_assignment_pair_4_show, mq_assignment_pair_4_remove),
        (5, mq_assignment_pair_5_show, mq_assignment_pair_5_remove),
        (6, mq_assignment_pair_6_show, mq_assignment_pair_6_remove),
        (7, mq_assignment_pair_7_show, mq_assignment_pair_7_remove),
        (8, mq_assignment_pair_8_show, mq_assignment_pair_8_remove),
        (9, mq_assignment_pair_9_show, mq_assignment_pair_9_remove),
        (10, mq_assignment_pair_10_show, mq_assignment_pair_10_remove),
    )
    for index, show, hide in toggles:
        if index <= count:
            show()
        else:
            hide()
# --- Count selectors: how many definitions / terms / assignment pairs ---
# Selector for the number of definition answers ("Anzahl der Antworten").
self.mq_definitions_numbers_of_answers_box_label = Label(self.mq_frame, text="Anzahl der Antworten")
self.mq_definitions_numbers_of_answers_box_label.grid(row=8, column=0, sticky=W, padx=10, pady=(5, 0))
self.mq_definitions_numbers_of_answers_value = [str(n) for n in range(1, 11)]
self.mq_definitions_numbers_of_answers_box = ttk.Combobox(self.mq_frame, value=self.mq_definitions_numbers_of_answers_value, width=20)
self.mq_definitions_numbers_of_answers_box.bind("<<ComboboxSelected>>", mq_definitions_answer_selected)
self.mq_definitions_numbers_of_answers_box.grid(row=8, column=1, sticky=W, pady=(5, 0))
self.mq_definitions_numbers_of_answers_box.current(0)
# Selector for the number of term answers.
self.mq_terms_numbers_of_answers_value = [str(n) for n in range(1, 11)]
self.mq_terms_numbers_of_answers_box = ttk.Combobox(self.mq_frame, value=self.mq_terms_numbers_of_answers_value, width=20)
self.mq_terms_numbers_of_answers_box.bind("<<ComboboxSelected>>", mq_terms_answer_selected)
self.mq_terms_numbers_of_answers_box.grid(row=8, column=1, sticky=E, pady=(5, 0))
self.mq_terms_numbers_of_answers_box.current(0)
# Selector for the number of assignment pairs.
self.mq_assignment_pairs_numbers_of_answers_value = [str(n) for n in range(1, 11)]
self.mq_assignment_pairs_numbers_of_answers_box = ttk.Combobox(self.mq_frame, value=self.mq_assignment_pairs_numbers_of_answers_value, width=20)
self.mq_assignment_pairs_numbers_of_answers_box.bind("<<ComboboxSelected>>", mq_assignment_pairs_selected)
self.mq_assignment_pairs_numbers_of_answers_box.grid(row=50, column=1, sticky=W, pady=(25, 0))
self.mq_assignment_pairs_numbers_of_answers_box.current(0)
# --- Assignment-pair comboboxes: definitions column, then terms column ---
# Ten boxes per column; only row 1 is gridded up front, rows 2-10 are
# placed by the mq_assignment_pair_N_show callbacks.
for index in range(1, 11):
    setattr(
        self,
        f"mq_assignment_pairs_definitions_{index}_box",
        ttk.Combobox(self.mq_frame, value=self.mq_assignment_pairs_definitions_value, width=20),
    )
self.mq_assignment_pairs_definitions_1_box.grid(row=52, column=1, sticky=W, pady=(5, 0))
for index in range(1, 11):
    setattr(
        self,
        f"mq_assignment_pairs_terms_{index}_box",
        ttk.Combobox(self.mq_frame, value=self.mq_assignment_pairs_terms_value, width=20),
    )
self.mq_assignment_pairs_terms_1_box.grid(row=52, column=1, sticky=E, pady=(5, 0), padx=100)
# --- Points entry per assignment pair (only pair 1 gridded elsewhere) ---
for index in range(1, 11):
    setattr(self, f"mq_assignment_pairs_pts_{index}_entry", Entry(self.mq_frame, width=8))
# Column caption above the per-answer image widgets.
self.mq_response_img_label = Label(self.mq_frame, text="Antwort-Grafik")
self.mq_response_img_label.grid(row=10, column=1, sticky=E, padx=40)
# ------------------------------- VARIABLES - TEXT & ENTRY --------------------------------------------
# Free-text answer fields, ten per group; definitions are created first,
# then terms, matching the original creation order.
for group in ("definitions", "terms"):
    for index in range(1, 11):
        setattr(self, f"mq_{group}_var{index}_answer_entry", Entry(self.mq_frame, width=45))
##################
# Per-answer image bookkeeping fields (width 8): one entry for the image
# data and one for the image path, for each of the ten definitions and
# ten terms.  They are written by the image-selection callback; none of
# them is gridded in this section.
for group in ("definitions", "terms"):
    for index in range(1, 11):
        setattr(self, f"mq_{group}_var{index}_img_data_entry", Entry(self.mq_frame, width=8))
for group in ("definitions", "terms"):
    for index in range(1, 11):
        setattr(self, f"mq_{group}_var{index}_img_path_entry", Entry(self.mq_frame, width=8))
################
# ------------------------------- VARIABLES BUTTONS - SELECT IMAGE --------------------------------------------
# Visible file-name fields (width 30) showing the chosen image per answer,
# for both the definitions and the terms group.
for group in ("definitions", "terms"):
    for index in range(1, 11):
        setattr(self, f"mq_{group}_var{index}_img_label_entry", Entry(self.mq_frame, width=30))
# --- "Datei wählen" buttons: one image picker per answer row ---
# As in the original hand-written lambdas, the entry widgets are looked
# up on self at CLICK time, not at construction time; group/index are
# bound as lambda defaults to avoid the late-binding-closure pitfall.
for group in ("definitions", "terms"):
    for index in range(1, 11):
        setattr(
            self,
            f"mq_{group}_var{index}_select_img_btn",
            Button(
                self.mq_frame,
                text="Datei wählen",
                command=lambda g=group, i=index: Zuordnungsfrage.mq_add_image_to_answer(
                    self,
                    getattr(self, f"mq_{g}_var{i}_img_label_entry"),
                    getattr(self, f"mq_{g}_var{i}_img_data_entry"),
                    getattr(self, f"mq_{g}_var{i}_img_path_entry"),
                ),
            ),
        )
# --- Row captions "Zeile 1" … "Zeile 10" for definitions and terms ---
for group in ("definitions", "terms"):
    for index in range(1, 11):
        setattr(self, f"mq_{group}_answer{index}_label", Label(self.mq_frame, text=f"Zeile {index}"))
# Row 1 of the definitions block is always visible (grid row 20).
self.mq_definitions_answer1_label.grid(row=20, column=0, sticky=W, padx=30)
self.mq_definitions_var1_answer_entry.grid(row=20, column=1, sticky=W)
self.mq_definitions_var1_img_label_entry.grid(row=20, column=1, sticky=E, padx=0)
self.mq_definitions_var1_select_img_btn.grid(row=20, column=1, sticky=E, padx=200)
def _grid_definitions_row(index):
    # Place the four widgets of one definitions line (caption label,
    # text entry, image-name entry, picker button) on grid row 19+index.
    row = 19 + index  # rows 2-10 map to grid rows 21-29
    getattr(self, f"mq_definitions_answer{index}_label").grid(row=row, column=0, sticky=W, padx=30)
    getattr(self, f"mq_definitions_var{index}_answer_entry").grid(row=row, column=1, sticky=W)
    getattr(self, f"mq_definitions_var{index}_img_label_entry").grid(row=row, column=1, sticky=E, padx=0)
    getattr(self, f"mq_definitions_var{index}_select_img_btn").grid(row=row, column=1, sticky=E, padx=200)
# One named show-callback per optional definitions row; the answer-count
# handler references these by name, so each must exist as its own def.
def mq_definitions_var2_show():
    _grid_definitions_row(2)
def mq_definitions_var3_show():
    _grid_definitions_row(3)
def mq_definitions_var4_show():
    _grid_definitions_row(4)
def mq_definitions_var5_show():
    _grid_definitions_row(5)
def mq_definitions_var6_show():
    _grid_definitions_row(6)
def mq_definitions_var7_show():
    _grid_definitions_row(7)
def mq_definitions_var8_show():
    _grid_definitions_row(8)
def mq_definitions_var9_show():
    _grid_definitions_row(9)
def mq_definitions_var10_show():
    _grid_definitions_row(10)
def _ungrid_definitions_row(index):
    # Take one definitions line off the grid; grid_remove (unlike
    # grid_forget) remembers the grid options so a later show restores
    # the row in place.
    getattr(self, f"mq_definitions_answer{index}_label").grid_remove()
    getattr(self, f"mq_definitions_var{index}_answer_entry").grid_remove()
    getattr(self, f"mq_definitions_var{index}_img_label_entry").grid_remove()
    getattr(self, f"mq_definitions_var{index}_select_img_btn").grid_remove()
# One named hide-callback per optional definitions row.
def mq_definitions_var2_remove():
    _ungrid_definitions_row(2)
def mq_definitions_var3_remove():
    _ungrid_definitions_row(3)
def mq_definitions_var4_remove():
    _ungrid_definitions_row(4)
def mq_definitions_var5_remove():
    _ungrid_definitions_row(5)
def mq_definitions_var6_remove():
    _ungrid_definitions_row(6)
def mq_definitions_var7_remove():
    _ungrid_definitions_row(7)
def mq_definitions_var8_remove():
    _ungrid_definitions_row(8)
def mq_definitions_var9_remove():
    _ungrid_definitions_row(9)
def mq_definitions_var10_remove():
    _ungrid_definitions_row(10)
# --- Terms column: row 1 is always visible (grid rows 40-49) ---
self.mq_terms_answer1_label.grid(row=40, column=0, sticky=W, padx=30)
self.mq_terms_var1_answer_entry.grid(row=40, column=1, sticky=W)
self.mq_terms_var1_img_label_entry.grid(row=40, column=1, sticky=E, padx=0)
self.mq_terms_var1_select_img_btn.grid(row=40, column=1, sticky=E, padx=200)
def _grid_terms_row(index):
    # Place the four widgets of one terms line on grid row 39+index.
    row = 39 + index  # rows 2-10 map to grid rows 41-49
    getattr(self, f"mq_terms_answer{index}_label").grid(row=row, column=0, sticky=W, padx=30)
    getattr(self, f"mq_terms_var{index}_answer_entry").grid(row=row, column=1, sticky=W)
    getattr(self, f"mq_terms_var{index}_img_label_entry").grid(row=row, column=1, sticky=E, padx=0)
    getattr(self, f"mq_terms_var{index}_select_img_btn").grid(row=row, column=1, sticky=E, padx=200)
# One named show-callback per optional terms row.
def mq_terms_var2_show():
    _grid_terms_row(2)
def mq_terms_var3_show():
    _grid_terms_row(3)
def mq_terms_var4_show():
    _grid_terms_row(4)
def mq_terms_var5_show():
    _grid_terms_row(5)
def mq_terms_var6_show():
    _grid_terms_row(6)
def mq_terms_var7_show():
    _grid_terms_row(7)
def mq_terms_var8_show():
    _grid_terms_row(8)
def mq_terms_var9_show():
    _grid_terms_row(9)
def mq_terms_var10_show():
    _grid_terms_row(10)
def _ungrid_terms_row(index):
    # Take one terms line off the grid, keeping grid options for re-show.
    getattr(self, f"mq_terms_answer{index}_label").grid_remove()
    getattr(self, f"mq_terms_var{index}_answer_entry").grid_remove()
    getattr(self, f"mq_terms_var{index}_img_label_entry").grid_remove()
    getattr(self, f"mq_terms_var{index}_select_img_btn").grid_remove()
# One named hide-callback per optional terms row.
def mq_terms_var2_remove():
    _ungrid_terms_row(2)
def mq_terms_var3_remove():
    _ungrid_terms_row(3)
def mq_terms_var4_remove():
    _ungrid_terms_row(4)
def mq_terms_var5_remove():
    _ungrid_terms_row(5)
def mq_terms_var6_remove():
    _ungrid_terms_row(6)
def mq_terms_var7_remove():
    _ungrid_terms_row(7)
def mq_terms_var8_remove():
    _ungrid_terms_row(8)
def mq_terms_var9_remove():
    _ungrid_terms_row(9)
def mq_terms_var10_remove():
    _ungrid_terms_row(10)
### ZUORDNUNGSPAARE ANZEIGEN/AUSBLENDEN
self.mq_assignment_pairs_pts_1_entry.grid(row=52, column=1, sticky=E, pady=(5, 0))
def mq_assignment_pair_2_show():
self.mq_assignment_pairs_definitions_2_box.grid(row=53, column=1, sticky=W, pady=(5, 0))
self.mq_assignment_pairs_terms_2_box.grid(row=53, column=1, sticky=E, pady=(5, 0), padx=100)
self.mq_assignment_pairs_pts_2_entry.grid(row=53, column=1, sticky=E, pady=(5, 0))
def mq_assignment_pair_3_show():
self.mq_assignment_pairs_definitions_3_box.grid(row=54, column=1, sticky=W, pady=(5, 0))
self.mq_assignment_pairs_terms_3_box.grid(row=54, column=1, sticky=E, pady=(5, 0), padx=100)
self.mq_assignment_pairs_pts_3_entry.grid(row=54, column=1, sticky=E, pady=(5, 0))
def mq_assignment_pair_4_show():
self.mq_assignment_pairs_definitions_4_box.grid(row=55, column=1, sticky=W, pady=(5, 0))
self.mq_assignment_pairs_terms_4_box.grid(row=55, column=1, sticky=E, pady=(5, 0), padx=100)
self.mq_assignment_pairs_pts_4_entry.grid(row=55, column=1, sticky=E, pady=(5, 0))
def mq_assignment_pair_5_show():
self.mq_assignment_pairs_definitions_5_box.grid(row=56, column=1, sticky=W, pady=(5, 0))
self.mq_assignment_pairs_terms_5_box.grid(row=56, column=1, sticky=E, pady=(5, 0), padx=100)
self.mq_assignment_pairs_pts_5_entry.grid(row=56, column=1, sticky=E, pady=(5, 0))
def mq_assignment_pair_6_show():
self.mq_assignment_pairs_definitions_6_box.grid(row=57, column=1, sticky=W, pady=(5, 0))
self.mq_assignment_pairs_terms_6_box.grid(row=57, column=1, sticky=E, pady=(5, 0), padx=100)
self.mq_assignment_pairs_pts_6_entry.grid(row=57, column=1, sticky=E, pady=(5, 0))
def mq_assignment_pair_7_show():
self.mq_assignment_pairs_definitions_7_box.grid(row=58, column=1, sticky=W, pady=(5, 0))
self.mq_assignment_pairs_terms_7_box.grid(row=58, column=1, sticky=E, pady=(5, 0), padx=100)
self.mq_assignment_pairs_pts_7_entry.grid(row=58, column=1, sticky=E, pady=(5, 0))
def mq_assignment_pair_8_show():
self.mq_assignment_pairs_definitions_8_box.grid(row=59, column=1, sticky=W, pady=(5, 0))
self.mq_assignment_pairs_terms_8_box.grid(row=59, column=1, sticky=E, pady=(5, 0), padx=100)
self.mq_assignment_pairs_pts_8_entry.grid(row=59, column=1, sticky=E, pady=(5, 0))
def mq_assignment_pair_9_show():
self.mq_assignment_pairs_definitions_9_box.grid(row=60, column=1, sticky=W, pady=(5, 0))
self.mq_assignment_pairs_terms_9_box.grid(row=60, column=1, sticky=E, pady=(5, 0), padx=100)
self.mq_assignment_pairs_pts_9_entry.grid(row=60, column=1, sticky=E, pady=(5, 0))
def mq_assignment_pair_10_show():
self.mq_assignment_pairs_definitions_10_box.grid(row=61, column=1, sticky=W, pady=(5, 0))
self.mq_assignment_pairs_terms_10_box.grid(row=61, column=1, sticky=E, pady=(5, 0), padx=100)
self.mq_assignment_pairs_pts_10_entry.grid(row=61, column=1, sticky=E, pady=(5, 0))
def mq_assignment_pair_2_remove():
self.mq_assignment_pairs_definitions_2_box.grid_remove()
self.mq_assignment_pairs_terms_2_box.grid_remove()
self.mq_assignment_pairs_pts_2_entry.grid_remove()
def mq_assignment_pair_3_remove():
self.mq_assignment_pairs_definitions_3_box.grid_remove()
self.mq_assignment_pairs_terms_3_box.grid_remove()
self.mq_assignment_pairs_pts_3_entry.grid_remove()
def mq_assignment_pair_4_remove():
self.mq_assignment_pairs_definitions_4_box.grid_remove()
self.mq_assignment_pairs_terms_4_box.grid_remove()
self.mq_assignment_pairs_pts_4_entry.grid_remove()
def mq_assignment_pair_5_remove():
self.mq_assignment_pairs_definitions_5_box.grid_remove()
self.mq_assignment_pairs_terms_5_box.grid_remove()
self.mq_assignment_pairs_pts_5_entry.grid_remove()
def mq_assignment_pair_6_remove():
self.mq_assignment_pairs_definitions_6_box.grid_remove()
self.mq_assignment_pairs_terms_6_box.grid_remove()
self.mq_assignment_pairs_pts_6_entry.grid_remove()
def mq_assignment_pair_7_remove():
self.mq_assignment_pairs_definitions_7_box.grid_remove()
self.mq_assignment_pairs_terms_7_box.grid_remove()
self.mq_assignment_pairs_pts_7_entry.grid_remove()
def mq_assignment_pair_8_remove():
self.mq_assignment_pairs_definitions_8_box.grid_remove()
self.mq_assignment_pairs_terms_8_box.grid_remove()
self.mq_assignment_pairs_pts_8_entry.grid_remove()
def mq_assignment_pair_9_remove():
self.mq_assignment_pairs_definitions_9_box.grid_remove()
self.mq_assignment_pairs_terms_9_box.grid_remove()
self.mq_assignment_pairs_pts_9_entry.grid_remove()
def mq_assignment_pair_10_remove():
self.mq_assignment_pairs_definitions_10_box.grid_remove()
self.mq_assignment_pairs_terms_10_box.grid_remove()
self.mq_assignment_pairs_pts_10_entry.grid_remove()
    # Selects an image file for a single answer option and fills the
    # associated label/data/path entry widgets.
def mq_add_image_to_answer(self, var_img_label_entry, picture_data_entry, picture_path_entry):
### Dateipfad auswählen
self.mq_picture_path = filedialog.askopenfilename(initialdir=pathlib.Path().absolute(), title="Select a File")
# "rindex" sucht nach einem bestimmten Zeichen in einem String, beginnend von rechts
self.mq_picture_name = self.mq_picture_path[self.mq_picture_path.rindex('/')+1:] # Nach dem "/" befindet sich der Dateiname
self.mq_image_format = self.mq_picture_path[self.mq_picture_path.rindex('.'):] # Nach dem "." befindet sich das Dateiformat z.B. .jpg
### Bild-Namen in entsprechendes, geleertes, Eingabefeld übertragen
var_img_label_entry.delete(0, END)
var_img_label_entry.insert(0, str(self.mq_picture_name))
### Bild-Daten in base64 speichern. Die XML Datei enthält die Bilder der Antworten in base64 encoded
# "encoded64_string_raw enthält die Daten als String in der Form b'String'
# Um die Daten in der richtigen Form zu erhalten (nur den String ohne b''), wird die Funktion .decode('utf-8') verwendet
# Dieser String kann in der .xml Datei verwendet werden um im Ilias ein Bild zu erzeugen
with open(self.mq_picture_path, "rb") as image_file:
encoded64_string_raw = base64.b64encode(image_file.read())
picture_data_entry.insert(END, encoded64_string_raw.decode('utf-8'))
picture_path_entry.insert(END, self.mq_picture_path )
def mq_save_id_to_db(self):
conn = sqlite3.connect(self.database_zuordnungsfrage_path)
c =conn.cursor()
# format of duration P0Y0M0DT0H30M0S
self.mq_test_time = "P0Y0M0DT" + self.mq_proc_hours_box.get() + "H" + self.mq_proc_minutes_box.get() + "M" + self.mq_proc_seconds_box.get() + "S"
# Bild 1
if self.mq_description_img_name_1 != "" or self.mq_description_img_name_1 != "EMPTY":
# read image data in byte format
with open(self.mq_description_img_path_1, 'rb') as image_file_1:
self.mq_description_img_data_1 = image_file_1.read()
else:
self.mq_description_img_name_1 = ""
self.mq_description_img_path_1 = ""
self.mq_description_img_data_1 = ""
# Bild 2
if self.mq_description_img_name_2 != "" or self.mq_description_img_name_2 != "EMPTY":
# read image data in byte format
with open(self.mq_description_img_path_2, 'rb') as image_file_2:
self.mq_description_img_data_2 = image_file_2.read()
else:
self.mq_description_img_name_2 = ""
self.mq_description_img_path_2 = ""
self.mq_description_img_data_2 = ""
# Bild 3
if self.mq_description_img_name_3 != "" or self.mq_description_img_name_3 != "EMPTY":
# read image data in byte format
with open(self.mq_description_img_path_3, 'rb') as image_file_3:
self.mq_description_img_data_3 = image_file_3.read()
else:
self.mq_description_img_name_3 = ""
self.mq_description_img_path_3 = ""
self.mq_description_img_data_3 = ""
def mq_bind_value_for_empty_answer_image(definition_picture_label_entry, definition_picture_data_entry, definition_picture_path_entry, term_picture_label_entry, term_picture_data_entry, term_picture_path_entry):
if definition_picture_label_entry == "":
definition_picture_label_entry.insert(0, "")
definition_picture_data_entry.insert(0, "")
definition_picture_path_entry.insert(0, "")
if term_picture_label_entry == "":
term_picture_label_entry.insert(0, "")
term_picture_data_entry.insert(0, "")
term_picture_path_entry.insert(0, "")
mq_bind_value_for_empty_answer_image(self.mq_definitions_var1_img_label_entry, self.mq_definitions_var1_img_data_entry, self.mq_definitions_var1_img_path_entry, self.mq_terms_var1_img_label_entry, self.mq_terms_var1_img_data_entry, self.mq_terms_var1_img_path_entry)
mq_bind_value_for_empty_answer_image(self.mq_definitions_var2_img_label_entry, self.mq_definitions_var2_img_data_entry, self.mq_definitions_var2_img_path_entry, self.mq_terms_var2_img_label_entry, self.mq_terms_var2_img_data_entry, self.mq_terms_var2_img_path_entry)
mq_bind_value_for_empty_answer_image(self.mq_definitions_var3_img_label_entry, self.mq_definitions_var3_img_data_entry, self.mq_definitions_var3_img_path_entry, self.mq_terms_var3_img_label_entry, self.mq_terms_var3_img_data_entry, self.mq_terms_var3_img_path_entry)
mq_bind_value_for_empty_answer_image(self.mq_definitions_var4_img_label_entry, self.mq_definitions_var4_img_data_entry, self.mq_definitions_var4_img_path_entry, self.mq_terms_var4_img_label_entry, self.mq_terms_var4_img_data_entry, self.mq_terms_var4_img_path_entry)
mq_bind_value_for_empty_answer_image(self.mq_definitions_var6_img_label_entry, self.mq_definitions_var5_img_data_entry, self.mq_definitions_var5_img_path_entry, self.mq_terms_var5_img_label_entry, self.mq_terms_var5_img_data_entry, self.mq_terms_var5_img_path_entry)
mq_bind_value_for_empty_answer_image(self.mq_definitions_var6_img_label_entry, self.mq_definitions_var6_img_data_entry, self.mq_definitions_var6_img_path_entry, self.mq_terms_var6_img_label_entry, self.mq_terms_var6_img_data_entry, self.mq_terms_var6_img_path_entry)
mq_bind_value_for_empty_answer_image(self.mq_definitions_var7_img_label_entry, self.mq_definitions_var7_img_data_entry, self.mq_definitions_var7_img_path_entry, self.mq_terms_var7_img_label_entry, self.mq_terms_var7_img_data_entry, self.mq_terms_var7_img_path_entry)
mq_bind_value_for_empty_answer_image(self.mq_definitions_var8_img_label_entry, self.mq_definitions_var8_img_data_entry, self.mq_definitions_var8_img_path_entry, self.mq_terms_var8_img_label_entry, self.mq_terms_var8_img_data_entry, self.mq_terms_var8_img_path_entry)
mq_bind_value_for_empty_answer_image(self.mq_definitions_var9_img_label_entry, self.mq_definitions_var9_img_data_entry, self.mq_definitions_var9_img_path_entry, self.mq_terms_var9_img_label_entry, self.mq_terms_var9_img_data_entry, self.mq_terms_var9_img_path_entry)
mq_bind_value_for_empty_answer_image(self.mq_definitions_var10_img_label_entry, self.mq_definitions_var10_img_data_entry, self.mq_definitions_var10_img_path_entry, self.mq_terms_var10_img_label_entry, self.mq_terms_var10_img_data_entry, self.mq_terms_var10_img_path_entry)
# Insert into Table
c.execute(
"INSERT INTO zuordnungsfrage_table VALUES ("
":question_difficulty, :question_category, :question_type, "
":question_title, :question_description_title, :question_description_main, :mix_answers, :assignment_mode, "
":definitions_response_1_text, :definitions_response_1_img_label, :definitions_response_1_img_path, :definitions_response_1_img_string_base64_encoded, "
":definitions_response_2_text, :definitions_response_2_img_label, :definitions_response_2_img_path, :definitions_response_2_img_string_base64_encoded, "
":definitions_response_3_text, :definitions_response_3_img_label, :definitions_response_3_img_path, :definitions_response_3_img_string_base64_encoded, "
":definitions_response_4_text, :definitions_response_4_img_label, :definitions_response_4_img_path, :definitions_response_4_img_string_base64_encoded, "
":definitions_response_5_text, :definitions_response_5_img_label, :definitions_response_5_img_path, :definitions_response_5_img_string_base64_encoded, "
":definitions_response_6_text, :definitions_response_6_img_label, :definitions_response_6_img_path, :definitions_response_6_img_string_base64_encoded, "
":definitions_response_7_text, :definitions_response_7_img_label, :definitions_response_7_img_path, :definitions_response_7_img_string_base64_encoded, "
":definitions_response_8_text, :definitions_response_8_img_label, :definitions_response_8_img_path, :definitions_response_8_img_string_base64_encoded, "
":definitions_response_9_text, :definitions_response_9_img_label, :definitions_response_9_img_path, :definitions_response_9_img_string_base64_encoded, "
":definitions_response_10_text, :definitions_response_10_img_label, :definitions_response_10_img_path, :definitions_response_10_img_string_base64_encoded, "
":terms_response_1_text, :terms_response_1_img_label, :terms_response_1_img_path, :terms_response_1_img_string_base64_encoded, "
":terms_response_2_text, :terms_response_2_img_label, :terms_response_2_img_path, :terms_response_2_img_string_base64_encoded, "
":terms_response_3_text, :terms_response_3_img_label, :terms_response_3_img_path, :terms_response_3_img_string_base64_encoded, "
":terms_response_4_text, :terms_response_4_img_label, :terms_response_4_img_path, :terms_response_4_img_string_base64_encoded, "
":terms_response_5_text, :terms_response_5_img_label, :terms_response_5_img_path, :terms_response_5_img_string_base64_encoded, "
":terms_response_6_text, :terms_response_6_img_label, :terms_response_6_img_path, :terms_response_6_img_string_base64_encoded, "
":terms_response_7_text, :terms_response_7_img_label, :terms_response_7_img_path, :terms_response_7_img_string_base64_encoded, "
":terms_response_8_text, :terms_response_8_img_label, :terms_response_8_img_path, :terms_response_8_img_string_base64_encoded, "
":terms_response_9_text, :terms_response_9_img_label, :terms_response_9_img_path, :terms_response_9_img_string_base64_encoded, "
":terms_response_10_text, :terms_response_10_img_label, :terms_response_10_img_path, :terms_response_10_img_string_base64_encoded, "
":assignment_pairs_definition_1, :assignment_pairs_term_1, :assignment_pairs_1_pts,"
":assignment_pairs_definition_2, :assignment_pairs_term_2, :assignment_pairs_2_pts,"
":assignment_pairs_definition_3, :assignment_pairs_term_3, :assignment_pairs_3_pts,"
":assignment_pairs_definition_4, :assignment_pairs_term_4, :assignment_pairs_4_pts,"
":assignment_pairs_definition_5, :assignment_pairs_term_5, :assignment_pairs_5_pts,"
":assignment_pairs_definition_6, :assignment_pairs_term_6, :assignment_pairs_6_pts,"
":assignment_pairs_definition_7, :assignment_pairs_term_7, :assignment_pairs_7_pts,"
":assignment_pairs_definition_8, :assignment_pairs_term_8, :assignment_pairs_8_pts,"
":assignment_pairs_definition_9, :assignment_pairs_term_9, :assignment_pairs_9_pts,"
":assignment_pairs_definition_10, :assignment_pairs_term_10, :assignment_pairs_10_pts,"
":picture_preview_pixel,"
":description_img_name_1, :description_img_data_1, :description_img_path_1, "
":description_img_name_2, :description_img_data_2, :description_img_path_2, "
":description_img_name_3, :description_img_data_3, :description_img_path_3, "
":test_time, :var_number, :res_number, :question_pool_tag, :question_author)",
{
'question_difficulty': self.mq_question_difficulty_entry.get(),
'question_category': self.mq_question_category_entry.get(),
'question_type': self.mq_question_type_entry.get(),
'question_title': self.mq_question_title_entry.get(),
'question_description_title': self.mq_question_description_title_entry.get(),
# The first part, "1.0" means that the input should be read from line one, character zero (ie: the very first character).
# END is an imported constant which is set to the string "end". The END part means to read until the end of the text box is reached.
# The only issue with this is that it actually adds a newline to our input. "
# "So, in order to fix it we should change END to end-1c(Thanks Bryan Oakley) The -1c deletes 1 character, while -2c would mean delete two characters, and so on."
'question_description_main': self.mq_question_description_main_entry.get("1.0", 'end-1c'),
'mix_answers': self.mq_mix_answers_box.get(),
'assignment_mode': self.selected_matching_option.get(),
'definitions_response_1_text': self.mq_definitions_var1_answer_entry.get(),
'definitions_response_2_text': self.mq_definitions_var2_answer_entry.get(),
'definitions_response_3_text': self.mq_definitions_var3_answer_entry.get(),
'definitions_response_4_text': self.mq_definitions_var4_answer_entry.get(),
'definitions_response_5_text': self.mq_definitions_var5_answer_entry.get(),
'definitions_response_6_text': self.mq_definitions_var6_answer_entry.get(),
'definitions_response_7_text': self.mq_definitions_var7_answer_entry.get(),
'definitions_response_8_text': self.mq_definitions_var8_answer_entry.get(),
'definitions_response_9_text': self.mq_definitions_var9_answer_entry.get(),
'definitions_response_10_text': self.mq_definitions_var10_answer_entry.get(),
'definitions_response_1_img_label': self.mq_definitions_var1_img_label_entry.get(),
'definitions_response_2_img_label': self.mq_definitions_var2_img_label_entry.get(),
'definitions_response_3_img_label': self.mq_definitions_var3_img_label_entry.get(),
'definitions_response_4_img_label': self.mq_definitions_var4_img_label_entry.get(),
'definitions_response_5_img_label': self.mq_definitions_var5_img_label_entry.get(),
'definitions_response_6_img_label': self.mq_definitions_var6_img_label_entry.get(),
'definitions_response_7_img_label': self.mq_definitions_var7_img_label_entry.get(),
'definitions_response_8_img_label': self.mq_definitions_var8_img_label_entry.get(),
'definitions_response_9_img_label': self.mq_definitions_var9_img_label_entry.get(),
'definitions_response_10_img_label': self.mq_definitions_var10_img_label_entry.get(),
'definitions_response_1_img_path': self.mq_definitions_var1_img_path_entry.get(),
'definitions_response_2_img_path': self.mq_definitions_var2_img_path_entry.get(),
'definitions_response_3_img_path': self.mq_definitions_var3_img_path_entry.get(),
'definitions_response_4_img_path': self.mq_definitions_var4_img_path_entry.get(),
'definitions_response_5_img_path': self.mq_definitions_var5_img_path_entry.get(),
'definitions_response_6_img_path': self.mq_definitions_var6_img_path_entry.get(),
'definitions_response_7_img_path': self.mq_definitions_var7_img_path_entry.get(),
'definitions_response_8_img_path': self.mq_definitions_var8_img_path_entry.get(),
'definitions_response_9_img_path': self.mq_definitions_var9_img_path_entry.get(),
'definitions_response_10_img_path': self.mq_definitions_var10_img_path_entry.get(),
'definitions_response_1_img_string_base64_encoded': self.mq_definitions_var1_img_data_entry.get(),
'definitions_response_2_img_string_base64_encoded': self.mq_definitions_var2_img_data_entry.get(),
'definitions_response_3_img_string_base64_encoded': self.mq_definitions_var3_img_data_entry.get(),
'definitions_response_4_img_string_base64_encoded': self.mq_definitions_var4_img_data_entry.get(),
'definitions_response_5_img_string_base64_encoded': self.mq_definitions_var5_img_data_entry.get(),
'definitions_response_6_img_string_base64_encoded': self.mq_definitions_var6_img_data_entry.get(),
'definitions_response_7_img_string_base64_encoded': self.mq_definitions_var7_img_data_entry.get(),
'definitions_response_8_img_string_base64_encoded': self.mq_definitions_var8_img_data_entry.get(),
'definitions_response_9_img_string_base64_encoded': self.mq_definitions_var9_img_data_entry.get(),
'definitions_response_10_img_string_base64_encoded': self.mq_definitions_var10_img_data_entry.get(),
'terms_response_1_text': self.mq_terms_var1_answer_entry.get(),
'terms_response_2_text': self.mq_terms_var2_answer_entry.get(),
'terms_response_3_text': self.mq_terms_var3_answer_entry.get(),
'terms_response_4_text': self.mq_terms_var4_answer_entry.get(),
'terms_response_5_text': self.mq_terms_var5_answer_entry.get(),
'terms_response_6_text': self.mq_terms_var6_answer_entry.get(),
'terms_response_7_text': self.mq_terms_var7_answer_entry.get(),
'terms_response_8_text': self.mq_terms_var8_answer_entry.get(),
'terms_response_9_text': self.mq_terms_var9_answer_entry.get(),
'terms_response_10_text': self.mq_terms_var10_answer_entry.get(),
'terms_response_1_img_label': self.mq_terms_var1_img_label_entry.get(),
'terms_response_2_img_label': self.mq_terms_var2_img_label_entry.get(),
'terms_response_3_img_label': self.mq_terms_var3_img_label_entry.get(),
'terms_response_4_img_label': self.mq_terms_var4_img_label_entry.get(),
'terms_response_5_img_label': self.mq_terms_var5_img_label_entry.get(),
'terms_response_6_img_label': self.mq_terms_var6_img_label_entry.get(),
'terms_response_7_img_label': self.mq_terms_var7_img_label_entry.get(),
'terms_response_8_img_label': self.mq_terms_var8_img_label_entry.get(),
'terms_response_9_img_label': self.mq_terms_var9_img_label_entry.get(),
'terms_response_10_img_label': self.mq_terms_var10_img_label_entry.get(),
'terms_response_1_img_path': self.mq_terms_var1_img_path_entry.get(),
'terms_response_2_img_path': self.mq_terms_var2_img_path_entry.get(),
'terms_response_3_img_path': self.mq_terms_var3_img_path_entry.get(),
'terms_response_4_img_path': self.mq_terms_var4_img_path_entry.get(),
'terms_response_5_img_path': self.mq_terms_var5_img_path_entry.get(),
'terms_response_6_img_path': self.mq_terms_var6_img_path_entry.get(),
'terms_response_7_img_path': self.mq_terms_var7_img_path_entry.get(),
'terms_response_8_img_path': self.mq_terms_var8_img_path_entry.get(),
'terms_response_9_img_path': self.mq_terms_var9_img_path_entry.get(),
'terms_response_10_img_path': self.mq_terms_var10_img_path_entry.get(),
'terms_response_1_img_string_base64_encoded': self.mq_terms_var1_img_data_entry.get(),
'terms_response_2_img_string_base64_encoded': self.mq_terms_var2_img_data_entry.get(),
'terms_response_3_img_string_base64_encoded': self.mq_terms_var3_img_data_entry.get(),
'terms_response_4_img_string_base64_encoded': self.mq_terms_var4_img_data_entry.get(),
'terms_response_5_img_string_base64_encoded': self.mq_terms_var5_img_data_entry.get(),
'terms_response_6_img_string_base64_encoded': self.mq_terms_var6_img_data_entry.get(),
'terms_response_7_img_string_base64_encoded': self.mq_terms_var7_img_data_entry.get(),
'terms_response_8_img_string_base64_encoded': self.mq_terms_var8_img_data_entry.get(),
'terms_response_9_img_string_base64_encoded': self.mq_terms_var9_img_data_entry.get(),
'terms_response_10_img_string_base64_encoded': self.mq_terms_var10_img_data_entry.get(),
'assignment_pairs_definition_1': self.mq_assignment_pairs_definitions_1_box.get(),
'assignment_pairs_definition_2': self.mq_assignment_pairs_definitions_2_box.get(),
'assignment_pairs_definition_3': self.mq_assignment_pairs_definitions_3_box.get(),
'assignment_pairs_definition_4': self.mq_assignment_pairs_definitions_4_box.get(),
'assignment_pairs_definition_5': self.mq_assignment_pairs_definitions_5_box.get(),
'assignment_pairs_definition_6': self.mq_assignment_pairs_definitions_6_box.get(),
'assignment_pairs_definition_7': self.mq_assignment_pairs_definitions_7_box.get(),
'assignment_pairs_definition_8': self.mq_assignment_pairs_definitions_8_box.get(),
'assignment_pairs_definition_9': self.mq_assignment_pairs_definitions_9_box.get(),
'assignment_pairs_definition_10': self.mq_assignment_pairs_definitions_10_box.get(),
'assignment_pairs_term_1': self.mq_assignment_pairs_terms_1_box.get(),
'assignment_pairs_term_2': self.mq_assignment_pairs_terms_2_box.get(),
'assignment_pairs_term_3': self.mq_assignment_pairs_terms_3_box.get(),
'assignment_pairs_term_4': self.mq_assignment_pairs_terms_4_box.get(),
'assignment_pairs_term_5': self.mq_assignment_pairs_terms_5_box.get(),
'assignment_pairs_term_6': self.mq_assignment_pairs_terms_6_box.get(),
'assignment_pairs_term_7': self.mq_assignment_pairs_terms_7_box.get(),
'assignment_pairs_term_8': self.mq_assignment_pairs_terms_8_box.get(),
'assignment_pairs_term_9': self.mq_assignment_pairs_terms_9_box.get(),
'assignment_pairs_term_10': self.mq_assignment_pairs_terms_10_box.get(),
'assignment_pairs_1_pts': self.mq_assignment_pairs_pts_1_entry.get(),
'assignment_pairs_2_pts': self.mq_assignment_pairs_pts_2_entry.get(),
'assignment_pairs_3_pts': self.mq_assignment_pairs_pts_3_entry.get(),
'assignment_pairs_4_pts': self.mq_assignment_pairs_pts_4_entry.get(),
'assignment_pairs_5_pts': self.mq_assignment_pairs_pts_5_entry.get(),
'assignment_pairs_6_pts': self.mq_assignment_pairs_pts_6_entry.get(),
'assignment_pairs_7_pts': self.mq_assignment_pairs_pts_7_entry.get(),
'assignment_pairs_8_pts': self.mq_assignment_pairs_pts_8_entry.get(),
'assignment_pairs_9_pts': self.mq_assignment_pairs_pts_9_entry.get(),
'assignment_pairs_10_pts': self.mq_assignment_pairs_pts_10_entry.get(),
'picture_preview_pixel': self.mq_picture_preview_pixel_entry.get(),
'description_img_name_1': self.mq_description_img_name_1,
'description_img_data_1': self.mq_description_img_data_1,
'description_img_path_1': self.mq_description_img_path_1,
'description_img_name_2': self.mq_description_img_name_2,
'description_img_data_2': self.mq_description_img_data_2,
'description_img_path_2': self.mq_description_img_path_2,
'description_img_name_3': self.mq_description_img_name_3,
'description_img_data_3': self.mq_description_img_data_3,
'description_img_path_3': self.mq_description_img_path_3,
'test_time': self.mq_test_time,
'var_number': "",
'res_number': "",
'question_pool_tag': self.mq_question_pool_tag_entry.get(),
'question_author': self.mq_question_author_entry.get()
}
)
conn.commit()
conn.close()
print("Neuer Eintrag in die Zuordnungsfragen-Datenbank --> Fragentitel: " + str(self.mq_question_title_entry.get()))
def mq_load_id_from_db(self, entry_to_index_dict):
self.mq_db_entry_to_index_dict = entry_to_index_dict
conn = sqlite3.connect(self.database_zuordnungsfrage_path)
c = conn.cursor()
record_id = self.mq_load_box.get()
c.execute("SELECT * FROM zuordnungsfrage_table WHERE oid =" + record_id)
mq_db_records = c.fetchall()
Zuordnungsfrage.mq_clear_GUI(self)
self.mq_definitions_numbers_of_answers_box.current(9)
self.mq_terms_numbers_of_answers_box.current(9)
#Zuordnungsfrage.__init__()
for mq_db_record in mq_db_records:
self.mq_question_difficulty_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['question_difficulty']] )
self.mq_question_category_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['question_category']])
self.mq_question_type_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['question_type']])
self.mq_question_title_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['question_title']])
self.mq_question_description_title_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['question_description_title']])
self.mq_question_description_main_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['question_description_main']])
self.mq_definitions_var1_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_1_text']])
self.mq_definitions_var2_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_2_text']])
self.mq_definitions_var3_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_3_text']])
self.mq_definitions_var4_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_4_text']])
self.mq_definitions_var5_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_5_text']])
self.mq_definitions_var6_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_6_text']])
self.mq_definitions_var7_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_7_text']])
self.mq_definitions_var8_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_8_text']])
self.mq_definitions_var9_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_9_text']])
self.mq_definitions_var10_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_10_text']])
self.mq_definitions_var1_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_1_img_label']])
self.mq_definitions_var2_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_2_img_label']])
self.mq_definitions_var3_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_3_img_label']])
self.mq_definitions_var4_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_4_img_label']])
self.mq_definitions_var5_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_5_img_label']])
self.mq_definitions_var6_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_6_img_label']])
self.mq_definitions_var7_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_7_img_label']])
self.mq_definitions_var8_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_8_img_label']])
self.mq_definitions_var9_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_9_img_label']])
self.mq_definitions_var10_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_10_img_label']])
self.mq_terms_var1_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_1_text']])
self.mq_terms_var2_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_2_text']])
self.mq_terms_var3_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_3_text']])
self.mq_terms_var4_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_4_text']])
self.mq_terms_var5_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_5_text']])
self.mq_terms_var6_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_6_text']])
self.mq_terms_var7_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_7_text']])
self.mq_terms_var8_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_8_text']])
self.mq_terms_var9_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_9_text']])
self.mq_terms_var10_answer_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_10_text']])
self.mq_terms_var1_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_1_img_label']])
self.mq_terms_var2_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_2_img_label']])
self.mq_terms_var3_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_3_img_label']])
self.mq_terms_var4_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_4_img_label']])
self.mq_terms_var5_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_5_img_label']])
self.mq_terms_var6_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_6_img_label']])
self.mq_terms_var7_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_7_img_label']])
self.mq_terms_var8_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_8_img_label']])
self.mq_terms_var9_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_9_img_label']])
self.mq_terms_var10_img_label_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['terms_response_10_img_label']])
self.mq_assignment_pairs_pts_1_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_1_pts']])
self.mq_assignment_pairs_pts_2_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_2_pts']])
self.mq_assignment_pairs_pts_3_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_3_pts']])
self.mq_assignment_pairs_pts_4_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_4_pts']])
self.mq_assignment_pairs_pts_5_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_5_pts']])
self.mq_assignment_pairs_pts_6_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_6_pts']])
self.mq_assignment_pairs_pts_7_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_7_pts']])
self.mq_assignment_pairs_pts_8_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_8_pts']])
self.mq_assignment_pairs_pts_9_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_9_pts']])
self.mq_assignment_pairs_pts_10_entry.insert(END, mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_10_pts']])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_1']] != "":
self.mq_assignment_pairs_definitions_1_box.current(self.assignment_pairs_definitions_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_1']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_2']] != "":
self.mq_assignment_pairs_definitions_2_box.current(self.assignment_pairs_definitions_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_2']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_3']] != "":
self.mq_assignment_pairs_definitions_3_box.current(self.assignment_pairs_definitions_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_3']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_4']] != "":
self.mq_assignment_pairs_definitions_4_box.current(self.assignment_pairs_definitions_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_4']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_5']] != "":
self.mq_assignment_pairs_definitions_5_box.current(self.assignment_pairs_definitions_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_5']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_6']] != "":
self.mq_assignment_pairs_definitions_6_box.current(self.assignment_pairs_definitions_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_6']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_7']] != "":
self.mq_assignment_pairs_definitions_7_box.current(self.assignment_pairs_definitions_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_7']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_8']] != "":
self.mq_assignment_pairs_definitions_8_box.current(self.assignment_pairs_definitions_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_8']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_9']] != "":
self.mq_assignment_pairs_definitions_9_box.current(self.assignment_pairs_definitions_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_9']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_10']] != "":
self.mq_assignment_pairs_definitions_10_box.current(self.assignment_pairs_definitions_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_10']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_1']] != "":
self.mq_assignment_pairs_terms_1_box.current(self.assignment_pairs_terms_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_1']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_2']] != "":
self.mq_assignment_pairs_terms_2_box.current(self.assignment_pairs_terms_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_2']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_3']] != "":
self.mq_assignment_pairs_terms_3_box.current(self.assignment_pairs_terms_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_3']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_4']] != "":
self.mq_assignment_pairs_terms_4_box.current(self.assignment_pairs_terms_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_4']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_5']] != "":
self.mq_assignment_pairs_terms_5_box.current(self.assignment_pairs_terms_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_5']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_6']] != "":
self.mq_assignment_pairs_terms_6_box.current(self.assignment_pairs_terms_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_6']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_7']] != "":
self.mq_assignment_pairs_terms_7_box.current(self.assignment_pairs_terms_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_7']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_8']] != "":
self.mq_assignment_pairs_terms_8_box.current(self.assignment_pairs_terms_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_8']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_9']] != "":
self.mq_assignment_pairs_terms_9_box.current(self.assignment_pairs_terms_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_9']]])
if mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_10']] != "":
self.mq_assignment_pairs_terms_10_box.current(self.assignment_pairs_terms_to_int_dict[mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_10']]])
def mq_edit_id_from_db(self):
    """Write the current GUI field values back to the matching-question
    ("Zuordnungsfrage") database record whose ID is entered in the
    "ID Laden" box.

    Fixes over the previous revision:
      * the three image-name checks used ``or`` (``name != "" or name !=
        "EMPTY"`` is always true), so the reset branch was unreachable and
        an empty image path crashed ``open()``;
      * the ``WHERE oid = :oid`` clause had no matching ``'oid'`` entry in
        the parameter dict (``record_id`` was read but never used), which
        made ``execute`` raise ``sqlite3.ProgrammingError``;
      * the transaction was never committed and the connection never
        closed, so edits would have been silently discarded.
    """
    # Connect to the database.
    conn = sqlite3.connect(self.database_zuordnungsfrage_path)
    c = conn.cursor()
    # ID of the question to update, from the "ID Laden" entry field.
    record_id = self.mq_load_box.get()
    # Test-duration format used in the XML file: P0Y0M0DT0H30M0S
    self.mq_test_time = ("P0Y0M0DT" + self.mq_proc_hours_box.get() + "H"
                         + self.mq_proc_minutes_box.get() + "M"
                         + self.mq_proc_seconds_box.get() + "S")
    # If a description-image name is present, read the image bytes from its
    # path ("rb" = read bytes); otherwise reset name/data/path to "".
    # BUG FIX: the old guard `name != "" or name != "EMPTY"` was always true.
    for i in (1, 2, 3):
        img_name = getattr(self, "mq_description_img_name_%d" % i)
        if img_name not in ("", "EMPTY"):
            img_path = getattr(self, "mq_description_img_path_%d" % i)
            with open(img_path, 'rb') as img_file:
                setattr(self, "mq_description_img_data_%d" % i, img_file.read())
        else:
            setattr(self, "mq_description_img_name_%d" % i, "")
            setattr(self, "mq_description_img_data_%d" % i, "")
            setattr(self, "mq_description_img_path_%d" % i, "")
    # Column -> new value mapping. The Text widget is read from "1.0"
    # (line 1, character 0) to 'end-1c' so the trailing newline that
    # Tkinter always appends is dropped.
    values = {
        'question_difficulty': self.mq_question_difficulty_entry.get(),
        'question_category': self.mq_question_category_entry.get(),
        'question_type': self.mq_question_type_entry.get(),
        'question_title': self.mq_question_title_entry.get(),
        'question_description_title': self.mq_question_description_title_entry.get(),
        'question_description_main': self.mq_question_description_main_entry.get("1.0", 'end-1c'),
        'mix_answers': self.mq_mix_answers_box.get(),
        'assignment_mode': self.selected_matching_option.get(),
        'picture_preview_pixel': self.mq_picture_preview_pixel_entry.get(),
        'description_img_name_1': self.mq_description_img_name_1,
        'description_img_data_1': self.mq_description_img_data_1,
        'description_img_path_1': self.mq_description_img_path_1,
        'description_img_name_2': self.mq_description_img_name_2,
        'description_img_data_2': self.mq_description_img_data_2,
        'description_img_path_2': self.mq_description_img_path_2,
        'description_img_name_3': self.mq_description_img_name_3,
        'description_img_data_3': self.mq_description_img_data_3,
        'description_img_path_3': self.mq_description_img_path_3,
        'test_time': self.mq_test_time,
        'question_pool_tag': self.mq_question_pool_tag_entry.get(),
        'question_author': self.mq_question_author_entry.get(),
    }
    # The ten repeated definition/term/pair widget groups follow a strict
    # naming pattern, so collect them in a loop instead of ~130 lines.
    for i in range(1, 11):
        values['definitions_response_%d_text' % i] = getattr(self, 'mq_definitions_var%d_answer_entry' % i).get()
        values['definitions_response_%d_img_label' % i] = getattr(self, 'mq_definitions_var%d_img_label_entry' % i).get()
        values['definitions_response_%d_img_path' % i] = getattr(self, 'mq_definitions_var%d_img_path_entry' % i).get()
        values['definitions_response_%d_img_string_base64_encoded' % i] = getattr(self, 'mq_definitions_var%d_img_data_entry' % i).get()
        values['terms_response_%d_text' % i] = getattr(self, 'mq_terms_var%d_answer_entry' % i).get()
        values['terms_response_%d_img_label' % i] = getattr(self, 'mq_terms_var%d_img_label_entry' % i).get()
        values['terms_response_%d_img_path' % i] = getattr(self, 'mq_terms_var%d_img_path_entry' % i).get()
        values['terms_response_%d_img_string_base64_encoded' % i] = getattr(self, 'mq_terms_var%d_img_data_entry' % i).get()
        values['assignment_pairs_definition_%d' % i] = getattr(self, 'mq_assignment_pairs_definitions_%d_box' % i).get()
        values['assignment_pairs_term_%d' % i] = getattr(self, 'mq_assignment_pairs_terms_%d_box' % i).get()
        values['assignment_pairs_%d_pts' % i] = getattr(self, 'mq_assignment_pairs_pts_%d_entry' % i).get()
    # The column names come from the literal keys above (never from user
    # input), so interpolating them into the SET clause is injection-safe;
    # the values themselves are bound as named parameters.
    set_clause = ", ".join("%s = :%s" % (col, col) for col in values)
    # BUG FIX: ':oid' previously had no binding in the parameter dict.
    values['oid'] = record_id
    c.execute("UPDATE zuordnungsfrage_table SET %s WHERE oid = :oid" % set_clause,
              values)
    # BUG FIX: persist the edit and release the connection.
    conn.commit()
    conn.close()
def mq_delete_id_from_db(self):
    """Delete the record whose ID is in the delete entry box.

    Delegates the actual removal (and the Excel export refresh) to the
    shared ``Delete_Entry_from_Database`` helper, then clears the box.
    """
    self.mq_delete_box_id = self.mq_delete_box.get()
    test_generator_modul_datenbanken_erstellen.Delete_Entry_from_Database.__init__(
        self,
        self.mq_delete_box_id,
        self.mq_question_type_name,
        self.mq_var_delete_all.get(),
        self.project_root_path,
        self.mq_db_entry_to_index_dict,
        self.database_zuordnungsfrage_path,
        "zuordnungsfrage_db.db",
        "zuordnungsfrage_table",
        "Zuordnungsfrage_DB_export_file.xlsx",
        "Zuordnungsfrage - Database",
    )
    self.mq_delete_box.delete(0, END)
def mq_clear_GUI(self):
    """Clear every matching-question input widget back to empty.

    Covers the header fields, the ten "DEFINITIONEN" and ten "TERME"
    answer/image-label entries, and the twenty assignment-pair
    comboboxes.  (The question-type entry is deliberately left alone,
    as in the original implementation.)
    """
    for header_entry in (self.mq_question_difficulty_entry,
                         self.mq_question_category_entry,
                         # self.mq_question_type_entry intentionally not cleared
                         self.mq_question_title_entry,
                         self.mq_question_description_title_entry):
        header_entry.delete(0, END)
    # The description is a Text widget and needs index-style positions.
    self.mq_question_description_main_entry.delete('1.0', 'end-1c')
    # All repeated widgets follow a strict "<prefix><i><suffix>" naming
    # pattern, so one loop replaces sixty identical delete calls.
    for i in range(1, 11):
        # "DEFINITIONEN" answer and image-label entries
        getattr(self, "mq_definitions_var%d_answer_entry" % i).delete(0, END)
        getattr(self, "mq_definitions_var%d_img_label_entry" % i).delete(0, END)
        # "TERME" answer and image-label entries
        getattr(self, "mq_terms_var%d_answer_entry" % i).delete(0, END)
        getattr(self, "mq_terms_var%d_img_label_entry" % i).delete(0, END)
        # assignment-pair comboboxes (cleared with the "end" string index)
        getattr(self, "mq_assignment_pairs_definitions_%d_box" % i).delete(0, "end")
        getattr(self, "mq_assignment_pairs_terms_%d_box" % i).delete(0, "end")
class Create_Zuordnungsfrage_Questions(Zuordnungsfrage):
    def __init__(self, db_entry_to_index_dict, ids_in_entry_box, question_type_test_or_pool, pool_img_dir, ilias_id_pool_qpl_dir, xml_read_qti_template_path, xml_qti_output_file_path, xml_qpl_output_file_path, max_id_pool_qti_xml, max_id, taxonomy_file_question_pool):
        """Create ILIAS matching questions ("Zuordnungsfrage") from database records.

        Parses the QTI XML template, resolves which database entries (oids) were
        requested via the entry box (or all entries, if the corresponding checkbox
        is set), copies every column of each matching record into instance
        attributes, and finally triggers ``mq_question_structure`` per entry to
        emit the XML.

        Args:
            db_entry_to_index_dict: Maps DB column names to tuple indices of a
                fetched record row.
            ids_in_entry_box: Comma-separated oid list typed into the GUI.
            question_type_test_or_pool: "question_test" to build an ILIAS test,
                anything else builds a question pool.
            pool_img_dir: Image directory used only for pool creation.
            ilias_id_pool_qpl_dir: ILIAS id of the pool qpl directory.
            xml_read_qti_template_path: Path of the QTI template to parse.
            xml_qti_output_file_path: Output path of the generated qti XML.
            xml_qpl_output_file_path: Output path of the generated qpl XML.
            max_id_pool_qti_xml: Highest id found in the pool qti XML.
            max_id: Highest id found in the id file.
            taxonomy_file_question_pool: Taxonomy file used for pool questions.

        NOTE(review): relies on attributes inherited from ``Zuordnungsfrage``
        (``self.database_zuordnungsfrage_path``, ``self.mq_question_type_name``,
        ``self.mq_var_create_question_pool_all_check``) — defined outside this block.
        """
        # --- store caller-supplied configuration -------------------------------
        self.mq_db_entry_to_index_dict = db_entry_to_index_dict
        self.mq_test_entry_splitted = ids_in_entry_box.split(",")
        self.qti_file_path_output = xml_qti_output_file_path
        self.zuordnungsfrage_pool_qpl_file_path_output = xml_qpl_output_file_path
        # Parse the QTI template once; questions are appended to this tree later.
        self.mq_mytree = ET.parse(xml_read_qti_template_path)
        self.mq_myroot = self.mq_mytree.getroot()
        self.question_type_test_or_pool = question_type_test_or_pool
        self.zuordnungsfrage_pool_img_file_path = pool_img_dir # Only used when creating a question pool. The directory name is created at runtime)
        self.all_entries_from_db_list = []
        self.number_of_entrys = []
        self.mq_question_pool_id_list = []
        self.mq_question_title_list = []
        self.mq_ilias_id_pool_qpl_dir = ilias_id_pool_qpl_dir
        self.mq_file_max_id = max_id
        self.mq_taxonomy_file_question_pool = taxonomy_file_question_pool
        self.mq_ilias_id_pool_qti_xml = max_id_pool_qti_xml
        # Maps the GUI dropdown labels ("Definition 1".."Definition 10",
        # "Term 1".."Term 10") onto the flat answer ids 0..19 used in the XML.
        self.assignment_pairs_definitions_terms_to_id_dict = {"Definition 1": 0, "Definition 2": 1, "Definition 3": 2, "Definition 4": 3, "Definition 5": 4,
                                                             "Definition 6": 5, "Definition 7": 6, "Definition 8": 7, "Definition 9": 8, "Definition 10": 9,
                                                             "Term 1": 10, "Term 2": 11, "Term 3": 12, "Term 4": 13, "Term 5": 14,
                                                             "Term 6": 15, "Term 7": 16, "Term 8": 17, "Term 9": 18, "Term 10": 19
                                                             }
        print("\n")
        if self.question_type_test_or_pool == "question_test":
            print("ZUORDNUNGSFRAGE: ILIAS-TEST WIRD ERSTELLT... ID: " + str(ids_in_entry_box))
        else:
            print("ZUORDNUNGSFRAGE: ILIAS-POOL WIRD ERSTELLT... ID: " + str(ids_in_entry_box))
        # Connect to the matching-question (MQ) database
        connect_mq_db = sqlite3.connect(self.database_zuordnungsfrage_path)
        cursor = connect_mq_db.cursor()
        # Check whether ALL entries should be generated (checkbox set)
        if self.mq_var_create_question_pool_all_check.get() == 1:
            conn = sqlite3.connect(self.database_zuordnungsfrage_path)
            c = conn.cursor()
            # NOTE(review): queries "singlechoice_table" although this class handles
            # the zuordnungsfrage database — looks copy-pasted from the single-choice
            # module; verify the table name against the DB schema.
            c.execute("SELECT *, oid FROM singlechoice_table")
            mq_db_records = c.fetchall()
            for mq_db_record in mq_db_records:
                # oid is appended as the last column by "SELECT *, oid"
                self.all_entries_from_db_list.append(int(mq_db_record[len(mq_db_record) - 1]))
            self.string_temp = ','.join(map(str, self.all_entries_from_db_list))
            self.mq_test_entry_splitted = self.string_temp.split(",")
            # The entry with ID "1" is the template and must not be generated
            self.mq_test_entry_splitted.pop(0)
        # Read all database entries together with their "oid" (database ID).
        # The database ID is created automatically (sequentially) for every new
        # entry and cannot be influenced.
        cursor.execute("SELECT *, oid FROM zuordnungsfrage_table")
        mq_db_records = cursor.fetchall()
        for i in range(len(self.mq_test_entry_splitted)):
            for mq_db_record in mq_db_records:
                # Match the record whose oid equals the i-th requested id
                if str(mq_db_record[len(mq_db_record) - 1]) == self.mq_test_entry_splitted[i]:
                    # NOTE(review): loop variable "t" is unused — the assignments
                    # below are re-executed once per column of the record with
                    # identical results; the loop appears redundant. Confirm before
                    # removing.
                    for t in range(len(mq_db_record)):
                        if mq_db_record[self.mq_db_entry_to_index_dict['question_type']].lower() == self.mq_question_type_name.lower():
                            self.mq_question_difficulty = mq_db_record[self.mq_db_entry_to_index_dict['question_difficulty']]
                            self.mq_question_category = mq_db_record[self.mq_db_entry_to_index_dict['question_category']]
                            self.mq_question_type = mq_db_record[self.mq_db_entry_to_index_dict['question_type']]
                            # NOTE(review): .replace('&', "&") is a no-op on every
                            # occurrence below — presumably "&amp;" (XML escaping)
                            # was intended; confirm against the XML writer.
                            self.mq_question_title = mq_db_record[self.mq_db_entry_to_index_dict['question_title']].replace('&', "&")
                            self.mq_question_description_title = mq_db_record[self.mq_db_entry_to_index_dict['question_description_title']].replace('&', "&")
                            self.mq_question_description_main = mq_db_record[self.mq_db_entry_to_index_dict['question_description_main']].replace('&', "&")
                            self.mq_mix_answers = mq_db_record[self.mq_db_entry_to_index_dict['mix_answers']]
                            self.mq_assignment_mode = mq_db_record[self.mq_db_entry_to_index_dict['assignment_mode']]
                            # Definitions side: text, image label, image path and
                            # base64-encoded image data for answers 1..10
                            self.mq_definitions_response_1_text = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_1_text']].replace('&', "&")
                            self.mq_definitions_response_2_text = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_2_text']].replace('&', "&")
                            self.mq_definitions_response_3_text = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_3_text']].replace('&', "&")
                            self.mq_definitions_response_4_text = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_4_text']].replace('&', "&")
                            self.mq_definitions_response_5_text = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_5_text']].replace('&', "&")
                            self.mq_definitions_response_6_text = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_6_text']].replace('&', "&")
                            self.mq_definitions_response_7_text = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_7_text']].replace('&', "&")
                            self.mq_definitions_response_8_text = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_8_text']].replace('&', "&")
                            self.mq_definitions_response_9_text = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_9_text']].replace('&', "&")
                            self.mq_definitions_response_10_text = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_10_text']].replace('&', "&")
                            self.mq_definitions_response_1_img_label = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_1_img_label']]
                            self.mq_definitions_response_2_img_label = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_2_img_label']]
                            self.mq_definitions_response_3_img_label = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_3_img_label']]
                            self.mq_definitions_response_4_img_label = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_4_img_label']]
                            self.mq_definitions_response_5_img_label = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_5_img_label']]
                            self.mq_definitions_response_6_img_label = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_6_img_label']]
                            self.mq_definitions_response_7_img_label = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_7_img_label']]
                            self.mq_definitions_response_8_img_label = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_8_img_label']]
                            self.mq_definitions_response_9_img_label = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_9_img_label']]
                            self.mq_definitions_response_10_img_label = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_10_img_label']]
                            self.mq_definitions_response_1_img_path = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_1_img_path']]
                            self.mq_definitions_response_2_img_path = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_2_img_path']]
                            self.mq_definitions_response_3_img_path = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_3_img_path']]
                            self.mq_definitions_response_4_img_path = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_4_img_path']]
                            self.mq_definitions_response_5_img_path = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_5_img_path']]
                            self.mq_definitions_response_6_img_path = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_6_img_path']]
                            self.mq_definitions_response_7_img_path = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_7_img_path']]
                            self.mq_definitions_response_8_img_path = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_8_img_path']]
                            self.mq_definitions_response_9_img_path = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_9_img_path']]
                            self.mq_definitions_response_10_img_path = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_10_img_path']]
                            self.mq_definitions_response_1_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_1_img_string_base64_encoded']]
                            self.mq_definitions_response_2_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_2_img_string_base64_encoded']]
                            self.mq_definitions_response_3_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_3_img_string_base64_encoded']]
                            self.mq_definitions_response_4_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_4_img_string_base64_encoded']]
                            self.mq_definitions_response_5_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_5_img_string_base64_encoded']]
                            self.mq_definitions_response_6_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_6_img_string_base64_encoded']]
                            self.mq_definitions_response_7_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_7_img_string_base64_encoded']]
                            self.mq_definitions_response_8_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_8_img_string_base64_encoded']]
                            self.mq_definitions_response_9_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_9_img_string_base64_encoded']]
                            self.mq_definitions_response_10_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['definitions_response_10_img_string_base64_encoded']]
                            # Terms side: text, image label, image path and
                            # base64-encoded image data for answers 1..10
                            self.mq_terms_response_1_text = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_1_text']].replace('&', "&")
                            self.mq_terms_response_2_text = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_2_text']].replace('&', "&")
                            self.mq_terms_response_3_text = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_3_text']].replace('&', "&")
                            self.mq_terms_response_4_text = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_4_text']].replace('&', "&")
                            self.mq_terms_response_5_text = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_5_text']].replace('&', "&")
                            self.mq_terms_response_6_text = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_6_text']].replace('&', "&")
                            self.mq_terms_response_7_text = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_7_text']].replace('&', "&")
                            self.mq_terms_response_8_text = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_8_text']].replace('&', "&")
                            self.mq_terms_response_9_text = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_9_text']].replace('&', "&")
                            self.mq_terms_response_10_text = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_10_text']].replace('&', "&")
                            self.mq_terms_response_1_img_label = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_1_img_label']]
                            self.mq_terms_response_2_img_label = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_2_img_label']]
                            self.mq_terms_response_3_img_label = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_3_img_label']]
                            self.mq_terms_response_4_img_label = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_4_img_label']]
                            self.mq_terms_response_5_img_label = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_5_img_label']]
                            self.mq_terms_response_6_img_label = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_6_img_label']]
                            self.mq_terms_response_7_img_label = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_7_img_label']]
                            self.mq_terms_response_8_img_label = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_8_img_label']]
                            self.mq_terms_response_9_img_label = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_9_img_label']]
                            self.mq_terms_response_10_img_label = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_10_img_label']]
                            self.mq_terms_response_1_img_path = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_1_img_path']]
                            self.mq_terms_response_2_img_path = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_2_img_path']]
                            self.mq_terms_response_3_img_path = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_3_img_path']]
                            self.mq_terms_response_4_img_path = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_4_img_path']]
                            self.mq_terms_response_5_img_path = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_5_img_path']]
                            self.mq_terms_response_6_img_path = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_6_img_path']]
                            self.mq_terms_response_7_img_path = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_7_img_path']]
                            self.mq_terms_response_8_img_path = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_8_img_path']]
                            self.mq_terms_response_9_img_path = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_9_img_path']]
                            self.mq_terms_response_10_img_path = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_10_img_path']]
                            self.mq_terms_response_1_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_1_img_string_base64_encoded']]
                            self.mq_terms_response_2_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_2_img_string_base64_encoded']]
                            self.mq_terms_response_3_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_3_img_string_base64_encoded']]
                            self.mq_terms_response_4_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_4_img_string_base64_encoded']]
                            self.mq_terms_response_5_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_5_img_string_base64_encoded']]
                            self.mq_terms_response_6_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_6_img_string_base64_encoded']]
                            self.mq_terms_response_7_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_7_img_string_base64_encoded']]
                            self.mq_terms_response_8_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_8_img_string_base64_encoded']]
                            self.mq_terms_response_9_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_9_img_string_base64_encoded']]
                            self.mq_terms_response_10_img_string_base64_encoded = mq_db_record[self.mq_db_entry_to_index_dict['terms_response_10_img_string_base64_encoded']]
                            # Correct assignment pairs (definition <-> term) 1..10
                            self.mq_assignment_pairs_definition_1 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_1']]
                            self.mq_assignment_pairs_definition_2 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_2']]
                            self.mq_assignment_pairs_definition_3 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_3']]
                            self.mq_assignment_pairs_definition_4 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_4']]
                            self.mq_assignment_pairs_definition_5 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_5']]
                            self.mq_assignment_pairs_definition_6 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_6']]
                            self.mq_assignment_pairs_definition_7 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_7']]
                            self.mq_assignment_pairs_definition_8 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_8']]
                            self.mq_assignment_pairs_definition_9 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_9']]
                            self.mq_assignment_pairs_definition_10 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_definition_10']]
                            self.mq_assignment_pairs_term_1 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_1']]
                            self.mq_assignment_pairs_term_2 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_2']]
                            self.mq_assignment_pairs_term_3 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_3']]
                            self.mq_assignment_pairs_term_4 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_4']]
                            self.mq_assignment_pairs_term_5 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_5']]
                            self.mq_assignment_pairs_term_6 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_6']]
                            self.mq_assignment_pairs_term_7 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_7']]
                            self.mq_assignment_pairs_term_8 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_8']]
                            self.mq_assignment_pairs_term_9 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_9']]
                            self.mq_assignment_pairs_term_10 = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_term_10']]
                            # Points awarded per assignment pair 1..10
                            self.mq_assignment_pairs_1_pts = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_1_pts']]
                            self.mq_assignment_pairs_2_pts = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_2_pts']]
                            self.mq_assignment_pairs_3_pts = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_3_pts']]
                            self.mq_assignment_pairs_4_pts = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_4_pts']]
                            self.mq_assignment_pairs_5_pts = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_5_pts']]
                            self.mq_assignment_pairs_6_pts = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_6_pts']]
                            self.mq_assignment_pairs_7_pts = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_7_pts']]
                            self.mq_assignment_pairs_8_pts = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_8_pts']]
                            self.mq_assignment_pairs_9_pts = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_9_pts']]
                            self.mq_assignment_pairs_10_pts = mq_db_record[self.mq_db_entry_to_index_dict['assignment_pairs_10_pts']]
                            self.mq_picture_preview_pixel = mq_db_record[self.mq_db_entry_to_index_dict['picture_preview_pixel']]
                            # Up to three images embedded in the question description
                            self.mq_description_img_name_1 = mq_db_record[self.mq_db_entry_to_index_dict['description_img_name_1']]
                            self.mq_description_img_data_1 = mq_db_record[self.mq_db_entry_to_index_dict['description_img_data_1']]
                            self.mq_description_img_path_1 = mq_db_record[self.mq_db_entry_to_index_dict['description_img_path_1']]
                            self.mq_description_img_name_2 = mq_db_record[self.mq_db_entry_to_index_dict['description_img_name_2']]
                            self.mq_description_img_data_2 = mq_db_record[self.mq_db_entry_to_index_dict['description_img_data_2']]
                            self.mq_description_img_path_2 = mq_db_record[self.mq_db_entry_to_index_dict['description_img_path_2']]
                            self.mq_description_img_name_3 = mq_db_record[self.mq_db_entry_to_index_dict['description_img_name_3']]
                            self.mq_description_img_data_3 = mq_db_record[self.mq_db_entry_to_index_dict['description_img_data_3']]
                            self.mq_description_img_path_3 = mq_db_record[self.mq_db_entry_to_index_dict['description_img_path_3']]
                            self.mq_test_time = mq_db_record[self.mq_db_entry_to_index_dict['test_time']]
                            self.mq_var_number = mq_db_record[self.mq_db_entry_to_index_dict['var_number']]
                            self.mq_res_number = mq_db_record[self.mq_db_entry_to_index_dict['res_number']]
                            self.mq_question_pool_tag = mq_db_record[self.mq_db_entry_to_index_dict['question_pool_tag']]
                            self.mq_question_author = mq_db_record[self.mq_db_entry_to_index_dict['question_author']].replace('&', "&")
                    # Emit the XML for this matched entry.
                    # NOTE(review): original nesting of this call relative to the
                    # "for t" loop could not be verified from the source layout —
                    # placed here (once per matched record); confirm against VCS.
                    Create_Zuordnungsfrage_Questions.mq_question_structure(self, i)
def mq_question_structure(self, id_nr):
"""Diese Funktion wandelt die SQL-Einträge in die .xml um, welche anschließend in ILIAS eingespielt werden kann"""
# VARIABLEN
self.mq_response_counter = 0 #wird verwendet zu zählen, wieviele Anworten pro Frage verwendet werden. Bei einer neuer Antwort -> +1
self.mq_question_description_main = test_generator_modul_taxonomie_und_textformatierung.Textformatierung.format_description_text_in_xml(self, self.mq_var_use_latex_on_text_check.get(), self.mq_question_description_main)
# Verbindung zur mq-Datenank
mq_connect = sqlite3.connect(self.database_zuordnungsfrage_path)
mq_cursor = mq_connect.cursor()
# Alle Einträge auslesen
mq_cursor.execute("SELECT *, oid FROM zuordnungsfrage_table")
mq_db_records = mq_cursor.fetchall()
for mq_db_record in mq_db_records:
if str(mq_db_record[len(mq_db_record)-1]) == self.mq_test_entry_splitted[id_nr]:
test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images(self, self.mq_description_img_name_1, self.mq_description_img_data_1, id_nr, self.question_type_test_or_pool, self.zuordnungsfrage_test_img_file_path, self.zuordnungsfrage_pool_img_file_path)
test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images(self, self.mq_description_img_name_2, self.mq_description_img_data_2, id_nr, self.question_type_test_or_pool, self.zuordnungsfrage_test_img_file_path, self.zuordnungsfrage_pool_img_file_path)
test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images(self, self.mq_description_img_name_3, self.mq_description_img_data_3, id_nr, self.question_type_test_or_pool, self.zuordnungsfrage_test_img_file_path, self.zuordnungsfrage_pool_img_file_path)
# Aufbau für Fragenstruktur "TEST"
if self.question_type_test_or_pool == "question_test":
# XML Struktur aus XML Datei festlegen. Muss nur einmal angelegt werden
questestinterop = ET.Element('questestinterop')
assessment = ET.SubElement(questestinterop, 'assessment')
section = ET.SubElement(assessment, 'section')
item = ET.SubElement(section, 'item')
# Aufbau für Fragenstruktur "POOL"
else:
# XML Struktur aus XML Datei festlegen. Muss nur einmal angelegt werden
questestinterop = ET.Element('questestinterop')
item = ET.SubElement(questestinterop, 'item')
# Zusatz für Taxonomie-Einstellungen
test_generator_modul_ilias_test_struktur.Additional_Funtions.set_taxonomy_for_question(self,
id_nr,
self.number_of_entrys,
item,
self.zuordnungsfrage_pool_qpl_file_path_template,
self.zuordnungsfrage_pool_qpl_file_path_output)
# Struktur für den Zuordnungsfrage - Fragen/Antworten Teil -- HEADER
# Muss für jede Frage neu angelegt/hinzugefügt werden
qticomment = ET.SubElement(item, 'qticomment')
duration = ET.SubElement(item, 'duration')
itemmetadata = ET.SubElement(item, 'itemmetadata')
presentation = ET.SubElement(item, 'presentation')
resprocessing = ET.SubElement(item, 'resprocessing')
# Struktur für den Zuordnungsfrage - Fragen/Antworten Teil -- MAIN
# Muss für jede Frage neu angelegt/hinzugefügt werden
flow = ET.SubElement(presentation, 'flow')
question_description_material = ET.SubElement(flow, 'material')
question_description_mattext = ET.SubElement(question_description_material, 'mattext')
response_grp = ET.SubElement(flow, 'response_grp')
render_choice = ET.SubElement(response_grp, 'render_choice')
qtimetadata = ET.SubElement(itemmetadata, 'qtimetadata')
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
### ------------------------------------------------------- XML Einträge mit Werten füllen
# Fragen-Titel -- "item title" in xml
item_ident_nr = format(id_nr, "06")
item.set('ident', "il_0_qst_" + str(item_ident_nr))
item.set('title', self.mq_question_title)
item.set('maxattempts', "0")
# Fragen-Titel Beschreibung
qticomment.text = self.mq_question_description_title
# Testdauer -- "duration" in xml
# wird keine Testzeit eingetragen, wird 1h vorausgewählt
duration.text = self.mq_test_time
if duration.text == "":
duration.text = "P0Y0M0DT1H0M0S"
""" Prüfen ob ILIAS Version ausgelesen werden kann"""
# -----------------------------------------------------------------------ILIAS VERSION
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "ILIAS_VERSION"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = "5.4.14 2020-07-31"
# -----------------------------------------------------------------------QUESTIONTYPE
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "QUESTIONTYPE"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = "MATCHING QUESTION"
# -----------------------------------------------------------------------AUTHOR
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "AUTHOR"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = self.mq_question_author
# -----------------------------------------------------------------------ADDITIONAL_CONT_EDIT_MODE
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "additional_cont_edit_mode"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = "default"
# -----------------------------------------------------------------------EXTERNAL_ID
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "externalId"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = "5f11d3ed9af3e5.53678796"
# -----------------------------------------------------------------------SHUFFLE
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "shuffle"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = str(self.mq_mix_answers)
# -----------------------------------------------------------------------THUMB_GEOMETRY
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "thumb_geometry"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = str(self.mq_picture_preview_pixel)
# -----------------------------------------------------------------------MATCHING MODE
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "matching_mode"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = str(self.mq_assignment_mode)
# Fragentitel einsetzen -- "presentation label" in xml
presentation.set('label', self.mq_question_title)
# Fragen-Text (Format) einsetzen -- "mattext_texttype" in xml -- Gibt das Format des Textes an
question_description_mattext.set('texttype', "text/html")
# Fragen-Text (Text) einsetzen -- "mattext_texttype" in xml -- Gibt die eigentliche Fragen-Beschreibung an
# Wenn Bild enthalten ist, dann in Fragenbeschreibung einbetten
if self.mq_description_img_data_1 != "":
with open('il_0_mob_TEST.png', 'wb') as image_file:
image_file.write(self.mq_description_img_data_1)
self.mq_file_image_raw = Image.open('il_0_mob_TEST.png')
self.mq_file_image_size_width, self.mq_file_image_size_height = self.mq_file_image_raw.size
question_description_mattext.text = "<p>" + self.mq_question_description_main + "</p>" + "<p><img height=\"" + str(self.mq_file_image_size_height) + "\" src=\"il_0_mob_000000" + str(id_nr) + "\" width=\"" + str(self.mq_file_image_size_width) + "\" /></p>"
matimage = ET.SubElement(question_description_material, 'matimage')
matimage.set('label', "il_0_mob_000000" + str(id_nr)) # Object -> Filename
matimage.set('uri', "objects/il_0_mob_000000" + str(id_nr) + "/" + self.mq_description_img_name_1 + ".png")
else:
question_description_mattext.text = "<p>" + self.mq_question_description_main + "</p>"
# "MQ --> Matching Question Identifier für xml datei
response_grp.set('ident', "MQ")
response_grp.set('rcardinality', "Multiple")
render_choice.set('shuffle', "Yes")
self.mq_number_of_terms_used = []
if self.mq_assignment_pairs_term_1 != "":
self.mq_number_of_terms_used.append(self.assignment_pairs_definitions_terms_to_id_dict[self.mq_assignment_pairs_term_1])
if self.mq_assignment_pairs_term_2 != "":
self.mq_number_of_terms_used.append(self.assignment_pairs_definitions_terms_to_id_dict[self.mq_assignment_pairs_term_2])
if self.mq_assignment_pairs_term_3 != "":
self.mq_number_of_terms_used.append(self.assignment_pairs_definitions_terms_to_id_dict[self.mq_assignment_pairs_term_3])
if self.mq_assignment_pairs_term_4 != "":
self.mq_number_of_terms_used.append(self.assignment_pairs_definitions_terms_to_id_dict[self.mq_assignment_pairs_term_4])
if self.mq_assignment_pairs_term_5 != "":
self.mq_number_of_terms_used.append(self.assignment_pairs_definitions_terms_to_id_dict[self.mq_assignment_pairs_term_5])
if self.mq_assignment_pairs_term_6 != "":
self.mq_number_of_terms_used.append(self.assignment_pairs_definitions_terms_to_id_dict[self.mq_assignment_pairs_term_6])
if self.mq_assignment_pairs_term_7 != "":
self.mq_number_of_terms_used.append(self.assignment_pairs_definitions_terms_to_id_dict[self.mq_assignment_pairs_term_7])
if self.mq_assignment_pairs_term_8 != "":
self.mq_number_of_terms_used.append(self.assignment_pairs_definitions_terms_to_id_dict[self.mq_assignment_pairs_term_8])
if self.mq_assignment_pairs_term_9 != "":
self.mq_number_of_terms_used.append(self.assignment_pairs_definitions_terms_to_id_dict[self.mq_assignment_pairs_term_9])
if self.mq_assignment_pairs_term_10 != "":
self.mq_number_of_terms_used.append(self.assignment_pairs_definitions_terms_to_id_dict[self.mq_assignment_pairs_term_10])
self.mq_number_of_terms_used_string = str(self.mq_number_of_terms_used)
self.mq_number_of_terms_used_string = self.mq_number_of_terms_used_string[1:-1]
#Antworten erstellen
# Antworten erstellen
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_definitions(self, self.mq_definitions_response_1_text, self.mq_definitions_response_1_img_path, self.mq_definitions_response_1_img_string_base64_encoded, render_choice, "0", self.mq_number_of_terms_used_string)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_definitions(self, self.mq_definitions_response_2_text, self.mq_definitions_response_2_img_path, self.mq_definitions_response_2_img_string_base64_encoded, render_choice, "1", self.mq_number_of_terms_used_string)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_definitions(self, self.mq_definitions_response_3_text, self.mq_definitions_response_3_img_path, self.mq_definitions_response_3_img_string_base64_encoded, render_choice, "2", self.mq_number_of_terms_used_string)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_definitions(self, self.mq_definitions_response_4_text, self.mq_definitions_response_4_img_path, self.mq_definitions_response_4_img_string_base64_encoded, render_choice, "3", self.mq_number_of_terms_used_string)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_definitions(self, self.mq_definitions_response_5_text, self.mq_definitions_response_5_img_path, self.mq_definitions_response_5_img_string_base64_encoded, render_choice, "4", self.mq_number_of_terms_used_string)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_definitions(self, self.mq_definitions_response_6_text, self.mq_definitions_response_6_img_path, self.mq_definitions_response_6_img_string_base64_encoded, render_choice, "5", self.mq_number_of_terms_used_string)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_definitions(self, self.mq_definitions_response_7_text, self.mq_definitions_response_7_img_path, self.mq_definitions_response_7_img_string_base64_encoded, render_choice, "6", self.mq_number_of_terms_used_string)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_definitions(self, self.mq_definitions_response_8_text, self.mq_definitions_response_8_img_path, self.mq_definitions_response_8_img_string_base64_encoded, render_choice, "7", self.mq_number_of_terms_used_string)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_definitions(self, self.mq_definitions_response_9_text, self.mq_definitions_response_9_img_path, self.mq_definitions_response_9_img_string_base64_encoded, render_choice, "8", self.mq_number_of_terms_used_string)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_definitions(self, self.mq_definitions_response_10_text, self.mq_definitions_response_10_img_path, self.mq_definitions_response_10_img_string_base64_encoded, render_choice, "9", self.mq_number_of_terms_used_string)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_terms(self, self.mq_terms_response_1_text, self.mq_terms_response_1_img_path, self.mq_terms_response_1_img_string_base64_encoded,render_choice, "10")
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_terms(self, self.mq_terms_response_2_text, self.mq_terms_response_2_img_path, self.mq_terms_response_2_img_string_base64_encoded,render_choice, "11")
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_terms(self, self.mq_terms_response_3_text, self.mq_terms_response_3_img_path, self.mq_terms_response_3_img_string_base64_encoded,render_choice, "12")
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_terms(self, self.mq_terms_response_4_text, self.mq_terms_response_4_img_path, self.mq_terms_response_4_img_string_base64_encoded,render_choice, "13")
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_terms(self, self.mq_terms_response_5_text, self.mq_terms_response_5_img_path, self.mq_terms_response_5_img_string_base64_encoded,render_choice, "14")
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_terms(self, self.mq_terms_response_6_text, self.mq_terms_response_6_img_path, self.mq_terms_response_6_img_string_base64_encoded,render_choice, "15")
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_terms(self, self.mq_terms_response_7_text, self.mq_terms_response_7_img_path, self.mq_terms_response_7_img_string_base64_encoded,render_choice, "16")
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_terms(self, self.mq_terms_response_8_text, self.mq_terms_response_8_img_path, self.mq_terms_response_8_img_string_base64_encoded,render_choice, "17")
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_terms(self, self.mq_terms_response_9_text, self.mq_terms_response_9_img_path, self.mq_terms_response_9_img_string_base64_encoded,render_choice, "18")
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_terms(self, self.mq_terms_response_10_text, self.mq_terms_response_10_img_path, self.mq_terms_response_10_img_string_base64_encoded,render_choice, "19")
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_assignment_pairs(self, self.mq_assignment_pairs_definition_1, self.mq_assignment_pairs_term_1, self.mq_assignment_pairs_1_pts, resprocessing, item)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_assignment_pairs(self, self.mq_assignment_pairs_definition_2, self.mq_assignment_pairs_term_2, self.mq_assignment_pairs_2_pts, resprocessing, item)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_assignment_pairs(self, self.mq_assignment_pairs_definition_3, self.mq_assignment_pairs_term_3, self.mq_assignment_pairs_3_pts, resprocessing, item)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_assignment_pairs(self, self.mq_assignment_pairs_definition_4, self.mq_assignment_pairs_term_4, self.mq_assignment_pairs_4_pts, resprocessing, item)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_assignment_pairs(self, self.mq_assignment_pairs_definition_5, self.mq_assignment_pairs_term_5, self.mq_assignment_pairs_5_pts, resprocessing, item)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_assignment_pairs(self, self.mq_assignment_pairs_definition_6, self.mq_assignment_pairs_term_6, self.mq_assignment_pairs_6_pts, resprocessing, item)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_assignment_pairs(self, self.mq_assignment_pairs_definition_7, self.mq_assignment_pairs_term_7, self.mq_assignment_pairs_7_pts, resprocessing, item)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_assignment_pairs(self, self.mq_assignment_pairs_definition_8, self.mq_assignment_pairs_term_8, self.mq_assignment_pairs_8_pts, resprocessing, item)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_assignment_pairs(self, self.mq_assignment_pairs_definition_9, self.mq_assignment_pairs_term_9, self.mq_assignment_pairs_9_pts, resprocessing, item)
Create_Zuordnungsfrage_Questions.mq_question_answer_structure_assignment_pairs(self, self.mq_assignment_pairs_definition_10, self.mq_assignment_pairs_term_10, self.mq_assignment_pairs_10_pts, resprocessing, item)
# Wenn es sich um einen ILIAS-Test handelt, beinhaltet die XML eine Struktur mit mehreren "Zweigen"
# Der letzte "Zweig" --> "len(self.mq_myroot[0]) - 1" (beschreibt das letze Fach) beinhaltet die eigentlichen Fragen
if self.question_type_test_or_pool == "question_test":
self.mq_myroot[0][len(self.mq_myroot[0]) - 1].append(item)
# Wenn es sich um einen ILIAS-Pool handelt, beinhaltet die XML keine Struktur
# Die Frage kann einfach angehangen werden
else:
self.mq_myroot.append(item)
self.mq_mytree.write(self.qti_file_path_output)
print("Zuordnungsfrage erstellt! --> Titel: " + str(self.mq_question_title))
mq_connect.commit()
mq_connect.close()
if self.question_type_test_or_pool == "question_pool":
###### Anpassung der Datei "qpl". Akualisierung des Dateinamens
self.qpl_file = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path,"mq_ilias_pool_abgabe", self.mq_ilias_id_pool_qpl_dir, self.mq_ilias_id_pool_qti_xml))
self.mytree = ET.parse(self.qpl_file)
self.myroot = self.mytree.getroot()
for ident_id in self.myroot.iter('Identifier'):
ident_id.set('Entry', "il_0_qpl_" + str(self.mq_file_max_id+1))
self.mytree.write(self.qpl_file)
####################### QUESTION_ANSWER STRUCTURE #############################
def mq_question_answer_structure_definitions(self, mq_definitions_response_var_text, mq_definitions_response_var_img_path, mq_definitions_response_var_img_string_base64_encoded, xml_render_choice, mq_definition_id, mq_number_of_terms_used):
    """Append one <response_label> for a definition answer to the QTI tree.

    Creates ``<response_label><material><mattext/></material></response_label>``
    under *xml_render_choice*; when a base64 image payload is supplied, a
    ``<matimage>`` element is added next to the text.

    Parameters:
        mq_definitions_response_var_text: plain text of the definition; an
            empty string means the slot is unused and nothing is emitted.
        mq_definitions_response_var_img_path: original image file path, used
            to derive the MIME type and the image label.
        mq_definitions_response_var_img_string_base64_encoded: base64 payload
            of the image, or "" when there is no image.
        xml_render_choice: parent <render_choice> element.
        mq_definition_id: value for the <response_label> ident attribute.
        mq_number_of_terms_used: comma-separated string of the term ids this
            definition may be matched against (match_group attribute).
    """
    # Antworten für Definitionen (answers for the definition column)
    if mq_definitions_response_var_text != "":
        response_label = ET.SubElement(xml_render_choice, 'response_label')
        question_answer_material = ET.SubElement(response_label, 'material')
        question_answer_mattext = ET.SubElement(question_answer_material, 'mattext')
        response_label.set('ident', str(mq_definition_id))
        response_label.set('match_max', "1")
        # Bug fix: use the bracket-stripped string the caller passes in.
        # The original wrote str(self.mq_number_of_terms_used), which
        # serialized the raw list repr (e.g. "[0, 1]") into the attribute
        # and silently ignored this parameter.
        response_label.set('match_group', str(mq_number_of_terms_used))
        question_answer_mattext.set('texttype', "text/plain")
        question_answer_mattext.text = mq_definitions_response_var_text
        if mq_definitions_response_var_img_string_base64_encoded != "":
            question_answer_matimage = ET.SubElement(question_answer_material, 'matimage')
            # Derive the MIME type from the file extension once.
            file_extension = str(mq_definitions_response_var_img_path.rpartition('.')[-1])
            if file_extension == "jpg" or file_extension == "jpeg":
                question_answer_matimage.set('imagtype', "image/jpeg")
            elif file_extension == "png":
                question_answer_matimage.set('imagtype', "image/png")
            elif file_extension == "gif":
                question_answer_matimage.set('imagtype', "image/gif")
            else:
                print("Bildformat ist nicht jpg/jpeg/png/gif und wird von ILIAS nicht unterstützt!")
            question_answer_matimage.set('label', mq_definitions_response_var_img_path.rpartition('/')[-1])
            question_answer_matimage.set('embedded', "base64")
            question_answer_matimage.text = str(mq_definitions_response_var_img_string_base64_encoded)
def mq_question_answer_structure_terms(self, mq_terms_response_var_text, mq_terms_response_var_img_path, mq_terms_response_var_img_string_base64_encoded, xml_render_choice, mq_response_counter):
    """Append one <response_label> for a term answer to the QTI tree.

    A label is only written when the term text is non-empty; an optional
    base64-encoded image is embedded as a <matimage> sibling of the text.
    """
    # Antworten für Terme (answers for the term column)
    if mq_terms_response_var_text == "":
        return  # unused term slot -> emit nothing
    label = ET.SubElement(xml_render_choice, 'response_label')
    label.set('ident', str(mq_response_counter))
    material = ET.SubElement(label, 'material')
    text_node = ET.SubElement(material, 'mattext')
    text_node.set('texttype', "text/plain")
    text_node.text = mq_terms_response_var_text
    if mq_terms_response_var_img_string_base64_encoded == "":
        return  # no embedded image for this term
    image_node = ET.SubElement(material, 'matimage')
    # Map the file extension to the MIME type ILIAS expects.
    suffix = str(mq_terms_response_var_img_path.rpartition('.')[-1])
    mime_by_suffix = {
        "jpg": "image/jpeg",
        "jpeg": "image/jpeg",
        "png": "image/png",
        "gif": "image/gif",
    }
    if suffix in mime_by_suffix:
        image_node.set('imagtype', mime_by_suffix[suffix])
    else:
        print("Bildformat ist nicht jpg/jpeg/png/gif und wird von ILIAS nicht unterstützt!")
    image_node.set('label', mq_terms_response_var_img_path.rpartition('/')[-1])
    image_node.set('embedded', "base64")
    image_node.text = str(mq_terms_response_var_img_string_base64_encoded)
def mq_question_answer_structure_assignment_pairs(self, mq_assignment_pairs_definition_var, mq_assignment_pairs_term_var, mq_assignment_pairs_var_pts, xml_resprocessing, xml_item):
    """Write the response processing for one definition/term matching pair.

    Adds a <respcondition> (holding the term-id/definition-id pair and its
    score) plus an <outcomes>/<decvar> skeleton under *xml_resprocessing*,
    and a matching <itemfeedback> stub under *xml_item*.  Pairs with an
    empty term or definition are skipped entirely.
    """
    if mq_assignment_pairs_term_var == "" or mq_assignment_pairs_definition_var == "":
        return  # incomplete pair -> nothing to emit
    # Look the numeric answer ids up once; both are reused several times.
    term_id = str(self.assignment_pairs_definitions_terms_to_id_dict[mq_assignment_pairs_term_var])
    definition_id = str(self.assignment_pairs_definitions_terms_to_id_dict[mq_assignment_pairs_definition_var])
    # Zuordnungspaare definieren (define the matching pair)
    condition = ET.SubElement(xml_resprocessing, 'respcondition')
    condition.set('continue', "Yes")
    outcomes = ET.SubElement(xml_resprocessing, 'outcomes')
    ET.SubElement(outcomes, 'decvar')
    condition_var = ET.SubElement(condition, 'conditionvar')
    subset = ET.SubElement(condition_var, 'varsubset')
    subset.set('respident', "MQ")  # MQ --> Matching Question Ident
    subset.text = term_id + "," + definition_id  # answer-id pair for this match
    score = ET.SubElement(condition, 'setvar')
    score.set('action', "Add")
    score.text = str(mq_assignment_pairs_var_pts)  # points awarded for this pair
    feedback_link = ET.SubElement(condition, 'displayfeedback')
    feedback_link.set('feedbacktype', "Response")
    # NOTE(review): linkrefid ends with a trailing "_" and omits the
    # definition id, while the itemfeedback ident below includes it --
    # looks inconsistent; confirm against the ILIAS QTI importer before
    # changing, behavior preserved here.
    feedback_link.set('linkrefid', "correct_" + term_id + "_")
    # --------------------------------------------------------ZUSATZ FÜR ANTWORT (feedback stub for the answer)
    feedback = ET.SubElement(xml_item, 'itemfeedback')
    feedback_flow = ET.SubElement(feedback, 'flow_mat')
    feedback_material = ET.SubElement(feedback_flow, 'material')
    ET.SubElement(feedback_material, 'mattext')
    feedback.set('ident', "correct_" + term_id + "_" + definition_id)
    feedback.set('view', "All")
class Create_Zuordnungsfrage_Test(Zuordnungsfrage):
    # Builds the ILIAS test files for the matching-question ("Zuordnungsfrage") type.
    def __init__(self, entry_to_index_dict):
        """Create the ILIAS test XML files for matching questions.

        An ILIAS test always consists of the two files "*_qti_*.xml" and
        "*_tst_*.xml".
        The "tst" file contains the list of questions, the test title and the
        test id.
        The "qti" file contains the test settings and the actual description of
        the individual questions: the question text, solutions, points,
        images etc.
        _________________________________________________________________
        Example for a test consisting of 3 questions, for the _tst_ file:
        ...
        ...
        Test title: <Title Language="de">Zuordnungsfrage</Title>
        ...
        ...
        Test id: <Identifier Catalog="ILIAS" Entry="il_0_tst_2040314"/>
        ...
        ...
        List of the questions:
        <Question QRef="il_0_qst_457015"/>
        <Question QRef="il_0_qst_526726"/>
        <Question QRef="il_0_qst_457016"/>
        ...
        ...
        <TriggerQuestion Id="457015"/>
        <TriggerQuestion Id="526726"/>
        <TriggerQuestion Id="457016"/>
        __________________________________________________________________
        Example for a test consisting of 3 questions, for the _qti_ file:
        ...
        ...
        <assessment ident="il_0_tst_8869" title="Zuordnungsfrage">
        ...
        // various test settings //
        ...
        <item ident="il_0_qst_457015" title="Arbeitspunkt" maxattempts="0"> -- first question
        // question description, solutions, scoring -- the actual rendering of the question
        ...
        ...
        <item ident="il_0_qst_526726" title="Zuordnungsfrage Test" maxattempts="0"> -- second question
        // question description, solutions, scoring -- the actual rendering of the question
        ...
        ...
        <item ident="il_0_qst_457016" title="Eigenschaften der Asynchronmaschine" maxattempts="0"> -- third question
        // question description, solutions, scoring -- the actual rendering of the question
        ...
        """
        self.mq_db_entry_to_index_dict = entry_to_index_dict
        # Delegate the actual file generation to the shared ILIAS test
        # builder; paths, titles and the question count come from the
        # Zuordnungsfrage base class / GUI entries.
        test_generator_modul_ilias_test_struktur.Create_ILIAS_Test.__init__(self,
                                                                            self.mq_db_entry_to_index_dict,
                                                                            self.zuordnungsfrage_test_tst_file_path_template,
                                                                            self.zuordnungsfrage_test_tst_file_path_output,
                                                                            self.zuordnungsfrage_test_qti_file_path_template,
                                                                            self.zuordnungsfrage_test_qti_file_path_output,
                                                                            self.mq_ilias_test_title_entry.get(),
                                                                            self.create_zuordnungsfrage_test_entry.get(),
                                                                            self.mq_question_type_entry.get(),
                                                                            )
class Create_Zuordnungsfrage_Pool(Zuordnungsfrage):
    # Builds an ILIAS question pool for the matching-question ("Zuordnungsfrage") type.
    def __init__(self, entry_to_index_dict, var_create_all_questions):
        """Create an ILIAS question pool for matching questions.

        Runs on a click of the "ILIAS-Fragenpool erstellen" (create ILIAS
        question pool) button: XML files and folders with an ascending id
        are created by the shared pool builder module.

        Parameters:
            entry_to_index_dict: maps GUI/database entry names to column
                indices for this question type.
            var_create_all_questions: flag controlling whether all questions
                from the database table or only selected ones go into the
                pool.
        """
        self.mq_entry_to_index_dict = entry_to_index_dict
        self.mq_var_create_question_pool_all = var_create_all_questions
        test_generator_modul_ilias_test_struktur.Create_ILIAS_Pool.__init__(self,
                                                                            self.project_root_path,
                                                                            self.zuordnungsfrage_pool_directory_output,
                                                                            self.zuordnungsfrage_files_path_pool_output,
                                                                            self.zuordnungsfrage_pool_qti_file_path_template,
                                                                            self.mq_ilias_test_title_entry.get(),
                                                                            self.create_zuordnungsfrage_pool_entry.get(),
                                                                            self.mq_question_type_name,
                                                                            self.database_zuordnungsfrage_path,
                                                                            self.mq_database_table,
                                                                            # Bug fix: pass the dict received by THIS
                                                                            # constructor. The original passed
                                                                            # self.mq_db_entry_to_index_dict, which is only
                                                                            # set by Create_Zuordnungsfrage_Test.__init__
                                                                            # and would raise AttributeError when a pool is
                                                                            # created without creating a test first.
                                                                            self.mq_entry_to_index_dict,
                                                                            self.mq_var_create_question_pool_all
                                                                            )
        # shutil.make_archive("test", 'zip', self.zuordnungsfrage_pool_directory_output)
        print("\n ----> Erstellung Fragenpool abgeschlossen! <----")
| 72.6729
| 445
| 0.681276
| 30,188
| 229,283
| 4.633265
| 0.026964
| 0.095404
| 0.053236
| 0.060507
| 0.91141
| 0.880252
| 0.8398
| 0.764015
| 0.692763
| 0.61569
| 0
| 0.028679
| 0.23293
| 229,283
| 3,154
| 446
| 72.695942
| 0.766591
| 0.063371
| 0
| 0.319298
| 0
| 0
| 0.160613
| 0.119523
| 0.000439
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.011404
| 0
| 0.048246
| 0.003947
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b9c75cef565000d565aac689dbe48f75c26c4be7
| 105
|
py
|
Python
|
bpnet/modisco/__init__.py
|
mlweilert/bpnet
|
dcc9e8d805f9de774ae9dcc62c20504915be614f
|
[
"MIT"
] | 93
|
2019-08-15T19:49:19.000Z
|
2022-03-04T08:23:44.000Z
|
bpnet/modisco/__init__.py
|
mlweilert/bpnet
|
dcc9e8d805f9de774ae9dcc62c20504915be614f
|
[
"MIT"
] | 29
|
2019-08-15T15:44:44.000Z
|
2022-03-28T06:56:07.000Z
|
bpnet/modisco/__init__.py
|
mlweilert/bpnet
|
dcc9e8d805f9de774ae9dcc62c20504915be614f
|
[
"MIT"
] | 24
|
2019-08-29T18:54:36.000Z
|
2022-03-23T21:04:46.000Z
|
from .core import Pattern, Seqlet, StackedSeqletContrib
from .files import ModiscoFile, ModiscoFileGroup
| 35
| 55
| 0.847619
| 11
| 105
| 8.090909
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104762
| 105
| 2
| 56
| 52.5
| 0.946809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b9c8dd2245671c28d0464679312391ad817d877b
| 215
|
py
|
Python
|
tests/tools.py
|
mnagel/pymailsort
|
4d1084315935ac29174f2c6ad778d9a99e58ed96
|
[
"MIT"
] | 3
|
2016-04-01T15:28:37.000Z
|
2016-04-18T14:28:37.000Z
|
tests/tools.py
|
mnagel/pymailsort
|
4d1084315935ac29174f2c6ad778d9a99e58ed96
|
[
"MIT"
] | null | null | null |
tests/tools.py
|
mnagel/pymailsort
|
4d1084315935ac29174f2c6ad778d9a99e58ed96
|
[
"MIT"
] | null | null | null |
from nose.tools import *
def same(message, expected, actual):
eq_(actual, expected, '%s. We expected "%s" but got "%s"' % (message, expected, actual))
def confirm(message, actual):
eq_(message, True, actual)
| 26.875
| 92
| 0.683721
| 30
| 215
| 4.833333
| 0.533333
| 0.206897
| 0.289655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15814
| 215
| 7
| 93
| 30.714286
| 0.801105
| 0
| 0
| 0
| 0
| 0
| 0.153488
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
b9cc8063d1faeaf2834d36b2efb16404a9ecdb69
| 166
|
py
|
Python
|
5_kyu/extract_the_domain_name_from_a_url.py
|
nik4nd/codewars
|
efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0
|
[
"MIT"
] | null | null | null |
5_kyu/extract_the_domain_name_from_a_url.py
|
nik4nd/codewars
|
efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0
|
[
"MIT"
] | null | null | null |
5_kyu/extract_the_domain_name_from_a_url.py
|
nik4nd/codewars
|
efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0
|
[
"MIT"
] | null | null | null |
def domain_name(url):
for i in ['www.', '//']:
if i in url:
return url.split(i)[1][:url.split(i)[1].find('.')]
return url[:url.find('.')]
| 27.666667
| 62
| 0.481928
| 26
| 166
| 3.038462
| 0.5
| 0.075949
| 0.227848
| 0.253165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01626
| 0.259036
| 166
| 5
| 63
| 33.2
| 0.626016
| 0
| 0
| 0
| 0
| 0
| 0.048193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
b9efd23c4779c182774cef8920c037415dddec6c
| 193
|
py
|
Python
|
app/fields.py
|
rockyhandsm2k/website
|
153df20e26d7060c671d7ed4eeb6ed0221951bcf
|
[
"MIT"
] | 2
|
2021-03-30T22:13:49.000Z
|
2021-04-05T17:43:43.000Z
|
app/fields.py
|
rockyhandsm2k/website
|
153df20e26d7060c671d7ed4eeb6ed0221951bcf
|
[
"MIT"
] | 5
|
2020-09-30T14:46:54.000Z
|
2021-03-29T19:12:13.000Z
|
app/fields.py
|
rockyhandsm2k/website
|
153df20e26d7060c671d7ed4eeb6ed0221951bcf
|
[
"MIT"
] | 17
|
2019-09-28T15:08:59.000Z
|
2021-11-17T02:07:27.000Z
|
from wtforms import SelectField
class NonValidatingSelectField(SelectField):
""" A WTForms SelectField without built-in validation.
"""
def pre_validate(self, form):
pass
| 21.444444
| 58
| 0.715026
| 20
| 193
| 6.85
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.207254
| 193
| 8
| 59
| 24.125
| 0.895425
| 0.259067
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
b9f40fbf8dfd62a1485384e06a29b9ef6278e909
| 177
|
py
|
Python
|
packages/pyright-internal/src/tests/samples/import9.py
|
sasano8/pyright
|
e804f324ee5dbd25fd37a258791b3fd944addecd
|
[
"MIT"
] | 4,391
|
2019-05-07T01:18:57.000Z
|
2022-03-31T20:45:44.000Z
|
packages/pyright-internal/src/tests/samples/import9.py
|
sasano8/pyright
|
e804f324ee5dbd25fd37a258791b3fd944addecd
|
[
"MIT"
] | 2,740
|
2019-05-07T03:29:30.000Z
|
2022-03-31T12:57:46.000Z
|
packages/pyright-internal/src/tests/samples/import9.py
|
sasano8/pyright
|
e804f324ee5dbd25fd37a258791b3fd944addecd
|
[
"MIT"
] | 455
|
2019-05-07T12:55:14.000Z
|
2022-03-31T17:09:15.000Z
|
# This sample tests support for PEP 562's __getattr__ function.
# This should not generate an error because import8 has
# a __getattr__ method.
from .import8 import foo
foo()
| 22.125
| 63
| 0.774011
| 27
| 177
| 4.777778
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034247
| 0.175141
| 177
| 7
| 64
| 25.285714
| 0.849315
| 0.774011
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6a399e57cc3033f0298e836a24ac506aed3f7433
| 132
|
py
|
Python
|
feincms/module/blog/admin.py
|
pjdelport/feincms
|
6aed5b76104a6b3bb801fb1657f597506aa96d72
|
[
"BSD-3-Clause"
] | null | null | null |
feincms/module/blog/admin.py
|
pjdelport/feincms
|
6aed5b76104a6b3bb801fb1657f597506aa96d72
|
[
"BSD-3-Clause"
] | null | null | null |
feincms/module/blog/admin.py
|
pjdelport/feincms
|
6aed5b76104a6b3bb801fb1657f597506aa96d72
|
[
"BSD-3-Clause"
] | 1
|
2020-06-27T11:12:21.000Z
|
2020-06-27T11:12:21.000Z
|
from django.contrib import admin
from feincms.module.blog.models import Entry, EntryAdmin
admin.site.register(Entry, EntryAdmin)
| 18.857143
| 56
| 0.818182
| 18
| 132
| 6
| 0.722222
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106061
| 132
| 6
| 57
| 22
| 0.915254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6a4997d05fd5ab939e620024eb1fa55d6c2ce447
| 5,231
|
py
|
Python
|
marry_me.py
|
graham768/3D-printed-mirror-array
|
d67a2e8dc4a74143e20b02ef062df8b03a838643
|
[
"MIT"
] | null | null | null |
marry_me.py
|
graham768/3D-printed-mirror-array
|
d67a2e8dc4a74143e20b02ef062df8b03a838643
|
[
"MIT"
] | null | null | null |
marry_me.py
|
graham768/3D-printed-mirror-array
|
d67a2e8dc4a74143e20b02ef062df8b03a838643
|
[
"MIT"
] | null | null | null |
from mirror_array.helpers import *
from mirror_array.coords import get_coords
from mirror_array.grid import build_mirror_array, divide_hex_grid_flower
marry_me_initial = [[749.67, 438.1], [1204.75, 469.56], [546.35, 438.88], [1275.28, 832.91], [1017.78, 833.74], [647.6295776367188, 799.1300964355469], [724.099609375, 799.9089050292969], [266.5836181640625, 817.9267272949219], [472.28, 825.59], [749.72, 380.21], [545.87, 382.16], [452.72, 854.7], [286.96, 853.25], [715., 402.43], [268.17, 763.19], [566.45, 435.89], [728.47, 434.11], [1216.48, 375.58], [1187.9737548828125, 256.1741943359375], [1195.5775146484375, 256.1741943359375], [1192.04, 263.91], [1190.316650390625, 319.66131591796875], [1190.37, 339.86], [1200.08, 360.98], [1232.17, 392.32], [1242.01, 414.51], [1240.2, 442.8], [1226.76, 460.95], [1179.57, 470.69], [1158.2, 461.2], [1144.78, 446.96], [1138.196044921875, 426.24267578125], [897.4918823242188, 418.1148681640625], [926.57, 467.27], [960.62, 466.59], [993.49, 467.39], [927.3, 366.3], [956.21, 365.58], [993.6444091796875, 257.5711669921875], [958.5223388671875, 257.8609619140625], [924.378173828125, 258.04620361328125], [898.84, 282.62], [898.1, 310.38], [896.8499755859375, 340.74298095703125], [897.62, 392.57], [897.430908203125, 443.7537841796875], [898.8089599609375, 366.19598388671875], [1019.248291015625, 465.7032470703125], [988.32, 367.04], [1022.5699462890625, 257.55078125], [898.09, 257.23], [897.6317138671875, 467.7864990234375], [545.9, 412.09], [578.06, 406.69], [471.71, 759.91], [749.8, 410.35], [749.28, 348.51], [697.87, 364.76], [595.89, 369.18], [545.29, 351.59], [544.24755859375, 320.94464111328125], [615.7874755859375, 323.77764892578125], [677.2498168945312, 322.1021728515625], [749.7682495117188, 321.30474853515625], [750.0340576171875, 286.13226318359375], [661.3663940429688, 288.0025634765625], [633.14208984375, 288.0025634765625], [544.21484375, 286.75433349609375], [751.0889892578125, 255.06298828125], [745.34, 465.11], [647.705078125, 257.4637451171875], [548.85, 467.3], [545.132568359375, 254.5081787109375], [1484.33, 806.92], [1433.8072509765625, 805.7794647216797], 
[1459.27, 692.7], [1458.76, 719.32], [1457.575439453125, 745.1332092285156], [1397.06, 866.63], [1408.35, 846.25], [1421.2, 825.46], [1446.67, 784.15], [1469.96, 785.14], [1497.03, 828.66], [1508.74, 848.17], [1520.617919921875, 869.9008636474609], [1458.5758056640625, 766.6499633789062], [1530.668212890625, 884.5923004150391], [1384.2706298828125, 883.6441497802734], [1458.09423828125, 671.2614135742188], [1185.76, 882.84], [1223.1552734375, 882.6755676269531], [1251.27, 875.17], [1270.83, 857.79], [1268.17431640625, 805.2270812988281], [1245.04, 787.51], [1181.78, 781.8], [1211.3526611328125, 781.2495880126953], [1226.75, 754.47], [1244.0364990234375, 725.6549987792969], [1261.5723876953125, 697.1242370605469], [1277.6258544921875, 671.0181884765625], [1155.409912109375, 672.613037109375], [1155.4261474609375, 704.9848327636719], [1155.4664306640625, 730.7848815917969], [1155.3052978515625, 756.4649353027344], [1155.66796875, 781.4039306640625], [1155.81, 807.5], [1154.74, 834.85], [1154.44, 860.99], [1154.1702880859375, 883.5802764892578], [897.8416748046875, 804.0701904296875], [897.5758666992188, 833.1923522949219], [897.0039672851562, 861.1928863525391], [922.34, 882.96], [955.9832153320312, 883.3663787841797], [985.98, 879.14], [1010.79, 862.27], [1012.2882080078125, 805.221923828125], [987.794921875, 786.5226898193359], [925.7581787109375, 781.9527893066406], [897.406982421875, 756.2396545410156], [968.6502685546875, 757.0301513671875], [986.75, 729.44], [897.3654174804688, 732.0417785644531], [897.139892578125, 698.6778564453125], [1003.6195068359375, 698.6194152832031], [955.4535522460938, 782.1603088378906], [898.4556884765625, 781.937255859375], [897.9386596679688, 882.872314453125], [1021.3914794921875, 670.7004699707031], [898.5322875976562, 671.0590209960938], [651.59, 734.93], [717.95, 734.58], [762.74, 703.86], [605.9717407226562, 703.7068481445312], [634.17, 767.06], [735.34, 769.67], [712.33, 830.17], [660.43, 828.6], [671.5, 851.59], 
[702.06005859375, 853.2278900146484], [749.8562622070312, 736.0979309082031], [621.53, 736.07], [685.41, 735.23], [685.58, 879.34], [776.983154296875, 672.78271484375], [593.150146484375, 671.5205993652344], [268.43, 700.08], [349.78631591796875, 709.9300231933594], [389.09, 710.53], [471.39, 705.05], [471.59, 731.2], [402.43, 746.08], [334.43, 747.92], [267.55, 731.3], [266.57, 851.98], [301.76, 822.9], [439.2, 827.33], [470.74, 853.1], [471.35, 791.46], [422.74, 789.53], [316.13, 789.57], [267.87, 791.82], [471.3092041015625, 673.2996826171875], [469.02, 882.34], [369.4679870605469, 675.087646484375], [270.44, 881.96], [267.99627685546875, 670.501220703125]]
marry_me_coords = get_coords(marry_me_initial, width_in=49.77, height_in=24.81)
# To generate the final 3D model and use the partitioning scheme:
positions, normals, target_positions = build_mirror_array(marry_me_coords, use_ground_target=True, filename="mirror_array.stl", divider_function=divide_hex_grid_flower)
plot_colored_projection(target_positions)
plot_colored_grid(positions, divide_hex_grid_flower(positions, 14.85))
| 435.916667
| 4,649
| 0.724909
| 769
| 5,231
| 4.881665
| 0.652796
| 0.017581
| 0.011987
| 0.015184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.678977
| 0.073409
| 5,231
| 12
| 4,650
| 435.916667
| 0.095523
| 0.012044
| 0
| 0
| 0
| 0
| 0.003097
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.375
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
dbe97251ebeb696966fb72e7dd3ea21f14476a4d
| 220
|
py
|
Python
|
shopify/resources/customer_saved_search.py
|
subhrajyoti21/shopify_python_api
|
8efdafd7a57aad782e7e93b5b16cde47d350da2a
|
[
"MIT"
] | 828
|
2015-01-08T16:03:55.000Z
|
2022-03-25T16:58:37.000Z
|
shopify/resources/customer_saved_search.py
|
subhrajyoti21/shopify_python_api
|
8efdafd7a57aad782e7e93b5b16cde47d350da2a
|
[
"MIT"
] | 389
|
2015-02-01T03:33:49.000Z
|
2022-03-23T08:42:33.000Z
|
shopify/resources/customer_saved_search.py
|
subhrajyoti21/shopify_python_api
|
8efdafd7a57aad782e7e93b5b16cde47d350da2a
|
[
"MIT"
] | 267
|
2015-01-20T21:40:19.000Z
|
2022-03-29T04:09:56.000Z
|
from ..base import ShopifyResource
from .customer import Customer
class CustomerSavedSearch(ShopifyResource):
    """A saved customer search (a stored customer filter) in a Shopify shop."""

    def customers(self, **kwargs):
        """Return the customers matched by this saved search.

        Issues a GET against this resource's nested ``customers`` endpoint
        (instance-level ``get`` from the ActiveResource base) and wraps the
        response in a collection of :class:`Customer` objects.  Any keyword
        arguments are forwarded as query parameters (e.g. pagination).

        NOTE: this is deliberately an *instance* method — the original code
        confusingly named the first parameter ``cls`` without decorating it
        as a classmethod; it has been renamed to ``self`` with no change in
        behavior.
        """
        return Customer._build_collection(self.get("customers", **kwargs))
| 27.5
| 73
| 0.759091
| 23
| 220
| 7.173913
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 220
| 7
| 74
| 31.428571
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0.040909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
e0004cbfb607e3871441e638397a8f70c0cbce21
| 57,764
|
py
|
Python
|
amazon_msk/datadog_checks/amazon_msk/metrics.py
|
mchelen-gov/integrations-core
|
81281600b3cc7025a7a32148c59620c9592a564f
|
[
"BSD-3-Clause"
] | 663
|
2016-08-23T05:23:45.000Z
|
2022-03-29T00:37:23.000Z
|
amazon_msk/datadog_checks/amazon_msk/metrics.py
|
mchelen-gov/integrations-core
|
81281600b3cc7025a7a32148c59620c9592a564f
|
[
"BSD-3-Clause"
] | 6,642
|
2016-06-09T16:29:20.000Z
|
2022-03-31T22:24:09.000Z
|
amazon_msk/datadog_checks/amazon_msk/metrics.py
|
mchelen-gov/integrations-core
|
81281600b3cc7025a7a32148c59620c9592a564f
|
[
"BSD-3-Clause"
] | 1,222
|
2017-01-27T15:51:38.000Z
|
2022-03-31T18:17:51.000Z
|
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# Mapping from the raw Prometheus metric names exposed by the node exporter to
# the dotted metric names submitted to Datadog.  Every dotted name is exactly
# the raw name with underscores replaced by dots, so the table is derived from
# the key list instead of being spelled out pair by pair.
NODE_METRICS_MAP = {
    raw_name: raw_name.replace('_', '.')
    for raw_name in (
        'go_gc_duration_seconds',
        'go_goroutines',
        'go_info',
        'go_memstats_alloc_bytes',
        'go_memstats_alloc_bytes_total',
        'go_memstats_buck_hash_sys_bytes',
        'go_memstats_frees_total',
        'go_memstats_gc_cpu_fraction',
        'go_memstats_gc_sys_bytes',
        'go_memstats_heap_alloc_bytes',
        'go_memstats_heap_idle_bytes',
        'go_memstats_heap_inuse_bytes',
        'go_memstats_heap_objects',
        'go_memstats_heap_released_bytes',
        'go_memstats_heap_sys_bytes',
        'go_memstats_last_gc_time_seconds',
        'go_memstats_lookups_total',
        'go_memstats_mallocs_total',
        'go_memstats_mcache_inuse_bytes',
        'go_memstats_mcache_sys_bytes',
        'go_memstats_mspan_inuse_bytes',
        'go_memstats_mspan_sys_bytes',
        'go_memstats_next_gc_bytes',
        'go_memstats_other_sys_bytes',
        'go_memstats_stack_inuse_bytes',
        'go_memstats_stack_sys_bytes',
        'go_memstats_sys_bytes',
        'go_threads',
        'node_cpu_seconds_total',
        'node_exporter_build_info',
        'node_filesystem_avail_bytes',
        'node_filesystem_device_error',
        'node_filesystem_files',
        'node_filesystem_files_free',
        'node_filesystem_free_bytes',
        'node_filesystem_readonly',
        'node_filesystem_size_bytes',
        'node_scrape_collector_duration_seconds',
        'node_scrape_collector_success',
        'process_cpu_seconds_total',
        'process_max_fds',
        'process_open_fds',
        'process_resident_memory_bytes',
        'process_start_time_seconds',
        'process_virtual_memory_bytes',
        'process_virtual_memory_max_bytes',
        'promhttp_metric_handler_requests_in_flight',
        'promhttp_metric_handler_requests_total',
    )
}
# No node-exporter metrics need a name override beyond the mapping above.
NODE_METRICS_OVERRIDES = {}
JMX_METRICS_MAP = {
'jmx_config_reload_failure_total': 'jmx.config.reload.failure.total',
'jmx_config_reload_success_total': 'jmx.config.reload.success.total',
'jmx_exporter_build_info': 'jmx.exporter.build.info',
'jmx_scrape_duration_seconds': 'jmx.scrape.duration.seconds',
'jmx_scrape_error': 'jmx.scrape.error',
'kafka_cluster_Partition_Value': 'kafka.cluster.Partition.Value',
'kafka_consumer_group_ConsumerLagMetrics_Value': 'kafka.consumer.group.ConsumerLagMetrics.Value',
'kafka_controller_ControllerChannelManager_50thPercentile': (
'kafka.controller.ControllerChannelManager.50thPercentile'
),
'kafka_controller_ControllerChannelManager_75thPercentile': (
'kafka.controller.ControllerChannelManager.75thPercentile'
),
'kafka_controller_ControllerChannelManager_95thPercentile': (
'kafka.controller.ControllerChannelManager.95thPercentile'
),
'kafka_controller_ControllerChannelManager_98thPercentile': (
'kafka.controller.ControllerChannelManager.98thPercentile'
),
'kafka_controller_ControllerChannelManager_999thPercentile': (
'kafka.controller.ControllerChannelManager.999thPercentile'
),
'kafka_controller_ControllerChannelManager_99thPercentile': (
'kafka.controller.ControllerChannelManager.99thPercentile'
),
'kafka_controller_ControllerChannelManager_Count': 'kafka.controller.ControllerChannelManager.Count',
'kafka_controller_ControllerChannelManager_FifteenMinuteRate': (
'kafka.controller.ControllerChannelManager.FifteenMinuteRate'
),
'kafka_controller_ControllerChannelManager_FiveMinuteRate': (
'kafka.controller.ControllerChannelManager.FiveMinuteRate'
),
'kafka_controller_ControllerChannelManager_Max': 'kafka.controller.ControllerChannelManager.Max',
'kafka_controller_ControllerChannelManager_Mean': 'kafka.controller.ControllerChannelManager.Mean',
'kafka_controller_ControllerChannelManager_MeanRate': 'kafka.controller.ControllerChannelManager.MeanRate',
'kafka_controller_ControllerChannelManager_Min': 'kafka.controller.ControllerChannelManager.Min',
'kafka_controller_ControllerChannelManager_OneMinuteRate': (
'kafka.controller.ControllerChannelManager.OneMinuteRate'
),
'kafka_controller_ControllerChannelManager_StdDev': 'kafka.controller.ControllerChannelManager.StdDev',
'kafka_controller_ControllerChannelManager_Value': 'kafka.controller.ControllerChannelManager.Value',
'kafka_controller_ControllerEventManager_50thPercentile': 'kafka.controller.ControllerEventManager.50thPercentile',
'kafka_controller_ControllerEventManager_75thPercentile': 'kafka.controller.ControllerEventManager.75thPercentile',
'kafka_controller_ControllerEventManager_95thPercentile': 'kafka.controller.ControllerEventManager.95thPercentile',
'kafka_controller_ControllerEventManager_98thPercentile': 'kafka.controller.ControllerEventManager.98thPercentile',
'kafka_controller_ControllerEventManager_999thPercentile': (
'kafka.controller.ControllerEventManager.999thPercentile'
),
'kafka_controller_ControllerEventManager_99thPercentile': 'kafka.controller.ControllerEventManager.99thPercentile',
'kafka_controller_ControllerEventManager_Count': 'kafka.controller.ControllerEventManager.Count',
'kafka_controller_ControllerEventManager_Max': 'kafka.controller.ControllerEventManager.Max',
'kafka_controller_ControllerEventManager_Mean': 'kafka.controller.ControllerEventManager.Mean',
'kafka_controller_ControllerEventManager_Min': 'kafka.controller.ControllerEventManager.Min',
'kafka_controller_ControllerEventManager_StdDev': 'kafka.controller.ControllerEventManager.StdDev',
'kafka_controller_ControllerEventManager_Value': 'kafka.controller.ControllerEventManager.Value',
'kafka_controller_ControllerStats_50thPercentile': 'kafka.controller.ControllerStats.50thPercentile',
'kafka_controller_ControllerStats_75thPercentile': 'kafka.controller.ControllerStats.75thPercentile',
'kafka_controller_ControllerStats_95thPercentile': 'kafka.controller.ControllerStats.95thPercentile',
'kafka_controller_ControllerStats_98thPercentile': 'kafka.controller.ControllerStats.98thPercentile',
'kafka_controller_ControllerStats_999thPercentile': 'kafka.controller.ControllerStats.999thPercentile',
'kafka_controller_ControllerStats_99thPercentile': 'kafka.controller.ControllerStats.99thPercentile',
'kafka_controller_ControllerStats_Count': 'kafka.controller.ControllerStats.Count',
'kafka_controller_ControllerStats_FifteenMinuteRate': 'kafka.controller.ControllerStats.FifteenMinuteRate',
'kafka_controller_ControllerStats_FiveMinuteRate': 'kafka.controller.ControllerStats.FiveMinuteRate',
'kafka_controller_ControllerStats_Max': 'kafka.controller.ControllerStats.Max',
'kafka_controller_ControllerStats_Mean': 'kafka.controller.ControllerStats.Mean',
'kafka_controller_ControllerStats_MeanRate': 'kafka.controller.ControllerStats.MeanRate',
'kafka_controller_ControllerStats_Min': 'kafka.controller.ControllerStats.Min',
'kafka_controller_ControllerStats_OneMinuteRate': 'kafka.controller.ControllerStats.OneMinuteRate',
'kafka_controller_ControllerStats_StdDev': 'kafka.controller.ControllerStats.StdDev',
'kafka_controller_KafkaController_Value': 'kafka.controller.KafkaController.Value',
'kafka_coordinator_group_GroupMetadataManager_Value': 'kafka.coordinator.group.GroupMetadataManager.Value',
'kafka_coordinator_transaction_TransactionMarkerChannelManager_Value': (
'kafka.coordinator.transaction.TransactionMarkerChannelManager.Value'
),
'kafka_log_Log_Value': 'kafka.log.Log.Value',
'kafka_log_LogCleaner_Value': 'kafka.log.LogCleaner.Value',
'kafka_log_LogCleanerManager_Value': 'kafka.log.LogCleanerManager.Value',
'kafka_log_LogManager_Value': 'kafka.log.LogManager.Value',
'kafka_network_Acceptor_Count': 'kafka.network.Acceptor.Count',
'kafka_network_Acceptor_FifteenMinuteRate': 'kafka.network.Acceptor.FifteenMinuteRate',
'kafka_network_Acceptor_FiveMinuteRate': 'kafka.network.Acceptor.FiveMinuteRate',
'kafka_network_Acceptor_MeanRate': 'kafka.network.Acceptor.MeanRate',
'kafka_network_Acceptor_OneMinuteRate': 'kafka.network.Acceptor.OneMinuteRate',
'kafka_network_Processor_Value': 'kafka.network.Processor.Value',
'kafka_network_RequestChannel_Value': 'kafka.network.RequestChannel.Value',
'kafka_network_RequestMetrics_50thPercentile': 'kafka.network.RequestMetrics.50thPercentile',
'kafka_network_RequestMetrics_75thPercentile': 'kafka.network.RequestMetrics.75thPercentile',
'kafka_network_RequestMetrics_95thPercentile': 'kafka.network.RequestMetrics.95thPercentile',
'kafka_network_RequestMetrics_98thPercentile': 'kafka.network.RequestMetrics.98thPercentile',
'kafka_network_RequestMetrics_999thPercentile': 'kafka.network.RequestMetrics.999thPercentile',
'kafka_network_RequestMetrics_99thPercentile': 'kafka.network.RequestMetrics.99thPercentile',
'kafka_network_RequestMetrics_FifteenMinuteRate': 'kafka.network.RequestMetrics.FifteenMinuteRate',
'kafka_network_RequestMetrics_FiveMinuteRate': 'kafka.network.RequestMetrics.FiveMinuteRate',
'kafka_network_RequestMetrics_Max': 'kafka.network.RequestMetrics.Max',
'kafka_network_RequestMetrics_Mean': 'kafka.network.RequestMetrics.Mean',
'kafka_network_RequestMetrics_MeanRate': 'kafka.network.RequestMetrics.MeanRate',
'kafka_network_RequestMetrics_Min': 'kafka.network.RequestMetrics.Min',
'kafka_network_RequestMetrics_OneMinuteRate': 'kafka.network.RequestMetrics.OneMinuteRate',
'kafka_network_RequestMetrics_StdDev': 'kafka.network.RequestMetrics.StdDev',
'kafka_network_SocketServer_Value': 'kafka.network.SocketServer.Value',
'kafka_security_SimpleAclAuthorizer_Count': 'kafka.security.SimpleAclAuthorizer.Count',
'kafka_security_SimpleAclAuthorizer_FifteenMinuteRate': 'kafka.security.SimpleAclAuthorizer.FifteenMinuteRate',
'kafka_security_SimpleAclAuthorizer_FiveMinuteRate': 'kafka.security.SimpleAclAuthorizer.FiveMinuteRate',
'kafka_security_SimpleAclAuthorizer_MeanRate': 'kafka.security.SimpleAclAuthorizer.MeanRate',
'kafka_security_SimpleAclAuthorizer_OneMinuteRate': 'kafka.security.SimpleAclAuthorizer.OneMinuteRate',
'kafka_server_BrokerTopicMetrics_FifteenMinuteRate': 'kafka.server.BrokerTopicMetrics.FifteenMinuteRate',
'kafka_server_BrokerTopicMetrics_FiveMinuteRate': 'kafka.server.BrokerTopicMetrics.FiveMinuteRate',
'kafka_server_BrokerTopicMetrics_MeanRate': 'kafka.server.BrokerTopicMetrics.MeanRate',
'kafka_server_BrokerTopicMetrics_OneMinuteRate': 'kafka.server.BrokerTopicMetrics.OneMinuteRate',
'kafka_server_DelayedFetchMetrics_Count': 'kafka.server.DelayedFetchMetrics.Count',
'kafka_server_DelayedFetchMetrics_FifteenMinuteRate': 'kafka.server.DelayedFetchMetrics.FifteenMinuteRate',
'kafka_server_DelayedFetchMetrics_FiveMinuteRate': 'kafka.server.DelayedFetchMetrics.FiveMinuteRate',
'kafka_server_DelayedFetchMetrics_MeanRate': 'kafka.server.DelayedFetchMetrics.MeanRate',
'kafka_server_DelayedFetchMetrics_OneMinuteRate': 'kafka.server.DelayedFetchMetrics.OneMinuteRate',
'kafka_server_DelayedOperationPurgatory_Value': 'kafka.server.DelayedOperationPurgatory.Value',
'kafka_server_Fetch_queue_size': 'kafka.server.Fetch.queue.size',
'kafka_server_FetchSessionCache_Count': 'kafka.server.FetchSessionCache.Count',
'kafka_server_FetchSessionCache_FifteenMinuteRate': 'kafka.server.FetchSessionCache.FifteenMinuteRate',
'kafka_server_FetchSessionCache_FiveMinuteRate': 'kafka.server.FetchSessionCache.FiveMinuteRate',
'kafka_server_FetchSessionCache_MeanRate': 'kafka.server.FetchSessionCache.MeanRate',
'kafka_server_FetchSessionCache_OneMinuteRate': 'kafka.server.FetchSessionCache.OneMinuteRate',
'kafka_server_FetchSessionCache_Value': 'kafka.server.FetchSessionCache.Value',
'kafka_server_FetcherLagMetrics_Value': 'kafka.server.FetcherLagMetrics.Value',
'kafka_server_FetcherStats_Count': 'kafka.server.FetcherStats.Count',
'kafka_server_FetcherStats_FifteenMinuteRate': 'kafka.server.FetcherStats.FifteenMinuteRate',
'kafka_server_FetcherStats_FiveMinuteRate': 'kafka.server.FetcherStats.FiveMinuteRate',
'kafka_server_FetcherStats_MeanRate': 'kafka.server.FetcherStats.MeanRate',
'kafka_server_FetcherStats_OneMinuteRate': 'kafka.server.FetcherStats.OneMinuteRate',
'kafka_server_KafkaRequestHandlerPool_Count': 'kafka.server.KafkaRequestHandlerPool.Count',
'kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate': 'kafka.server.KafkaRequestHandlerPool.FifteenMinuteRate',
'kafka_server_KafkaRequestHandlerPool_FiveMinuteRate': 'kafka.server.KafkaRequestHandlerPool.FiveMinuteRate',
'kafka_server_KafkaRequestHandlerPool_MeanRate': 'kafka.server.KafkaRequestHandlerPool.MeanRate',
'kafka_server_KafkaRequestHandlerPool_OneMinuteRate': 'kafka.server.KafkaRequestHandlerPool.OneMinuteRate',
'kafka_server_KafkaServer_Value': 'kafka.server.KafkaServer.Value',
'kafka_server_LeaderReplication_byte_rate': 'kafka.server.LeaderReplication.byte.rate',
'kafka_server_Produce_queue_size': 'kafka.server.Produce.queue.size',
'kafka_server_ReplicaAlterLogDirsManager_Value': 'kafka.server.ReplicaAlterLogDirsManager.Value',
'kafka_server_ReplicaFetcherManager_Value': 'kafka.server.ReplicaFetcherManager.Value',
'kafka_server_ReplicaManager_Count': 'kafka.server.ReplicaManager.Count',
'kafka_server_ReplicaManager_FifteenMinuteRate': 'kafka.server.ReplicaManager.FifteenMinuteRate',
'kafka_server_ReplicaManager_FiveMinuteRate': 'kafka.server.ReplicaManager.FiveMinuteRate',
'kafka_server_ReplicaManager_MeanRate': 'kafka.server.ReplicaManager.MeanRate',
'kafka_server_ReplicaManager_OneMinuteRate': 'kafka.server.ReplicaManager.OneMinuteRate',
'kafka_server_Request_queue_size': 'kafka.server.Request.queue.size',
'kafka_server_SessionExpireListener_Count': 'kafka.server.SessionExpireListener.Count',
'kafka_server_SessionExpireListener_FifteenMinuteRate': 'kafka.server.SessionExpireListener.FifteenMinuteRate',
'kafka_server_SessionExpireListener_FiveMinuteRate': 'kafka.server.SessionExpireListener.FiveMinuteRate',
'kafka_server_SessionExpireListener_MeanRate': 'kafka.server.SessionExpireListener.MeanRate',
'kafka_server_SessionExpireListener_OneMinuteRate': 'kafka.server.SessionExpireListener.OneMinuteRate',
'kafka_server_ZooKeeperClientMetrics_50thPercentile': 'kafka.server.ZooKeeperClientMetrics.50thPercentile',
'kafka_server_ZooKeeperClientMetrics_75thPercentile': 'kafka.server.ZooKeeperClientMetrics.75thPercentile',
'kafka_server_ZooKeeperClientMetrics_95thPercentile': 'kafka.server.ZooKeeperClientMetrics.95thPercentile',
'kafka_server_ZooKeeperClientMetrics_98thPercentile': 'kafka.server.ZooKeeperClientMetrics.98thPercentile',
'kafka_server_ZooKeeperClientMetrics_999thPercentile': 'kafka.server.ZooKeeperClientMetrics.999thPercentile',
'kafka_server_ZooKeeperClientMetrics_99thPercentile': 'kafka.server.ZooKeeperClientMetrics.99thPercentile',
'kafka_server_ZooKeeperClientMetrics_Count': 'kafka.server.ZooKeeperClientMetrics.Count',
'kafka_server_ZooKeeperClientMetrics_Max': 'kafka.server.ZooKeeperClientMetrics.Max',
'kafka_server_ZooKeeperClientMetrics_Mean': 'kafka.server.ZooKeeperClientMetrics.Mean',
'kafka_server_ZooKeeperClientMetrics_Min': 'kafka.server.ZooKeeperClientMetrics.Min',
'kafka_server_ZooKeeperClientMetrics_StdDev': 'kafka.server.ZooKeeperClientMetrics.StdDev',
'kafka_server_controller_channel_metrics_connection_close_rate': (
'kafka.server.controller.channel.metrics.connection.close.rate'
),
'kafka_server_controller_channel_metrics_connection_close_total': (
'kafka.server.controller.channel.metrics.connection.close.total'
),
'kafka_server_controller_channel_metrics_connection_count': (
'kafka.server.controller.channel.metrics.connection.count'
),
'kafka_server_controller_channel_metrics_connection_creation_rate': (
'kafka.server.controller.channel.metrics.connection.creation.rate'
),
'kafka_server_controller_channel_metrics_connection_creation_total': (
'kafka.server.controller.channel.metrics.connection.creation.total'
),
'kafka_server_controller_channel_metrics_failed_authentication_rate': (
'kafka.server.controller.channel.metrics.failed.authentication.rate'
),
'kafka_server_controller_channel_metrics_failed_authentication_total': (
'kafka.server.controller.channel.metrics.failed.authentication.total'
),
'kafka_server_controller_channel_metrics_failed_reauthentication_rate': (
'kafka.server.controller.channel.metrics.failed.reauthentication.rate'
),
'kafka_server_controller_channel_metrics_failed_reauthentication_total': (
'kafka.server.controller.channel.metrics.failed.reauthentication.total'
),
'kafka_server_controller_channel_metrics_incoming_byte_rate': (
'kafka.server.controller.channel.metrics.incoming.byte.rate'
),
'kafka_server_controller_channel_metrics_incoming_byte_total': (
'kafka.server.controller.channel.metrics.incoming.byte.total'
),
'kafka_server_controller_channel_metrics_io_ratio': 'kafka.server.controller.channel.metrics.io.ratio',
'kafka_server_controller_channel_metrics_io_time_ns_avg': 'kafka.server.controller.channel.metrics.io.time.ns.avg',
'kafka_server_controller_channel_metrics_io_wait_ratio': 'kafka.server.controller.channel.metrics.io.wait.ratio',
'kafka_server_controller_channel_metrics_io_wait_time_ns_avg': (
'kafka.server.controller.channel.metrics.io.wait.time.ns.avg'
),
'kafka_server_controller_channel_metrics_io_waittime_total': (
'kafka.server.controller.channel.metrics.io.waittime.total'
),
'kafka_server_controller_channel_metrics_iotime_total': 'kafka.server.controller.channel.metrics.iotime.total',
'kafka_server_controller_channel_metrics_network_io_rate': (
'kafka.server.controller.channel.metrics.network.io.rate'
),
'kafka_server_controller_channel_metrics_network_io_total': (
'kafka.server.controller.channel.metrics.network.io.total'
),
'kafka_server_controller_channel_metrics_outgoing_byte_rate': (
'kafka.server.controller.channel.metrics.outgoing.byte.rate'
),
'kafka_server_controller_channel_metrics_outgoing_byte_total': (
'kafka.server.controller.channel.metrics.outgoing.byte.total'
),
'kafka_server_controller_channel_metrics_reauthentication_latency_avg': (
'kafka.server.controller.channel.metrics.reauthentication.latency.avg'
),
'kafka_server_controller_channel_metrics_reauthentication_latency_max': (
'kafka.server.controller.channel.metrics.reauthentication.latency.max'
),
'kafka_server_controller_channel_metrics_request_rate': 'kafka.server.controller.channel.metrics.request.rate',
'kafka_server_controller_channel_metrics_request_size_avg': (
'kafka.server.controller.channel.metrics.request.size.avg'
),
'kafka_server_controller_channel_metrics_request_size_max': (
'kafka.server.controller.channel.metrics.request.size.max'
),
'kafka_server_controller_channel_metrics_request_total': 'kafka.server.controller.channel.metrics.request.total',
'kafka_server_controller_channel_metrics_response_rate': 'kafka.server.controller.channel.metrics.response.rate',
'kafka_server_controller_channel_metrics_response_total': 'kafka.server.controller.channel.metrics.response.total',
'kafka_server_controller_channel_metrics_select_rate': 'kafka.server.controller.channel.metrics.select.rate',
'kafka_server_controller_channel_metrics_select_total': 'kafka.server.controller.channel.metrics.select.total',
'kafka_server_controller_channel_metrics_successful_authentication_no_reauth_total': (
'kafka.server.controller.channel.metrics.successful.authentication.no.reauth.total'
),
'kafka_server_controller_channel_metrics_successful_authentication_rate': (
'kafka.server.controller.channel.metrics.successful.authentication.rate'
),
'kafka_server_controller_channel_metrics_successful_authentication_total': (
'kafka.server.controller.channel.metrics.successful.authentication.total'
),
'kafka_server_controller_channel_metrics_successful_reauthentication_rate': (
'kafka.server.controller.channel.metrics.successful.reauthentication.rate'
),
'kafka_server_controller_channel_metrics_successful_reauthentication_total': (
'kafka.server.controller.channel.metrics.successful.reauthentication.total'
),
'kafka_server_kafka_metrics_count_count': 'kafka.server.kafka.metrics.count.count',
'kafka_server_replica_fetcher_metrics_connection_close_rate': (
'kafka.server.replica.fetcher.metrics.connection.close.rate'
),
'kafka_server_replica_fetcher_metrics_connection_close_total': (
'kafka.server.replica.fetcher.metrics.connection.close.total'
),
'kafka_server_replica_fetcher_metrics_connection_count': 'kafka.server.replica.fetcher.metrics.connection.count',
'kafka_server_replica_fetcher_metrics_connection_creation_rate': (
'kafka.server.replica.fetcher.metrics.connection.creation.rate'
),
'kafka_server_replica_fetcher_metrics_connection_creation_total': (
'kafka.server.replica.fetcher.metrics.connection.creation.total'
),
'kafka_server_replica_fetcher_metrics_failed_authentication_rate': (
'kafka.server.replica.fetcher.metrics.failed.authentication.rate'
),
'kafka_server_replica_fetcher_metrics_failed_authentication_total': (
'kafka.server.replica.fetcher.metrics.failed.authentication.total'
),
'kafka_server_replica_fetcher_metrics_incoming_byte_rate': (
'kafka.server.replica.fetcher.metrics.incoming.byte.rate'
),
'kafka_server_replica_fetcher_metrics_incoming_byte_total': (
'kafka.server.replica.fetcher.metrics.incoming.byte.total'
),
'kafka_server_replica_fetcher_metrics_io_ratio': 'kafka.server.replica.fetcher.metrics.io.ratio',
'kafka_server_replica_fetcher_metrics_io_time_ns_avg': 'kafka.server.replica.fetcher.metrics.io.time.ns.avg',
'kafka_server_replica_fetcher_metrics_io_wait_ratio': 'kafka.server.replica.fetcher.metrics.io.wait.ratio',
'kafka_server_replica_fetcher_metrics_io_wait_time_ns_avg': (
'kafka.server.replica.fetcher.metrics.io.wait.time.ns.avg'
),
'kafka_server_replica_fetcher_metrics_io_waittime_total': 'kafka.server.replica.fetcher.metrics.io.waittime.total',
'kafka_server_replica_fetcher_metrics_iotime_total': 'kafka.server.replica.fetcher.metrics.iotime.total',
'kafka_server_replica_fetcher_metrics_network_io_rate': 'kafka.server.replica.fetcher.metrics.network.io.rate',
'kafka_server_replica_fetcher_metrics_network_io_total': 'kafka.server.replica.fetcher.metrics.network.io.total',
'kafka_server_replica_fetcher_metrics_outgoing_byte_rate': (
'kafka.server.replica.fetcher.metrics.outgoing.byte.rate'
),
'kafka_server_replica_fetcher_metrics_outgoing_byte_total': (
'kafka.server.replica.fetcher.metrics.outgoing.byte.total'
),
'kafka_server_replica_fetcher_metrics_request_rate': 'kafka.server.replica.fetcher.metrics.request.rate',
'kafka_server_replica_fetcher_metrics_request_size_avg': 'kafka.server.replica.fetcher.metrics.request.size.avg',
'kafka_server_replica_fetcher_metrics_request_size_max': 'kafka.server.replica.fetcher.metrics.request.size.max',
'kafka_server_replica_fetcher_metrics_request_total': 'kafka.server.replica.fetcher.metrics.request.total',
'kafka_server_replica_fetcher_metrics_response_rate': 'kafka.server.replica.fetcher.metrics.response.rate',
'kafka_server_replica_fetcher_metrics_response_total': 'kafka.server.replica.fetcher.metrics.response.total',
'kafka_server_replica_fetcher_metrics_select_rate': 'kafka.server.replica.fetcher.metrics.select.rate',
'kafka_server_replica_fetcher_metrics_select_total': 'kafka.server.replica.fetcher.metrics.select.total',
'kafka_server_replica_fetcher_metrics_successful_authentication_rate': (
'kafka.server.replica.fetcher.metrics.successful.authentication.rate'
),
'kafka_server_replica_fetcher_metrics_successful_authentication_total': (
'kafka.server.replica.fetcher.metrics.successful.authentication.total'
),
'kafka_server_socket_server_metrics_MemoryPoolAvgDepletedPercent': (
'kafka.server.socket.server.metrics.MemoryPoolAvgDepletedPercent'
),
'kafka_server_socket_server_metrics_MemoryPoolDepletedTimeTotal': (
'kafka.server.socket.server.metrics.MemoryPoolDepletedTimeTotal'
),
'kafka_server_socket_server_metrics_connection_close_rate': (
'kafka.server.socket.server.metrics.connection.close.rate'
),
'kafka_server_socket_server_metrics_connection_close_total': (
'kafka.server.socket.server.metrics.connection.close.total'
),
'kafka_server_socket_server_metrics_connection_count': 'kafka.server.socket.server.metrics.connection.count',
'kafka_server_socket_server_metrics_connection_creation_rate': (
'kafka.server.socket.server.metrics.connection.creation.rate'
),
'kafka_server_socket_server_metrics_connection_creation_total': (
'kafka.server.socket.server.metrics.connection.creation.total'
),
'kafka_server_socket_server_metrics_expired_connections_killed_count': (
'kafka.server.socket.server.metrics.expired.connections.killed.count'
),
'kafka_server_socket_server_metrics_failed_authentication_rate': (
'kafka.server.socket.server.metrics.failed.authentication.rate'
),
'kafka_server_socket_server_metrics_failed_authentication_total': (
'kafka.server.socket.server.metrics.failed.authentication.total'
),
'kafka_server_socket_server_metrics_failed_reauthentication_rate': (
'kafka.server.socket.server.metrics.failed.reauthentication.rate'
),
'kafka_server_socket_server_metrics_failed_reauthentication_total': (
'kafka.server.socket.server.metrics.failed.reauthentication.total'
),
'kafka_server_socket_server_metrics_incoming_byte_rate': 'kafka.server.socket.server.metrics.incoming.byte.rate',
'kafka_server_socket_server_metrics_incoming_byte_total': 'kafka.server.socket.server.metrics.incoming.byte.total',
'kafka_server_socket_server_metrics_io_ratio': 'kafka.server.socket.server.metrics.io.ratio',
'kafka_server_socket_server_metrics_io_time_ns_avg': 'kafka.server.socket.server.metrics.io.time.ns.avg',
'kafka_server_socket_server_metrics_io_wait_ratio': 'kafka.server.socket.server.metrics.io.wait.ratio',
'kafka_server_socket_server_metrics_io_wait_time_ns_avg': 'kafka.server.socket.server.metrics.io.wait.time.ns.avg',
'kafka_server_socket_server_metrics_io_waittime_total': 'kafka.server.socket.server.metrics.io.waittime.total',
'kafka_server_socket_server_metrics_iotime_total': 'kafka.server.socket.server.metrics.iotime.total',
'kafka_server_socket_server_metrics_network_io_rate': 'kafka.server.socket.server.metrics.network.io.rate',
'kafka_server_socket_server_metrics_network_io_total': 'kafka.server.socket.server.metrics.network.io.total',
'kafka_server_socket_server_metrics_outgoing_byte_rate': 'kafka.server.socket.server.metrics.outgoing.byte.rate',
'kafka_server_socket_server_metrics_outgoing_byte_total': 'kafka.server.socket.server.metrics.outgoing.byte.total',
'kafka_server_socket_server_metrics_reauthentication_latency_avg': (
'kafka.server.socket.server.metrics.reauthentication.latency.avg'
),
'kafka_server_socket_server_metrics_reauthentication_latency_max': (
'kafka.server.socket.server.metrics.reauthentication.latency.max'
),
'kafka_server_socket_server_metrics_request_rate': 'kafka.server.socket.server.metrics.request.rate',
'kafka_server_socket_server_metrics_request_size_avg': 'kafka.server.socket.server.metrics.request.size.avg',
'kafka_server_socket_server_metrics_request_size_max': 'kafka.server.socket.server.metrics.request.size.max',
'kafka_server_socket_server_metrics_request_total': 'kafka.server.socket.server.metrics.request.total',
'kafka_server_socket_server_metrics_response_rate': 'kafka.server.socket.server.metrics.response.rate',
'kafka_server_socket_server_metrics_response_total': 'kafka.server.socket.server.metrics.response.total',
'kafka_server_socket_server_metrics_select_rate': 'kafka.server.socket.server.metrics.select.rate',
'kafka_server_socket_server_metrics_select_total': 'kafka.server.socket.server.metrics.select.total',
'kafka_server_socket_server_metrics_successful_authentication_no_reauth_total': (
'kafka.server.socket.server.metrics.successful.authentication.no.reauth.total'
),
'kafka_server_socket_server_metrics_successful_authentication_rate': (
'kafka.server.socket.server.metrics.successful.authentication.rate'
),
'kafka_server_socket_server_metrics_successful_authentication_total': (
'kafka.server.socket.server.metrics.successful.authentication.total'
),
'kafka_server_socket_server_metrics_successful_reauthentication_rate': (
'kafka.server.socket.server.metrics.successful.reauthentication.rate'
),
'kafka_server_socket_server_metrics_successful_reauthentication_total': (
'kafka.server.socket.server.metrics.successful.reauthentication.total'
),
'kafka_server_txn_marker_channel_metrics_connection_close_rate': (
'kafka.server.txn.marker.channel.metrics.connection.close.rate'
),
'kafka_server_txn_marker_channel_metrics_connection_close_total': (
'kafka.server.txn.marker.channel.metrics.connection.close.total'
),
'kafka_server_txn_marker_channel_metrics_connection_count': (
'kafka.server.txn.marker.channel.metrics.connection.count'
),
'kafka_server_txn_marker_channel_metrics_connection_creation_rate': (
'kafka.server.txn.marker.channel.metrics.connection.creation.rate'
),
'kafka_server_txn_marker_channel_metrics_connection_creation_total': (
'kafka.server.txn.marker.channel.metrics.connection.creation.total'
),
'kafka_server_txn_marker_channel_metrics_failed_authentication_rate': (
'kafka.server.txn.marker.channel.metrics.failed.authentication.rate'
),
'kafka_server_txn_marker_channel_metrics_failed_authentication_total': (
'kafka.server.txn.marker.channel.metrics.failed.authentication.total'
),
'kafka_server_txn_marker_channel_metrics_failed_reauthentication_rate': (
'kafka.server.txn.marker.channel.metrics.failed.reauthentication.rate'
),
'kafka_server_txn_marker_channel_metrics_failed_reauthentication_total': (
'kafka.server.txn.marker.channel.metrics.failed.reauthentication.total'
),
'kafka_server_txn_marker_channel_metrics_incoming_byte_rate': (
'kafka.server.txn.marker.channel.metrics.incoming.byte.rate'
),
'kafka_server_txn_marker_channel_metrics_incoming_byte_total': (
'kafka.server.txn.marker.channel.metrics.incoming.byte.total'
),
'kafka_server_txn_marker_channel_metrics_io_ratio': 'kafka.server.txn.marker.channel.metrics.io.ratio',
'kafka_server_txn_marker_channel_metrics_io_time_ns_avg': 'kafka.server.txn.marker.channel.metrics.io.time.ns.avg',
'kafka_server_txn_marker_channel_metrics_io_wait_ratio': 'kafka.server.txn.marker.channel.metrics.io.wait.ratio',
'kafka_server_txn_marker_channel_metrics_io_wait_time_ns_avg': (
'kafka.server.txn.marker.channel.metrics.io.wait.time.ns.avg'
),
'kafka_server_txn_marker_channel_metrics_io_waittime_total': (
'kafka.server.txn.marker.channel.metrics.io.waittime.total'
),
'kafka_server_txn_marker_channel_metrics_iotime_total': 'kafka.server.txn.marker.channel.metrics.iotime.total',
'kafka_server_txn_marker_channel_metrics_network_io_rate': (
'kafka.server.txn.marker.channel.metrics.network.io.rate'
),
'kafka_server_txn_marker_channel_metrics_network_io_total': (
'kafka.server.txn.marker.channel.metrics.network.io.total'
),
'kafka_server_txn_marker_channel_metrics_outgoing_byte_rate': (
'kafka.server.txn.marker.channel.metrics.outgoing.byte.rate'
),
'kafka_server_txn_marker_channel_metrics_outgoing_byte_total': (
'kafka.server.txn.marker.channel.metrics.outgoing.byte.total'
),
'kafka_server_txn_marker_channel_metrics_reauthentication_latency_avg': (
'kafka.server.txn.marker.channel.metrics.reauthentication.latency.avg'
),
'kafka_server_txn_marker_channel_metrics_reauthentication_latency_max': (
'kafka.server.txn.marker.channel.metrics.reauthentication.latency.max'
),
'kafka_server_txn_marker_channel_metrics_request_rate': 'kafka.server.txn.marker.channel.metrics.request.rate',
'kafka_server_txn_marker_channel_metrics_request_size_avg': (
'kafka.server.txn.marker.channel.metrics.request.size.avg'
),
'kafka_server_txn_marker_channel_metrics_request_size_max': (
'kafka.server.txn.marker.channel.metrics.request.size.max'
),
'kafka_server_txn_marker_channel_metrics_request_total': 'kafka.server.txn.marker.channel.metrics.request.total',
'kafka_server_txn_marker_channel_metrics_response_rate': 'kafka.server.txn.marker.channel.metrics.response.rate',
'kafka_server_txn_marker_channel_metrics_response_total': 'kafka.server.txn.marker.channel.metrics.response.total',
'kafka_server_txn_marker_channel_metrics_select_rate': 'kafka.server.txn.marker.channel.metrics.select.rate',
'kafka_server_txn_marker_channel_metrics_select_total': 'kafka.server.txn.marker.channel.metrics.select.total',
'kafka_server_txn_marker_channel_metrics_successful_authentication_no_reauth_total': (
'kafka.server.txn.marker.channel.metrics.successful.authentication.no.reauth.total'
),
'kafka_server_txn_marker_channel_metrics_successful_authentication_rate': (
'kafka.server.txn.marker.channel.metrics.successful.authentication.rate'
),
'kafka_server_txn_marker_channel_metrics_successful_authentication_total': (
'kafka.server.txn.marker.channel.metrics.successful.authentication.total'
),
'kafka_server_txn_marker_channel_metrics_successful_reauthentication_rate': (
'kafka.server.txn.marker.channel.metrics.successful.reauthentication.rate'
),
'kafka_server_txn_marker_channel_metrics_successful_reauthentication_total': (
'kafka.server.txn.marker.channel.metrics.successful.reauthentication.total'
),
'kafka_utils_Throttler_Count': 'kafka.utils.Throttler.Count',
'kafka_utils_Throttler_FifteenMinuteRate': 'kafka.utils.Throttler.FifteenMinuteRate',
'kafka_utils_Throttler_FiveMinuteRate': 'kafka.utils.Throttler.FiveMinuteRate',
'kafka_utils_Throttler_MeanRate': 'kafka.utils.Throttler.MeanRate',
'kafka_utils_Throttler_OneMinuteRate': 'kafka.utils.Throttler.OneMinuteRate',
}
# Metric-type overrides for JMX metrics. Every entry here is submitted as a
# gauge, so the mapping is built with dict.fromkeys to keep a single source of
# truth for the type instead of repeating the 'gauge' literal per key.
JMX_METRICS_OVERRIDES = dict.fromkeys(
    (
        'kafka_cluster_Partition_Value',
        'kafka_consumer_group_ConsumerLagMetrics_Value',
        'kafka_controller_ControllerChannelManager_50thPercentile',
        'kafka_controller_ControllerChannelManager_75thPercentile',
        'kafka_controller_ControllerChannelManager_95thPercentile',
        'kafka_controller_ControllerChannelManager_98thPercentile',
        'kafka_controller_ControllerChannelManager_999thPercentile',
        'kafka_controller_ControllerChannelManager_99thPercentile',
        'kafka_controller_ControllerChannelManager_Count',
        'kafka_controller_ControllerChannelManager_FifteenMinuteRate',
        'kafka_controller_ControllerChannelManager_FiveMinuteRate',
        'kafka_controller_ControllerChannelManager_Max',
        'kafka_controller_ControllerChannelManager_Mean',
        'kafka_controller_ControllerChannelManager_MeanRate',
        'kafka_controller_ControllerChannelManager_Min',
        'kafka_controller_ControllerChannelManager_OneMinuteRate',
        'kafka_controller_ControllerChannelManager_StdDev',
        'kafka_controller_ControllerChannelManager_Value',
        'kafka_controller_ControllerEventManager_50thPercentile',
        'kafka_controller_ControllerEventManager_75thPercentile',
        'kafka_controller_ControllerEventManager_95thPercentile',
        'kafka_controller_ControllerEventManager_98thPercentile',
        'kafka_controller_ControllerEventManager_999thPercentile',
        'kafka_controller_ControllerEventManager_99thPercentile',
        'kafka_controller_ControllerEventManager_Count',
        'kafka_controller_ControllerEventManager_Max',
        'kafka_controller_ControllerEventManager_Mean',
        'kafka_controller_ControllerEventManager_Min',
        'kafka_controller_ControllerEventManager_StdDev',
        'kafka_controller_ControllerEventManager_Value',
        'kafka_controller_ControllerStats_50thPercentile',
        'kafka_controller_ControllerStats_75thPercentile',
        'kafka_controller_ControllerStats_95thPercentile',
        'kafka_controller_ControllerStats_98thPercentile',
        'kafka_controller_ControllerStats_999thPercentile',
        'kafka_controller_ControllerStats_99thPercentile',
        'kafka_controller_ControllerStats_Count',
        'kafka_controller_ControllerStats_FifteenMinuteRate',
        'kafka_controller_ControllerStats_FiveMinuteRate',
        'kafka_controller_ControllerStats_Max',
        'kafka_controller_ControllerStats_Mean',
        'kafka_controller_ControllerStats_MeanRate',
        'kafka_controller_ControllerStats_Min',
        'kafka_controller_ControllerStats_OneMinuteRate',
        'kafka_controller_ControllerStats_StdDev',
        'kafka_controller_KafkaController_Value',
        'kafka_coordinator_group_GroupMetadataManager_Value',
        'kafka_coordinator_transaction_TransactionMarkerChannelManager_Value',
        'kafka_log_LogCleanerManager_Value',
        'kafka_log_LogCleaner_Value',
        'kafka_log_LogManager_Value',
        'kafka_log_Log_Value',
        'kafka_network_Acceptor_Count',
        'kafka_network_Acceptor_FifteenMinuteRate',
        'kafka_network_Acceptor_FiveMinuteRate',
        'kafka_network_Acceptor_MeanRate',
        'kafka_network_Acceptor_OneMinuteRate',
        'kafka_network_Processor_Value',
        'kafka_network_RequestChannel_Value',
        'kafka_network_RequestMetrics_50thPercentile',
        'kafka_network_RequestMetrics_75thPercentile',
        'kafka_network_RequestMetrics_95thPercentile',
        'kafka_network_RequestMetrics_98thPercentile',
        'kafka_network_RequestMetrics_999thPercentile',
        'kafka_network_RequestMetrics_99thPercentile',
        'kafka_network_RequestMetrics_FifteenMinuteRate',
        'kafka_network_RequestMetrics_FiveMinuteRate',
        'kafka_network_RequestMetrics_Max',
        'kafka_network_RequestMetrics_Mean',
        'kafka_network_RequestMetrics_MeanRate',
        'kafka_network_RequestMetrics_Min',
        'kafka_network_RequestMetrics_OneMinuteRate',
        'kafka_network_RequestMetrics_StdDev',
        'kafka_network_SocketServer_Value',
        'kafka_security_SimpleAclAuthorizer_Count',
        'kafka_security_SimpleAclAuthorizer_FifteenMinuteRate',
        'kafka_security_SimpleAclAuthorizer_FiveMinuteRate',
        'kafka_security_SimpleAclAuthorizer_MeanRate',
        'kafka_security_SimpleAclAuthorizer_OneMinuteRate',
        'kafka_server_BrokerTopicMetrics_FifteenMinuteRate',
        'kafka_server_BrokerTopicMetrics_FiveMinuteRate',
        'kafka_server_BrokerTopicMetrics_MeanRate',
        'kafka_server_BrokerTopicMetrics_OneMinuteRate',
        'kafka_server_DelayedFetchMetrics_Count',
        'kafka_server_DelayedFetchMetrics_FifteenMinuteRate',
        'kafka_server_DelayedFetchMetrics_FiveMinuteRate',
        'kafka_server_DelayedFetchMetrics_MeanRate',
        'kafka_server_DelayedFetchMetrics_OneMinuteRate',
        'kafka_server_DelayedOperationPurgatory_Value',
        'kafka_server_FetchSessionCache_Count',
        'kafka_server_FetchSessionCache_FifteenMinuteRate',
        'kafka_server_FetchSessionCache_FiveMinuteRate',
        'kafka_server_FetchSessionCache_MeanRate',
        'kafka_server_FetchSessionCache_OneMinuteRate',
        'kafka_server_FetchSessionCache_Value',
        'kafka_server_Fetch_queue_size',
        'kafka_server_FetcherLagMetrics_Value',
        'kafka_server_FetcherStats_Count',
        'kafka_server_FetcherStats_FifteenMinuteRate',
        'kafka_server_FetcherStats_FiveMinuteRate',
        'kafka_server_FetcherStats_MeanRate',
        'kafka_server_FetcherStats_OneMinuteRate',
        'kafka_server_KafkaRequestHandlerPool_Count',
        'kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate',
        'kafka_server_KafkaRequestHandlerPool_FiveMinuteRate',
        'kafka_server_KafkaRequestHandlerPool_MeanRate',
        'kafka_server_KafkaRequestHandlerPool_OneMinuteRate',
        'kafka_server_KafkaServer_Value',
        'kafka_server_LeaderReplication_byte_rate',
        'kafka_server_Produce_queue_size',
        'kafka_server_ReplicaAlterLogDirsManager_Value',
        'kafka_server_ReplicaFetcherManager_Value',
        'kafka_server_ReplicaManager_Count',
        'kafka_server_ReplicaManager_FifteenMinuteRate',
        'kafka_server_ReplicaManager_FiveMinuteRate',
        'kafka_server_ReplicaManager_MeanRate',
        'kafka_server_ReplicaManager_OneMinuteRate',
        'kafka_server_Request_queue_size',
        'kafka_server_SessionExpireListener_Count',
        'kafka_server_SessionExpireListener_FifteenMinuteRate',
        'kafka_server_SessionExpireListener_FiveMinuteRate',
        'kafka_server_SessionExpireListener_MeanRate',
        'kafka_server_SessionExpireListener_OneMinuteRate',
        'kafka_server_ZooKeeperClientMetrics_50thPercentile',
        'kafka_server_ZooKeeperClientMetrics_75thPercentile',
        'kafka_server_ZooKeeperClientMetrics_95thPercentile',
        'kafka_server_ZooKeeperClientMetrics_98thPercentile',
        'kafka_server_ZooKeeperClientMetrics_999thPercentile',
        'kafka_server_ZooKeeperClientMetrics_99thPercentile',
        'kafka_server_ZooKeeperClientMetrics_Count',
        'kafka_server_ZooKeeperClientMetrics_Max',
        'kafka_server_ZooKeeperClientMetrics_Mean',
        'kafka_server_ZooKeeperClientMetrics_Min',
        'kafka_server_ZooKeeperClientMetrics_StdDev',
        'kafka_server_controller_channel_metrics_connection_close_rate',
        'kafka_server_controller_channel_metrics_connection_close_total',
        'kafka_server_controller_channel_metrics_connection_count',
        'kafka_server_controller_channel_metrics_connection_creation_rate',
        'kafka_server_controller_channel_metrics_connection_creation_total',
        'kafka_server_controller_channel_metrics_failed_authentication_rate',
        'kafka_server_controller_channel_metrics_failed_authentication_total',
        'kafka_server_controller_channel_metrics_failed_reauthentication_rate',
        'kafka_server_controller_channel_metrics_failed_reauthentication_total',
        'kafka_server_controller_channel_metrics_incoming_byte_rate',
        'kafka_server_controller_channel_metrics_incoming_byte_total',
        'kafka_server_controller_channel_metrics_io_ratio',
        'kafka_server_controller_channel_metrics_io_time_ns_avg',
        'kafka_server_controller_channel_metrics_io_wait_ratio',
        'kafka_server_controller_channel_metrics_io_wait_time_ns_avg',
        'kafka_server_controller_channel_metrics_io_waittime_total',
        'kafka_server_controller_channel_metrics_iotime_total',
        'kafka_server_controller_channel_metrics_network_io_rate',
        'kafka_server_controller_channel_metrics_network_io_total',
        'kafka_server_controller_channel_metrics_outgoing_byte_rate',
        'kafka_server_controller_channel_metrics_outgoing_byte_total',
        'kafka_server_controller_channel_metrics_reauthentication_latency_avg',
        'kafka_server_controller_channel_metrics_reauthentication_latency_max',
        'kafka_server_controller_channel_metrics_request_rate',
        'kafka_server_controller_channel_metrics_request_size_avg',
        'kafka_server_controller_channel_metrics_request_size_max',
        'kafka_server_controller_channel_metrics_request_total',
        'kafka_server_controller_channel_metrics_response_rate',
        'kafka_server_controller_channel_metrics_response_total',
        'kafka_server_controller_channel_metrics_select_rate',
        'kafka_server_controller_channel_metrics_select_total',
        'kafka_server_controller_channel_metrics_successful_authentication_no_reauth_total',
        'kafka_server_controller_channel_metrics_successful_authentication_rate',
        'kafka_server_controller_channel_metrics_successful_authentication_total',
        'kafka_server_controller_channel_metrics_successful_reauthentication_rate',
        'kafka_server_controller_channel_metrics_successful_reauthentication_total',
        'kafka_server_kafka_metrics_count_count',
        'kafka_server_replica_fetcher_metrics_connection_close_rate',
        'kafka_server_replica_fetcher_metrics_connection_close_total',
        'kafka_server_replica_fetcher_metrics_connection_count',
        'kafka_server_replica_fetcher_metrics_connection_creation_rate',
        'kafka_server_replica_fetcher_metrics_connection_creation_total',
        'kafka_server_replica_fetcher_metrics_failed_authentication_rate',
        'kafka_server_replica_fetcher_metrics_failed_authentication_total',
        'kafka_server_replica_fetcher_metrics_incoming_byte_rate',
        'kafka_server_replica_fetcher_metrics_incoming_byte_total',
        'kafka_server_replica_fetcher_metrics_io_ratio',
        'kafka_server_replica_fetcher_metrics_io_time_ns_avg',
        'kafka_server_replica_fetcher_metrics_io_wait_ratio',
        'kafka_server_replica_fetcher_metrics_io_wait_time_ns_avg',
        'kafka_server_replica_fetcher_metrics_io_waittime_total',
        'kafka_server_replica_fetcher_metrics_iotime_total',
        'kafka_server_replica_fetcher_metrics_network_io_rate',
        'kafka_server_replica_fetcher_metrics_network_io_total',
        'kafka_server_replica_fetcher_metrics_outgoing_byte_rate',
        'kafka_server_replica_fetcher_metrics_outgoing_byte_total',
        'kafka_server_replica_fetcher_metrics_request_rate',
        'kafka_server_replica_fetcher_metrics_request_size_avg',
        'kafka_server_replica_fetcher_metrics_request_size_max',
        'kafka_server_replica_fetcher_metrics_request_total',
        'kafka_server_replica_fetcher_metrics_response_rate',
        'kafka_server_replica_fetcher_metrics_response_total',
        'kafka_server_replica_fetcher_metrics_select_rate',
        'kafka_server_replica_fetcher_metrics_select_total',
        'kafka_server_replica_fetcher_metrics_successful_authentication_rate',
        'kafka_server_replica_fetcher_metrics_successful_authentication_total',
        'kafka_server_socket_server_metrics_MemoryPoolAvgDepletedPercent',
        'kafka_server_socket_server_metrics_MemoryPoolDepletedTimeTotal',
        'kafka_server_socket_server_metrics_connection_close_rate',
        'kafka_server_socket_server_metrics_connection_close_total',
        'kafka_server_socket_server_metrics_connection_count',
        'kafka_server_socket_server_metrics_connection_creation_rate',
        'kafka_server_socket_server_metrics_connection_creation_total',
        'kafka_server_socket_server_metrics_expired_connections_killed_count',
        'kafka_server_socket_server_metrics_failed_authentication_rate',
        'kafka_server_socket_server_metrics_failed_authentication_total',
        'kafka_server_socket_server_metrics_failed_reauthentication_rate',
        'kafka_server_socket_server_metrics_failed_reauthentication_total',
        'kafka_server_socket_server_metrics_incoming_byte_rate',
        'kafka_server_socket_server_metrics_incoming_byte_total',
        'kafka_server_socket_server_metrics_io_ratio',
        'kafka_server_socket_server_metrics_io_time_ns_avg',
        'kafka_server_socket_server_metrics_io_wait_ratio',
        'kafka_server_socket_server_metrics_io_wait_time_ns_avg',
        'kafka_server_socket_server_metrics_io_waittime_total',
        'kafka_server_socket_server_metrics_iotime_total',
        'kafka_server_socket_server_metrics_network_io_rate',
        'kafka_server_socket_server_metrics_network_io_total',
        'kafka_server_socket_server_metrics_outgoing_byte_rate',
        'kafka_server_socket_server_metrics_outgoing_byte_total',
        'kafka_server_socket_server_metrics_reauthentication_latency_avg',
        'kafka_server_socket_server_metrics_reauthentication_latency_max',
        'kafka_server_socket_server_metrics_request_rate',
        'kafka_server_socket_server_metrics_request_size_avg',
        'kafka_server_socket_server_metrics_request_size_max',
        'kafka_server_socket_server_metrics_request_total',
        'kafka_server_socket_server_metrics_response_rate',
        'kafka_server_socket_server_metrics_response_total',
        'kafka_server_socket_server_metrics_select_rate',
        'kafka_server_socket_server_metrics_select_total',
        'kafka_server_socket_server_metrics_successful_authentication_no_reauth_total',
        'kafka_server_socket_server_metrics_successful_authentication_rate',
        'kafka_server_socket_server_metrics_successful_authentication_total',
        'kafka_server_socket_server_metrics_successful_reauthentication_rate',
        'kafka_server_socket_server_metrics_successful_reauthentication_total',
        'kafka_server_txn_marker_channel_metrics_connection_close_rate',
        'kafka_server_txn_marker_channel_metrics_connection_close_total',
        'kafka_server_txn_marker_channel_metrics_connection_count',
        'kafka_server_txn_marker_channel_metrics_connection_creation_rate',
        'kafka_server_txn_marker_channel_metrics_connection_creation_total',
        'kafka_server_txn_marker_channel_metrics_failed_authentication_rate',
        'kafka_server_txn_marker_channel_metrics_failed_authentication_total',
        'kafka_server_txn_marker_channel_metrics_failed_reauthentication_rate',
        'kafka_server_txn_marker_channel_metrics_failed_reauthentication_total',
        'kafka_server_txn_marker_channel_metrics_incoming_byte_rate',
        'kafka_server_txn_marker_channel_metrics_incoming_byte_total',
        'kafka_server_txn_marker_channel_metrics_io_ratio',
        'kafka_server_txn_marker_channel_metrics_io_time_ns_avg',
        'kafka_server_txn_marker_channel_metrics_io_wait_ratio',
        'kafka_server_txn_marker_channel_metrics_io_wait_time_ns_avg',
        'kafka_server_txn_marker_channel_metrics_io_waittime_total',
        'kafka_server_txn_marker_channel_metrics_iotime_total',
        'kafka_server_txn_marker_channel_metrics_network_io_rate',
        'kafka_server_txn_marker_channel_metrics_network_io_total',
        'kafka_server_txn_marker_channel_metrics_outgoing_byte_rate',
        'kafka_server_txn_marker_channel_metrics_outgoing_byte_total',
        'kafka_server_txn_marker_channel_metrics_reauthentication_latency_avg',
        'kafka_server_txn_marker_channel_metrics_reauthentication_latency_max',
        'kafka_server_txn_marker_channel_metrics_request_rate',
        'kafka_server_txn_marker_channel_metrics_request_size_avg',
        'kafka_server_txn_marker_channel_metrics_request_size_max',
        'kafka_server_txn_marker_channel_metrics_request_total',
        'kafka_server_txn_marker_channel_metrics_response_rate',
        'kafka_server_txn_marker_channel_metrics_response_total',
        'kafka_server_txn_marker_channel_metrics_select_rate',
        'kafka_server_txn_marker_channel_metrics_select_total',
        'kafka_server_txn_marker_channel_metrics_successful_authentication_no_reauth_total',
        'kafka_server_txn_marker_channel_metrics_successful_authentication_rate',
        'kafka_server_txn_marker_channel_metrics_successful_authentication_total',
        'kafka_server_txn_marker_channel_metrics_successful_reauthentication_rate',
        'kafka_server_txn_marker_channel_metrics_successful_reauthentication_total',
        'kafka_utils_Throttler_Count',
        'kafka_utils_Throttler_FifteenMinuteRate',
        'kafka_utils_Throttler_FiveMinuteRate',
        'kafka_utils_Throttler_MeanRate',
        'kafka_utils_Throttler_OneMinuteRate',
    ),
    'gauge',
)
# Metrics that are remapped from a per-name JMX metric onto one consolidated
# metric with the original name carried as a label. Each entry maps the raw
# metric id to: the legacy dotted name, the new consolidated name, the label
# key used to carry the original name, and the submitted metric type.
METRICS_WITH_NAME_AS_LABEL = {
    'kafka_network_RequestMetrics_Count': {
        'legacy_name': 'kafka.network.RequestMetrics.Count',
        'new_name': 'kafka.network.request',
        'label_name': 'name',
        'metric_type': 'gauge',
    },
    'kafka_server_BrokerTopicMetrics_Count': {
        'legacy_name': 'kafka.server.BrokerTopicMetrics.Count',
        'new_name': 'kafka.server.broker_topics',
        'label_name': 'name',
        'metric_type': 'gauge',
    },
    'kafka_server_ReplicaManager_Value': {
        'legacy_name': 'kafka.server.ReplicaManager.Value',
        'new_name': 'kafka.server.replica_manager',
        'label_name': 'name',
        'metric_type': 'gauge',
    },
}
def construct_node_metrics_config():
    """Build the node-metrics config list from NODE_METRICS_MAP.

    Returns a list of single-entry dicts of the form
    ``{raw_name: {'name': mapped_name[, 'type': override]}}``. Metrics ending
    in ``_total`` that have no explicit override get the suffix stripped from
    both the raw and the mapped name.
    """
    remaining = dict(NODE_METRICS_MAP)
    configs = []
    # These metrics are exposed twice (plain and *_total); emit a single
    # native_dynamic entry and drop both raw forms from further processing.
    for dynamic in ('go_memstats_alloc_bytes',):
        configs.append({dynamic: {'name': remaining[dynamic], 'type': 'native_dynamic'}})
        remaining.pop(dynamic)
        remaining.pop('{}_total'.format(dynamic))
    suffix = '_total'
    for raw_name, mapped_name in remaining.items():
        if raw_name.endswith(suffix) and raw_name not in NODE_METRICS_OVERRIDES:
            # Strip '_total' / '.total' (same length) from both names.
            raw_name = raw_name[: -len(suffix)]
            mapped_name = mapped_name[: -len(suffix)]
        entry = {raw_name: {'name': mapped_name}}
        if raw_name in NODE_METRICS_OVERRIDES:
            entry[raw_name]['type'] = NODE_METRICS_OVERRIDES[raw_name]
        configs.append(entry)
    return configs
def construct_jmx_metrics_config():
    """Build the JMX metrics config list from JMX_METRICS_MAP.

    Returns a list of single-entry dicts of the form
    ``{raw_name: {'name': mapped_name[, 'type': override]}}``. Metrics ending
    in ``_total`` with no explicit override have the suffix stripped from both
    the raw and the mapped name.
    """
    suffix = '_total'
    configs = []
    for raw_name, mapped_name in JMX_METRICS_MAP.items():
        if raw_name.endswith(suffix) and raw_name not in JMX_METRICS_OVERRIDES:
            # Strip '_total' / '.total' (same length) from both names.
            raw_name = raw_name[: -len(suffix)]
            mapped_name = mapped_name[: -len(suffix)]
        entry = {raw_name: {'name': mapped_name}}
        if raw_name in JMX_METRICS_OVERRIDES:
            entry[raw_name]['type'] = JMX_METRICS_OVERRIDES[raw_name]
        configs.append(entry)
    return configs
| 66.091533
| 119
| 0.805675
| 6,134
| 57,764
| 7.075481
| 0.033909
| 0.15055
| 0.072994
| 0.062003
| 0.843206
| 0.820695
| 0.78641
| 0.529527
| 0.414484
| 0.366098
| 0
| 0.003903
| 0.095042
| 57,764
| 873
| 120
| 66.167239
| 0.826357
| 0.00187
| 0
| 0.130841
| 0
| 0.002336
| 0.81645
| 0.784951
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002336
| false
| 0
| 0
| 0
| 0.004673
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e0035b16cb97f5403e2fba653b28e1358511ee93
| 223
|
py
|
Python
|
lego/apps/emojis/permissions.py
|
ollfkaih/lego
|
b15aacaf09efe90e7f984d25b0e7bddbe12647e8
|
[
"MIT"
] | 45
|
2017-10-24T12:09:06.000Z
|
2021-11-03T21:21:03.000Z
|
lego/apps/emojis/permissions.py
|
ollfkaih/lego
|
b15aacaf09efe90e7f984d25b0e7bddbe12647e8
|
[
"MIT"
] | 980
|
2017-10-24T12:29:07.000Z
|
2022-03-31T04:04:31.000Z
|
lego/apps/emojis/permissions.py
|
ollfkaih/lego
|
b15aacaf09efe90e7f984d25b0e7bddbe12647e8
|
[
"MIT"
] | 23
|
2018-04-11T16:34:22.000Z
|
2021-11-23T12:28:30.000Z
|
from lego.apps.permissions.constants import LIST, VIEW
from lego.apps.permissions.permissions import PermissionHandler
class EmojiPermissionHandler(PermissionHandler):
    """Permission handler for emoji objects.

    Maps LIST and VIEW to False in ``authentication_map`` —
    NOTE(review): presumably this means those actions require no
    authentication; confirm against the PermissionHandler base class.
    """

    authentication_map = {LIST: False, VIEW: False}
| 27.875
| 63
| 0.820628
| 24
| 223
| 7.583333
| 0.583333
| 0.087912
| 0.131868
| 0.252747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107623
| 223
| 7
| 64
| 31.857143
| 0.914573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e00fc46a2d1b85e9e40b7b30956fcd7409c9dcc0
| 192
|
py
|
Python
|
tests/commands/ddtrace_run_disabled.py
|
p7g/dd-trace-py
|
141ac0ab6e9962e3b3bafc9de172076075289a19
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/commands/ddtrace_run_disabled.py
|
p7g/dd-trace-py
|
141ac0ab6e9962e3b3bafc9de172076075289a19
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 3
|
2022-02-16T09:35:37.000Z
|
2022-03-04T16:48:45.000Z
|
tests/commands/ddtrace_run_disabled.py
|
p7g/dd-trace-py
|
141ac0ab6e9962e3b3bafc9de172076075289a19
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2022-02-11T16:34:22.000Z
|
2022-02-11T16:34:22.000Z
|
from ddtrace import _monkey
from ddtrace import tracer
if __name__ == "__main__":
    # NOTE(review): this script appears to expect an environment where tracing
    # is disabled (e.g. via ddtrace-run with DD_TRACE_ENABLED=false) — confirm
    # against the test harness. It verifies the tracer is off and that no
    # modules were monkey-patched, then prints a success marker for the runner.
    assert not tracer.enabled
    assert len(_monkey._get_patched_modules()) == 0
    print("Test success")
| 21.333333
| 51
| 0.729167
| 25
| 192
| 5.08
| 0.76
| 0.173228
| 0.267717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006369
| 0.182292
| 192
| 8
| 52
| 24
| 0.802548
| 0
| 0
| 0
| 0
| 0
| 0.104167
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.166667
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e01f94d55a8cc1a91fe0dc40067fcd2013fc004f
| 1,234
|
py
|
Python
|
terrascript/turbot/r.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/turbot/r.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/turbot/r.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/turbot/r.py
# Automatically generated by tools/makecode.py ()
import warnings
# Emitted at import time; stacklevel=2 attributes the warning to the importing
# module rather than this file.
warnings.warn(
    "using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
# Generated resource stubs (see the header comment): one terrascript.Resource
# subclass per Turbot resource type. Bodies are intentionally empty —
# NOTE(review): the base class presumably derives the Terraform resource type
# from the class name; confirm against terrascript.Resource.
class turbot_file(terrascript.Resource):
    pass
class turbot_folder(terrascript.Resource):
    pass
class turbot_google_directory(terrascript.Resource):
    pass
class turbot_grant(terrascript.Resource):
    pass
class turbot_grant_activation(terrascript.Resource):
    pass
class turbot_ldap_directory(terrascript.Resource):
    pass
class turbot_local_directory(terrascript.Resource):
    pass
class turbot_local_directory_user(terrascript.Resource):
    pass
class turbot_mod(terrascript.Resource):
    pass
class turbot_policy_setting(terrascript.Resource):
    pass
class turbot_profile(terrascript.Resource):
    pass
class turbot_resource(terrascript.Resource):
    pass
class turbot_saml_directory(terrascript.Resource):
    pass
class turbot_shadow_resource(terrascript.Resource):
    pass
class turbot_smart_folder(terrascript.Resource):
    pass
class turbot_smart_folder_attachment(terrascript.Resource):
    pass
class turbot_turbot_directory(terrascript.Resource):
    pass
| 15.820513
| 79
| 0.78282
| 140
| 1,234
| 6.685714
| 0.292857
| 0.199786
| 0.417735
| 0.478632
| 0.69765
| 0.443376
| 0.208333
| 0.112179
| 0
| 0
| 0
| 0.00095
| 0.146677
| 1,234
| 77
| 80
| 16.025974
| 0.887939
| 0.057536
| 0
| 0.435897
| 1
| 0
| 0.033621
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.435897
| 0.051282
| 0
| 0.487179
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
e0419b2ce8c9928d43ee8e7901e825aba498a94b
| 14,912
|
py
|
Python
|
integration_tests/src/main/python/ast_test.py
|
rongou/spark-rapids
|
1953d45c8a0d83cbf50bb0d8e6abc830c4b5e2d7
|
[
"Apache-2.0"
] | 415
|
2020-06-22T15:30:07.000Z
|
2022-03-30T20:04:32.000Z
|
integration_tests/src/main/python/ast_test.py
|
rongou/spark-rapids
|
1953d45c8a0d83cbf50bb0d8e6abc830c4b5e2d7
|
[
"Apache-2.0"
] | 4,758
|
2020-06-22T15:35:28.000Z
|
2022-03-31T23:05:00.000Z
|
integration_tests/src/main/python/ast_test.py
|
Surfndez/spark-rapids
|
b47a2450e1e592e94dfb144d121d2f05bc5087c1
|
[
"Apache-2.0"
] | 147
|
2020-06-22T15:38:25.000Z
|
2022-03-10T10:13:06.000Z
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_cpu_and_gpu_are_equal_collect_with_capture
from data_gen import *
from marks import approximate_float
from spark_session import with_cpu_session
import pyspark.sql.functions as f
# Each descriptor contains a list of data generators and a corresponding boolean
# indicating whether that data type is supported by the AST
ast_integral_descrs = [
    (byte_gen, False),  # AST implicitly upcasts to INT32, need AST cast to support
    (short_gen, False), # AST implicitly upcasts to INT32, need AST cast to support
    (int_gen, True),
    (long_gen, True)
]
# Arithmetic covers the integral types plus both floating-point types.
ast_arithmetic_descrs = ast_integral_descrs + [(float_gen, True), (double_gen, True)]
# cudf AST cannot support comparing floating point until it is expressive enough to handle NaNs
# cudf AST does not support strings yet
ast_comparable_descrs = [
    (boolean_gen, True),
    (byte_gen, True),
    (short_gen, True),
    (int_gen, True),
    (long_gen, True),
    (float_gen, False),
    (double_gen, False),
    (timestamp_gen, True),
    (string_gen, False)
]
# Single-type descriptors for operators defined only on booleans / doubles.
ast_boolean_descr = [(boolean_gen, True)]
ast_double_descr = [(double_gen, True)]
def assert_gpu_ast(is_supported, func, conf=None):
    """Run *func* with AST projection enabled and check which exec handled it.

    When ``is_supported`` is True the plan must contain GpuProjectAstExec and
    must not contain GpuProjectExec; when False, the reverse.

    :param is_supported: whether the AST is expected to handle the expression
    :param func: callable taking a SparkSession and returning a DataFrame
    :param conf: optional extra Spark confs (default: none)
    """
    # Avoid a mutable default argument: the previous ``conf={}`` default was
    # shared across calls and could be mutated by downstream helpers.
    if conf is None:
        conf = {}
    exist = "GpuProjectAstExec"
    non_exist = "GpuProjectExec"
    if not is_supported:
        exist = "GpuProjectExec"
        non_exist = "GpuProjectAstExec"
    ast_conf = copy_and_update(conf, {"spark.rapids.sql.projectAstEnabled": "true"})
    assert_cpu_and_gpu_are_equal_collect_with_capture(
        func,
        exist_classes=exist,
        non_exist_classes=non_exist,
        conf=ast_conf)
def assert_unary_ast(data_descr, func, conf=None):
    """Assert AST handling of a unary expression over a single-column df.

    :param data_descr: (data_gen, is_supported) descriptor tuple
    :param func: callable taking a DataFrame and returning a DataFrame
    :param conf: optional extra Spark confs (default: none)
    """
    # Normalize here (not just downstream) so this fix stands on its own and
    # avoids the shared mutable ``conf={}`` default.
    conf = {} if conf is None else conf
    (data_gen, is_supported) = data_descr
    assert_gpu_ast(is_supported, lambda spark: func(unary_op_df(spark, data_gen)), conf=conf)
def assert_binary_ast(data_descr, func, conf=None):
    """Assert AST handling of a binary expression over a two-column df.

    :param data_descr: (data_gen, is_supported) descriptor tuple
    :param func: callable taking a DataFrame and returning a DataFrame
    :param conf: optional extra Spark confs (default: none)
    """
    # Normalize here (not just downstream) so this fix stands on its own and
    # avoids the shared mutable ``conf={}`` default.
    conf = {} if conf is None else conf
    (data_gen, is_supported) = data_descr
    assert_gpu_ast(is_supported, lambda spark: func(binary_op_df(spark, data_gen)), conf=conf)
@pytest.mark.parametrize('data_gen', [boolean_gen, byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen, timestamp_gen], ids=idfn)
def test_literal(spark_tmp_path, data_gen):
    """Non-null literals of every basic type must be handled by the AST."""
    # Write data to Parquet so Spark generates a plan using just the count of the data.
    data_path = spark_tmp_path + '/AST_TEST_DATA'

    def write_data(spark):
        return gen_df(spark, [("a", IntegerGen())]).write.parquet(data_path)

    with_cpu_session(write_data)
    scalar = gen_scalar(data_gen, force_no_nulls=True)

    def select_literal(spark):
        return spark.read.parquet(data_path).select(scalar)

    assert_gpu_ast(is_supported=True, func=select_literal)
@pytest.mark.parametrize('data_gen', [boolean_gen, byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen, timestamp_gen], ids=idfn)
def test_null_literal(spark_tmp_path, data_gen):
    """NULL literals cast to every basic type must be handled by the AST."""
    # Write data to Parquet so Spark generates a plan using just the count of the data.
    data_path = spark_tmp_path + '/AST_TEST_DATA'

    def write_data(spark):
        return gen_df(spark, [("a", IntegerGen())]).write.parquet(data_path)

    with_cpu_session(write_data)
    data_type = data_gen.data_type

    def select_null(spark):
        return spark.read.parquet(data_path).select(f.lit(None).cast(data_type))

    assert_gpu_ast(is_supported=True, func=select_null)
@pytest.mark.parametrize('data_descr', ast_integral_descrs, ids=idfn)
def test_bitwise_not(data_descr):
    """Bitwise NOT on every integral type through the AST path."""
    expr = '~a'
    assert_unary_ast(data_descr, lambda frame: frame.selectExpr(expr))
# Unary plus is a no-op that the optimizer normally strips from the plan, so
# there is no reliable way to force it to execute; this simply verifies that
# we tolerate it when it does appear.
@pytest.mark.parametrize('data_descr', [
    (byte_gen, True),
    (short_gen, True),
    (int_gen, True),
    (long_gen, True),
    (float_gen, True),
    (double_gen, True)], ids=idfn)
def test_unary_positive(data_descr):
    assert_unary_ast(data_descr, lambda frame: frame.selectExpr('+a'))
@pytest.mark.parametrize('data_descr', ast_arithmetic_descrs, ids=idfn)
def test_unary_minus(data_descr):
    """Unary negation across all arithmetic types through the AST path."""
    negate = lambda frame: frame.selectExpr('-a')
    assert_unary_ast(data_descr, negate)
@pytest.mark.parametrize('data_descr', ast_arithmetic_descrs, ids=idfn)
def test_abs(data_descr):
    """abs() across all arithmetic types through the AST path."""
    absolute = lambda frame: frame.selectExpr('abs(a)')
    assert_unary_ast(data_descr, absolute)
# --- Unary math functions ---------------------------------------------------
# Each test below exercises one unary math expression through the AST path.
# Most operate on doubles (ast_double_descr) and compare approximately because
# GPU and CPU float math can differ in the last bits.
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_cbrt(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('cbrt(a)'))
@pytest.mark.parametrize('data_descr', ast_boolean_descr, ids=idfn)
def test_not(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('!a'))
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_rint(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('rint(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_sqrt(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('sqrt(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_sin(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('sin(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_cos(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('cos(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_tan(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('tan(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_cot(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('cot(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_sinh(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('sinh(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_cosh(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('cosh(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_tanh(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('tanh(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_asin(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('asin(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_acos(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('acos(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_atan(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('atan(a)'))
# AST is not expressive enough to support the ASINH Spark emulation expression
@approximate_float
@pytest.mark.parametrize('data_descr', [(double_gen, False)], ids=idfn)
def test_asinh(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('asinh(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_acosh(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('acosh(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_atanh(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('atanh(a)'))
# The default approximate is 1e-6 or 1 in a million
# in some cases we need to adjust this because the algorithm is different
@approximate_float(rel=1e-4, abs=1e-12)
# Because Spark will overflow on large exponents drop to something well below
# what it fails at, note this is binary exponent, not base 10
@pytest.mark.parametrize('data_descr', [(DoubleGen(min_exp=-20, max_exp=20), True)], ids=idfn)
def test_asinh_improved(data_descr):
    # improvedFloatOps uses a different (AST-expressible) asinh implementation
    assert_unary_ast(data_descr, lambda df: df.selectExpr('asinh(a)'),
                     conf={'spark.rapids.sql.improvedFloatOps.enabled': 'true'})
# The default approximate is 1e-6 or 1 in a million
# in some cases we need to adjust this because the algorithm is different
@approximate_float(rel=1e-4, abs=1e-12)
# Because Spark will overflow on large exponents drop to something well below
# what it fails at, note this is binary exponent, not base 10
@pytest.mark.parametrize('data_descr', [(DoubleGen(min_exp=-20, max_exp=20), True)], ids=idfn)
def test_acosh_improved(data_descr):
    # improvedFloatOps uses a different (AST-expressible) acosh implementation
    assert_unary_ast(data_descr, lambda df: df.selectExpr('acosh(a)'),
                     conf={'spark.rapids.sql.improvedFloatOps.enabled': 'true'})
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_exp(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('exp(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_expm1(data_descr):
    assert_unary_ast(data_descr, lambda df: df.selectExpr('expm1(a)'))
# --- Comparison operators ---------------------------------------------------
# Each test exercises one comparison in three forms: column vs scalar,
# scalar vs column, and column vs column, over ast_comparable_descrs.
@pytest.mark.parametrize('data_descr', ast_comparable_descrs, ids=idfn)
def test_eq(data_descr):
    (s1, s2) = gen_scalars(data_descr[0], 2)
    assert_binary_ast(data_descr,
                      lambda df: df.select(
                          f.col('a') == s1,
                          s2 == f.col('b'),
                          f.col('a') == f.col('b')))
@pytest.mark.parametrize('data_descr', ast_comparable_descrs, ids=idfn)
def test_ne(data_descr):
    (s1, s2) = gen_scalars(data_descr[0], 2)
    assert_binary_ast(data_descr,
                      lambda df: df.select(
                          f.col('a') != s1,
                          s2 != f.col('b'),
                          f.col('a') != f.col('b')))
@pytest.mark.parametrize('data_descr', ast_comparable_descrs, ids=idfn)
def test_lt(data_descr):
    (s1, s2) = gen_scalars(data_descr[0], 2)
    assert_binary_ast(data_descr,
                      lambda df: df.select(
                          f.col('a') < s1,
                          s2 < f.col('b'),
                          f.col('a') < f.col('b')))
@pytest.mark.parametrize('data_descr', ast_comparable_descrs, ids=idfn)
def test_lte(data_descr):
    (s1, s2) = gen_scalars(data_descr[0], 2)
    assert_binary_ast(data_descr,
                      lambda df: df.select(
                          f.col('a') <= s1,
                          s2 <= f.col('b'),
                          f.col('a') <= f.col('b')))
@pytest.mark.parametrize('data_descr', ast_comparable_descrs, ids=idfn)
def test_gt(data_descr):
    (s1, s2) = gen_scalars(data_descr[0], 2)
    assert_binary_ast(data_descr,
                      lambda df: df.select(
                          f.col('a') > s1,
                          s2 > f.col('b'),
                          f.col('a') > f.col('b')))
@pytest.mark.parametrize('data_descr', ast_comparable_descrs, ids=idfn)
def test_gte(data_descr):
    (s1, s2) = gen_scalars(data_descr[0], 2)
    assert_binary_ast(data_descr,
                      lambda df: df.select(
                          f.col('a') >= s1,
                          s2 >= f.col('b'),
                          f.col('a') >= f.col('b')))
# --- Bitwise binary operators -----------------------------------------------
# Integral types only; each test checks column-scalar, scalar-column, and
# column-column forms, casting the literal to the column's exact type.
@pytest.mark.parametrize('data_descr', ast_integral_descrs, ids=idfn)
def test_bitwise_and(data_descr):
    data_type = data_descr[0].data_type
    assert_binary_ast(data_descr,
                      lambda df: df.select(
                          f.col('a').bitwiseAND(f.lit(100).cast(data_type)),
                          f.lit(-12).cast(data_type).bitwiseAND(f.col('b')),
                          f.col('a').bitwiseAND(f.col('b'))))
@pytest.mark.parametrize('data_descr', ast_integral_descrs, ids=idfn)
def test_bitwise_or(data_descr):
    data_type = data_descr[0].data_type
    assert_binary_ast(data_descr,
                      lambda df: df.select(
                          f.col('a').bitwiseOR(f.lit(100).cast(data_type)),
                          f.lit(-12).cast(data_type).bitwiseOR(f.col('b')),
                          f.col('a').bitwiseOR(f.col('b'))))
@pytest.mark.parametrize('data_descr', ast_integral_descrs, ids=idfn)
def test_bitwise_xor(data_descr):
    data_type = data_descr[0].data_type
    assert_binary_ast(data_descr,
                      lambda df: df.select(
                          f.col('a').bitwiseXOR(f.lit(100).cast(data_type)),
                          f.lit(-12).cast(data_type).bitwiseXOR(f.col('b')),
                          f.col('a').bitwiseXOR(f.col('b'))))
# --- Arithmetic binary operators --------------------------------------------
# All arithmetic types; literals are cast to the column type so no implicit
# type promotion happens in the plan.
@pytest.mark.parametrize('data_descr', ast_arithmetic_descrs, ids=idfn)
def test_addition(data_descr):
    data_type = data_descr[0].data_type
    assert_binary_ast(data_descr,
                      lambda df: df.select(
                          f.col('a') + f.lit(100).cast(data_type),
                          f.lit(-12).cast(data_type) + f.col('b'),
                          f.col('a') + f.col('b')))
@pytest.mark.parametrize('data_descr', ast_arithmetic_descrs, ids=idfn)
def test_subtraction(data_descr):
    data_type = data_descr[0].data_type
    assert_binary_ast(data_descr,
                      lambda df: df.select(
                          f.col('a') - f.lit(100).cast(data_type),
                          f.lit(-12).cast(data_type) - f.col('b'),
                          f.col('a') - f.col('b')))
@pytest.mark.parametrize('data_descr', ast_arithmetic_descrs, ids=idfn)
def test_multiplication(data_descr):
    data_type = data_descr[0].data_type
    assert_binary_ast(data_descr,
                      lambda df: df.select(
                          f.col('a') * f.lit(100).cast(data_type),
                          f.lit(-12).cast(data_type) * f.col('b'),
                          f.col('a') * f.col('b')))
@approximate_float
def test_scalar_pow():
    """pow() with a scalar on one side is AST-supported in both positions."""
    # For the 'b' field include a lot more values that we would expect customers to use as a part of a pow
    exponent_gen = DoubleGen().with_special_case(
        lambda rand: float(rand.randint(-16, 16)), weight=100.0)
    gen_list = [('a', DoubleGen()), ('b', exponent_gen)]
    assert_gpu_ast(
        is_supported=True,
        func=lambda spark: gen_df(spark, gen_list).selectExpr(
            'pow(a, 7.0)',
            'pow(-12.0, b)'))
@approximate_float
# Column-column pow is expected to fail today; tracked by the issue below.
@pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/89')
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_columnar_pow(data_descr):
    assert_binary_ast(data_descr, lambda df: df.selectExpr('pow(a, b)'))
@pytest.mark.parametrize('data_gen', boolean_gens, ids=idfn)
def test_and(data_gen):
    """Logical AND via AST: column & literal, literal & column, column & column.

    The unused ``data_type = data_gen.data_type`` local from the original was
    removed; booleans need no literal cast.
    """
    assert_gpu_ast(is_supported=True,
                   func=lambda spark: binary_op_df(spark, data_gen).select(
                       f.col('a') & f.lit(True),
                       f.lit(False) & f.col('b'),
                       f.col('a') & f.col('b')))
@pytest.mark.parametrize('data_gen', boolean_gens, ids=idfn)
def test_or(data_gen):
    """Logical OR via AST: column | literal, literal | column, column | column.

    The unused ``data_type = data_gen.data_type`` local from the original was
    removed; booleans need no literal cast.
    """
    assert_gpu_ast(is_supported=True,
                   func=lambda spark: binary_op_df(spark, data_gen).select(
                       f.col('a') | f.lit(True),
                       f.lit(False) | f.col('b'),
                       f.col('a') | f.col('b')))
| 40.411924
| 139
| 0.703393
| 2,285
| 14,912
| 4.343545
| 0.129978
| 0.117884
| 0.088867
| 0.105793
| 0.789018
| 0.766751
| 0.756776
| 0.742771
| 0.730277
| 0.717783
| 0
| 0.010299
| 0.160072
| 14,912
| 368
| 140
| 40.521739
| 0.782116
| 0.132578
| 0
| 0.433099
| 0
| 0
| 0.071783
| 0.008992
| 0
| 0
| 0
| 0
| 0.176056
| 1
| 0.161972
| false
| 0
| 0.021127
| 0
| 0.183099
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e06618e513efead750e924652fbb7c0799957e60
| 129
|
py
|
Python
|
opyapi/utils/__init__.py
|
dkraczkowski/opyapi
|
93f9ca4a39ec3dc0622e3248b96f2efad8323827
|
[
"MIT"
] | 5
|
2019-06-05T19:06:56.000Z
|
2020-05-02T07:57:51.000Z
|
opyapi/utils/__init__.py
|
dkraczkowski/opyapi
|
93f9ca4a39ec3dc0622e3248b96f2efad8323827
|
[
"MIT"
] | 12
|
2019-06-05T19:34:09.000Z
|
2019-09-08T18:50:48.000Z
|
opyapi/utils/__init__.py
|
dkraczkowski/opyapi
|
93f9ca4a39ec3dc0622e3248b96f2efad8323827
|
[
"MIT"
] | 1
|
2019-06-05T20:03:26.000Z
|
2019-06-05T20:03:26.000Z
|
from .doc_string import DocString
from .final import Final
from .immutable import Immutable
from .is_optional import is_optional
| 25.8
| 36
| 0.844961
| 19
| 129
| 5.578947
| 0.473684
| 0.188679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124031
| 129
| 4
| 37
| 32.25
| 0.938053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0eb85383f5a579da600d7b289ce17839339d9e3a
| 44,053
|
py
|
Python
|
glance/tests/functional/test_bin_glance.py
|
dreamhost/glance
|
1d91a4dc2b74d224ea12947d672aa66a576d9d9a
|
[
"Apache-2.0"
] | null | null | null |
glance/tests/functional/test_bin_glance.py
|
dreamhost/glance
|
1d91a4dc2b74d224ea12947d672aa66a576d9d9a
|
[
"Apache-2.0"
] | null | null | null |
glance/tests/functional/test_bin_glance.py
|
dreamhost/glance
|
1d91a4dc2b74d224ea12947d672aa66a576d9d9a
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functional test case that utilizes the bin/glance CLI tool"""
import BaseHTTPServer
import datetime
import httplib2
import json
import os
import tempfile
import thread
import time
from glance.openstack.common import timeutils
from glance.tests import functional
from glance.tests.functional.store_utils import (setup_http,
teardown_http,
get_http_uri)
from glance.tests.utils import execute, requires, minimal_add_command
class TestBinGlance(functional.FunctionalTest):
"""Functional tests for the bin/glance CLI tool"""
def setUp(self):
super(TestBinGlance, self).setUp()
# NOTE(sirp): This is needed in case we are running the tests under an
# environment in which OS_AUTH_STRATEGY=keystone. The test server we
# spin up won't have keystone support, so we need to switch to the
# NoAuth strategy.
os.environ['OS_AUTH_STRATEGY'] = 'noauth'
os.environ['OS_AUTH_URL'] = ''
def _assertStartsWith(self, str, prefix):
msg = 'expected "%s" to start with "%s"' % (str, prefix)
self.assertTrue(str.startswith(prefix), msg)
def _assertNotIn(self, key, bag):
msg = 'Expected not to find substring "%s" in "%s"' % (key, bag)
self.assertFalse(key in bag, msg)
    def test_index_with_https(self):
        """index against an https auth URL must fail cleanly, not with a raw SSL error."""
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        # The test servers speak plain HTTP, so https:// cannot succeed; the CLI
        # should exit non-zero without leaking a low-level SSL handshake error.
        cmd = ("bin/glance -N https://auth/ --port=%d index") % self.api_port
        exitcode, out, err = execute(cmd, raise_error=False)
        self.assertNotEqual(0, exitcode)
        self._assertNotIn('SSL23_GET_SERVER_HELLO', out)
        self.stop_servers()
    def test_add_with_location_and_id(self):
        """Add by location with an explicit id: UUID succeeds, non-UUID is rejected."""
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        api_port = self.api_port
        registry_port = self.registry_port
        # 0. Verify no public images
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('', out.strip())
        image_id = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
        # 1a. Add public image
        cmd = minimal_add_command(api_port,
                                  'MyImage',
                                  'id=%s' % image_id,
                                  'location=http://example.com')
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        expected = 'Added new image with ID: %s' % image_id
        self.assertTrue(expected in out)
        # 1b. Add public image with non-uuid id
        cmd = minimal_add_command(api_port,
                                  'MyImage',
                                  'id=12345',
                                  'location=http://example.com')
        exitcode, out, err = execute(cmd, expected_exitcode=1)
        self.assertEqual(1, exitcode)
        self.assertTrue('Invalid image id format' in out)
        # 2. Verify image added as public image
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        # Skip the two header rows and the trailing empty split element.
        lines = out.split("\n")[2:-1]
        self.assertEqual(1, len(lines))
        line = lines[0]
        image_id, name, disk_format, container_format, size = \
            [c.strip() for c in line.split()]
        self.assertEqual('MyImage', name)
        self.assertEqual('0', size, "Expected image to be 0 bytes in size, "
                                    "but got %s. " % size)
        self.stop_servers()
    def test_add_with_location(self):
        """Add an image by location only; the listed size should be 0 bytes."""
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        api_port = self.api_port
        registry_port = self.registry_port
        # 0. Verify no public images
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('', out.strip())
        # 1. Add public image
        cmd = minimal_add_command(api_port,
                                  'MyImage',
                                  'location=http://localhost:0')
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertTrue(out.strip().startswith('Added new image with ID:'))
        # 2. Verify image added as public image
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        # Skip the two header rows and the trailing empty split element.
        lines = out.split("\n")[2:-1]
        self.assertEqual(1, len(lines))
        line = lines[0]
        img_info = [c.strip() for c in line.split()]
        image_id, name, disk_format, container_format, size = img_info
        self.assertEqual('MyImage', name)
        self.assertEqual('0', size, "Expected image to be 0 bytes in size, "
                                    "but got %s. " % size)
        self.stop_servers()
    def test_add_no_name(self):
        """Adding with no name should succeed; the index shows the name as 'None'."""
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        api_port = self.api_port
        registry_port = self.registry_port
        # 0. Verify no public images
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('', out.strip())
        # 1. Add public image
        # Can't use minimal_add_command since that uses
        # name...
        cmd = ("bin/glance --port=%d add is_public=True"
               " disk_format=raw container_format=ovf"
               " %s" % (api_port, 'location=http://localhost:0'))
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertTrue(out.strip().startswith('Added new image with ID:'))
        # 2. Verify image added as public image
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        lines = out.split("\n")[2:-1]
        self.assertEqual(1, len(lines))
        line = lines[0]
        image_id, name, disk_format, container_format, size = \
            [c.strip() for c in line.split()]
        self.assertEqual('None', name)
        self.stop_servers()
    @requires(teardown=teardown_http)
    def test_add_copying_from(self):
        """Add with copy_from pointing at a local HTTP server; expect 5120 bytes."""
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        setup_http(self)
        api_port = self.api_port
        registry_port = self.registry_port
        # 0. Verify no public images
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('', out.strip())
        # 1. Add public image
        suffix = 'copy_from=%s' % get_http_uri(self, 'foobar')
        cmd = minimal_add_command(api_port, 'MyImage', suffix)
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertTrue(out.strip().startswith('Added new image with ID:'))
        # 2. Verify image added as public image
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        lines = out.split("\n")[2:-1]
        self.assertEqual(1, len(lines))
        line = lines[0]
        image_id, name, disk_format, container_format, size = \
            [c.strip() for c in line.split()]
        self.assertEqual('MyImage', name)
        # 5120 bytes: the size of the fixture served by setup_http — confirm
        # against store_utils if this ever changes.
        self.assertEqual('5120', size, "Expected image to be 5120 bytes "
                                       " in size, but got %s. " % size)
        self.stop_servers()
    def _do_test_update_external_source(self, source):
        """Create a queued image via the REST API, then update it from *source*.

        :param source: attribute name to update with an HTTP URI — either
                       'copy_from' or 'location'. Afterwards the image must be
                       active, 5120 bytes, and show the expected metadata.
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        setup_http(self)
        api_port = self.api_port
        registry_port = self.registry_port
        # 1. Add public image with no image content
        headers = {'X-Image-Meta-Name': 'MyImage',
                   'X-Image-Meta-disk_format': 'raw',
                   'X-Image-Meta-container_format': 'ovf',
                   'X-Image-Meta-Is-Public': 'True'}
        path = "http://%s:%d/v1/images" % ("0.0.0.0", api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers)
        self.assertEqual(response.status, 201)
        data = json.loads(content)
        self.assertEqual(data['image']['name'], 'MyImage')
        image_id = data['image']['id']
        # 2. Update image with external source
        source = '%s=%s' % (source, get_http_uri(self, 'foobar'))
        cmd = "bin/glance update %s %s -p %d" % (image_id, source, api_port)
        exitcode, out, err = execute(cmd, raise_error=False)
        self.assertEqual(0, exitcode)
        self.assertTrue(out.strip().endswith('Updated image %s' % image_id))
        # 3. Verify image is now active and of the correct size
        cmd = "bin/glance --port=%d show %s" % (api_port, image_id)
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        expected_lines = [
            'URI: http://0.0.0.0:%s/v1/images/%s' % (api_port, image_id),
            'Id: %s' % image_id,
            'Public: Yes',
            'Name: MyImage',
            'Status: active',
            'Size: 5120',
            'Disk format: raw',
            'Container format: ovf',
            'Minimum Ram Required (MB): 0',
            'Minimum Disk Required (GB): 0',
        ]
        lines = out.split("\n")
        # Subset check: show may print more fields than we enumerate here.
        self.assertTrue(set(lines) >= set(expected_lines))
        self.stop_servers()
    @requires(teardown=teardown_http)
    def test_update_copying_from(self):
        """
        Tests creating a queued image then subsequently updating
        with a copy-from source
        """
        self._do_test_update_external_source('copy_from')
    @requires(teardown=teardown_http)
    def test_update_location(self):
        """
        Tests creating a queued image then subsequently updating
        with a location source
        """
        self._do_test_update_external_source('location')
    def test_add_with_location_and_stdin(self):
        """Supply both a location and stdin data; the listed size must stay 0."""
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        api_port = self.api_port
        registry_port = self.registry_port
        # 0. Verify no public images
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('', out.strip())
        # 1. Add public image
        with tempfile.NamedTemporaryFile() as image_file:
            image_file.write("XXX")
            image_file.flush()
            file_name = image_file.name
            # The '< file' redirection feeds the temp file to the CLI's stdin.
            cmd = minimal_add_command(api_port,
                                      'MyImage',
                                      'location=http://localhost:0 < %s' %
                                      file_name)
            exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertTrue(out.strip().startswith('Added new image with ID:'))
        # 2. Verify image added as public image
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        lines = out.split("\n")[2:-1]
        self.assertEqual(1, len(lines))
        line = lines[0]
        img_info = [c.strip() for c in line.split()]
        image_id, name, disk_format, container_format, size = img_info
        self.assertEqual('MyImage', name)
        self.assertEqual('0', size, "Expected image to be 0 bytes in size, "
                                    "but got %s. " % size)
        self.stop_servers()
    def test_add_list_delete_list(self):
        """
        We test the following:
        0. Verify no public images in index
        1. Add a public image
        2. Check that image exists in index
        3. Delete the image
        4. Verify no longer in index
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        api_port = self.api_port
        registry_port = self.registry_port
        # 0. Verify no public images
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('', out.strip())
        # 1. Add public image
        with tempfile.NamedTemporaryFile() as image_file:
            image_file.write("XXX")
            image_file.flush()
            image_file_name = image_file.name
            # --silent-upload presumably suppresses the progress bar so stdout
            # stays parseable — confirm against the CLI's option docs.
            suffix = '--silent-upload < %s' % image_file_name
            cmd = minimal_add_command(api_port, 'MyImage', suffix)
            exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        msg = out.split("\n")
        self._assertStartsWith(msg[0], 'Added new image with ID:')
        # 2. Verify image added as public image
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        lines = out.split("\n")[2:-1]
        self.assertEqual(1, len(lines))
        line = lines[0]
        img_info = [c.strip() for c in line.split()]
        image_id, name, disk_format, container_format, size = img_info
        self.assertEqual('MyImage', name)
        self.assertEqual('3', size,
                         "Expected image to be 3 bytes in size, but got %s. "
                         "Make sure you're running the correct version "
                         "of webob." % size)
        # 3. Delete the image
        cmd = "bin/glance --port=%d --force delete %s" % (api_port, image_id)
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('Deleted image %s' % image_id, out.strip())
        # 4. Verify no public images
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('', out.strip())
        self.stop_servers()
    def test_add_list_update_list(self):
        """
        Test for LP Bugs #736295, #767203
        We test the following:
        0. Verify no public images in index
        1. Add a NON-public image
        2. Check that image does not appear in index
        3. Update the image to be public
        4. Check that image now appears in index
        5. Update the image's Name attribute
        6. Verify the updated name is shown
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        api_port = self.api_port
        registry_port = self.registry_port
        # 0. Verify no public images
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('', out.strip())
        # 1. Add public image
        cmd = minimal_add_command(api_port,
                                  'MyImage',
                                  'location=http://localhost:0',
                                  public=False)
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        msg = out.split('\n')
        self.assertTrue(msg[0].startswith('Added new image with ID:'))
        # Parse the image id out of "Added new image with ID: <id>".
        image_id = out.strip().split(':')[1].strip()
        # 2. Verify image does not appear as a public image
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('', out.strip())
        # 3. Update the image to make it public
        cmd = "bin/glance --port=%d update %s is_public=True" % (
            api_port, image_id)
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('Updated image %s' % image_id, out.strip())
        # 4. Verify image 1 in list of public images
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        lines = out.split("\n")[2:-1]
        self.assertEqual(len(lines), 1)
        self.assertTrue('MyImage' in lines[0])
        # 5. Update the image's Name attribute
        updated_image_name = "Updated image name"
        cmd = ("bin/glance --port=%d update %s is_public=True name=\"%s\"" %
               (api_port, image_id, updated_image_name))
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('Updated image %s' % image_id, out.strip())
        # 6. Verify updated name shown
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertTrue(updated_image_name in out,
                        "%s not found in %s" % (updated_image_name, out))
        self.stop_servers()
    def test_killed_image_not_in_index(self):
        """
        We test conditions that produced LP Bug #768969, where an image
        in the 'killed' status is displayed in the output of glance index,
        and the status column is not displayed in the output of
        glance show <ID>.
        Start servers with Swift backend and a bad auth URL, and then:
        0. Verify no public images in index
        1. Attempt to add an image
        2. Verify the image does NOT appear in the index output
        """
        self.cleanup()
        # Start servers with a Swift backend and a bad auth URL
        override_options = {
            'default_store': 'swift',
            'swift_store_auth_address': 'badurl',
        }
        options = self.__dict__.copy()
        options.update(override_options)
        self.start_servers(**options)
        api_port = self.api_port
        registry_port = self.registry_port
        # 0. Verify no public images
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('', out.strip())
        # 1. Attempt to add an image
        with tempfile.NamedTemporaryFile() as image_file:
            image_file.write("XXX")
            image_file.flush()
            image_file_name = image_file.name
            cmd = ("bin/glance --port=%d add name=Jonas is_public=True "
                   "disk_format=qcow2 container_format=bare < %s"
                   % (api_port, image_file_name))
            # raise_error=False: the add is EXPECTED to fail against the bad
            # Swift auth URL; we assert on the exit code instead.
            exitcode, out, err = execute(cmd, raise_error=False)
            self.assertNotEqual(0, exitcode)
            self.assertTrue('Failed to add image.' in out)
        # 2. Verify image does not appear as public image
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertEqual('', out.strip())
        self.stop_servers()
    def test_add_location_with_checksum(self):
        """
        We test the following:
        1. Add an image with location and checksum
        2. Run SQL against DB to verify checksum was entered correctly
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        api_port = self.api_port
        registry_port = self.registry_port
        # 1. Add public image
        cmd = minimal_add_command(api_port,
                                  'MyImage',
                                  'location=http://localhost:0 checksum=1')
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertTrue(out.strip().startswith('Added new image with ID:'))
        # Parse the image id out of "Added new image with ID: <id>".
        image_id = out.split(":")[1].strip()
        sql = 'SELECT checksum FROM images WHERE id = "%s"' % image_id
        recs = self.run_sql_cmd(sql)
        self.assertEqual('1', recs.first()[0])
        self.stop_servers()
    def test_add_location_without_checksum(self):
        """
        We test the following:
        1. Add an image with location and no checksum
        2. Run SQL against DB to verify checksum is NULL
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        api_port = self.api_port
        registry_port = self.registry_port
        # 1. Add public image
        cmd = minimal_add_command(api_port,
                                  'MyImage',
                                  'location=http://localhost:0')
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        self.assertTrue(out.strip().startswith('Added new image with ID:'))
        # Parse the image id out of "Added new image with ID: <id>".
        image_id = out.split(":")[1].strip()
        sql = 'SELECT checksum FROM images WHERE id = "%s"' % image_id
        recs = self.run_sql_cmd(sql)
        self.assertEqual(None, recs.first()[0])
        self.stop_servers()
    def test_add_clear(self):
        """
        We test the following:
        1. Add a couple images with metadata
        2. Clear the images
        3. Verify no public images found
        4. Run SQL against DB to verify no undeleted properties
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        api_port = self.api_port
        registry_port = self.registry_port
        # 1. Add some images
        # range(1, 5) -> four images are added.
        for i in range(1, 5):
            cmd = minimal_add_command(api_port,
                                      'MyImage',
                                      'foo=bar')
            exitcode, out, err = execute(cmd)
            self.assertEqual(0, exitcode)
            self.assertTrue(out.strip().find('Added new image with ID:') > -1)
        # 2. Clear all images
        cmd = "bin/glance --port=%d --force clear" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        # 3. Verify no public images are found
        cmd = "bin/glance --port=%d index" % api_port
        exitcode, out, err = execute(cmd)
        self.assertEqual(0, exitcode)
        lines = out.split("\n")
        first_line = lines[0]
        self.assertEqual('', first_line)
        # 4. Lastly we manually verify with SQL that image properties are
        # also getting marked as deleted.
        sql = "SELECT COUNT(*) FROM image_properties WHERE deleted = 0"
        recs = self.run_sql_cmd(sql)
        for rec in recs:
            self.assertEqual(0, rec[0])
        self.stop_servers()
def test_results_filtering(self):
self.cleanup()
self.start_servers(**self.__dict__.copy())
api_port = self.api_port
registry_port = self.registry_port
# 1. Add some images
_add_cmd = "bin/glance --port=%d add is_public=True" % api_port
_add_args = [
"name=Name1 disk_format=vhd container_format=ovf foo=bar",
"name=Name2 disk_format=ami container_format=ami foo=bar",
"name=Name3 disk_format=vhd container_format=ovf foo=baz "
"min_disk=7 min_ram=256",
]
image_ids = []
for i, args in enumerate(_add_args):
cmd = "%s %s" % (_add_cmd, args)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue(out.strip().find('Added new image with ID:') > -1)
image_ids.append(out.strip().split(':')[1].strip())
_base_cmd = "bin/glance --port=%d" % api_port
_index_cmd = "%s index -f" % (_base_cmd,)
# 2. Check name filter
cmd = "name=Name2"
exitcode, out, err = execute("%s %s" % (_index_cmd, cmd))
image_lines = out.split("\n")[2:-1]
self.assertEqual(0, exitcode)
self.assertEqual(1, len(image_lines))
self.assertEqual(image_lines[0].split()[0], image_ids[1])
# 3. Check disk_format filter
cmd = "disk_format=vhd"
exitcode, out, err = execute("%s %s" % (_index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(2, len(image_lines))
self.assertEqual(image_lines[0].split()[0], image_ids[2])
self.assertEqual(image_lines[1].split()[0], image_ids[0])
# 4. Check container_format filter
cmd = "container_format=ami"
exitcode, out, err = execute("%s %s" % (_index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(1, len(image_lines))
self.assertEqual(image_lines[0].split()[0], image_ids[1])
# 5. Check container_format filter
cmd = "container_format=ami"
exitcode, out, err = execute("%s %s" % (_index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(1, len(image_lines))
self.assertEqual(image_lines[0].split()[0], image_ids[1])
# 6. Check status filter
cmd = "status=killed"
exitcode, out, err = execute("%s %s" % (_index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(0, len(image_lines))
# 7. Check property filter
cmd = "foo=bar"
exitcode, out, err = execute("%s %s" % (_index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(2, len(image_lines))
self.assertEqual(image_lines[0].split()[0], image_ids[1])
self.assertEqual(image_lines[1].split()[0], image_ids[0])
# 8. Check multiple filters
cmd = "name=Name2 foo=bar"
exitcode, out, err = execute("%s %s" % (_index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(1, len(image_lines))
self.assertEqual(image_lines[0].split()[0], image_ids[1])
# 9. Check past changes-since
dt1 = timeutils.utcnow() - datetime.timedelta(1)
iso1 = timeutils.isotime(dt1)
cmd = "changes-since=%s" % iso1
exitcode, out, err = execute("%s %s" % (_index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(3, len(image_lines))
self.assertEqual(image_lines[0].split()[0], image_ids[2])
self.assertEqual(image_lines[1].split()[0], image_ids[1])
self.assertEqual(image_lines[2].split()[0], image_ids[0])
# 10. Check future changes-since
dt2 = timeutils.utcnow() + datetime.timedelta(1)
iso2 = timeutils.isotime(dt2)
cmd = "changes-since=%s" % iso2
exitcode, out, err = execute("%s %s" % (_index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(0, len(image_lines))
# 11. Ensure details call also respects filters
_details_cmd = "%s details" % (_base_cmd,)
cmd = "foo=bar"
exitcode, out, err = execute("%s %s" % (_details_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[1:-1]
self.assertEqual(30, len(image_lines))
self.assertEqual(image_lines[1].split()[1], image_ids[1])
self.assertEqual(image_lines[16].split()[1], image_ids[0])
# 12. Check min_ram filter
cmd = "min_ram=256"
exitcode, out, err = execute("%s %s" % (_details_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(14, len(image_lines))
self.assertEqual(image_lines[0].split()[1], image_ids[2])
# 13. Check min_disk filter
cmd = "min_disk=7"
exitcode, out, err = execute("%s %s" % (_details_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(14, len(image_lines))
self.assertEqual(image_lines[0].split()[1], image_ids[2])
self.stop_servers()
def test_results_pagination(self):
self.cleanup()
self.start_servers(**self.__dict__.copy())
_base_cmd = "bin/glance --port=%d" % self.api_port
index_cmd = "%s index -f" % _base_cmd
details_cmd = "%s details -f" % _base_cmd
# 1. Add some images
_add_cmd = "bin/glance --port=%d add is_public=True" % self.api_port
_add_args = [
"name=Name1 disk_format=ami container_format=ami",
"name=Name2 disk_format=vhd container_format=ovf",
"name=Name3 disk_format=ami container_format=ami",
"name=Name4 disk_format=ami container_format=ami",
"name=Name5 disk_format=vhd container_format=ovf",
]
image_ids = []
for i, args in enumerate(_add_args):
cmd = "%s %s" % (_add_cmd, args)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
expected_out = 'Added new image with ID: %d' % (i + 1,)
self.assertTrue(out.strip().find('Added new image with ID:') > -1)
image_ids.append(out.strip().split(':')[1].strip())
# 2. Limit less than total
cmd = "--limit=3"
exitcode, out, err = execute("%s %s" % (index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(5, len(image_lines))
self.assertTrue(image_lines[0].split()[0], image_ids[0])
self.assertTrue(image_lines[1].split()[0], image_ids[1])
self.assertTrue(image_lines[2].split()[0], image_ids[2])
self.assertTrue(image_lines[3].split()[0], image_ids[3])
self.assertTrue(image_lines[4].split()[0], image_ids[4])
# 3. With a marker
cmd = "--marker=%s" % image_ids[3]
exitcode, out, err = execute("%s %s" % (index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(3, len(image_lines))
self.assertTrue(image_lines[0].split()[0], image_ids[1])
self.assertTrue(image_lines[1].split()[0], image_ids[2])
self.assertTrue(image_lines[2].split()[0], image_ids[3])
# 3. With a marker and limit
cmd = "--marker=%s --limit=1" % image_ids[2]
exitcode, out, err = execute("%s %s" % (index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(2, len(image_lines))
self.assertTrue(image_lines[0].split()[0], image_ids[1])
self.assertTrue(image_lines[1].split()[0], image_ids[2])
# 4. Pagination params with filtered results
cmd = "--marker=%s --limit=1 container_format=ami" % image_ids[3]
exitcode, out, err = execute("%s %s" % (index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(2, len(image_lines))
self.assertTrue(image_lines[0].split()[0], image_ids[2])
self.assertTrue(image_lines[1].split()[0], image_ids[1])
# 5. Pagination params with filtered results in a details call
cmd = "--marker=%s --limit=1 container_format=ami" % image_ids[3]
exitcode, out, err = execute("%s %s" % (details_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[1:-1]
self.assertEqual(28, len(image_lines))
self.assertTrue(image_lines[1].split()[1], image_ids[2])
self.assertTrue(image_lines[15].split()[1], image_ids[1])
self.stop_servers()
def test_results_sorting(self):
self.cleanup()
self.start_servers(**self.__dict__.copy())
_base_cmd = "bin/glance --port=%d" % self.api_port
index_cmd = "%s index -f" % _base_cmd
details_cmd = "%s details -f" % _base_cmd
# 1. Add some images
_add_cmd = "bin/glance --port=%d add is_public=True" % self.api_port
_add_args = [
"name=Name1 disk_format=ami container_format=ami",
"name=Name4 disk_format=vhd container_format=ovf",
"name=Name3 disk_format=ami container_format=ami",
"name=Name2 disk_format=ami container_format=ami",
"name=Name5 disk_format=vhd container_format=ovf",
]
image_ids = []
for i, args in enumerate(_add_args):
cmd = "%s %s" % (_add_cmd, args)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
expected_out = 'Added new image with ID: %d' % (i + 1,)
self.assertTrue(out.strip().find('Added new image with ID:') > -1)
image_ids.append(out.strip().split(':')[1].strip())
# 2. Sort by name asc
cmd = "--sort_key=name --sort_dir=asc"
exitcode, out, err = execute("%s %s" % (index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(5, len(image_lines))
self.assertTrue(image_lines[0].split()[0], image_ids[0])
self.assertTrue(image_lines[1].split()[0], image_ids[1])
self.assertTrue(image_lines[2].split()[0], image_ids[2])
self.assertTrue(image_lines[3].split()[0], image_ids[3])
self.assertTrue(image_lines[4].split()[0], image_ids[4])
# 3. Sort by name asc with a marker
cmd = "--sort_key=name --sort_dir=asc --marker=%s" % image_ids[3]
exitcode, out, err = execute("%s %s" % (index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(3, len(image_lines))
self.assertTrue(image_lines[0].split()[0], image_ids[2])
self.assertTrue(image_lines[1].split()[0], image_ids[1])
self.assertTrue(image_lines[2].split()[0], image_ids[4])
# 4. Sort by container_format desc
cmd = "--sort_key=container_format --sort_dir=desc --limit=10"
exitcode, out, err = execute("%s %s" % (index_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[2:-1]
self.assertEqual(5, len(image_lines))
self.assertTrue(image_lines[0].split()[0], image_ids[4])
self.assertTrue(image_lines[1].split()[0], image_ids[1])
self.assertTrue(image_lines[2].split()[0], image_ids[3])
self.assertTrue(image_lines[3].split()[0], image_ids[2])
self.assertTrue(image_lines[4].split()[0], image_ids[0])
# 5. Sort by name asc with a marker (details)
cmd = "--sort_key=name --sort_dir=asc --marker=%s" % image_ids[3]
exitcode, out, err = execute("%s %s" % (details_cmd, cmd))
self.assertEqual(0, exitcode)
image_lines = out.split("\n")[1:-1]
self.assertEqual(42, len(image_lines))
self.assertTrue(image_lines[1].split()[1], image_ids[2])
self.assertTrue(image_lines[15].split()[1], image_ids[1])
self.assertTrue(image_lines[29].split()[1], image_ids[4])
self.stop_servers()
def test_show_image_format(self):
self.cleanup()
self.start_servers(**self.__dict__.copy())
api_port = self.api_port
registry_port = self.registry_port
# 1. Add public image
with tempfile.NamedTemporaryFile() as image_file:
image_file.write("XXX")
image_file.flush()
image_file_name = image_file.name
suffix = ' --silent-upload < %s' % image_file_name
cmd = minimal_add_command(api_port, 'MyImage', suffix)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
image_id = out.strip().rsplit(' ', 1)[1]
# 2. Verify image added as public image
cmd = "bin/glance --port=%d show %s" % (api_port, image_id)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
lines = out.split("\n")[:-1]
expected_lines = [
'URI: http://0.0.0.0:%s/v1/images/%s' % (api_port, image_id),
'Id: %s' % image_id,
'Public: Yes',
'Name: MyImage',
'Status: active',
'Size: 3',
'Disk format: raw',
'Container format: ovf',
'Minimum Ram Required (MB): 0',
'Minimum Disk Required (GB): 0',
]
self.assertTrue(set(lines) >= set(expected_lines))
# 3. Delete the image
cmd = "bin/glance --port=%d --force delete %s" % (api_port, image_id)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertEqual('Deleted image %s' % image_id, out.strip())
self.stop_servers()
def test_protected_image(self):
"""
We test the following:
0. Verify no public images in index
1. Add a public image with a location attr
protected and no image data
2. Check that image exists in index
3. Attempt to delete the image
4. Remove protection from image
5. Delete the image
6. Verify no longer in index
"""
self.cleanup()
self.start_servers()
api_port = self.api_port
registry_port = self.registry_port
# 0. Verify no public images
cmd = "bin/glance --port=%d index" % api_port
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertEqual('', out.strip())
# 1. Add public image
cmd = ("echo testdata | " +
minimal_add_command(api_port,
'MyImage',
'protected=True'))
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
msg = out.split("\n")
self.assertTrue(msg[3].startswith('Added new image with ID:'))
# 2. Verify image added as public image
cmd = "bin/glance --port=%d index" % api_port
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
lines = out.split("\n")[2:-1]
self.assertEqual(1, len(lines))
line = lines[0]
img_info = [c.strip() for c in line.split()]
image_id, name, disk_format, container_format, size = img_info
self.assertEqual('MyImage', name)
# 3. Delete the image
cmd = "bin/glance --port=%d --force delete %s" % (api_port, image_id)
exitcode, out, err = execute(cmd, raise_error=False)
self.assertNotEqual(0, exitcode)
self.assertTrue(out.startswith('You do not have permission'))
# 4. Remove image protection
cmd = ("bin/glance --port=%d --force update %s "
"protected=False" % (api_port, image_id))
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue(out.strip().startswith('Updated image'))
# 5. Delete the image
cmd = "bin/glance --port=%d --force delete %s" % (api_port, image_id)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue(out.strip().startswith('Deleted image'))
# 6. Verify no public images
cmd = "bin/glance --port=%d index" % api_port
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertEqual('', out.strip())
self.stop_servers()
def test_timeout(self):
self.cleanup()
keep_sleeping = True
#start a simple HTTP server in a thread that hangs for a bit
class RemoteImageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
cnt = 1
while (keep_sleeping):
cnt += 1
time.sleep(0.1)
if cnt > 100:
break
server_class = BaseHTTPServer.HTTPServer
local_server = server_class(('127.0.0.1', 0), RemoteImageHandler)
local_ip, local_port = local_server.server_address
def serve_requests(httpd):
httpd.serve_forever()
thread.start_new_thread(serve_requests, (local_server,))
cmd = ("bin/glance --port=%d index --timeout=1") % local_port
exitcode, out, err = execute(cmd, raise_error=False)
keep_sleeping = False
local_server.shutdown()
self.assertNotEqual(0, exitcode)
self.assertTrue("timed out" in out)
def test_add_member(self):
self.cleanup()
self.start_servers(**self.__dict__.copy())
api_port = self.api_port
registry_port = self.registry_port
image_id = "11111111-1111-1111-1111-111111111111"
member_id = "21111111-2111-2111-2111-211111111111"
member2_id = "31111111-3111-3111-3111-311111111111"
# 0. Add an image
cmd = minimal_add_command(api_port,
'MyImage',
'id=%s' % image_id,
'location=http://example.com')
exitcode, out, err = execute(cmd)
# 1. Add an image member
cmd = "bin/glance --port=%d member-add %s %s" % (api_port, image_id,
member_id)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertEqual('', out.strip())
# 2. Verify image-members
cmd = "bin/glance --port=%d image-members %s " % (api_port, image_id)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue(member_id in out)
# 3. Verify member-images
cmd = "bin/glance --port=%d member-images %s " % (api_port, member_id)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue(image_id in out)
# 4. Replace image members
cmd = "bin/glance --port=%d members-replace %s %s" % (api_port,
image_id,
member2_id)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertEqual('', out.strip())
# 5. Verify member-images again for member2
cmd = "bin/glance --port=%d member-images %s " % (api_port, member2_id)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue(image_id in out)
# 6. Verify member-images again for member1 (should not be present)
cmd = "bin/glance --port=%d member-images %s " % (api_port, member_id)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue(image_id not in out)
# 7. Delete the member
cmd = "bin/glance --port=%d member-delete %s %s" % (api_port, image_id,
member2_id)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertEqual('', out.strip())
# 8. Verify image-members is empty
cmd = "bin/glance --port=%d image-members %s " % (api_port, image_id)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertEqual('', out.strip())
self.stop_servers()
| 34.605656
| 79
| 0.574876
| 5,548
| 44,053
| 4.415105
| 0.081651
| 0.098592
| 0.047438
| 0.071157
| 0.792447
| 0.769096
| 0.74354
| 0.709165
| 0.697122
| 0.671117
| 0
| 0.023304
| 0.300615
| 44,053
| 1,272
| 80
| 34.632862
| 0.77173
| 0.12519
| 0
| 0.726221
| 0
| 0
| 0.148457
| 0.008256
| 0
| 0
| 0
| 0
| 0.294344
| 1
| 0.034704
| false
| 0
| 0.015424
| 0
| 0.052699
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0eeb6e60ea01c88f26d035900348dee0c1ea106f
| 97
|
py
|
Python
|
models/container.py
|
sharkwouter/Whale
|
cf149c4f57a3837f0ff1d38449826a87eb481d3d
|
[
"MIT"
] | null | null | null |
models/container.py
|
sharkwouter/Whale
|
cf149c4f57a3837f0ff1d38449826a87eb481d3d
|
[
"MIT"
] | null | null | null |
models/container.py
|
sharkwouter/Whale
|
cf149c4f57a3837f0ff1d38449826a87eb481d3d
|
[
"MIT"
] | null | null | null |
class Container:
def __init__(self, container_id):
self.container_id = container_id
| 19.4
| 40
| 0.71134
| 12
| 97
| 5.166667
| 0.5
| 0.532258
| 0.483871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216495
| 97
| 4
| 41
| 24.25
| 0.815789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1623b5d207a9f7329996965772fa65dec4de7fd8
| 222
|
py
|
Python
|
cuhnsw/__init__.py
|
js1010/cuhnsw
|
7115e222a8b0406a5c371eff720f8280f335f2cc
|
[
"Apache-2.0"
] | 63
|
2021-01-05T07:21:44.000Z
|
2022-03-13T10:27:22.000Z
|
cuhnsw/__init__.py
|
js1010/cuhnsw
|
7115e222a8b0406a5c371eff720f8280f335f2cc
|
[
"Apache-2.0"
] | 3
|
2021-02-09T02:44:38.000Z
|
2021-04-20T12:34:01.000Z
|
cuhnsw/__init__.py
|
js1010/cuhnsw
|
7115e222a8b0406a5c371eff720f8280f335f2cc
|
[
"Apache-2.0"
] | 12
|
2021-02-01T08:22:10.000Z
|
2022-03-11T07:04:21.000Z
|
# Copyright (c) 2020 Jisang Yoon
# All rights reserved.
#
# This source code is licensed under the Apache 2.0 license found in the
# LICENSE file in the root directory of this source tree.
from cuhnsw.pyhnsw import CuHNSW
| 31.714286
| 72
| 0.77027
| 37
| 222
| 4.621622
| 0.810811
| 0.116959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032967
| 0.18018
| 222
| 6
| 73
| 37
| 0.906593
| 0.801802
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
163d398f05d88f2351bdcd53c41622618e4b67c3
| 121
|
py
|
Python
|
applemusicpy/__init__.py
|
mateuuszzzzz/applemusicpy
|
2cfb8e5ff79587c938d09969f2839a0e8b061dda
|
[
"MIT"
] | null | null | null |
applemusicpy/__init__.py
|
mateuuszzzzz/applemusicpy
|
2cfb8e5ff79587c938d09969f2839a0e8b061dda
|
[
"MIT"
] | null | null | null |
applemusicpy/__init__.py
|
mateuuszzzzz/applemusicpy
|
2cfb8e5ff79587c938d09969f2839a0e8b061dda
|
[
"MIT"
] | null | null | null |
from .auth import AuthBase, Auth
from .client import Client, ResourceType
from .types import *
from .exceptions import *
| 24.2
| 40
| 0.785124
| 16
| 121
| 5.9375
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14876
| 121
| 4
| 41
| 30.25
| 0.92233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
164149753d41e33b989cc62991ecab91e07c9ad4
| 52
|
py
|
Python
|
CalNotes/calnote/forms.py
|
erwinlucas/CalNotes
|
8971140802d95028deb036cd7f2568ba824a00e8
|
[
"MIT"
] | null | null | null |
CalNotes/calnote/forms.py
|
erwinlucas/CalNotes
|
8971140802d95028deb036cd7f2568ba824a00e8
|
[
"MIT"
] | null | null | null |
CalNotes/calnote/forms.py
|
erwinlucas/CalNotes
|
8971140802d95028deb036cd7f2568ba824a00e8
|
[
"MIT"
] | null | null | null |
from django import forms
# Insert form classes here
| 17.333333
| 26
| 0.807692
| 8
| 52
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173077
| 52
| 3
| 26
| 17.333333
| 0.976744
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
16646d2c41d5da894aea046de4e8fc7533f3dc99
| 269
|
py
|
Python
|
infratabapp/admin.py
|
sheeshmohsin/infratabtask
|
5884c06d21d0c62a92ae203d941baafffab2e278
|
[
"MIT"
] | null | null | null |
infratabapp/admin.py
|
sheeshmohsin/infratabtask
|
5884c06d21d0c62a92ae203d941baafffab2e278
|
[
"MIT"
] | null | null | null |
infratabapp/admin.py
|
sheeshmohsin/infratabtask
|
5884c06d21d0c62a92ae203d941baafffab2e278
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from infratabapp.models import ReminderDetails, \
EmailNotification, SMSNotification
# Register your models here.
admin.site.register(ReminderDetails)
admin.site.register(EmailNotification)
admin.site.register(SMSNotification)
| 29.888889
| 49
| 0.825279
| 28
| 269
| 7.928571
| 0.5
| 0.121622
| 0.22973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104089
| 269
| 8
| 50
| 33.625
| 0.921162
| 0.096654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
166d84789674e7392faac57081156d323b920666
| 82
|
py
|
Python
|
benchmarks/python/PythonApp1.py
|
satoshigeyuki/Centaurus
|
032ffec87fc8ddb129347974d3478fd1ee5f305a
|
[
"MIT"
] | 3
|
2021-02-23T01:34:28.000Z
|
2021-07-19T08:07:10.000Z
|
benchmarks/python/PythonApp1.py
|
satoshigeyuki/Centaurus
|
032ffec87fc8ddb129347974d3478fd1ee5f305a
|
[
"MIT"
] | null | null | null |
benchmarks/python/PythonApp1.py
|
satoshigeyuki/Centaurus
|
032ffec87fc8ddb129347974d3478fd1ee5f305a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
import CoreLib
if __name__ == "__main__":
pass
| 11.714286
| 26
| 0.695122
| 11
| 82
| 4.454545
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 0.182927
| 82
| 7
| 27
| 11.714286
| 0.716418
| 0.207317
| 0
| 0
| 0
| 0
| 0.123077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
168ca73342c55e3f92d12b742f60849c5bad55fa
| 585
|
py
|
Python
|
momba/model/errors.py
|
koehlma/momba
|
68d6431d2732570696d3c67a9e23006e6e3a7740
|
[
"MIT"
] | 12
|
2021-01-18T14:38:32.000Z
|
2022-01-17T09:16:52.000Z
|
momba/model/errors.py
|
koehlma/momba
|
68d6431d2732570696d3c67a9e23006e6e3a7740
|
[
"MIT"
] | 3
|
2021-05-16T15:26:34.000Z
|
2022-02-21T20:46:55.000Z
|
momba/model/errors.py
|
koehlma/momba
|
68d6431d2732570696d3c67a9e23006e6e3a7740
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
#
# Copyright (C) 2019-2021, Saarland University
# Copyright (C) 2019-2021, Maximilian Köhl <koehl@cs.uni-saarland.de>
from __future__ import annotations
class ModelingError(Exception):
"""
A general modeling error.
"""
class InvalidTypeError(ModelingError):
pass
class IncompatibleAssignmentsError(ModelingError):
pass
class TypeConstructionError(ModelingError):
pass
class InvalidDeclarationError(ModelingError):
pass
class UnboundIdentifierError(ModelingError):
pass
class NotFoundError(ModelingError):
pass
| 15.810811
| 69
| 0.738462
| 55
| 585
| 7.781818
| 0.6
| 0.238318
| 0.257009
| 0.084112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034979
| 0.169231
| 585
| 36
| 70
| 16.25
| 0.845679
| 0.273504
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.428571
| 0.071429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
bc936239413f28660765310ee5e7e8d673042b3c
| 258
|
py
|
Python
|
month01/day05/homework.py
|
Amiao-miao/all-codes
|
ec50036d42d40086cac5fddf6baf4de18ac91e55
|
[
"Apache-2.0"
] | 1
|
2021-02-02T02:17:37.000Z
|
2021-02-02T02:17:37.000Z
|
month01/day05/homework.py
|
Amiao-miao/all-codes
|
ec50036d42d40086cac5fddf6baf4de18ac91e55
|
[
"Apache-2.0"
] | null | null | null |
month01/day05/homework.py
|
Amiao-miao/all-codes
|
ec50036d42d40086cac5fddf6baf4de18ac91e55
|
[
"Apache-2.0"
] | null | null | null |
"""
根据列表中的数字,重复生成*.
list01 = [1, 2, 3, 4, 5, 4, 3, 2, 1]
效果:
*
**
***
****
*****
****
***
**
*
"""
list01 = [1, 2, 3, 4, 5, 4, 3, 2, 1]
for item in list01:
print(item*"*")
| 14.333333
| 40
| 0.263566
| 29
| 258
| 2.344828
| 0.448276
| 0.205882
| 0.235294
| 0.264706
| 0.441176
| 0.441176
| 0.441176
| 0.441176
| 0.441176
| 0.441176
| 0
| 0.181818
| 0.488372
| 258
| 17
| 41
| 15.176471
| 0.333333
| 0.658915
| 0
| 0
| 0
| 0
| 0.0125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bc966ea7be6560d060c3668c23bf36d0d84a78ff
| 74
|
py
|
Python
|
wifi_config.example.py
|
simulatedsimian/iot_light
|
068a2201b782f0d62a31da18aaeaed17469911f4
|
[
"MIT"
] | null | null | null |
wifi_config.example.py
|
simulatedsimian/iot_light
|
068a2201b782f0d62a31da18aaeaed17469911f4
|
[
"MIT"
] | null | null | null |
wifi_config.example.py
|
simulatedsimian/iot_light
|
068a2201b782f0d62a31da18aaeaed17469911f4
|
[
"MIT"
] | null | null | null |
wifi_ssid = "insert wifi ssid here"
wifi_pw = "insert wifi password here"
| 24.666667
| 37
| 0.756757
| 12
| 74
| 4.5
| 0.5
| 0.296296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 74
| 2
| 38
| 37
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0.621622
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.5
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
bc9aeedfc61a0e921459199d502316e20100969e
| 15,719
|
py
|
Python
|
fn_phish_tank/fn_phish_tank/util/customize.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | 65
|
2017-12-04T13:58:32.000Z
|
2022-03-24T18:33:17.000Z
|
fn_phish_tank/fn_phish_tank/util/customize.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | 48
|
2018-03-02T19:17:14.000Z
|
2022-03-09T22:00:38.000Z
|
fn_phish_tank/fn_phish_tank/util/customize.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | 95
|
2018-01-11T16:23:39.000Z
|
2022-03-21T11:34:29.000Z
|
# -*- coding: utf-8 -*-
"""Generate the Resilient customizations required for fn_phish_tank"""
from __future__ import print_function
from resilient_circuits.util import *
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_phish_tank package"""
reload_params = {"package": u"fn_phish_tank",
"incident_fields": [],
"action_fields": [],
"function_params": [u"phish_tank_check_url"],
"datatables": [],
"message_destinations": [u"fn_phish_tank"],
"functions": [u"fn_phish_tank_submit_url"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"example_phishtank_submit_url"],
"actions": [u"Example: PhishTank: Submit URL"],
"incident_artifact_types": []
}
return reload_params
def customization_data(client=None):
"""Produce any customization definitions (types, fields, message destinations, etc)
that should be installed by `resilient-circuits customize`
"""
# This import data contains:
# Function inputs:
# phish_tank_check_url
# Message Destinations:
# fn_phish_tank
# Functions:
# fn_phish_tank_submit_url
# Workflows:
# example_phishtank_submit_url
# Rules:
# Example: PhishTank: Submit URL
yield ImportDefinition(u"""
eyJ0YXNrX29yZGVyIjogW10sICJ3b3JrZmxvd3MiOiBbeyJ1dWlkIjogIjMxMGVkMmZiLWRkYzEt
NDk3OS1iNjJlLTRjNjQzNWZjOWJjNSIsICJkZXNjcmlwdGlvbiI6ICJTZWFyY2hlcyB0aGUgUGhp
c2hUYW5rIGRhdGFiYXNlIChodHRwczovL3d3dy5waGlzaHRhbmsuY29tLykgdG8gZGV0ZXJtaW5l
IGlmIGEgVVJMIGlzIGEgcGhpc2hpbmcgVVJMIG9yIG5vdC4gVGhlIGluZm9ybWF0aW9uIHJldHVy
bmVkIGZyb20gUGhpc2hUYW5rIGlzIHVzZWQgdG8gdXBkYXRlIHRoZSBBcnRpZmFjdHMgZGVzY3Jp
cHRpb24gYW5kIGFkZCBhIG5vdGUgdG8gdGhlIGluY2lkZW50LiIsICJvYmplY3RfdHlwZSI6ICJh
cnRpZmFjdCIsICJleHBvcnRfa2V5IjogImV4YW1wbGVfcGhpc2h0YW5rX3N1Ym1pdF91cmwiLCAi
d29ya2Zsb3dfaWQiOiAzNiwgImxhc3RfbW9kaWZpZWRfYnkiOiAiYWRtaW5AZXhhbXBsZS5jb20i
LCAiY29udGVudCI6IHsieG1sIjogIjw/eG1sIHZlcnNpb249XCIxLjBcIiBlbmNvZGluZz1cIlVU
Ri04XCI/PjxkZWZpbml0aW9ucyB4bWxucz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4v
MjAxMDA1MjQvTU9ERUxcIiB4bWxuczpicG1uZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9C
UE1OLzIwMTAwNTI0L0RJXCIgeG1sbnM6b21nZGM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9E
RC8yMDEwMDUyNC9EQ1wiIHhtbG5zOm9tZ2RpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvREQv
MjAxMDA1MjQvRElcIiB4bWxuczpyZXNpbGllbnQ9XCJodHRwOi8vcmVzaWxpZW50LmlibS5jb20v
YnBtblwiIHhtbG5zOnhzZD1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hXCIgeG1s
bnM6eHNpPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2VcIiB0YXJn
ZXROYW1lc3BhY2U9XCJodHRwOi8vd3d3LmNhbXVuZGEub3JnL3Rlc3RcIj48cHJvY2VzcyBpZD1c
ImV4YW1wbGVfcGhpc2h0YW5rX3N1Ym1pdF91cmxcIiBpc0V4ZWN1dGFibGU9XCJ0cnVlXCIgbmFt
ZT1cIkV4YW1wbGU6IFBoaXNoVGFuazogU3VibWl0IFVSTFwiPjxkb2N1bWVudGF0aW9uPlNlYXJj
aGVzIHRoZSBQaGlzaFRhbmsgZGF0YWJhc2UgKGh0dHBzOi8vd3d3LnBoaXNodGFuay5jb20vKSB0
byBkZXRlcm1pbmUgaWYgYSBVUkwgaXMgYSBwaGlzaGluZyBVUkwgb3Igbm90LiBUaGUgaW5mb3Jt
YXRpb24gcmV0dXJuZWQgZnJvbSBQaGlzaFRhbmsgaXMgdXNlZCB0byB1cGRhdGUgdGhlIEFydGlm
YWN0cyBkZXNjcmlwdGlvbiBhbmQgYWRkIGEgbm90ZSB0byB0aGUgaW5jaWRlbnQuPC9kb2N1bWVu
dGF0aW9uPjxzdGFydEV2ZW50IGlkPVwiU3RhcnRFdmVudF8xNTVhc3htXCI+PG91dGdvaW5nPlNl
cXVlbmNlRmxvd18xbWluN28zPC9vdXRnb2luZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNrIGlk
PVwiU2VydmljZVRhc2tfMTV0ZTR3alwiIG5hbWU9XCJQaGlzaCBUYW5rIFN1Ym1pdCBVUkxcIiBy
ZXNpbGllbnQ6dHlwZT1cImZ1bmN0aW9uXCI+PGV4dGVuc2lvbkVsZW1lbnRzPjxyZXNpbGllbnQ6
ZnVuY3Rpb24gdXVpZD1cIjViOWU2NzE3LThjOWQtNGZkMy1iZWYzLTQ2ZmFlMDg5ZjFiMVwiPntc
ImlucHV0c1wiOnt9LFwicG9zdF9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiZGVmIGFwcGVuZF9hcnRp
ZmFjdF9kZXNjcmlwdGlvbih0aGVfYXJ0aWZhY3QsIHRoZV90ZXh0KTpcXG4gIFxcXCJcXFwiXFxc
IkFwcGVuZHMgdGhlX3RleHQgdG8gdGhlX2FydGlmYWN0LmRlc2NyaXB0aW9uIHNhZmVseVxcbiAg
aGFuZGxpbmcgdW5pY29kZVxcXCJcXFwiXFxcIlxcbiAgXFxuICBuZXdfZGVzY3JpcHRpb24gPSB1
XFxcIlxcXCJcXG4gIFxcbiAgaWYgdGhlX2FydGlmYWN0LmRlc2NyaXB0aW9uIGlzIE5vbmU6XFxu
ICAgIGN1cnJlbnRfZGVzY3JpcHRpb24gPSBOb25lXFxuICBlbHNlOlxcbiAgICBjdXJyZW50X2Rl
c2NyaXB0aW9uID0gdGhlX2FydGlmYWN0LmRlc2NyaXB0aW9uLmdldChcXFwiY29udGVudFxcXCIs
IE5vbmUpXFxuXFxuICBpZiBjdXJyZW50X2Rlc2NyaXB0aW9uIGlzIG5vdCBOb25lOlxcbiAgICBu
ZXdfZGVzY3JpcHRpb24gPSB1XFxcInswfSZsdDticiZndDstLS0mbHQ7YnImZ3Q7ezF9XFxcIi5m
b3JtYXQodW5pY29kZShjdXJyZW50X2Rlc2NyaXB0aW9uKSwgdW5pY29kZSh0aGVfdGV4dCkpXFxu
XFxuICBlbHNlOlxcbiAgICBuZXdfZGVzY3JpcHRpb24gPSB1XFxcInswfVxcXCIuZm9ybWF0KHVu
aWNvZGUodGhlX3RleHQpKVxcblxcbiAgdGhlX2FydGlmYWN0LmRlc2NyaXB0aW9uID0gaGVscGVy
LmNyZWF0ZVJpY2hUZXh0KG5ld19kZXNjcmlwdGlvbilcXG5cXG5cXG5pZiByZXN1bHRzLnN1Y2Nl
c3M6XFxuICBcXG4gICMgR2V0IHRoZSBQaGlzaFRhbmsgUmVzdWx0c1xcbiAgcGhpc2hfdGFua19y
ZXN1bHRzID0gcmVzdWx0cy5jb250ZW50LmdldChcXFwicmVzdWx0c1xcXCIsIHt9KVxcbiAgdXJs
ID0gcGhpc2hfdGFua19yZXN1bHRzLmdldChcXFwidXJsXFxcIiwgdVxcXCJcXFwiKVxcbiAgaW5f
ZGF0YWJhc2UgPSBwaGlzaF90YW5rX3Jlc3VsdHMuZ2V0KFxcXCJpbl9kYXRhYmFzZVxcXCIsIEZh
bHNlKVxcbiAgaXNfdmVyaWZpZWQgPSBwaGlzaF90YW5rX3Jlc3VsdHMuZ2V0KFxcXCJ2ZXJpZmll
ZFxcXCIsIEZhbHNlKVxcbiAgaXNfdmFsaWQgPSBwaGlzaF90YW5rX3Jlc3VsdHMuZ2V0KFxcXCJ2
YWxpZFxcXCIsIEZhbHNlKVxcbiAgXFxuICAjIERlZmluZSB0aGUgY29tbWVudCBhbmQgbXNnIHRv
IGJlIGFwcGVuZGVkIHRvIHRoZSBBcnRpZmFjdCdzIERlc2NyaXB0aW9uXFxuICBjb21tZW50ID0g
dVxcXCJcXFwiXFxuICBtc2cgPSB1XFxcIlxcXCJcXFwiJmx0O2ImZ3Q7UGhpc2hUYW5rIExvb2t1
cCZsdDsvYiZndDsgaGFzIGNvbXBsZXRlXFxuICAgICAgICAgICAgJmx0O2JyJmd0OyZsdDtiJmd0
O1VSTDombHQ7L2ImZ3Q7IHswfSZsdDsvYiZndDtcXG4gICAgICAgICAgICAmbHQ7YnImZ3Q7Jmx0
O2ImZ3Q7Rm91bmQgaW4gRGF0YWJhc2U6Jmx0Oy9iJmd0OyB7MX1cXFwiXFxcIlxcXCIuZm9ybWF0
KHVybCwgdW5pY29kZShpbl9kYXRhYmFzZSkpXFxuXFxuICBpZiBub3QgaW5fZGF0YWJhc2U6XFxu
ICAgIGNvbW1lbnQgPSB1XFxcIk5vdGhpbmcga25vd24gYWJvdXQgdGhpcyB1cmxcXFwiXFxuICBc
XG4gIGVsc2U6XFxuICAgIHBoaXNoX2lkID0gcGhpc2hfdGFua19yZXN1bHRzLmdldChcXFwicGhp
c2hfaWRcXFwiKVxcbiAgICBwaGlzaF9kZXRhaWxfcGFnZV91cmwgPSBwaGlzaF90YW5rX3Jlc3Vs
dHMuZ2V0KFxcXCJwaGlzaF9kZXRhaWxfcGFnZVxcXCIpXFxuICAgIFxcbiAgICBtc2cgPSB1XFxc
IlxcXCJcXFwiezB9XFxuICAgICAgICAgICZsdDticiZndDsmbHQ7YiZndDtQaGlzaCBJRDombHQ7
L2ImZ3Q7IHsxfVxcbiAgICAgICAgICAmbHQ7YnImZ3Q7Jmx0O2ImZ3Q7VmFsaWQgUGhpc2g6Jmx0
Oy9iJmd0OyB7Mn1cXG4gICAgICAgICAgJmx0O2JyJmd0OyZsdDtiJmd0O1ZlcmlmaWVkOiZsdDsv
YiZndDsgezN9XFxuICAgICAgICAgICZsdDticiZndDsmbHQ7YiZndDtMaW5rIHRvIFBoaXNoVGFu
azogJmx0O2EgaHJlZj17NH0mZ3Q7ezR9Jmx0Oy9hJmd0OyZsdDsvYiZndDtcXFwiXFxcIlxcXCIu
Zm9ybWF0KG1zZywgcGhpc2hfaWQsIHVcXFwiWWVzXFxcIiBpZiBpc192YWxpZCBlbHNlIHVcXFwi
Tm9cXFwiLCB1XFxcIlllc1xcXCIgaWYgaXNfdmVyaWZpZWQgZWxzZSBcXFwiTm9cXFwiLCBwaGlz
aF9kZXRhaWxfcGFnZV91cmwpXFxuICAgIFxcbiAgICBpZiBpc192ZXJpZmllZCBhbmQgaXNfdmFs
aWQ6XFxuICAgICAgY29tbWVudCA9IHVcXFwiVmVyaWZpZWQ6IElzIGEgcGhpc2hpbmcgc2l0ZVxc
XCJcXG4gIFxcbiAgICBlbGlmIGlzX3ZlcmlmaWVkIGFuZCBub3QgaXNfdmFsaWQ6XFxuICAgICAg
Y29tbWVudCA9IHVcXFwiVGhpcyBzaXRlIGlzIG5vdCBhIHBoaXNoaW5nIHNpdGVcXFwiXFxuICAg
ICAgXFxuICAgIGVsaWYgbm90IGlzX3ZlcmlmaWVkOlxcbiAgICAgIGNvbW1lbnQgPSB1XFxcIlRo
aXMgdXJsIGhhcyBub3QgYmVlbiB2ZXJpZmllZFxcXCJcXG4gIFxcbiAgbXNnID0gdVxcXCJcXFwi
XFxcInswfSZsdDticiZndDsmbHQ7YnImZ3Q7Jmx0O2ImZ3Q7Q29tbWVudDombHQ7L2ImZ3Q7IHsx
fVxcXCJcXFwiXFxcIi5mb3JtYXQobXNnLCBjb21tZW50KVxcbiAgXFxuICBhcHBlbmRfYXJ0aWZh
Y3RfZGVzY3JpcHRpb24oYXJ0aWZhY3QsIG1zZylcXG4gIGluY2lkZW50LmFkZE5vdGUoaGVscGVy
LmNyZWF0ZVJpY2hUZXh0KG1zZykpXCIsXCJwcmVfcHJvY2Vzc2luZ19zY3JpcHRcIjpcIiMgR2V0
IHRoZSB1cmwgZnJvbSB0aGUgQXJ0aWZhY3QncyBWYWx1ZVxcbmlucHV0cy5waGlzaF90YW5rX2No
ZWNrX3VybCA9IGFydGlmYWN0LnZhbHVlXCJ9PC9yZXNpbGllbnQ6ZnVuY3Rpb24+PC9leHRlbnNp
b25FbGVtZW50cz48aW5jb21pbmc+U2VxdWVuY2VGbG93XzFtaW43bzM8L2luY29taW5nPjxvdXRn
b2luZz5TZXF1ZW5jZUZsb3dfMHd0ejk4aTwvb3V0Z29pbmc+PC9zZXJ2aWNlVGFzaz48c2VxdWVu
Y2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzFtaW43bzNcIiBzb3VyY2VSZWY9XCJTdGFydEV2ZW50
XzE1NWFzeG1cIiB0YXJnZXRSZWY9XCJTZXJ2aWNlVGFza18xNXRlNHdqXCIvPjxlbmRFdmVudCBp
ZD1cIkVuZEV2ZW50XzA2eGo1ZGxcIj48aW5jb21pbmc+U2VxdWVuY2VGbG93XzB3dHo5OGk8L2lu
Y29taW5nPjwvZW5kRXZlbnQ+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18wd3R6OThp
XCIgc291cmNlUmVmPVwiU2VydmljZVRhc2tfMTV0ZTR3alwiIHRhcmdldFJlZj1cIkVuZEV2ZW50
XzA2eGo1ZGxcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdyYW0gaWQ9XCJCUE1ORGlhZ3Jh
bV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9XCJ1bmRlZmluZWRcIiBpZD1cIkJQ
TU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTdGFydEV2ZW50XzE1
NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0
PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjIzNFwiIHk9XCI4OVwiLz48YnBtbmRpOkJQTU5MYWJl
bD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjIyOVwiIHk9XCIx
MjRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5T
aGFwZSBicG1uRWxlbWVudD1cIlNlcnZpY2VUYXNrXzE1dGU0d2pcIiBpZD1cIlNlcnZpY2VUYXNr
XzE1dGU0d2pfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4
PVwiNDAzXCIgeT1cIjY3XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJw
bW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzFtaW43bzNcIiBpZD1cIlNlcXVlbmNlRmxvd18xbWlu
N28zX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIyNzBcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50
XCIgeT1cIjEwN1wiLz48b21nZGk6d2F5cG9pbnQgeD1cIjQwM1wiIHhzaTp0eXBlPVwib21nZGM6
UG9pbnRcIiB5PVwiMTA3XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0
PVwiMTNcIiB3aWR0aD1cIjkwXCIgeD1cIjI5MS41XCIgeT1cIjg1LjVcIi8+PC9icG1uZGk6QlBN
TkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwi
RW5kRXZlbnRfMDZ4ajVkbFwiIGlkPVwiRW5kRXZlbnRfMDZ4ajVkbF9kaVwiPjxvbWdkYzpCb3Vu
ZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjYxMFwiIHk9XCI4OVwiLz48YnBtbmRp
OkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCI5MFwiIHg9XCI1
ODNcIiB5PVwiMTI4XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJw
bW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18wd3R6OThpXCIgaWQ9XCJT
ZXF1ZW5jZUZsb3dfMHd0ejk4aV9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiNTAzXCIgeHNpOnR5
cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIxMDdcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI2MTBcIiB4
c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjEwN1wiLz48YnBtbmRpOkJQTU5MYWJlbD48b21n
ZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCI5MFwiIHg9XCI1MTEuNVwiIHk9XCI4NS41
XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48L2JwbW5kaTpCUE1OUGxh
bmU+PC9icG1uZGk6QlBNTkRpYWdyYW0+PC9kZWZpbml0aW9ucz4iLCAid29ya2Zsb3dfaWQiOiAi
ZXhhbXBsZV9waGlzaHRhbmtfc3VibWl0X3VybCIsICJ2ZXJzaW9uIjogMn0sICJsYXN0X21vZGlm
aWVkX3RpbWUiOiAxNTYyMDUzMjM0OTY1LCAiY3JlYXRvcl9pZCI6ICJpbnRlZ3JhdGlvbnNAZXhh
bXBsZS5jb20iLCAiYWN0aW9ucyI6IFtdLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9w
aGlzaHRhbmtfc3VibWl0X3VybCIsICJuYW1lIjogIkV4YW1wbGU6IFBoaXNoVGFuazogU3VibWl0
IFVSTCJ9XSwgImFjdGlvbnMiOiBbeyJsb2dpY190eXBlIjogImFsbCIsICJuYW1lIjogIkV4YW1w
bGU6IFBoaXNoVGFuazogU3VibWl0IFVSTCIsICJ2aWV3X2l0ZW1zIjogW10sICJ0eXBlIjogMSwg
IndvcmtmbG93cyI6IFsiZXhhbXBsZV9waGlzaHRhbmtfc3VibWl0X3VybCJdLCAib2JqZWN0X3R5
cGUiOiAiYXJ0aWZhY3QiLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ1dWlkIjogImU4NDVl
NTE1LTAzZmQtNDZkYy04MTUwLTY4NTFkMTlmNDY4YSIsICJhdXRvbWF0aW9ucyI6IFtdLCAiZXhw
b3J0X2tleSI6ICJFeGFtcGxlOiBQaGlzaFRhbms6IFN1Ym1pdCBVUkwiLCAiY29uZGl0aW9ucyI6
IFt7InR5cGUiOiBudWxsLCAiZXZhbHVhdGlvbl9pZCI6IG51bGwsICJmaWVsZF9uYW1lIjogImFy
dGlmYWN0LnR5cGUiLCAibWV0aG9kIjogImVxdWFscyIsICJ2YWx1ZSI6ICJVUkwifV0sICJpZCI6
IDQ5LCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbXX1dLCAibGF5b3V0cyI6IFtdLCAiZXhwb3J0
X2Zvcm1hdF92ZXJzaW9uIjogMiwgImlkIjogMzYsICJpbmR1c3RyaWVzIjogbnVsbCwgInBoYXNl
cyI6IFtdLCAiYWN0aW9uX29yZGVyIjogW10sICJnZW9zIjogbnVsbCwgImxvY2FsZSI6IG51bGws
ICJzZXJ2ZXJfdmVyc2lvbiI6IHsibWFqb3IiOiAzMSwgInZlcnNpb24iOiAiMzEuMC40MjU0Iiwg
ImJ1aWxkX251bWJlciI6IDQyNTQsICJtaW5vciI6IDB9LCAidGltZWZyYW1lcyI6IG51bGwsICJ3
b3Jrc3BhY2VzIjogW10sICJhdXRvbWF0aWNfdGFza3MiOiBbXSwgImZ1bmN0aW9ucyI6IFt7ImRp
c3BsYXlfbmFtZSI6ICJQaGlzaCBUYW5rIFN1Ym1pdCBVUkwiLCAiZGVzY3JpcHRpb24iOiB7ImNv
bnRlbnQiOiAiU2VhcmNoZXMgdGhlIFBoaXNoVGFuayBkYXRhYmFzZSAoaHR0cHM6Ly93d3cucGhp
c2h0YW5rLmNvbS8pIHRvIGRldGVybWluZSBpZiBhIFVSTCBpcyBhIHBoaXNoaW5nIFVSTCBvciBu
b3QuIFRoZSBpbmZvcm1hdGlvbiByZXR1cm5lZCBmcm9tIFBoaXNoVGFuayBpcyB1c2VkIHRvIHVw
ZGF0ZSB0aGUgQXJ0aWZhY3RzIGRlc2NyaXB0aW9uIGFuZCBhZGQgYSBub3RlIHRvIHRoZSBpbmNp
ZGVudC4iLCAiZm9ybWF0IjogInRleHQifSwgImNyZWF0b3IiOiB7ImRpc3BsYXlfbmFtZSI6ICJP
cmNoZXN0cmF0aW9uIEVuZ2luZSIsICJ0eXBlIjogInVzZXIiLCAiaWQiOiAzOCwgIm5hbWUiOiAi
aW50ZWdyYXRpb25zQGV4YW1wbGUuY29tIn0sICJ2aWV3X2l0ZW1zIjogW3sic2hvd19pZiI6IG51
bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNl
LCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiZTNjOWU0NDYtOTM1Yy00ZDdk
LTliYzAtOTlhZGQ2OTMyMDk5IiwgInN0ZXBfbGFiZWwiOiBudWxsfV0sICJleHBvcnRfa2V5Ijog
ImZuX3BoaXNoX3Rhbmtfc3VibWl0X3VybCIsICJ1dWlkIjogIjViOWU2NzE3LThjOWQtNGZkMy1i
ZWYzLTQ2ZmFlMDg5ZjFiMSIsICJsYXN0X21vZGlmaWVkX2J5IjogeyJkaXNwbGF5X25hbWUiOiAi
QWRtaW4gVXNlciIsICJ0eXBlIjogInVzZXIiLCAiaWQiOiA3MSwgIm5hbWUiOiAiYWRtaW5AZXhh
bXBsZS5jb20ifSwgInZlcnNpb24iOiAyLCAid29ya2Zsb3dzIjogW3siZGVzY3JpcHRpb24iOiBu
dWxsLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAiYWN0aW9ucyI6IFtdLCAibmFtZSI6ICJF
eGFtcGxlOiBQaGlzaFRhbms6IFN1Ym1pdCBVUkwiLCAid29ya2Zsb3dfaWQiOiAzNiwgInByb2dy
YW1tYXRpY19uYW1lIjogImV4YW1wbGVfcGhpc2h0YW5rX3N1Ym1pdF91cmwiLCAidXVpZCI6IG51
bGx9XSwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1NjIwNTM5NTcwNjgsICJkZXN0aW5hdGlvbl9o
YW5kbGUiOiAiZm5fcGhpc2hfdGFuayIsICJpZCI6IDY4LCAibmFtZSI6ICJmbl9waGlzaF90YW5r
X3N1Ym1pdF91cmwifV0sICJub3RpZmljYXRpb25zIjogbnVsbCwgInJlZ3VsYXRvcnMiOiBudWxs
LCAiaW5jaWRlbnRfdHlwZXMiOiBbeyJjcmVhdGVfZGF0ZSI6IDE1NjIwNTM5Njg2MTcsICJkZXNj
cmlwdGlvbiI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiZXhwb3J0X2tl
eSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiaWQiOiAwLCAibmFtZSI6
ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAidXBkYXRlX2RhdGUiOiAxNTYy
MDUzOTY4NjE3LCAidXVpZCI6ICJiZmVlYzJkNC0zNzcwLTExZTgtYWQzOS00YTAwMDQwNDRhYTAi
LCAiZW5hYmxlZCI6IGZhbHNlLCAic3lzdGVtIjogZmFsc2UsICJwYXJlbnRfaWQiOiBudWxsLCAi
aGlkZGVuIjogZmFsc2V9XSwgInNjcmlwdHMiOiBbXSwgInR5cGVzIjogW10sICJtZXNzYWdlX2Rl
c3RpbmF0aW9ucyI6IFt7InV1aWQiOiAiZDljYjU5NjItZTU0Mi00NzIzLThiZjItOGZkNzUxOWZl
Mzk4IiwgImV4cG9ydF9rZXkiOiAiZm5fcGhpc2hfdGFuayIsICJuYW1lIjogImZuX3BoaXNoX3Rh
bmsiLCAiZGVzdGluYXRpb25fdHlwZSI6IDAsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJmbl9waGlz
aF90YW5rIiwgImV4cGVjdF9hY2siOiB0cnVlLCAidXNlcnMiOiBbImludGVncmF0aW9uc0BleGFt
cGxlLmNvbSJdfV0sICJpbmNpZGVudF9hcnRpZmFjdF90eXBlcyI6IFtdLCAicm9sZXMiOiBbXSwg
ImZpZWxkcyI6IFt7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAwLCAib3BlcmF0aW9uX3Bl
cm1zIjoge30sICJ0ZXh0IjogIlNpbXVsYXRpb24iLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJw
cmVmaXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDM4LCAicmVhZF9vbmx5Ijog
dHJ1ZSwgInV1aWQiOiAiYzNmMGUzZWQtMjFlMS00ZDUzLWFmZmItZmU1Y2EzMzA4Y2NhIiwgImNo
b3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgInRvb2x0aXAiOiAiV2hldGhl
ciB0aGUgaW5jaWRlbnQgaXMgYSBzaW11bGF0aW9uIG9yIGEgcmVndWxhciBpbmNpZGVudC4gIFRo
aXMgZmllbGQgaXMgcmVhZC1vbmx5LiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0Ijog
ZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiaW5jaWRlbnQvaW5jX3RyYWlu
aW5nIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJuYW1lIjogImluY190cmFpbmluZyIs
ICJkZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwg
InZhbHVlcyI6IFtdfSwgeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEsICJvcGVyYXRp
b25fcGVybXMiOiB7fSwgInRleHQiOiAicGhpc2hfdGFua19jaGVja191cmwiLCAiYmxhbmtfb3B0
aW9uIjogZmFsc2UsICJwcmVmaXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDI0
NSwgInJlYWRfb25seSI6IGZhbHNlLCAidXVpZCI6ICJlM2M5ZTQ0Ni05MzVjLTRkN2QtOWJjMC05
OWFkZDY5MzIwOTkiLCAiY2hvc2VuIjogZmFsc2UsICJpbnB1dF90eXBlIjogInRleHQiLCAidG9v
bHRpcCI6ICJVUkwgdG8gbG9va3VwIGluIFBoaXNoVGFuaydzIERhdGFiYXNlIiwgImludGVybmFs
IjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tl
eSI6ICJfX2Z1bmN0aW9uL3BoaXNoX3RhbmtfY2hlY2tfdXJsIiwgImhpZGVfbm90aWZpY2F0aW9u
IjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICJodHRwOi8vd3d3LmV4YW1wbGUuY29tIiwgIm5hbWUi
OiAicGhpc2hfdGFua19jaGVja191cmwiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9j
aG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJyZXF1aXJlZCI6ICJhbHdheXMiLCAidmFsdWVzIjog
W119XSwgIm92ZXJyaWRlcyI6IFtdLCAiZXhwb3J0X2RhdGUiOiAxNTYyMDUzOTY2NzQ1fQ==
"""
)
| 68.047619
| 87
| 0.944335
| 351
| 15,719
| 42.153846
| 0.777778
| 0.005474
| 0.005204
| 0.006759
| 0.006218
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115909
| 0.043896
| 15,719
| 231
| 88
| 68.047619
| 0.868587
| 0.034162
| 0
| 0
| 1
| 0
| 0.954107
| 0.92809
| 0
| 1
| 0
| 0
| 0
| 1
| 0.009709
| false
| 0
| 0.014563
| 0
| 0.029126
| 0.004854
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bc9fca2edd433f3580de0a7e52107af3e3eaf6c1
| 58
|
py
|
Python
|
desafio_iafront/jobs/pedidos/__init__.py
|
LuizJunior98/desafio-iafront
|
6769fcbbe85d4a8b2570c08af65dfd87e8135526
|
[
"MIT"
] | null | null | null |
desafio_iafront/jobs/pedidos/__init__.py
|
LuizJunior98/desafio-iafront
|
6769fcbbe85d4a8b2570c08af65dfd87e8135526
|
[
"MIT"
] | null | null | null |
desafio_iafront/jobs/pedidos/__init__.py
|
LuizJunior98/desafio-iafront
|
6769fcbbe85d4a8b2570c08af65dfd87e8135526
|
[
"MIT"
] | 1
|
2020-08-10T21:55:54.000Z
|
2020-08-10T21:55:54.000Z
|
from desafio_iafront.jobs.pedidos.job_pedidos import main
| 29
| 57
| 0.87931
| 9
| 58
| 5.444444
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 58
| 1
| 58
| 58
| 0.907407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bcb65403918584abfd832d5f235cde9eb9df561b
| 41
|
py
|
Python
|
tests/components/utility_meter/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/utility_meter/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/utility_meter/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Tests for Utility Meter component."""
| 20.5
| 40
| 0.707317
| 5
| 41
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 41
| 1
| 41
| 41
| 0.805556
| 0.829268
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bce27c2ba052bbdf61d9a93abcadfc46a753e59e
| 993
|
py
|
Python
|
fsl_sub/system.py
|
mcraig-ibme/fsl_sub
|
6db9b70e90579c6fc6965c5a4eeeb8206267011c
|
[
"FTL",
"CNRI-Python",
"RSA-MD",
"Linux-OpenIB"
] | null | null | null |
fsl_sub/system.py
|
mcraig-ibme/fsl_sub
|
6db9b70e90579c6fc6965c5a4eeeb8206267011c
|
[
"FTL",
"CNRI-Python",
"RSA-MD",
"Linux-OpenIB"
] | null | null | null |
fsl_sub/system.py
|
mcraig-ibme/fsl_sub
|
6db9b70e90579c6fc6965c5a4eeeb8206267011c
|
[
"FTL",
"CNRI-Python",
"RSA-MD",
"Linux-OpenIB"
] | null | null | null |
# fsl_sub python module
# Copyright (c) 2018-2021 University of Oxford (Duncan Mortimer)
import subprocess
def system_stdout(
command, shell=False, cwd=None, timeout=None, check=True):
result = subprocess.run(
command,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=shell, cwd=cwd, timeout=timeout,
check=check, universal_newlines=True)
return result.stdout.split('\n')
def system_stderr(
command, shell=False, cwd=None, timeout=None, check=True):
result = subprocess.run(
command,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=shell, cwd=cwd, timeout=timeout,
check=check, universal_newlines=True)
return result.stderr.split('\n')
def system(
command, shell=False, cwd=None, timeout=None, check=True):
subprocess.run(
command,
stderr=subprocess.PIPE,
shell=shell, cwd=cwd, timeout=timeout,
check=check, universal_newlines=True)
| 27.583333
| 66
| 0.668681
| 119
| 993
| 5.529412
| 0.294118
| 0.106383
| 0.077508
| 0.091185
| 0.746201
| 0.746201
| 0.746201
| 0.746201
| 0.746201
| 0.679331
| 0
| 0.010336
| 0.220544
| 993
| 35
| 67
| 28.371429
| 0.839793
| 0.084592
| 0
| 0.666667
| 0
| 0
| 0.004415
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.041667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bce74d2b8dd2bacc760badd0613e0e7c9f85f087
| 90
|
py
|
Python
|
scipy/lib/__init__.py
|
lesserwhirls/scipy-cwt
|
ee673656d879d9356892621e23ed0ced3d358621
|
[
"BSD-3-Clause"
] | 8
|
2015-10-07T00:37:32.000Z
|
2022-01-21T17:02:33.000Z
|
scipy/lib/__init__.py
|
lesserwhirls/scipy-cwt
|
ee673656d879d9356892621e23ed0ced3d358621
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/lib/__init__.py
|
lesserwhirls/scipy-cwt
|
ee673656d879d9356892621e23ed0ced3d358621
|
[
"BSD-3-Clause"
] | 8
|
2015-05-09T14:23:57.000Z
|
2018-11-15T05:56:00.000Z
|
from info import __doc__, __all__
from numpy.testing import Tester
test = Tester().test
| 15
| 33
| 0.777778
| 13
| 90
| 4.769231
| 0.692308
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 90
| 5
| 34
| 18
| 0.815789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bcec920e89e09e1672ad668bfeca09ccb20d7688
| 89
|
py
|
Python
|
passgen/apps.py
|
diyajaiswal11/HackCorona
|
dc6f7803a77b7ce6325f8d61f5f4fe54076ea461
|
[
"MIT"
] | 7
|
2020-04-06T13:00:14.000Z
|
2021-08-07T04:26:53.000Z
|
passgen/apps.py
|
diyajaiswal11/HackCorona
|
dc6f7803a77b7ce6325f8d61f5f4fe54076ea461
|
[
"MIT"
] | 10
|
2020-04-07T07:07:44.000Z
|
2022-03-12T00:22:47.000Z
|
passgen/apps.py
|
diyajaiswal11/HackCorona
|
dc6f7803a77b7ce6325f8d61f5f4fe54076ea461
|
[
"MIT"
] | 2
|
2020-04-03T08:36:34.000Z
|
2021-07-27T19:22:15.000Z
|
from django.apps import AppConfig
class PassgenConfig(AppConfig):
name = 'passgen'
| 14.833333
| 33
| 0.752809
| 10
| 89
| 6.7
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168539
| 89
| 5
| 34
| 17.8
| 0.905405
| 0
| 0
| 0
| 0
| 0
| 0.078652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.666667
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
bcfe11b8e73839b147f7bcc0c4cc2f3eebd4d7f0
| 46,120
|
py
|
Python
|
include/HydrusServerResources.py
|
antonpaquin/hydrus
|
33eae1c0566cfc528cedca84b606335531968ee3
|
[
"WTFPL"
] | null | null | null |
include/HydrusServerResources.py
|
antonpaquin/hydrus
|
33eae1c0566cfc528cedca84b606335531968ee3
|
[
"WTFPL"
] | null | null | null |
include/HydrusServerResources.py
|
antonpaquin/hydrus
|
33eae1c0566cfc528cedca84b606335531968ee3
|
[
"WTFPL"
] | null | null | null |
import HydrusConstants as HC
import HydrusExceptions
import HydrusFileHandling
import HydrusImageHandling
import HydrusNetwork
import HydrusPaths
import HydrusSerialisable
import os
import time
import traceback
from twisted.internet import reactor, defer
from twisted.internet.threads import deferToThread
from twisted.web.server import NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.web.static import File as FileResource, NoRangeStaticProducer
import HydrusData
import HydrusGlobals as HG
def GenerateEris( service, domain ):
name = service.GetName()
service_type = service.GetServiceType()
welcome_text_1 = 'This is <b>' + name + '</b>,'
welcome_text_2 = 'a ' + HC.service_string_lookup[ service_type ] + '.'
welcome_text_3 = 'Software version ' + str( HC.SOFTWARE_VERSION )
welcome_text_4 = 'Network version ' + str( HC.NETWORK_VERSION )
if domain.IsLocal():
welcome_text_5 = 'It only responds to requests from localhost.'
else:
welcome_text_5 = 'It responds to requests from any host.'
return '''<html><head><title>''' + name + '''</title></head><body><pre>
<font color="red">8888 8888888</font>
<font color="red">888888888888888888888888</font>
<font color="red">8888</font>:::<font color="red">8888888888888888888888888</font>
<font color="red">8888</font>::::::<font color="red">8888888888888888888888888888</font>
<font color="red">88</font>::::::::<font color="red">888</font>:::<font color="red">8888888888888888888888888</font>
<font color="red">88888888</font>::::<font color="red">8</font>:::::::::::<font color="red">88888888888888888888</font>
<font color="red">888 8</font>::<font color="red">888888</font>::::::::::::::::::<font color="red">88888888888 888</font>
<font color="red">88</font>::::<font color="red">88888888</font>::::<font color="gray">m</font>::::::::::<font color="red">88888888888 8</font>
<font color="red">888888888888888888</font>:<font color="gray">M</font>:::::::::::<font color="red">8888888888888</font>
<font color="red">88888888888888888888</font>::::::::::::<font color="gray">M</font><font color="red">88888888888888</font>
<font color="red">8888888888888888888888</font>:::::::::<font color="gray">M</font><font color="red">8888888888888888</font>
<font color="red">8888888888888888888888</font>:::::::<font color="gray">M</font><font color="red">888888888888888888</font>
<font color="red">8888888888888888</font>::<font color="red">88888</font>::::::<font color="gray">M</font><font color="red">88888888888888888888</font>
<font color="red">88888888888888888</font>:::<font color="red">88888</font>:::::<font color="gray">M</font><font color="red">888888888888888 8888</font>
<font color="red">88888888888888888</font>:::<font color="red">88888</font>::::<font color="gray">M</font>::<font color="black">;o</font><font color="maroon">*</font><font color="green">M</font><font color="maroon">*</font><font color="black">o;</font><font color="red">888888888 88</font>
<font color="red">88888888888888888</font>:::<font color="red">8888</font>:::::<font color="gray">M</font>:::::::::::<font color="red">88888888 8</font>
<font color="red">88888888888888888</font>::::<font color="red">88</font>::::::<font color="gray">M</font>:<font color="gray">;</font>:::::::::::<font color="red">888888888</font>
<font color="red">8888888888888888888</font>:::<font color="red">8</font>::::::<font color="gray">M</font>::<font color="gray">aAa</font>::::::::<font color="gray">M</font><font color="red">8888888888 8</font>
<font color="red">88 8888888888</font>::<font color="red">88</font>::::<font color="red">8</font>::::<font color="gray">M</font>:::::::::::::<font color="red">888888888888888 8888</font>
<font color="red">88 88888888888</font>:::<font color="red">8</font>:::::::::<font color="gray">M</font>::::::::::;::<font color="red">88</font><font color="black">:</font><font color="red">88888888888888888</font>
<font color="red">8 8888888888888</font>:::::::::::<font color="gray">M</font>::<font color="violet">"@@@@@@@"</font>::::<font color="red">8</font><font color="gray">w</font><font color="red">8888888888888888</font>
<font color="red">88888888888</font>:<font color="red">888</font>::::::::::<font color="gray">M</font>:::::<font color="violet">"@a@"</font>:::::<font color="gray">M</font><font color="red">8</font><font color="gray">i</font><font color="red">888888888888888</font>
<font color="red">8888888888</font>::::<font color="red">88</font>:::::::::<font color="gray">M</font><font color="red">88</font>:::::::::::::<font color="gray">M</font><font color="red">88</font><font color="gray">z</font><font color="red">88888888888888888</font>
<font color="red">8888888888</font>:::::<font color="red">8</font>:::::::::<font color="gray">M</font><font color="red">88888</font>:::::::::<font color="gray">MM</font><font color="red">888</font><font color="gray">!</font><font color="red">888888888888888888</font>
<font color="red">888888888</font>:::::<font color="red">8</font>:::::::::<font color="gray">M</font><font color="red">8888888</font><font color="gray">MAmmmAMVMM</font><font color="red">888</font><font color="gray">*</font><font color="red">88888888 88888888</font>
<font color="red">888888</font> <font color="gray">M</font>:::::::::::::::<font color="gray">M</font><font color="red">888888888</font>:::::::<font color="gray">MM</font><font color="red">88888888888888 8888888</font>
<font color="red">8888</font> <font color="gray">M</font>::::::::::::::<font color="gray">M</font><font color="red">88888888888</font>::::::<font color="gray">MM</font><font color="red">888888888888888 88888</font>
<font color="red">888</font> <font color="gray">M</font>:::::::::::::<font color="gray">M</font><font color="red">8888888888888</font><font color="gray">M</font>:::::<font color="gray">mM</font><font color="red">888888888888888 8888</font>
<font color="red">888</font> <font color="gray">M</font>::::::::::::<font color="gray">M</font><font color="red">8888</font>:<font color="red">888888888888</font>::::<font color="gray">m</font>::<font color="gray">Mm</font><font color="red">88888 888888 8888</font>
<font color="red">88</font> <font color="gray">M</font>::::::::::::<font color="red">8888</font>:<font color="red">88888888888888888</font>::::::<font color="gray">Mm</font><font color="red">8 88888 888</font>
<font color="red">88</font> <font color="gray">M</font>::::::::::<font color="red">8888</font><font color="gray">M</font>::<font color="red">88888</font>::<font color="red">888888888888</font>:::::::<font color="gray">Mm</font><font color="red">88888 88</font>
<font color="red">8</font> <font color="gray">MM</font>::::::::<font color="red">8888</font><font color="gray">M</font>:::<font color="red">8888</font>:::::<font color="red">888888888888</font>::::::::<font color="gray">Mm</font><font color="red">8 4</font> ''' + welcome_text_1 + '''
<font color="red">8</font><font color="gray">M</font>:::::::<font color="red">8888</font><font color="gray">M</font>:::::<font color="red">888</font>:::::::<font color="red">88</font>:::<font color="red">8888888</font>::::::::<font color="gray">Mm</font> <font color="red">2</font> ''' + welcome_text_2 + '''
<font color="red">88</font><font color="gray">MM</font>:::::<font color="red">8888</font><font color="gray">M</font>:::::::<font color="red">88</font>::::::::<font color="red">8</font>:::::<font color="red">888888</font>:::<font color="gray">M</font>:::::<font color="gray">M</font>
<font color="red">8888</font><font color="gray">M</font>:::::<font color="red">888</font><font color="gray">MM</font>::::::::<font color="red">8</font>:::::::::::<font color="gray">M</font>::::<font color="red">8888</font>::::<font color="gray">M</font>::::<font color="gray">M</font> ''' + welcome_text_3 + '''
<font color="red">88888</font><font color="gray">M</font>:::::<font color="red">88</font>:<font color="gray">M</font>::::::::::<font color="red">8</font>:::::::::::<font color="gray">M</font>:::<font color="red">8888</font>::::::<font color="gray">M</font>::<font color="gray">M</font> ''' + welcome_text_4 + '''
<font color="red">88 888</font><font color="gray">MM</font>:::<font color="red">888</font>:<font color="gray">M</font>:::::::::::::::::::::::<font color="gray">M</font>:<font color="red">8888</font>:::::::::<font color="gray">M</font>:
<font color="red">8 88888</font><font color="gray">M</font>:::<font color="red">88</font>::<font color="gray">M</font>:::::::::::::::::::::::<font color="gray">MM</font>:<font color="red">88</font>::::::::::::<font color="gray">M</font> ''' + welcome_text_5 + '''
<font color="red">88888</font><font color="gray">M</font>:::<font color="red">88</font>::<font color="gray">M</font>::::::::::<font color="thistle">*88*</font>::::::::::<font color="gray">M</font>:<font color="red">88</font>::::::::::::::<font color="gray">M</font>
<font color="red">888888</font><font color="gray">M</font>:::<font color="red">88</font>::<font color="gray">M</font>:::::::::<font color="thistle">88@@88</font>:::::::::<font color="gray">M</font>::<font color="red">88</font>::::::::::::::<font color="gray">M</font>
<font color="red">888888</font><font color="gray">MM</font>::<font color="red">88</font>::<font color="gray">MM</font>::::::::<font color="thistle">88@@88</font>:::::::::<font color="gray">M</font>:::<font color="red">8</font>::::::::::::::<font color="thistle">*8</font>
<font color="red">88888</font> <font color="gray">M</font>:::<font color="red">8</font>::<font color="gray">MM</font>:::::::::<font color="thistle">*88*</font>::::::::::<font color="gray">M</font>:::::::::::::::::<font color="thistle">88@@</font>
<font color="red">8888</font> <font color="gray">MM</font>::::::<font color="gray">MM</font>:::::::::::::::::::::<font color="gray">MM</font>:::::::::::::::::<font color="thistle">88@@</font>
<font color="red">888</font> <font color="gray">M</font>:::::::<font color="gray">MM</font>:::::::::::::::::::<font color="gray">MM</font>::<font color="gray">M</font>::::::::::::::::<font color="thistle">*8</font>
<font color="red">888</font> <font color="gray">MM</font>:::::::<font color="gray">MMM</font>::::::::::::::::<font color="gray">MM</font>:::<font color="gray">MM</font>:::::::::::::::<font color="gray">M</font>
<font color="red">88</font> <font color="gray">M</font>::::::::<font color="gray">MMMM</font>:::::::::::<font color="gray">MMMM</font>:::::<font color="gray">MM</font>::::::::::::<font color="gray">MM</font>
<font color="red">88</font> <font color="gray">MM</font>:::::::::<font color="gray">MMMMMMMMMMMMMMM</font>::::::::<font color="gray">MMM</font>::::::::<font color="gray">MMM</font>
<font color="red">88</font> <font color="gray">MM</font>::::::::::::<font color="gray">MMMMMMM</font>::::::::::::::<font color="gray">MMMMMMMMMM</font>
<font color="red">88 8</font><font color="gray">MM</font>::::::::::::::::::::::::::::::::::<font color="gray">MMMMMM</font>
<font color="red">8 88</font><font color="gray">MM</font>::::::::::::::::::::::<font color="gray">M</font>:::<font color="gray">M</font>::::::::<font color="gray">MM</font>
<font color="red">888</font><font color="gray">MM</font>::::::::::::::::::<font color="gray">MM</font>::::::<font color="gray">MM</font>::::::<font color="gray">MM</font>
<font color="red">88888</font><font color="gray">MM</font>:::::::::::::::<font color="gray">MMM</font>:::::::<font color="gray">mM</font>:::::<font color="gray">MM</font>
<font color="red">888888</font><font color="gray">MM</font>:::::::::::::<font color="gray">MMM</font>:::::::::<font color="gray">MMM</font>:::<font color="gray">M</font>
<font color="red">88888888</font><font color="gray">MM</font>:::::::::::<font color="gray">MMM</font>:::::::::::<font color="gray">MM</font>:::<font color="gray">M</font>
<font color="red">88 8888888</font><font color="gray">M</font>:::::::::<font color="gray">MMM</font>::::::::::::::<font color="gray">M</font>:::<font color="gray">M</font>
<font color="red">8 888888</font> <font color="gray">M</font>:::::::<font color="gray">MM</font>:::::::::::::::::<font color="gray">M</font>:::<font color="gray">M</font>:
<font color="red">888888</font> <font color="gray">M</font>::::::<font color="gray">M</font>:::::::::::::::::::<font color="gray">M</font>:::<font color="gray">MM</font>
<font color="red">888888</font> <font color="gray">M</font>:::::<font color="gray">M</font>::::::::::::::::::::::::<font color="gray">M</font>:<font color="gray">M</font>
<font color="red">888888</font> <font color="gray">M</font>:::::<font color="gray">M</font>:::::::::<font color="gray">@</font>::::::::::::::<font color="gray">M</font>::<font color="gray">M</font>
<font color="red">88888</font> <font color="gray">M</font>::::::::::::::<font color="gray">@@</font>:::::::::::::::<font color="gray">M</font>::<font color="gray">M</font>
<font color="red">88888</font> <font color="gray">M</font>::::::::::::::<font color="gray">@@@</font>::::::::::::::::<font color="gray">M</font>::<font color="gray">M</font>
<font color="red">88888</font> <font color="gray">M</font>:::::::::::::::<font color="gray">@@</font>::::::::::::::::::<font color="gray">M</font>::<font color="gray">M</font>
<font color="red">88888</font> <font color="gray">M</font>:::::<font color="gray">m</font>::::::::::<font color="gray">@</font>::::::::::<font color="gray">Mm</font>:::::::<font color="gray">M</font>:::<font color="gray">M</font>
<font color="red">8888</font> <font color="gray">M</font>:::::<font color="gray">M</font>:::::::::::::::::::::::<font color="gray">MM</font>:::::::<font color="gray">M</font>:::<font color="gray">M</font>
<font color="red">8888</font> <font color="gray">M</font>:::::<font color="gray">M</font>:::::::::::::::::::::::<font color="gray">MMM</font>::::::::<font color="gray">M</font>:::<font color="gray">M</font>
<font color="red">888</font> <font color="gray">M</font>:::::<font color="gray">Mm</font>::::::::::::::::::::::<font color="gray">MMM</font>:::::::::<font color="gray">M</font>::::<font color="gray">M</font>
<font color="red">8888</font> <font color="gray">MM</font>::::<font color="gray">Mm</font>:::::::::::::::::::::<font color="gray">MMMM</font>:::::::::<font color="gray">m</font>::<font color="gray">m</font>:::<font color="gray">M</font>
<font color="red">888</font> <font color="gray">M</font>:::::<font color="gray">M</font>::::::::::::::::::::<font color="gray">MMM</font>::::::::::::<font color="gray">M</font>::<font color="gray">mm</font>:::<font color="gray">M</font>
<font color="red">8888</font> <font color="gray">MM</font>:::::::::::::::::::::::::<font color="gray">MM</font>:::::::::::::<font color="gray">mM</font>::<font color="gray">MM</font>:::<font color="gray">M</font>:
<font color="gray">M</font>:::::::::::::::::::::::::<font color="gray">M</font>:::::::::::::::<font color="gray">mM</font>::<font color="gray">MM</font>:::<font color="gray">Mm</font>
<font color="gray">MM</font>::::::<font color="gray">m</font>:::::::::::::::::::::::::::::::::::<font color="gray">M</font>::<font color="gray">MM</font>:::<font color="gray">MM</font>
<font color="gray">M</font>::::::::<font color="gray">M</font>:::::::::::::::::::::::::::::::::::<font color="gray">M</font>::<font color="gray">M</font>:::<font color="gray">MM</font>
<font color="gray">MM</font>:::::::::<font color="gray">M</font>:::::::::::::<font color="gray">M</font>:::::::::::::::::::::<font color="gray">M</font>:<font color="gray">M</font>:::<font color="gray">MM</font>
<font color="gray">M</font>:::::::::::<font color="gray">M</font><font color="maroon">88</font>:::::::::<font color="gray">M</font>:::::::::::::::::::::::<font color="gray">MM</font>::<font color="gray">MMM</font>
<font color="gray">M</font>::::::::::::<font color="maroon">8888888888</font><font color="gray">M</font>::::::::::::::::::::::::<font color="gray">MM</font>::<font color="gray">MM</font>
<font color="gray">M</font>:::::::::::::<font color="maroon">88888888</font><font color="gray">M</font>:::::::::::::::::::::::::<font color="gray">M</font>::<font color="gray">MM</font>
<font color="gray">M</font>::::::::::::::<font color="maroon">888888</font><font color="gray">M</font>:::::::::::::::::::::::::<font color="gray">M</font>::<font color="gray">MM</font>
<font color="gray">M</font>:::::::::::::::<font color="maroon">88888</font><font color="gray">M</font>:::::::::::::::::::::::::<font color="gray">M</font>:<font color="gray">MM</font>
<font color="gray">M</font>:::::::::::::::::<font color="maroon">88</font><font color="gray">M</font>::::::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">M</font>:::::::::::::::::::<font color="gray">M</font>::::::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>:::::::::::::::::<font color="gray">M</font>::::::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">M</font>:::::::::::::::::<font color="gray">M</font>::::::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>:::::::::::::::<font color="gray">M</font>::::::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">M</font>:::::::::::::::<font color="gray">M</font>:::::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>:::::::::::::<font color="gray">M</font>:::::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">M</font>:::::::::::::<font color="gray">M</font>::::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>:::::::::::<font color="gray">M</font>::::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">M</font>:::::::::::<font color="gray">M</font>:::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>:::::::::<font color="gray">M</font>:::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">M</font>:::::::::<font color="gray">M</font>::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>:::::::<font color="gray">M</font>::::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>::::::<font color="gray">M</font>:::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>:::::<font color="gray">M</font>:::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>::::<font color="gray">M</font>::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>:::<font color="gray">M</font>::::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>::<font color="gray">M</font>:::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>:<font color="gray">M</font>:::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MMM</font>::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>::::::::::::::::::<font color="gray">MMM</font>
<font color="gray">M</font>:::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>::::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>:::::::::::::::<font color="gray">MMM</font>
<font color="gray">MM</font>::::<font color="gray">M</font>:::::::::<font color="gray">MMM</font>:
<font color="gray">mMM</font>::::<font color="gray">MM</font>:::::::<font color="gray">MMMM</font>
<font color="gray">MMM</font>:::::::::::<font color="gray">MMM</font>:<font color="gray">M</font>
<font color="gray">mMM</font>:::<font color="gray">M</font>:::::::<font color="gray">M</font>:<font color="gray">M</font>:<font color="gray">M</font>
<font color="gray">MM</font>::<font color="gray">MMMM</font>:::::::<font color="gray">M</font>:<font color="gray">M</font>
<font color="gray">MM</font>::<font color="gray">MMM</font>::::::::<font color="gray">M</font>:<font color="gray">M</font>
<font color="gray">mMM</font>::<font color="gray">MM</font>::::::::<font color="gray">M</font>:<font color="gray">M</font>
<font color="gray">MM</font>::<font color="gray">MM</font>:::::::::<font color="gray">M</font>:<font color="gray">M</font>
<font color="gray">MM</font>::<font color="gray">MM</font>::::::::::<font color="gray">M</font>:<font color="gray">m</font>
<font color="gray">MM</font>:::<font color="gray">M</font>:::::::::::<font color="gray">MM</font>
<font color="gray">MMM</font>:::::::::::::::<font color="gray">M</font>:
<font color="gray">MMM</font>:::::::::::::::<font color="gray">M</font>:
<font color="gray">MMM</font>::::::::::::::::<font color="gray">M</font>
<font color="gray">MMM</font>::::::::::::::::<font color="gray">M</font>
<font color="gray">MMM</font>::::::::::::::::<font color="gray">Mm</font>
<font color="gray">MM</font>::::::::::::::::<font color="gray">MM</font>
<font color="gray">MMM</font>:::::::::::::::<font color="gray">MM</font>
<font color="gray">MMM</font>:::::::::::::::<font color="gray">MM</font>
<font color="gray">MMM</font>:::::::::::::::<font color="gray">MM</font>
<font color="gray">MMM</font>:::::::::::::::<font color="gray">MM</font>
<font color="gray">MM</font>::::::::::::::<font color="gray">MMM</font>
<font color="gray">MMM</font>:::::::::::::<font color="gray">MM</font>
<font color="gray">MMM</font>:::::::::::::<font color="gray">MM</font>
<font color="gray">MMM</font>::::::::::::<font color="gray">MM</font>
<font color="gray">MM</font>::::::::::::<font color="gray">MM</font>
<font color="gray">MM</font>::::::::::::<font color="gray">MM</font>
<font color="gray">MM</font>:::::::::::<font color="gray">MM</font>
<font color="gray">MMM</font>::::::::::<font color="gray">MM</font>
<font color="gray">MMM</font>::::::::::<font color="gray">MM</font>
<font color="gray">MM</font>:::::::::<font color="gray">MM</font>
<font color="gray">MMM</font>::::::::<font color="gray">MM</font>
<font color="gray">MMM</font>::::::::<font color="gray">MM</font>
<font color="gray">MM</font>::::::::<font color="gray">MM</font>
<font color="gray">MMM</font>::::::<font color="gray">MM</font>
<font color="gray">MMM</font>::::::<font color="gray">MM</font>
<font color="gray">MM</font>::::::<font color="gray">MM</font>
<font color="gray">MM</font>::::::<font color="gray">MM</font>
<font color="gray">MM</font>:::::<font color="gray">MM</font>
<font color="gray">MM</font>:::::<font color="gray">MM</font>:
<font color="gray">MM</font>:::::<font color="gray">M</font>:<font color="gray">M</font>
<font color="gray">MM</font>:::::<font color="gray">M</font>:<font color="gray">M</font>
:<font color="gray">M</font>::::::<font color="gray">M</font>:
<font color="gray">M</font>:<font color="gray">M</font>:::::::<font color="gray">M</font>
<font color="gray">M</font>:::<font color="gray">M</font>::::::<font color="gray">M</font>
<font color="gray">M</font>::::<font color="gray">M</font>::::::<font color="gray">M</font>
<font color="gray">M</font>:::::<font color="gray">M</font>:::::::<font color="gray">M</font>
<font color="gray">M</font>::::::<font color="gray">MM</font>:::::::<font color="gray">M</font>
<font color="gray">M</font>:::::::<font color="gray">M</font>::::::::<font color="gray">M</font>
<font color="gray">M;</font>:<font color="gray">;</font>::::<font color="gray">M</font>:::::::::<font color="gray">M</font>
<font color="gray">M</font>:<font color="gray">m</font>:<font color="gray">;</font>:::<font color="gray">M</font>::::::::::<font color="gray">M</font>
<font color="gray">MM</font>:<font color="gray">m</font>:<font color="gray">m</font>::<font color="gray">M</font>::::::::<font color="gray">;</font>:<font color="gray">M</font>
<font color="gray">MM</font>:<font color="gray">m</font>::<font color="gray">MM</font>:::::::<font color="gray">;</font>:<font color="gray">;M</font>
<font color="gray">MM</font>::<font color="gray">MMM</font>::::::<font color="gray">;</font>:<font color="gray">m</font>:<font color="gray">M</font>
<font color="gray">MMMM MM</font>::::<font color="gray">m</font>:<font color="gray">m</font>:<font color="gray">MM</font>
<font color="gray">MM</font>::::<font color="gray">m</font>:<font color="gray">MM</font>
<font color="gray">MM</font>::::<font color="gray">MM</font>
<font color="gray">MM</font>::<font color="gray">MM</font>
<font color="gray">MMMM</font>
</pre></body></html>'''
def ParseFileArguments( path ):
HydrusImageHandling.ConvertToPngIfBmp( path )
hash = HydrusFileHandling.GetHashFromPath( path )
try:
mime = HydrusFileHandling.GetMime( path )
if mime in HC.IMAGES and HydrusImageHandling.IsDecompressionBomb( path ):
raise HydrusExceptions.ForbiddenException( 'File seemed to be a Decompression Bomb!' )
( size, mime, width, height, duration, num_frames, num_words ) = HydrusFileHandling.GetFileInfo( path, mime )
except HydrusExceptions.SizeException:
raise HydrusExceptions.ForbiddenException( 'File is of zero length!' )
except HydrusExceptions.MimeException:
raise HydrusExceptions.ForbiddenException( 'Filetype is not permitted!' )
except Exception as e:
raise HydrusExceptions.ForbiddenException( HydrusData.ToUnicode( e ) )
args = {}
args[ 'path' ] = path
args[ 'hash' ] = hash
args[ 'size' ] = size
args[ 'mime' ] = mime
if width is not None: args[ 'width' ] = width
if height is not None: args[ 'height' ] = height
if duration is not None: args[ 'duration' ] = duration
if num_frames is not None: args[ 'num_frames' ] = num_frames
if num_words is not None: args[ 'num_words' ] = num_words
if mime in HC.MIMES_WITH_THUMBNAILS:
try:
thumbnail = HydrusFileHandling.GenerateThumbnail( path, mime )
except Exception as e:
tb = traceback.format_exc()
raise HydrusExceptions.ForbiddenException( 'Could not generate thumbnail from that file:' + os.linesep + tb )
args[ 'thumbnail' ] = thumbnail
return args
hydrus_favicon = FileResource( os.path.join( HC.STATIC_DIR, 'hydrus.ico' ), defaultType = 'image/x-icon' )
class HydrusDomain( object ):
def __init__( self, local_only ):
self._local_only = local_only
def CheckValid( self, client_ip ):
if self._local_only and client_ip != '127.0.0.1':
raise HydrusExceptions.ForbiddenException( 'Only local access allowed!' )
def IsLocal( self ):
return self._local_only
class HydrusResource( Resource ):
def __init__( self, service, domain ):
Resource.__init__( self )
self._service = service
self._service_key = self._service.GetServiceKey()
self._domain = domain
service_type = self._service.GetServiceType()
self._server_version_string = HC.service_string_lookup[ service_type ] + '/' + str( HC.NETWORK_VERSION )
def _callbackCheckRestrictions( self, request ):
self._domain.CheckValid( request.getClientIP() )
self._checkService( request )
self._checkUserAgent( request )
return request
def _checkService( self, request ):
if HG.server_busy:
raise HydrusExceptions.ServerBusyException( 'This server is busy, please try again later.' )
return request
def _checkUserAgent( self, request ):
request.is_hydrus_user_agent = False
if request.requestHeaders.hasHeader( 'User-Agent' ):
user_agent_texts = request.requestHeaders.getRawHeaders( 'User-Agent' )
user_agent_text = user_agent_texts[0]
try:
user_agents = user_agent_text.split( ' ' )
except: return # crazy user agent string, so just assume not a hydrus client
for user_agent in user_agents:
if '/' in user_agent:
( client, network_version ) = user_agent.split( '/', 1 )
if client == 'hydrus':
request.is_hydrus_user_agent = True
network_version = int( network_version )
if network_version == HC.NETWORK_VERSION: return
else:
if network_version < HC.NETWORK_VERSION: message = 'Your client is out of date; please download the latest release.'
else: message = 'This server is out of date; please ask its admin to update to the latest release.'
raise HydrusExceptions.NetworkVersionException( 'Network version mismatch! This server\'s network version is ' + str( HC.NETWORK_VERSION ) + ', whereas your client\'s is ' + str( network_version ) + '! ' + message )
def _callbackParseGETArgs( self, request ):
hydrus_args = HydrusNetwork.ParseGETArgs( request.args )
request.hydrus_args = hydrus_args
return request
def _callbackParsePOSTArgs( self, request ):
request.content.seek( 0 )
if not request.requestHeaders.hasHeader( 'Content-Type' ):
hydrus_args = {}
else:
content_types = request.requestHeaders.getRawHeaders( 'Content-Type' )
content_type = content_types[0]
try:
mime = HC.mime_enum_lookup[ content_type ]
except:
raise HydrusExceptions.ForbiddenException( 'Did not recognise Content-Type header!' )
total_bytes_read = 0
if mime == HC.APPLICATION_JSON:
json_string = request.content.read()
total_bytes_read += len( json_string )
hydrus_args = HydrusNetwork.ParseBodyString( json_string )
else:
( os_file_handle, temp_path ) = HydrusPaths.GetTempPath()
request.temp_file_info = ( os_file_handle, temp_path )
with open( temp_path, 'wb' ) as f:
for block in HydrusPaths.ReadFileLikeAsBlocks( request.content ):
f.write( block )
total_bytes_read += len( block )
hydrus_args = ParseFileArguments( temp_path )
self._reportDataUsed( request, total_bytes_read )
request.hydrus_args = hydrus_args
return request
def _callbackRenderResponseContext( self, request ):
self._CleanUpTempFile( request )
if request.channel is None:
raise HydrusExceptions.ServerException( 'Channel was closed! Probably a connectionLost that was not caught!' )
response_context = request.hydrus_response_context
status_code = response_context.GetStatusCode()
request.setResponseCode( status_code )
for ( k, v, kwargs ) in response_context.GetCookies():
request.addCookie( k, v, **kwargs )
do_finish = True
if response_context.HasPath():
path = response_context.GetPath()
size = os.path.getsize( path )
mime = response_context.GetMime()
content_type = HC.mime_string_lookup[ mime ]
content_length = size
( base, filename ) = os.path.split( path )
content_disposition = 'inline; filename="' + filename + '"'
# can't be unicode!
request.setHeader( 'Content-Type', str( content_type ) )
request.setHeader( 'Content-Length', str( content_length ) )
request.setHeader( 'Content-Disposition', str( content_disposition ) )
request.setHeader( 'Expires', time.strftime( '%a, %d %b %Y %H:%M:%S GMT', time.gmtime( time.time() + 86400 * 365 ) ) )
request.setHeader( 'Cache-Control', str( 86400 * 365 ) )
fileObject = open( path, 'rb' )
producer = NoRangeStaticProducer( request, fileObject )
producer.start()
do_finish = False
elif response_context.HasBody():
mime = response_context.GetMime()
body = response_context.GetBody()
content_type = HC.mime_string_lookup[ mime ]
content_length = len( body )
content_disposition = 'inline'
request.setHeader( 'Content-Type', content_type )
request.setHeader( 'Content-Length', str( content_length ) )
request.setHeader( 'Content-Disposition', content_disposition )
request.write( HydrusData.ToByteString( body ) )
else:
content_length = 0
request.setHeader( 'Content-Length', str( content_length ) )
self._reportDataUsed( request, content_length )
self._reportRequestUsed( request )
if do_finish:
request.finish()
def _callbackDoGETJob( self, request ):
def wrap_thread_result( response_context ):
request.hydrus_response_context = response_context
return request
d = deferToThread( self._threadDoGETJob, request )
d.addCallback( wrap_thread_result )
return d
def _callbackDoPOSTJob( self, request ):
def wrap_thread_result( response_context ):
request.hydrus_response_context = response_context
return request
d = deferToThread( self._threadDoPOSTJob, request )
d.addCallback( wrap_thread_result )
return d
def _errbackDisconnected( self, failure, request_deferred ):
request_deferred.cancel()
def _errbackHandleEmergencyError( self, failure, request ):
try: self._CleanUpTempFile( request )
except: pass
try: HydrusData.DebugPrint( failure.getTraceback() )
except: pass
if request.channel is not None:
try: request.setResponseCode( 500 )
except: pass
try: request.write( failure.getTraceback() )
except: pass
if not request.finished:
try: request.finish()
except: pass
def _errbackHandleProcessingError( self, failure, request ):
    """Map a processing failure to an appropriate HTTP error response.

    Known hydrus exceptions become their matching status codes; anything
    else is logged and reported as a 500 with the traceback in the body.
    """
    self._CleanUpTempFile( request )
    default_mime = HC.TEXT_HTML
    encode = HydrusData.ToByteString
    # Exception class -> HTTP status code for the simple pass-the-message cases.
    status_lookup = {
        HydrusExceptions.BandwidthException : 509,
        HydrusExceptions.PermissionException : 401,
        HydrusExceptions.ForbiddenException : 403,
        HydrusExceptions.NotFoundException : 404,
        HydrusExceptions.DataMissing : 404,
        HydrusExceptions.FileMissingException : 404,
        HydrusExceptions.NetworkVersionException : 426,
        HydrusExceptions.ServerBusyException : 503,
        HydrusExceptions.SessionException : 419,
    }
    if failure.type == KeyError:
        # Missing request parameters surface as KeyError during parsing.
        body = encode( 'It appears one or more parameters required for that request were missing:' + os.linesep + failure.getTraceback() )
        response_context = ResponseContext( 400, mime = default_mime, body = body )
    elif failure.type in status_lookup:
        response_context = ResponseContext( status_lookup[ failure.type ], mime = default_mime, body = encode( failure.value ) )
    else:
        # Unexpected error: log it and dump the traceback to the client.
        HydrusData.DebugPrint( failure.getTraceback() )
        body = encode( 'The repository encountered an error it could not handle! Here is a dump of what happened, which will also be written to your client.log file. If it persists, please forward it to hydrus.admin@gmail.com:' + os.linesep * 2 + failure.getTraceback() )
        response_context = ResponseContext( 500, mime = default_mime, body = body )
    request.hydrus_response_context = response_context
    return request
def _parseAccessKey( self, request ):
    """Extract and hex-decode the access key from the Hydrus-Key header.

    Raises PermissionException when the header is absent and
    ForbiddenException when it cannot be decoded.
    """
    if not request.requestHeaders.hasHeader( 'Hydrus-Key' ):
        raise HydrusExceptions.PermissionException( 'No hydrus key header found!' )
    hex_key = request.requestHeaders.getRawHeaders( 'Hydrus-Key' )[0]
    try:
        return hex_key.decode( 'hex' )
    except:
        raise HydrusExceptions.ForbiddenException( 'Could not parse the hydrus key!' )
def _reportDataUsed( self, request, num_bytes ):
    # Record bandwidth use with both the service and the global controller.
    for reporter in ( self._service, HG.controller ):
        reporter.ReportDataUsed( num_bytes )
def _reportRequestUsed( self, request ):
    # Record one served request with both the service and the global controller.
    for reporter in ( self._service, HG.controller ):
        reporter.ReportRequestUsed()
def _threadDoGETJob( self, request ):
    # Default GET worker; subclasses override this with real behaviour.
    message = 'This service does not support that request!'
    raise HydrusExceptions.NotFoundException( message )
def _threadDoPOSTJob( self, request ):
    # Default POST worker; subclasses override this with real behaviour.
    message = 'This service does not support that request!'
    raise HydrusExceptions.NotFoundException( message )
def _CleanUpTempFile( self, request ):
    """Release any temp file a POST body parser attached to this request."""
    if not hasattr( request, 'temp_file_info' ):
        return
    os_file_handle, temp_path = request.temp_file_info
    HydrusPaths.CleanUpTempPath( os_file_handle, temp_path )
    del request.temp_file_info
def render_GET( self, request ):
    """Start the asynchronous GET pipeline; the response is written later.

    Builds a deferred chain (restrictions -> arg parsing -> job -> render)
    with two errback layers, then fires it on the next reactor tick.
    """
    request.setHeader( 'Server', self._server_version_string )
    deferred = defer.Deferred()
    for step in ( self._callbackCheckRestrictions,
                  self._callbackParseGETArgs,
                  self._callbackDoGETJob ):
        deferred.addCallback( step )
    # Processing errors render a proper error response...
    deferred.addErrback( self._errbackHandleProcessingError, request )
    deferred.addCallback( self._callbackRenderResponseContext )
    # ...and anything the renderer itself throws hits the emergency handler.
    deferred.addErrback( self._errbackHandleEmergencyError, request )
    reactor.callLater( 0, deferred.callback, request )
    # Cancel the pipeline if the client disconnects early.
    request.notifyFinish().addErrback( self._errbackDisconnected, deferred )
    return NOT_DONE_YET
def render_POST( self, request ):
    """Start the asynchronous POST pipeline; the response is written later.

    Identical shape to render_GET, with the POST-specific arg parser and job.
    """
    request.setHeader( 'Server', self._server_version_string )
    deferred = defer.Deferred()
    for step in ( self._callbackCheckRestrictions,
                  self._callbackParsePOSTArgs,
                  self._callbackDoPOSTJob ):
        deferred.addCallback( step )
    deferred.addErrback( self._errbackHandleProcessingError, request )
    deferred.addCallback( self._callbackRenderResponseContext )
    deferred.addErrback( self._errbackHandleEmergencyError, request )
    reactor.callLater( 0, deferred.callback, request )
    request.notifyFinish().addErrback( self._errbackDisconnected, deferred )
    return NOT_DONE_YET
class HydrusResourceRobotsTXT( HydrusResource ):
    """Serves a robots.txt that tells every crawler to stay out."""
    def _threadDoGETJob( self, request ):
        robots_directives = 'User-agent: *\nDisallow: /'
        return ResponseContext( 200, mime = HC.TEXT_PLAIN, body = robots_directives )
class HydrusResourceWelcome( HydrusResource ):
    """Serves the service's HTML welcome page."""
    def _threadDoGETJob( self, request ):
        welcome_html = GenerateEris( self._service, self._domain )
        return ResponseContext( 200, mime = HC.TEXT_HTML, body = welcome_html )
class ResponseContext( object ):
    """Bundle describing how an HTTP response should be rendered.

    Carries either an in-memory body or a filesystem path (mutually
    exclusive in practice; check HasBody/HasPath), plus status, mime
    and cookies.
    """
    def __init__( self, status_code, mime = HC.APPLICATION_JSON, body = None, path = None, cookies = None ):
        # Serialisable bodies are flattened to their network string form up front.
        if isinstance( body, HydrusSerialisable.SerialisableBase ):
            body = body.DumpToNetworkString()
        self._status_code = status_code
        self._mime = mime
        self._body = body
        self._path = path
        self._cookies = [] if cookies is None else cookies
    def GetBody( self ):
        return self._body
    def GetCookies( self ):
        return self._cookies
    def GetLength( self ):
        # NOTE(review): assumes a body is present — callers should check HasBody first.
        return len( self._body )
    def GetMime( self ):
        return self._mime
    def GetPath( self ):
        return self._path
    def GetStatusCode( self ):
        return self._status_code
    def HasBody( self ):
        return self._body is not None
    def HasPath( self ):
        return self._path is not None
| 57.43462
| 349
| 0.530681
| 5,084
| 46,120
| 4.747443
| 0.081629
| 0.246478
| 0.352793
| 0.334562
| 0.670948
| 0.645799
| 0.628812
| 0.62181
| 0.60793
| 0.553779
| 0
| 0.039384
| 0.24575
| 46,120
| 802
| 350
| 57.506234
| 0.654459
| 0.00167
| 0
| 0.197479
| 0
| 0.25
| 0.590465
| 0.446612
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079832
| false
| 0.010504
| 0.035714
| 0.018908
| 0.163866
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4c29ab86d485de492b702e41efe2fa186086d2b0
| 177
|
py
|
Python
|
policykit/integrations/github/apps.py
|
mashton/policyk
|
623523d76d63c06b6d559ad7b477d80512fbd2e7
|
[
"MIT"
] | 78
|
2020-05-08T17:25:38.000Z
|
2022-01-13T05:44:50.000Z
|
policykit/integrations/github/apps.py
|
mashton/policyk
|
623523d76d63c06b6d559ad7b477d80512fbd2e7
|
[
"MIT"
] | 302
|
2020-02-20T07:04:30.000Z
|
2022-02-25T17:44:23.000Z
|
policykit/integrations/github/apps.py
|
mashton/policyk
|
623523d76d63c06b6d559ad7b477d80512fbd2e7
|
[
"MIT"
] | 13
|
2020-04-17T19:44:26.000Z
|
2022-02-25T17:18:04.000Z
|
from django.apps import AppConfig
class GithubIntegrationConfig(AppConfig):
    """Django app configuration for the GitHub integration."""
    # Dotted module path Django uses to locate this app.
    name = 'integrations.github'
    def ready(self):
        # Imported for side effects only: registers the integration's
        # handlers once the app registry is fully loaded.
        import integrations.github.handlers
| 19.666667
| 43
| 0.745763
| 18
| 177
| 7.333333
| 0.777778
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180791
| 177
| 8
| 44
| 22.125
| 0.910345
| 0
| 0
| 0
| 0
| 0
| 0.107345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4c355f7d9fb95451bdf7da0882eca4f59f17f53a
| 1,611
|
py
|
Python
|
grr/server/grr_response_server/flows/general/registry_init.py
|
raydan4/grr
|
9b94348c01484586777b02ec7361bb48f1d086f7
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/flows/general/registry_init.py
|
raydan4/grr
|
9b94348c01484586777b02ec7361bb48f1d086f7
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/flows/general/registry_init.py
|
raydan4/grr
|
9b94348c01484586777b02ec7361bb48f1d086f7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Load all flows so that they are visible in the registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# pylint: disable=unused-import
# These imports populate the Flow registry
from grr_response_server.flows import file
from grr_response_server.flows.general import administrative
from grr_response_server.flows.general import apple_firmware
from grr_response_server.flows.general import artifact_fallbacks
from grr_response_server.flows.general import ca_enroller
from grr_response_server.flows.general import checks
from grr_response_server.flows.general import collectors
from grr_response_server.flows.general import discovery
from grr_response_server.flows.general import export
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import filesystem
from grr_response_server.flows.general import filetypes
from grr_response_server.flows.general import find
from grr_response_server.flows.general import fingerprint
from grr_response_server.flows.general import hardware
from grr_response_server.flows.general import memory
from grr_response_server.flows.general import network
from grr_response_server.flows.general import osquery
from grr_response_server.flows.general import processes
from grr_response_server.flows.general import registry
from grr_response_server.flows.general import timeline
from grr_response_server.flows.general import transfer
from grr_response_server.flows.general import webhistory
from grr_response_server.flows.general import windows_vsc
| 48.818182
| 64
| 0.878336
| 235
| 1,611
| 5.73617
| 0.255319
| 0.124629
| 0.267062
| 0.373887
| 0.696588
| 0.696588
| 0.66543
| 0
| 0
| 0
| 0
| 0
| 0.081937
| 1,611
| 32
| 65
| 50.34375
| 0.911427
| 0.091868
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.037037
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4c588861968b6350286687b13b706af0333fad54
| 25
|
py
|
Python
|
models/__init__.py
|
Rafaellinos/br_cep_correios_api
|
aa657ecc598c3437f7ef5cb8796cc38d04012bb6
|
[
"MIT"
] | 2
|
2020-12-19T14:17:25.000Z
|
2021-08-03T10:20:21.000Z
|
models/__init__.py
|
Rafaellinos/br_cep_correios_api
|
aa657ecc598c3437f7ef5cb8796cc38d04012bb6
|
[
"MIT"
] | 12
|
2021-02-08T21:02:14.000Z
|
2022-03-12T00:52:27.000Z
|
models/__init__.py
|
Rafaellinos/br_cep_correios_api
|
aa657ecc598c3437f7ef5cb8796cc38d04012bb6
|
[
"MIT"
] | 3
|
2019-10-12T18:54:48.000Z
|
2020-05-30T06:23:53.000Z
|
from . import res_partner
| 25
| 25
| 0.84
| 4
| 25
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d5c3550251258fb9ede06d41da836fdd0f8e2f02
| 274
|
py
|
Python
|
hood/admin.py
|
TheCaffeine/HoodWatch
|
fc21304f63a26b99992d7c561aa13d167b9bc7dc
|
[
"MIT"
] | 1
|
2022-01-21T13:36:58.000Z
|
2022-01-21T13:36:58.000Z
|
hood/admin.py
|
TheCaffeine/HoodWatch
|
fc21304f63a26b99992d7c561aa13d167b9bc7dc
|
[
"MIT"
] | null | null | null |
hood/admin.py
|
TheCaffeine/HoodWatch
|
fc21304f63a26b99992d7c561aa13d167b9bc7dc
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Profile, NeighbourHood, Business, Post

# Register the app's models so they are manageable in the Django admin.
# (Previously the models were imported twice from .models; the two import
# lines are merged into one.)
admin.site.register(Profile)
admin.site.register(NeighbourHood)
admin.site.register(Business)
admin.site.register(Post)
| 27.4
| 59
| 0.80292
| 34
| 274
| 6.470588
| 0.352941
| 0.163636
| 0.309091
| 0.209091
| 0.4
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109489
| 274
| 9
| 60
| 30.444444
| 0.901639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.428571
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d5df9b753c729a4efd60a395b362fb5663f03f94
| 123
|
py
|
Python
|
movies/admin.py
|
vijay0707/REST-API-DRF
|
cb13a0bd7623f0f963d12d15475a7cbf691cbf65
|
[
"MIT"
] | null | null | null |
movies/admin.py
|
vijay0707/REST-API-DRF
|
cb13a0bd7623f0f963d12d15475a7cbf691cbf65
|
[
"MIT"
] | null | null | null |
movies/admin.py
|
vijay0707/REST-API-DRF
|
cb13a0bd7623f0f963d12d15475a7cbf691cbf65
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Moviedata
# Register your models here.
# Expose Moviedata in the Django admin with the default ModelAdmin.
admin.site.register(Moviedata)
| 24.6
| 32
| 0.821138
| 17
| 123
| 5.941176
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113821
| 123
| 5
| 33
| 24.6
| 0.926606
| 0.211382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d5e56d9d31d9b26eeaaf98bb084eb44222658e0b
| 46
|
py
|
Python
|
tests/components/media_source/__init__.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/media_source/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/media_source/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""The tests for Media Source integration."""
| 23
| 45
| 0.717391
| 6
| 46
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 46
| 1
| 46
| 46
| 0.825
| 0.847826
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d5e7d5a88411c968a12ee11b573fb7ea0fb9e83e
| 1,579
|
py
|
Python
|
test/test_text.py
|
volfpeter/markyp-html
|
dbac271141fbd956185df20ae91349f740dcee8f
|
[
"MIT"
] | 5
|
2019-07-18T17:25:15.000Z
|
2019-10-27T20:18:56.000Z
|
test/test_text.py
|
volfpeter/markyp-html
|
dbac271141fbd956185df20ae91349f740dcee8f
|
[
"MIT"
] | null | null | null |
test/test_text.py
|
volfpeter/markyp-html
|
dbac271141fbd956185df20ae91349f740dcee8f
|
[
"MIT"
] | null | null | null |
from markyp_html.text import *
def test_h1():
    """h1 with no attributes renders the bare (space-padded) tag."""
    element = h1("Text element content")
    assert element.markup == "<h1 >Text element content</h1>"
def test_h2():
    """h2 with no attributes renders the bare (space-padded) tag."""
    element = h2("Text element content")
    assert element.markup == "<h2 >Text element content</h2>"
def test_h3():
    """h3 with no attributes renders the bare (space-padded) tag."""
    element = h3("Text element content")
    assert element.markup == "<h3 >Text element content</h3>"
def test_h4():
    """h4 with no attributes renders the bare (space-padded) tag."""
    element = h4("Text element content")
    assert element.markup == "<h4 >Text element content</h4>"
def test_h5():
    """h5 with no attributes renders the bare (space-padded) tag."""
    element = h5("Text element content")
    assert element.markup == "<h5 >Text element content</h5>"
def test_h6():
    """h6 with no attributes renders the bare (space-padded) tag."""
    element = h6("Text element content")
    assert element.markup == "<h6 >Text element content</h6>"
def test_p():
    """p with no attributes renders the bare (space-padded) tag."""
    element = p("Text element content")
    assert element.markup == "<p >Text element content</p>"
def test_StyledTextFactory():
    """Every factory-produced element carries the configured CSS class."""
    factory = StyledTextFactory("fancy-text")
    assert factory.base_css_class == "fancy-text"
    for tag in ("h1", "h2", "h3", "h4", "h5", "h6", "p"):
        element = getattr(factory, tag)("Text element content")
        expected = '<{0} class="fancy-text">Text element content</{0}>'.format(tag)
        assert element.markup == expected
| 46.441176
| 106
| 0.687144
| 222
| 1,579
| 4.837838
| 0.108108
| 0.286778
| 0.469274
| 0.312849
| 0.569832
| 0.569832
| 0
| 0
| 0
| 0
| 0
| 0.031019
| 0.142495
| 1,579
| 33
| 107
| 47.848485
| 0.762186
| 0
| 0
| 0
| 0
| 0
| 0.533249
| 0.101963
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0.32
| false
| 0
| 0.04
| 0
| 0.36
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d5e93014095174ca4582518ae9830369e16b36cd
| 181
|
py
|
Python
|
api/web/authenticate.py
|
selelab/admin
|
d858209da3b7efc4501d503968402268d3f689f8
|
[
"BSD-3-Clause"
] | 82
|
2019-10-04T05:40:45.000Z
|
2020-03-14T06:40:02.000Z
|
server/restapi/v2/exempt_csrf.py
|
datanadi/Video-to-Retail-Platform
|
757c68d9de0778e3da8bbfa678d89251a6955573
|
[
"Apache-2.0"
] | 17
|
2020-03-27T14:18:46.000Z
|
2022-02-27T01:24:58.000Z
|
server/restapi/v2/exempt_csrf.py
|
datanadi/Video-to-Retail-Platform
|
757c68d9de0778e3da8bbfa678d89251a6955573
|
[
"Apache-2.0"
] | 24
|
2019-10-04T05:46:46.000Z
|
2020-05-30T05:22:32.000Z
|
from rest_framework.authentication import SessionAuthentication
class CsrfExemptSessionAuthentication(SessionAuthentication):
    """Session authentication that skips DRF's CSRF enforcement.

    DRF's SessionAuthentication normally validates the CSRF token for
    session-authenticated requests; overriding enforce_csrf to a no-op
    disables that check for views using this class.
    NOTE(review): this removes CSRF protection entirely — confirm the
    endpoints using it are safe without it (e.g. token-only clients).
    """
    def enforce_csrf(self, request):
        # Intentionally do nothing instead of raising on a missing/invalid token.
        return
| 22.625
| 63
| 0.81768
| 15
| 181
| 9.733333
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138122
| 181
| 7
| 64
| 25.857143
| 0.935897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
d5eec63b133ee90fcf487030daab90dc39d836cd
| 57
|
py
|
Python
|
examples/run_attribution_example/run_attribution_example/__init__.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 4,606
|
2018-06-21T17:45:20.000Z
|
2022-03-31T23:39:42.000Z
|
examples/run_attribution_example/run_attribution_example/__init__.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 6,221
|
2018-06-12T04:36:01.000Z
|
2022-03-31T21:43:05.000Z
|
examples/run_attribution_example/run_attribution_example/__init__.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 619
|
2018-08-22T22:43:09.000Z
|
2022-03-31T22:48:06.000Z
|
from .custom_run_coordinator import CustomRunCoordinator
| 28.5
| 56
| 0.912281
| 6
| 57
| 8.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070175
| 57
| 1
| 57
| 57
| 0.943396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
910f2ecaec25f1c292a6d4c9ea9dcba8f53f047c
| 18,607
|
py
|
Python
|
utils/experiments.py
|
nathalia-kim/nu_gan
|
c1d0891945bd7ac3d95869db91f490f57f203110
|
[
"MIT"
] | null | null | null |
utils/experiments.py
|
nathalia-kim/nu_gan
|
c1d0891945bd7ac3d95869db91f490f57f203110
|
[
"MIT"
] | null | null | null |
utils/experiments.py
|
nathalia-kim/nu_gan
|
c1d0891945bd7ac3d95869db91f490f57f203110
|
[
"MIT"
] | null | null | null |
import os
from sklearn.model_selection import KFold
import numpy as np
from segmentation_functions import cell_segment, masks_to_npy
from gan_model import create_model, rotation, train_representation
import multiprocessing
from generate_figures import figure_8
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
def cell_segmentation(positive_images_root, negative_images_root, positive_npy_root,
                      negative_npy_root, ref_path, intensity, multi_core):
    '''
    Performs cell segmentation on input images

    Parameters
    ----------
    positive_images_root : str
        path with positive images.
    negative_images_root : str
        path with negative images.
    positive_npy_root : str
        path with positive npy file.
    negative_npy_root : str
        path with negative npy file.
    ref_path : str
        path with reference image for stain normalization.
    intensity : int
        intensity for segmentation thresholding.
    multi_core : bool
        if the process should use multiple processes.

    Returns
    -------
    None.
    '''
    # get paths of positive and negative images
    positive_images_path = [positive_images_root + n for n in
                            os.listdir(positive_images_root)]
    negative_images_path = [negative_images_root + n for n in
                            os.listdir(negative_images_root)]
    positive_out = positive_npy_root + str(intensity) + '/'
    negative_out = negative_npy_root + str(intensity) + '/'
    # create output directories (was `if 1- os.path.exists(...)`)
    if not os.path.exists(positive_out):
        os.makedirs(positive_out)
    if not os.path.exists(negative_out):
        os.makedirs(negative_out)
    # apply cell segmentation on images
    # NOTE: the __name__ guard only holds when this function is defined in
    # the entry script; otherwise the sequential branch runs.
    if multi_core and __name__ == '__main__':
        _segment_in_batches(positive_images_path, positive_out, ref_path, intensity)
        _segment_in_batches(negative_images_path, negative_out, ref_path, intensity)
    else:
        for i in positive_images_path:
            cell_segment(i, positive_out, ref_path, intensity)
        for i in negative_images_path:
            cell_segment(i, negative_out, ref_path, intensity)

def _segment_in_batches(image_paths, out_root, ref_path, intensity, batch_size=7):
    '''Run cell_segment over image_paths in parallel, batch_size processes at a time.

    Fixes two bugs in the original loop: the batch join only waited on the
    last-started process, and the trailing `for job in jobs:` loop called
    `p.join()` (the last process) instead of `job.join()`, leaking the rest.
    '''
    jobs = []
    for index, path in enumerate(image_paths):
        p = multiprocessing.Process(
            target=cell_segment, args=(path, out_root, ref_path, intensity))
        p.start()
        jobs.append(p)
        if (index + 1) % batch_size == 0:
            # wait for the whole batch before launching the next one
            for job in jobs:
                job.join()
            jobs = []
    # wait for any leftover partial batch
    for job in jobs:
        job.join()
def split_dataset(path, fold=4, random_seed=42):
    '''
    Split dataset in k folds

    Parameters
    ----------
    path : str
        path to npy file with images.
    fold : int, optional
        number of folds to split the dataset. The default is 4.
    random_seed : int, optional
        random seed. The default is 42.

    Returns
    -------
    train_list : list
        list with paths of split training data files.
    test_list : list
        list with paths of split testing data files.
    '''
    # Seed the global numpy RNG, which KFold's shuffle draws from when no
    # explicit random_state is given.
    np.random.seed(random_seed)
    splitter = KFold(n_splits=fold, shuffle=True)
    splitter.get_n_splits(path)
    train_list = []
    test_list = []
    for train_idx, test_idx in splitter.split(path):
        train_list.append([path[i] for i in train_idx])
        test_list.append([path[i] for i in test_idx])
    return train_list, test_list
def eval_plots(values_D_G, l_q, purities, experiment_root, sample=30):
    '''
    Create and save evaluation plots - purity and loss over iterations

    Parameters
    ----------
    values_D_G : array
        values of function V(D, G) over iterations. Used to evaluate how well the generator distribution matches the real data distribution.
    l_q : array
        values of loss function of auxiliary network over iterations.
    purities : array
        values of clustering purity over iterations.
    experiment_root : str
        path of experiment root.
    sample : int, optional
        sample rate to plot. The default is 30.

    Returns
    -------
    None.
    '''
    # BUG FIX: both loops previously hard-coded `i % 30`, silently ignoring
    # the `sample` parameter; range striding now honours it.
    if len(purities) > 0:
        indexes = list(range(0, len(purities), sample))
        sample_purities = [purities[i] for i in indexes]
        figure(figsize=(10, 6), dpi=80)
        plt.plot(indexes, sample_purities, label="purities")
        plt.xlabel("Generator iterations")
        plt.ylabel("Purity")
        plt.legend()
        plt.savefig(experiment_root + "purities.png")
    # sample l_q and vdg to plot
    indexes = list(range(0, len(l_q), sample))
    sample_lq = [l_q[i] for i in indexes]
    sample_vdg = [values_D_G[i] for i in indexes]
    figure(figsize=(10, 6), dpi=80)
    plt.plot(indexes, sample_vdg, label="V(D,G)")
    plt.plot(indexes, sample_lq, label="Lq")
    plt.xlabel("Generator iterations")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig(experiment_root + "loss.png")
def cell_representation(X_train_path, X_test_path, y_train_path, y_test_path,
                        experiment_root, n_epoch=50, batchsize=16, rand=32,
                        dis_category=5, ld = 1e-4, lg = 1e-4, lq = 1e-4,
                        save_model_steps=100, image_classification = False):
    '''
    Creates and trains model of cell-level visual representation learning

    Parameters
    ----------
    X_train_path : str
        path to .npy file with training data
    X_test_path : str
        path to .npy file with testing data
    y_train_path : str
        path to .npy file with training labels
    y_test_path : str
        path to .npy file with testing labels
    experiment_root : str
        path to experiment root
    n_epoch : int
        number of epochs for training. The default is 50.
    batchsize : int
        batch size. The default is 16.
    rand : int
        number of gaussian noise variables. The default is 32.
    dis_category : int
        number of categories / clusters. The default is 5.
    ld : float
        learning rate for discriminator network D. The default is 1e-4.
    lg : float
        learning rate for generator network G. The default is 1e-4.
    lq : float
        learning rate for auxiliary network Q. The default is 1e-4.
    save_model_steps : int
        number of steps to save the model. The default is 100.
    image_classification : bool, optional
        if the training is for image classification or not. The default is False.

    Returns
    -------
    values_D_G : list
        values of function V(D, G) over iterations. Used to evaluate how well the generator distribution matches the real data distribution
    l_q : list
        values of loss function of auxiliary network over iterations.
    purities: list
        values of clustering purity over iterations
    '''
    # load training and testing datasets
    X_train = np.load(X_train_path)
    X_test = np.load(X_test_path)
    y_train = np.load(y_train_path)
    y_test = np.load(y_test_path)
    # Train and test are merged: training is unsupervised, and the merged
    # labels are only used for purity evaluation.
    cell_train_set = np.concatenate([X_train, X_test])
    cell_test_set = cell_train_set
    cell_test_label = np.concatenate([y_train, y_test])
    # initialize empty npys (unused placeholders train_representation expects)
    positive_train_npy = []
    positive_test_npy = []
    negative_train_npy = []
    negative_test_npy = []
    # create / initialize the model
    netD, netG, netD_D, netD_Q = create_model(rand=rand, dis_category=dis_category)
    # train cell representation
    values_D_G, l_q, purities = train_representation(
        cell_train_set, cell_test_set, cell_test_label,
        positive_train_npy, positive_test_npy, negative_train_npy,
        negative_test_npy, netD, netG,
        netD_D, netD_Q, experiment_root, n_epoch=n_epoch,
        batchsize=batchsize, rand=rand,
        dis_category=dis_category, ld=ld, lg=lg, lq=lq,
        save_model_steps=save_model_steps,
        image_classification = image_classification)
    # convert l_q to numpy (entries are presumably torch tensors — hence .cpu().detach())
    l_q_np = []
    for i in range(0, len(l_q)):
        l_q_np.append(l_q[i].cpu().detach().numpy())
    # save results
    np.save(experiment_root + 'purities', purities)
    np.save(experiment_root + 'values_D_G', values_D_G)
    np.save(experiment_root + 'l_q', l_q_np)
    # save evaluation plots
    eval_plots(values_D_G, l_q_np, purities, experiment_root)
    # view resulting representations
    model_path = experiment_root + "/model/"
    figure_8(X_train_path, X_test_path, model_path, output_dir=experiment_root)
    return values_D_G, l_q_np, purities
def cell_representation_unlabeled(images_path, ref_path, npy_path, experiment_root, n_epoch=50,
                                  batchsize=16, rand=32, dis_category=5, ld = 1e-4, lg = 1e-4,
                                  lq = 1e-4, save_model_steps=100, image_classification = False):
    '''
    Creates and trains model of cell-level visual representation learning with unlabeled data

    Parameters
    ----------
    images_path : str
        path with images for preping.
    ref_path : str
        path with reference image for stain normalization.
    npy_path : str
        path to save npy file with single-cell images.
    experiment_root : str
        path to experiment root
    n_epoch : int
        number of epochs for training. The default is 50.
    batchsize : int
        batch size. The default is 16.
    rand : int
        number of gaussian noise variables. The default is 32.
    dis_category : int
        number of categories / clusters. The default is 5.
    ld : float
        learning rate for discriminator network D. The default is 1e-4.
    lg : float
        learning rate for generator network G. The default is 1e-4.
    lq : float
        learning rate for auxiliary network Q. The default is 1e-4.
    save_model_steps : int
        number of steps to save the model. The default is 100.
    image_classification : bool, optional
        if the training is for image classification or not. The default is False.

    Returns
    -------
    values_D_G : list
        values of function V(D, G) over iterations. Used to evaluate how well the generator distribution matches the real data distribution
    l_q : list
        values of loss function of auxiliary network over iterations.
    '''
    # prep data, generate npy file
    masks_to_npy(images_path, ref_path, npy_path)
    # load training data
    X_train = np.load(npy_path + "Train.npy")
    # create datasets — no labels here, so the test set/labels stay empty
    cell_train_set = X_train
    cell_test_set = np.array([])
    cell_test_label = np.array([])
    # initialize empty npys (unused placeholders train_representation expects)
    positive_train_npy = []
    positive_test_npy = []
    negative_train_npy = []
    negative_test_npy = []
    # create / initialize the model
    netD, netG, netD_D, netD_Q = create_model(rand=rand, dis_category=dis_category)
    # train cell representation
    # NOTE(review): only two return values here (no purities, since there
    # are no labels) vs three in cell_representation — confirm
    # train_representation's return arity for this call pattern.
    values_D_G, l_q = train_representation(
        cell_train_set, cell_test_set, cell_test_label,
        positive_train_npy, positive_test_npy, negative_train_npy,
        negative_test_npy, netD, netG,
        netD_D, netD_Q, experiment_root, n_epoch=n_epoch,
        batchsize=batchsize, rand=rand,
        dis_category=dis_category, ld=ld, lg=lg, lq=lq,
        save_model_steps=save_model_steps,
        image_classification = image_classification)
    # convert l_q to numpy (entries are presumably torch tensors — hence .cpu().detach())
    l_q_np = []
    for i in range(0, len(l_q)):
        l_q_np.append(l_q[i].cpu().detach().numpy())
    # save results
    np.save(experiment_root + 'values_D_G', values_D_G)
    np.save(experiment_root + 'l_q', l_q_np)
    # save evaluation plots (empty purities list: skips the purity plot)
    eval_plots(values_D_G, l_q_np, [], experiment_root)
    # view resulting representations
    X_train_path = npy_path + "Train.npy"
    X_test_path = None
    model_path = experiment_root + "/model/"
    figure_8(X_train_path, X_test_path, model_path, rand = rand, dis_category = dis_category, output_dir=experiment_root)
    return values_D_G, l_q_np
def image_classification(positive_images_root, negative_images_root,
                         positive_npy_root, negative_npy_root, ref_path, intensity,
                         X_train_path, X_test_path, y_train_path, y_test_path,
                         experiment_root, multi_core=True, fold=4, random_seed=42,
                         choosing_fold=1, n_epoch=10000, batchsize=32, rand=64,
                         dis_category=5, ld=1e-4, lg=1e-4, lq=1e-4,
                         save_model_steps=100, image_classification=True):
    '''
    Applies cell segmentation to images, creates and trains a model of
    cell-level visual representation learning, and performs image
    classification.

    Parameters
    ----------
    positive_images_root : str
        path with positive images.
    negative_images_root : str
        path with negative images.
    positive_npy_root : str
        path with positive npy file.
    negative_npy_root : str
        path with negative npy file.
    ref_path : str
        path with reference image for stain normalization.
    intensity : int
        intensity for segmentation thresholding.
    X_train_path : str
        path with training data.
    X_test_path : str
        path with testing data.
    y_train_path : str
        path with training labels.
    y_test_path : str
        path with testing labels.
    experiment_root : str
        path of experiment root.
    multi_core : bool, optional
        if the process is multi core. The default is True.
        NOTE(review): currently unused in this function; kept for
        backward compatibility of the interface.
    fold : int, optional
        number of folds to split dataset. The default is 4.
    random_seed : int, optional
        random seed. The default is 42.
    choosing_fold : int, optional
        index of the fold used for training / testing. The default is 1.
    n_epoch : int, optional
        number of epochs for training. The default is 10000.
    batchsize : int, optional
        size of the batch. The default is 32.
    rand : int, optional
        number of gaussian noise variables. The default is 64.
    dis_category : int, optional
        number of categories / clusters. The default is 5.
    ld : float, optional
        learning rate for discriminator network D. The default is 1e-4.
    lg : float, optional
        learning rate for generator network G. The default is 1e-4.
    lq : float, optional
        learning rate for auxiliary network Q. The default is 1e-4.
    save_model_steps : int, optional
        number of steps to save the model. The default is 100.
    image_classification : bool, optional
        if the training is for image classification or not. The default is True.

    Returns
    -------
    values_D_G : list
        values of function V(D, G) over iterations. Used to evaluate how well
        the generator distribution matches the real data distribution.
    l_q_np : list
        loss of the auxiliary network Q over iterations, as numpy arrays.
    purities : list
        clustering purity values recorded during training.
    '''
    # build per-image .npy paths for the segmented positive / negative cells
    positive_npy_path = [positive_npy_root + str(intensity) + '/' + n[:-3] +
                         'npy' for n in os.listdir(positive_images_root)]
    negative_npy_path = [negative_npy_root + str(intensity) + '/' + n[:-3] +
                         'npy' for n in os.listdir(negative_images_root)]
    # split the per-image npy files into cross-validation folds
    positive_train_list, positive_test_list = split_dataset(positive_npy_path,
                                                            fold, random_seed)
    negative_train_list, negative_test_list = split_dataset(negative_npy_path,
                                                            fold, random_seed)
    # load the npy arrays belonging to the chosen fold
    positive_train_npy = [np.load(n, allow_pickle=True) for n in
                          positive_train_list[choosing_fold]]
    positive_test_npy = [np.load(n, allow_pickle=True) for n in
                         positive_test_list[choosing_fold]]
    negative_train_npy = [np.load(n, allow_pickle=True) for n in
                         negative_train_list[choosing_fold]]
    negative_test_npy = [np.load(n, allow_pickle=True) for n in
                        negative_test_list[choosing_fold]]
    # load image-level training and testing datasets
    X_train = np.load(X_train_path)
    X_test = np.load(X_test_path)
    y_train = np.load(y_train_path)
    y_test = np.load(y_test_path)
    # create cell training and testing sets
    cell_test_set = np.concatenate([X_train, X_test])
    cell_test_label = np.concatenate([y_train, y_test])
    # NOTE(review): the original code also built cell_train_set by
    # concatenating the positive/negative fold npys and then immediately
    # overwrote it with the line below; that dead assignment was removed.
    # Confirm that training on the rotated test set (rather than the fold
    # cells) is the intended behavior.
    cell_train_set = rotation(cell_test_set)
    # create / initialize the model
    netD, netG, netD_D, netD_Q = create_model(rand=rand, dis_category=dis_category)
    # train cell representation
    values_D_G, l_q, purities = train_representation(
        cell_train_set, cell_test_set, cell_test_label, positive_train_npy,
        positive_test_npy, negative_train_npy, negative_test_npy, netD, netG, netD_D,
        netD_Q, experiment_root, n_epoch=n_epoch, batchsize=batchsize, rand=rand,
        dis_category=dis_category,
        ld=ld, lg=lg, lq=lq, save_model_steps=save_model_steps,
        image_classification=image_classification)
    # move the auxiliary-network losses from torch tensors to numpy arrays
    l_q_np = [loss.cpu().detach().numpy() for loss in l_q]
    # save results
    np.save(experiment_root + 'purities', purities)
    np.save(experiment_root + 'values_D_G', values_D_G)
    np.save(experiment_root + 'l_q', l_q_np)
    # save evaluation plots
    eval_plots(values_D_G, l_q_np, purities, experiment_root)
    # view resulting representations
    model_path = experiment_root + "/model/"
    figure_8(X_train_path, X_test_path, model_path, output_dir=experiment_root)
    return values_D_G, l_q_np, purities
| 37.288577
| 142
| 0.624496
| 2,455
| 18,607
| 4.49002
| 0.092464
| 0.006713
| 0.038102
| 0.017237
| 0.80205
| 0.773746
| 0.736914
| 0.712692
| 0.668602
| 0.627688
| 0
| 0.011933
| 0.297415
| 18,607
| 499
| 143
| 37.288577
| 0.831255
| 0.364272
| 0
| 0.514563
| 0
| 0
| 0.018534
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029126
| false
| 0
| 0.043689
| 0
| 0.092233
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
912a555892b83cb9128a5e8b024f72cad0630e9f
| 18
|
py
|
Python
|
src/model_processors/track_utils/__init__.py
|
patrick-ubc/Huawei_HiFly_Drone
|
5dae1b56f49c2b86c3b852bbc5e3a63e84ccd490
|
[
"Apache-2.0"
] | 25
|
2020-11-20T09:01:35.000Z
|
2022-03-29T10:35:38.000Z
|
src/model_processors/track_utils/__init__.py
|
patrick-ubc/Huawei_HiFly_Drone
|
5dae1b56f49c2b86c3b852bbc5e3a63e84ccd490
|
[
"Apache-2.0"
] | 8
|
2021-07-05T21:41:53.000Z
|
2022-02-15T19:46:13.000Z
|
tests/__init__.py
|
serbant/simple_netem
|
e16e556497b91368a2c69afefdf403275c317f9b
|
[
"Apache-2.0"
] | 16
|
2020-12-06T07:26:13.000Z
|
2022-03-01T07:51:55.000Z
|
"""__init__.py"""
| 9
| 17
| 0.555556
| 2
| 18
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 18
| 1
| 18
| 18
| 0.352941
| 0.611111
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e67576518a13a764b07ee608ea6b1f628c653584
| 57,435
|
py
|
Python
|
tests/test_awslambda/test_lambda.py
|
d-ryzhikov/moto
|
d608a617b2b299384ccaeffa5b827d7b5ae042cf
|
[
"Apache-2.0"
] | null | null | null |
tests/test_awslambda/test_lambda.py
|
d-ryzhikov/moto
|
d608a617b2b299384ccaeffa5b827d7b5ae042cf
|
[
"Apache-2.0"
] | 2
|
2021-03-31T19:53:06.000Z
|
2021-12-13T20:44:51.000Z
|
tests/test_awslambda/test_lambda.py
|
d-ryzhikov/moto
|
d608a617b2b299384ccaeffa5b827d7b5ae042cf
|
[
"Apache-2.0"
] | 1
|
2020-06-01T22:13:55.000Z
|
2020-06-01T22:13:55.000Z
|
from __future__ import unicode_literals
import base64
import uuid
import botocore.client
import boto3
import hashlib
import io
import json
import time
import zipfile
import sure # noqa
from freezegun import freeze_time
from moto import (
mock_dynamodb2,
mock_lambda,
mock_iam,
mock_s3,
mock_ec2,
mock_sns,
mock_logs,
settings,
mock_sqs,
)
from moto.sts.models import ACCOUNT_ID
from nose.tools import assert_raises
from botocore.exceptions import ClientError
# All tests run against this region unless a test overrides it explicitly.
_lambda_region = "us-west-2"
boto3.setup_default_session(region_name=_lambda_region)
def _process_lambda(func_str):
zip_output = io.BytesIO()
zip_file = zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED)
zip_file.writestr("lambda_function.py", func_str)
zip_file.close()
zip_output.seek(0)
return zip_output.read()
def get_test_zip_file1():
    """Zip of a trivial handler that echoes its event straight back."""
    return _process_lambda(
        """
def lambda_handler(event, context):
    return event
"""
    )
def get_test_zip_file2():
    """Zip of a handler that looks up an EC2 volume through boto3.

    The EC2 endpoint is baked in at build time: the moto server host when
    TEST_SERVER_MODE is set, the regional endpoint otherwise.
    """
    if settings.TEST_SERVER_MODE:
        endpoint_host = "motoserver:5000"
    else:
        endpoint_host = "ec2.us-west-2.amazonaws.com"
    template = """
import boto3
def lambda_handler(event, context):
    ec2 = boto3.resource('ec2', region_name='us-west-2', endpoint_url='http://{base_url}')
    volume_id = event.get('volume_id')
    vol = ec2.Volume(volume_id)
    return {{'id': vol.id, 'state': vol.state, 'size': vol.size}}
"""
    return _process_lambda(template.format(base_url=endpoint_host))
def get_test_zip_file3():
    """Zip of a handler that logs its record count plus a success marker."""
    handler_source = """
def lambda_handler(event, context):
    print("Nr_of_records("+str(len(event['Records']))+")")
    print("get_test_zip_file3 success")
    return event
"""
    return _process_lambda(handler_source)
def get_test_zip_file4():
    """Zip of a handler that always raises, to exercise error paths."""
    return _process_lambda(
        """
def lambda_handler(event, context):
    raise Exception('I failed!')
"""
    )
@mock_lambda
def test_list_functions():
    """A fresh mocked account starts with zero Lambda functions."""
    conn = boto3.client("lambda", _lambda_region)
    result = conn.list_functions()
    result["Functions"].should.have.length_of(0)
@mock_lambda
def test_invoke_requestresponse_function():
    """RequestResponse invoke returns 200, the payload, and a base64 log."""
    conn = boto3.client("lambda", _lambda_region)
    conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file1()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    in_data = {"msg": "So long and thanks for all the fish"}
    success_result = conn.invoke(
        FunctionName="testFunction",
        InvocationType="RequestResponse",
        Payload=json.dumps(in_data),
    )
    success_result["StatusCode"].should.equal(200)
    result_obj = json.loads(
        base64.b64decode(success_result["LogResult"]).decode("utf-8")
    )
    result_obj.should.equal(in_data)
    payload = success_result["Payload"].read().decode("utf-8")
    json.loads(payload).should.equal(in_data)
@mock_lambda
def test_invoke_requestresponse_function_with_arn():
    """A function can be invoked by its full ARN, not just by name."""
    from moto.awslambda.models import ACCOUNT_ID

    conn = boto3.client("lambda", "us-west-2")
    conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file1()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    in_data = {"msg": "So long and thanks for all the fish"}
    success_result = conn.invoke(
        FunctionName="arn:aws:lambda:us-west-2:{}:function:testFunction".format(
            ACCOUNT_ID
        ),
        InvocationType="RequestResponse",
        Payload=json.dumps(in_data),
    )
    success_result["StatusCode"].should.equal(200)
    result_obj = json.loads(
        base64.b64decode(success_result["LogResult"]).decode("utf-8")
    )
    result_obj.should.equal(in_data)
    payload = success_result["Payload"].read().decode("utf-8")
    json.loads(payload).should.equal(in_data)
@mock_lambda
def test_invoke_event_function():
    """Event invoke returns 202; invoking an unknown function raises."""
    conn = boto3.client("lambda", _lambda_region)
    conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file1()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    conn.invoke.when.called_with(
        FunctionName="notAFunction", InvocationType="Event", Payload="{}"
    ).should.throw(botocore.client.ClientError)
    in_data = {"msg": "So long and thanks for all the fish"}
    success_result = conn.invoke(
        FunctionName="testFunction", InvocationType="Event", Payload=json.dumps(in_data)
    )
    success_result["StatusCode"].should.equal(202)
    json.loads(success_result["Payload"].read().decode("utf-8")).should.equal(in_data)
@mock_lambda
def test_invoke_dryrun_function():
    """DryRun invoke validates without executing and returns 204."""
    conn = boto3.client("lambda", _lambda_region)
    conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file1(),},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    conn.invoke.when.called_with(
        FunctionName="notAFunction", InvocationType="Event", Payload="{}"
    ).should.throw(botocore.client.ClientError)
    in_data = {"msg": "So long and thanks for all the fish"}
    success_result = conn.invoke(
        FunctionName="testFunction",
        InvocationType="DryRun",
        Payload=json.dumps(in_data),
    )
    success_result["StatusCode"].should.equal(204)
# This EC2-backed invoke test only works against a real moto server, where
# the packaged lambda can reach the EC2 endpoint over HTTP. As dumped, the
# guarded function was not indented under the ``if`` (a SyntaxError); it is
# restored under the guard here.
if settings.TEST_SERVER_MODE:

    @mock_ec2
    @mock_lambda
    def test_invoke_function_get_ec2_volume():
        """Invoking the EC2-querying lambda returns the created volume's data."""
        conn = boto3.resource("ec2", _lambda_region)
        vol = conn.create_volume(Size=99, AvailabilityZone=_lambda_region)
        vol = conn.Volume(vol.id)
        conn = boto3.client("lambda", _lambda_region)
        conn.create_function(
            FunctionName="testFunction",
            Runtime="python3.7",
            Role=get_role_name(),
            Handler="lambda_function.lambda_handler",
            Code={"ZipFile": get_test_zip_file2()},
            Description="test lambda function",
            Timeout=3,
            MemorySize=128,
            Publish=True,
        )
        in_data = {"volume_id": vol.id}
        result = conn.invoke(
            FunctionName="testFunction",
            InvocationType="RequestResponse",
            Payload=json.dumps(in_data),
        )
        result["StatusCode"].should.equal(200)
        actual_payload = json.loads(result["Payload"].read().decode("utf-8"))
        expected_payload = {"id": vol.id, "state": vol.state, "size": vol.size}
        actual_payload.should.equal(expected_payload)
@mock_logs
@mock_sns
@mock_ec2
@mock_lambda
def test_invoke_function_from_sns():
    """Publishing to a subscribed SNS topic triggers the lambda; poll its logs
    for up to 30s until the handler's success marker appears."""
    logs_conn = boto3.client("logs", region_name=_lambda_region)
    sns_conn = boto3.client("sns", region_name=_lambda_region)
    sns_conn.create_topic(Name="some-topic")
    topics_json = sns_conn.list_topics()
    topics = topics_json["Topics"]
    topic_arn = topics[0]["TopicArn"]
    conn = boto3.client("lambda", _lambda_region)
    result = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    sns_conn.subscribe(
        TopicArn=topic_arn, Protocol="lambda", Endpoint=result["FunctionArn"]
    )
    result = sns_conn.publish(TopicArn=topic_arn, Message=json.dumps({}))
    start = time.time()
    while (time.time() - start) < 30:
        result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction")
        log_streams = result.get("logStreams")
        if not log_streams:
            time.sleep(1)
            continue
        assert len(log_streams) == 1
        result = logs_conn.get_log_events(
            logGroupName="/aws/lambda/testFunction",
            logStreamName=log_streams[0]["logStreamName"],
        )
        for event in result.get("events"):
            if event["message"] == "get_test_zip_file3 success":
                return
        time.sleep(1)
    assert False, "Test Failed"
@mock_lambda
def test_create_based_on_s3_with_missing_bucket():
    """create_function fails when the referenced S3 bucket does not exist."""
    conn = boto3.client("lambda", _lambda_region)
    conn.create_function.when.called_with(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": "this-bucket-does-not-exist", "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
        VpcConfig={"SecurityGroupIds": ["sg-123abc"], "SubnetIds": ["subnet-123abc"]},
    ).should.throw(botocore.client.ClientError)
@mock_lambda
@mock_s3
@freeze_time("2015-01-01 00:00:00")
def test_create_function_from_aws_bucket():
    """create_function with S3-hosted code returns the full configuration."""
    s3_conn = boto3.client("s3", _lambda_region)
    s3_conn.create_bucket(
        Bucket="test-bucket",
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    zip_content = get_test_zip_file2()
    s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content)
    conn = boto3.client("lambda", _lambda_region)
    result = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
        VpcConfig={"SecurityGroupIds": ["sg-123abc"], "SubnetIds": ["subnet-123abc"]},
    )
    # this is hard to match against, so remove it
    result["ResponseMetadata"].pop("HTTPHeaders", None)
    # Botocore inserts retry attempts not seen in Python27
    result["ResponseMetadata"].pop("RetryAttempts", None)
    result.pop("LastModified")
    result.should.equal(
        {
            "FunctionName": "testFunction",
            "FunctionArn": "arn:aws:lambda:{}:{}:function:testFunction".format(
                _lambda_region, ACCOUNT_ID
            ),
            "Runtime": "python2.7",
            "Role": result["Role"],
            "Handler": "lambda_function.lambda_handler",
            "CodeSha256": hashlib.sha256(zip_content).hexdigest(),
            "CodeSize": len(zip_content),
            "Description": "test lambda function",
            "Timeout": 3,
            "MemorySize": 128,
            "Version": "1",
            "VpcConfig": {
                "SecurityGroupIds": ["sg-123abc"],
                "SubnetIds": ["subnet-123abc"],
                "VpcId": "vpc-123abc",
            },
            "ResponseMetadata": {"HTTPStatusCode": 201},
            "State": "Active",
        }
    )
@mock_lambda
@freeze_time("2015-01-01 00:00:00")
def test_create_function_from_zipfile():
    """create_function with an inline ZipFile returns the full configuration."""
    conn = boto3.client("lambda", _lambda_region)
    zip_content = get_test_zip_file1()
    result = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": zip_content},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    # this is hard to match against, so remove it
    result["ResponseMetadata"].pop("HTTPHeaders", None)
    # Botocore inserts retry attempts not seen in Python27
    result["ResponseMetadata"].pop("RetryAttempts", None)
    result.pop("LastModified")
    result.should.equal(
        {
            "FunctionName": "testFunction",
            "FunctionArn": "arn:aws:lambda:{}:{}:function:testFunction".format(
                _lambda_region, ACCOUNT_ID
            ),
            "Runtime": "python2.7",
            "Role": result["Role"],
            "Handler": "lambda_function.lambda_handler",
            "CodeSize": len(zip_content),
            "Description": "test lambda function",
            "Timeout": 3,
            "MemorySize": 128,
            "CodeSha256": hashlib.sha256(zip_content).hexdigest(),
            "Version": "1",
            "VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
            "ResponseMetadata": {"HTTPStatusCode": 201},
            "State": "Active",
        }
    )
@mock_lambda
@mock_s3
@freeze_time("2015-01-01 00:00:00")
def test_get_function():
    """get_function returns code location, configuration, environment, and
    supports the $LATEST qualifier; unknown names raise ResourceNotFound."""
    s3_conn = boto3.client("s3", _lambda_region)
    s3_conn.create_bucket(
        Bucket="test-bucket",
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    zip_content = get_test_zip_file1()
    s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content)
    conn = boto3.client("lambda", _lambda_region)
    conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
        Environment={"Variables": {"test_variable": "test_value"}},
    )
    result = conn.get_function(FunctionName="testFunction")
    # this is hard to match against, so remove it
    result["ResponseMetadata"].pop("HTTPHeaders", None)
    # Botocore inserts retry attempts not seen in Python27
    result["ResponseMetadata"].pop("RetryAttempts", None)
    result["Configuration"].pop("LastModified")
    result["Code"]["Location"].should.equal(
        "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip".format(_lambda_region)
    )
    result["Code"]["RepositoryType"].should.equal("S3")
    result["Configuration"]["CodeSha256"].should.equal(
        hashlib.sha256(zip_content).hexdigest()
    )
    result["Configuration"]["CodeSize"].should.equal(len(zip_content))
    result["Configuration"]["Description"].should.equal("test lambda function")
    result["Configuration"].should.contain("FunctionArn")
    result["Configuration"]["FunctionName"].should.equal("testFunction")
    result["Configuration"]["Handler"].should.equal("lambda_function.lambda_handler")
    result["Configuration"]["MemorySize"].should.equal(128)
    result["Configuration"]["Role"].should.equal(get_role_name())
    result["Configuration"]["Runtime"].should.equal("python2.7")
    result["Configuration"]["Timeout"].should.equal(3)
    result["Configuration"]["Version"].should.equal("$LATEST")
    result["Configuration"].should.contain("VpcConfig")
    result["Configuration"].should.contain("Environment")
    result["Configuration"]["Environment"].should.contain("Variables")
    result["Configuration"]["Environment"]["Variables"].should.equal(
        {"test_variable": "test_value"}
    )
    # Test get function with the $LATEST qualifier
    result = conn.get_function(FunctionName="testFunction", Qualifier="$LATEST")
    result["Configuration"]["Version"].should.equal("$LATEST")
    result["Configuration"]["FunctionArn"].should.equal(
        "arn:aws:lambda:us-west-2:{}:function:testFunction:$LATEST".format(ACCOUNT_ID)
    )
    # Test get function when can't find function name
    with assert_raises(conn.exceptions.ResourceNotFoundException):
        conn.get_function(FunctionName="junk", Qualifier="$LATEST")
@mock_lambda
@mock_s3
def test_get_function_by_arn():
    """get_function accepts the function's ARN in place of its name."""
    bucket_name = "test-bucket"
    s3_conn = boto3.client("s3", "us-east-1")
    s3_conn.create_bucket(
        Bucket=bucket_name,
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    zip_content = get_test_zip_file2()
    s3_conn.put_object(Bucket=bucket_name, Key="test.zip", Body=zip_content)
    conn = boto3.client("lambda", "us-east-1")
    fnc = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": bucket_name, "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    result = conn.get_function(FunctionName=fnc["FunctionArn"])
    result["Configuration"]["FunctionName"].should.equal("testFunction")
@mock_lambda
@mock_s3
def test_delete_function():
    """delete_function returns 204 and removes the function from listings."""
    s3_conn = boto3.client("s3", _lambda_region)
    s3_conn.create_bucket(
        Bucket="test-bucket",
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    zip_content = get_test_zip_file2()
    s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content)
    conn = boto3.client("lambda", _lambda_region)
    conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    success_result = conn.delete_function(FunctionName="testFunction")
    # this is hard to match against, so remove it
    success_result["ResponseMetadata"].pop("HTTPHeaders", None)
    # Botocore inserts retry attempts not seen in Python27
    success_result["ResponseMetadata"].pop("RetryAttempts", None)
    success_result.should.equal({"ResponseMetadata": {"HTTPStatusCode": 204}})
    function_list = conn.list_functions()
    function_list["Functions"].should.have.length_of(0)
@mock_lambda
@mock_s3
def test_delete_function_by_arn():
    """delete_function accepts the function's ARN in place of its name."""
    bucket_name = "test-bucket"
    s3_conn = boto3.client("s3", "us-east-1")
    s3_conn.create_bucket(
        Bucket=bucket_name,
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    zip_content = get_test_zip_file2()
    s3_conn.put_object(Bucket=bucket_name, Key="test.zip", Body=zip_content)
    conn = boto3.client("lambda", "us-east-1")
    fnc = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": bucket_name, "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    conn.delete_function(FunctionName=fnc["FunctionArn"])
    function_list = conn.list_functions()
    function_list["Functions"].should.have.length_of(0)
@mock_lambda
def test_delete_unknown_function():
    """Deleting a function that never existed raises a ClientError."""
    conn = boto3.client("lambda", _lambda_region)
    conn.delete_function.when.called_with(
        FunctionName="testFunctionThatDoesntExist"
    ).should.throw(botocore.client.ClientError)
@mock_lambda
@mock_s3
def test_publish():
    """publish_version creates a numbered version next to $LATEST, and a
    qualified delete removes only that version."""
    s3_conn = boto3.client("s3", _lambda_region)
    s3_conn.create_bucket(
        Bucket="test-bucket",
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    zip_content = get_test_zip_file2()
    s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content)
    conn = boto3.client("lambda", _lambda_region)
    conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=False,
    )
    function_list = conn.list_functions()
    function_list["Functions"].should.have.length_of(1)
    latest_arn = function_list["Functions"][0]["FunctionArn"]
    res = conn.publish_version(FunctionName="testFunction")
    assert res["ResponseMetadata"]["HTTPStatusCode"] == 201
    function_list = conn.list_functions()
    function_list["Functions"].should.have.length_of(2)
    # #SetComprehension ;-)
    published_arn = list(
        {f["FunctionArn"] for f in function_list["Functions"]} - {latest_arn}
    )[0]
    published_arn.should.contain("testFunction:1")
    conn.delete_function(FunctionName="testFunction", Qualifier="1")
    function_list = conn.list_functions()
    function_list["Functions"].should.have.length_of(1)
    function_list["Functions"][0]["FunctionArn"].should.contain("testFunction")
@mock_lambda
@mock_s3
@freeze_time("2015-01-01 00:00:00")
def test_list_create_list_get_delete_list():
    """
    test `list -> create -> list -> get -> delete -> list` integration
    """
    s3_conn = boto3.client("s3", _lambda_region)
    s3_conn.create_bucket(
        Bucket="test-bucket",
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    zip_content = get_test_zip_file2()
    s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content)
    conn = boto3.client("lambda", _lambda_region)
    conn.list_functions()["Functions"].should.have.length_of(0)
    conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    # the "Configuration" section doubles as the expected list_functions entry
    expected_function_result = {
        "Code": {
            "Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip".format(
                _lambda_region
            ),
            "RepositoryType": "S3",
        },
        "Configuration": {
            "CodeSha256": hashlib.sha256(zip_content).hexdigest(),
            "CodeSize": len(zip_content),
            "Description": "test lambda function",
            "FunctionArn": "arn:aws:lambda:{}:{}:function:testFunction".format(
                _lambda_region, ACCOUNT_ID
            ),
            "FunctionName": "testFunction",
            "Handler": "lambda_function.lambda_handler",
            "MemorySize": 128,
            "Role": get_role_name(),
            "Runtime": "python2.7",
            "Timeout": 3,
            "Version": "$LATEST",
            "VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
            "State": "Active",
        },
        "ResponseMetadata": {"HTTPStatusCode": 200},
    }
    func = conn.list_functions()["Functions"][0]
    func.pop("LastModified")
    func.should.equal(expected_function_result["Configuration"])
    func = conn.get_function(FunctionName="testFunction")
    # this is hard to match against, so remove it
    func["ResponseMetadata"].pop("HTTPHeaders", None)
    # Botocore inserts retry attempts not seen in Python27
    func["ResponseMetadata"].pop("RetryAttempts", None)
    func["Configuration"].pop("LastModified")
    func.should.equal(expected_function_result)
    conn.delete_function(FunctionName="testFunction")
    conn.list_functions()["Functions"].should.have.length_of(0)
@mock_lambda
def test_invoke_lambda_error():
    """A handler that raises surfaces FunctionError == 'Handled' to the caller."""
    lambda_fx = """
def lambda_handler(event, context):
    raise Exception('failsauce')
"""
    zip_output = io.BytesIO()
    zip_file = zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED)
    zip_file.writestr("lambda_function.py", lambda_fx)
    zip_file.close()
    zip_output.seek(0)
    client = boto3.client("lambda", region_name="us-east-1")
    client.create_function(
        FunctionName="test-lambda-fx",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
        Code={"ZipFile": zip_output.read()},
    )
    result = client.invoke(
        FunctionName="test-lambda-fx", InvocationType="RequestResponse", LogType="Tail"
    )
    assert "FunctionError" in result
    assert result["FunctionError"] == "Handled"
@mock_lambda
@mock_s3
def test_tags():
    """
    test list_tags -> tag_resource -> list_tags -> tag_resource -> list_tags -> untag_resource -> list_tags integration
    """
    s3_conn = boto3.client("s3", _lambda_region)
    s3_conn.create_bucket(
        Bucket="test-bucket",
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    zip_content = get_test_zip_file2()
    s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content)
    conn = boto3.client("lambda", _lambda_region)
    function = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.handler",
        Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    # List tags when there are none
    conn.list_tags(Resource=function["FunctionArn"])["Tags"].should.equal(dict())
    # List tags when there is one
    conn.tag_resource(Resource=function["FunctionArn"], Tags=dict(spam="eggs"))[
        "ResponseMetadata"
    ]["HTTPStatusCode"].should.equal(200)
    conn.list_tags(Resource=function["FunctionArn"])["Tags"].should.equal(
        dict(spam="eggs")
    )
    # List tags when another has been added
    conn.tag_resource(Resource=function["FunctionArn"], Tags=dict(foo="bar"))[
        "ResponseMetadata"
    ]["HTTPStatusCode"].should.equal(200)
    conn.list_tags(Resource=function["FunctionArn"])["Tags"].should.equal(
        dict(spam="eggs", foo="bar")
    )
    # Untag resource (extra key "trolls" is silently ignored)
    conn.untag_resource(Resource=function["FunctionArn"], TagKeys=["spam", "trolls"])[
        "ResponseMetadata"
    ]["HTTPStatusCode"].should.equal(204)
    conn.list_tags(Resource=function["FunctionArn"])["Tags"].should.equal(
        dict(foo="bar")
    )
    # Untag a tag that does not exist (no error and no change)
    conn.untag_resource(Resource=function["FunctionArn"], TagKeys=["spam"])[
        "ResponseMetadata"
    ]["HTTPStatusCode"].should.equal(204)
@mock_lambda
def test_tags_not_found():
    """
    Test list_tags and tag_resource when the lambda with the given arn does not exist
    """
    conn = boto3.client("lambda", _lambda_region)
    conn.list_tags.when.called_with(
        Resource="arn:aws:lambda:{}:function:not-found".format(ACCOUNT_ID)
    ).should.throw(botocore.client.ClientError)
    conn.tag_resource.when.called_with(
        Resource="arn:aws:lambda:{}:function:not-found".format(ACCOUNT_ID),
        Tags=dict(spam="eggs"),
    ).should.throw(botocore.client.ClientError)
    conn.untag_resource.when.called_with(
        Resource="arn:aws:lambda:{}:function:not-found".format(ACCOUNT_ID),
        TagKeys=["spam"],
    ).should.throw(botocore.client.ClientError)
@mock_lambda
def test_invoke_async_function():
    """The legacy invoke_async API accepts the call and returns status 202."""
    conn = boto3.client("lambda", _lambda_region)
    conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file1()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    success_result = conn.invoke_async(
        FunctionName="testFunction", InvokeArgs=json.dumps({"test": "event"})
    )
    success_result["Status"].should.equal(202)
@mock_lambda
@freeze_time("2015-01-01 00:00:00")
def test_get_function_created_with_zipfile():
    """A function created from an inline zip is still reported with an
    S3-style code location by get_function."""
    conn = boto3.client("lambda", _lambda_region)
    zip_content = get_test_zip_file1()
    result = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.handler",
        Code={"ZipFile": zip_content},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.get_function(FunctionName="testFunction")
    response["Configuration"].pop("LastModified")
    response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
    assert len(response["Code"]) == 2
    assert response["Code"]["RepositoryType"] == "S3"
    assert response["Code"]["Location"].startswith(
        "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com".format(_lambda_region)
    )
    response["Configuration"].should.equal(
        {
            "CodeSha256": hashlib.sha256(zip_content).hexdigest(),
            "CodeSize": len(zip_content),
            "Description": "test lambda function",
            "FunctionArn": "arn:aws:lambda:{}:{}:function:testFunction".format(
                _lambda_region, ACCOUNT_ID
            ),
            "FunctionName": "testFunction",
            "Handler": "lambda_function.handler",
            "MemorySize": 128,
            "Role": get_role_name(),
            "Runtime": "python2.7",
            "Timeout": 3,
            "Version": "$LATEST",
            "VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
            "State": "Active",
        }
    )
@mock_lambda
def test_add_function_permission():
    """add_permission returns a policy statement echoing the requested action."""
    conn = boto3.client("lambda", _lambda_region)
    zip_content = get_test_zip_file1()
    conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=(get_role_name()),
        Handler="lambda_function.handler",
        Code={"ZipFile": zip_content},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.add_permission(
        FunctionName="testFunction",
        StatementId="1",
        Action="lambda:InvokeFunction",
        Principal="432143214321",
        SourceArn="arn:aws:lambda:us-west-2:account-id:function:helloworld",
        SourceAccount="123412341234",
        EventSourceToken="blah",
        Qualifier="2",
    )
    assert "Statement" in response
    res = json.loads(response["Statement"])
    assert res["Action"] == "lambda:InvokeFunction"
@mock_lambda
def test_get_function_policy():
    """get_policy returns the permission statements added via add_permission."""
    client = boto3.client("lambda", _lambda_region)
    zipped = get_test_zip_file1()
    client.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.handler",
        Code={"ZipFile": zipped},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    client.add_permission(
        FunctionName="testFunction",
        StatementId="1",
        Action="lambda:InvokeFunction",
        Principal="432143214321",
        SourceArn="arn:aws:lambda:us-west-2:account-id:function:helloworld",
        SourceAccount="123412341234",
        EventSourceToken="blah",
        Qualifier="2",
    )
    policy_response = client.get_policy(FunctionName="testFunction")
    assert "Policy" in policy_response
    statements = json.loads(policy_response["Policy"])["Statement"]
    assert statements[0]["Action"] == "lambda:InvokeFunction"
@mock_lambda
@mock_s3
def test_list_versions_by_function():
    """Publishing should surface $LATEST plus each published version, in order,
    from list_versions_by_function; Publish=False leaves only $LATEST."""
    s3_conn = boto3.client("s3", _lambda_region)
    s3_conn.create_bucket(
        Bucket="test-bucket",
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    zip_content = get_test_zip_file2()
    s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content)
    conn = boto3.client("lambda", _lambda_region)
    conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,  # publishes version 1 at creation time
    )
    res = conn.publish_version(FunctionName="testFunction")
    assert res["ResponseMetadata"]["HTTPStatusCode"] == 201
    # $LATEST plus two published versions: one from Publish=True, one explicit.
    versions = conn.list_versions_by_function(FunctionName="testFunction")
    assert len(versions["Versions"]) == 3
    assert versions["Versions"][0][
        "FunctionArn"
    ] == "arn:aws:lambda:us-west-2:{}:function:testFunction:$LATEST".format(ACCOUNT_ID)
    assert versions["Versions"][1][
        "FunctionArn"
    ] == "arn:aws:lambda:us-west-2:{}:function:testFunction:1".format(ACCOUNT_ID)
    assert versions["Versions"][2][
        "FunctionArn"
    ] == "arn:aws:lambda:us-west-2:{}:function:testFunction:2".format(ACCOUNT_ID)
    # A function created with Publish=False must only have $LATEST.
    conn.create_function(
        FunctionName="testFunction_2",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=False,
    )
    versions = conn.list_versions_by_function(FunctionName="testFunction_2")
    assert len(versions["Versions"]) == 1
    assert versions["Versions"][0][
        "FunctionArn"
    ] == "arn:aws:lambda:us-west-2:{}:function:testFunction_2:$LATEST".format(
        ACCOUNT_ID
    )
@mock_lambda
@mock_s3
def test_create_function_with_already_exists():
    """Re-creating a function under an existing name succeeds and echoes it."""
    s3 = boto3.client("s3", _lambda_region)
    s3.create_bucket(
        Bucket="test-bucket",
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    s3.put_object(Bucket="test-bucket", Key="test.zip", Body=get_test_zip_file2())
    client = boto3.client("lambda", _lambda_region)

    def create():
        # Identical parameters both times; the second call hits the existing name.
        return client.create_function(
            FunctionName="testFunction",
            Runtime="python2.7",
            Role=get_role_name(),
            Handler="lambda_function.lambda_handler",
            Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
            Description="test lambda function",
            Timeout=3,
            MemorySize=128,
            Publish=True,
        )

    create()
    response = create()
    assert response["FunctionName"] == "testFunction"
@mock_lambda
@mock_s3
def test_list_versions_by_function_for_nonexistent_function():
    """Listing versions of an unknown function yields an empty list."""
    client = boto3.client("lambda", _lambda_region)
    listing = client.list_versions_by_function(FunctionName="testFunction")
    assert len(listing["Versions"]) == 0
@mock_logs
@mock_lambda
@mock_sqs
def test_create_event_source_mapping():
    """A new SQS event source mapping starts enabled and wired to the function."""
    queue = boto3.resource("sqs", region_name="us-east-1").create_queue(
        QueueName="test-sqs-queue1"
    )
    client = boto3.client("lambda", region_name="us-east-1")
    function = client.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    queue_arn = queue.attributes["QueueArn"]
    mapping = client.create_event_source_mapping(
        EventSourceArn=queue_arn, FunctionName=function["FunctionArn"]
    )
    assert mapping["EventSourceArn"] == queue_arn
    assert mapping["FunctionArn"] == function["FunctionArn"]
    assert mapping["State"] == "Enabled"
@mock_logs
@mock_lambda
@mock_sqs
def test_invoke_function_from_sqs():
    """An SQS message on a mapped queue should trigger the function; its
    success marker then becomes visible in CloudWatch Logs."""
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName="test-sqs-queue1")
    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
    )
    assert response["EventSourceArn"] == queue.attributes["QueueArn"]
    assert response["State"] == "Enabled"
    sqs_client = boto3.client("sqs", region_name="us-east-1")
    sqs_client.send_message(QueueUrl=queue.url, MessageBody="test")
    expected_msg = "get_test_zip_file3 success"
    log_group = "/aws/lambda/testFunction"
    msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)
    # Bug fix: all_logs is a list; concatenating it to a str raised TypeError
    # instead of the intended assertion message. Use str(all_logs) like the
    # sibling DynamoDB-update test does.
    assert msg_showed_up, (
        expected_msg
        + " was not found after sending an SQS message. All logs: "
        + str(all_logs)
    )
@mock_logs
@mock_lambda
@mock_dynamodb2
def test_invoke_function_from_dynamodb_put():
    """Inserting into a stream-enabled DynamoDB table should trigger the
    mapped function; its success marker then appears in CloudWatch Logs."""
    dynamodb = boto3.client("dynamodb", region_name="us-east-1")
    table_name = "table_with_stream"
    table = dynamodb.create_table(
        TableName=table_name,
        KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
        StreamSpecification={
            "StreamEnabled": True,
            "StreamViewType": "NEW_AND_OLD_IMAGES",
        },
    )
    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function executed after a DynamoDB table is updated",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=table["TableDescription"]["LatestStreamArn"],
        FunctionName=func["FunctionArn"],
    )
    assert response["EventSourceArn"] == table["TableDescription"]["LatestStreamArn"]
    assert response["State"] == "Enabled"
    dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}})
    expected_msg = "get_test_zip_file3 success"
    log_group = "/aws/lambda/testFunction"
    msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)
    # Bug fix: all_logs is a list; "str + list" raised TypeError instead of the
    # intended assertion message. Match the sibling test's str(all_logs).
    assert msg_showed_up, (
        expected_msg + " was not found after a DDB insert. All logs: " + str(all_logs)
    )
@mock_logs
@mock_lambda
@mock_dynamodb2
def test_invoke_function_from_dynamodb_update():
    """Updating a stream-enabled table item should re-trigger the mapped
    function exactly once, without replaying the original insert record."""
    dynamodb = boto3.client("dynamodb", region_name="us-east-1")
    table_name = "table_with_stream"
    table = dynamodb.create_table(
        TableName=table_name,
        KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
        StreamSpecification={
            "StreamEnabled": True,
            "StreamViewType": "NEW_AND_OLD_IMAGES",
        },
    )
    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function executed after a DynamoDB table is updated",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    conn.create_event_source_mapping(
        EventSourceArn=table["TableDescription"]["LatestStreamArn"],
        FunctionName=func["FunctionArn"],
    )
    dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}})
    log_group = "/aws/lambda/testFunction"
    expected_msg = "get_test_zip_file3 success"
    msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)
    # Exact list membership: the handler apparently logs the record-count line
    # verbatim — TODO confirm against get_test_zip_file3's handler source.
    assert "Nr_of_records(1)" in all_logs, "Only one item should be inserted"
    dynamodb.update_item(
        TableName=table_name,
        Key={"id": {"S": "item 1"}},
        UpdateExpression="set #attr = :val",
        ExpressionAttributeNames={"#attr": "new_attr"},
        ExpressionAttributeValues={":val": {"S": "new_val"}},
    )
    # Second wait: the update event should invoke the function again.
    msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)
    assert msg_showed_up, (
        expected_msg + " was not found after updating DDB. All logs: " + str(all_logs)
    )
    assert "Nr_of_records(1)" in all_logs, "Only one item should be updated"
    assert (
        "Nr_of_records(2)" not in all_logs
    ), "The inserted item should not show up again"
def wait_for_log_msg(expected_msg, log_group, timeout=10):
    """Poll CloudWatch Logs until *expected_msg* appears as a log message.

    Scans every stream in *log_group* once per second for up to *timeout*
    seconds (default 10, matching the previous hard-coded value).

    Returns:
        (found, messages): ``found`` is True iff the exact message was seen;
        ``messages`` is every log message collected so far, in scan order.
    """
    logs_conn = boto3.client("logs", region_name="us-east-1")
    received_messages = []
    start = time.time()
    while (time.time() - start) < timeout:
        result = logs_conn.describe_log_streams(logGroupName=log_group)
        log_streams = result.get("logStreams")
        if not log_streams:
            # Group exists but no streams yet -- the function hasn't run.
            time.sleep(1)
            continue
        for log_stream in log_streams:
            result = logs_conn.get_log_events(
                logGroupName=log_group, logStreamName=log_stream["logStreamName"],
            )
            # Guard against a missing "events" key so the poll keeps going
            # instead of raising mid-wait.
            received_messages.extend(
                event["message"] for event in result.get("events") or []
            )
        if expected_msg in received_messages:
            return True, received_messages
        time.sleep(1)
    return False, received_messages
@mock_logs
@mock_lambda
@mock_sqs
def test_invoke_function_from_sqs_exception():
    """When the function raises, its SQS messages must remain on the queue
    (i.e. the failed invocation must not delete them)."""
    logs_conn = boto3.client("logs", region_name="us-east-1")
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName="test-sqs-queue1")
    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file4()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
    )
    assert response["EventSourceArn"] == queue.attributes["QueueArn"]
    assert response["State"] == "Enabled"
    # Enqueue three distinct messages so we can later verify all three survive.
    entries = []
    for i in range(3):
        body = {"uuid": str(uuid.uuid4()), "test": "test_{}".format(i)}
        entry = {"Id": str(i), "MessageBody": json.dumps(body)}
        entries.append(entry)
    queue.send_messages(Entries=entries)
    # Poll up to 30s for the failure marker the handler logs when it raises;
    # only the first log stream is inspected on each pass.
    start = time.time()
    while (time.time() - start) < 30:
        result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction")
        log_streams = result.get("logStreams")
        if not log_streams:
            time.sleep(1)
            continue
        assert len(log_streams) >= 1
        result = logs_conn.get_log_events(
            logGroupName="/aws/lambda/testFunction",
            logStreamName=log_streams[0]["logStreamName"],
        )
        for event in result.get("events"):
            if "I failed!" in event["message"]:
                messages = queue.receive_messages(MaxNumberOfMessages=10)
                # Verify messages are still visible and unprocessed
                assert len(messages) == 3
                return
        time.sleep(1)
    # Marker never showed up within the window -> fail the test explicitly.
    assert False, "Test Failed"
@mock_logs
@mock_lambda
@mock_sqs
def test_list_event_source_mappings():
    """list_event_source_mappings filters results by EventSourceArn."""
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName="test-sqs-queue1")
    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
    )
    # An ARN with no mappings yields an empty result set.
    mappings = conn.list_event_source_mappings(EventSourceArn="123")
    assert len(mappings["EventSourceMappings"]) == 0
    # The real queue ARN returns exactly the mapping created above.
    mappings = conn.list_event_source_mappings(
        EventSourceArn=queue.attributes["QueueArn"]
    )
    assert len(mappings["EventSourceMappings"]) == 1
    assert mappings["EventSourceMappings"][0]["UUID"] == response["UUID"]
    assert mappings["EventSourceMappings"][0]["FunctionArn"] == func["FunctionArn"]
@mock_lambda
@mock_sqs
def test_get_event_source_mapping():
    """get_event_source_mapping resolves a mapping by UUID and raises a
    ClientError for unknown UUIDs."""
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName="test-sqs-queue1")
    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
    )
    mapping = conn.get_event_source_mapping(UUID=response["UUID"])
    assert mapping["UUID"] == response["UUID"]
    assert mapping["FunctionArn"] == func["FunctionArn"]
    # A made-up UUID must surface as a ClientError.
    conn.get_event_source_mapping.when.called_with(UUID="1").should.throw(
        botocore.client.ClientError
    )
@mock_lambda
@mock_sqs
def test_update_event_source_mapping():
    """update_event_source_mapping can disable a mapping and repoint it at a
    different function in one call."""
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName="test-sqs-queue1")
    conn = boto3.client("lambda", region_name="us-east-1")
    func1 = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    # Second function acts as the retarget destination.
    func2 = conn.create_function(
        FunctionName="testFunction2",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func1["FunctionArn"]
    )
    # Defaults: BatchSize 10, Enabled.
    assert response["FunctionArn"] == func1["FunctionArn"]
    assert response["BatchSize"] == 10
    assert response["State"] == "Enabled"
    mapping = conn.update_event_source_mapping(
        UUID=response["UUID"], Enabled=False, BatchSize=15, FunctionName="testFunction2"
    )
    assert mapping["UUID"] == response["UUID"]
    assert mapping["FunctionArn"] == func2["FunctionArn"]
    assert mapping["State"] == "Disabled"
@mock_lambda
@mock_sqs
def test_delete_event_source_mapping():
    """Deleting a mapping returns it in the 'Deleting' state and makes it
    unreachable by UUID afterwards."""
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName="test-sqs-queue1")
    conn = boto3.client("lambda", region_name="us-east-1")
    func1 = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func1["FunctionArn"]
    )
    assert response["FunctionArn"] == func1["FunctionArn"]
    assert response["BatchSize"] == 10
    assert response["State"] == "Enabled"
    response = conn.delete_event_source_mapping(UUID=response["UUID"])
    assert response["State"] == "Deleting"
    # Once deleted, looking the mapping up again must raise.
    conn.get_event_source_mapping.when.called_with(UUID=response["UUID"]).should.throw(
        botocore.client.ClientError
    )
@mock_lambda
@mock_s3
def test_update_configuration():
    """update_function_configuration changes only the fields supplied;
    untouched fields (MemorySize here) keep their original values."""
    s3_conn = boto3.client("s3", _lambda_region)
    s3_conn.create_bucket(
        Bucket="test-bucket",
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    zip_content = get_test_zip_file2()
    s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content)
    conn = boto3.client("lambda", _lambda_region)
    fxn = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
        Environment={"Variables": {"test_old_environment": "test_old_value"}},
    )
    # Sanity-check the initial configuration before updating.
    assert fxn["Description"] == "test lambda function"
    assert fxn["Handler"] == "lambda_function.lambda_handler"
    assert fxn["MemorySize"] == 128
    assert fxn["Runtime"] == "python2.7"
    assert fxn["Timeout"] == 3
    updated_config = conn.update_function_configuration(
        FunctionName="testFunction",
        Description="updated test lambda function",
        Handler="lambda_function.new_lambda_handler",
        Runtime="python3.6",
        Timeout=7,
        Environment={"Variables": {"test_environment": "test_value"}},
    )
    assert updated_config["ResponseMetadata"]["HTTPStatusCode"] == 200
    assert updated_config["Description"] == "updated test lambda function"
    assert updated_config["Handler"] == "lambda_function.new_lambda_handler"
    # MemorySize was not part of the update, so it must be unchanged.
    assert updated_config["MemorySize"] == 128
    assert updated_config["Runtime"] == "python3.6"
    assert updated_config["Timeout"] == 7
    # The new environment fully replaces the old one.
    assert updated_config["Environment"]["Variables"] == {
        "test_environment": "test_value"
    }
@mock_lambda
def test_update_function_zip():
    """update_function_code with Publish=True creates version 2, whose
    configuration reflects the new zip's hash and size."""
    conn = boto3.client("lambda", _lambda_region)
    zip_content_one = get_test_zip_file1()
    fxn = conn.create_function(
        FunctionName="testFunctionZip",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": zip_content_one},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    zip_content_two = get_test_zip_file2()
    # The update response is unused (assertions run against get_function),
    # so don't bind it.
    conn.update_function_code(
        FunctionName="testFunctionZip", ZipFile=zip_content_two, Publish=True
    )
    response = conn.get_function(FunctionName="testFunctionZip", Qualifier="2")
    # LastModified is nondeterministic; drop it before the exact-dict compare.
    response["Configuration"].pop("LastModified")
    response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
    assert len(response["Code"]) == 2
    assert response["Code"]["RepositoryType"] == "S3"
    assert response["Code"]["Location"].startswith(
        "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com".format(_lambda_region)
    )
    response["Configuration"].should.equal(
        {
            "CodeSha256": hashlib.sha256(zip_content_two).hexdigest(),
            "CodeSize": len(zip_content_two),
            "Description": "test lambda function",
            "FunctionArn": "arn:aws:lambda:{}:{}:function:testFunctionZip:2".format(
                _lambda_region, ACCOUNT_ID
            ),
            "FunctionName": "testFunctionZip",
            "Handler": "lambda_function.lambda_handler",
            "MemorySize": 128,
            "Role": fxn["Role"],
            "Runtime": "python2.7",
            "Timeout": 3,
            "Version": "2",
            "VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
            "State": "Active",
        }
    )
@mock_lambda
@mock_s3
def test_update_function_s3():
    """Updating code from a new S3 key with Publish=True creates version 2,
    whose configuration reflects the new zip's hash and size."""
    s3_conn = boto3.client("s3", _lambda_region)
    s3_conn.create_bucket(
        Bucket="test-bucket",
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    zip_content = get_test_zip_file1()
    s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content)
    conn = boto3.client("lambda", _lambda_region)
    fxn = conn.create_function(
        FunctionName="testFunctionS3",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    zip_content_two = get_test_zip_file2()
    s3_conn.put_object(Bucket="test-bucket", Key="test2.zip", Body=zip_content_two)
    # The update response is unused (assertions run against get_function),
    # so don't bind it.
    conn.update_function_code(
        FunctionName="testFunctionS3",
        S3Bucket="test-bucket",
        S3Key="test2.zip",
        Publish=True,
    )
    response = conn.get_function(FunctionName="testFunctionS3", Qualifier="2")
    # LastModified is nondeterministic; drop it before the exact-dict compare.
    response["Configuration"].pop("LastModified")
    response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
    assert len(response["Code"]) == 2
    assert response["Code"]["RepositoryType"] == "S3"
    assert response["Code"]["Location"].startswith(
        "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com".format(_lambda_region)
    )
    response["Configuration"].should.equal(
        {
            "CodeSha256": hashlib.sha256(zip_content_two).hexdigest(),
            "CodeSize": len(zip_content_two),
            "Description": "test lambda function",
            "FunctionArn": "arn:aws:lambda:{}:{}:function:testFunctionS3:2".format(
                _lambda_region, ACCOUNT_ID
            ),
            "FunctionName": "testFunctionS3",
            "Handler": "lambda_function.lambda_handler",
            "MemorySize": 128,
            "Role": fxn["Role"],
            "Runtime": "python2.7",
            "Timeout": 3,
            "Version": "2",
            "VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
            "State": "Active",
        }
    )
@mock_lambda
def test_create_function_with_invalid_arn():
    """A role value that is not an ARN fails with a validation message."""
    err = create_invalid_lambda("test-iam-role")
    # Raw string: the expected message embeds regex escapes (\d, \-) that are
    # invalid escape sequences in a normal literal (DeprecationWarning today,
    # SyntaxWarning on 3.12+). The raw literal's value is byte-identical.
    err.exception.response["Error"]["Message"].should.equal(
        r"1 validation error detected: Value 'test-iam-role' at 'role' failed to satisfy constraint: Member must satisfy regular expression pattern: arn:(aws[a-zA-Z-]*)?:iam::(\d{12}):role/?[a-zA-Z_0-9+=,.@\-_/]+"
    )
@mock_lambda
def test_create_function_with_arn_from_different_account():
    """Passing a role owned by a different account must be rejected."""
    error = create_invalid_lambda("arn:aws:iam::000000000000:role/example_role")
    message = error.exception.response["Error"]["Message"]
    message.should.equal("Cross-account pass role is not allowed.")
@mock_lambda
def test_create_function_with_unknown_arn():
    """A well-formed but nonexistent role ARN must be rejected."""
    unknown_role = "arn:aws:iam::" + str(ACCOUNT_ID) + ":role/service-role/unknown_role"
    error = create_invalid_lambda(unknown_role)
    message = error.exception.response["Error"]["Message"]
    message.should.equal(
        "The role defined for the function cannot be assumed by Lambda."
    )
@mock_lambda
def test_remove_function_permission():
    """remove_permission deletes the statement, leaving an empty policy."""
    conn = boto3.client("lambda", _lambda_region)
    zip_content = get_test_zip_file1()
    conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=(get_role_name()),
        Handler="lambda_function.handler",
        Code={"ZipFile": zip_content},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    conn.add_permission(
        FunctionName="testFunction",
        StatementId="1",
        Action="lambda:InvokeFunction",
        Principal="432143214321",
        SourceArn="arn:aws:lambda:us-west-2:account-id:function:helloworld",
        SourceAccount="123412341234",
        EventSourceToken="blah",
        Qualifier="2",
    )
    # Removal must address the same StatementId and Qualifier used above.
    remove = conn.remove_permission(
        FunctionName="testFunction", StatementId="1", Qualifier="2",
    )
    remove["ResponseMetadata"]["HTTPStatusCode"].should.equal(204)
    policy = conn.get_policy(FunctionName="testFunction", Qualifier="2")["Policy"]
    policy = json.loads(policy)
    policy["Statement"].should.equal([])
def create_invalid_lambda(role):
    """Attempt create_function with the given *role* and return the
    assert_raises context manager holding the resulting ClientError.

    Used by the invalid/foreign/unknown-role tests to inspect the error
    message via ``err.exception.response``.
    """
    conn = boto3.client("lambda", _lambda_region)
    zip_content = get_test_zip_file1()
    with assert_raises(ClientError) as err:
        conn.create_function(
            FunctionName="testFunction",
            Runtime="python2.7",
            Role=role,
            Handler="lambda_function.handler",
            Code={"ZipFile": zip_content},
            Description="test lambda function",
            Timeout=3,
            MemorySize=128,
            Publish=True,
        )
    return err
def get_role_name():
    """Return the ARN of the shared test IAM role, creating it on first use."""
    with mock_iam():
        iam = boto3.client("iam", region_name=_lambda_region)
        try:
            role = iam.get_role(RoleName="my-role")
        except ClientError:
            # Role doesn't exist yet in the mocked IAM backend -- create it.
            role = iam.create_role(
                RoleName="my-role",
                AssumeRolePolicyDocument="some policy",
                Path="/my-path/",
            )
        return role["Role"]["Arn"]
| 32.91404
| 212
| 0.648472
| 6,302
| 57,435
| 5.689622
| 0.071247
| 0.04334
| 0.023427
| 0.038013
| 0.810492
| 0.780873
| 0.754211
| 0.71313
| 0.67799
| 0.662651
| 0
| 0.022658
| 0.21466
| 57,435
| 1,744
| 213
| 32.932913
| 0.77227
| 0.018543
| 0
| 0.663244
| 0
| 0.002053
| 0.251123
| 0.059393
| 0
| 0
| 0
| 0
| 0.052019
| 1
| 0.034908
| false
| 0.000684
| 0.01232
| 0
| 0.057495
| 0.001369
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e67bff13a4fe8a189f4b530ef306940f105926ac
| 610
|
py
|
Python
|
zillow/tests/string_to_long.py
|
gsathya/dsalgo
|
61c89ec597ced3e69bfbb438fd856c8fc5f20aba
|
[
"MIT"
] | 2
|
2017-02-25T04:05:29.000Z
|
2018-05-10T16:54:31.000Z
|
zillow/tests/string_to_long.py
|
gsathya/dsalgo
|
61c89ec597ced3e69bfbb438fd856c8fc5f20aba
|
[
"MIT"
] | null | null | null |
zillow/tests/string_to_long.py
|
gsathya/dsalgo
|
61c89ec597ced3e69bfbb438fd856c8fc5f20aba
|
[
"MIT"
] | null | null | null |
import unittest
import stringToLong
class TestStringToLong(unittest.TestCase):
    """Unit tests for stringToLong.convert (string -> integer conversion)."""

    def setUp(self):
        # Shortcut to the function under test.
        self.convert = stringToLong.convert

    def test_conversion(self):
        # Positive and negative decimal strings round-trip to ints.
        self.assertEqual(self.convert("123"), 123)
        self.assertEqual(self.convert("-10"), -10)

    def test_exceptions(self):
        # Non-string inputs raise TypeError; an empty string raises ValueError.
        self.assertRaises(TypeError, self.convert, 123)
        self.assertRaises(TypeError, self.convert, 123.01)
        self.assertRaises(ValueError, self.convert, '')

    def test_zeroes(self):
        # Leading zeroes collapse to zero.
        self.assertEqual(self.convert("0"), 0)
        self.assertEqual(self.convert("000000"), 0)
| 29.047619
| 58
| 0.677049
| 69
| 610
| 5.942029
| 0.333333
| 0.214634
| 0.185366
| 0.253659
| 0.336585
| 0.190244
| 0
| 0
| 0
| 0
| 0
| 0.05499
| 0.195082
| 610
| 20
| 59
| 30.5
| 0.780041
| 0
| 0
| 0
| 0
| 0
| 0.021311
| 0
| 0
| 0
| 0
| 0
| 0.466667
| 1
| 0.266667
| false
| 0
| 0.133333
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e682ba59ed83888ba427497ec40d45d5665f7319
| 2,052
|
py
|
Python
|
_ctfs/wargamesmy-21/easyrsa/solve.py
|
daniellimws/daniellimws.github.io
|
464548e058ca423548cbe95c4ee38f856f9185c2
|
[
"MIT"
] | 1
|
2019-01-31T16:50:12.000Z
|
2019-01-31T16:50:12.000Z
|
_ctfs/wargamesmy-21/easyrsa/solve.py
|
daniellimws/daniellimws.github.io
|
464548e058ca423548cbe95c4ee38f856f9185c2
|
[
"MIT"
] | null | null | null |
_ctfs/wargamesmy-21/easyrsa/solve.py
|
daniellimws/daniellimws.github.io
|
464548e058ca423548cbe95c4ee38f856f9185c2
|
[
"MIT"
] | 3
|
2019-03-04T12:46:18.000Z
|
2021-05-18T16:10:44.000Z
|
#!/usr/bin/env python3
# CTF RSA challenge: the "hint" value provided alongside (n, c) is used
# directly as the totient, so the private exponent can be recovered.
from Crypto.Util.number import *
# Public modulus.
n = 18304313499627278872497347106781088765844971752924494936581137294399251598122054491970352624997804891597368572151975199012875361892862378834285683652903007044947055619342684830043887388520198889986897901082447405903406407594879218477486010303949150584062013743002102223027695933305578474279402543515765336412208006859054393534570841203144021034140207743659756456355176186058669431050817944354369543943180071004478988621167578794309738088175437410128123233192513558339607610423092758817077116782370405814263505215749482674852889252017589696530422547393123978624883889069727557585210879305319883301371816205528090434407
# Ciphertext.
c = 3265951707172242709727472739386873494703249912285505265371146393196030372413781803930164663149372228439333733865082330011642704913091249653497200378094433504467567333190234739925103462552251607608028955840038227014596825040840010571742127988310589087523302675740007385714132298726983039469798322204553974625219330303100743678921966397869657279014124458772194638540183755564805923024931908465309561190023476921354145819156231466029924640305856472832006337877361608232560677427859686638842493170149297308146210190273895689592042634336709605961185187133364422992915741362901814469213680894412670787153775867317785600107
# Challenge-supplied hint; treated below as Euler's totient of n.
hint = 18304313499627278872497347106781088765844971752924494936581137294399251598122054491970352624997804891597368572151975199012875361892862378834285683652903007044947055619342684830043887388520198889986897901082447405903406407594879218477486010303949150584062013743002102223027695933305578474279402543515765336411937188290852419206090437467585625464287266017790518369458197274062030222343619328717682343452270958441659650865431008054752337347886117365637267521107136683063583439472152237290665940293045834312267414723015783277860478421527455012050450676028820776344490403116318288525984705526824558715069751252595489120600
# Standard RSA public exponent (65537).
e = 0x10001
phi = hint
# Modular inverse via three-argument pow -- requires Python 3.8+.
d = pow(e, -1, phi)
m = pow(c, d, n)
print(long_to_bytes(m))
# wgmy{227d1562df0d940d94d75b0512f4bc6c}
| 136.8
| 624
| 0.966862
| 36
| 2,052
| 55.055556
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.932506
| 0.018031
| 2,052
| 15
| 625
| 136.8
| 0.051117
| 0.02924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003516
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.111111
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e6c4bfe50731e3394436728f8d646b94a83dbcd6
| 203
|
py
|
Python
|
src/wai/annotations/core/logging/__init__.py
|
waikato-ufdl/wai-annotations-core
|
bac3429e9488efb456972c74f9d462f951c4af3d
|
[
"Apache-2.0"
] | null | null | null |
src/wai/annotations/core/logging/__init__.py
|
waikato-ufdl/wai-annotations-core
|
bac3429e9488efb456972c74f9d462f951c4af3d
|
[
"Apache-2.0"
] | 3
|
2021-06-30T23:42:47.000Z
|
2022-03-01T03:45:07.000Z
|
src/wai/annotations/core/logging/__init__.py
|
waikato-ufdl/wai-annotations-core
|
bac3429e9488efb456972c74f9d462f951c4af3d
|
[
"Apache-2.0"
] | null | null | null |
"""
Core utilities for logging in the library.
"""
from ._LoggingEnabled import LoggingEnabled
from ._root_logger import ROOT_LOGGER_NAME, get_library_root_logger
from ._StreamLogger import StreamLogger
| 29
| 67
| 0.837438
| 26
| 203
| 6.192308
| 0.576923
| 0.186335
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108374
| 203
| 6
| 68
| 33.833333
| 0.889503
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e6e25fa4d874099dc11b086a96a9f196c58d84fc
| 33
|
py
|
Python
|
agents/utils.py
|
jayminSuhagiya/Turing-Q-Learning
|
b2b378fcbcdffed7d3aa12ef3c2311a0a7b89076
|
[
"MIT"
] | null | null | null |
agents/utils.py
|
jayminSuhagiya/Turing-Q-Learning
|
b2b378fcbcdffed7d3aa12ef3c2311a0a7b89076
|
[
"MIT"
] | null | null | null |
agents/utils.py
|
jayminSuhagiya/Turing-Q-Learning
|
b2b378fcbcdffed7d3aa12ef3c2311a0a7b89076
|
[
"MIT"
] | 1
|
2021-11-21T16:22:22.000Z
|
2021-11-21T16:22:22.000Z
|
def visualize_run(dir):
    """Placeholder for visualizing a training run from *dir*; not implemented.

    NOTE(review): the parameter name shadows the builtin ``dir``; currently a
    no-op stub, so harmless, but worth renaming when implemented.
    """
    pass
| 11
| 23
| 0.69697
| 5
| 33
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 33
| 2
| 24
| 16.5
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
fc206884f08ca720933b55ce52c3e928f8ce1809
| 93,414
|
py
|
Python
|
vyvyan/API_userdata/__init__.py
|
downneck/vyvyan
|
e5375b9ed049d47cff69618359ce45159b53853b
|
[
"Apache-2.0"
] | null | null | null |
vyvyan/API_userdata/__init__.py
|
downneck/vyvyan
|
e5375b9ed049d47cff69618359ce45159b53853b
|
[
"Apache-2.0"
] | null | null | null |
vyvyan/API_userdata/__init__.py
|
downneck/vyvyan
|
e5375b9ed049d47cff69618359ce45159b53853b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 WebEffects, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
vyvyan.API_userdata
Package for interacting with user and group data in vyvyan
"""
from sqlalchemy import or_, desc, MetaData
import sys
import passlib.hash
import vyvyan
from vyvyan.vyvyan_models import *
from vyvyan.common import *
from vyvyan.validate import *
class UserdataError(Exception):
    """Base exception for errors raised by the API_userdata module."""
    pass
class API_userdata:
    def __init__(self, cfg):
        """
        [description]
        construct the userdata API module: store the app config and publish
        the self-describing metadata the framework uses to validate query
        keys and to generate the CLI/REST interface
        [parameter info]
        required:
            cfg: the global vyvyan config object (supplies the defaults
                 interpolated into the descriptions below)
        """
        self.cfg = cfg
        self.version = 1
        self.namespace = 'API_userdata'
        # one metadata entry per public method; 'ol' is the single-letter
        # command-line option, 'vartype' the expected argument type
        self.metadata = {
            'config': {
                'description': 'allows for the creation and manipulation of users and groups within vyvyan',
                'shortname': 'ud',
                'module_dependencies': {
                    'common': 1,
                },
            },
            'methods': {
                'list_users': {
                    'description': 'list users',
                    'short': 'lsu',
                    'rest_type': 'GET',
                    'admin_only': False,
                    'required_args': {
                    },
                    'optional_args': {
                        'min': 1,
                        'max': 1,
                        'args': {
                            'all': {
                                'vartype': 'bool',
                                'desc': 'all domains',
                                'ol': 'a',
                            },
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain of the user',
                                'ol': 'd',
                            },
                        },
                    },
                    'return': {
                        # NOTE(review): duplicate 'domain' key below; in a dict
                        # literal the second entry silently wins.  This appears
                        # to be illustrative return-shape documentation only.
                        'domain': [
                            'username',
                            'username',
                        ],
                        'domain': [
                            'username',
                            'username',
                        ],
                    },
                },
                'list_groups': {
                    'description': 'list groups',
                    'short': 'lsg',
                    'rest_type': 'GET',
                    'admin_only': False,
                    'required_args': {
                    },
                    'optional_args': {
                        'min': 1,
                        'max': 1,
                        'args': {
                            'all': {
                                'vartype': 'bool',
                                'desc': 'all domains',
                                'ol': 'a',
                            },
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain of the user',
                                'ol': 'd',
                            },
                        },
                    },
                    'return': {
                        # NOTE(review): duplicate 'domain' key, as above.
                        'domain': [
                            'groupname',
                            'groupname',
                        ],
                        'domain': [
                            'groupname',
                            'groupname',
                        ],
                    },
                },
                'list_domains': {
                    'description': 'list domains (does not take arguments)',
                    'short': 'lsd',
                    'rest_type': 'GET',
                    'admin_only': False,
                    'required_args': {
                    },
                    'optional_args': {
                    },
                    'return': [
                        'domain',
                        'domain',
                    ],
                },
                'udisplay': {
                    'description': 'display a user\'s info',
                    'short': 'ud',
                    'rest_type': 'GET',
                    'admin_only': False,
                    'required_args': {
                        'args': {
                            'username': {
                                'vartype': 'str',
                                'desc': 'username of the user',
                                'ol': 'u',
                            },
                        },
                    },
                    'optional_args': {
                        'min': 0,
                        'max': 1,
                        'args': {
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain of the user',
                                'ol': 'd',
                            },
                        },
                    },
                    'return': [
                        {
                            'user': 'ORMobject',
                            'groups': [
                                'group ORMobject',
                                'group ORMobject',
                            ],
                        },
                    ],
                },
                'uadd': {
                    'description': 'create a user entry in the user table',
                    'short': 'ua',
                    'rest_type': 'POST',
                    'admin_only': True,
                    'required_args': {
                        'args': {
                            'username': {
                                'vartype': 'str',
                                'desc': 'username of the user to add to the database',
                                'ol': 'u',
                            },
                            'password': {
                                'vartype': 'str',
                                'desc': 'user\'s password',
                                'ol': 'p',
                            },
                        },
                    },
                    'optional_args': {
                        'min': 0,
                        'max': 9,
                        'args': {
                            'domain': {
                                'vartype': 'str',
                                'desc': "LDAP domain to add user under (default: %s)" % cfg.default_domain,
                                'ol': 'd',
                            },
                            'first_name': {
                                'vartype': 'str',
                                'desc': 'user\'s first name (default John)',
                                'ol': 'f',
                            },
                            'last_name': {
                                'vartype': 'str',
                                'desc': 'user\'s last name (default Doe)',
                                'ol': 'l',
                            },
                            'ssh_key': {
                                'vartype': 'file',
                                'desc': 'a file containing the user\'s ssh key(s)',
                                'ol': 'k',
                            },
                            'shell': {
                                'vartype': 'str',
                                'desc': "user's shell (default: %s)" % cfg.shell,
                                'ol': 's',
                            },
                            'email_address': {
                                'vartype': 'str',
                                'desc': "user's email address (default: username@%s)" % cfg.default_domain,
                                'ol': 'e',
                            },
                            'home_dir': {
                                'vartype': 'str',
                                'desc': "user's home directory (default: %s/username)" % cfg.hdir,
                                'ol': 'o',
                            },
                            'user_type': {
                                'vartype': 'str',
                                'desc': "user type, pick one of: %s (default: %s)" % (" ".join(cfg.user_types), cfg.def_user_type),
                                'ol': 't',
                            },
                            'uid': {
                                'vartype': 'str',
                                'desc': "user's uid (default will pick the next available uid)",
                                'ol': 'i',
                            },
                        },
                    },
                    'return': {
                        'string': 'success',
                    },
                },
                'uremove': {
                    'description': 'delete a user entry from the users table',
                    'short': 'urm',
                    'rest_type': 'DELETE',
                    'admin_only': True,
                    'required_args': {
                        'args': {
                            'username': {
                                'vartype': 'str',
                                'desc': 'username of the user to delete from the database',
                                'ol': 'u',
                            },
                        },
                    },
                    'optional_args': {
                        'min': 0,
                        'max': 1,
                        'args': {
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain of the user to delete',
                                'ol': 'd',
                            },
                        },
                    },
                    'return': {
                        'string': 'success',
                    },
                },
                'umodify': {
                    'description': 'modify an existing user entry',
                    'short': 'um',
                    'rest_type': 'PUT',
                    'admin_only': True,
                    'required_args': {
                        'args': {
                            'username': {
                                'vartype': 'str',
                                'desc': 'username of the user to modify',
                                'ol': 'u',
                            },
                        },
                    },
                    'optional_args': {
                        'min': 1,
                        'max': 10,
                        'args': {
                            'password': {
                                'vartype': 'str',
                                'desc': 'user\'s password',
                                'ol': 'p',
                            },
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain of the user to modify',
                                'ol': 'd',
                            },
                            'first_name': {
                                'vartype': 'str',
                                'desc': 'user\'s first name (default John)',
                                'ol': 'f',
                            },
                            'last_name': {
                                'vartype': 'str',
                                'desc': 'user\'s last name (default Doe)',
                                'ol': 'l',
                            },
                            'ssh_key': {
                                'vartype': 'file',
                                'desc': 'a file containing the user\'s ssh key(s)',
                                'ol': 'k',
                            },
                            'shell': {
                                'vartype': 'str',
                                'desc': "user's shell (default: %s)" % cfg.shell,
                                'ol': 's',
                            },
                            'email_address': {
                                'vartype': 'str',
                                'desc': "user's email address (default: username@%s)" % cfg.default_domain,
                                'ol': 'e',
                            },
                            'home_dir': {
                                'vartype': 'str',
                                'desc': "user's home directory (default: %s/username)" % cfg.hdir,
                                'ol': 'o',
                            },
                            'user_type': {
                                'vartype': 'str',
                                'desc': "user type, pick one of: %s (default: %s)" % (" ".join(cfg.user_types), cfg.def_user_type),
                                'ol': 't',
                            },
                            'uid': {
                                'vartype': 'str',
                                'desc': "user's uid (default will pick the next available uid)",
                                'ol': 'i',
                            },
                            'active': {
                                'vartype': 'str',
                                'desc': "true/false (or t/f), activate/deactivate. deactivate the user to disable without removing the user info from vyvyan",
                                'ol': 'a',
                            },
                        },
                    },
                    'return': {
                        'string': 'success',
                    },
                },
                'uclone': {
                    'description': 'clone a user from one domain to another',
                    'short': 'uc',
                    'rest_type': 'POST',
                    'admin_only': True,
                    'required_args': {
                        'args': {
                            'username': {
                                'vartype': 'str',
                                'desc': 'username of the user to clone from',
                                'ol': 'u',
                            },
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain to clone the user from',
                                'ol': 'd',
                            },
                            'newdomain': {
                                'vartype': 'str',
                                'desc': 'domain to clone the user into',
                                'ol': 'n',
                            },
                        },
                    },
                    'optional_args': {
                    },
                    'return': {
                        'string': 'success',
                    },
                },
                'gdisplay': {
                    'description': 'display a group\'s info',
                    'short': 'gd',
                    'rest_type': 'GET',
                    'admin_only': False,
                    'required_args': {
                        'args': {
                            'groupname': {
                                'vartype': 'str',
                                'desc': 'name of the group',
                                'ol': 'g',
                            },
                        },
                    },
                    'optional_args': {
                        'min': 0,
                        'max': 1,
                        'args': {
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain of the group',
                                'ol': 'd',
                            },
                        },
                    },
                    'return': {
                        'groupname': 'ORMobject',
                        'users': [
                            'user ORMobject',
                            'user ORMobject',
                        ],
                    },
                },
                'gadd': {
                    'description': 'create a group entry in the group table',
                    'short': 'ga',
                    'rest_type': 'POST',
                    'admin_only': True,
                    'required_args': {
                        'args': {
                            'groupname': {
                                'vartype': 'str',
                                'desc': 'group of the group to add to the database',
                                'ol': 'g',
                            },
                        },
                    },
                    'optional_args': {
                        'min': 0,
                        'max': 4,
                        'args': {
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain of the group',
                                'ol': 'd',
                            },
                            'description': {
                                'vartype': 'str',
                                'desc': 'a description of the group',
                                'ol': 'e',
                            },
                            'sudo_cmds': {
                                'vartype': 'str',
                                'desc': 'comma-delimited commands users of this group are allowd to run as root',
                                'ol': 's',
                            },
                            'gid': {
                                'vartype': 'str',
                                'desc': 'group id number to assign to the group',
                                'ol': 'i',
                            },
                        },
                    },
                    'return': {
                        'string': 'success',
                    },
                },
                'gremove': {
                    'description': 'delete a group entry from the groups table',
                    'short': 'grm',
                    'rest_type': 'DELETE',
                    'admin_only': True,
                    'required_args': {
                        'args': {
                            'groupname': {
                                'vartype': 'str',
                                'desc': 'groupname of the group to delete from the database',
                                'ol': 'g',
                            },
                        },
                    },
                    'optional_args': {
                        'min': 0,
                        'max': 1,
                        'args': {
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain to delete the group from',
                                'ol': 'd',
                            },
                        },
                    },
                    'return': {
                        'string': 'success',
                    },
                },
                'gmodify': {
                    'description': 'modify an existing group entry',
                    'short': 'gm',
                    'rest_type': 'PUT',
                    'admin_only': True,
                    'required_args': {
                        'args': {
                            'groupname': {
                                'vartype': 'str',
                                'desc': 'groupname of the group to modify',
                                'ol': 'g',
                            },
                        },
                    },
                    'optional_args': {
                        'min': 1,
                        'max': 4,
                        'args': {
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain to which the group belongs',
                                'ol': 'd',
                            },
                            'description': {
                                'vartype': 'str',
                                'desc': 'a description of the group',
                                'ol': 'e',
                            },
                            'sudo_cmds': {
                                'vartype': 'str',
                                'desc': 'comma-delimited commands users of this group are allowd to run as root',
                                'ol': 's',
                            },
                            'gid': {
                                'vartype': 'str',
                                'desc': 'group id number to assign to the group',
                                'ol': 'i',
                            },
                        },
                    },
                    'return': {
                        'string': 'success',
                    },
                },
                'gclone': {
                    'description': 'clone a group from one domain to another',
                    'short': 'gc',
                    'rest_type': 'POST',
                    'admin_only': True,
                    'required_args': {
                        'args': {
                            'groupname': {
                                'vartype': 'str',
                                'desc': 'groupname of the group to clone from',
                                'ol': 'g',
                            },
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain to clone from',
                                'ol': 'd',
                            },
                            'newdomain': {
                                'vartype': 'str',
                                'desc': 'new domain to clone the group into',
                                'ol': 'n',
                            },
                        },
                    },
                    'optional_args': {
                    },
                    'return': {
                        'string': 'success',
                    },
                },
                'utog': {
                    'description': 'map a username to groupname in the same domain',
                    'short': 'ut',
                    'rest_type': 'POST',
                    'admin_only': True,
                    'required_args': {
                        'args': {
                            'username': {
                                'vartype': 'str',
                                'desc': 'username of the user to map',
                                'ol': 'u',
                            },
                            'groupname': {
                                'vartype': 'str',
                                'desc': 'groupname to map the user to',
                                'ol': 'g',
                            },
                        },
                    },
                    'optional_args': {
                        'min': 0,
                        'max': 1,
                        'args': {
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain within which to map',
                                'ol': 'd',
                            },
                        },
                    },
                    'return': {
                        'string': 'success',
                    },
                },
                'urmg': {
                    'description': 'remove a username from a group in the same domain',
                    'short': 'ur',
                    'rest_type': 'POST',
                    'admin_only': True,
                    'required_args': {
                        'args': {
                            'username': {
                                'vartype': 'str',
                                'desc': 'username of the user to unmap',
                                'ol': 'u',
                            },
                            'groupname': {
                                'vartype': 'str',
                                'desc': 'groupname to remove the user from',
                                'ol': 'g',
                            },
                        },
                    },
                    'optional_args': {
                        'min': 0,
                        'max': 1,
                        'args': {
                            'domain': {
                                'vartype': 'str',
                                'desc': 'domain within which to unmap',
                                'ol': 'd',
                            },
                        },
                    },
                    'return': {
                        'string': 'success',
                    },
                },
            },
        }
#############################
# listing methods #
#############################
def list_users(self, query):
"""
[description]
lists all users
[parameter info]
required:
query: the query dict being passed to us from the called URI
[return value]
returns a dict of lists of users per domain
"""
try:
self.cfg.log.debug(query.keys())
# setting our valid query keys
common = VyvyanCommon(self.cfg)
valid_qkeys = common.get_valid_qkeys(self.namespace, 'list_users')
# check for wierd query keys, explode
for qk in query.keys():
if qk not in valid_qkeys:
self.cfg.log.debug("API_userdata/list_users: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
raise UserdataError("API_userdata/list_users: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
# if the user specifies a domain, restrict output.
# otherwise return userdata for all domains
if 'domain' not in query.keys() or not query['domain']:
domain = None
else:
domain = query['domain']
# check for min/max number of optional arguments
common.check_num_opt_args(query, self.namespace, 'list_users')
# iterate through all domains and spit out some users
buf = {}
self.cfg.log.debug("API_userdata/list_users: querying for all users")
if domain:
usertable = self.cfg.dbsess.query(Users).\
filter(Users.domain==domain).all()
else:
usertable = self.cfg.dbsess.query(Users).all()
for u in usertable:
if u.domain not in buf.keys():
buf[u.domain] = []
for u in usertable:
if u.active:
act = "active"
else:
act = "inactive"
buf[u.domain].append("%s uid:%s %s" % (u.username, u.uid, act))
# if the user specified a domain but it's empty
if buf == {} and domain:
self.cfg.log.debug("API_userdata/list_users: no users in domain %s" % domain)
raise UserdataError("API_userdata/list_users: no users in domain %s" % domain)
elif buf == {}:
self.cfg.log.debug("API_userdata/list_users: no users found")
raise UserdataError("API_userdata/list_users: no users found")
# return our listing
self.cfg.log.debug(buf)
return buf
except Exception, e:
self.cfg.log.debug("API_userdata/list_users: query failed for users. Error: %s" % e)
raise UserdataError("API_userdata/list_users: query failed for groups. Error: %s" % e)
def list_groups(self, query):
"""
[description]
lists all groups
[parameter info]
required:
query: the query dict being passed to us from the called URI
[return value]
returns a dict of lists of groups per domain
"""
try:
self.cfg.log.debug(query.keys())
# setting our valid query keys
common = VyvyanCommon(self.cfg)
valid_qkeys = common.get_valid_qkeys(self.namespace, 'list_groups')
# check for wierd query keys, explode
for qk in query.keys():
if qk not in valid_qkeys:
self.cfg.log.debug("API_userdata/list_groups: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
raise UserdataError("API_userdata/list_groups: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
# if the user specifies a domain, restrict output.
# otherwise return userdata for all domains
if 'domain' not in query.keys() or not query['domain']:
domain = None
else:
domain = query['domain']
# check for min/max number of optional arguments
common.check_num_opt_args(query, self.namespace, 'list_groups')
# iterate through all domains and spit out some groups
buf = {}
self.cfg.log.debug("API_userdata/list_groups: querying for all groups")
if domain:
grouptable = self.cfg.dbsess.query(Groups).\
filter(Groups.domain==domain).all()
else:
grouptable = self.cfg.dbsess.query(Groups).all()
for g in grouptable:
if g.domain not in buf.keys():
buf[g.domain] = []
for g in grouptable:
buf[g.domain].append("%s gid:%s" % (g.groupname, g.gid))
# if the user specified a domain but it's empty
if buf == {} and domain:
self.cfg.log.debug("API_userdata/list_groups: no groups in domain %s" % domain)
raise UserdataError("API_userdata/list_groups: no groups in domain %s" % domain)
elif buf == {}:
self.cfg.log.debug("API_userdata/list_groups: no groups found")
raise UserdataError("API_userdata/list_groups: no groups found")
# return our listing
self.cfg.log.debug(buf)
return buf
except Exception, e:
self.cfg.log.debug("API_userdata/list_groups: query failed for groups. Error: %s" % e)
raise UserdataError("API_userdata/list_groups: query failed for groups. Error: %s" % e)
def list_domains(self, query):
"""
[description]
lists all domains
[parameter info]
required:
query: the query dict being passed to us from the called URI
[return value]
returns a list of domains
"""
try:
# setting our valid query keys
common = VyvyanCommon(self.cfg)
valid_qkeys = common.get_valid_qkeys(self.namespace, 'list_domains')
# check for wierd query keys, explode
for qk in query.keys():
if qk not in valid_qkeys:
self.cfg.log.debug("API_userdata/list_domains: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
raise UserdataError("API_userdata/list_domains: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
# if the user specifies a domain, restrict output.
# otherwise return userdata for all domains
if 'domain' not in query.keys() or not query['domain']:
domain = None
else:
domain = query['domain']
# check for min/max number of optional arguments
common.check_num_opt_args(query, self.namespace, 'list_domains')
# little bit of setup work
buf = []
self.cfg.log.debug("API_userdata/list_domains: querying for all domains")
# iterate through all users and spit out some domains
usertable = self.cfg.dbsess.query(Users).all()
for u in usertable:
if u.domain not in buf:
buf.append(u.domain)
# iterate through all groups and spit out some domains
# just in case we have some oddball domain with groups and no users
grouptable = self.cfg.dbsess.query(Groups).all()
for g in grouptable:
if g.domain not in buf:
buf.append(g.domain)
# if nothing exists
if buf == []:
self.cfg.log.debug("API_userdata/list_domains: no domains found")
raise UserdataError("API_userdata/list_domains: no domains found")
# return our listing
return buf
except Exception, e:
self.cfg.log.debug("API_userdata/list_domains: query failed for domains. Error: %s" % e)
raise UserdataError("API_userdata/list_domains: query failed for groups. Error: %s" % e)
#############################
# user manipulation methods #
#############################
def udisplay(self, query):
"""
[description]
display the information for a user
[parameter info]
required:
query: the query dict being passed to us from the called URI
[return]
Returns a list of user ORMobjects and associated list of group ORMobjects if successful, raises an error if unsuccessful
"""
try:
# setting our valid query keys
common = VyvyanCommon(self.cfg)
valid_qkeys = common.get_valid_qkeys(self.namespace, 'udisplay')
# check for wierd query keys, explode
for qk in query.keys():
if qk not in valid_qkeys:
self.cfg.log.debug("API_userdata/udisplay: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
raise UserdataError("API_userdata/udisplay: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
# to make our conditionals easier
if 'username' not in query.keys() or not query['username']:
self.cfg.log.debug("API_userdata/udisplay: no username provided!")
raise UserdataError("API_userdata/udisplay: no username provided!")
# if the user specifies a domain, restrict output.
# otherwise return userdata for all domains
if 'domain' not in query.keys() or not query['domain']:
query['domain'] = self.cfg.default_domain
# check for min/max number of optional arguments
common.check_num_opt_args(query, self.namespace, 'udisplay')
# find us a username to display, validation done in the __get_user_obj function
try:
u = self.__get_user_obj(query['username'], query['domain'])
except:
self.cfg.log.debug("API_userdata/udisplay: user %s not found (domain: %s)" % (query['username'], query['domain']))
raise UserdataError("API_userdata/udisplay: user %s not found (domain: %s)" % (query['username'], query['domain']))
# got a user, populate the return
if u:
ret = {}
ret['user'] = u.to_dict()
ret['groups'] = []
# user exists, find out what groups it's in
glist = self.__get_groups_by_user(u.username, u.domain)
if glist:
for g in glist:
ret['groups'].append(g.to_dict())
else:
self.cfg.log.debug("API_userdata/udisplay: user %s not found." % query['username'])
raise UserdataError("API_userdata/udisplay: user %s not found" % query['username'])
# return's populated, return it
return ret
except Exception, e:
self.cfg.log.debug("API_userdata/udisplay: %s" % e)
raise UserdataError("API_userdata/udisplay: %s" % e)
def uadd(self, query, files=None):
"""
[description]
create a new Users entry
[parameter info]
required:
query: the query dict being passed to us from the called URI
[return]
Returns "success" if successful, raises an error if not
"""
# setting our valid query keys
common = VyvyanCommon(self.cfg)
valid_qkeys = common.get_valid_qkeys(self.namespace, 'uadd')
try:
# to make our conditionals easier
if 'username' not in query.keys() or not query['username']:
self.cfg.log.debug("API_userdata/uadd: no username provided!")
raise UserdataError("API_userdata/uadd: no username provided!")
else:
username = query['username']
if 'password' not in query.keys() or not query['password']:
self.cfg.log.debug("API_userdata/uadd: no password provided!")
raise UserdataError("API_userdata/uadd: no password provided!")
else:
password = query['password']
# check for wierd query keys, explode
for qk in query.keys():
if qk not in valid_qkeys:
self.cfg.log.debug("API_userdata/uadd: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
raise UserdataError("API_userdata/uadd: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
# check for min/max number of optional arguments
common.check_num_opt_args(query, self.namespace, 'uadd')
# first name, validate or default
if 'domain' in query.keys() and query['domain']:
domain = query['domain']
v_domain(domain)
else:
domain = self.cfg.default_domain
# first name, validate or default
if 'first_name' in query.keys() and query['first_name']:
first_name = query['first_name']
v_name(first_name)
else:
first_name = "John"
# last name, validate or default
if 'last_name' in query.keys() and query['last_name']:
last_name = query['last_name']
v_name(last_name)
else:
last_name = "Doe"
# user type, validate or default
if 'user_type' in query.keys() and query['user_type']:
user_type = query['user_type']
if user_type not in self.cfg.user_types:
self.cfg.log.debug("API_userdata/uadd: Invalid user type, please use one of the following: " + ', '.join(self.cfg.user_types))
raise UserdataError("API_userdata/uadd: Invalid user type, please use one of the following: " + ', '.join(self.cfg.user_types))
else:
user_type = self.cfg.def_user_type
# shell, assign or default
if 'shell' in query.keys() and query['shell']:
shell = query['shell']
else:
shell = self.cfg.shell
# ssh_keys file, validate or assign empty string
if files:
if len(files) > 1:
self.cfg.log.debug("API_userdata/uadd: too many files uploaded for ssh_keys, refusing to continue")
raise UserdataError("API_userdata/uadd: too many files uploaded for ssh_keys, refusing to continue")
ssh_keys = []
for key in files[0].readlines():
if v_ssh2_pubkey(key):
ssh_keys.append(key)
ssh_public_key = ''.join(ssh_keys).rstrip()
else:
ssh_public_key = ""
# home dir, assign or default
if 'home_dir' in query.keys() and query['home_dir']:
home_dir = query['home_dir']
else:
home_dir = "%s/%s" % (self.cfg.hdir, username)
# email address, assign or default
if 'email_address' in query.keys() and query['email_address']:
email_address = query['email_address']
else:
email_address = "%s@%s" % (username, domain)
# input validation for username
if username:
v_name(username)
else:
self.cfg.log.debug("API_userdata/uadd: invalid username: %s" % username)
raise UserdataError("API_userdata/uadd: invalid username: %s" % username)
# make sure we're not trying to add a duplicate, validation done in the __get_user_obj function
u = self.__get_user_obj(username, domain)
if u:
self.cfg.log.debug("API_userdata/uadd: user %s exists in domain %s" % (username, domain))
raise UserdataError("API_userdata/uadd: user %s exists in domain %s" % (username, domain))
# uid, validate or generate
if 'uid' in query.keys() and query['uid']:
uid = int(query['uid'])
v_uid(self.cfg, uid)
if v_uid_in_db(self.cfg, uid, domain):
self.cfg.log.debug("API_userdata/uadd: uid %s exists in domain %s already" % (uid, domain))
raise UserdataError("API_userdata/uadd: uid %s exists in domain %s already" % (uid, domain))
else:
uid = self.__next_available_uid(domain)
# deal with default groups
dg = []
if self.cfg.default_groups:
for grp in self.cfg.default_groups:
try:
# see if our default group(s) exist
g = self.__get_group_obj(grp, domain)
if g:
dg.append(g)
else:
# if not, explode
self.cfg.log.debug("API_userdata/uadd: default groups must exist before we can add users to them! missing group: %s in %s" % (grp, domain))
raise UserdataError("API_userdata/uadd: default groups must exist before we can add users to them! missing group: %s in %s" % (grp, domain))
except:
# if anything goes wrong, explode.
self.cfg.log.debug("API_userdata/uadd: default groups must exist before we can add users to them! missing group: %s in %s" % (grp, domain))
raise UserdataError("API_userdata/uadd: default groups must exist before we can add users to them! missing group: %s in %s" % (grp, domain))
# hash the password. since we're LDAP-oriented we'll use SSHA
passhash = passlib.hash.ldap_salted_sha1.encrypt(password, salt_size=self.cfg.salt_size)
# create the user object, push it to the db, return status
u = Users(first_name, last_name, ssh_public_key, passhash, username, domain, uid, user_type, home_dir, shell, email_address, active=True)
self.cfg.dbsess.add(u)
self.cfg.dbsess.commit()
# if our default group(s) exist, shove the user into it/them
if dg:
for g in dg:
ugquery = {'username': username, 'groupname': g.groupname, 'domain': domain}
self.utog(ugquery)
self.cfg.log.debug("API_userdata/uadd: adding user %s to default group %s in domain %s" % (username, g.groupname, domain))
return 'success'
except Exception, e:
# something odd happened, explode violently
self.cfg.dbsess.rollback()
self.cfg.log.debug("API_userdata/uadd: error: %s" % e)
raise UserdataError("API_userdata/uadd: error: %s" % e)
def uremove(self, query):
"""
[description]
delete a user from the users table
[parameter info]
required:
query: the query dict being passed to us from the called URI
[return]
Returns "success" if successful, None if unsuccessful
"""
try:
# to make our conditionals easier
if 'username' not in query.keys() or not query['username']:
self.cfg.log.debug("API_userdata/uremove: no username provided!")
raise UserdataError("API_userdata/uremove: no username provided!")
else:
username = query['username']
v_name(username)
# setting our valid query keys
common = VyvyanCommon(self.cfg)
valid_qkeys = common.get_valid_qkeys(self.namespace, 'uremove')
# check for wierd query keys, explode
for qk in query.keys():
if qk not in valid_qkeys:
self.cfg.log.debug("API_userdata/uremove: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
raise UserdataError("API_userdata/uremove: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
# check for min/max number of optional arguments
common.check_num_opt_args(query, self.namespace, 'uremove')
# domain, validate or assign default
if 'domain' in query.keys() and query['domain']:
domain = query['domain']
v_domain(domain)
else:
domain = self.cfg.default_domain
# find us a username to delete, validation done in the __get_user_obj function
u = self.__get_user_obj(username, domain)
if u:
groups = self.__get_groups_by_user(username, domain)
if groups:
for group in groups:
query = {"username": username, "groupname": group.groupname, "domain": domain}
self.urmg(query)
self.cfg.dbsess.delete(u)
self.cfg.dbsess.commit()
self.cfg.log.debug("API_userdata/uremove: deleted user %s from domain %s" % (username, domain))
return "success"
else:
self.cfg.log.debug("API_userdata/uremove: user %s not found in domain %s" % (username, domain))
raise UserdataError("API_userdata/uremove: user %s not found in domain %s" % (username, domain))
except Exception, e:
# something odd happened, explode violently
self.cfg.dbsess.rollback()
self.cfg.log.debug("API_userdata/uremove: error: %s" % e)
raise UserdataError("API_userdata/uremove: error: %s" % e)
def umodify(self, query, files=None):
"""
[description]
create a new tags entry
[parameter info]
required:
query: the query dict being passed to us from the called URI
[return]
Returns true if successful, raises an error if not
"""
# setting our valid query keys
common = VyvyanCommon(self.cfg)
valid_qkeys = common.get_valid_qkeys(self.namespace, 'umodify')
try:
# to make our conditionals easier
if 'username' not in query.keys() or not query['username']:
self.cfg.log.debug("API_userdata/umodify: no username provided!")
raise UserdataError("API_userdata/umodify: no username provided!")
else:
username = query['username']
# check for wierd query keys, explode
for qk in query.keys():
if qk not in valid_qkeys:
self.cfg.log.debug("API_userdata/umodify: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
raise UserdataError("API_userdata/umodify: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
# check for min/max number of optional arguments
common.check_num_opt_args(query, self.namespace, 'umodify')
# we do this here nad not below with the rest of the options because
# we need this set to get a valid user object
#
# domain, validate or assign default
if 'domain' in query.keys() and query['domain']:
domain = query['domain']
v_domain(domain)
else:
domain = self.cfg.default_domain
# find us a username to modify, validation done in the __get_user_obj function
u = self.__get_user_obj(username, domain)
if not u:
self.cfg.log.debug("API_userdata/umodify: refusing to modify nonexistent user: %s" % username)
raise UserdataError("API_userdata/umodify: refusing to modify nonexistent user: %s" % username)
# first name, validate or leave alone
if 'first_name' in query.keys() and query['first_name']:
v_name(query['first_name'])
u.first_name = query['first_name']
# last name, validate or leave alone
if 'last_name' in query.keys() and query['last_name']:
v_name(query['last_name'])
u.last_name = query['last_name']
# user type, validate or leave alone
if 'user_type' in query.keys() and query['user_type']:
if query['user_type'] not in self.cfg.user_types:
self.cfg.log.debug("API_userdata/umodify: Invalid user type, please use one of the following: " + ', '.join(self.cfg.user_types))
raise UserdataError("API_userdata/umodify: Invalid user type, please use one of the following: " + ', '.join(self.cfg.user_types))
else:
u.user_type = query['user_type']
# shell, assign or leave alone
if 'shell' in query.keys() and query['shell']:
u.shell = query['shell']
# ssh_keys file, validate or leave alone
if files:
if len(files) > 1:
self.cfg.log.debug("API_userdata/umodify: too many files uploaded for ssh_keys, refusing to continue")
raise UserdataError("API_userdata/umodify: too many files uploaded for ssh_keys, refusing to continue")
ssh_keys = []
for key in files[0].readlines():
if v_ssh2_pubkey(key):
ssh_keys.append(key)
u.ssh_public_key = ''.join(ssh_keys).rstrip()
# home dir, assign or leave alone
if 'home_dir' in query.keys() and query['home_dir']:
u.hdir = query['home_dir']
# email, assign or leave alone
if 'email_address' in query.keys() and query['email_address']:
u.email = query['email_address']
# uid, validate or leave alone
if 'uid' in query.keys() and query['uid']:
if u.uid != int(query['uid']) and v_uid_in_db(self.cfg, int(query['uid']), u.domain):
self.cfg.log.debug("API_userdata/umodify: uid exists already: %s" % query['uid'])
raise UserdataError("API_userdata/umodify: uid exists already: %s" % query['uid'])
u.uid = int(query['uid'])
# hash the password. since we're LDAP-oriented we'll use SSHA
# TODO: provide support for checking passwords against a configurable number
# TODO: of passwords and rejecting matches
if 'password' in query.keys() and query['password']:
passhash = passlib.hash.ldap_salted_sha1.encrypt(password, salt_size=cfg.salt_size)
u.password = passhash
# activate/deactivate the user
if 'active' in query.keys() and query['active'] in ['F', 'f', 'False', 'false']:
u.active = False
elif 'active' in query.keys() and query['active'] in ['T', 't', 'True', 'true']:
u.active = True
# push the modified user object to the db, return status
self.cfg.dbsess.add(u)
self.cfg.dbsess.commit()
return 'success'
except Exception, e:
# something odd happened, explode violently
self.cfg.dbsess.rollback()
self.cfg.log.debug("API_userdata/umodify: error: %s" % e)
raise UserdataError("API_userdata/umodify: error: %s" % e)
def uclone(self, query):
"""
[description]
delete a user from the users table
[parameter info]
required:
query: the query dict being passed to us from the called URI
[return]
Returns "success" if successful, None if unsuccessful
"""
try:
# to make our conditionals easier
if 'username' not in query.keys() or not query['username']:
self.cfg.log.debug("API_useradata/uclone: no username provided!")
raise UserdataError("API_useradata/uclone: no username provided!")
if 'domain' not in query.keys() or not query['domain']:
self.cfg.log.debug("API_useradata/uclone: no domain provided!")
raise UserdataError("API_useradata/uclone: no domain provided!")
if 'newdomain' not in query.keys() or not query['newdomain']:
self.cfg.log.debug("API_useradata/uclone: no new domain provided!")
raise UserdataError("API_useradata/uclone: no new domain provided!")
# setting our valid query keys
common = VyvyanCommon(self.cfg)
valid_qkeys = common.get_valid_qkeys(self.namespace, 'uclone')
# check for wierd query keys, explode
for qk in query.keys():
if qk not in valid_qkeys:
self.cfg.log.debug("API_userdata/uclone: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
raise UserdataError("API_userdata/uclone: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
# input validation for new domain
v_domain(query['domain'])
# find us a username to clone, validation done in the __get_user_obj function
u = self.__get_user_obj(query['username'], query['domain'])
if u:
query = {
'username': query['username'],
'domain': query['newdomain'],
'first_name': u.first_name,
'last_name': u.last_name,
'uid': u.uid,
'user_type': u.type,
'shell': u.shell,
'email_address': u.email,
'ssh_key': u.ssh_public_key,
'home_dir': u.hdir,
}
self.uadd(query)
self.cfg.log.debug("API_userdata/uclone: created user %s in query['domain'] %s based on query['domain'] %s" % (query['username'], query['newdomain'], query['domain']))
return "success"
else:
self.cfg.log.debug("API_userdata/uclone: user %s not found in query['domain'] %s" % (query['username'], query['domain']))
raise UserdataError("API_userdata/uclone: user %s not found in %s" % (query['username'], query['domain']))
except Exception, e:
# something odd happened, explode violently
self.cfg.dbsess.rollback()
self.cfg.log.debug("API_userdata/uclone: error: %s" % e)
raise UserdataError("API_userdata/uclone: error: %s" % e)
##############################
# group manipulation methods #
##############################
def gdisplay(self, query):
    """
    [description]
    display the information for a group

    [parameter info]
    required:
        query: the query dict being passed to us from the called URI
               must contain 'groupname'; 'domain' is optional (defaults to
               self.cfg.default_domain)

    [return]
    Returns a dict with 'group' (ORM dict), 'users' (list of user dicts) and
    'sudo_cmds' (comma-joined string or "no commands") if successful,
    raises UserdataError if unsuccessful
    """
    try:
        # setting our valid query keys
        common = VyvyanCommon(self.cfg)
        valid_qkeys = common.get_valid_qkeys(self.namespace, 'gdisplay')
        # check for weird query keys, explode
        for qk in query.keys():
            if qk not in valid_qkeys:
                self.cfg.log.debug("API_userdata/gdisplay: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
                # BUG FIX: this error message previously read "gidsplay"
                raise UserdataError("API_userdata/gdisplay: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
        # to make our conditionals easier
        if 'groupname' not in query.keys() or not query['groupname']:
            self.cfg.log.debug("API_userdata/gdisplay: no groupname provided!")
            raise UserdataError("API_userdata/gdisplay: no groupname provided!")
        else:
            groupname = query['groupname']
        # domain, validate or substitute default
        if 'domain' in query.keys() and query['domain']:
            domain = query['domain']
            v_domain(domain)
        else:
            domain = self.cfg.default_domain
        # check for min/max number of optional arguments
        common.check_num_opt_args(query, self.namespace, 'gdisplay')
        # look for the group; __get_group_obj returns None on a miss, so the
        # .to_dict() call below raises and we convert that into "not found"
        try:
            g = self.__get_group_obj(groupname, domain)
            ret = {}
            ret['group'] = g.to_dict()
        except Exception as e:
            self.cfg.log.debug("API_userdata/gdisplay: group %s not found in domain %s." % (groupname, domain))
            raise UserdataError("API_userdata/gdisplay: group %s not found in domain %s" % (groupname, domain))
        # now that we know the group exists, we can see if it's populated with users
        ret['users'] = []
        ulist = self.__get_users_by_group(groupname, domain)
        if ulist:
            for u in ulist:
                ret['users'].append(u.to_dict())
        # fetch any sudo commands mapped to the group
        # (comment fixed: this block was mislabeled as the users check)
        sclist = self.__get_sudo_cmds_by_group(groupname, domain)
        if sclist:
            commands = []
            for sc in sclist:
                commands.append(sc.sudocommand)
            ret['sudo_cmds'] = ','.join(commands)
        else:
            ret['sudo_cmds'] = "no commands"
        # return is populated, return it
        return ret
    except Exception as e:
        self.cfg.log.debug("API_userdata/gdisplay: %s" % e)
        raise UserdataError("API_userdata/gdisplay: %s" % e)
def gadd(self, query, files=None):
    """
    [description]
    create a new group entry in the groups table

    [parameter info]
    required:
        query: the query dict being passed to us from the called URI
               must contain 'groupname'; 'domain', 'description', 'sudo_cmds'
               and 'gid' are optional

    [return]
    Returns 'success' if successful, raises UserdataError if not
    """
    # setting our valid query keys
    common = VyvyanCommon(self.cfg)
    valid_qkeys = common.get_valid_qkeys(self.namespace, 'gadd')
    try:
        # groupname is mandatory; validate it up front
        if not query.get('groupname'):
            self.cfg.log.debug("API_userdata/gadd: no groupname provided!")
            raise UserdataError("API_userdata/gadd: no groupname provided!")
        groupname = query['groupname']
        v_name(groupname)
        # reject any query keys we do not recognize
        for qkey in query.keys():
            if qkey not in valid_qkeys:
                self.cfg.log.debug("API_userdata/gadd: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qkey, valid_qkeys))
                raise UserdataError("API_userdata/gadd: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qkey, valid_qkeys))
        # check for min/max number of optional arguments
        common.check_num_opt_args(query, self.namespace, 'gadd')
        # domain: validate the supplied one or fall back to the default
        self.cfg.log.debug("default_domain: %s" % self.cfg.default_domain)
        if query.get('domain'):
            domain = query['domain']
            v_domain(domain)
        else:
            domain = self.cfg.default_domain
        # description: use the supplied one or a placeholder
        description = query.get('description') or 'Please add a description for this group!'
        # sudo commands: comma-separated list; a case-insensitive "all"
        # anywhere in the list collapses it to just ["ALL"]
        if query.get('sudo_cmds'):
            sudo_cmds = query['sudo_cmds'].split(',')
            if 'ALL' in map(str.upper, sudo_cmds):
                sudo_cmds = ['ALL']
        else:
            sudo_cmds = None
        # make sure we're not trying to add a duplicate
        if self.__get_group_obj(groupname, domain):
            self.cfg.log.debug("API_userdata/gadd: group exists already: %s" % groupname)
            raise UserdataError("API_userdata/gadd: group exists already: %s" % groupname)
        # gid: validate the caller-supplied one or generate the next free one.
        # this runs after the domain is resolved because gid uniqueness is
        # checked per-domain.
        if query.get('gid'):
            gid = int(query['gid'])
            v_gid(self.cfg, gid)
            if v_gid_in_db(self.cfg, gid, domain):
                self.cfg.log.debug("API_userdata/gadd: gid exists already: %s" % gid)
                raise UserdataError("API_userdata/gadd: gid exists already: %s" % gid)
        else:
            gid = self.__next_available_gid(domain)
        # create the group row and push it to the db
        grp = Groups(description, groupname, domain, gid)
        self.cfg.dbsess.add(grp)
        self.cfg.dbsess.commit()
        # map any sudo commands onto the freshly created group
        if sudo_cmds:
            self.__map_sudoers(grp, sudo_cmds)
        return 'success'
    except Exception as e:
        # something odd happened, explode violently
        self.cfg.dbsess.rollback()
        self.cfg.log.debug("API_userdata/gadd: error: %s" % e)
        raise UserdataError("API_userdata/gadd: error: %s" % e)
def gremove(self, query):
    """
    [description]
    delete a group from the groups table

    [parameter info]
    required:
        query: the query dict being passed to us from the called URI
               must contain 'groupname'; 'domain' is optional

    [return]
    Returns "success" if successful, raises UserdataError if unsuccessful
    """
    try:
        # groupname is mandatory
        if not query.get('groupname'):
            self.cfg.log.debug("API_userdata/gremove: no groupname provided!")
            raise UserdataError("API_userdata/gremove: no groupname provided!")
        groupname = query['groupname']
        # setting our valid query keys
        common = VyvyanCommon(self.cfg)
        valid_qkeys = common.get_valid_qkeys(self.namespace, 'gremove')
        # reject any query keys we do not recognize
        for qkey in query.keys():
            if qkey not in valid_qkeys:
                self.cfg.log.debug("API_userdata/gremove: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qkey, valid_qkeys))
                raise UserdataError("API_userdata/gremove: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qkey, valid_qkeys))
        # check for min/max number of optional arguments
        common.check_num_opt_args(query, self.namespace, 'gremove')
        # domain: validate the supplied one or fall back to the default
        if query.get('domain'):
            domain = query['domain']
            v_domain(domain)
        else:
            domain = self.cfg.default_domain
        # find the group to delete; name/domain validation happens inside
        # __get_group_obj
        grp = self.__get_group_obj(groupname, domain)
        if not grp:
            self.cfg.log.debug("API_userdata/gremove: group %s not found in domain %s" % (groupname, domain))
            raise UserdataError("API_userdata/gremove: group %s not found in domain %s" % (groupname, domain))
        # refuse to delete a group that still has members
        if self.__get_users_by_group(groupname, domain):
            self.cfg.log.debug("API_userdata/gremove: please remove all users from this group before deleting!")
            raise UserdataError("API_userdata/gremove: please remove all users from this group before deleting!")
        # unmap any sudo commands, then drop the group row and commit
        self.__unmap_sudoers(grp)
        self.cfg.dbsess.delete(grp)
        self.cfg.dbsess.commit()
        self.cfg.log.debug("API_userdata/gremove: deleted group %s from domain %s" % (groupname, domain))
        # declare victory
        return "success"
    except Exception as e:
        # something odd happened, explode violently
        self.cfg.dbsess.rollback()
        self.cfg.log.debug("API_userdata/gremove: error: %s" % e)
        raise UserdataError("API_userdata/gremove: error: %s" % e)
def gmodify(self, query, files=None):
    """
    [description]
    modify an existing group entry (description, sudo_cmds, gid)

    [parameter info]
    required:
        query: the query dict being passed to us from the called URI
               must contain 'groupname'; 'domain', 'description',
               'sudo_cmds' and 'gid' are optional

    [return]
    Returns 'success' if successful, raises UserdataError if not
    """
    # setting our valid query keys
    common = VyvyanCommon(self.cfg)
    valid_qkeys = common.get_valid_qkeys(self.namespace, 'gmodify')
    try:
        # to make our conditionals easier
        if 'groupname' not in query.keys() or not query['groupname']:
            self.cfg.log.debug("API_userdata/gmodify: no groupname provided!")
            raise UserdataError("API_userdata/gmodify: no groupname provided!")
        else:
            groupname = query['groupname']
        # check for weird query keys, explode
        for qk in query.keys():
            if qk not in valid_qkeys:
                self.cfg.log.debug("API_userdata/gmodify: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
                raise UserdataError("API_userdata/gmodify: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
        # check for min/max number of optional arguments
        common.check_num_opt_args(query, self.namespace, 'gmodify')
        # domain, validate or default
        if 'domain' in query.keys() and query['domain']:
            domain = query['domain']
            v_domain(domain)
        else:
            domain = self.cfg.default_domain
        # find us a groupname to modify, validation done in the __get_group_obj function
        g = self.__get_group_obj(groupname, domain)
        if not g:
            self.cfg.log.debug("API_userdata/gmodify: refusing to modify nonexistent group %s in domain %s" % (groupname, domain))
            raise UserdataError("API_userdata/gmodify: refusing to modify nonexistent group %s in domain %s" % (groupname, domain))
        # description, assign or leave alone
        if 'description' in query.keys() and query['description']:
            g.description = query['description']
        # sudo commands. if "all" (case insensitive), translate to "ALL".
        # BUG FIX: sudo_cmds must default to None; previously the reference
        # further down raised a NameError (masked as a generic UserdataError
        # plus rollback) whenever the query did not include 'sudo_cmds'.
        sudo_cmds = None
        if 'sudo_cmds' in query.keys() and query['sudo_cmds']:
            sudo_cmds = query['sudo_cmds'].split(',')
            if 'ALL' in map(str.upper, sudo_cmds):
                sudo_cmds = ['ALL']
        # gid, validate or leave alone
        if 'gid' in query.keys() and query['gid']:
            if g.gid != int(query['gid']) and v_gid_in_db(self.cfg, int(query['gid']), g.domain):
                self.cfg.log.debug("API_userdata/gmodify: gid exists in domain %s already: %s" % (domain, query['gid']))
                raise UserdataError("API_userdata/gmodify: gid exists in domain %s already: %s" % (domain, query['gid']))
            g.gid = int(query['gid'])
        # push the modified group object to the db, return status
        self.cfg.dbsess.add(g)
        self.cfg.dbsess.commit()
        # remap sudoers commands
        # NOTE: you must provide the entire sudo_cmds array each time you modify a group
        # or it will delete existing commands
        if sudo_cmds:
            self.__map_sudoers(g, sudo_cmds)
        # declare victory
        return 'success'
    except Exception as e:
        # something odd happened, explode violently
        self.cfg.dbsess.rollback()
        self.cfg.log.debug("API_userdata/gmodify: error: %s" % e)
        raise UserdataError("API_userdata/gmodify: error: %s" % e)
def gclone(self, query):
    """
    [description]
    clone a group (including its sudo command mappings) from one domain
    into another

    [parameter info]
    required:
        query: the query dict being passed to us from the called URI
               must contain 'groupname', 'domain' and 'newdomain'

    [return]
    Returns "success" if successful, raises UserdataError if unsuccessful
    """
    try:
        # to make our conditionals easier
        if 'groupname' not in query.keys() or not query['groupname']:
            self.cfg.log.debug("API_userdata/gclone: no groupname provided!")
            raise UserdataError("API_userdata/gclone: no groupname provided!")
        if 'domain' not in query.keys() or not query['domain']:
            self.cfg.log.debug("API_userdata/gclone: no domain provided!")
            raise UserdataError("API_userdata/gclone: no domain provided!")
        if 'newdomain' not in query.keys() or not query['newdomain']:
            self.cfg.log.debug("API_userdata/gclone: no new domain provided!")
            raise UserdataError("API_userdata/gclone: no new domain provided!")
        # BUG FIX: these locals were never assigned before, so every call
        # blew up with a NameError further down
        groupname = query['groupname']
        domain = query['domain']
        newdomain = query['newdomain']
        # setting our valid query keys
        common = VyvyanCommon(self.cfg)
        valid_qkeys = common.get_valid_qkeys(self.namespace, 'gclone')
        # check for weird query keys, explode
        for qk in query.keys():
            if qk not in valid_qkeys:
                self.cfg.log.debug("API_userdata/gclone: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
                raise UserdataError("API_userdata/gclone: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
        # input validation for the new domain.
        # BUG FIX: this was v_domain(self.cfg, domain) — wrong arity (every
        # other call site uses v_domain(domain)) and it validated the source
        # domain instead of the destination
        v_domain(newdomain)
        # find us a groupname to clone, validation done in the __get_group_obj function
        g = self.__get_group_obj(groupname, domain)
        if g:
            # get sudo commands for the group.
            # BUG FIX: moved inside the existence check and changed the
            # undefined name "group.id" to "g.id"
            sudo_cmds = []
            for gsmap in self.cfg.dbsess.query(GroupSudocommandMapping).filter(GroupSudocommandMapping.groups_id==g.id).all():
                sudo_cmds.append(gsmap.sudocommand)
            # put it all together and create the clone via gadd
            query = {
                'groupname': groupname,
                'domain': newdomain,
                'gid': g.gid,
                'sudo_cmds': ','.join(sudo_cmds),
                'description': g.description,
            }
            self.gadd(query)
            self.cfg.log.debug("API_userdata/gclone: created group %s in domain %s" % (groupname, newdomain))
            return "success"
        else:
            self.cfg.log.debug("API_userdata/gclone: group %s not found in domain %s" % (groupname, domain))
            raise UserdataError("API_userdata/gclone: group %s not found in domain %s" % (groupname, domain))
    except Exception as e:
        # something odd happened, explode violently
        self.cfg.dbsess.rollback()
        self.cfg.log.debug("API_userdata/gclone: error: %s" % e)
        raise UserdataError("API_userdata/gclone: error: %s" % e)
###################################
# user-to-group mapping functions #
###################################
def utog(self, query):
    """
    [description]
    map a user into a group

    [parameter info]
    required:
        query: the query dict being passed to us from the called URI
               must contain 'username' and 'groupname'; 'domain' is optional

    [return]
    Returns "success" if successful, raises an error if unsuccessful
    """
    try:
        # to make our conditionals easier
        if 'username' not in query.keys() or not query['username']:
            self.cfg.log.debug("API_userdata/utog: no username provided!")
            raise UserdataError("API_userdata/utog: no username provided!")
        if 'groupname' not in query.keys() or not query['groupname']:
            self.cfg.log.debug("API_userdata/utog: no group provided!")
            raise UserdataError("API_userdata/utog: no group provided!")
        # validate things
        v_name(query['username'])
        v_name(query['groupname'])
        # setting our valid query keys
        common = VyvyanCommon(self.cfg)
        valid_qkeys = common.get_valid_qkeys(self.namespace, 'utog')
        # check for weird query keys, explode
        for qk in query.keys():
            if qk not in valid_qkeys:
                self.cfg.log.debug("API_userdata/utog: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
                # BUG FIX: this error message previously read "API_userdata/tog"
                raise UserdataError("API_userdata/utog: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
        # check for min/max number of optional arguments
        common.check_num_opt_args(query, self.namespace, 'utog')
        # domain, validate or assign
        if 'domain' in query.keys() and query['domain']:
            domain = query['domain']
            v_domain(domain)
        else:
            domain = self.cfg.default_domain
        # fetch our user and group
        u = self.__get_user_obj(query['username'], domain)
        g = self.__get_group_obj(query['groupname'], domain)
        if not u:
            self.cfg.log.debug("API_userdata/utog: user not found: %s in %s" % (query['username'], domain))
            raise UserdataError("API_userdata/utog: user not found: %s in %s" % (query['username'], domain))
        elif not g:
            self.cfg.log.debug("API_userdata/utog: group not found: %s in %s" % (query['groupname'], domain))
            raise UserdataError("API_userdata/utog: group not found: %s in %s" % (query['groupname'], domain))
        else:
            # refuse to create the same mapping twice
            if self.cfg.dbsess.query(UserGroupMapping).\
                    filter(UserGroupMapping.users_id==u.id).\
                    filter(UserGroupMapping.groups_id==g.id).first():
                self.cfg.log.debug("API_userdata/utog: mapping exists! refusing to create duplicate mapping")
                raise UserdataError("API_userdata/utog: mapping exists! refusing to create duplicate mapping")
            ugmap = UserGroupMapping(g.id, u.id)
            self.cfg.dbsess.add(ugmap)
            self.cfg.dbsess.commit()
            return 'success'
    except Exception as e:
        # something odd happened, explode violently
        self.cfg.dbsess.rollback()
        self.cfg.log.debug("API_userdata/utog: error: %s" % e)
        raise UserdataError("API_userdata/utog: error: %s" % e)
def urmg(self, query):
    """
    [description]
    unmap a user from a group

    [parameter info]
    required:
        query: the query dict being passed to us from the called URI
               must contain 'username' and 'groupname'; 'domain' is optional

    [return]
    Returns "success" if successful, raises an error if unsuccessful
    """
    # BUG FIX (whole method): every log/error message in here previously
    # said "utog" (or "tog"), making failures impossible to attribute to urmg
    try:
        # to make our conditionals easier
        if 'username' not in query.keys() or not query['username']:
            self.cfg.log.debug("API_userdata/urmg: no username provided!")
            raise UserdataError("API_userdata/urmg: no username provided!")
        if 'groupname' not in query.keys() or not query['groupname']:
            self.cfg.log.debug("API_userdata/urmg: no group provided!")
            raise UserdataError("API_userdata/urmg: no group provided!")
        # setting our valid query keys
        common = VyvyanCommon(self.cfg)
        valid_qkeys = common.get_valid_qkeys(self.namespace, 'urmg')
        # check for weird query keys, explode
        for qk in query.keys():
            if qk not in valid_qkeys:
                self.cfg.log.debug("API_userdata/urmg: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
                raise UserdataError("API_userdata/urmg: unknown querykey \"%s\"\ndumping valid_qkeys: %s" % (qk, valid_qkeys))
        # check for min/max number of optional arguments
        common.check_num_opt_args(query, self.namespace, 'urmg')
        # domain, validate or default
        if 'domain' in query.keys() and query['domain']:
            domain = query['domain']
            v_domain(domain)
        else:
            domain = self.cfg.default_domain
        # fetch our user and group
        u = self.__get_user_obj(query['username'], domain)
        g = self.__get_group_obj(query['groupname'], domain)
        # explode if things are missing
        if not u:
            self.cfg.log.debug("API_userdata/urmg: user %s not found in domain %s" % (query['username'], domain))
            raise UserdataError("API_userdata/urmg: user %s not found in domain %s" % (query['username'], domain))
        elif not g:
            self.cfg.log.debug("API_userdata/urmg: group %s not found in domain %s" % (query['groupname'], domain))
            raise UserdataError("API_userdata/urmg: group %s not found in domain %s" % (query['groupname'], domain))
        else:
            # get the mapping entry
            ugmap = self.cfg.dbsess.query(UserGroupMapping).\
                    filter(UserGroupMapping.users_id==u.id).\
                    filter(UserGroupMapping.groups_id==g.id).first()
            if not ugmap:
                self.cfg.log.debug("API_userdata/urmg: mapping does not exist")
                raise UserdataError("API_userdata/urmg: mapping does not exist")
            # rm it
            self.cfg.dbsess.delete(ugmap)
            self.cfg.dbsess.commit()
            return 'success'
    except Exception as e:
        # something odd happened, explode violently
        self.cfg.dbsess.rollback()
        self.cfg.log.debug("API_userdata/urmg: error: %s" % e)
        raise UserdataError("API_userdata/urmg: error: %s" % e)
#################################
# internal functions below here #
#################################
def __get_group_obj(self, groupname, domain):
    """
    [description]
    for a given groupname and domain, fetch the group object

    [parameter info]
    required:
        groupname: the groupname we want to look up
        domain: the domain the group lives in

    [return value]
    returns a Groups ORM object or None
    """
    try:
        # validate inputs before touching the db
        v_name(groupname)
        v_domain(domain)
        # .first() already yields None when there is no match, so no
        # explicit None branch is needed
        return self.cfg.dbsess.query(Groups).\
               filter(Groups.groupname==groupname).\
               filter(Groups.domain==domain).first()
    except Exception as e:
        raise UserdataError("API_userdata/__get_group_obj: error: %s" % e)
def __get_user_obj(self, username, domain):
    """
    [description]
    for a given username and domain, fetch the user object

    [parameter info]
    required:
        username: the username we want to find
        domain: the domain that contains our user

    [return value]
    returns a Users ORM object or None
    """
    try:
        # validate stuff
        v_name(username)
        v_domain(domain)
        # go get our user; .first() yields None on a miss
        u = self.cfg.dbsess.query(Users).\
            filter(Users.username==username).\
            filter(Users.domain==domain).first()
        if u:
            return u
        else:
            return None
    except Exception as e:
        # BUG FIX: this error message previously named "__get_group_obj"
        raise UserdataError("API_userdata/__get_user_obj: error: %s" % e)
def __next_available_uid(self, domain):
    """
    [description]
    searches the db for existing UIDS and picks the next available UID within the parameters configured in vyvyan.yaml

    [parameter info]
    required:
        domain: the domain we're checking uids for

    [return value]
    returns an integer representing the next available UID

    [raises]
    UserdataError if the uid range (uid_start..uid_end) is exhausted or
    anything else goes wrong
    """
    try:
        # get the first allowable uid; "i" walks forward as we consume
        # already-taken uids from the sorted list
        i = self.cfg.uid_start
        uidlist = []
        # fetch all existing users in this domain
        u = self.cfg.dbsess.query(Users).\
            filter(Users.domain==domain).all()
        # add all known uids to a list
        for userentry in u:
            uidlist.append(userentry.uid)
        # sort numerically (key=int guards against string-typed uids)
        uidlist.sort(key=int)
        # if we don't have any users in the users table
        # return the default first uid as configured in the yaml
        # otherwise, pick the next open uid and return it
        if not uidlist:
            return self.cfg.uid_start
        else:
            for uu in uidlist:
                if uu < self.cfg.uid_start:
                    # uids below the configured floor are ignored entirely
                    pass
                elif not i == uu and i < self.cfg.uid_end:
                    # gap found: "i" is not taken by this (or any smaller) uid
                    return i
                elif i < self.cfg.uid_end:
                    # "i" is taken; advance to the next candidate
                    i += 1
                else:
                    # candidate hit the configured ceiling with uids remaining
                    self.cfg.log.debug("API_userdata/__next_available_uid: No available UIDs!")
                    raise UserdataError("API_userdata/__next_available_uid: No available UIDs!")
            # every existing uid was contiguous; first free uid is one past them
            return i
    except Exception, e:
        raise UserdataError("API_userdata/__next_available_uid: %s" % e)
def __next_available_gid(self, domain):
    """
    [description]
    searches the db for existing GIDS and picks the next available GID within the parameters configured in vyvyan.yaml

    [parameter info]
    required:
        domain: the domain we're checking gids for

    [return value]
    returns an integer representing the next available GID

    [raises]
    UserdataError if the gid range (gid_start..gid_end) is exhausted or
    anything else goes wrong
    """
    try:
        # get the first allowable gid; "i" walks forward as we consume
        # already-taken gids from the sorted list
        i = self.cfg.gid_start
        gidlist = []
        # fetch all existing groups in this domain
        g = self.cfg.dbsess.query(Groups).\
            filter(Groups.domain==domain).all()
        # add all known gids to a list
        for groupentry in g:
            gidlist.append(groupentry.gid)
        # sort numerically (key=int guards against string-typed gids)
        gidlist.sort(key=int)
        # if we don't have any groups in the groups table
        # return the default first gid as configured in the yaml
        # otherwise pick the next open gid and return it
        if not gidlist:
            return self.cfg.gid_start
        else:
            for gg in gidlist:
                if gg < self.cfg.gid_start:
                    # gids below the configured floor are ignored entirely
                    pass
                elif not i == gg and i < self.cfg.gid_end:
                    # gap found: "i" is not taken by this (or any smaller) gid
                    return i
                elif i < self.cfg.gid_end:
                    # "i" is taken; advance to the next candidate
                    i += 1
                else:
                    # candidate hit the configured ceiling with gids remaining
                    self.cfg.log.debug("API_userdata/__next_available_gid: No available GIDs!")
                    raise UserdataError("API_userdata/__next_available_gid: No available GIDs!")
            # every existing gid was contiguous; first free gid is one past them
            return i
    except Exception, e:
        raise UserdataError("API_userdata/__next_available_gid: %s" % e)
def __get_groups_by_user(self, username, domain):
    """
    [description]
    find every group a user is mapped into

    [parameter info]
    required:
        username: the username to search for
        domain: the domain to search in

    [return value]
    returns a list of Groups ORM objects (empty when the user has no
    group mappings); raises UserdataError if the user does not exist
    """
    try:
        # the user must exist before we chase its mappings
        usr = self.__get_user_obj(username, domain)
        if not usr:
            self.cfg.log.debug("API_userdata/__get_groups_by_user: user not found: %s" % username)
            raise UserdataError("API_userdata/__get_groups_by_user: user not found: %s" % username)
        # resolve each user-to-group mapping into its Groups row
        groups = []
        for ugmap in self.cfg.dbsess.query(UserGroupMapping).\
                filter(UserGroupMapping.users_id==usr.id).all():
            groups.append(self.cfg.dbsess.query(Groups).filter(Groups.id==ugmap.groups_id).first())
        return groups
    except Exception as e:
        raise UserdataError("API_userdata/__get_groups_by_user: %s" % e)
def __get_users_by_group(self, groupname, domain):
    """
    [description]
    searches the db for user-group mappings by groupname

    [parameter info]
    required:
        groupname: the groupname to search for
        domain: the domain to search in

    [return value]
    returns a list of Users ORM objects (empty when the group has no
    members); raises UserdataError if the group does not exist
    """
    try:
        # fetch our group
        ulist = []
        g = self.__get_group_obj(groupname, domain)
        if not g:
            self.cfg.log.debug("API_userdata/__get_users_by_group: group %s not found in %s" % (groupname, domain))
            # BUG FIX: this error message previously named "__get_groups_by_user"
            raise UserdataError("API_userdata/__get_users_by_group: group %s not found in %s" % (groupname, domain))
        # get all users mapped to this group
        maplist = self.cfg.dbsess.query(UserGroupMapping).\
                  filter(UserGroupMapping.groups_id==g.id).all()
        # add each user mapped to the group to a list, return it
        if maplist:
            for ugmap in maplist:
                ulist.append(self.cfg.dbsess.query(Users).filter(Users.id==ugmap.users_id).first())
        return ulist
    except Exception as e:
        raise UserdataError("API_userdata/__get_users_by_group: %s" % e)
def __get_sudo_cmds_by_group(self, groupname, domain):
    """
    [description]
    searches the db for group-to-sudocommand mappings by groupname
    (docstring fixed: this does not search user-group mappings)

    [parameter info]
    required:
        groupname: the groupname to search for
        domain: the domain to search in

    [return value]
    returns a list of GroupSudocommandMapping ORM objects (possibly empty);
    raises UserdataError if the group does not exist
    """
    try:
        # fetch our group
        g = self.__get_group_obj(groupname, domain)
        if not g:
            self.cfg.log.debug("API_userdata/__get_sudo_cmds_by_group: group %s not found in %s" % (groupname, domain))
            # BUG FIX: this error message previously named "__get_sudo_cmds_by_user"
            raise UserdataError("API_userdata/__get_sudo_cmds_by_group: group %s not found in %s" % (groupname, domain))
        # get all sudo commands mapped to this group
        return self.cfg.dbsess.query(GroupSudocommandMapping).\
               filter(GroupSudocommandMapping.groups_id==g.id).all()
    except Exception as e:
        # BUG FIX: this error message previously named "__get_sudo_commands_by_group"
        raise UserdataError("API_userdata/__get_sudo_cmds_by_group: %s" % e)
def __map_sudoers(self, group, sudo_cmds):
    """
    [description]
    synchronize a group's sudo-command mappings with the given list:
    missing mappings are created and existing mappings not present in
    the list are removed

    [parameter info]
    required:
        group: the ORM object of the group we want to map
        sudo_cmds: a list of strings representing the commands to map to the group

    [return]
    Returns "success" if successful, raises an error if unsuccessful
    """
    try:
        # for sanity's sake
        if not group:
            self.cfg.log.debug("API_userdata/__map_sudoers: group not found, missing parameter")
            raise UserdataError("API_userdata/__map_sudoers: group not found, missing parameter")
        if not sudo_cmds:
            self.cfg.log.debug("API_userdata/__map_sudoers: sudo_cmds not found, missing parameter")
            raise UserdataError("API_userdata/__map_sudoers: sudo_cmds not found, missing parameter")
        # housekeeping: trim surrounding whitespace from every command
        clean_sudo_cmds = [cmd.strip(' \t\n\r') for cmd in sudo_cmds]
        # create any mapping that does not exist yet
        for cmd in clean_sudo_cmds:
            existing = self.cfg.dbsess.query(GroupSudocommandMapping).\
                       filter(GroupSudocommandMapping.sudocommand==cmd).\
                       filter(GroupSudocommandMapping.groups_id==group.id).first()
            if not existing:
                self.cfg.dbsess.add(GroupSudocommandMapping(group.id, cmd))
        # drop any existing mapping that is no longer in the wanted list
        for gsmap in self.cfg.dbsess.query(GroupSudocommandMapping).filter(GroupSudocommandMapping.groups_id==group.id).all():
            if gsmap.sudocommand not in clean_sudo_cmds:
                self.cfg.dbsess.delete(gsmap)
        # commit our transaction
        self.cfg.dbsess.commit()
        # declare victory
        return 'success'
    except Exception as e:
        # something odd happened, explode violently
        self.cfg.dbsess.rollback()
        self.cfg.log.debug("API_userdata/__map_sudoers: error: %s" % e)
        raise UserdataError("API_userdata/__map_sudoers: error: %s" % e)
def __unmap_sudoers(self, group):
    """
    [description]
    remove every sudo-command mapping attached to a group

    [parameter info]
    required:
        group: the ORM object of the group we want to unmap

    [return]
    Returns "success" if successful, raises an error if unsuccessful
    """
    try:
        # for sanity's sake
        if not group:
            self.cfg.log.debug("API_userdata/__unmap_sudoers: group not found, missing parameter")
            raise UserdataError("API_userdata/__unmap_sudoers: group not found, missing parameter")
        # fetch every mapping for this group and remove it
        mappings = self.cfg.dbsess.query(GroupSudocommandMapping).\
                   filter(GroupSudocommandMapping.groups_id==group.id).all()
        for gsmap in mappings:
            self.cfg.dbsess.delete(gsmap)
        # commit our transaction
        self.cfg.dbsess.commit()
        # declare victory
        return 'success'
    except Exception as e:
        # something odd happened, explode violently
        self.cfg.dbsess.rollback()
        self.cfg.log.debug("API_userdata/__unmap_sudoers: error: %s" % e)
        raise UserdataError("API_userdata/__unmap_sudoers: error: %s" % e)
| 42.596443
| 183
| 0.472873
| 9,258
| 93,414
| 4.662562
| 0.055304
| 0.03519
| 0.024556
| 0.036835
| 0.816661
| 0.779641
| 0.750568
| 0.696405
| 0.643886
| 0.614094
| 0
| 0.000841
| 0.427516
| 93,414
| 2,192
| 184
| 42.615876
| 0.806332
| 0.092791
| 0
| 0.54235
| 0
| 0.004098
| 0.23199
| 0.038031
| 0
| 0
| 0
| 0.000456
| 0
| 0
| null | null | 0.011612
| 0.004781
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc3dd87173d8e3e1a808e0fda902045e844dddbf
| 12
|
py
|
Python
|
mock/ten.py
|
kevinghst/UDA_sup
|
c970622370d5de6b8c48b458cb8b4fe59e37effb
|
[
"Apache-2.0"
] | null | null | null |
mock/ten.py
|
kevinghst/UDA_sup
|
c970622370d5de6b8c48b458cb8b4fe59e37effb
|
[
"Apache-2.0"
] | null | null | null |
mock/ten.py
|
kevinghst/UDA_sup
|
c970622370d5de6b8c48b458cb8b4fe59e37effb
|
[
"Apache-2.0"
] | null | null | null |
print("ten")
| 12
| 12
| 0.666667
| 2
| 12
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 1
| 12
| 12
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
fc7ac7852ece5f0fb1f04bdb89ea06ad919cb5ac
| 29
|
py
|
Python
|
python/testData/refactoring/changeSignature/newParameterWithSignatureDefaultBeforeExistingWithoutSignatureDefault.py
|
Sajaki/intellij-community
|
6748af2c40567839d11fd652ec77ba263c074aad
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/refactoring/changeSignature/newParameterWithSignatureDefaultBeforeExistingWithoutSignatureDefault.py
|
Sajaki/intellij-community
|
6748af2c40567839d11fd652ec77ba263c074aad
|
[
"Apache-2.0"
] | 1
|
2020-07-30T19:04:47.000Z
|
2020-07-30T19:04:47.000Z
|
python/testData/refactoring/changeSignature/newParameterWithSignatureDefaultBeforeExistingWithoutSignatureDefault.py
|
bradleesand/intellij-community
|
750ff9c10333c9c1278c00dbe8d88c877b1b9749
|
[
"Apache-2.0"
] | 1
|
2020-10-15T05:56:42.000Z
|
2020-10-15T05:56:42.000Z
|
def fu<caret>nc(b):
pass
| 9.666667
| 19
| 0.586207
| 6
| 29
| 2.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 29
| 2
| 20
| 14.5
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.5
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
5d7b9409d60c9966863eddb0e02f18f582adde9c
| 1,171
|
py
|
Python
|
tests/kyu_7_tests/test_countdown_to_christmas.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
tests/kyu_7_tests/test_countdown_to_christmas.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
tests/kyu_7_tests/test_countdown_to_christmas.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
import unittest
from datetime import date
from katas.kyu_7.countdown_to_christmas import days_until_christmas
class DaysUntilChristmasTestCase(unittest.TestCase):
    """Unit tests for days_until_christmas(date) -> int (days remaining)."""
    def test_equals(self):
        self.assertEqual(days_until_christmas(date(2016, 12, 9)), 16)
    def test_equals_2(self):
        self.assertEqual(days_until_christmas(date(2016, 12, 8)), 17)
    def test_equals_3(self):
        self.assertEqual(days_until_christmas(date(1996, 12, 7)), 18)
    def test_equals_4(self):
        self.assertEqual(days_until_christmas(date(2015, 2, 23)), 305)
    def test_equals_5(self):
        self.assertEqual(days_until_christmas(date(2001, 7, 11)), 167)
    def test_equals_6(self):
        self.assertEqual(days_until_christmas(date(2000, 12, 9)), 16)
    def test_equals_7(self):
        self.assertEqual(days_until_christmas(date(1978, 3, 18)), 282)
    def test_equals_8(self):
        # Christmas day itself counts as zero days remaining
        self.assertEqual(days_until_christmas(date(2016, 12, 25)), 0)
    def test_equals_9(self):
        # the day after Christmas wraps around to next year's Christmas
        self.assertEqual(days_until_christmas(date(2016, 12, 26)), 364)
    def test_equals_10(self):
        # 2016 is a leap year, so the wrap from 2015-12-26 spans 365 days
        self.assertEqual(days_until_christmas(date(2015, 12, 26)), 365)
| 31.648649
| 71
| 0.70965
| 170
| 1,171
| 4.629412
| 0.288235
| 0.125794
| 0.251588
| 0.292249
| 0.604828
| 0.604828
| 0.561626
| 0.35324
| 0.238882
| 0
| 0
| 0.111686
| 0.17421
| 1,171
| 36
| 72
| 32.527778
| 0.702172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.416667
| 1
| 0.416667
| false
| 0
| 0.125
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
5d9e9c956ed0de31018ca7c6e4117e2dfb3f9f5a
| 168
|
py
|
Python
|
src/utilities/thread_utilities.py
|
pmcneely/utilities
|
efd5aa161e041fd0f33741c46bc3cb6b8b9c0653
|
[
"MIT"
] | null | null | null |
src/utilities/thread_utilities.py
|
pmcneely/utilities
|
efd5aa161e041fd0f33741c46bc3cb6b8b9c0653
|
[
"MIT"
] | 4
|
2022-02-14T20:26:18.000Z
|
2022-02-18T16:35:49.000Z
|
src/utilities/thread_utilities.py
|
pmcneely/utilities
|
efd5aa161e041fd0f33741c46bc3cb6b8b9c0653
|
[
"MIT"
] | null | null | null |
class AtomicBool():
    """A tiny mutable wrapper around a single boolean flag.

    NOTE(review): despite the name, there is no lock here; cross-thread
    safety would rest on CPython's single attribute assignment — confirm
    that callers do not need a stronger guarantee.
    """

    def __init__(self, initial_bool=False):
        """Store the starting flag value (defaults to False)."""
        self.value = initial_bool

    def set_value(self, new_value):
        """Overwrite the stored flag with *new_value*."""
        self.value = new_value
| 18.666667
| 43
| 0.660714
| 22
| 168
| 4.636364
| 0.5
| 0.215686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.244048
| 168
| 8
| 44
| 21
| 0.80315
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
5d9ff71ea0e3d7fc23cc00f58bf75e83d6cbb379
| 47,722
|
py
|
Python
|
notebooks/Users/sonya/airports_inbound_outbound_diverted_sonya_v7.py
|
superli3/ucb-w261-sp2021-team25
|
102e9859e878a54f84554e66425097217a0485e7
|
[
"Apache-2.0"
] | null | null | null |
notebooks/Users/sonya/airports_inbound_outbound_diverted_sonya_v7.py
|
superli3/ucb-w261-sp2021-team25
|
102e9859e878a54f84554e66425097217a0485e7
|
[
"Apache-2.0"
] | null | null | null |
notebooks/Users/sonya/airports_inbound_outbound_diverted_sonya_v7.py
|
superli3/ucb-w261-sp2021-team25
|
102e9859e878a54f84554e66425097217a0485e7
|
[
"Apache-2.0"
] | 1
|
2021-04-19T20:41:48.000Z
|
2021-04-19T20:41:48.000Z
|
# Databricks notebook source
from pyspark.sql import functions as f
from pyspark.sql.functions import col, sum, avg, max, count, countDistinct, weekofyear, to_timestamp, date_format, to_date, lit, lag, unix_timestamp, expr, ceil, floor, when, hour, array
from pyspark.sql.types import StructType, StructField, StringType, DoubleType, IntegerType, NullType, ShortType, DateType, BooleanType, BinaryType, TimestampType
from pyspark.sql import SQLContext
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pyspark
from pyspark.ml.classification import LogisticRegression, RandomForestClassifier
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler, VectorIndexer, StringIndexer, MinMaxScaler
from pyspark.ml.classification import LogisticRegression
from pyspark.mllib.classification import LogisticRegressionWithSGD, LogisticRegressionWithLBFGS
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from distutils.version import LooseVersion
from pyspark.ml import Pipeline
from pyspark.sql.window import Window
from pandas.tseries.holiday import USFederalHolidayCalendar
import datetime
from dateutil import parser, tz
spark.conf.set("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation","true")
# COMMAND ----------
# MAGIC %md
# MAGIC # Main Table: group25.airlines_utc_main
# COMMAND ----------
##################################################
df_airlines_utc_main = sqlContext.sql("""
SELECT * FROM group25.airlines_utc_main
""")
# COMMAND ----------
# MAGIC %md #Median
# COMMAND ----------
# MAGIC %md ##Max outbound median
# COMMAND ----------
##########################
# outbound count for each airport at each hour
# restrict to year 2015 - 2017
def compute_max_median_outbound_flights_per_airport():
    """Return a DataFrame (CALL_SIGN_DEP, MAX_MEDIAN_OUTBOUND) per airport.

    For each departure airport the hourly outbound flight count is computed
    per flight date and local hour (2015-2017 only), the approximate median
    of those counts is taken per (airport, hour-of-day), and the maximum of
    the medians is taken per airport.

    Side effects: drops/recreates three tables in the `group25` schema and
    calls `display` (Databricks notebook globals `sqlContext`/`display`
    are assumed to be in scope).

    Fix vs. previous revision: the KORD preview ordered by FL_DATE after
    that column had been dropped, which raises an AnalysisException; the
    preview now orders by DEP_LOCAL_HOUR only.
    """
    # Hourly outbound counts per airport per flight date, 2015-2017 only.
    df_outbound_flights_per_airport_per_hour = sqlContext.sql("""
SELECT
CALL_SIGN_DEP,
FL_DATE,
DEP_LOCAL_HOUR,
COUNT(*) AS HOURLY_OUTBOUND_COUNT
FROM group25.airlines_utc_main
WHERE CALL_SIGN_DEP IS NOT NULL
AND DEP_LOCAL_HOUR IS NOT NULL
AND (YEAR=2015 OR YEAR=2016 OR YEAR=2017)
GROUP BY 1, 2, 3
ORDER BY 1, 2, 3 ASC
""").cache()
    # Persist the hourly counts (drop-and-recreate keeps reruns idempotent).
    df_outbound_flights_per_airport_per_hour.createOrReplaceTempView("mytempTable")
    sqlContext.sql("DROP TABLE IF EXISTS group25.df_outbound_flights_per_airport_per_hour")
    sqlContext.sql("create table group25.df_outbound_flights_per_airport_per_hour as select * from mytempTable")
    display(df_outbound_flights_per_airport_per_hour)
    # Median of the hourly outbound counts per (airport, hour-of-day).
    from pyspark.sql import Window
    import pyspark.sql.functions as F
    grp_window = Window.partitionBy('CALL_SIGN_DEP', 'DEP_LOCAL_HOUR')
    # percentile_approx at 0.5 == approximate median.
    magic_percentile = F.expr('percentile_approx(HOURLY_OUTBOUND_COUNT, 0.5)')
    df_median_outbound_flights_per_airport = df_outbound_flights_per_airport_per_hour.withColumn('MEDIAN_OUTBOUND_COUNT', magic_percentile.over(grp_window))
    # Drop the per-date detail so one median row remains per (airport, hour).
    df_median_outbound_flights_per_airport = df_median_outbound_flights_per_airport.drop('FL_DATE').drop('HOURLY_OUTBOUND_COUNT').dropDuplicates()
    df_median_outbound_flights_per_airport.createOrReplaceTempView("mytempTable")
    sqlContext.sql("DROP TABLE IF EXISTS group25.df_median_outbound_flights_per_airport")
    sqlContext.sql("create table group25.df_median_outbound_flights_per_airport as select * from mytempTable")
    # BUG FIX: FL_DATE was dropped above, so ordering by it failed; order the
    # KORD preview by DEP_LOCAL_HOUR only.
    display(df_median_outbound_flights_per_airport.filter(df_median_outbound_flights_per_airport.CALL_SIGN_DEP=='KORD').orderBy("DEP_LOCAL_HOUR"))
    # Max median per airport.
    df_max_median_outbound_flights_per_airport = sqlContext.sql("""
SELECT
CALL_SIGN_DEP,
MAX(MEDIAN_OUTBOUND_COUNT) AS MAX_MEDIAN_OUTBOUND
FROM group25.df_median_outbound_flights_per_airport
GROUP BY 1
ORDER BY 1
""")
    df_max_median_outbound_flights_per_airport.createOrReplaceTempView("mytempTable")
    sqlContext.sql("DROP TABLE IF EXISTS group25.df_max_median_outbound_flights_per_airport")
    sqlContext.sql("create table group25.df_max_median_outbound_flights_per_airport as select * from mytempTable")
    display(df_max_median_outbound_flights_per_airport)
    display(df_max_median_outbound_flights_per_airport.where("CALL_SIGN_DEP=='KORD'").orderBy("CALL_SIGN_DEP"))
    return df_max_median_outbound_flights_per_airport
df_max_median_outbound_flights_per_airport = compute_max_median_outbound_flights_per_airport()
# COMMAND ----------
# MAGIC %sql
# MAGIC select count(*) from group25.df_max_median_outbound_flights_per_airport
# COMMAND ----------
# display(df_max_median_outbound_flights_per_airport.filter(df_max_median_outbound_flights_per_airport.CALL_SIGN_DEP=='KORD').orderBy("CALL_SIGN_DEP"))
# COMMAND ----------
# MAGIC %md ##max_inbound_median
# COMMAND ----------
##########################
# inbound count for each airport at each hour
# restrict to year 2015 - 2017
def compute_max_median_inbound_flights_per_airport():
    """Return a DataFrame (CALL_SIGN_ARR, MAX_MEDIAN_INBOUND) per airport.

    Mirror of compute_max_median_outbound_flights_per_airport for arrivals:
    hourly inbound counts per (airport, flight date, local hour) over
    2015-2017, approximate median per (airport, hour-of-day), then the
    maximum of those medians per airport.

    Side effects: drops/recreates three tables in the `group25` schema and
    calls `display` (Databricks notebook globals assumed).

    Fixes vs. previous revision:
      * the WHERE clause read "IS NOT NULLSpark" (invalid SQL) -> "IS NOT NULL";
      * the KORD preview ordered by FL_DATE after that column was dropped
        (AnalysisException) -> order by ARR_LOCAL_HOUR only.
    """
    # Hourly inbound counts per airport per flight date, 2015-2017 only.
    df_inbound_flights_per_airport_per_hour = sqlContext.sql("""
SELECT
CALL_SIGN_ARR,
FL_DATE,
ARR_LOCAL_HOUR,
COUNT(*) AS HOURLY_INBOUND_COUNT
FROM group25.airlines_utc_main
WHERE CALL_SIGN_ARR IS NOT NULL
AND ARR_LOCAL_HOUR IS NOT NULL
AND (YEAR=2015 OR YEAR=2016 OR YEAR=2017)
GROUP BY 1, 2, 3
ORDER BY 1, 2, 3 ASC
""").cache()
    # Persist the hourly counts (drop-and-recreate keeps reruns idempotent).
    df_inbound_flights_per_airport_per_hour.createOrReplaceTempView("mytempTable")
    sqlContext.sql("DROP TABLE IF EXISTS group25.df_inbound_flights_per_airport_per_hour")
    sqlContext.sql("create table group25.df_inbound_flights_per_airport_per_hour as select * from mytempTable")
    display(df_inbound_flights_per_airport_per_hour)
    # Median of the hourly inbound counts per (airport, hour-of-day).
    from pyspark.sql import Window
    import pyspark.sql.functions as F
    grp_window = Window.partitionBy('CALL_SIGN_ARR', 'ARR_LOCAL_HOUR')
    # percentile_approx at 0.5 == approximate median.
    magic_percentile = F.expr('percentile_approx(HOURLY_INBOUND_COUNT, 0.5)')
    df_median_inbound_flights_per_airport = df_inbound_flights_per_airport_per_hour.withColumn('MEDIAN_INBOUND_COUNT', magic_percentile.over(grp_window))
    # Drop the per-date detail so one median row remains per (airport, hour).
    df_median_inbound_flights_per_airport = df_median_inbound_flights_per_airport.drop('FL_DATE').drop('HOURLY_INBOUND_COUNT').dropDuplicates()
    df_median_inbound_flights_per_airport.createOrReplaceTempView("mytempTable")
    sqlContext.sql("DROP TABLE IF EXISTS group25.df_median_inbound_flights_per_airport")
    sqlContext.sql("create table group25.df_median_inbound_flights_per_airport as select * from mytempTable")
    # BUG FIX: FL_DATE was dropped above; order the preview by ARR_LOCAL_HOUR.
    display(df_median_inbound_flights_per_airport.filter(df_median_inbound_flights_per_airport.CALL_SIGN_ARR=='KORD').orderBy("ARR_LOCAL_HOUR"))
    # Max median per airport.
    df_max_median_inbound_flights_per_airport = sqlContext.sql("""
SELECT
CALL_SIGN_ARR,
MAX(MEDIAN_INBOUND_COUNT) AS MAX_MEDIAN_INBOUND
FROM group25.df_median_inbound_flights_per_airport
GROUP BY 1
ORDER BY 1
""")
    df_max_median_inbound_flights_per_airport.createOrReplaceTempView("mytempTable")
    sqlContext.sql("DROP TABLE IF EXISTS group25.df_max_median_inbound_flights_per_airport")
    sqlContext.sql("create table group25.df_max_median_inbound_flights_per_airport as select * from mytempTable")
    display(df_max_median_inbound_flights_per_airport)
    display(df_max_median_inbound_flights_per_airport.where("CALL_SIGN_ARR=='KORD'").orderBy("CALL_SIGN_ARR"))
    return df_max_median_inbound_flights_per_airport
df_max_median_inbound_flights_per_airport = compute_max_median_inbound_flights_per_airport()
# COMMAND ----------
# MAGIC %md
# MAGIC # Inbound & Outbound rolling window
# COMMAND ----------
# MAGIC %md
# MAGIC ##normalized outbound
# COMMAND ----------
#####################################################################
def compute_normalized_outbound_flights(df_max_median_outbound_flights_per_airport):
    """Build 0-6h trailing outbound-flight counts per airport and normalize them.

    For every (departure airport, flight date, local hour) the hourly outbound
    count is computed; trailing rolling sums over 1..6 hours are derived within
    each (airport, date) partition, differenced into per-lag-hour buckets, and
    each lag column is divided by the airport's MAX_MEDIAN_OUTBOUND taken from
    *df_max_median_outbound_flights_per_airport*.

    Side effects: drops/recreates two `group25` tables and calls `display`
    (Databricks notebook globals `sqlContext`/`display` assumed in scope).
    Returns the normalized DataFrame.
    """
    # outbound rolling window
    hours = lambda i: i * 60 * 60  # convert a number of hours to seconds (for rangeBetween)
    airlines_outbound_flights_main_version_time_series_rolling_counts = None
    # select the dataframe: hourly outbound counts per airport/date/local hour
    df1 = sqlContext.sql("""
SELECT
CALL_SIGN_DEP,
FL_DATE,
DEP_LOCAL_HOUR,
COUNT(*) AS OUTBOUND_COUNT
FROM group25.airlines_utc_main
WHERE CALL_SIGN_DEP IS NOT NULL
AND DEP_LOCAL_HOUR IS NOT NULL
GROUP BY 1, 2, 3
ORDER BY 1, 2, 3 ASC
""")
    # FL_TIME = midnight of FL_DATE plus the local hour, cast to timestamp
    df1 = df1.withColumn("FL_TIME", unix_timestamp(col("FL_DATE").cast("timestamp")) + df1.DEP_LOCAL_HOUR*60*60)
    df1 = df1.withColumn("FL_TIME", col("FL_TIME").cast("timestamp"))
    display(df1.filter(df1.CALL_SIGN_DEP=='KORD').orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    # calculate the rolling window
    # the 0-hour bucket seeds the trailing sums (cast to double for sum())
    df1 = df1.withColumn('OUTBOUND_COUNT_0H', df1.OUTBOUND_COUNT.cast('double'))
    # cumulative trailing sums over 1..6 hours, computed within one airport+date
    total_lag_time = 6
    for i in range(1, total_lag_time+1):
        window = (Window.partitionBy("CALL_SIGN_DEP", "FL_DATE").orderBy(col("FL_TIME").cast('long')).rangeBetween(-hours(int(i)), 0))
        df1 = df1.withColumn('OUTBOUND_COUNT_' +str(i) + 'H', sum("OUTBOUND_COUNT_0H").over(window))
    # difference the cumulative sums into per-lag-hour buckets
    # (must iterate high->low so each step reads the not-yet-differenced column)
    for i in range(total_lag_time, 0, -1):
        df1 = df1.withColumn('OUTBOUND_COUNT_' +str(i) + 'H', col('OUTBOUND_COUNT_' +str(i) + 'H') - col('OUTBOUND_COUNT_' +str(i-1) + 'H'))
    # display(df1.filter(df1.CALL_SIGN_DEP=='KORD').orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    # save table (drop-and-recreate keeps notebook reruns idempotent)
    df1.createOrReplaceTempView("mytempTable")
    airlines_outbound_flights_main_version_time_series_rolling_counts = df1
    sqlContext.sql("DROP TABLE IF EXISTS group25.airlines_outbound_flights_main_version_time_series_rolling_counts");
    sqlContext.sql("create table group25.airlines_outbound_flights_main_version_time_series_rolling_counts as select * from mytempTable");
    # display a KORD preview of the rolling counts
    display(
        airlines_outbound_flights_main_version_time_series_rolling_counts
        .where("CALL_SIGN_DEP==\'KORD\'")
        .orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    ##### normalized outbound rolling window by max_median
    df1 = airlines_outbound_flights_main_version_time_series_rolling_counts
    df2 = df_max_median_outbound_flights_per_airport
    # left join so airports missing a max-median row are kept (NULL divisor)
    df_normalized_outbound_flights_per_airport=df1.alias("a").join(
        df2.alias("b"), df1['CALL_SIGN_DEP'] == df2['CALL_SIGN_DEP'], how="left"
    ).select( 'a.CALL_SIGN_DEP',
        'FL_DATE',
        'DEP_LOCAL_HOUR',
        'OUTBOUND_COUNT',
        'FL_TIME',
        'OUTBOUND_COUNT_0H',
        'OUTBOUND_COUNT_1H',
        'OUTBOUND_COUNT_2H',
        'OUTBOUND_COUNT_3H',
        'OUTBOUND_COUNT_4H',
        'OUTBOUND_COUNT_5H',
        'OUTBOUND_COUNT_6H',
        'MAX_MEDIAN_OUTBOUND'
    )
    # each lag bucket divided by the airport's max median outbound count
    for i in range(0, total_lag_time+1):
        df_normalized_outbound_flights_per_airport = df_normalized_outbound_flights_per_airport.withColumn(
            'NORMALIZED_OUTBOUND_COUNT_' +str(i) + 'H',
            col('OUTBOUND_COUNT_' +str(i) + 'H').cast('int')/col('MAX_MEDIAN_OUTBOUND')
        )
    # display(df_normalized_outbound_flights_per_airport)
    display(df_normalized_outbound_flights_per_airport.filter(df_normalized_outbound_flights_per_airport.CALL_SIGN_DEP=='KORD').orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    df_normalized_outbound_flights_per_airport.createOrReplaceTempView("mytempTable")
    sqlContext.sql("DROP TABLE IF EXISTS group25.df_normalized_outbound_flights_per_airport");
    sqlContext.sql("create table group25.df_normalized_outbound_flights_per_airport as select * from mytempTable");
    # # showing the median outbound flight of airport KORD
    # display(df_median_outbound_flights_per_airport.where("CALL_SIGN_DEP==\'KORD\'").orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    return df_normalized_outbound_flights_per_airport
df_normalized_outbound_flights_per_airport = compute_normalized_outbound_flights(df_max_median_outbound_flights_per_airport)
# COMMAND ----------
# MAGIC %md ##normalized inbound
# COMMAND ----------
def compute_normalized_inbound_flights(df_max_median_inbound_flights_per_airport):
    """Build 0-6h trailing inbound-flight counts per airport and normalize them.

    Mirror of compute_normalized_outbound_flights for arrivals: hourly inbound
    counts per (CALL_SIGN_ARR, FL_DATE, ARR_LOCAL_HOUR), trailing 1..6h rolling
    sums within each (airport, date) partition, differenced into per-lag-hour
    buckets, then each lag column divided by the airport's MAX_MEDIAN_INBOUND.

    Side effects: drops/recreates two `group25` tables, calls `display`, and
    reads the module-level `df_airlines_utc_main` DataFrame (Databricks
    notebook globals assumed in scope).

    Returns (normalized DataFrame, row_counts list of (label, count) tuples).

    Fixes vs. previous revision, in the row_counts bookkeeping:
      * a missing comma after the 4th tuple made Python *call* that tuple;
      * '.' was used instead of ',' inside the last tuple;
      * the global was misspelled 'df_airlins_utc_main'.
    """
    # inbound rolling window
    hours = lambda i: i * 60 * 60  # convert a number of hours to seconds (for rangeBetween)
    airlines_inbound_flights_main_version_time_series_rolling_counts = None
    # hourly inbound counts per airport/date/local hour
    df1 = sqlContext.sql("""
SELECT
CALL_SIGN_ARR,
FL_DATE,
ARR_LOCAL_HOUR,
COUNT(*) AS INBOUND_COUNT
FROM group25.airlines_utc_main
WHERE CALL_SIGN_ARR IS NOT NULL
AND ARR_LOCAL_HOUR IS NOT NULL
GROUP BY 1, 2, 3
ORDER BY 1, 2, 3 ASC
""")
    # FL_TIME = midnight of FL_DATE plus the local hour, cast to timestamp
    df1 = df1.withColumn("FL_TIME", unix_timestamp(col("FL_DATE").cast("timestamp")) + df1.ARR_LOCAL_HOUR*60*60)
    df1 = df1.withColumn("FL_TIME", col("FL_TIME").cast("timestamp"))
    display(df1.filter(df1.CALL_SIGN_ARR=='KORD').orderBy("FL_DATE", "ARR_LOCAL_HOUR"))
    # the 0-hour bucket seeds the trailing sums (cast to double for sum())
    df1 = df1.withColumn('INBOUND_COUNT_0H', df1.INBOUND_COUNT.cast('double'))
    # cumulative trailing sums over 1..6 hours, within one airport+date
    total_lag_time = 6
    for i in range(1, total_lag_time+1):
        window = (Window.partitionBy("CALL_SIGN_ARR", "FL_DATE").orderBy(col("FL_TIME").cast('long')).rangeBetween(-hours(int(i)), 0))
        df1 = df1.withColumn('INBOUND_COUNT_' +str(i) + 'H', sum("INBOUND_COUNT_0H").over(window))
    # difference the cumulative sums into per-lag-hour buckets (high->low order)
    for i in range(total_lag_time, 0, -1):
        df1 = df1.withColumn('INBOUND_COUNT_' +str(i) + 'H', col('INBOUND_COUNT_' +str(i) + 'H') - col('INBOUND_COUNT_' +str(i-1) + 'H'))
    # save table (drop-and-recreate keeps reruns idempotent)
    df1.createOrReplaceTempView("mytempTable")
    airlines_inbound_flights_main_version_time_series_rolling_counts = df1
    sqlContext.sql("DROP TABLE IF EXISTS group25.airlines_inbound_flights_main_version_time_series_rolling_counts")
    sqlContext.sql("create table group25.airlines_inbound_flights_main_version_time_series_rolling_counts as select * from mytempTable")
    # KORD preview of the rolling counts
    display(
        airlines_inbound_flights_main_version_time_series_rolling_counts
        .where("CALL_SIGN_ARR=='KORD'")
        .orderBy("FL_DATE", "ARR_LOCAL_HOUR"))
    # normalize the rolling window by each airport's max median inbound count
    # df2: rolling-count DataFrame, df3: max-median DataFrame
    df2 = airlines_inbound_flights_main_version_time_series_rolling_counts
    df3 = df_max_median_inbound_flights_per_airport
    # left join so airports missing a max-median row are kept (NULL divisor)
    df_normalized_inbound_flights_per_airport = df2.alias("a").join(
        df3.alias("b"), df2['CALL_SIGN_ARR'] == df3['CALL_SIGN_ARR'], how="left"
    ).select('a.CALL_SIGN_ARR',
        'FL_DATE',
        'ARR_LOCAL_HOUR',
        'INBOUND_COUNT',
        'FL_TIME',
        'INBOUND_COUNT_0H',
        'INBOUND_COUNT_1H',
        'INBOUND_COUNT_2H',
        'INBOUND_COUNT_3H',
        'INBOUND_COUNT_4H',
        'INBOUND_COUNT_5H',
        'INBOUND_COUNT_6H',
        'MAX_MEDIAN_INBOUND'
    )
    for i in range(0, total_lag_time+1):
        df_normalized_inbound_flights_per_airport = df_normalized_inbound_flights_per_airport.withColumn(
            'NORMALIZED_INBOUND_COUNT_' +str(i) + 'H',
            col('INBOUND_COUNT_' +str(i) + 'H').cast('int')/col('MAX_MEDIAN_INBOUND')
        )
    display(df_normalized_inbound_flights_per_airport.filter(df_normalized_inbound_flights_per_airport.CALL_SIGN_ARR=='KORD').orderBy("FL_DATE", "ARR_LOCAL_HOUR"))
    df_normalized_inbound_flights_per_airport.createOrReplaceTempView("mytempTable")
    sqlContext.sql("DROP TABLE IF EXISTS group25.df_normalized_inbound_flights_per_airport")
    sqlContext.sql("create table group25.df_normalized_inbound_flights_per_airport as select * from mytempTable")
    # Row-count summary.
    # BUG FIX: the list previously had a missing comma between the 4th and 5th
    # entries, used '.' instead of ',' in the last tuple, and misspelled the
    # module-level global df_airlines_utc_main.
    row_counts = [
        ('inbound counts ', df1.count()),
        ('max median inbound df ', df_max_median_inbound_flights_per_airport.count()),
        ('airlines_inbound_flights_main_version_time_series_rolling_counts ', airlines_inbound_flights_main_version_time_series_rolling_counts.count()),
        ('df_normalized_inbound_flights_per_airport ', df_normalized_inbound_flights_per_airport.count()),
        ('airlines_utc_main ', df_airlines_utc_main.count()),
    ]
    for label, n in row_counts:
        print(label, n)
    return df_normalized_inbound_flights_per_airport, row_counts
df_normalized_inbound_flights_per_airport, normalized_inbound_row_counts = compute_normalized_inbound_flights(df_max_median_inbound_flights_per_airport)
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT count(*) FROM (
# MAGIC SELECT
# MAGIC CALL_SIGN_ARR,
# MAGIC FL_DATE,
# MAGIC ARR_LOCAL_HOUR,
# MAGIC COUNT(*) AS INBOUND_COUNT
# MAGIC FROM group25.airlines_utc_main
# MAGIC WHERE CALL_SIGN_ARR IS NOT NULL
# MAGIC AND ARR_LOCAL_HOUR IS NOT NULL
# MAGIC GROUP BY 1, 2, 3
# MAGIC ORDER BY 1, 2, 3 ASC
# MAGIC )
# COMMAND ----------
# MAGIC %sql
# MAGIC
# MAGIC select
# MAGIC CALL_SIGN_ARR,
# MAGIC FL_DATE,
# MAGIC ARR_LOCAL_HOUR,
# MAGIC hour(dep_local_timestamp + INTERVAL 2 HOUR) as ARR_LOCAL_HOUR_2H,
# MAGIC sum(dep_del15) as DELAY_INBOUND_COUNT_0H
# MAGIC from group25.airlines_utc_main
# MAGIC where dep_del15 is not null
# MAGIC group by 1,2,3,4
# COMMAND ----------
# MAGIC %sql
# MAGIC
# MAGIC SELECT count(*) FROM group25.airlines_utc_main
# COMMAND ----------
# MAGIC %md #Diverted FLights
# COMMAND ----------
# MAGIC %md ##normalized diverted outbound
# COMMAND ----------
#####################################################################
def compute_normalized_diverted_outbound_flights(df_max_median_outbound_flights_per_airport):
    """Rolling 0-6h counts of DIVERTED outbound flights per airport, normalized.

    Same pipeline as compute_normalized_outbound_flights but restricted to
    rows with diverted=1; the lag columns are normalized by the airport's
    overall MAX_MEDIAN_OUTBOUND (computed from all flights, not only diverted
    ones).

    Side effects: drops/recreates two `group25` tables and calls `display`
    (Databricks notebook globals assumed). Returns the normalized DataFrame.
    """
    # diverted_outbound rolling window
    hours = lambda i: i * 60 * 60  # convert a number of hours to seconds (for rangeBetween)
    airlines_diverted_outbound_flights_main_version_time_series_rolling_counts = None
    # select the dataframe: hourly diverted-outbound counts per airport/date/hour
    df1 = sqlContext.sql("""
SELECT
CALL_SIGN_DEP,
FL_DATE,
DEP_LOCAL_HOUR,
COUNT(*) AS DIVERTED_OUTBOUND_COUNT
FROM group25.airlines_utc_main
WHERE CALL_SIGN_DEP IS NOT NULL
AND DEP_LOCAL_HOUR IS NOT NULL
AND diverted=1
GROUP BY 1, 2, 3
ORDER BY 1, 2, 3 ASC
""")
    # FL_TIME = midnight of FL_DATE plus the local hour, cast to timestamp
    df1 = df1.withColumn("FL_TIME", unix_timestamp(col("FL_DATE").cast("timestamp")) + df1.DEP_LOCAL_HOUR*60*60)
    df1 = df1.withColumn("FL_TIME", col("FL_TIME").cast("timestamp"))
    display(df1.filter(df1.CALL_SIGN_DEP=='KORD').orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    # calculate the rolling window
    # the 0-hour bucket seeds the trailing sums (cast to double for sum())
    df1 = df1.withColumn('DIVERTED_OUTBOUND_COUNT_0H', df1.DIVERTED_OUTBOUND_COUNT.cast('double'))
    # cumulative trailing sums over 1..6 hours, within one airport+date
    total_lag_time = 6
    for i in range(1, total_lag_time+1):
        window = (Window.partitionBy("CALL_SIGN_DEP", "FL_DATE").orderBy(col("FL_TIME").cast('long')).rangeBetween(-hours(int(i)), 0))
        df1 = df1.withColumn('DIVERTED_OUTBOUND_COUNT_' +str(i) + 'H', sum("DIVERTED_OUTBOUND_COUNT_0H").over(window))
    # difference the cumulative sums into per-lag-hour buckets (high->low order)
    for i in range(total_lag_time, 0, -1):
        df1 = df1.withColumn('DIVERTED_OUTBOUND_COUNT_' +str(i) + 'H', col('DIVERTED_OUTBOUND_COUNT_' +str(i) + 'H') - col('DIVERTED_OUTBOUND_COUNT_' +str(i-1) + 'H'))
    # display(df1.filter(df1.CALL_SIGN_DEP=='KORD').orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    # save table (drop-and-recreate keeps reruns idempotent)
    df1.createOrReplaceTempView("mytempTable")
    airlines_diverted_outbound_flights_main_version_time_series_rolling_counts = df1
    sqlContext.sql("DROP TABLE IF EXISTS group25.airlines_diverted_outbound_flights_main_version_time_series_rolling_counts");
    sqlContext.sql("create table group25.airlines_diverted_outbound_flights_main_version_time_series_rolling_counts as select * from mytempTable");
    # KORD preview of the rolling counts
    display(
        airlines_diverted_outbound_flights_main_version_time_series_rolling_counts
        .where("CALL_SIGN_DEP==\'KORD\'")
        .orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    ##### normalized diverted_outbound rolling window by max_median
    df1 = airlines_diverted_outbound_flights_main_version_time_series_rolling_counts
    df2 = df_max_median_outbound_flights_per_airport
    # left join so airports missing a max-median row are kept (NULL divisor)
    df_normalized_diverted_outbound_flights_per_airport=df1.alias("a").join(
        df2.alias("b"), df1['CALL_SIGN_DEP'] == df2['CALL_SIGN_DEP'], how="left"
    ).select( 'a.CALL_SIGN_DEP',
        'FL_DATE',
        'DEP_LOCAL_HOUR',
        'DIVERTED_OUTBOUND_COUNT',
        'FL_TIME',
        'DIVERTED_OUTBOUND_COUNT_0H',
        'DIVERTED_OUTBOUND_COUNT_1H',
        'DIVERTED_OUTBOUND_COUNT_2H',
        'DIVERTED_OUTBOUND_COUNT_3H',
        'DIVERTED_OUTBOUND_COUNT_4H',
        'DIVERTED_OUTBOUND_COUNT_5H',
        'DIVERTED_OUTBOUND_COUNT_6H',
        'MAX_MEDIAN_OUTBOUND'
    )
    # each lag bucket divided by the airport's max median outbound count
    for i in range(0, total_lag_time+1):
        df_normalized_diverted_outbound_flights_per_airport = df_normalized_diverted_outbound_flights_per_airport.withColumn(
            'NORMALIZED_DIVERTED_OUTBOUND_COUNT_' +str(i) + 'H',
            col('DIVERTED_OUTBOUND_COUNT_' +str(i) + 'H').cast('int')/col('MAX_MEDIAN_OUTBOUND')
        )
    # display(df_normalized_diverted_outbound_flights_per_airport)
    display(df_normalized_diverted_outbound_flights_per_airport.filter(df_normalized_diverted_outbound_flights_per_airport.CALL_SIGN_DEP=='KORD').orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    df_normalized_diverted_outbound_flights_per_airport.createOrReplaceTempView("mytempTable")
    sqlContext.sql("DROP TABLE IF EXISTS group25.df_normalized_diverted_outbound_flights_per_airport");
    sqlContext.sql("create table group25.df_normalized_diverted_outbound_flights_per_airport as select * from mytempTable");
    # # showing the median diverted_outbound flight of airport KORD
    # display(df_median_diverted_outbound_flights_per_airport.where("CALL_SIGN_DEP==\'KORD\'").orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    return df_normalized_diverted_outbound_flights_per_airport
df_normalized_diverted_outbound_flights_per_airport = compute_normalized_diverted_outbound_flights(df_max_median_outbound_flights_per_airport)
# COMMAND ----------
# MAGIC %md ##normalized diverted inbound
# COMMAND ----------
#####################################################################
def compute_normalized_diverted_inbound_flights(df_max_median_inbound_flights_per_airport):
    """Rolling 0-6h counts of DIVERTED inbound flights per airport, normalized.

    Same pipeline as compute_normalized_inbound_flights but restricted to rows
    with diverted=1; the lag columns are normalized by the airport's overall
    MAX_MEDIAN_INBOUND (computed from all flights, not only diverted ones).

    Side effects: drops/recreates two `group25` tables and calls `display`
    (Databricks notebook globals assumed). Returns the normalized DataFrame.
    """
    # diverted_inbound rolling window
    hours = lambda i: i * 60 * 60  # convert a number of hours to seconds (for rangeBetween)
    airlines_diverted_inbound_flights_main_version_time_series_rolling_counts = None
    # select the dataframe: hourly diverted-inbound counts per airport/date/hour
    df1 = sqlContext.sql("""
SELECT
CALL_SIGN_ARR,
FL_DATE,
ARR_LOCAL_HOUR,
COUNT(*) AS DIVERTED_INBOUND_COUNT
FROM group25.airlines_utc_main
WHERE CALL_SIGN_ARR IS NOT NULL
AND ARR_LOCAL_HOUR IS NOT NULL
AND diverted=1
GROUP BY 1, 2, 3
ORDER BY 1, 2, 3 ASC
""")
    # FL_TIME = midnight of FL_DATE plus the local hour, cast to timestamp
    df1 = df1.withColumn("FL_TIME", unix_timestamp(col("FL_DATE").cast("timestamp")) + df1.ARR_LOCAL_HOUR*60*60)
    df1 = df1.withColumn("FL_TIME", col("FL_TIME").cast("timestamp"))
    display(df1.filter(df1.CALL_SIGN_ARR=='KORD').orderBy("FL_DATE", "ARR_LOCAL_HOUR"))
    # calculate the rolling window
    # the 0-hour bucket seeds the trailing sums (cast to double for sum())
    df1 = df1.withColumn('DIVERTED_INBOUND_COUNT_0H', df1.DIVERTED_INBOUND_COUNT.cast('double'))
    # cumulative trailing sums over 1..6 hours, within one airport+date
    total_lag_time = 6
    for i in range(1, total_lag_time+1):
        window = (Window.partitionBy("CALL_SIGN_ARR", "FL_DATE").orderBy(col("FL_TIME").cast('long')).rangeBetween(-hours(int(i)), 0))
        df1 = df1.withColumn('DIVERTED_INBOUND_COUNT_' +str(i) + 'H', sum("DIVERTED_INBOUND_COUNT_0H").over(window))
    # difference the cumulative sums into per-lag-hour buckets (high->low order)
    for i in range(total_lag_time, 0, -1):
        df1 = df1.withColumn('DIVERTED_INBOUND_COUNT_' +str(i) + 'H', col('DIVERTED_INBOUND_COUNT_' +str(i) + 'H') - col('DIVERTED_INBOUND_COUNT_' +str(i-1) + 'H'))
    # display(df1.filter(df1.CALL_SIGN_ARR=='KORD').orderBy("FL_DATE", "ARR_LOCAL_HOUR"))
    # save table (drop-and-recreate keeps reruns idempotent)
    df1.createOrReplaceTempView("mytempTable")
    airlines_diverted_inbound_flights_main_version_time_series_rolling_counts = df1
    sqlContext.sql("DROP TABLE IF EXISTS group25.airlines_diverted_inbound_flights_main_version_time_series_rolling_counts");
    sqlContext.sql("create table group25.airlines_diverted_inbound_flights_main_version_time_series_rolling_counts as select * from mytempTable");
    # KORD preview of the rolling counts
    display(
        airlines_diverted_inbound_flights_main_version_time_series_rolling_counts
        .where("CALL_SIGN_ARR==\'KORD\'")
        .orderBy("FL_DATE", "ARR_LOCAL_HOUR"))
    ##### normalized diverted_inbound rolling window by max_median
    df1 = airlines_diverted_inbound_flights_main_version_time_series_rolling_counts
    df2 = df_max_median_inbound_flights_per_airport
    # left join so airports missing a max-median row are kept (NULL divisor)
    df_normalized_diverted_inbound_flights_per_airport=df1.alias("a").join(
        df2.alias("b"), df1['CALL_SIGN_ARR'] == df2['CALL_SIGN_ARR'], how="left"
    ).select( 'a.CALL_SIGN_ARR',
        'FL_DATE',
        'ARR_LOCAL_HOUR',
        'DIVERTED_INBOUND_COUNT',
        'FL_TIME',
        'DIVERTED_INBOUND_COUNT_0H',
        'DIVERTED_INBOUND_COUNT_1H',
        'DIVERTED_INBOUND_COUNT_2H',
        'DIVERTED_INBOUND_COUNT_3H',
        'DIVERTED_INBOUND_COUNT_4H',
        'DIVERTED_INBOUND_COUNT_5H',
        'DIVERTED_INBOUND_COUNT_6H',
        'MAX_MEDIAN_INBOUND'
    )
    # each lag bucket divided by the airport's max median inbound count
    for i in range(0, total_lag_time+1):
        df_normalized_diverted_inbound_flights_per_airport = df_normalized_diverted_inbound_flights_per_airport.withColumn(
            'NORMALIZED_DIVERTED_INBOUND_COUNT_' +str(i) + 'H',
            col('DIVERTED_INBOUND_COUNT_' +str(i) + 'H').cast('int')/col('MAX_MEDIAN_INBOUND')
        )
    # display(df_normalized_diverted_inbound_flights_per_airport)
    display(df_normalized_diverted_inbound_flights_per_airport.filter(df_normalized_diverted_inbound_flights_per_airport.CALL_SIGN_ARR=='KORD').orderBy("FL_DATE", "ARR_LOCAL_HOUR"))
    df_normalized_diverted_inbound_flights_per_airport.createOrReplaceTempView("mytempTable")
    sqlContext.sql("DROP TABLE IF EXISTS group25.df_normalized_diverted_inbound_flights_per_airport");
    sqlContext.sql("create table group25.df_normalized_diverted_inbound_flights_per_airport as select * from mytempTable");
    # # showing the median diverted_inbound flight of airport KORD
    # display(df_median_inbound_flights_per_airport.where("CALL_SIGN_ARR==\'KORD\'").orderBy("FL_DATE", "ARR_LOCAL_HOUR"))
    return df_normalized_diverted_inbound_flights_per_airport
df_normalized_diverted_inbound_flights_per_airport = compute_normalized_diverted_inbound_flights(df_max_median_inbound_flights_per_airport)
# COMMAND ----------
# MAGIC %md # Delay Flights
# COMMAND ----------
# MAGIC %md ##normalized delay outbound flights
# COMMAND ----------
############################################################################################
# dalay outbound flights
## calculate the number of delays per airpor per hour per airline - departure/outbound
def compute_normalized_delay_outbound_flights(df_max_median_outbound_flights_per_airport):
    """Rolling 0-6h counts of DELAYED outbound flights per airport, normalized.

    The 0-hour bucket is sum(dep_del15) per (airport, date, local hour), i.e.
    the number of departures delayed 15+ minutes; trailing 1..6h rolling sums
    are built, differenced into per-lag-hour buckets, and each lag column is
    divided by the airport's MAX_MEDIAN_OUTBOUND. Note: unlike the median
    pipeline, this query does not restrict years.

    Side effects: drops/recreates two `group25` tables and calls `display`
    (Databricks notebook globals assumed). Returns the normalized DataFrame.
    """
    # convert a number of hours to seconds (for rangeBetween)
    hours = lambda i: i * 60 * 60
    # delayed-departure counts per airport/date/local hour
    df_delay_outbound_flights_per_airport_per_hour = sqlContext.sql("""
select
CALL_SIGN_DEP,
FL_DATE,
DEP_LOCAL_HOUR,
sum(dep_del15) as DELAY_OUTBOUND_COUNT_0H
from group25.airlines_utc_main
where dep_del15 is not null
group by 1,2,3
""")
    # FL_TIME = midnight of FL_DATE plus the local hour, cast to timestamp
    df_delay_outbound_flights_per_airport_per_hour = df_delay_outbound_flights_per_airport_per_hour.withColumn("FL_TIME", unix_timestamp(col("FL_DATE").cast("timestamp")) + df_delay_outbound_flights_per_airport_per_hour.DEP_LOCAL_HOUR*60*60)
    df_delay_outbound_flights_per_airport_per_hour = df_delay_outbound_flights_per_airport_per_hour.withColumn("FL_TIME", col("FL_TIME").cast("timestamp"))
    display(df_delay_outbound_flights_per_airport_per_hour.filter(df_delay_outbound_flights_per_airport_per_hour.CALL_SIGN_DEP=='KORD').orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    # building the delays columns: cumulative trailing sums over 1..6 hours
    total_lag_time = 6
    for i in range(1, total_lag_time+1):
        window = (Window.partitionBy("FL_DATE", "CALL_SIGN_DEP").orderBy(col("FL_TIME").cast('long')).rangeBetween(-hours(int(i)), 0))
        df_delay_outbound_flights_per_airport_per_hour = df_delay_outbound_flights_per_airport_per_hour.withColumn('DELAY_OUTBOUND_COUNT_' +str(i) + 'H', sum("DELAY_OUTBOUND_COUNT_0H").over(window))
    # difference the cumulative sums into per-lag-hour buckets (high->low order)
    for i in range(total_lag_time, 0, -1):
        df_delay_outbound_flights_per_airport_per_hour = df_delay_outbound_flights_per_airport_per_hour.withColumn('DELAY_OUTBOUND_COUNT_' +str(i) + 'H', col('DELAY_OUTBOUND_COUNT_' +str(i) + 'H') - col('DELAY_OUTBOUND_COUNT_' +str(i-1) + 'H'))
    display(df_delay_outbound_flights_per_airport_per_hour.filter(df_delay_outbound_flights_per_airport_per_hour.CALL_SIGN_DEP=='KORD').orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    # persist the rolling-window table (drop-and-recreate keeps reruns idempotent)
    df_delay_outbound_flights_per_airport_per_hour.createOrReplaceTempView("mytempTable")
    sqlContext.sql("DROP TABLE IF EXISTS group25.df_delayed_outbound_flights_per_airport_per_hour_rolling_window");
    sqlContext.sql("create table group25.df_delayed_outbound_flights_per_airport_per_hour_rolling_window as select * from mytempTable");
    ##### normalized outbound rolling window by max_median
    df2 = df_max_median_outbound_flights_per_airport
    # left join so airports missing a max-median row are kept (NULL divisor)
    df_normalized_delay_outbound_flights_per_airport=df_delay_outbound_flights_per_airport_per_hour.alias("a").join(
        df2.alias("b"), df_delay_outbound_flights_per_airport_per_hour['CALL_SIGN_DEP'] == df2['CALL_SIGN_DEP'], how="left"
    ).select('a.CALL_SIGN_DEP',
        'FL_DATE',
        'DEP_LOCAL_HOUR',
        'FL_TIME',
        'DELAY_OUTBOUND_COUNT_0H',
        'DELAY_OUTBOUND_COUNT_1H',
        'DELAY_OUTBOUND_COUNT_2H',
        'DELAY_OUTBOUND_COUNT_3H',
        'DELAY_OUTBOUND_COUNT_4H',
        'DELAY_OUTBOUND_COUNT_5H',
        'DELAY_OUTBOUND_COUNT_6H',
        'MAX_MEDIAN_OUTBOUND'
    )
    # each lag bucket divided by the airport's max median outbound count
    for i in range(0, total_lag_time+1):
        df_normalized_delay_outbound_flights_per_airport = df_normalized_delay_outbound_flights_per_airport.withColumn('NORMALIZED_DELAY_OUTBOUND_COUNT_' +str(i) + 'H', col('DELAY_OUTBOUND_COUNT_' +str(i) + 'H').cast('int')/col('MAX_MEDIAN_OUTBOUND'))
    # display(df_normalized_delay_outbound_flights_per_airport)
    display(df_normalized_delay_outbound_flights_per_airport.filter(df_normalized_delay_outbound_flights_per_airport.CALL_SIGN_DEP=='KORD').orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    df_normalized_delay_outbound_flights_per_airport.createOrReplaceTempView("mytempTable")
    sqlContext.sql("DROP TABLE IF EXISTS group25.df_normalized_delay_outbound_flights_per_airport");
    sqlContext.sql("create table group25.df_normalized_delay_outbound_flights_per_airport as select * from mytempTable");
    # # showing the median outbound flight of airport KORD
    # display(df_median_outbound_flights_per_airport.where("CALL_SIGN_DEP==\'KORD\'").orderBy("FL_DATE", "DEP_LOCAL_HOUR"))
    return df_normalized_delay_outbound_flights_per_airport
df_normalized_delay_outbound_flights_per_airport = compute_normalized_delay_outbound_flights(df_max_median_outbound_flights_per_airport)
# COMMAND ----------
# MAGIC %md ##normalized delay inbound flights
# COMMAND ----------
############################################################################################
# delay inbound flights
## calculate the number of delays per airport per hour per airline - arrival/inbound
def compute_normalized_delay_inbound_flights(df_max_median_inbound_flights_per_airport):
"""Build rolling-window inbound delay counts per airport/hour, normalize them
by each airport's MAX_MEDIAN_INBOUND, and persist both the raw rolling-window
table and the normalized table to the group25 database.

Side effects: two DROP TABLE / CREATE TABLE statements and two display() calls.
Returns the normalized DataFrame.
"""
# helper: convert a number of hours into seconds (FL_TIME is compared as epoch seconds below)
hours = lambda i: i * 60 * 60
# hourly inbound delay counts per arrival airport.
# NOTE(review): this "inbound" query aggregates dep_del15 (departure delay flag) —
# confirm whether arr_del15 was intended here; the outbound variant uses the same column.
df_delay_inbound_flights_per_airport_per_hour = sqlContext.sql("""
select
CALL_SIGN_ARR,
FL_DATE,
ARR_LOCAL_HOUR,
sum(dep_del15) as DELAY_INBOUND_COUNT_0H
from group25.airlines_utc_main
where dep_del15 is not null
group by 1,2,3
""")
# FL_TIME = flight date (midnight, epoch seconds) + local arrival hour in seconds
df_delay_inbound_flights_per_airport_per_hour = df_delay_inbound_flights_per_airport_per_hour.withColumn("FL_TIME", unix_timestamp(col("FL_DATE").cast("timestamp")) + df_delay_inbound_flights_per_airport_per_hour.ARR_LOCAL_HOUR*60*60)
df_delay_inbound_flights_per_airport_per_hour = df_delay_inbound_flights_per_airport_per_hour.withColumn("FL_TIME", col("FL_TIME").cast("timestamp"))
display(df_delay_inbound_flights_per_airport_per_hour.filter(df_delay_inbound_flights_per_airport_per_hour.CALL_SIGN_ARR=='KORD').orderBy("FL_DATE", "ARR_LOCAL_HOUR"))
# build lag columns: DELAY_INBOUND_COUNT_iH is first the CUMULATIVE sum over the
# trailing i-hour window (rangeBetween(-i hours, now), inclusive of the current row)
total_lag_time = 6
for i in range(1, total_lag_time+1):
window = (Window.partitionBy("FL_DATE", "CALL_SIGN_ARR").orderBy(col("FL_TIME").cast('long')).rangeBetween(-hours(int(i)), 0))
df_delay_inbound_flights_per_airport_per_hour = df_delay_inbound_flights_per_airport_per_hour.withColumn('DELAY_INBOUND_COUNT_' +str(i) + 'H', sum("DELAY_INBOUND_COUNT_0H").over(window))
# then difference adjacent windows (iterating DOWNWARD so column i-1 is still cumulative
# when used) to turn cumulative sums into per-hour-offset buckets
for i in range(total_lag_time, 0, -1):
df_delay_inbound_flights_per_airport_per_hour = df_delay_inbound_flights_per_airport_per_hour.withColumn('DELAY_INBOUND_COUNT_' +str(i) + 'H', col('DELAY_INBOUND_COUNT_' +str(i) + 'H') - col('DELAY_INBOUND_COUNT_' +str(i-1) + 'H'))
display(df_delay_inbound_flights_per_airport_per_hour.filter(df_delay_inbound_flights_per_airport_per_hour.CALL_SIGN_ARR=='KORD').orderBy("FL_DATE", "ARR_LOCAL_HOUR"))
# persist the raw rolling-window table
df_delay_inbound_flights_per_airport_per_hour.createOrReplaceTempView("mytempTable")
sqlContext.sql("DROP TABLE IF EXISTS group25.df_delayed_inbound_flights_per_airport_per_hour_rolling_window");
sqlContext.sql("create table group25.df_delayed_inbound_flights_per_airport_per_hour_rolling_window as select * from mytempTable");
##### normalize the rolling-window counts by the airport's MAX_MEDIAN_INBOUND
df2 = df_max_median_inbound_flights_per_airport
df_normalized_delay_inbound_flights_per_airport=df_delay_inbound_flights_per_airport_per_hour.alias("a").join(
df2.alias("b"), df_delay_inbound_flights_per_airport_per_hour['CALL_SIGN_ARR'] == df2['CALL_SIGN_ARR'], how="left"
).select( 'a.CALL_SIGN_ARR',
'FL_DATE',
'ARR_LOCAL_HOUR',
'FL_TIME',
'DELAY_INBOUND_COUNT_0H',
'DELAY_INBOUND_COUNT_1H',
'DELAY_INBOUND_COUNT_2H',
'DELAY_INBOUND_COUNT_3H',
'DELAY_INBOUND_COUNT_4H',
'DELAY_INBOUND_COUNT_5H',
'DELAY_INBOUND_COUNT_6H',
'MAX_MEDIAN_INBOUND'
)
for i in range(0, total_lag_time+1):
df_normalized_delay_inbound_flights_per_airport = df_normalized_delay_inbound_flights_per_airport.withColumn('NORMALIZED_DELAY_INBOUND_COUNT_' +str(i) + 'H', col('DELAY_INBOUND_COUNT_' +str(i) + 'H').cast('int')/col('MAX_MEDIAN_INBOUND'))
# display(df_normalized_delay_inbound_flights_per_airport)
display(df_normalized_delay_inbound_flights_per_airport.filter(df_normalized_delay_inbound_flights_per_airport.CALL_SIGN_ARR=='KORD').orderBy("FL_DATE", "ARR_LOCAL_HOUR"))
# persist the normalized table
df_normalized_delay_inbound_flights_per_airport.createOrReplaceTempView("mytempTable")
sqlContext.sql("DROP TABLE IF EXISTS group25.df_normalized_delay_inbound_flights_per_airport");
sqlContext.sql("create table group25.df_normalized_delay_inbound_flights_per_airport as select * from mytempTable");
# # showing the median inbound flight of airport KORD
# display(df_median_inbound_flights_per_airport.where("CALL_SIGN_ARR==\'KORD\'").orderBy("FL_DATE", "ARR_LOCAL_HOUR"))
return df_normalized_delay_inbound_flights_per_airport
df_normalized_delay_inbound_flights_per_airport = compute_normalized_delay_inbound_flights(df_max_median_inbound_flights_per_airport)
# COMMAND ----------
# COMMAND ----------
# MAGIC %md # Join all table
# COMMAND ----------
#v5
# joins together:
# df_airlines_utc_main,
# df_normalized_outbound,
# df_normalized_delay_outbound
def joining_tables():
"""Left-join the six normalized per-airport feature tables (outbound/inbound
counts, diverted counts, delay counts — raw and normalized, lags 0H..6H) onto
the main airlines table and persist the result as group25.baseline_model_data.

Outbound tables join on (CALL_SIGN_DEP, FL_DATE, DEP_LOCAL_HOUR); inbound
tables join on (CALL_SIGN_ARR, FL_DATE, ARR_LOCAL_HOUR). Rows with the
sentinel airport code "99999" are excluded.

Side effects: display(), a count() action, and DROP/CREATE of
group25.baseline_model_data. Returns the joined DataFrame.
"""
baseline_model_data = sqlContext.sql("""
SELECT
T1.*,
T2.MAX_MEDIAN_OUTBOUND,
T3.MAX_MEDIAN_INBOUND,
T2.OUTBOUND_COUNT,
T2.OUTBOUND_COUNT_0H,
T2.OUTBOUND_COUNT_1H,
T2.OUTBOUND_COUNT_2H,
T2.OUTBOUND_COUNT_3H,
T2.OUTBOUND_COUNT_4H,
T2.OUTBOUND_COUNT_5H,
T2.OUTBOUND_COUNT_6H,
T2.NORMALIZED_OUTBOUND_COUNT_0H,
T2.NORMALIZED_OUTBOUND_COUNT_1H,
T2.NORMALIZED_OUTBOUND_COUNT_2H,
T2.NORMALIZED_OUTBOUND_COUNT_3H,
T2.NORMALIZED_OUTBOUND_COUNT_4H,
T2.NORMALIZED_OUTBOUND_COUNT_5H,
T2.NORMALIZED_OUTBOUND_COUNT_6H,
T3.INBOUND_COUNT,
T3.INBOUND_COUNT_0H,
T3.INBOUND_COUNT_1H,
T3.INBOUND_COUNT_2H,
T3.INBOUND_COUNT_3H,
T3.INBOUND_COUNT_4H,
T3.INBOUND_COUNT_5H,
T3.INBOUND_COUNT_6H,
T3.NORMALIZED_INBOUND_COUNT_0H,
T3.NORMALIZED_INBOUND_COUNT_1H,
T3.NORMALIZED_INBOUND_COUNT_2H,
T3.NORMALIZED_INBOUND_COUNT_3H,
T3.NORMALIZED_INBOUND_COUNT_4H,
T3.NORMALIZED_INBOUND_COUNT_5H,
T3.NORMALIZED_INBOUND_COUNT_6H,
T4.DIVERTED_OUTBOUND_COUNT,
T4.DIVERTED_OUTBOUND_COUNT_0H,
T4.DIVERTED_OUTBOUND_COUNT_1H,
T4.DIVERTED_OUTBOUND_COUNT_2H,
T4.DIVERTED_OUTBOUND_COUNT_3H,
T4.DIVERTED_OUTBOUND_COUNT_4H,
T4.DIVERTED_OUTBOUND_COUNT_5H,
T4.DIVERTED_OUTBOUND_COUNT_6H,
T4.NORMALIZED_DIVERTED_OUTBOUND_COUNT_0H,
T4.NORMALIZED_DIVERTED_OUTBOUND_COUNT_1H,
T4.NORMALIZED_DIVERTED_OUTBOUND_COUNT_2H,
T4.NORMALIZED_DIVERTED_OUTBOUND_COUNT_3H,
T4.NORMALIZED_DIVERTED_OUTBOUND_COUNT_4H,
T4.NORMALIZED_DIVERTED_OUTBOUND_COUNT_5H,
T4.NORMALIZED_DIVERTED_OUTBOUND_COUNT_6H,
T5.DIVERTED_INBOUND_COUNT,
T5.DIVERTED_INBOUND_COUNT_0H,
T5.DIVERTED_INBOUND_COUNT_1H,
T5.DIVERTED_INBOUND_COUNT_2H,
T5.DIVERTED_INBOUND_COUNT_3H,
T5.DIVERTED_INBOUND_COUNT_4H,
T5.DIVERTED_INBOUND_COUNT_5H,
T5.DIVERTED_INBOUND_COUNT_6H,
T5.NORMALIZED_DIVERTED_INBOUND_COUNT_0H,
T5.NORMALIZED_DIVERTED_INBOUND_COUNT_1H,
T5.NORMALIZED_DIVERTED_INBOUND_COUNT_2H,
T5.NORMALIZED_DIVERTED_INBOUND_COUNT_3H,
T5.NORMALIZED_DIVERTED_INBOUND_COUNT_4H,
T5.NORMALIZED_DIVERTED_INBOUND_COUNT_5H,
T5.NORMALIZED_DIVERTED_INBOUND_COUNT_6H,
T6.DELAY_OUTBOUND_COUNT_0H,
T6.DELAY_OUTBOUND_COUNT_1H,
T6.DELAY_OUTBOUND_COUNT_2H,
T6.DELAY_OUTBOUND_COUNT_3H,
T6.DELAY_OUTBOUND_COUNT_4H,
T6.DELAY_OUTBOUND_COUNT_5H,
T6.DELAY_OUTBOUND_COUNT_6H,
T6.NORMALIZED_DELAY_OUTBOUND_COUNT_0H,
T6.NORMALIZED_DELAY_OUTBOUND_COUNT_1H,
T6.NORMALIZED_DELAY_OUTBOUND_COUNT_2H,
T6.NORMALIZED_DELAY_OUTBOUND_COUNT_3H,
T6.NORMALIZED_DELAY_OUTBOUND_COUNT_4H,
T6.NORMALIZED_DELAY_OUTBOUND_COUNT_5H,
T6.NORMALIZED_DELAY_OUTBOUND_COUNT_6H,
T7.DELAY_INBOUND_COUNT_0H,
T7.DELAY_INBOUND_COUNT_1H,
T7.DELAY_INBOUND_COUNT_2H,
T7.DELAY_INBOUND_COUNT_3H,
T7.DELAY_INBOUND_COUNT_4H,
T7.DELAY_INBOUND_COUNT_5H,
T7.DELAY_INBOUND_COUNT_6H,
T7.NORMALIZED_DELAY_INBOUND_COUNT_0H,
T7.NORMALIZED_DELAY_INBOUND_COUNT_1H,
T7.NORMALIZED_DELAY_INBOUND_COUNT_2H,
T7.NORMALIZED_DELAY_INBOUND_COUNT_3H,
T7.NORMALIZED_DELAY_INBOUND_COUNT_4H,
T7.NORMALIZED_DELAY_INBOUND_COUNT_5H,
T7.NORMALIZED_DELAY_INBOUND_COUNT_6H
FROM
group25.airlines_utc_main T1
LEFT JOIN group25.df_normalized_outbound_flights_per_airport T2
ON T1.CALL_SIGN_DEP = T2.CALL_SIGN_DEP
AND T1.FL_DATE = T2.FL_DATE
AND T1.DEP_LOCAL_HOUR = T2.DEP_LOCAL_HOUR
LEFT JOIN group25.df_normalized_inbound_flights_per_airport T3
ON T1.CALL_SIGN_ARR = T3.CALL_SIGN_ARR
AND T1.FL_DATE = T3.FL_DATE
AND T1.ARR_LOCAL_HOUR = T3.ARR_LOCAL_HOUR
LEFT JOIN group25.df_normalized_diverted_outbound_flights_per_airport T4
ON T1.CALL_SIGN_DEP = T4.CALL_SIGN_DEP
AND T1.FL_DATE = T4.FL_DATE
AND T1.DEP_LOCAL_HOUR = T4.DEP_LOCAL_HOUR
LEFT JOIN group25.df_normalized_diverted_inbound_flights_per_airport T5
ON
T1.CALL_SIGN_ARR = T5.CALL_SIGN_ARR
AND T1.FL_DATE = T5.FL_DATE
AND T1.ARR_LOCAL_HOUR = T5.ARR_LOCAL_HOUR
LEFT JOIN group25.df_normalized_delay_outbound_flights_per_airport T6
ON T1.CALL_SIGN_DEP = T6.CALL_SIGN_DEP
AND T1.FL_DATE = T6.FL_DATE
AND T1.DEP_LOCAL_HOUR = T6.DEP_LOCAL_HOUR
LEFT JOIN group25.df_normalized_delay_inbound_flights_per_airport T7
ON
T1.CALL_SIGN_ARR = T7.CALL_SIGN_ARR
AND T1.FL_DATE = T7.FL_DATE
AND T1.ARR_LOCAL_HOUR = T7.ARR_LOCAL_HOUR
WHERE T1.CALL_SIGN_DEP != "99999"
""")
# save table to db
display(baseline_model_data)
baseline_model_data.createOrReplaceTempView("mytempTable")
sqlContext.sql("DROP TABLE IF EXISTS group25.baseline_model_data");
sqlContext.sql("create table group25.baseline_model_data as select * from mytempTable");
print('baseline_model_data rows count: ', baseline_model_data.count())
return baseline_model_data
baseline_model_data = joining_tables()
# COMMAND ----------
# MAGIC %md # check rows counts
# COMMAND ----------
# print and compare the rows
def check_row_counts():
print("airline utc main", df_airlines_utc_main.count()) # 31,746,841
print('max median: ', df_max_median_outbound_flights_per_airport.count()) #334
print('baseline_model_data rows count: ', baseline_model_data.count()) #31,746,841
return
check_row_counts()
# COMMAND ----------
# MAGIC %sql
# MAGIC select count(*) , "df_normalized_outbound_flights_per_airport" as table from group25.df_normalized_outbound_flights_per_airport
# MAGIC union
# MAGIC select count(*), "df_normalized_inbound_flights_per_airport" as table from group25.df_normalized_inbound_flights_per_airport
# MAGIC union
# MAGIC select count(*), "df_normalized_diverted_outbound_flights_per_airport" as table from group25.df_normalized_diverted_outbound_flights_per_airport
# MAGIC union
# MAGIC select count(*), "df_normalized_diverted_inbound_flights_per_airport" as table from group25.df_normalized_diverted_inbound_flights_per_airport
# MAGIC union
# MAGIC select count(*), "df_normalized_delay_outbound_flights_per_airport" as table from group25.df_normalized_delay_outbound_flights_per_airport
# MAGIC union
# MAGIC select count(*), "df_normalized_delay_inbound_flights_per_airport" as table from group25.df_normalized_delay_inbound_flights_per_airport
# MAGIC union
# MAGIC select count(*), "df_normalized_delay_inbound_flights_per_airport" as table from group25.df_normalized_delay_inbound_flights_per_airport
# MAGIC union
# MAGIC select count(*), "baseline_model_data" as table from group25.baseline_model_data
# COMMAND ----------
# MAGIC %sql
# MAGIC select count(*), "df_normalized_outbound_flights_per_airport" as table from (select distinct call_sign_dep, FL_DATE, DEP_LOCAL_HOUR from group25.df_normalized_outbound_flights_per_airport where call_sign_dep != '99999' and call_sign_dep is not null and dep_local_hour is not null group by call_sign_dep, FL_DATE, DEP_LOCAL_HOUR)
# MAGIC union
# MAGIC select count(*), "df_normalized_inbound_flights_per_airport" as table from (select distinct call_sign_arr, FL_DATE, ARR_LOCAL_HOUR from group25.df_normalized_inbound_flights_per_airport where call_sign_arr != '99999' and call_sign_arr is not null and ARR_LOCAL_HOUR is not null group by call_sign_arr, FL_DATE, ARR_LOCAL_HOUR)
# MAGIC union
# MAGIC select count(*), "df_normalized_diverted_outbound_flights_per_airport" as table from (select distinct call_sign_dep, FL_DATE, DEP_LOCAL_HOUR from group25.df_normalized_diverted_outbound_flights_per_airport where call_sign_dep != '99999' and call_sign_dep is not null and DEP_LOCAL_HOUR is not null group by call_sign_dep, FL_DATE, DEP_LOCAL_HOUR)
# MAGIC union
# MAGIC select count(*), "df_normalized_diverted_inbound_flights_per_airport" as table from (select distinct call_sign_arr, FL_DATE, ARR_LOCAL_HOUR from group25.df_normalized_diverted_inbound_flights_per_airport where call_sign_arr != '99999' and call_sign_arr is not null and ARR_LOCAL_HOUR is not null group by call_sign_arr, FL_DATE, ARR_LOCAL_HOUR)
# MAGIC union
# MAGIC select count(*), "df_normalized_delay_outbound_flights_per_airport" as table from (select distinct call_sign_dep, FL_DATE, DEP_LOCAL_HOUR from group25.df_normalized_delay_outbound_flights_per_airport where call_sign_dep != '99999' and call_sign_dep is not null and dep_local_hour is not null group by call_sign_dep, FL_DATE, DEP_LOCAL_HOUR)
# MAGIC union
# MAGIC select count(*), "df_normalized_delay_inbound_flights_per_airport" as table from (select distinct call_sign_arr, FL_DATE, ARR_LOCAL_HOUR from group25.df_normalized_delay_inbound_flights_per_airport where call_sign_arr != '99999' and call_sign_arr is not null and ARR_LOCAL_HOUR is not null group by call_sign_arr, FL_DATE, ARR_LOCAL_HOUR)
# MAGIC union
# MAGIC select count(*), "airlines_utc_main" as table from (select distinct call_sign_dep, FL_DATE, DEP_LOCAL_HOUR from group25.airlines_utc_main where call_sign_dep != '99999' and call_sign_dep is not null and dep_local_hour is not null group by call_sign_dep, FL_DATE, DEP_LOCAL_HOUR)
# COMMAND ----------
# COMMAND ----------
# MAGIC %md #check the case for KORD in baseline_model_data
# COMMAND ----------
# Spot-check: inspect the outbound-count feature columns for one airport/carrier
# (KORD / United) in the joined baseline table.
select_cols = [
'FL_DATE',
'OP_UNIQUE_CARRIER',
'DEP_LOCAL_HOUR',
'OUTBOUND_COUNT',
'OUTBOUND_COUNT_0H',
'OUTBOUND_COUNT_1H',
'OUTBOUND_COUNT_2H',
'OUTBOUND_COUNT_3H',
'OUTBOUND_COUNT_4H',
'OUTBOUND_COUNT_5H',
'OUTBOUND_COUNT_6H',
'MAX_MEDIAN_OUTBOUND',
'NORMALIZED_OUTBOUND_COUNT_0H',
'NORMALIZED_OUTBOUND_COUNT_1H',
'NORMALIZED_OUTBOUND_COUNT_2H',
'NORMALIZED_OUTBOUND_COUNT_3H',
'NORMALIZED_OUTBOUND_COUNT_4H',
'NORMALIZED_OUTBOUND_COUNT_5H',
'NORMALIZED_OUTBOUND_COUNT_6H'
]
display(baseline_model_data.select(select_cols).filter(baseline_model_data.call_sign_dep=='KORD').filter(baseline_model_data.OP_UNIQUE_CARRIER=='UA').dropDuplicates().orderBy("FL_DATE", "DEP_LOCAL_HOUR", ))
# COMMAND ----------
# list the columns of the normalized outbound-delay table
df_normalized_delay_outbound_flights_per_airport.columns
# COMMAND ----------
# MAGIC %md # sampling df
# COMMAND ----------
def sampling_df(call_sign='KORD'):
    """Return a list of the feature dataframes, each filtered to one airport.

    Bug fix: the original loop rebound its loop variable (``df = df.filter(...)``)
    and discarded every filtered frame, then returned None — the function had no
    observable effect. The filtered frames are now collected and returned, and the
    airport code is a parameter (default 'KORD', the original hard-coded value).

    Parameters:
        call_sign: airport call sign to filter CALL_SIGN_DEP on.
    Returns:
        list of filtered DataFrames, in the same order as list_of_df.
    """
    list_of_df = [
        df_airlines_utc_main,
        df_normalized_outbound,
        df_normalized_inbound,
        df_normalized_diverted_outbound,
        df_normalized_diverted_inbound,
        df_normalized_delay_outbound,
        df_normalized_delay_inbound,
    ]
    # NOTE(review): the inbound tables are keyed on CALL_SIGN_ARR — confirm they
    # also carry a CALL_SIGN_DEP column, otherwise this filter raises for them.
    return [df.filter(df.CALL_SIGN_DEP == call_sign) for df in list_of_df]
# sampling_df()
| 41.282007
| 354
| 0.752651
| 6,599
| 47,722
| 4.984392
| 0.040309
| 0.067798
| 0.115256
| 0.083181
| 0.85723
| 0.796729
| 0.776967
| 0.739815
| 0.687796
| 0.665967
| 0
| 0.020537
| 0.142932
| 47,722
| 1,155
| 355
| 41.317749
| 0.783648
| 0.208017
| 0
| 0.351938
| 0
| 0
| 0.43942
| 0.207526
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017054
| false
| 0
| 0.04031
| 0
| 0.074419
| 0.013953
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5ddebd289910cebc5e983f2eb6297f939ec04190
| 219
|
py
|
Python
|
api/articles/admin.py
|
urfonline/api
|
fbd76c246eb1c64c9a12dbcffb21a317e64f8ada
|
[
"MIT"
] | 4
|
2017-06-13T16:20:34.000Z
|
2019-10-03T09:19:54.000Z
|
api/articles/admin.py
|
urfonline/api
|
fbd76c246eb1c64c9a12dbcffb21a317e64f8ada
|
[
"MIT"
] | 162
|
2017-05-23T11:41:33.000Z
|
2022-03-11T23:26:15.000Z
|
api/articles/admin.py
|
urfonline/api
|
fbd76c246eb1c64c9a12dbcffb21a317e64f8ada
|
[
"MIT"
] | 1
|
2017-05-19T09:25:25.000Z
|
2017-05-19T09:25:25.000Z
|
from django.contrib import admin
from django.contrib.admin import register
from .models import Article
@register(Article)
class ArticleAdmin(admin.ModelAdmin):
list_display = ('title', 'tone', 'associated_show')
| 21.9
| 55
| 0.776256
| 27
| 219
| 6.222222
| 0.62963
| 0.119048
| 0.202381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123288
| 219
| 9
| 56
| 24.333333
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f8e09e7beecf96c749c8192bfe016b378807583a
| 45
|
py
|
Python
|
experimental/model_builder.py
|
adrnswanberg/deep_learning
|
b03bac7ab1a80b75ac9a99c4440b1dbf3212a7eb
|
[
"CNRI-Python"
] | 1
|
2021-04-17T17:29:11.000Z
|
2021-04-17T17:29:11.000Z
|
experimental/model_builder.py
|
adrnswanberg/deep_learning
|
b03bac7ab1a80b75ac9a99c4440b1dbf3212a7eb
|
[
"CNRI-Python"
] | null | null | null |
experimental/model_builder.py
|
adrnswanberg/deep_learning
|
b03bac7ab1a80b75ac9a99c4440b1dbf3212a7eb
|
[
"CNRI-Python"
] | null | null | null |
"""Implements some other types of models."""
| 22.5
| 44
| 0.711111
| 6
| 45
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 45
| 1
| 45
| 45
| 0.820513
| 0.844444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d075352e41d8cf02da5bae9f388f097a84f97e6
| 131
|
py
|
Python
|
avro_to_python_types/__init__.py
|
dangreenisrael/avro-to-python
|
d0e0657367348c7ffcd1af2fcc06fad27345cd33
|
[
"MIT"
] | 7
|
2020-12-12T20:19:28.000Z
|
2020-12-14T21:28:28.000Z
|
avro_to_python_types/__init__.py
|
dangreenisrael/avro-to-python-types
|
d0e0657367348c7ffcd1af2fcc06fad27345cd33
|
[
"MIT"
] | null | null | null |
avro_to_python_types/__init__.py
|
dangreenisrael/avro-to-python-types
|
d0e0657367348c7ffcd1af2fcc06fad27345cd33
|
[
"MIT"
] | null | null | null |
__version__ = "0.12.1"
from .typed_dict_from_schema import (
typed_dict_from_schema_file,
typed_dict_from_schema_string,
)
| 21.833333
| 37
| 0.78626
| 20
| 131
| 4.4
| 0.55
| 0.306818
| 0.443182
| 0.647727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035398
| 0.137405
| 131
| 5
| 38
| 26.2
| 0.743363
| 0
| 0
| 0
| 0
| 0
| 0.045802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d1f88e596312e832200029f1d3dce19266e5b80
| 81
|
py
|
Python
|
accelerometer/__init__.py
|
CASSON-LAB/BiobankActivityCSF
|
d3e6d7283aed72fa329da1e045fa49cc7e6b2e9c
|
[
"BSD-2-Clause"
] | 3
|
2020-08-03T12:08:34.000Z
|
2021-03-16T11:31:01.000Z
|
accelerometer/__init__.py
|
CASSON-LAB/BiobankActivityCSF
|
d3e6d7283aed72fa329da1e045fa49cc7e6b2e9c
|
[
"BSD-2-Clause"
] | null | null | null |
accelerometer/__init__.py
|
CASSON-LAB/BiobankActivityCSF
|
d3e6d7283aed72fa329da1e045fa49cc7e6b2e9c
|
[
"BSD-2-Clause"
] | 1
|
2020-08-05T16:13:02.000Z
|
2020-08-05T16:13:02.000Z
|
import accelerometer.accUtils
import accelerometer.device
name = "accelerometer"
| 20.25
| 29
| 0.851852
| 8
| 81
| 8.625
| 0.625
| 0.550725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08642
| 81
| 3
| 30
| 27
| 0.932432
| 0
| 0
| 0
| 0
| 0
| 0.160494
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5d21ebd860e47768a22e217c7fe0d1f2d54e9fb2
| 174
|
py
|
Python
|
supervised/constants.py
|
vishal-vardhan/Text_Summarization
|
5b9f3e2e21effb42e55574fc972375d7668a1889
|
[
"MIT"
] | null | null | null |
supervised/constants.py
|
vishal-vardhan/Text_Summarization
|
5b9f3e2e21effb42e55574fc972375d7668a1889
|
[
"MIT"
] | null | null | null |
supervised/constants.py
|
vishal-vardhan/Text_Summarization
|
5b9f3e2e21effb42e55574fc972375d7668a1889
|
[
"MIT"
] | 1
|
2021-03-08T12:47:08.000Z
|
2021-03-08T12:47:08.000Z
|
DATA_FOLDER = "./data"
DATA_PATH = DATA_FOLDER + "/news_summary_cleaned.csv"
EXPORT_DATA_PATH = DATA_FOLDER + "/new_summary_cleaned.csv"
PREPROC_DATA_PATH = EXPORT_DATA_PATH
| 34.8
| 59
| 0.804598
| 26
| 174
| 4.846154
| 0.384615
| 0.253968
| 0.190476
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091954
| 174
| 4
| 60
| 43.5
| 0.797468
| 0
| 0
| 0
| 0
| 0
| 0.316092
| 0.281609
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d5c255dfa30826051f50767e12ff2ba4cbc2778
| 181
|
py
|
Python
|
orca/__init__.py
|
jscohen4/cohen-policy-tree-adaptation
|
a6efafda591e5850fc264f3756cce2bb8e07719f
|
[
"MIT"
] | 1
|
2022-01-05T12:20:18.000Z
|
2022-01-05T12:20:18.000Z
|
orca/__init__.py
|
jscohen4/cohen-policy-tree-adaptation
|
a6efafda591e5850fc264f3756cce2bb8e07719f
|
[
"MIT"
] | null | null | null |
orca/__init__.py
|
jscohen4/cohen-policy-tree-adaptation
|
a6efafda591e5850fc264f3756cce2bb8e07719f
|
[
"MIT"
] | null | null | null |
from .model import Model
from .model_SA import Model_SA
from .reservoir import Reservoir
from .delta import Delta
from .util import *
from .plotter import *
from .offstream import *
| 25.857143
| 32
| 0.79558
| 27
| 181
| 5.259259
| 0.333333
| 0.126761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149171
| 181
| 7
| 33
| 25.857143
| 0.922078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5d61e3ffec2c6f933ab5caf55b4cfe661baff9fc
| 1,179
|
py
|
Python
|
pyconcz_2016/proposals/migrations/0005_auto_20160812_0805.py
|
pyvec/cz.pycon.org-2016
|
b4affabcf2b1cdd629a2dc67dba671b3414b3682
|
[
"MIT"
] | 10
|
2016-01-27T08:37:41.000Z
|
2018-04-26T08:33:44.000Z
|
pyconcz_2016/proposals/migrations/0005_auto_20160812_0805.py
|
pyvec/cz.pycon.org-2016
|
b4affabcf2b1cdd629a2dc67dba671b3414b3682
|
[
"MIT"
] | 101
|
2015-11-15T11:20:33.000Z
|
2019-04-03T15:17:47.000Z
|
pyconcz_2016/proposals/migrations/0005_auto_20160812_0805.py
|
pyvec/cz.pycon.org-2016
|
b4affabcf2b1cdd629a2dc67dba671b3414b3682
|
[
"MIT"
] | 10
|
2015-11-15T21:35:53.000Z
|
2017-01-25T14:30:27.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-08-12 06:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposals', '0004_auto_20160811_0816'),
]
operations = [
migrations.AlterField(
model_name='talk',
name='github',
field=models.CharField(blank=True, help_text='Optional', max_length=255, verbose_name='GitHub username'),
),
migrations.AlterField(
model_name='talk',
name='twitter',
field=models.CharField(blank=True, help_text='Optional', max_length=255, verbose_name='Twitter handle'),
),
migrations.AlterField(
model_name='workshop',
name='github',
field=models.CharField(blank=True, help_text='Optional', max_length=255, verbose_name='GitHub username'),
),
migrations.AlterField(
model_name='workshop',
name='twitter',
field=models.CharField(blank=True, help_text='Optional', max_length=255, verbose_name='Twitter handle'),
),
]
| 32.75
| 117
| 0.614928
| 125
| 1,179
| 5.608
| 0.416
| 0.114123
| 0.142653
| 0.165478
| 0.724679
| 0.724679
| 0.607703
| 0.607703
| 0.607703
| 0.607703
| 0
| 0.050459
| 0.26039
| 1,179
| 35
| 118
| 33.685714
| 0.75344
| 0.056828
| 0
| 0.714286
| 1
| 0
| 0.155095
| 0.020739
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
53aca3ae590ba107bb169a744b23395d52d85fe2
| 184
|
py
|
Python
|
lib/utils/classifier.py
|
geraudster/zevision
|
26566547c37046f239b94d545bdfefee2d6bb4e0
|
[
"Apache-2.0"
] | 14
|
2019-07-06T19:53:04.000Z
|
2021-08-29T15:22:25.000Z
|
lib/utils/classifier.py
|
IhabBendidi/zevision
|
26566547c37046f239b94d545bdfefee2d6bb4e0
|
[
"Apache-2.0"
] | 9
|
2019-09-03T11:16:10.000Z
|
2022-01-31T14:34:02.000Z
|
lib/utils/classifier.py
|
IhabBendidi/zevision
|
26566547c37046f239b94d545bdfefee2d6bb4e0
|
[
"Apache-2.0"
] | 6
|
2019-07-06T19:54:09.000Z
|
2020-05-21T09:30:11.000Z
|
import numpy as np
def face_distance(face_encodings, face_to_compare):
if len(face_encodings) == 0:
return np.empty((0))
return np.linalg.norm(face_encodings - face_to_compare)
| 20.444444
| 56
| 0.766304
| 30
| 184
| 4.433333
| 0.566667
| 0.293233
| 0.255639
| 0.285714
| 0.390977
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0125
| 0.130435
| 184
| 8
| 57
| 23
| 0.81875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
53be35a6ea732e054e035374a1178794258e9ea4
| 2,266
|
py
|
Python
|
tests/test_default_loop.py
|
eublefar/botbowl_bot
|
d7189297291ce7dccb46297c651ef3e5c9210dc8
|
[
"MIT"
] | 3
|
2020-06-29T18:08:05.000Z
|
2020-06-30T10:13:35.000Z
|
tests/test_default_loop.py
|
KirillPlaksin/gym-loop
|
3708925820ebdc9bbdcc05a1f000f1bf0995f3df
|
[
"MIT"
] | 71
|
2020-07-13T15:24:11.000Z
|
2022-02-14T15:47:13.000Z
|
tests/test_default_loop.py
|
KirillPlaksin/gym-loop
|
3708925820ebdc9bbdcc05a1f000f1bf0995f3df
|
[
"MIT"
] | 1
|
2020-06-29T18:25:33.000Z
|
2020-06-29T18:25:33.000Z
|
#!/usr/bin/env python
"""Tests for `deafult_loop` module."""
import gym
from gym_loop.loops.default_loop import DefaultLoop
from gym_loop.agents.random_agent import RandomAgent
def test_train_run(mocker):
mock_env = gym.make("CartPole-v0")
mock_agent = RandomAgent(action_space=mock_env.action_space)
reset = mocker.patch.object(
mock_env,
"reset",
autospec=True,
return_value=mock_env.observation_space.sample(),
)
step = mocker.patch.object(
mock_env,
"step",
autospec=True,
return_value=mock_env.observation_space.sample(),
)
act = mocker.patch.object(
mock_agent, "act", autospec=True, return_value=mock_env.action_space.sample(),
)
memorize = mocker.patch.object(mock_agent, "memorize", autospec=True)
update = mocker.patch.object(mock_agent, "update", autospec=True)
loop = DefaultLoop(
agent=mock_agent, env=mock_env, max_episodes=1, max_episode_len=1
)
loop.train()
step.assert_called_once()
reset.assert_called_once()
act.assert_called_once()
memorize.assert_called_once()
update.assert_called_once()
def test_evaluate_run(mocker):
mock_env = gym.make("CartPole-v0")
mock_agent = RandomAgent(action_space=mock_env.action_space)
reset = mocker.patch.object(
mock_env,
"reset",
autospec=True,
return_value=mock_env.observation_space.sample(),
)
step = mocker.patch.object(
mock_env,
"step",
autospec=True,
return_value=mock_env.observation_space.sample(),
)
act = mocker.patch.object(
mock_agent, "act", autospec=True, return_value=mock_env.action_space.sample(),
)
memorize = mocker.patch.object(mock_agent, "memorize", autospec=True)
update = mocker.patch.object(mock_agent, "update", autospec=True)
loop = DefaultLoop(
agent=mock_agent,
env=mock_env,
max_episodes=1,
max_episode_len=1,
eval_episodes=1,
eval_render=False,
eval_record=False,
)
loop.evaluate()
step.assert_called_once()
reset.assert_called_once()
act.assert_called_once()
memorize.assert_not_called()
update.assert_not_called()
| 27.975309
| 86
| 0.670344
| 282
| 2,266
| 5.102837
| 0.202128
| 0.077832
| 0.118138
| 0.145935
| 0.797776
| 0.797776
| 0.797776
| 0.797776
| 0.797776
| 0.797776
| 0
| 0.00395
| 0.218005
| 2,266
| 80
| 87
| 28.325
| 0.808126
| 0.023389
| 0
| 0.597015
| 0
| 0
| 0.03353
| 0
| 0
| 0
| 0
| 0
| 0.149254
| 1
| 0.029851
| false
| 0
| 0.044776
| 0
| 0.074627
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
53cb5fc6322bbbf85dc1f829a26dbe6fad811af1
| 188
|
py
|
Python
|
apps/forum_core/admin.py
|
WebPres-org/webpres-forum
|
acb89a474404fd2bb06168aa40fe5a2d321f45be
|
[
"Apache-2.0"
] | 1
|
2021-12-27T10:03:30.000Z
|
2021-12-27T10:03:30.000Z
|
apps/forum_core/admin.py
|
WebPres-org/webpres-forum
|
acb89a474404fd2bb06168aa40fe5a2d321f45be
|
[
"Apache-2.0"
] | null | null | null |
apps/forum_core/admin.py
|
WebPres-org/webpres-forum
|
acb89a474404fd2bb06168aa40fe5a2d321f45be
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import Post, Replie, Profile
admin.site.register(Post)
admin.site.register(Replie)
admin.site.register(Profile)
| 23.5
| 41
| 0.803191
| 27
| 188
| 5.592593
| 0.481481
| 0.178808
| 0.337748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101064
| 188
| 8
| 42
| 23.5
| 0.893491
| 0.138298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
53ff80cdb3865a6c199ebb1184fe21cb0621524b
| 9,304
|
py
|
Python
|
pytamil/தமிழ்/codegen/மாத்திரைLexer.py
|
srix/pytamil
|
cfcaee618bb947242f3a9a4432c9e9a9b9a5bab0
|
[
"MIT"
] | 45
|
2019-09-08T14:11:21.000Z
|
2021-09-21T16:18:26.000Z
|
pytamil/தமிழ்/codegen/மாத்திரைLexer.py
|
srix/pytamil
|
cfcaee618bb947242f3a9a4432c9e9a9b9a5bab0
|
[
"MIT"
] | 2
|
2019-12-07T13:51:41.000Z
|
2021-05-14T06:08:34.000Z
|
pytamil/தமிழ்/codegen/மாத்திரைLexer.py
|
srix/pytamil
|
cfcaee618bb947242f3a9a4432c9e9a9b9a5bab0
|
[
"MIT"
] | 9
|
2019-09-08T15:41:30.000Z
|
2021-02-13T07:09:24.000Z
|
# Generated from /home/srix/workspace/pytamil/pytamil/தமிழ்/resources/மாத்திரை.g4 by ANTLR 4.8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2,")
buf.write("\u00d3\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\3\2\3\2\3\3\3\3")
buf.write("\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\t\3\t\3\t\3\n\3\n\3\n")
buf.write("\3\13\3\13\3\13\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\r\3")
buf.write("\16\3\16\3\17\3\17\3\20\3\20\3\21\3\21\3\22\3\22\3\23")
buf.write("\3\23\3\24\3\24\3\25\3\25\3\26\3\26\3\27\3\27\3\27\3\30")
buf.write("\3\30\3\30\3\31\3\31\3\31\3\32\3\32\3\32\3\33\3\33\3\33")
buf.write("\3\34\3\34\3\34\3\35\3\35\3\35\3\36\3\36\3\36\3\37\3\37")
buf.write("\3\37\3 \3 \3 \3!\3!\3!\3\"\3\"\3\"\3#\3#\3#\3$\3$\3$")
buf.write("\3%\3%\3%\3&\3&\3&\3\'\3\'\3\'\3(\3(\3(\3)\3)\3)\3*\3")
buf.write("*\3*\3+\3+\3+\2\2,\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n")
buf.write("\23\13\25\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'")
buf.write("\25)\26+\27-\30/\31\61\32\63\33\65\34\67\359\36;\37= ")
buf.write("?!A\"C#E$G%I&K\'M(O)Q*S+U,\3\2\2\2\u00d2\2\3\3\2\2\2\2")
buf.write("\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3")
buf.write("\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2")
buf.write("\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2")
buf.write("\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3")
buf.write("\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61")
buf.write("\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2")
buf.write("\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3")
buf.write("\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M")
buf.write("\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\3")
buf.write("W\3\2\2\2\5Y\3\2\2\2\7[\3\2\2\2\t]\3\2\2\2\13b\3\2\2\2")
buf.write("\rg\3\2\2\2\17n\3\2\2\2\21p\3\2\2\2\23s\3\2\2\2\25v\3")
buf.write("\2\2\2\27y\3\2\2\2\31|\3\2\2\2\33\u0082\3\2\2\2\35\u0084")
buf.write("\3\2\2\2\37\u0086\3\2\2\2!\u0088\3\2\2\2#\u008a\3\2\2")
buf.write("\2%\u008c\3\2\2\2\'\u008e\3\2\2\2)\u0090\3\2\2\2+\u0092")
buf.write("\3\2\2\2-\u0094\3\2\2\2/\u0097\3\2\2\2\61\u009a\3\2\2")
buf.write("\2\63\u009d\3\2\2\2\65\u00a0\3\2\2\2\67\u00a3\3\2\2\2")
buf.write("9\u00a6\3\2\2\2;\u00a9\3\2\2\2=\u00ac\3\2\2\2?\u00af\3")
buf.write("\2\2\2A\u00b2\3\2\2\2C\u00b5\3\2\2\2E\u00b8\3\2\2\2G\u00bb")
buf.write("\3\2\2\2I\u00be\3\2\2\2K\u00c1\3\2\2\2M\u00c4\3\2\2\2")
buf.write("O\u00c7\3\2\2\2Q\u00ca\3\2\2\2S\u00cd\3\2\2\2U\u00d0\3")
buf.write("\2\2\2WX\7\u0b8b\2\2X\4\3\2\2\2YZ\7\u0b92\2\2Z\6\3\2\2")
buf.write("\2[\\\7\u0b96\2\2\\\b\3\2\2\2]^\7\u0b85\2\2^_\7\u0bb3")
buf.write("\2\2_`\7\u0bcf\2\2`a\7\u0b89\2\2a\n\3\2\2\2bc\7\u0b85")
buf.write("\2\2cd\7\u0ba1\2\2de\7\u0bcf\2\2ef\7\u0b8a\2\2f\f\3\2")
buf.write("\2\2gh\7\u0bb0\2\2hi\7\u0bcf\2\2ij\7\u0b89\2\2jk\7\u0bb1")
buf.write("\2\2kl\7\u0bcf\2\2lm\7\u0b88\2\2m\16\3\2\2\2no\7\u0b89")
buf.write("\2\2o\20\3\2\2\2pq\7\u0bb1\2\2qr\7\u0bcf\2\2r\22\3\2\2")
buf.write("\2st\7\u0ba5\2\2tu\7\u0bcf\2\2u\24\3\2\2\2vw\7\u0bb0\2")
buf.write("\2wx\7\u0bcf\2\2x\26\3\2\2\2yz\7\u0bab\2\2z{\7\u0bcf\2")
buf.write("\2{\30\3\2\2\2|}\7\u0bb0\2\2}~\7\u0bcf\2\2~\177\7\u0bb7")
buf.write("\2\2\177\u0080\7\u0bcf\2\2\u0080\u0081\7\u0b87\2\2\u0081")
buf.write("\32\3\2\2\2\u0082\u0083\7\u0b87\2\2\u0083\34\3\2\2\2\u0084")
buf.write("\u0085\7\u0b90\2\2\u0085\36\3\2\2\2\u0086\u0087\7\u0b94")
buf.write("\2\2\u0087 \3\2\2\2\u0088\u0089\7\u0b88\2\2\u0089\"\3")
buf.write("\2\2\2\u008a\u008b\7\u0b8a\2\2\u008b$\3\2\2\2\u008c\u008d")
buf.write("\7\u0b8c\2\2\u008d&\3\2\2\2\u008e\u008f\7\u0b91\2\2\u008f")
buf.write("(\3\2\2\2\u0090\u0091\7\u0b95\2\2\u0091*\3\2\2\2\u0092")
buf.write("\u0093\7\u0b85\2\2\u0093,\3\2\2\2\u0094\u0095\7\u0b97")
buf.write("\2\2\u0095\u0096\7\u0bcf\2\2\u0096.\3\2\2\2\u0097\u0098")
buf.write("\7\u0b9c\2\2\u0098\u0099\7\u0bcf\2\2\u0099\60\3\2\2\2")
buf.write("\u009a\u009b\7\u0ba1\2\2\u009b\u009c\7\u0bcf\2\2\u009c")
buf.write("\62\3\2\2\2\u009d\u009e\7\u0ba6\2\2\u009e\u009f\7\u0bcf")
buf.write("\2\2\u009f\64\3\2\2\2\u00a0\u00a1\7\u0bac\2\2\u00a1\u00a2")
buf.write("\7\u0bcf\2\2\u00a2\66\3\2\2\2\u00a3\u00a4\7\u0bb3\2\2")
buf.write("\u00a4\u00a5\7\u0bcf\2\2\u00a58\3\2\2\2\u00a6\u00a7\7")
buf.write("\u0ba0\2\2\u00a7\u00a8\7\u0bcf\2\2\u00a8:\3\2\2\2\u00a9")
buf.write("\u00aa\7\u0b9b\2\2\u00aa\u00ab\7\u0bcf\2\2\u00ab<\3\2")
buf.write("\2\2\u00ac\u00ad\7\u0baa\2\2\u00ad\u00ae\7\u0bcf\2\2\u00ae")
buf.write(">\3\2\2\2\u00af\u00b0\7\u0bb2\2\2\u00b0\u00b1\7\u0bcf")
buf.write("\2\2\u00b1@\3\2\2\2\u00b2\u00b3\7\u0bb4\2\2\u00b3\u00b4")
buf.write("\7\u0bcf\2\2\u00b4B\3\2\2\2\u00b5\u00b6\7\u0bb7\2\2\u00b6")
buf.write("\u00b7\7\u0bcf\2\2\u00b7D\3\2\2\2\u00b8\u00b9\7\u0bb6")
buf.write("\2\2\u00b9\u00ba\7\u0bcf\2\2\u00baF\3\2\2\2\u00bb\u00bc")
buf.write("\7\u0bb5\2\2\u00bc\u00bd\7\u0bcf\2\2\u00bdH\3\2\2\2\u00be")
buf.write("\u00bf\7\u0b88\2\2\u00bf\u00c0\7\u0b87\2\2\u00c0J\3\2")
buf.write("\2\2\u00c1\u00c2\7\u0b8a\2\2\u00c2\u00c3\7\u0b89\2\2\u00c3")
buf.write("L\3\2\2\2\u00c4\u00c5\7\u0b8c\2\2\u00c5\u00c6\7\u0b8b")
buf.write("\2\2\u00c6N\3\2\2\2\u00c7\u00c8\7\u0b91\2\2\u00c8\u00c9")
buf.write("\7\u0b90\2\2\u00c9P\3\2\2\2\u00ca\u00cb\7\u0b92\2\2\u00cb")
buf.write("\u00cc\7\u0b89\2\2\u00ccR\3\2\2\2\u00cd\u00ce\7\u0b95")
buf.write("\2\2\u00ce\u00cf\7\u0b94\2\2\u00cfT\3\2\2\2\u00d0\u00d1")
buf.write("\7\u0b96\2\2\u00d1\u00d2\7\u0b8b\2\2\u00d2V\3\2\2\2\3")
buf.write("\2\2")
return buf.getvalue()
class மாத்திரைLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
T__14 = 15
T__15 = 16
T__16 = 17
T__17 = 18
T__18 = 19
T__19 = 20
T__20 = 21
T__21 = 22
T__22 = 23
T__23 = 24
T__24 = 25
T__25 = 26
T__26 = 27
T__27 = 28
T__28 = 29
T__29 = 30
T__30 = 31
T__31 = 32
T__32 = 33
T__33 = 34
T__34 = 35
T__35 = 36
T__36 = 37
T__37 = 38
T__38 = 39
T__39 = 40
T__40 = 41
T__41 = 42
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'\u0B89'", "'\u0B90'", "'\u0B94'", "'\u0B83\u0BB1\u0BCD\u0B87'",
"'\u0B83\u0B9F\u0BCD\u0B88'", "'\u0BAE\u0BCD\u0B87\u0BAF\u0BCD\u0B86'",
"'\u0B87'", "'\u0BAF\u0BCD'", "'\u0BA3\u0BCD'", "'\u0BAE\u0BCD'",
"'\u0BA9\u0BCD'", "'\u0BAE\u0BCD\u0BB5\u0BCD\u0B85'", "'\u0B85'",
"'\u0B8E'", "'\u0B92'", "'\u0B86'", "'\u0B88'", "'\u0B8A'",
"'\u0B8F'", "'\u0B93'", "'\u0B83'", "'\u0B95\u0BCD'", "'\u0B9A\u0BCD'",
"'\u0B9F\u0BCD'", "'\u0BA4\u0BCD'", "'\u0BAA\u0BCD'", "'\u0BB1\u0BCD'",
"'\u0B9E\u0BCD'", "'\u0B99\u0BCD'", "'\u0BA8\u0BCD'", "'\u0BB0\u0BCD'",
"'\u0BB2\u0BCD'", "'\u0BB5\u0BCD'", "'\u0BB4\u0BCD'", "'\u0BB3\u0BCD'",
"'\u0B86\u0B85'", "'\u0B88\u0B87'", "'\u0B8A\u0B89'", "'\u0B8F\u0B8E'",
"'\u0B90\u0B87'", "'\u0B93\u0B92'", "'\u0B94\u0B89'" ]
symbolicNames = [ "<INVALID>",
]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"T__14", "T__15", "T__16", "T__17", "T__18", "T__19",
"T__20", "T__21", "T__22", "T__23", "T__24", "T__25",
"T__26", "T__27", "T__28", "T__29", "T__30", "T__31",
"T__32", "T__33", "T__34", "T__35", "T__36", "T__37",
"T__38", "T__39", "T__40", "T__41" ]
grammarFileName = "மாத்திரை.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.8")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
| 50.291892
| 103
| 0.540198
| 2,132
| 9,304
| 2.277674
| 0.15666
| 0.13056
| 0.079077
| 0.078254
| 0.207784
| 0.140033
| 0.07743
| 0.057455
| 0.04201
| 0.033567
| 0
| 0.316595
| 0.171647
| 9,304
| 184
| 104
| 50.565217
| 0.311665
| 0.009888
| 0
| 0
| 1
| 0.319277
| 0.573089
| 0.486099
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012048
| false
| 0
| 0.024096
| 0
| 0.349398
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
99084dc9edd114c4d96128866aefc150e1efe752
| 48
|
py
|
Python
|
django_object_actions/models.py
|
DjangoAdminHackers/django-object-actions
|
e8990ae7ee1208b42613d362b0b60339d8aea4e3
|
[
"Apache-2.0"
] | null | null | null |
django_object_actions/models.py
|
DjangoAdminHackers/django-object-actions
|
e8990ae7ee1208b42613d362b0b60339d8aea4e3
|
[
"Apache-2.0"
] | null | null | null |
django_object_actions/models.py
|
DjangoAdminHackers/django-object-actions
|
e8990ae7ee1208b42613d362b0b60339d8aea4e3
|
[
"Apache-2.0"
] | null | null | null |
# Empty models.py so django picks the templates
| 24
| 47
| 0.791667
| 8
| 48
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 48
| 1
| 48
| 48
| 0.95
| 0.9375
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
54cd440e19cc3746b29f73abaace2885b3a60f81
| 256
|
py
|
Python
|
wfdb/io/convert/__init__.py
|
ECGKit/wfdb-python
|
0d42dfb4b2946625f00cbf500d830d374a201153
|
[
"MIT"
] | null | null | null |
wfdb/io/convert/__init__.py
|
ECGKit/wfdb-python
|
0d42dfb4b2946625f00cbf500d830d374a201153
|
[
"MIT"
] | null | null | null |
wfdb/io/convert/__init__.py
|
ECGKit/wfdb-python
|
0d42dfb4b2946625f00cbf500d830d374a201153
|
[
"MIT"
] | null | null | null |
from wfdb.io.convert.csv import csv2ann, csv_to_wfdb
from wfdb.io.convert.edf import rdedfann, read_edf, wfdb_to_edf
from wfdb.io.convert.matlab import wfdb_to_mat
from wfdb.io.convert.tff import rdtff
from wfdb.io.convert.wav import wfdb_to_wav, read_wav
| 42.666667
| 63
| 0.832031
| 49
| 256
| 4.142857
| 0.326531
| 0.197044
| 0.246305
| 0.418719
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00431
| 0.09375
| 256
| 5
| 64
| 51.2
| 0.87069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
54d2e6a5b5a244289ee51665cb9c85fcc6243db7
| 293
|
py
|
Python
|
knlp/seq_labeling/crf/train.py
|
ERICMIAO0817/knlp
|
56052e49be87d604839d8f81a7295c73a5ac62cf
|
[
"MIT"
] | 19
|
2021-03-17T02:15:18.000Z
|
2021-12-14T04:46:21.000Z
|
knlp/seq_labeling/crf/train.py
|
ERICMIAO0817/knlp
|
56052e49be87d604839d8f81a7295c73a5ac62cf
|
[
"MIT"
] | null | null | null |
knlp/seq_labeling/crf/train.py
|
ERICMIAO0817/knlp
|
56052e49be87d604839d8f81a7295c73a5ac62cf
|
[
"MIT"
] | 7
|
2021-03-17T02:15:23.000Z
|
2022-03-17T15:41:04.000Z
|
# !/usr/bin/python
# -*- coding:UTF-8 -*-
# -----------------------------------------------------------------------#
# File Name: train
# Author: Junyi Li
# Mail: 4ljy@163.com
# Created Time: 2021-04-10
# Description:
# -----------------------------------------------------------------------#
| 29.3
| 74
| 0.313993
| 22
| 293
| 4.181818
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048872
| 0.09215
| 293
| 9
| 75
| 32.555556
| 0.296992
| 0.928328
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
54de182dd8757822cc2470e8f76487eed043312a
| 145
|
py
|
Python
|
community/views/nested_views/__init__.py
|
ThePokerFaCcCe/messenger
|
2db3d5c2ccd05ac40d2442a13d664ca9ad3cb14c
|
[
"MIT"
] | null | null | null |
community/views/nested_views/__init__.py
|
ThePokerFaCcCe/messenger
|
2db3d5c2ccd05ac40d2442a13d664ca9ad3cb14c
|
[
"MIT"
] | null | null | null |
community/views/nested_views/__init__.py
|
ThePokerFaCcCe/messenger
|
2db3d5c2ccd05ac40d2442a13d664ca9ad3cb14c
|
[
"MIT"
] | null | null | null |
from .invite_link_nview import InviteLinkNestedViewSet
from .member_nview import MemberNestedViewSet
from .rules_nview import RulesNestedViewSet
| 36.25
| 54
| 0.896552
| 16
| 145
| 7.875
| 0.625
| 0.261905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082759
| 145
| 3
| 55
| 48.333333
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
54e23ac16cebfe53f63a4c346b3f3e4e7260aa9a
| 4,863
|
py
|
Python
|
app.py
|
dragonman164/K-Means-Cluster-Visualizer
|
5db38a211ee5732108e393fabb0a9d2afa91d49b
|
[
"MIT"
] | null | null | null |
app.py
|
dragonman164/K-Means-Cluster-Visualizer
|
5db38a211ee5732108e393fabb0a9d2afa91d49b
|
[
"MIT"
] | null | null | null |
app.py
|
dragonman164/K-Means-Cluster-Visualizer
|
5db38a211ee5732108e393fabb0a9d2afa91d49b
|
[
"MIT"
] | 1
|
2021-11-25T09:23:42.000Z
|
2021-11-25T09:23:42.000Z
|
from flask import Flask,render_template,request,redirect
from pycaret.clustering import plot_model, setup, create_model
import pandas as pd
from pycaret.internal.tabular import pull
import os
import shutil
app = Flask(__name__)
def nopreprocessing():
df = pd.read_csv('static/dataset.csv')
pre = setup(data = df,silent = True,verbose=False)
model = create_model('kmeans',verbose=False)
results = pull()
plot_model(model, plot = 'elbow',save=True)
os.remove('static/Elbow.png')
shutil.move('Elbow.png','static/')
results.to_csv('static/results.csv')
def onlynorm():
df = pd.read_csv('static/dataset.csv')
pre = setup(data = df,silent = True,verbose=False,normalize=True, normalize_method='zscore')
model = create_model('kmeans',verbose=False)
results = pull()
plot_model(model, plot = 'elbow',save=True)
os.remove('static/Elbow.png')
shutil.move('Elbow.png','static/')
results.to_csv('static/results.csv')
def onlypca():
df = pd.read_csv('static/dataset.csv')
pre = setup(data = df,silent = True,verbose=False,pca = True, pca_method='linear')
model = create_model('kmeans',verbose=False)
results = pull()
plot_model(model, plot = 'elbow',save=True)
os.remove('static/Elbow.png')
shutil.move('Elbow.png','static/')
results.to_csv('static/results.csv')
def onlytransform():
df = pd.read_csv('static/dataset.csv')
pre = setup(data = df,silent = True,verbose=False,transformation=True, transformation_method='yeo-johnson')
model = create_model('kmeans',verbose=False)
results = pull()
plot_model(model, plot = 'elbow',save=True)
os.remove('static/Elbow.png')
shutil.move('Elbow.png','static/')
results.to_csv('static/results.csv')
def everything():
df = pd.read_csv('static/dataset.csv')
pre = setup(data = df,silent = True,verbose=False,transformation=True, transformation_method='yeo-johnson'
,normalize=True,normalize_method='zscore',pca = True, pca_method='linear')
model = create_model('kmeans',verbose=False)
results = pull()
plot_model(model, plot = 'elbow',save=True)
os.remove('static/Elbow.png')
shutil.move('Elbow.png','static/')
results.to_csv('static/results.csv')
def pca_and_norm():
df = pd.read_csv('static/dataset.csv')
pre = setup(data = df,silent = True,verbose=False
,normalize=True,normalize_method='zscore',pca = True, pca_method='linear')
model = create_model('kmeans',verbose=False)
results = pull()
plot_model(model, plot = 'elbow',save=True)
os.remove('static/Elbow.png')
shutil.move('Elbow.png','static/')
results.to_csv('static/results.csv')
def norm_and_trans():
df = pd.read_csv('static/dataset.csv')
pre = setup(data = df,silent = True,verbose=False,transformation=True, transformation_method='yeo-johnson'
,normalize=True,normalize_method='zscore')
model = create_model('kmeans',verbose=False)
results = pull()
plot_model(model, plot = 'elbow',save=True)
os.remove('static/Elbow.png')
shutil.move('Elbow.png','static/')
results.to_csv('static/results.csv')
def pca_and_transform():
df = pd.read_csv('static/dataset.csv')
pre = setup(data = df,silent = True,verbose=False,transformation=True, transformation_method='yeo-johnson'
,pca = True, pca_method='linear')
model = create_model('kmeans',verbose=False)
results = pull()
plot_model(model, plot = 'elbow',save=True)
os.remove('static/Elbow.png')
shutil.move('Elbow.png','static/')
results.to_csv('static/results.csv')
@app.route("/",methods = ["GET","POST"])
def index():
if request.method == 'POST':
norm = request.form.get('norm')
pca = request.form.get('pca')
trans = request.form.get('trans')
file = request.files["dataset"]
file.save("static/dataset.csv")
if pca is None and norm is None and trans is None:
nopreprocessing()
elif pca is not None and norm is not None and trans is not None :
everything()
elif pca is not None and norm is not None :
pca_and_norm()
elif pca is not None and trans is not None:
pca_and_transform()
elif norm is not None and trans is not None :
norm_and_trans()
elif norm is not None :
onlynorm()
elif pca is not None :
onlypca()
elif trans is not None:
onlytransform()
return redirect("/results")
return render_template("index.html")
@app.route("/results")
def results():
plot_image = os.path.join('static','Elbow.png')
return render_template("results.html",content = {
'elbow' : 'static//Elbow.png',
'results' : 'static//results.csv'
})
if __name__ == "__main__":
app.run(debug= True,threaded = True)
| 34.735714
| 111
| 0.659469
| 653
| 4,863
| 4.797856
| 0.127106
| 0.045962
| 0.034472
| 0.028088
| 0.748803
| 0.733163
| 0.730929
| 0.730929
| 0.72263
| 0.707628
| 0
| 0
| 0.19124
| 4,863
| 140
| 112
| 34.735714
| 0.796593
| 0
| 0
| 0.516949
| 0
| 0
| 0.181332
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084746
| false
| 0
| 0.050847
| 0
| 0.161017
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
54e490e1b758d30b5f71750c6aaad2d7edeabffe
| 180
|
py
|
Python
|
main/routes/beat.py
|
ohioh/ohioh_Framework_Cluster_3_Flask
|
69e50b9d697b5e8818305328335d26314b625732
|
[
"Apache-2.0"
] | 1
|
2020-08-11T18:37:36.000Z
|
2020-08-11T18:37:36.000Z
|
main/routes/beat.py
|
ohioh/ohioh_Framework_Cluster_3_Flask
|
69e50b9d697b5e8818305328335d26314b625732
|
[
"Apache-2.0"
] | null | null | null |
main/routes/beat.py
|
ohioh/ohioh_Framework_Cluster_3_Flask
|
69e50b9d697b5e8818305328335d26314b625732
|
[
"Apache-2.0"
] | null | null | null |
import os
from flask_restful import Resource
class Beat(Resource):
def get(self):
return 'the app Location Lat is running on {}'.format(os.getenv("FLASK_RUN_PORT"))
| 20
| 90
| 0.711111
| 27
| 180
| 4.62963
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188889
| 180
| 8
| 91
| 22.5
| 0.856164
| 0
| 0
| 0
| 0
| 0
| 0.286517
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
ab1d3ffeba4d4e3ef2e2520f5948a9e9707a247e
| 13,192
|
py
|
Python
|
src/dl/seg2text.py
|
tianzongw/Podcast-Segment-Retrieval-Spotify
|
f089520b1d70ed2607b56b2e50f97b65f34d5002
|
[
"MIT"
] | null | null | null |
src/dl/seg2text.py
|
tianzongw/Podcast-Segment-Retrieval-Spotify
|
f089520b1d70ed2607b56b2e50f97b65f34d5002
|
[
"MIT"
] | null | null | null |
src/dl/seg2text.py
|
tianzongw/Podcast-Segment-Retrieval-Spotify
|
f089520b1d70ed2607b56b2e50f97b65f34d5002
|
[
"MIT"
] | null | null | null |
import json
# test_rslt= {'9': [{'1nu9RsgfYsbNb7CDmHlvKx': (1326.4, 1355.9)}, {'6yYLn0MroCiOYrUCYpgx1K': (3011.5, 3018.3)}, {'3oX5yYOjd5qoCPWC5pTy9W': (3095.5, 3123.5)}, {'5DVdcHBM0jauUSxsmmwBre': (30.0, 56.2)}, {'2VVBXyGRGd2cydXPxHJme6': (139.3, 148.4)}], '10': [{'1tBwWZksIEKXKakbizSLWe': (620.6, 650.1)}, {'1NpwJujZ5BCPi6VXNDhTy6': (279.6, 309.0)}, {'3A3dW5PIfSnQZFHXGYGF7r': (1246.3, 1259.7)}, {'1WNUVDnbkExBXV48Qii3uf': (841.2, 870.2)}, {'1tBwWZksIEKXKakbizSLWe': (740.5, 770.4)}], '11': [{'1SDDlFhV1Z2VSe9hpd4RPT': (1329.4, 1359.3)}, {'3WJ9yNsZ1y4IY46bkuUsvA': (101.1, 130.4)}, {'7bLgTYDrRkeGxZEpTCoZYV': (713.6, 742.6)}, {'6oLRGJ580Vwk3eteA5Fupa': (2475.9, 2482.4)}, {'79c6V6ASeQQkxMvo0dxXea': (360.2, 389.6)}], '12': [{'20l7RcHtcVbJAEVeV3Cn3N': (2817.3, 2847.1)}, {'5fSLaE9BLkWtDt4zgjEudx': (306.7, 336.6)}, {'7k4zzSLm0MyMQfBudNDGlt': (726.6, 756.3)}, {'1BHhvHTUOkkOAruVAxvlwu': (30.1, 59.8)}, {'3ytHEQvEC5mjVWgycVGrsm': (654.0, 683.8)}], '13': [{'3d6WRPYpmUH6C8DvQrJeB0': (732.8, 751.2)}, {'3q9c0q5sc5WqbBgAdOVZqH': (76.9, 79.2)}, {'4d6XCNG113PuSTg5XnfV6C': (1133.1, 1162.9)}, {'5j9tDbfxtHEFLvy4LbqAnG': (0.7, 23.8)}, {'4ri9tSWMzp8O9opzDuiVcv': (2986.2, 3016.0)}], '14': [{'5cAL4avbpwvLbjPYEpHccB': (1860.3, 1888.5)}, {'2XcIt89N5W1aXN130kUYNi': (136.5, 148.1)}, {'3UVkA5qKYXDgXTJ0y0BTuR': (635.1, 664.8)}, {'4snFgZm09b6QmF5xfoeiuk': (1453.8, 1474.8)}, {'2B48aifWbO5UE1UmErmost': (418.0, 423.5)}], '15': [{'2QnV3RGPjwF62aFsPyqzM2': (2994.7, 3011.7)}, {'3Dp7K8Xus9kO9vNuKbo387': (529.9, 543.4)}, {'08DDXG4H338X174G1OFBKv': (374.0, 403.5)}, {'4qDbcrGpVXm3Cre4Ia19eC': (60.5, 90.1)}, {'4nxzKyuKq7lB2NTH4O2fmF': (122.0, 151.8)}], '16': [{'2e6RAg54Pphczs8MuYCsYQ': (2508.1, 2530.2)}, {'5ZQyvA84ZXbx5qXc7jZlul': (1792.8, 1822.6)}, {'6rbClMXXjgVjtfA16vj2YV': (2693.7, 2714.4)}, {'7d2NQEezd67SCvTT2SyqYO': (827.1, 856.0)}, {'28gUG2KiPFEwhCyIBNs1Fi': (60.4, 89.7)}], '17': [{'4pfZD1c9yT3Y6adpSdF4vP': (757.4, 787.3)}, {'515YJl4DL7FTv9d6PxARUi': (518.9, 530.7)}, {'2EbFXpCC9tjwRMwNoQkDO9': (873.0, 902.8)}, 
{'3E8bVjz7uWo9CgqFTNWenJ': (4110.0, 4114.7)}, {'7yNe05KM7Okd7NZUxSspDW': (348.9, 356.4)}], '18': [{'0U5esz5OdHF8rPhPq3eMAr': (642.0, 671.9)}, {'7wG8Vx3iU0USazS4ybkBn3': (736.1, 765.3)}, {'61zAFlhiCgvnGjV1tdGl6S': (394.2, 420.8)}, {'515F4SoQqHRzYOS8GG0w2A': (1725.7, 1754.4)}, {'1TpYYJ12J0Gs4ZFG1P38f1': (2542.3, 2567.8)}], '19': [{'3qHrSGBe0sxpL2H90BJLPR': (330.1, 359.7)}, {'2EoYf1NvLqmBaXW51SgRca': (1546.2, 1575.7)}, {'3qHrSGBe0sxpL2H90BJLPR': (1049.7, 1079.5)}, {'2EoYf1NvLqmBaXW51SgRca': (1724.5, 1753.2)}, {'6S1dp88FpnkrTzKOiZZo2Y': (1360.4, 1390.3)}], '20': [{'4BUMYoy7PiAhqc4nllhOxe': (740.1, 770.0)}, {'47JVGqOgQTNGYI73RbfCzK': (2370.5, 2399.2)}, {'0FgJEDNim4FRzhw2aBSlTH': (702.8, 732.7)}, {'2mIJfjQjU138yuQh0T9lZk': (599.9, 629.8)}, {'30TV1iXAfcFKS0tgJ3MlB8': (1831.3, 1860.5)}], '21': [{'6uulIspd1hEdTkoTeV9GGg': (1091.5, 1105.0)}, {'7lVrfLI6WIIwqQQfNwn9IU': (2328.1, 2358.0)}, {'0XnDgNWcGrm39A9zoElrD8': (458.0, 473.7)}, {'7z1x3HIh6px6VtxWHHpSNu': (1367.1, 1383.3)}, {'6OYgZ23WlpdrRctqJCDnUk': (1719.6, 1749.6)}], '22': [{'0xmx9StGweiC5eYvsIre1C': (145.6, 173.0)}, {'0hlO3MdhaCoDnhbAnkvjwr': (2343.0, 2372.8)}, {'2Rms0pcdVLQKpS8lTqJJIR': (1410.5, 1440.3)}, {'2o5BYjVKqRC6SpEr2u8pQz': (1526.3, 1555.9)}, {'3RffQ3V0uly9npufuWRFeX': (795.4, 799.4)}], '23': [{'4NvL58wD3ClVrj3j9sEXkT': (641.7, 671.1)}, {'2pl0xpiGbs6fDOYyfUac9G': (479.8, 509.8)}, {'613xXczaymDNN0vQ8vIZc7': (360.4, 389.5)}, {'2VUYcukQMSG3HaXbScyn0C': (275.3, 304.6)}, {'0JBbvDsNC7gIGxH1rhZbLc': (854.6, 883.8)}], '24': [{'6zppcozZ7nyKdryGMP8Dc7': (1035.7, 1065.0)}, {'0kwn2IW8OH465AOQWC8hhD': (3191.7, 3210.1)}, {'0HP2kHsFcIlcfmKGxX4v1z': (1410.7, 1438.8)}, {'5izX143v2RlBD3vR6N7CHS': (2504.7, 2534.4)}, {'7jwbEJdB2bd6DUii2PI9Fm': (60.1, 90.0)}], '25': [{'5Wgfsg4YYCUT0BOLpb7Sm2': (3282.6, 3289.6)}, {'6jBppZOiAVurFbMupnLnMH': (2006.0, 2035.7)}, {'3SLXSRTV5HmoXwvr0Csuad': (152.9, 182.7)}, {'5188tUPIWVwRlLSbHdLJGO': (3175.7, 3205.6)}, {'17w9fZZtAgOxE279AAcIN5': (52.3, 76.9)}], '26': [{'0CZzhzs767ri1FopKsCRD7': (3199.5, 
3222.5)}, {'0CZzhzs767ri1FopKsCRD7': (696.1, 725.8)}, {'4UMl3gVO0Qr8dbbe1uebTy': (1952.5, 1982.0)}, {'3FKMdWpQBprrVpir9mXLLJ': (16.4, 28.7)}, {'5SaFsYvUoThZNwYtHuAnPZ': (1795.0, 1824.1)}], '27': [{'0yuFVIAbYJr2cR5PFTtSaF': (86.1, 103.9)}, {'1UhLiTpZJ9NfShpmwloAPw': (1889.5, 1915.1)}, {'733luUFBNKVo6qukjiVbg6': (419.9, 449.7)}, {'27v21bF1DYn9C6yQfjTpLJ': (2413.3, 2435.0)}, {'20u6nEZWvGW3xibFqW4yoL': (1040.9, 1070.6)}], '28': [{'1WeuaTXr6j74ZAdJ8ZP7EU': (1532.9, 1556.5)}, {'7I1KCZZPz1nk24Qe4rEIku': (418.4, 448.1)}, {'2z9Sv0kMSIGE38Vvy7zl3A': (628.5, 658.3)}, {'3Q0YHlqO8wNdFN76NSTXq7': (489.1, 507.7)}, {'3vEjP7yESMpscIc6ZEhjs5': (1054.1, 1084.0)}], '29': [{'39wZCiRz5mR8GwJZD5N5dA': (306.1, 335.9)}, {'39wZCiRz5mR8GwJZD5N5dA': (276.1, 305.9)}, {'5y86Ff6GpDAQw3424VTY3R': (938.2, 967.9)}, {'6vuZXESVtr3t9TOLb5Rarz': (227.6, 257.5)}, {'2I86dKI6D8DTuLEJm3DvvX': (120.5, 149.7)}], '30': [{'3nOhRH7FSURQQisgyaheqW': (601.3, 630.2)}, {'3UMAd7DVwud0BFIwbcwKiO': (451.4, 480.3)}, {'3l168ZnYpbwEsSeDa9n7wR': (1498.0, 1527.7)}, {'4OhmIpB2m6d1nueb29XZJ7': (744.7, 753.5)}, {'7eWRJMraYsRFKKsDtVGmVw': (449.3, 479.2)}], '31': [{'5Km0HrkWpsjwNXGzxn1Pvy': (1424.5, 1440.4)}, {'5qVJolbD56sQXkNWxWHH7y': (316.3, 346.0)}, {'3Iwfaaf98ZQtyHEqtIdNjb': (1454.0, 1468.3)}, {'5Km0HrkWpsjwNXGzxn1Pvy': (56.4, 86.1)}, {'6S1dp88FpnkrTzKOiZZo2Y': (56.4, 86.1)}], '32': [{'3YPrQcpfBKHtgAqiC8nITr': (187.4, 217.3)}, {'13cWt57IV2zavfROMuecAr': (240.1, 269.6)}, {'1LpdgqSOOFYNvP3rfOvL4i': (2288.3, 2317.9)}, {'4Cna3VjxADb5c9ECy9CYit': (51.0, 77.8)}, {'13eCm0BeNpz5fHUnJOYcY0': (1177.3, 1188.0)}], '33': [{'2C2kcGzm3qj5fVd5j5NFf5': (4525.6, 4535.2)}, {'7xZxuj4gzJ2LrJjugLGqel': (1838.9, 1842.9)}, {'7LPowIG6V6UErKDR3ZsmRy': (214.0, 243.4)}, {'263dei9amyPsMd8zgwkRyh': (406.8, 431.6)}, {'601d5X8VkfDhjr96PpxzSd': (912.6, 928.0)}], '34': [{'7LrlEm1QT3rO0oWNZaRcef': (1492.3, 1511.8)}, {'1WDniRS2Jl5zXk09ruxT2M': (758.4, 787.5)}, {'4yrbY0sJ5JxDZpQqxH6sSa': (216.3, 244.1)}, {'34pwI8sMRTIB2A3N0saqU1': (89.0, 118.7)}, 
{'4QvekcKWNIeXVT0aQvQpXe': (1543.4, 1573.2)}], '35': [{'20Rxox5JAR894PcIMGpsV9': (1535.4, 1563.8)}, {'264sqLIIEqFHG58MQCzznd': (1625.6, 1655.2)}, {'6psxfn4N3rPq9PK5EwCXjP': (1648.2, 1678.1)}, {'5QKE5EW6UWN0Xwb4zH6SBM': (2523.5, 2552.7)}, {'5QKE5EW6UWN0Xwb4zH6SBM': (2763.4, 2792.7)}], '36': [{'6AAZQ40K6ewvwhFLKpE424': (2342.6, 2372.0)}, {'6y2DKWIPJFg951uoJzXQmg': (314.0, 327.7)}, {'0ps1K1SQGjo2a4WvfvHl6q': (3479.0, 3500.6)}, {'64UC1PFga07mIjlBkO4jxU': (1148.9, 1178.4)}, {'6AAZQ40K6ewvwhFLKpE424': (769.4, 798.7)}], '37': [{'0N8d6hnQlqPjYkTIATxWsJ': (472.0, 500.7)}, {'0e5ND7R6ieMeL9b9yqwLAY': (1541.2, 1571.1)}, {'4ZUOvIP9fq109Zb3EqDvCS': (1158.8, 1176.8)}, {'6XpvSw2ChxXdKsGtagkbGl': (330.3, 359.9)}, {'1Bjtu0d6EYqBEDMRylmlwv': (835.6, 864.7)}], '38': [{'4JSgXkOqhBbe4EbCJvhBHc': (662.3, 683.0)}, {'632GgLICRAhoHL6BybypRy': (623.9, 646.0)}, {'4pQs0Zxyy3K5J8aKV3cPx3': (1216.5, 1245.9)}, {'1jRXwCrvXoaymqgDhCUqmc': (435.1, 445.2)}, {'33GKVnvJLMN7NnVcBnZZ2K': (1725.7, 1755.6)}], '39': [{'0gWObCGEkb5mLXFOEgsKqc': (807.5, 837.3)}, {'1C3mjLUdmmWLDABBJ8FzE3': (466.3, 473.6)}, {'038F0uu8ILdoehCYYm7klf': (89.9, 103.6)}, {'2h3N5NSiKTn5gQRCySLYTh': (659.9, 689.8)}, {'0ZwMaNDixROvFctWisd7cf': (2512.3, 2542.1)}], '40': [{'0QAhZv5uG1mLOTu2mqCBv0': (3123.5, 3153.2)}, {'0QAhZv5uG1mLOTu2mqCBv0': (1943.8, 1973.7)}, {'0QAhZv5uG1mLOTu2mqCBv0': (1464.2, 1493.8)}, {'4IYaYCuA29Pg9wFNi1wPVS': (1894.4, 1924.1)}, {'0QAhZv5uG1mLOTu2mqCBv0': (3635.3, 3665.1)}], '41': [{'6BM0QxiH8CIhITr7NgKBLB': (759.4, 789.0)}, {'6Y59ofDlvbWm3xmof4LB25': (120.0, 149.8)}, {'4JgJ4IjahKhOkIiZQMBvDH': (2207.8, 2236.3)}, {'0cmjKhAsZLEvzOrhTlwx3D': (719.8, 749.8)}, {'6MBHqzUA45bwvb49gFyEqf': (1554.7, 1584.1)}], '42': [{'5e6K15eplGPOKfLj8gy3cl': (1974.8, 2004.0)}, {'0qnbqipv1MawXHSIUD9nFk': (3589.0, 3610.6)}, {'10FacfpEjqMYBMhNKrzWVB': (3225.4, 3255.1)}, {'0RpxbcOOUiRyn8LFDz5uY9': (23.6, 53.2)}, {'5Kotvuir1G6C4qUBuSUzkz': (1180.0, 1209.6)}], '43': [{'5J7PnnCVfvNdcOKuJos8zI': (1014.7, 1042.7)}, {'7JNNJjz1pywBZ3h4Owuc6e': 
(740.8, 764.3)}, {'6sv1eCP0LtsaRTscDUClFI': (2654.7, 2664.6)}, {'0X1iaUVLeqVnQKGaRlBBbl': (270.8, 300.1)}, {'4PGMsJzRYxK0xaU8D5mIOD': (3575.1, 3604.5)}], '44': [{'1Sip0NxiAiGWcbelTs7Phx': (6.6, 35.9)}, {'7G5w8FngJGTMHmGqiuMumf': (1837.7, 1843.6)}, {'69meUC6FEEbOtIsXrU2DKC': (756.2, 785.8)}, {'18Y5VMZ3g3olSbC3Ft1FA7': (1.1, 8.8)}, {'394PqCOFAcLE7ZPUNIkTQm': (2645.9, 2675.8)}], '45': [{'3IVmMZGXKR4w1EGxvpyepY': (446.8, 476.7)}, {'28LKf7Epdkwhs4YtJSzhEp': (989.9, 1019.7)}, {'1EQ59WUlPZVMwvzUnpp5ui': (2359.7, 2389.4)}, {'1lO3i4s425iozMaKPVup6V': (2085.0, 2114.8)}, {'1lO3i4s425iozMaKPVup6V': (2159.4, 2189.2)}], '46': [{'2gVRHaCkpSqwRA5TEbUgM3': (299.9, 329.7)}, {'4tdVVg2EBDhSjaP0EObu3D': (30.0, 59.9)}, {'68nxZero8tdcfOmg1bvYA0': (1488.5, 1518.2)}, {'75v0whZp6CVtotfEabKAwK': (2114.1, 2143.7)}, {'0i2yLVxnzCtD5IweMduuLg': (1103.6, 1133.5)}], '47': [{'5Bz3TczFQioNKF7mrhWUNZ': (1732.1, 1761.9)}, {'7JFfuGJNsQIeLRB1ZxJ8XR': (3467.4, 3496.9)}, {'1vKZezKKn7GfXGEx9oU7KL': (2260.4, 2290.2)}, {'1hsZbh9TB2uhvBMMa6ZARZ': (2277.5, 2307.3)}, {'5uv4JMaPJspBUiTrPWvNqV': (1994.9, 2004.0)}], '48': [{'4UL7pyEHR7yge0aVmcMh7J': (30.0, 59.9)}, {'79mkOPdaIxS72OrqbJ6Ynu': (3988.2, 4018.2)}, {'4b2VOreu91QAYSUzzZvqqU': (276.6, 306.4)}, {'3GI8HXhbHyLhtGO0O1lIiV': (124.7, 128.0)}, {'6lexjzyYCIROTbeZK8HuJp': (3544.3, 3573.7)}], '49': [{'7e4q2PigBY74NiZwfg8QXa': (3739.7, 3746.5)}, {'7EWxvCdfIifXdrx1yxKFO8': (750.1, 779.7)}, {'7AzbvTcIyqerPRd6zqOmue': (988.9, 1018.5)}, {'7EWxvCdfIifXdrx1yxKFO8': (864.1, 893.8)}, {'4MCL7sFQv8i2XFDNosbl9w': (266.3, 296.1)}], '50': [{'515F4SoQqHRzYOS8GG0w2A': (1277.0, 1306.6)}, {'21yuRLr3P5RCeCG0Uldm0C': (1114.8, 1142.2)}, {'3Lo1HMeSohAox1dKjJ2Vrx': (928.9, 958.8)}, {'4moTutMvtYWmbjmOUyZKzt': (145.9, 175.8)}, {'14VWU0wDAyj8OMVzEhy6zC': (4039.7, 4069.4)}], '51': [{'23Dbiknqx9QcKcIAsO9OHX': (90.2, 119.6)}, {'24D14fXF10T1D0j7VJ6YRr': (539.9, 568.0)}, {'3hyBikqzCy1mCWJRnuQak8': (1828.9, 1855.7)}, {'4fYTZ2Qzd3avXGmDhL03ME': (1177.5, 1207.3)}, {'0TcmMFBlbWmSuHmwbIyJD3': (35.6, 
65.3)}], '52': [{'7GnKpfX8S7lJ8alf2j6XJy': (2396.7, 2426.4)}, {'6X1YYNr7WbfQIbrrasP4JA': (971.9, 1001.5)}, {'02jT98Bp0MWBbYirPgwJvH': (120.0, 149.8)}, {'76aVdeFMhH0o4FWt1JztVf': (832.9, 862.6)}, {'1cM0uG3sy7OB0m7zo1xSpX': (49.4, 79.1)}], '53': [{'0dQGSecZTlNAbgtwuD8WCE': (33.5, 60.5)}, {'2W4LsTWRqm1Z3E7q2ZN71B': (2.3, 30.4)}, {'4rcs39itVgFjwFAKx9Q0si': (2.2, 30.4)}, {'3vlKp5aecgMLeB8TKJYp3L': (1417.1, 1433.0)}, {'04bITXFN1OBr193ozHFaQV': (1505.2, 1524.3)}], '54': [{'2b8D5u6BS7IUypIHQlAtxG': (2100.4, 2130.2)}, {'47zhhONlw353ULwJ5Ktz4W': (364.3, 393.9)}, {'6TNRP3TEcRSntPrC0BEndB': (134.8, 164.6)}, {'3L4nz1wk8LSkGaiZpacAqd': (114.8, 144.5)}, {'529D1owvKxmM8AIMUMj4YA': (3581.8, 3611.4)}], '55': [{'4fWLH1I6m9FlT6KhtxXGrY': (2234.1, 2264.0)}, {'6B6VhV5hRkrwBiQ3iJqvXB': (1373.9, 1397.8)}, {'102vWgJarBIxyBdFqjS9ik': (76.1, 102.9)}, {'6w51kEcVzTuJpH5NQuQyxi': (2777.9, 2805.2)}, {'6Glo5Se76m8em3FMbBq2b9': (74.0, 92.2)}], '56': [{'08zHtwZvFDIELhunUD8Jvu': (1566.6, 1596.5)}, {'38P9opb88LDfRaZFXspJN4': (62.8, 92.1)}, {'29IqKb0qYkDdKRGe70OJPO': (1256.5, 1277.9)}, {'2w3RIxmAvQbw5lUJFqvYrx': (107.3, 137.0)}, {'7xZxuj4gzJ2LrJjugLGqel': (1480.9, 1506.8)}], '57': [{'471d6Wlr6excioaVkqGzaV': (101.8, 114.4)}, {'7iDEir5WrREBj5xGybzvS6': (1861.7, 1879.2)}, {'7jiyPXUcZmpNSFxUCcEJV2': (95.3, 105.5)}, {'6T69HjKwn5CC8pPUvw3WMV': (0.1, 5.6)}, {'6kV5Xp9YPUdlr9Q7faGczK': (0.8, 30.4)}], '58': [{'2C8CBKF5imucrbCkK3mBmZ': (1133.7, 1163.3)}, {'4I7cDzXzt3oAyJfFKr1wpz': (3021.8, 3041.7)}, {'5cNgn9gK0ZTyxDhQ3Lmtm7': (30.0, 58.8)}, {'5u21crAIrgjTYtQUQIdYfp': (0.0, 29.9)}, {'73DUyPBLGqGS6dv2SVDB0R': (0.1, 4.1)}]}
# with open('../result/test_rslt.json', 'w') as fp:
# json.dump(test_rslt, fp)
with open('../result/test_rslt.json', 'r') as f:
results = json.load(f)
with open('../data/testing.json', 'r') as f:
testing_segments = json.load(f)
rslt_text = {}
rslt_segnum = {}
rslt_episode = {}
for topid in results:
rslt_text[topid] = []
rslt_segnum[topid] = []
rslt_episode[topid] = []
for result in results[topid]:
for episode_id in result:
result_start_time = result[episode_id][0]
target_episode = testing_segments[episode_id]
for idx, segment in enumerate(target_episode):
segment_start_time = float(segment['startTime'][:-1])
if segment_start_time == result_start_time:
rslt_episode[topid].append(episode_id)
rslt_segnum[topid].append(segment['segNum'])
if not idx+1 >= len(target_episode):
prev = target_episode[idx-1]['transcript']
nxt = target_episode[idx+1]['transcript']
rslt_text[topid].append(prev + segment['transcript'] + nxt)
else:
prev = target_episode[idx-1]['transcript']
rslt_text[topid].append(prev + segment['transcript'])
print('model: roberta-large-nli-stsb-mean-tokens' )
for key, value in rslt_text.items():
for i in range(len(value)):
print('Topic ID:', key, 'Episode ID:', rslt_episode[key][i], 'Segment Number:', rslt_segnum[key][i], 'Extracted Segment:', value[i])
| 299.818182
| 11,573
| 0.660324
| 1,501
| 13,192
| 5.778814
| 0.502998
| 0.008992
| 0.004496
| 0.00588
| 0.025478
| 0.025478
| 0.015448
| 0.015448
| 0.015448
| 0.015448
| 0
| 0.283357
| 0.094982
| 13,192
| 44
| 11,574
| 299.818182
| 0.443169
| 0.883111
| 0
| 0.0625
| 0
| 0
| 0.133464
| 0.03776
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.03125
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ab2987033e05912ce8a040f51af2c65c5ede3581
| 94
|
py
|
Python
|
src/main/resources/classes/monk/step.py
|
WynnLab/WynnLab
|
9950bc1485fa187394c1b1326fa0b5c6b6a1ac96
|
[
"MIT"
] | 2
|
2021-03-17T19:28:36.000Z
|
2021-03-26T09:31:22.000Z
|
src/main/resources/classes/monk/step.py
|
FauxKiwi/Wynnlab
|
9950bc1485fa187394c1b1326fa0b5c6b6a1ac96
|
[
"MIT"
] | 5
|
2021-06-08T12:13:40.000Z
|
2021-08-09T15:04:23.000Z
|
src/main/resources/classes/monk/step.py
|
FauxKiwi/Wynnlab
|
9950bc1485fa187394c1b1326fa0b5c6b6a1ac96
|
[
"MIT"
] | 4
|
2021-08-09T15:17:23.000Z
|
2022-03-05T14:08:26.000Z
|
from com.wynnlab.spells import PySpell
class Spell(PySpell):
def tick(self):
pass
| 18.8
| 38
| 0.691489
| 13
| 94
| 5
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.223404
| 94
| 5
| 39
| 18.8
| 0.890411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
db49eadb44dc32f711477c3ee19ffd52d89c748e
| 38
|
py
|
Python
|
hik_manager/__init__.py
|
S0nic014/hik_manager
|
78c3ff4851b6f4a3e58bafb5777fce8d9635f73f
|
[
"MIT"
] | null | null | null |
hik_manager/__init__.py
|
S0nic014/hik_manager
|
78c3ff4851b6f4a3e58bafb5777fce8d9635f73f
|
[
"MIT"
] | null | null | null |
hik_manager/__init__.py
|
S0nic014/hik_manager
|
78c3ff4851b6f4a3e58bafb5777fce8d9635f73f
|
[
"MIT"
] | null | null | null |
from hik_manager.logger import Logger
| 19
| 37
| 0.868421
| 6
| 38
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
db4c63b28c236517ef35f0942df90c16556f3c97
| 108
|
py
|
Python
|
simple/app2/src/index.py
|
tuanquynet/docker-compose-example
|
545a850bb441031aa9260ea7cb53ea4e332c32b7
|
[
"MIT"
] | null | null | null |
simple/app2/src/index.py
|
tuanquynet/docker-compose-example
|
545a850bb441031aa9260ea7cb53ea4e332c32b7
|
[
"MIT"
] | null | null | null |
simple/app2/src/index.py
|
tuanquynet/docker-compose-example
|
545a850bb441031aa9260ea7cb53ea4e332c32b7
|
[
"MIT"
] | null | null | null |
import os
import time
print(os.environ.get('CONFIG_MONGODB_USERNAME'))
while True:
time.sleep(10)
pass
| 13.5
| 48
| 0.759259
| 17
| 108
| 4.705882
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.12963
| 108
| 8
| 49
| 13.5
| 0.829787
| 0
| 0
| 0
| 0
| 0
| 0.211009
| 0.211009
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.333333
| 0
| 0.333333
| 0.166667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
db589fe00072e696d45340b0c22fb48e991286c7
| 2,615
|
py
|
Python
|
den/parser/den_ast/maths.py
|
MonliH/Den
|
9c2e69744dcf26ae01154eac32aa4ea8ff2adee3
|
[
"MIT"
] | null | null | null |
den/parser/den_ast/maths.py
|
MonliH/Den
|
9c2e69744dcf26ae01154eac32aa4ea8ff2adee3
|
[
"MIT"
] | null | null | null |
den/parser/den_ast/maths.py
|
MonliH/Den
|
9c2e69744dcf26ae01154eac32aa4ea8ff2adee3
|
[
"MIT"
] | null | null | null |
from .node import Node
# Maths AST nodes
class maths:
    """Namespace grouping the arithmetic AST node classes.

    All binary operations (Add, Sub, Mul, Div, Mod) share the exact same
    constructor, so they inherit it from a private ``_BinaryOp`` base
    instead of repeating it five times. Each public class keeps the same
    name, signature, and attributes as before.
    """

    class _BinaryOp(Node):
        """Shared base for binary math operations (``left OP right``)."""

        def __init__(self, left, right, position):
            """
            Args:
                left (Expression): Left hand side of operation.
                right (Expression): Right hand side of operation.
                position (Location): Position of the operation.
            """
            self.left = left
            self.right = right
            self.position = position

    class Add(_BinaryOp):
        """Addition node."""

    class Sub(_BinaryOp):
        """Subtraction node."""

    class Mul(_BinaryOp):
        """Multiplication node."""

    class Div(_BinaryOp):
        """Division node."""

    class Mod(_BinaryOp):
        """Modulo node."""

    class Neg(Node):
        """Negate/negative node (unary minus)."""

        def __init__(self, value, position):
            """
            Args:
                value (Expression): Value to be negated/turned negative.
                position (Location): Position of negate.
            """
            self.value = value
            self.position = position
| 29.715909
| 72
| 0.499426
| 240
| 2,615
| 5.341667
| 0.1625
| 0.062403
| 0.078003
| 0.148206
| 0.737129
| 0.737129
| 0.737129
| 0.737129
| 0.737129
| 0.737129
| 0
| 0
| 0.407266
| 2,615
| 87
| 73
| 30.057471
| 0.827097
| 0.394646
| 0
| 0.677419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.193548
| false
| 0
| 0.032258
| 0
| 0.451613
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
db66720ae15387441f21bbaf5a290a7d6718417f
| 5,196
|
py
|
Python
|
accounts/models.py
|
Jawmo/Hope_Pub
|
1d142a712c064c2fb9dc63b38d8c8e65fb089386
|
[
"MIT"
] | null | null | null |
accounts/models.py
|
Jawmo/Hope_Pub
|
1d142a712c064c2fb9dc63b38d8c8e65fb089386
|
[
"MIT"
] | null | null | null |
accounts/models.py
|
Jawmo/Hope_Pub
|
1d142a712c064c2fb9dc63b38d8c8e65fb089386
|
[
"MIT"
] | null | null | null |
import uuid
from django.contrib.auth.models import AbstractUser
from django.db import models
class CustomUser(AbstractUser):
    """Site user model extending Django's AbstractUser."""

    # add additional fields in here
    # NOTE(review): this overrides AbstractUser.first_name with a shorter,
    # non-blank field — confirm the tighter constraint is intentional.
    first_name = models.CharField(max_length=30)
    # In-game character name; optional.
    char_name = models.CharField(max_length=50, blank=True)
    # NOTE(review): uuid.uuid4 as a CharField default stores the UUID's
    # string form; a models.UUIDField would be more natural — changing it
    # would require a migration, so confirm before touching.
    uuid_id = models.CharField(default=uuid.uuid4, editable=True, max_length=50)

    def __str__(self):
        # `email` is inherited from AbstractUser.
        return self.email
class Players(models.Model):
    """Persistent state for a player character.

    Several text fields (vitals, core_attributes, conditions) appear to
    hold serialized dict-like data as plain strings — TODO confirm the
    exact format against the game-engine callers.
    """

    unique_id = models.CharField(max_length=10, primary_key=True)
    uuid_id = models.CharField(max_length=50)
    password = models.CharField(max_length=50, blank=True)
    entity_type = models.CharField(max_length=500, blank=False)
    description = models.CharField(max_length=999)
    gender = models.CharField(max_length=30)
    race = models.CharField(max_length=30)
    vitals = models.CharField(max_length=999, blank=True)
    core_attributes = models.CharField(max_length=999, blank=True)
    # Connection status; new rows start logged out.
    player_state = models.CharField(max_length=30, default="logged_out")
    # Default is a serialized mapping holding the initial stance.
    conditions = models.CharField(default="{'stance': 'standing'}", max_length=255)
    stow_loc = models.CharField(max_length=100, blank=True)
    location = models.UUIDField(blank=True)

    def __str__(self):
        # Bug fix: `description` is a CharField, so its value is a plain
        # string; the original `self.description['name']` raised
        # TypeError ("string indices must be integers") whenever the
        # object was rendered.
        return self.description
class Rooms(models.Model):
    """A location in the game world (including ship interiors)."""

    # add additional fields in here
    unique_id = models.CharField(max_length=10, primary_key=True)
    uuid_id = models.CharField(max_length=50)
    entity_type = models.CharField(max_length=150)
    name = models.CharField(max_length=999)
    description = models.CharField(max_length=999)
    # NOTE(review): presumably a serialized mapping of exit directions —
    # confirm the format against callers.
    exits = models.CharField(max_length=100)
    region = models.CharField(max_length=30, blank=True)
    zone = models.CharField(max_length=50, blank=True)
    elevation = models.CharField(max_length=30, blank=True)
    effects = models.CharField(max_length=500, blank=True)
    owner = models.CharField(max_length=30, blank=True)
    ship_id = models.CharField(max_length=36, blank=True)
    coordinates = models.CharField(max_length=100, blank=True)

    def __str__(self):
        # Bug fix: the original `return self.name, self.description`
        # returned a tuple, but __str__ must return str (Django raises
        # TypeError otherwise). Return the name, matching the Orgs model.
        return self.name
class Npcs(models.Model):
    """Non-player character record."""

    # add additional fields in here
    unique_id = models.CharField(max_length=10, primary_key=True)
    uuid_id = models.CharField(max_length=50)
    join_table = models.CharField(max_length=50)
    description = models.CharField(max_length=999, blank=True)
    gender = models.CharField(max_length=30, blank=True)
    race = models.CharField(max_length=30, blank=True)
    vitals = models.CharField(max_length=999, blank=True)
    # Defaults to an empty serialized mapping ("{}").
    core_attributes = models.CharField(default="{}", max_length=255, blank=True)
    npc_state = models.CharField(max_length=50, blank=True)
    conditions = models.CharField(max_length=500, blank=True)
    credit = models.CharField(max_length=50)
    # NOTE(review): supply/demand look like serialized trade data —
    # confirm the format against the economy code.
    supply = models.CharField(max_length=255, blank=True)
    demand = models.CharField(max_length=255, blank=True)
    home_loc = models.CharField(max_length=255, blank=True)
    demeanor = models.CharField(max_length=50, blank=True)
    location = models.CharField(max_length=50)

    def __str__(self):
        return self.description
class Items(models.Model):
    """In-game item record."""

    # add additional fields in here
    unique_id = models.CharField(max_length=10, primary_key=True)
    uuid_id = models.CharField(max_length=50)
    description = models.CharField(max_length=999)
    keyword = models.CharField(max_length=30)
    attributes = models.CharField(max_length=999, blank=True)
    dynamic_stats = models.CharField(max_length=30)
    room_target = models.CharField(max_length=30, blank=True)
    # Container/door state; items start open by default.
    is_open = models.BooleanField(default=True)
    location = models.CharField(max_length=50)
    # NOTE(review): presumably the body slot when carried/worn — confirm.
    location_body = models.CharField(max_length=999, blank=True)
    owner = models.CharField(max_length=30, blank=True)

    def __str__(self):
        return self.description
class Orgs(models.Model):
    """Organization (faction/guild) record."""

    # add additional fields in here
    unique_id = models.CharField(max_length=10, primary_key=True)
    uuid_id = models.CharField(max_length=50)
    name = models.CharField(max_length=30)
    org_desc = models.CharField(max_length=255, blank=True)
    supply = models.CharField(max_length=255, blank=True)
    demand = models.CharField(max_length=255, blank=True)
    home = models.CharField(max_length=50, blank=True)

    def __str__(self):
        return self.name
class Wiki(models.Model):
    """A wiki/help entry."""

    # add additional fields in here
    unique_id = models.CharField(max_length=10, primary_key=True)
    base_type = models.CharField(max_length=50)
    title = models.CharField(max_length=255, blank=True)
    content = models.CharField(max_length=999, blank=True)
    usage = models.CharField(max_length=50, blank=True)

    def __str__(self):
        return self.title
class News(models.Model):
    """A news/announcement post."""

    # add additional fields in here
    unique_id = models.CharField(max_length=999, primary_key=True)
    post_title = models.CharField(max_length=999, blank=True)
    # NOTE(review): max_length has no effect on DateField — likely a
    # copy-paste leftover; removing it would be harmless but needs a check.
    post_date = models.DateField(max_length=999, blank=True)
    post_author = models.CharField(max_length=999, blank=True)
    post_content = models.TextField(max_length=999, blank=True)

    def __str__(self):
        return self.unique_id
| 40.913386
| 83
| 0.736336
| 714
| 5,196
| 5.152661
| 0.154062
| 0.173689
| 0.322914
| 0.430552
| 0.828214
| 0.738516
| 0.654526
| 0.446045
| 0.35988
| 0.35988
| 0
| 0.039872
| 0.155312
| 5,196
| 127
| 84
| 40.913386
| 0.79836
| 0.040223
| 0
| 0.34
| 0
| 0
| 0.007631
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0.01
| 0.03
| 0.08
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
db9287f87eaebb89339a653acd43174641908f37
| 152
|
py
|
Python
|
sentence_transformers/models/XLNet.py
|
dd-dos/sentence-transformers
|
8f9c36b788e15141f723d80fea67ed16785cd18e
|
[
"Apache-2.0"
] | 19
|
2021-08-03T11:17:01.000Z
|
2022-02-07T13:48:41.000Z
|
sentence_transformers/models/XLNet.py
|
dd-dos/sentence-transformers
|
8f9c36b788e15141f723d80fea67ed16785cd18e
|
[
"Apache-2.0"
] | 3
|
2021-08-05T08:35:02.000Z
|
2021-12-02T09:32:39.000Z
|
sentence_transformers/models/XLNet.py
|
dd-dos/sentence-transformers
|
8f9c36b788e15141f723d80fea67ed16785cd18e
|
[
"Apache-2.0"
] | 1
|
2021-04-20T14:20:09.000Z
|
2021-04-20T14:20:09.000Z
|
from . import Transformer
class XLNet(Transformer):
    """Deprecated alias kept only for backwards compatibility.

    Use ``models.Transformer`` directly instead; this subclass adds no
    behavior of its own.
    """

    pass
| 10.857143
| 55
| 0.585526
| 13
| 152
| 6.846154
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.328947
| 152
| 13
| 56
| 11.692308
| 0.872549
| 0.328947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
dbb9419020107ce17103b80466207bdc250ca30b
| 204
|
py
|
Python
|
RoboSounds.roboFontExt/lib/menu_toggleState.py
|
typesupply/robosounds
|
b73970c7d768c2f964c61b977d2cb55b15555391
|
[
"MIT"
] | 10
|
2020-03-14T15:38:26.000Z
|
2020-07-02T19:51:37.000Z
|
RoboSounds.roboFontExt/lib/menu_toggleState.py
|
typesupply/robosounds
|
b73970c7d768c2f964c61b977d2cb55b15555391
|
[
"MIT"
] | 2
|
2020-03-15T13:24:22.000Z
|
2020-03-15T13:26:46.000Z
|
RoboSounds.roboFontExt/lib/menu_toggleState.py
|
typesupply/robosounds
|
b73970c7d768c2f964c61b977d2cb55b15555391
|
[
"MIT"
] | null | null | null |
from mojo.roboFont import OpenWindow
from robosounds import RoboSoundsController
# Toggle the RoboSounds listening state: start it when idle, stop it
# when already active.
if not RoboSoundsController.isListening():
    RoboSoundsController.startListening()
else:
    RoboSoundsController.stopListening()
| 25.5
| 43
| 0.862745
| 17
| 204
| 10.352941
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 204
| 8
| 44
| 25.5
| 0.93617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9160b7e98a214f3cbea578c9c749865376bd6718
| 69
|
py
|
Python
|
src/models/ioc_densenet/__init__.py
|
gsc2001/ConvexNet
|
a17609bd5bca0a02b6330b1ad8035f2b280109f0
|
[
"MIT"
] | null | null | null |
src/models/ioc_densenet/__init__.py
|
gsc2001/ConvexNet
|
a17609bd5bca0a02b6330b1ad8035f2b280109f0
|
[
"MIT"
] | null | null | null |
src/models/ioc_densenet/__init__.py
|
gsc2001/ConvexNet
|
a17609bd5bca0a02b6330b1ad8035f2b280109f0
|
[
"MIT"
] | null | null | null |
from .module import IOCDensenetModule
from .model import IOCDenseNet
| 23
| 37
| 0.855072
| 8
| 69
| 7.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 69
| 2
| 38
| 34.5
| 0.967213
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9162ab4191a9d991bc5410b33b5e365cea75bacc
| 1,145
|
py
|
Python
|
dnpy/regularizers.py
|
salvacarrion/dnpy
|
7685355076dbdbf76a4c06c0004aa65fe2e62603
|
[
"MIT"
] | null | null | null |
dnpy/regularizers.py
|
salvacarrion/dnpy
|
7685355076dbdbf76a4c06c0004aa65fe2e62603
|
[
"MIT"
] | null | null | null |
dnpy/regularizers.py
|
salvacarrion/dnpy
|
7685355076dbdbf76a4c06c0004aa65fe2e62603
|
[
"MIT"
] | null | null | null |
import numpy as np
class Regularizer:
    """Base class for parameter regularizers.

    Subclasses implement ``forward`` (the penalty contribution for a
    parameter) and ``backward`` (the gradient of that penalty with
    respect to the parameter). The base versions are no-ops.
    """

    def __init__(self, name="Base regularizer"):
        self.name = name

    def forward(self, param):
        """Penalty for ``param``; no-op in the base class."""
        pass

    def backward(self, param):
        """Penalty gradient for ``param``; no-op in the base class."""
        pass


class L1(Regularizer):
    """L1 (lasso) regularizer: penalty = lmda * |param|."""

    def __init__(self, lmda=0.01):
        super().__init__(name="L1")
        self.lmda_l1 = lmda

    def forward(self, param):
        # |param| scaled by the regularization strength.
        return np.abs(param) * self.lmda_l1

    def backward(self, param):
        # d/dparam of lmda*|param| is lmda*sign(param).
        return np.sign(param) * self.lmda_l1


class L2(Regularizer):
    """L2 (ridge) regularizer: penalty = lmda * param**2."""

    def __init__(self, lmda=0.01):
        super().__init__(name="L2")
        self.lmda_l2 = lmda

    def forward(self, param):
        return (param**2) * self.lmda_l2

    def backward(self, param):
        # d/dparam of lmda*param**2 is 2*lmda*param.
        return (2 * param) * self.lmda_l2


class L1L2(Regularizer):
    """Elastic-net style regularizer: sum of an L1 and an L2 penalty."""

    def __init__(self, lmda_l1=0.01, lmda_l2=0.01):
        super().__init__(name="L1L2")
        self.l1_reg = L1(lmda_l1)
        self.l2_reg = L2(lmda_l2)

    def forward(self, param):
        return self.l1_reg.forward(param) + self.l2_reg.forward(param)

    def backward(self, param):
        return self.l1_reg.backward(param) + self.l2_reg.backward(param)
| 21.203704
| 72
| 0.616594
| 161
| 1,145
| 4.111801
| 0.167702
| 0.108761
| 0.135952
| 0.172205
| 0.542296
| 0.478852
| 0.413897
| 0.238671
| 0.126888
| 0.126888
| 0
| 0.046893
| 0.255022
| 1,145
| 53
| 73
| 21.603774
| 0.729191
| 0
| 0
| 0.363636
| 0
| 0
| 0.020961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0.060606
| 0.030303
| 0.181818
| 0.69697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.