hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
387eb64186d9be38a6ef7eb92362e60714c12f89 | 981 | py | Python | src/custom_user_app/urls.py | JackCX777/user_polls_2 | fa8fe9ad4c1fa36b4ea5bb402b3d485852a98d3b | [
"BSD-3-Clause"
] | null | null | null | src/custom_user_app/urls.py | JackCX777/user_polls_2 | fa8fe9ad4c1fa36b4ea5bb402b3d485852a98d3b | [
"BSD-3-Clause"
] | null | null | null | src/custom_user_app/urls.py | JackCX777/user_polls_2 | fa8fe9ad4c1fa36b4ea5bb402b3d485852a98d3b | [
"BSD-3-Clause"
] | null | null | null | from django.urls import path
from custom_user_app.views import (CustomUserLoginView,
CustomUserLogoutView,
CustomUserCreationView,
CustomUserUpdateView,
CustomUserPasswordChangeView,
CustomUserPasswordChangeDoneView)
urlpatterns = [
path('login/', CustomUserLoginView.as_view(), name='user_login'),
path('logout/', CustomUserLogoutView.as_view(), name='user_logout'),
path('registration/', CustomUserCreationView.as_view(), name='user_registration'),
path('profile/<int:profile_id>', CustomUserUpdateView.as_view(), name='user_profile'),
path('password_change/<int:profile_id>', CustomUserPasswordChangeView.as_view(), name='user_password_change'),
path('password_change_done/<int:profile_id>', CustomUserPasswordChangeDoneView.as_view(),
name='password_change_done'),
]
| 51.631579 | 114 | 0.643221 | 80 | 981 | 7.6125 | 0.35 | 0.059113 | 0.098522 | 0.114943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.249745 | 981 | 18 | 115 | 54.5 | 0.827446 | 0 | 0 | 0 | 0 | 0 | 0.213048 | 0.094801 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.3125 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
387f0112c992c1eb1d347a52c37faadb884c7c51 | 1,691 | py | Python | helpme/migrations/0003_auto_20200901_2025.py | renderbox/django-help-me | 6efdaf715d2770305a7187c008354e0f784f9f5b | [
"MIT"
] | 1 | 2020-09-30T22:21:02.000Z | 2020-09-30T22:21:02.000Z | helpme/migrations/0003_auto_20200901_2025.py | renderbox/django-help-me | 6efdaf715d2770305a7187c008354e0f784f9f5b | [
"MIT"
] | 8 | 2020-09-11T00:50:57.000Z | 2022-03-30T22:10:45.000Z | helpme/migrations/0003_auto_20200901_2025.py | renderbox/django-help-me | 6efdaf715d2770305a7187c008354e0f784f9f5b | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-09-01 20:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sites', '0002_alter_domain_unique'),
('helpme', '0002_category_question'),
]
operations = [
migrations.AddField(
model_name='category',
name='excluded_sites',
field=models.ManyToManyField(blank=True, related_name='excluded_categories', to='sites.Site'),
),
migrations.AddField(
model_name='category',
name='global_category',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='question',
name='excluded_sites',
field=models.ManyToManyField(blank=True, related_name='excluded_questions', to='sites.Site'),
),
migrations.AddField(
model_name='question',
name='global_question',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='ticket',
name='question',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='helpme.question'),
),
migrations.AlterField(
model_name='category',
name='sites',
field=models.ManyToManyField(related_name='categories', to='sites.Site'),
),
migrations.AlterField(
model_name='question',
name='sites',
field=models.ManyToManyField(blank=True, related_name='questions', to='sites.Site'),
),
]
| 33.156863 | 127 | 0.595506 | 162 | 1,691 | 6.067901 | 0.320988 | 0.06409 | 0.116989 | 0.137335 | 0.549339 | 0.467955 | 0.399797 | 0.322482 | 0.2706 | 0.144456 | 0 | 0.018092 | 0.280899 | 1,691 | 50 | 128 | 33.82 | 0.790296 | 0.025429 | 0 | 0.590909 | 1 | 0 | 0.181045 | 0.027947 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.113636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38823e27450525f05ac5a168826d916d3ea60ed9 | 382 | py | Python | projects/migrations/0005_alter_location_location.py | Gomax-07/gallery | 934b667d79d9a98e43648864a420cc559e9456e6 | [
"Unlicense"
] | null | null | null | projects/migrations/0005_alter_location_location.py | Gomax-07/gallery | 934b667d79d9a98e43648864a420cc559e9456e6 | [
"Unlicense"
] | null | null | null | projects/migrations/0005_alter_location_location.py | Gomax-07/gallery | 934b667d79d9a98e43648864a420cc559e9456e6 | [
"Unlicense"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-07 02:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0004_location'),
]
operations = [
migrations.AlterField(
model_name='location',
name='location',
field=models.CharField(max_length=150),
),
]
| 20.105263 | 51 | 0.594241 | 40 | 382 | 5.6 | 0.8 | 0.107143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081181 | 0.290576 | 382 | 18 | 52 | 21.222222 | 0.745387 | 0.117801 | 0 | 0 | 1 | 0 | 0.110448 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38838a2148cd8410cf38dde80c33588255de0106 | 487 | py | Python | CoquoBot/order_manager.py | Josef212/CoquoBot | adb9744b04454a4591237937dfb2c9f00da30077 | [
"MIT"
] | null | null | null | CoquoBot/order_manager.py | Josef212/CoquoBot | adb9744b04454a4591237937dfb2c9f00da30077 | [
"MIT"
] | null | null | null | CoquoBot/order_manager.py | Josef212/CoquoBot | adb9744b04454a4591237937dfb2c9f00da30077 | [
"MIT"
] | null | null | null | from order import Order
class OrderManager:
def __init__(self):
self.orders = {}
def user_has_any_order(self, chat_id: int, user: str) -> bool:
order = self.get_order(chat_id)
return order.user_has_any_order(user)
def get_order(self, id: int) -> Order:
if id not in self.orders:
self.orders[id] = Order()
return self.orders[id]
def reset_order(self, id: int) -> None:
self.get_order(id).reset() | 27.055556 | 66 | 0.601643 | 69 | 487 | 4.014493 | 0.347826 | 0.144404 | 0.072202 | 0.108303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.285421 | 487 | 18 | 67 | 27.055556 | 0.795977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.307692 | false | 0 | 0.076923 | 0 | 0.615385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
388a33a4c640a949c0d9f3e5677661be8943cc55 | 4,755 | py | Python | tests/components/unifi/test_services.py | rahulsinghsss/core | 1156275db4e53a556ef58bb2038ae7d8ad103556 | [
"Apache-2.0"
] | 1 | 2021-12-30T09:37:48.000Z | 2021-12-30T09:37:48.000Z | tests/components/unifi/test_services.py | rahulsinghsss/core | 1156275db4e53a556ef58bb2038ae7d8ad103556 | [
"Apache-2.0"
] | 18 | 2021-11-03T06:21:46.000Z | 2022-03-31T06:21:15.000Z | tests/components/unifi/test_services.py | rahulsinghsss/core | 1156275db4e53a556ef58bb2038ae7d8ad103556 | [
"Apache-2.0"
] | 1 | 2021-12-30T09:37:53.000Z | 2021-12-30T09:37:53.000Z | """UniFi service tests."""
from unittest.mock import Mock, patch
from homeassistant.components.unifi.const import DOMAIN as UNIFI_DOMAIN
from homeassistant.components.unifi.services import (
SERVICE_REMOVE_CLIENTS,
UNIFI_SERVICES,
async_setup_services,
async_unload_services,
)
from .test_controller import setup_unifi_integration
async def test_service_setup(hass):
"""Verify service setup works."""
assert UNIFI_SERVICES not in hass.data
with patch(
"homeassistant.core.ServiceRegistry.async_register", return_value=Mock(True)
) as async_register:
await async_setup_services(hass)
assert hass.data[UNIFI_SERVICES] is True
assert async_register.call_count == 1
async def test_service_setup_already_registered(hass):
"""Make sure that services are only registered once."""
hass.data[UNIFI_SERVICES] = True
with patch(
"homeassistant.core.ServiceRegistry.async_register", return_value=Mock(True)
) as async_register:
await async_setup_services(hass)
async_register.assert_not_called()
async def test_service_unload(hass):
"""Verify service unload works."""
hass.data[UNIFI_SERVICES] = True
with patch(
"homeassistant.core.ServiceRegistry.async_remove", return_value=Mock(True)
) as async_remove:
await async_unload_services(hass)
assert hass.data[UNIFI_SERVICES] is False
assert async_remove.call_count == 1
async def test_service_unload_not_registered(hass):
"""Make sure that services can only be unloaded once."""
with patch(
"homeassistant.core.ServiceRegistry.async_remove", return_value=Mock(True)
) as async_remove:
await async_unload_services(hass)
assert UNIFI_SERVICES not in hass.data
async_remove.assert_not_called()
async def test_remove_clients(hass, aioclient_mock):
"""Verify removing different variations of clients work."""
clients = [
{
"first_seen": 100,
"last_seen": 500,
"mac": "00:00:00:00:00:01",
},
{
"first_seen": 100,
"last_seen": 1100,
"mac": "00:00:00:00:00:02",
},
{
"first_seen": 100,
"last_seen": 500,
"fixed_ip": "1.2.3.4",
"mac": "00:00:00:00:00:03",
},
{
"first_seen": 100,
"last_seen": 500,
"hostname": "hostname",
"mac": "00:00:00:00:00:04",
},
{
"first_seen": 100,
"last_seen": 500,
"name": "name",
"mac": "00:00:00:00:00:05",
},
]
config_entry = await setup_unifi_integration(
hass, aioclient_mock, clients_all_response=clients
)
controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
aioclient_mock.clear_requests()
aioclient_mock.post(
f"https://{controller.host}:1234/api/s/{controller.site}/cmd/stamgr",
)
await hass.services.async_call(UNIFI_DOMAIN, SERVICE_REMOVE_CLIENTS, blocking=True)
assert aioclient_mock.mock_calls[0][2] == {
"cmd": "forget-sta",
"macs": ["00:00:00:00:00:01"],
}
async def test_remove_clients_controller_unavailable(hass, aioclient_mock):
"""Verify no call is made if controller is unavailable."""
clients = [
{
"first_seen": 100,
"last_seen": 500,
"mac": "00:00:00:00:00:01",
}
]
config_entry = await setup_unifi_integration(
hass, aioclient_mock, clients_all_response=clients
)
controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
controller.available = False
aioclient_mock.clear_requests()
aioclient_mock.post(
f"https://{controller.host}:1234/api/s/{controller.site}/cmd/stamgr",
)
await hass.services.async_call(UNIFI_DOMAIN, SERVICE_REMOVE_CLIENTS, blocking=True)
assert aioclient_mock.call_count == 0
async def test_remove_clients_no_call_on_empty_list(hass, aioclient_mock):
"""Verify no call is made if no fitting client has been added to the list."""
clients = [
{
"first_seen": 100,
"last_seen": 1100,
"mac": "00:00:00:00:00:01",
}
]
config_entry = await setup_unifi_integration(
hass, aioclient_mock, clients_all_response=clients
)
controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
aioclient_mock.clear_requests()
aioclient_mock.post(
f"https://{controller.host}:1234/api/s/{controller.site}/cmd/stamgr",
)
await hass.services.async_call(UNIFI_DOMAIN, SERVICE_REMOVE_CLIENTS, blocking=True)
assert aioclient_mock.call_count == 0
| 31.282895 | 87 | 0.646477 | 586 | 4,755 | 5 | 0.204778 | 0.043686 | 0.049147 | 0.043686 | 0.76041 | 0.722526 | 0.637543 | 0.597952 | 0.580205 | 0.554949 | 0 | 0.04495 | 0.242061 | 4,755 | 151 | 88 | 31.490066 | 0.768036 | 0.004416 | 0 | 0.525 | 0 | 0 | 0.168274 | 0.044077 | 0 | 0 | 0 | 0 | 0.091667 | 1 | 0 | false | 0 | 0.033333 | 0 | 0.033333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
388a3e1c967bd69504b07a1ff1bfdb07f5722281 | 632 | py | Python | {{cookiecutter.directory_name}}/config/settings/development.py | ragnarok22/cookiecutter-django | 082196dde5ad932bf99bee138dc80de8c3823e03 | [
"Apache-2.0"
] | 2 | 2021-07-23T18:58:49.000Z | 2022-02-23T18:44:40.000Z | {{cookiecutter.directory_name}}/config/settings/development.py | ragnarok22/cookiecutter-django | 082196dde5ad932bf99bee138dc80de8c3823e03 | [
"Apache-2.0"
] | null | null | null | {{cookiecutter.directory_name}}/config/settings/development.py | ragnarok22/cookiecutter-django | 082196dde5ad932bf99bee138dc80de8c3823e03 | [
"Apache-2.0"
] | null | null | null | """
This is the settings file that you use when you're working on the project locally.
Local development-specific settings include DEBUG mode, log level, and activation of developer tools like django-debug-toolbar
"""
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qov#ce&bl3z8@ymehv1byt^beru%el-0wjo%e#1q8#og6331ik'
ALLOWED_HOSTS = ['*']
MEDIA_ROOT = os.path.join(BASE_DIR, '{{cookiecutter.directory_name}}', 'media')
# email settings
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| 31.6 | 118 | 0.764241 | 94 | 632 | 5.074468 | 0.797872 | 0.062893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018215 | 0.131329 | 632 | 19 | 119 | 33.263158 | 0.850638 | 0.547468 | 0 | 0 | 0 | 0 | 0.481884 | 0.460145 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3892d5674879bbcf71468a4b3b615df537552e19 | 729 | py | Python | HackTheVote/2020/fileshare/cleaner.py | mystickev/ctf-archives | 89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e | [
"MIT"
] | 1 | 2021-11-02T20:53:58.000Z | 2021-11-02T20:53:58.000Z | HackTheVote/2020/fileshare/cleaner.py | ruhan-islam/ctf-archives | 8c2bf6a608c821314d1a1cfaa05a6cccef8e3103 | [
"MIT"
] | null | null | null | HackTheVote/2020/fileshare/cleaner.py | ruhan-islam/ctf-archives | 8c2bf6a608c821314d1a1cfaa05a6cccef8e3103 | [
"MIT"
] | 1 | 2021-12-19T11:06:24.000Z | 2021-12-19T11:06:24.000Z | import os, time, shutil
def get_used_dirs():
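    # Collect the /tmp/fileshare.* directories that are currently the working directory of a running process.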
pids = [p for p in os.listdir("/proc") if p.isnumeric()]
res = set()
for p in pids:
try:
path = os.path.realpath("/proc/%s/cwd"%p)
if path.startswith("/tmp/fileshare."):
res.add(path)
except:
pass
return res
while True:
try:
dirs = ["/tmp/"+d for d in os.listdir("/tmp") if d.startswith("fileshare.")]
used = get_used_dirs()
for d in dirs:
if d not in used:
try:
os.system("umount %s/proc"%d)
shutil.rmtree(d)
except:
pass
except:
pass
time.sleep(5)
| 25.137931 | 84 | 0.463649 | 92 | 729 | 3.630435 | 0.423913 | 0.08982 | 0.065868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002331 | 0.411523 | 729 | 28 | 85 | 26.035714 | 0.776224 | 0 | 0 | 0.346154 | 0 | 0 | 0.089163 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0.115385 | 0.038462 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
389e3359d1c51e16167fb066518dcce270c6cf1d | 1,150 | py | Python | qa_tool/models.py | pg-irc/pathways-backend | 05a8c4e750523d2d081b030a248c5444d1ed7992 | [
"BSD-3-Clause"
] | 12 | 2017-08-30T18:21:00.000Z | 2021-12-09T04:04:17.000Z | qa_tool/models.py | pg-irc/pathways-backend | 05a8c4e750523d2d081b030a248c5444d1ed7992 | [
"BSD-3-Clause"
] | 424 | 2017-08-08T18:32:14.000Z | 2022-03-30T21:42:51.000Z | qa_tool/models.py | pg-irc/pathways-backend | 05a8c4e750523d2d081b030a248c5444d1ed7992 | [
"BSD-3-Clause"
] | 7 | 2017-09-29T21:14:37.000Z | 2019-12-30T21:07:37.000Z | from django.contrib.gis.db import models
from common.models import (RequiredURLField,
OptionalTextField, RequiredCharField)
from human_services.locations.models import ServiceAtLocation
from search.models import Task
from users.models import User
class Algorithm(models.Model):
url = RequiredURLField()
name = RequiredCharField(max_length=200)
notes = OptionalTextField()
class Meta:
ordering = ['id']
class SearchLocation(models.Model):
name = OptionalTextField()
point = models.PointField(blank=True, null=True)
class Meta:
ordering = ['id']
class RelevancyScore(models.Model):
value = models.IntegerField()
time_stamp = models.DateTimeField()
algorithm = models.ForeignKey(Algorithm, on_delete=models.PROTECT)
topic = models.ForeignKey(Task, on_delete=models.PROTECT)
search_location = models.ForeignKey(SearchLocation, on_delete=models.PROTECT)
user = models.ForeignKey(User, on_delete=models.PROTECT)
service_at_location = models.ForeignKey(
ServiceAtLocation, on_delete=models.PROTECT)
class Meta:
ordering = ["id"]
| 30.263158 | 81 | 0.722609 | 123 | 1,150 | 6.666667 | 0.414634 | 0.097561 | 0.085366 | 0.128049 | 0.058537 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003205 | 0.186087 | 1,150 | 37 | 82 | 31.081081 | 0.872863 | 0 | 0 | 0.178571 | 0 | 0 | 0.005217 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.178571 | 0 | 0.821429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
38a0b921f983b2d2b9365a4bb47c16ebd9b5348e | 449 | py | Python | wazimap_ng/boundaries/migrations/0007_auto_20200121_0907.py | arghyaiitb/wazimap-ng | 2a77860526d865b8fd0c22a2204f121fdb3b28a0 | [
"Apache-2.0"
] | 11 | 2019-12-31T20:27:22.000Z | 2022-03-10T03:55:38.000Z | wazimap_ng/boundaries/migrations/0007_auto_20200121_0907.py | arghyaiitb/wazimap-ng | 2a77860526d865b8fd0c22a2204f121fdb3b28a0 | [
"Apache-2.0"
] | 164 | 2020-02-06T15:02:22.000Z | 2022-03-30T22:42:00.000Z | wazimap_ng/boundaries/migrations/0007_auto_20200121_0907.py | arghyaiitb/wazimap-ng | 2a77860526d865b8fd0c22a2204f121fdb3b28a0 | [
"Apache-2.0"
] | 16 | 2020-01-03T20:30:24.000Z | 2022-01-11T11:05:15.000Z | # Generated by Django 2.2.8 on 2020-01-21 09:07
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('boundaries', '0006_worldborder'),
]
operations = [
migrations.AlterField(
model_name='worldborder',
name='mpoly',
field=django.contrib.gis.db.models.fields.PolygonField(srid=4326),
),
]
| 22.45 | 78 | 0.632517 | 50 | 449 | 5.64 | 0.68 | 0.092199 | 0.113475 | 0.12766 | 0.212766 | 0.212766 | 0 | 0 | 0 | 0 | 0 | 0.068249 | 0.249443 | 449 | 19 | 79 | 23.631579 | 0.768546 | 0.100223 | 0 | 0 | 1 | 0 | 0.104478 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38a1aeec73b5c25381b82d560fdb3ca48a37c74c | 701 | py | Python | autoPyTorch/pipeline/components/setup/network_initializer/NoInit.py | ravinkohli/Auto-PyTorch | a1512d56d4db89133e895e85765e3b72afbfe157 | [
"Apache-2.0"
] | 1 | 2021-05-12T10:11:58.000Z | 2021-05-12T10:11:58.000Z | autoPyTorch/pipeline/components/setup/network_initializer/NoInit.py | maxpark/Auto-PyTorch | 06e67de5017b4cccad9398e24a3d9f0bd8176da3 | [
"Apache-2.0"
] | 34 | 2020-10-06T08:06:46.000Z | 2021-01-21T13:23:34.000Z | autoPyTorch/pipeline/components/setup/network_initializer/NoInit.py | maxpark/Auto-PyTorch | 06e67de5017b4cccad9398e24a3d9f0bd8176da3 | [
"Apache-2.0"
] | 1 | 2020-10-14T12:25:47.000Z | 2020-10-14T12:25:47.000Z | from typing import Callable
import torch
from autoPyTorch.pipeline.components.setup.network_initializer.base_network_initializer import (
BaseNetworkInitializerComponent
)
class NoInit(BaseNetworkInitializerComponent):
"""
No initialization on the weights/bias
"""
def weights_init(self) -> Callable:
"""Returns the actual PyTorch model, that is dynamically created
from a self.config object.
self.config is a dictionary created form a given config in the config space.
It contains the necessary information to build a network.
"""
def initialization(m: torch.nn.Module) -> None:
pass
return initialization
| 28.04 | 96 | 0.707561 | 79 | 701 | 6.227848 | 0.658228 | 0.073171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.232525 | 701 | 24 | 97 | 29.208333 | 0.914498 | 0.373752 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0.1 | 0.3 | 0 | 0.7 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
38adf5ebd6f269f17c1fa14e7dbf39222e45d753 | 1,362 | py | Python | python/dataProcessing/generatePlots.py | Maplenormandy/list-62x | c1731d0610fdf9e58cb2792d706e8904c549fbd6 | [
"MIT"
] | 1 | 2020-11-07T12:40:59.000Z | 2020-11-07T12:40:59.000Z | python/dataProcessing/generatePlots.py | Maplenormandy/list-62x | c1731d0610fdf9e58cb2792d706e8904c549fbd6 | [
"MIT"
] | null | null | null | python/dataProcessing/generatePlots.py | Maplenormandy/list-62x | c1731d0610fdf9e58cb2792d706e8904c549fbd6 | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.stats.weightstats import ttost_paired
data = pd.read_csv(open('combined_data.csv'))
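# The 'Baseline' column marks which of the two tracking runs (0 or 1) was the baseline; assign the STF counts accordingly.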
for t in data.index:
if int(data.loc[t, 'Baseline']) == 0:
data.loc[t, 'STF Baseline'] = data.loc[t, 'Succesfully Tracked Features 0']
data.loc[t, 'STF Experiment'] = data.loc[t, 'Succesfully Tracked Features 1']
else:
data.loc[t, 'STF Baseline'] = data.loc[t, 'Succesfully Tracked Features 1']
data.loc[t, 'STF Experiment'] = data.loc[t, 'Succesfully Tracked Features 0']
pvalue, stats1, stats2 = ttost_paired(data['STF Experiment'], data['STF Baseline'], 0, 10000)
print(pvalue)
print(stats1)
print(stats2)
plt.scatter(data.index, data['STF Baseline'], label='baseline')
plt.scatter(data.index, data['STF Experiment'], color="green", label='experiment')
plt.legend(loc='upper right')
plt.draw()
dataMax = max(data['STF Baseline'].max(), data['STF Experiment'].max())
bins = np.linspace(0, dataMax)
plt.figure()
plt.hist(data['STF Baseline'], alpha = 0.5, bins=bins, label="baseline")
plt.hist(data['STF Experiment'], alpha = 0.5, bins=bins, label="experiment")
plt.legend(loc='upper right')
plt.draw()
plt.figure()
plt.hist(data['STF Experiment'] - data['STF Baseline'], bins=30, color="red")
plt.xlabel('Experiment - Baseline')
plt.show()
| 31.674419 | 93 | 0.696035 | 203 | 1,362 | 4.650246 | 0.300493 | 0.074153 | 0.076271 | 0.04661 | 0.554025 | 0.527542 | 0.326271 | 0.322034 | 0.322034 | 0.228814 | 0 | 0.018676 | 0.135095 | 1,362 | 42 | 94 | 32.428571 | 0.782683 | 0 | 0 | 0.193548 | 0 | 0 | 0.304188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.129032 | null | null | 0.096774 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38af66085e0a385bb524dc4be264dbe2d898daba | 1,829 | py | Python | zunzun/app/app.py | aprezcuba24/zunzun | cc294d9dfb84695be0ed1425cf946a0f4ea644a9 | [
"MIT"
] | null | null | null | zunzun/app/app.py | aprezcuba24/zunzun | cc294d9dfb84695be0ed1425cf946a0f4ea644a9 | [
"MIT"
] | null | null | null | zunzun/app/app.py | aprezcuba24/zunzun | cc294d9dfb84695be0ed1425cf946a0f4ea644a9 | [
"MIT"
] | null | null | null | import importlib
from zunzun import CommandRegister
from injector import inject, singleton
from click.core import Group
from zunzun import ListenerConnector
from zunzun import inspect
from pathlib import Path
@singleton
class App:
name = ""
listeners_config: list = []
@inject
def __init__(
self, command_register: CommandRegister, listener_connector: ListenerConnector
):
self.command_register = command_register
self.listener_connector = listener_connector
self._register_listeners()
def register_services(self, injector):
pass
def get_commands(self):
return self.command_register.add_commands(
Group(self.name), self.get_or_create_module("commands", "core.commands")
)
def _register_listeners(self):
for args in self.listeners_config:
self.listener_connector.connect(*args)
def get_config(self, name, default):
return default
def get_or_create_module(self, name, config_name=None):
if config_name:
name = self.get_config(config_name, name)
file = inspect.getfile(self.__class__)
parent = Path(file).parent
folder = Path(f"{parent}/{name}")
if not folder.is_dir():
folder.mkdir()
init_file = Path(f"{folder}/__init__.py")
if not init_file.is_file():
init_file.touch()
return importlib.import_module(f"..{name}", self.__module__)
@property
def path(self):
dotted_path = str(self.__module__)
dir_path, _ = dotted_path.rsplit(".", 1)
return dir_path
def get_module(self, module_name):
return importlib.import_module(self.get_module_name(module_name))
def get_module_name(self, module_name):
return f"{self.path}.{module_name}"
| 29.5 | 86 | 0.668671 | 220 | 1,829 | 5.245455 | 0.272727 | 0.051993 | 0.041594 | 0.029463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000718 | 0.238928 | 1,829 | 61 | 87 | 29.983607 | 0.828305 | 0 | 0 | 0 | 0 | 0 | 0.049207 | 0.013669 | 0 | 0 | 0 | 0 | 0 | 1 | 0.18 | false | 0.02 | 0.18 | 0.08 | 0.54 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
38b116711be814607ed2866ab771fa7d05349727 | 807 | py | Python | main/migrations/0013_game.py | AyushHazard/Samskritam | c5db8e712afe24737cacc6e6f3f27e3fcbe83e26 | [
"MIT"
] | null | null | null | main/migrations/0013_game.py | AyushHazard/Samskritam | c5db8e712afe24737cacc6e6f3f27e3fcbe83e26 | [
"MIT"
] | null | null | null | main/migrations/0013_game.py | AyushHazard/Samskritam | c5db8e712afe24737cacc6e6f3f27e3fcbe83e26 | [
"MIT"
] | 3 | 2021-01-05T18:40:57.000Z | 2021-05-14T07:56:20.000Z | # Generated by Django 3.1.2 on 2021-01-01 05:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0012_attempted_contests'),
]
operations = [
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('game_type', models.CharField(max_length=200)),
('description', models.TextField(default='-')),
('imageurl', models.TextField(blank=True, default='-', null=True)),
('imagename', models.TextField(blank=True, default='-', null=True)),
],
),
]
| 32.28 | 114 | 0.567534 | 80 | 807 | 5.625 | 0.625 | 0.1 | 0.08 | 0.106667 | 0.293333 | 0.173333 | 0.173333 | 0 | 0 | 0 | 0 | 0.043029 | 0.28005 | 807 | 24 | 115 | 33.625 | 0.731497 | 0.055762 | 0 | 0 | 1 | 0 | 0.103947 | 0.030263 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38b351b78225843bd3597a610a0f89e29687ff5d | 2,224 | py | Python | genome_designer/debug/2014_08_05_de_novo_on_dep_data_with_intervals.py | churchlab/millstone | ddb5d003a5b8a7675e5a56bafd5c432d9642b473 | [
"MIT"
] | 45 | 2015-09-30T14:55:33.000Z | 2021-06-28T02:33:30.000Z | genome_designer/debug/2014_08_05_de_novo_on_dep_data_with_intervals.py | churchlab/millstone | ddb5d003a5b8a7675e5a56bafd5c432d9642b473 | [
"MIT"
] | 261 | 2015-06-03T20:41:56.000Z | 2022-03-07T08:46:10.000Z | genome_designer/debug/2014_08_05_de_novo_on_dep_data_with_intervals.py | churchlab/millstone | ddb5d003a5b8a7675e5a56bafd5c432d9642b473 | [
"MIT"
] | 22 | 2015-06-04T20:43:10.000Z | 2022-02-27T08:27:34.000Z | """
Re-running de novo assembly, this time including reads that map to mobile elements.
"""
import os
import sys
# Setup Django environment.
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from Bio import SeqIO
from experimental.de_novo_assembly import run_velvet
from main.models import *
def identify_intervals(ag):
# First identify intervals that map to mobile elements.
genbank_filepath = get_dataset_with_type(ag.reference_genome,
Dataset.TYPE.REFERENCE_GENOME_GENBANK).get_absolute_location()
# Extract the proper genome record.
genome_record = None
with open(genbank_filepath) as input_fh:
genome_record_list = SeqIO.parse(input_fh, 'genbank')
for rec in genome_record_list:
if rec.name == 'CP006698':
genome_record = rec
assert genome_record
# Pick out the intervals we want:
# * mobile elements
# * lon gene
intervals = []
found_lon = False
for f in genome_record.features:
if f.type == 'mobile_element':
intervals.append((f.location.start, f.location.end))
if (f.type == 'gene' and 'gene' in f.qualifiers and
f.qualifiers['gene'][0] in ['lon', 'clpX']):
found_lon = True
intervals.append((f.location.start, f.location.end))
assert found_lon
assert 48 == len(intervals)
# Add buffer to each interval in case reads start before or after.
buffer_size = 150
def _add_buffer(i):
return (
max(i[0] - buffer_size, 0),
min(i[1] + buffer_size, len(genome_record))
)
intervals = [_add_buffer(i) for i in intervals]
return intervals
def main():
ag = AlignmentGroup.objects.get(uid='edc74a3d')
intervals = identify_intervals(ag)
for idx, sa in enumerate(ag.experimentsampletoalignment_set.all()):
        print(idx + 1, 'of', ag.experimentsampletoalignment_set.count())
run_velvet(sa, force_include_reads_in_intervals=intervals,
output_dir_name='velvet_mobile_lon_clpX', force_rerun=True)
if __name__ == '__main__':
main()
| 30.054054 | 83 | 0.659173 | 288 | 2,224 | 4.868056 | 0.427083 | 0.068474 | 0.019971 | 0.021398 | 0.091298 | 0.058488 | 0.058488 | 0.058488 | 0 | 0 | 0 | 0.011243 | 0.240108 | 2,224 | 73 | 84 | 30.465753 | 0.818343 | 0.110162 | 0 | 0.043478 | 0 | 0 | 0.064362 | 0.023404 | 0 | 0 | 0 | 0 | 0.065217 | 0 | null | null | 0 | 0.108696 | null | null | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38b43f59bf0131f94f4000fe15af73705057fab7 | 288 | py | Python | P942.py | Muntaha-Islam0019/Leetcode-Solutions | 0bc56ce43a6d8ad10461b69078166a2a5b913e7f | [
"MIT"
] | null | null | null | P942.py | Muntaha-Islam0019/Leetcode-Solutions | 0bc56ce43a6d8ad10461b69078166a2a5b913e7f | [
"MIT"
] | null | null | null | P942.py | Muntaha-Islam0019/Leetcode-Solutions | 0bc56ce43a6d8ad10461b69078166a2a5b913e7f | [
"MIT"
] | null | null | null | class Solution:
def diStringMatch(self, S):
low,high=0,len(S)
ans=[]
for i in S:
if i=="I":
ans.append(low)
low+=1
else:
ans.append(high)
high-=1
return ans +[low]
| 22.153846 | 32 | 0.381944 | 33 | 288 | 3.333333 | 0.575758 | 0.163636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020979 | 0.503472 | 288 | 12 | 33 | 24 | 0.748252 | 0 | 0 | 0 | 0 | 0 | 0.003472 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38b765a30bc55c0417892d2304fc6cfeafcf844e | 2,663 | py | Python | forecasting/src/autogluon/forecasting/trainer/auto_trainer.py | sgdread/autogluon | fa95c72a07066dc5380fccf8bbce04b5c031fc68 | [
"Apache-2.0"
] | null | null | null | forecasting/src/autogluon/forecasting/trainer/auto_trainer.py | sgdread/autogluon | fa95c72a07066dc5380fccf8bbce04b5c031fc68 | [
"Apache-2.0"
] | null | null | null | forecasting/src/autogluon/forecasting/trainer/auto_trainer.py | sgdread/autogluon | fa95c72a07066dc5380fccf8bbce04b5c031fc68 | [
"Apache-2.0"
] | null | null | null | import logging
from typing import Dict, Union, Optional, Any
from ..models.presets import get_preset_models
from .abstract_trainer import AbstractForecastingTrainer, TimeSeriesDataFrame
logger = logging.getLogger(__name__)
class AutoForecastingTrainer(AbstractForecastingTrainer):
def construct_model_templates(self, hyperparameters, **kwargs):
path = kwargs.pop("path", self.path)
eval_metric = kwargs.pop("eval_metric", self.eval_metric)
quantile_levels = kwargs.pop("quantile_levels", self.quantile_levels)
hyperparameter_tune = kwargs.get("hyperparameter_tune", False)
return get_preset_models(
path=path,
eval_metric=eval_metric,
prediction_length=self.prediction_length,
freq=self.freq,
hyperparameters=hyperparameters,
hyperparameter_tune=hyperparameter_tune,
quantiles=quantile_levels,
invalid_model_names=self._get_banned_model_names(),
)
# todo: implement cross-validation / holdout strategy
# todo: including CVSplitter logic
def fit(
self,
train_data: TimeSeriesDataFrame,
hyperparameters: Union[str, Dict[Any, Dict]],
val_data: Optional[TimeSeriesDataFrame] = None,
hyperparameter_tune: bool = False,
time_limit: float = None,
infer_limit: float = None, # todo: implement
):
"""
Fit a set of forecasting models specified by the `hyperparameters`
dictionary that maps model names to their specified hyperparameters.
Parameters
----------
train_data: TimeSeriesDataFrame
Training data for fitting time series forecasting models.
hyperparameters: str or Dict
A dictionary mapping selected model names, model classes or model factory to hyperparameter
settings. Model names should be present in `trainer.presets.DEFAULT_MODEL_NAMES`. Optionally,
the user may provide one of "toy", "toy_hpo", "default", "default_hpo" to specify
presets.
val_data: TimeSeriesDataFrame
Optional validation data set to report validation scores on.
hyperparameter_tune
Whether to perform hyperparameter tuning when learning individual models.
time_limit
Time limit for training
infer_limit
Time limit for inference
"""
self._train_multi(
train_data,
val_data=val_data,
hyperparameters=hyperparameters,
hyperparameter_tune=hyperparameter_tune,
time_limit=time_limit,
)
| 39.746269 | 105 | 0.66917 | 273 | 2,663 | 6.326007 | 0.410256 | 0.083382 | 0.02432 | 0.055588 | 0.076433 | 0.076433 | 0 | 0 | 0 | 0 | 0 | 0 | 0.267368 | 2,663 | 66 | 106 | 40.348485 | 0.885187 | 0.356365 | 0 | 0.108108 | 0 | 0 | 0.031736 | 0 | 0 | 0 | 0 | 0.015152 | 0 | 1 | 0.054054 | false | 0 | 0.108108 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38c660adbca3d15d5ca02084209f151a1d111447 | 19,353 | py | Python | bmtk/simulator/popnet/popsimulator.py | hernando/bmtk | 57e6924819a74f41ed94a34f55e6ebed0525d037 | [
"BSD-3-Clause"
] | 1 | 2019-03-27T12:23:09.000Z | 2019-03-27T12:23:09.000Z | bmtk/simulator/popnet/popsimulator.py | hernando/bmtk | 57e6924819a74f41ed94a34f55e6ebed0525d037 | [
"BSD-3-Clause"
] | null | null | null | bmtk/simulator/popnet/popsimulator.py | hernando/bmtk | 57e6924819a74f41ed94a34f55e6ebed0525d037 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2017. Allen Institute. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import logging
from six import string_types
from dipde.internals.internalpopulation import InternalPopulation
from dipde.internals.externalpopulation import ExternalPopulation
from dipde.internals.connection import Connection
import dipde
from bmtk.simulator.core.simulator import Simulator
from . import config as cfg
from . import utils as poputils
import bmtk.simulator.utils.simulation_inputs as inputs
from bmtk.utils.io import spike_trains, firing_rates
class PopSimulator(Simulator):
def __init__(self, graph, dt=0.0001, tstop=0.0, overwrite=True):
self._graph = graph
self._tstop = tstop
self._dt = dt
self._rates_file = None # name of file where the output is saved
self.__population_list = [] # list of all populations, internal and external
#self.__population_table = {graph: {} for graph in self._graph.networks} # population lookup by [network][id]
self.__connection_list = [] # list of all connections
self._dipde_network = None # reference to dipde.Network object
# diction of rates for every external network/pop_id. Prepopulate dictionary with populations whose rates
# have already been manually set, otherwise they should use one of the add_rates_* function.
#self._rates = {network: {pop.pop_id: pop.firing_rate for pop in self._graph.get_populations(network)
# if not pop.is_internal and pop.is_firing_rate_set}
# for network in self._graph.networks}
"""
for network in self._graph.networks:
for pop in self._graph.get_populations(network):
if pop.is_internal:
dipde_pop = self.__create_internal_pop(pop)
else:
if pop.is_firing_rate_set:
rates = pop.firing_rate
"""
@property
def tstop(self):
return self._tstop
@tstop.setter
def tstop(self, value):
self._tstop = value
@property
def dt(self):
return self._dt
@dt.setter
def dt(self, value):
self._dt = value
@property
def rates_file(self):
return self._rates_file
@rates_file.setter
def rates_file(self, value):
self._rates_file = value
@property
def populations(self):
return self.__population_list
@property
def connections(self):
return self.__connection_list
def add_rates_nwb(self, network, nwb_file, trial, force=False):
"""Creates external population firing rates from an NWB file.
        Will iterate through a processing trial of an NWB file, assigning each gid to the population it belongs to and
taking the average firing rate.
This should be done before calling build_cells(). If a population has already been assigned a firing rate an
error will occur unless force=True.
:param network: Name of network with external populations.
:param nwb_file: NWB file with spike rates.
:param trial: trial id in NWB file
:param force: will overwrite existing firing rates
"""
existing_rates = self._rates[network] # TODO: validate network exists
# Get all unset, external populations in a network.
network_pops = self._graph.get_populations(network)
selected_pops = []
for pop in network_pops:
if pop.is_internal:
continue
elif not force and pop.pop_id in existing_rates:
print('Firing rate for {}/{} has already been set, skipping.'.format(network, pop.pop_id))
else:
selected_pops.append(pop)
if selected_pops:
# assign firing rates from NWB file
# TODO:
rates_dict = poputils.get_firing_rate_from_nwb(selected_pops, nwb_file, trial)
self._rates[network].update(rates_dict)
def add_rate_hz(self, network, pop_id, rate, force=False):
"""Set the firing rate of an external population.
This should be done before calling build_cells(). If a population has already been assigned a firing rate an
error will occur unless force=True.
        :param network: name of the network containing the desired external population
:param pop_id: name/id of external population
:param rate: firing rate in Hz.
:param force: will overwrite existing firing rates
"""
self.__add_rates_validator(network, pop_id, force)
self._rates[network][pop_id] = rate
def __add_rates_validator(self, network, pop_id, force):
if network not in self._graph.networks:
raise Exception('No network {} found in PopGraph.'.format(network))
pop = self._graph.get_population(network, pop_id)
if pop is None:
raise Exception('No population with id {} found in {}.'.format(pop_id, network))
if pop.is_internal:
raise Exception('Population {} in {} is not an external population.'.format(pop_id, network))
if not force and pop_id in self._rates[network]:
raise Exception('The firing rate for {}/{} already set and force=False.'.format(network, pop_id))
def _get_rate(self, network, pop):
"""Gets the firing rate for a given population"""
return self._rates[network][pop.pop_id]
def build_populations(self):
"""Build dipde Population objects from graph nodes.
        To calculate external populations' firing rates, it first sees if a population's firing rate has been manually
        set in the graph. Otherwise it attempts to calculate the firing rate from the call to add_rate_hz, add_rates_nwb,
etc. (which should be called first).
"""
for network in self._graph.networks:
for pop in self._graph.get_populations(network):
if pop.is_internal:
dipde_pop = self.__create_internal_pop(pop)
else:
dipde_pop = self.__create_external_pop(pop, self._get_rate(network, pop))
self.__population_list.append(dipde_pop)
self.__population_table[network][pop.pop_id] = dipde_pop
def set_logging(self, log_file):
# TODO: move this out of the function, put in io class
if os.path.exists(log_file):
os.remove(log_file)
# get root logger
logger = logging.getLogger()
for h in list(logger.handlers):
# remove existing handlers that will write to console.
logger.removeHandler(h)
# creates handler that write to log_file
logging.basicConfig(filename=log_file, filemode='w', level=logging.DEBUG)
def set_external_connections(self, network_name):
"""Sets the external connections for populations in a given network.
:param network_name: name of external network with External Populations to connect to internal pops.
"""
for edge in self._graph.get_edges(network_name):
# Get source and target populations
src = edge.source
source_pop = self.__population_table[src.network][src.pop_id]
trg = edge.target
target_pop = self.__population_table[trg.network][trg.pop_id]
# build a connection.
self.__connection_list.append(self.__create_connection(source_pop, target_pop, edge))
def set_recurrent_connections(self):
"""Initialize internal connections."""
for network in self._graph.internal_networks():
for edge in self._graph.get_edges(network):
src = edge.source
source_pop = self.__population_table[src.network][src.pop_id]
trg = edge.target
target_pop = self.__population_table[trg.network][trg.pop_id]
self.__connection_list.append(self.__create_connection(source_pop, target_pop, edge))
def run(self, tstop=None):
# TODO: Check if cells/connections need to be rebuilt.
        # Create the network
dipde_pops = [p.dipde_obj for p in self._graph.populations]
dipde_conns = [c.dipde_obj for c in self._graph.connections]
#print dipde_pops
#print dipde_conns
#exit()
self._dipde_network = dipde.Network(population_list=dipde_pops, connection_list=dipde_conns)
#self._dipde_network = dipde.Network(population_list=self._graph.populations,
# connection_list=self._graph.connections)
if tstop is None:
tstop = self.tstop
#print tstop, self.dt
#print self._graph.populations
#exit()
print("running simulation...")
self._dipde_network.run(t0=0.0, tf=tstop, dt=self.dt)
# TODO: make record_rates optional?
self.__record_rates()
print("done simulation.")
def __create_internal_pop(self, params):
# TODO: use getter methods directly in case arguments are not stored in dynamics params
# pop = InternalPopulation(**params.dynamics_params)
pop = InternalPopulation(**params.model_params)
return pop
def __create_external_pop(self, params, rates):
pop = ExternalPopulation(rates, record=False)
return pop
def __create_connection(self, source, target, params):
return Connection(source, target, nsyn=params.nsyns, delays=params.delay, weights=params.weight)
def __record_rates(self):
with open(self._rates_file, 'w') as f:
for pop in self._graph.internal_populations:
if pop.record:
for time, rate in zip(pop.dipde_obj.t_record, pop.dipde_obj.firing_rate_record):
f.write('{} {} {}\n'.format(pop.pop_id, time, rate))
'''
@classmethod
def from_config(cls, configure, graph):
# load the json file or object
if isinstance(configure, basestring):
config = cfg.from_json(configure, validate=True)
elif isinstance(configure, dict):
config = configure
else:
raise Exception('Could not convert {} (type "{}") to json.'.format(configure, type(configure)))
network = cls(graph)
if 'run' not in config:
raise Exception('Json file is missing "run" entry. Unable to build Bionetwork.')
run_dict = config['run']
# Create the output file
if 'output' in config:
out_dict = config['output']
rates_file = out_dict.get('rates_file', None)
if rates_file is not None:
# create directory if required
network.rates_file = rates_file
parent_dir = os.path.dirname(rates_file)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
if 'log_file' in out_dict:
log_file = out_dict['log_file']
network.set_logging(log_file)
# get network parameters
if 'duration' in run_dict:
network.duration = run_dict['duration']
if 'dt' in run_dict:
network.dt = run_dict['dt']
# TODO: need to get firing rates before building populations
if 'input' in config:
for netinput in config['input']:
if netinput['type'] == 'external_spikes' and netinput['format'] == 'nwb' and netinput['active']:
# Load external network spike trains from an NWB file.
print('Setting firing rates for {} from {}.'.format(netinput['source_nodes'], netinput['file']))
network.add_rates_nwb(netinput['source_nodes'], netinput['file'], netinput['trial'])
if netinput['type'] == 'pop_rate':
print('Setting {}/{} to fire at {} Hz.'.format(netinput['source_nodes'], netinput['pop_id'], netinput['rate']))
network.add_rate_hz(netinput['source_nodes'], netinput['pop_id'], netinput['rate'])
# TODO: take input as function with Population argument
# Build populations
print('Building Populations')
network.build_populations()
# Build recurrent connections
if run_dict['connect_internal']:
print('Building recurrention connections')
network.set_recurrent_connections()
# Build external connections. Set connection to default True and turn off only if explicitly stated.
# NOTE: It might be better to set to default off?!?! Need to dicuss what would be more intuitive for the users.
# TODO: ignore case of network name
external_network_settings = {name: True for name in graph.external_networks()}
if 'connect_external' in run_dict:
external_network_settings.update(run_dict['connect_external'])
for netname, connect in external_network_settings.items():
if connect:
print('Setting external connections for {}'.format(netname))
network.set_external_connections(netname)
return network
'''
@classmethod
def from_config(cls, configure, graph):
# load the json file or object
if isinstance(configure, string_types):
config = cfg.from_json(configure, validate=True)
elif isinstance(configure, dict):
config = configure
else:
raise Exception('Could not convert {} (type "{}") to json.'.format(configure, type(configure)))
if 'run' not in config:
raise Exception('Json file is missing "run" entry. Unable to build Bionetwork.')
run_dict = config['run']
# Get network parameters
# step time (dt) is set in the kernel and should be passed
overwrite = run_dict['overwrite_output_dir'] if 'overwrite_output_dir' in run_dict else True
print_time = run_dict['print_time'] if 'print_time' in run_dict else False
dt = run_dict['dt'] # TODO: make sure dt exists
tstop = float(config.tstop) / 1000.0
network = cls(graph, dt=config.dt, tstop=tstop, overwrite=overwrite)
if 'output_dir' in config['output']:
network.output_dir = config['output']['output_dir']
# network.spikes_file = config['output']['spikes_ascii']
if 'block_run' in run_dict and run_dict['block_run']:
if 'block_size' not in run_dict:
raise Exception('"block_run" is set to True but "block_size" not found.')
network._block_size = run_dict['block_size']
if 'duration' in run_dict:
network.duration = run_dict['duration']
graph.io.log_info('Building cells.')
graph.build_nodes()
graph.io.log_info('Building recurrent connections')
graph.build_recurrent_edges()
for sim_input in inputs.from_config(config):
node_set = graph.get_node_set(sim_input.node_set)
if sim_input.input_type == 'spikes':
spikes = spike_trains.SpikesInput.load(name=sim_input.name, module=sim_input.module,
input_type=sim_input.input_type, params=sim_input.params)
graph.io.log_info('Build virtual cell stimulations for {}'.format(sim_input.name))
graph.add_spike_trains(spikes, node_set)
else:
graph.io.log_info('Build virtual cell stimulations for {}'.format(sim_input.name))
rates = firing_rates.RatesInput(sim_input.params)
graph.add_rates(rates, node_set)
# Create the output file
if 'output' in config:
out_dict = config['output']
rates_file = out_dict.get('rates_file', None)
if rates_file is not None:
rates_file = rates_file if os.path.isabs(rates_file) else os.path.join(config.output_dir, rates_file)
# create directory if required
network.rates_file = rates_file
parent_dir = os.path.dirname(rates_file)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
if 'log_file' in out_dict:
log_file = out_dict['log_file']
network.set_logging(log_file)
# exit()
# build the cells
#io.log('Building cells')
#network.build_cells()
# Build internal connections
#if run_dict['connect_internal']:
# io.log('Creating recurrent connections')
# network.set_recurrent_connections()
# Build external connections. Set connection to default True and turn off only if explicitly stated.
        # NOTE: It might be better to set to default off?!?! Need to discuss what would be more intuitive for the users.
# TODO: ignore case of network name
'''
external_network_settings = {name: True for name in graph.external_networks()}
if 'connect_external' in run_dict:
external_network_settings.update(run_dict['connect_external'])
for netname, connect in external_network_settings.items():
if connect:
io.log('Setting external connections for {}'.format(netname))
network.set_external_connections(netname)
# Build inputs
if 'input' in config:
for netinput in config['input']:
if netinput['type'] == 'external_spikes' and netinput['format'] == 'nwb' and netinput['active']:
network.add_spikes_nwb(netinput['source_nodes'], netinput['file'], netinput['trial'])
io.log_info('Adding stimulations')
network.make_stims()
'''
graph.io.log_info('Network created.')
return network | 42.911308 | 131 | 0.644293 | 2,430 | 19,353 | 4.952675 | 0.169136 | 0.0172 | 0.012796 | 0.007894 | 0.393436 | 0.359867 | 0.350062 | 0.333693 | 0.313087 | 0.309763 | 0 | 0.001563 | 0.272723 | 19,353 | 451 | 132 | 42.911308 | 0.853499 | 0.265799 | 0 | 0.142857 | 0 | 0 | 0.081204 | 0 | 0 | 0 | 0 | 0.015521 | 0 | 1 | 0.126374 | false | 0 | 0.065934 | 0.032967 | 0.252747 | 0.021978 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38cabe0c4bced6b634c68a59cf4f8f5117b14f51 | 394 | py | Python | 2020/aoc6.py | lachtanek/advent-of-code | dc83d82d46392adc073191161c2767e684d776bd | [
"MIT"
] | null | null | null | 2020/aoc6.py | lachtanek/advent-of-code | dc83d82d46392adc073191161c2767e684d776bd | [
"MIT"
] | null | null | null | 2020/aoc6.py | lachtanek/advent-of-code | dc83d82d46392adc073191161c2767e684d776bd | [
"MIT"
] | null | null | null | from functools import reduce
data = []
with open("aoc6.inp") as rf:
sets = []
for l in rf:
if l == "\n":
data.append(sets)
sets = []
else:
sets.append(set([c for c in l.strip()]))
if sets:
    # flush the final group in case the input file does not end with a blank line
    data.append(sets)

a1 = a2 = 0
for sets in data:
a1 += len(reduce(lambda s1, s2: s1 | s2, sets))
a2 += len(reduce(lambda s1, s2: s1 & s2, sets))
print(a1, a2)
| 17.909091 | 52 | 0.5 | 61 | 394 | 3.229508 | 0.491803 | 0.081218 | 0.152284 | 0.172589 | 0.274112 | 0.274112 | 0.274112 | 0.274112 | 0 | 0 | 0 | 0.061776 | 0.34264 | 394 | 21 | 53 | 18.761905 | 0.698842 | 0 | 0 | 0.133333 | 0 | 0 | 0.025381 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38caf61d9a9e8681b2748236a9dabcabd645543a | 1,075 | py | Python | ghnotifier/menu.py | iamtalhaasghar/ghnotifier | 7bbcbc32abc8ad923bff64055cb19ac042a03764 | [
"MIT"
] | 1 | 2022-02-03T05:30:22.000Z | 2022-02-03T05:30:22.000Z | ghnotifier/menu.py | iamtalhaasghar/ghnotifier | 7bbcbc32abc8ad923bff64055cb19ac042a03764 | [
"MIT"
] | 5 | 2018-10-30T13:03:24.000Z | 2022-02-03T06:06:08.000Z | ghnotifier/menu.py | iamtalhaasghar/ghnotifier | 7bbcbc32abc8ad923bff64055cb19ac042a03764 | [
"MIT"
] | 1 | 2022-02-03T06:02:02.000Z | 2022-02-03T06:02:02.000Z | #!/usr/bin/env python3
import webbrowser
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from ghnotifier.notifier import Notifier
from ghnotifier.settings import Settings
class Menu:
GITHUB_NOTIFICATIONS = 'https://github.com/notifications'
def __init__(self):
self.menu = Gtk.Menu()
self.create_menu()
self.menu.show_all()
def create_menu(self):
self.append('Open Notifications', self.notifications)
self.append('Settings', self.settings)
self.menu.append(Gtk.SeparatorMenuItem())
self.append('Quit', self.quit)
def append(self, name, callback):
item = Gtk.MenuItem(name)
item.connect('activate', callback)
self.menu.append(item)
@staticmethod
def notifications(source):
webbrowser.open(Menu.GITHUB_NOTIFICATIONS)
@staticmethod
def settings(source):
Settings().open()
@staticmethod
def quit(source):
Notifier.stop()
Gtk.main_quit()
def get_inner(self):
return self.menu | 21.5 | 61 | 0.655814 | 124 | 1,075 | 5.58871 | 0.370968 | 0.05772 | 0.066378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003628 | 0.230698 | 1,075 | 50 | 62 | 21.5 | 0.834341 | 0.019535 | 0 | 0.090909 | 0 | 0 | 0.072106 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.212121 | false | 0 | 0.151515 | 0.030303 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38d24f0b77db0580d1f6a1183215410fe0692d65 | 859 | py | Python | Python/Maths/factorielle.py | GeneralNZR/maths-and-javascript | 8a0e638e59808b1d987269dddac0b99c96c78c4a | [
"MIT"
] | 3 | 2021-10-01T06:11:28.000Z | 2021-10-04T20:50:07.000Z | Python/Maths/factorielle.py | GeneralNZR/maths-and-javascript | 8a0e638e59808b1d987269dddac0b99c96c78c4a | [
"MIT"
] | null | null | null | Python/Maths/factorielle.py | GeneralNZR/maths-and-javascript | 8a0e638e59808b1d987269dddac0b99c96c78c4a | [
"MIT"
] | null | null | null | def factorielle_rec(n: int) -> int:
"""
    Description:
        Recursive factorial
    Parameters:
        n: {int} -- number whose factorial is computed
    Returns:
        {int} -- factorial of n
    Example:
        >>> f"{factorielle_rec(100):e}"
        '9.332622e+157'
        (factorielle_rec returns the exact integer; the f-string formats it in scientific notation)
"""
return 1 if n == 0 else n * factorielle_rec(n - 1)
def factorielle_it(n: int) -> int:
"""
    Description:
        Iterative factorial
    Parameters:
        n: {int} -- number whose factorial is computed
    Returns:
        {int} -- factorial of n
    Example:
        >>> f"{factorielle_it(100):e}"
        '9.332622e+157'
        (factorielle_it returns the exact integer; the f-string formats it in scientific notation)
"""
result = 1
for i in range(1, n + 1):
result *= i
return result | 21.475 | 65 | 0.549476 | 98 | 859 | 4.744898 | 0.367347 | 0.12043 | 0.064516 | 0.077419 | 0.696774 | 0.696774 | 0.541935 | 0.541935 | 0.541935 | 0.541935 | 0 | 0.066901 | 0.338766 | 859 | 40 | 66 | 21.475 | 0.751761 | 0.597206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | false | 0 | 0 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38d36df65b3ed1c6bdcd5d1855bfdf3aac9db033 | 14,536 | py | Python | huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/apply_individual_realname_auths_req.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/apply_individual_realname_auths_req.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/apply_individual_realname_auths_req.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ApplyIndividualRealnameAuthsReq:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'customer_id': 'str',
'identify_type': 'int',
'verified_type': 'int',
'verified_file_url': 'list[str]',
'name': 'str',
'verified_number': 'str',
'change_type': 'int',
'xaccount_type': 'str',
'bank_card_info': 'BankCardInfoV2'
}
attribute_map = {
'customer_id': 'customer_id',
'identify_type': 'identify_type',
'verified_type': 'verified_type',
'verified_file_url': 'verified_file_url',
'name': 'name',
'verified_number': 'verified_number',
'change_type': 'change_type',
'xaccount_type': 'xaccount_type',
'bank_card_info': 'bank_card_info'
}
def __init__(self, customer_id=None, identify_type=None, verified_type=None, verified_file_url=None, name=None, verified_number=None, change_type=None, xaccount_type=None, bank_card_info=None):
"""ApplyIndividualRealnameAuthsReq - a model defined in huaweicloud sdk"""
self._customer_id = None
self._identify_type = None
self._verified_type = None
self._verified_file_url = None
self._name = None
self._verified_number = None
self._change_type = None
self._xaccount_type = None
self._bank_card_info = None
self.discriminator = None
self.customer_id = customer_id
self.identify_type = identify_type
if verified_type is not None:
self.verified_type = verified_type
self.verified_file_url = verified_file_url
self.name = name
self.verified_number = verified_number
if change_type is not None:
self.change_type = change_type
self.xaccount_type = xaccount_type
if bank_card_info is not None:
self.bank_card_info = bank_card_info
@property
def customer_id(self):
"""Gets the customer_id of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Customer ID.| |Parameter constraints and description: Customer ID.|
:return: The customer_id of this ApplyIndividualRealnameAuthsReq.
:rtype: str
"""
return self._customer_id
@customer_id.setter
def customer_id(self, customer_id):
"""Sets the customer_id of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Customer ID.| |Parameter constraints and description: Customer ID.|
:param customer_id: The customer_id of this ApplyIndividualRealnameAuthsReq.
:type: str
"""
self._customer_id = customer_id
@property
def identify_type(self):
"""Gets the identify_type of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Verification method. 0: individual certificate verification; 4: individual bank card verification (in this mode, only a single facial-scan photo attachment needs to be uploaded).| |Parameter constraints and description: Verification method. 0: individual certificate verification; 4: individual bank card verification (in this mode, only a single facial-scan photo attachment needs to be uploaded).|
:return: The identify_type of this ApplyIndividualRealnameAuthsReq.
:rtype: int
"""
return self._identify_type
@identify_type.setter
def identify_type(self, identify_type):
"""Sets the identify_type of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Verification method. 0: individual certificate verification; 4: individual bank card verification (in this mode, only a single facial-scan photo attachment needs to be uploaded).| |Parameter constraints and description: Verification method. 0: individual certificate verification; 4: individual bank card verification (in this mode, only a single facial-scan photo attachment needs to be uploaded).|
:param identify_type: The identify_type of this ApplyIndividualRealnameAuthsReq.
:type: int
"""
self._identify_type = identify_type
@property
def verified_type(self):
"""Gets the verified_type of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Certificate type. 0: ID card; upload 3 attachments: (1) the portrait side of the ID card, (2) the national-emblem side, (3) the person holding the ID card with the portrait side showing. 3: passport; upload 3 attachments: (1) the personal data page, (2) the entry-stamp page, (3) the person holding the personal data page. 3: passport; upload 2 attachments: (1) the personal data page, (2) the person holding the personal data page. 5: Hong Kong/Macao travel permit; upload 3 attachments: (1) the front (portrait) side of the Mainland Travel Permit for Hong Kong and Macao Residents, (2) the back side, (3) the person holding the permit with the portrait side showing. 6: Taiwan travel permit; upload 3 attachments: (1) the front (portrait) side of the Mainland Travel Permit for Taiwan Residents, (2) the back side, (3) the person holding the permit with the portrait side showing. 7: overseas driver's license; upload 2 attachments: (1) the front (portrait) side of a driver's license issued outside China, (2) the person holding the license with the portrait side showing. 9: Hong Kong/Macao residence permit; upload 3 attachments: (1) the portrait side, (2) the national-emblem side, (3) the person holding the permit with the portrait side showing. 10: Taiwan residence permit; upload 3 attachments: (1) the portrait side, (2) the national-emblem side, (3) the person holding the permit with the portrait side showing. Required when identifyType=0; otherwise the value is ignored.| |Parameter constraints and description: identical to the parameter name description above.|
:return: The verified_type of this ApplyIndividualRealnameAuthsReq.
:rtype: int
"""
return self._verified_type
@verified_type.setter
def verified_type(self, verified_type):
"""Sets the verified_type of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Certificate type. 0: ID card; upload 3 attachments: (1) the portrait side of the ID card, (2) the national-emblem side, (3) the person holding the ID card with the portrait side showing. 3: passport; upload 3 attachments: (1) the personal data page, (2) the entry-stamp page, (3) the person holding the personal data page. 3: passport; upload 2 attachments: (1) the personal data page, (2) the person holding the personal data page. 5: Hong Kong/Macao travel permit; upload 3 attachments: (1) the front (portrait) side of the Mainland Travel Permit for Hong Kong and Macao Residents, (2) the back side, (3) the person holding the permit with the portrait side showing. 6: Taiwan travel permit; upload 3 attachments: (1) the front (portrait) side of the Mainland Travel Permit for Taiwan Residents, (2) the back side, (3) the person holding the permit with the portrait side showing. 7: overseas driver's license; upload 2 attachments: (1) the front (portrait) side of a driver's license issued outside China, (2) the person holding the license with the portrait side showing. 9: Hong Kong/Macao residence permit; upload 3 attachments: (1) the portrait side, (2) the national-emblem side, (3) the person holding the permit with the portrait side showing. 10: Taiwan residence permit; upload 3 attachments: (1) the portrait side, (2) the national-emblem side, (3) the person holding the permit with the portrait side showing. Required when identifyType=0; otherwise the value is ignored.| |Parameter constraints and description: identical to the parameter name description above.|
:param verified_type: The verified_type of this ApplyIndividualRealnameAuthsReq.
:type: int
"""
self._verified_type = verified_type
@property
def verified_file_url(self):
"""Gets the verified_file_url of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: File URLs of the certificate attachments for individual certificate verification. The URLs must be listed in order. For an ID card, if the portrait-side file is named abc023, the national-emblem side def004 and the handheld portrait-side photo gh007, list the URLs in the order abc023, def004, gh007 (file names are case sensitive). For a passport, if the personal data page file is named abc023 and the handheld data page def004, list the URLs in the order abc023, def004 (file names are case sensitive). Only jpg, jpeg, bmp, png, gif and pdf attachments are currently supported, and a single file may not exceed 10 MB. Each URL is a relative URL: do not include the bucket name or the download directory, only the sub-directory under download plus the file name. For example, if the attachment is stored in the bucket at https://bucketname.obs.Endpoint.myhuaweicloud.com/download/abc023.jpg, set this field to abc023.jpg; if it is stored at https://bucketname.obs.Endpoint.myhuaweicloud.com/download/test/abc023.jpg, set this field to test/abc023.jpg.| |Parameter constraints and description: identical to the parameter name description above.|
:return: The verified_file_url of this ApplyIndividualRealnameAuthsReq.
:rtype: list[str]
"""
return self._verified_file_url
@verified_file_url.setter
def verified_file_url(self, verified_file_url):
"""Sets the verified_file_url of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: File URLs of the certificate attachments for individual certificate verification. The URLs must be listed in order. For an ID card, if the portrait-side file is named abc023, the national-emblem side def004 and the handheld portrait-side photo gh007, list the URLs in the order abc023, def004, gh007 (file names are case sensitive). For a passport, if the personal data page file is named abc023 and the handheld data page def004, list the URLs in the order abc023, def004 (file names are case sensitive). Only jpg, jpeg, bmp, png, gif and pdf attachments are currently supported, and a single file may not exceed 10 MB. Each URL is a relative URL: do not include the bucket name or the download directory, only the sub-directory under download plus the file name. For example, if the attachment is stored in the bucket at https://bucketname.obs.Endpoint.myhuaweicloud.com/download/abc023.jpg, set this field to abc023.jpg; if it is stored at https://bucketname.obs.Endpoint.myhuaweicloud.com/download/test/abc023.jpg, set this field to test/abc023.jpg.| |Parameter constraints and description: identical to the parameter name description above.|
:param verified_file_url: The verified_file_url of this ApplyIndividualRealnameAuthsReq.
:type: list[str]
"""
self._verified_file_url = verified_file_url
@property
def name(self):
"""Gets the name of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Name.| |Parameter constraints and description: Name.|
:return: The name of this ApplyIndividualRealnameAuthsReq.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Name.| |Parameter constraints and description: Name.|
:param name: The name of this ApplyIndividualRealnameAuthsReq.
:type: str
"""
self._name = name
@property
def verified_number(self):
"""Gets the verified_number of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Certificate number.| |Parameter constraints and description: Certificate number.|
:return: The verified_number of this ApplyIndividualRealnameAuthsReq.
:rtype: str
"""
return self._verified_number
@verified_number.setter
def verified_number(self, verified_number):
"""Sets the verified_number of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Certificate number.| |Parameter constraints and description: Certificate number.|
:param verified_number: The verified_number of this ApplyIndividualRealnameAuthsReq.
:type: str
"""
self._verified_number = verified_number
@property
def change_type(self):
"""Gets the change_type of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Change type. -1: first-time real-name verification.| |Parameter constraints and description: Change type. -1: first-time real-name verification.|
:return: The change_type of this ApplyIndividualRealnameAuthsReq.
:rtype: int
"""
return self._change_type
@change_type.setter
def change_type(self, change_type):
"""Sets the change_type of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Change type. -1: first-time real-name verification.| |Parameter constraints and description: Change type. -1: first-time real-name verification.|
:param change_type: The change_type of this ApplyIndividualRealnameAuthsReq.
:type: int
"""
self._change_type = change_type
@property
def xaccount_type(self):
"""Gets the xaccount_type of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Platform identifier that Huawei assigns to the partner. The specific value is allocated by Huawei; for how to obtain it, see "How to obtain the value of xaccountType".| |Parameter constraints and description: Platform identifier that Huawei assigns to the partner. The specific value is allocated by Huawei; for how to obtain it, see "How to obtain the value of xaccountType".|
:return: The xaccount_type of this ApplyIndividualRealnameAuthsReq.
:rtype: str
"""
return self._xaccount_type
@xaccount_type.setter
def xaccount_type(self, xaccount_type):
"""Sets the xaccount_type of this ApplyIndividualRealnameAuthsReq.
        |Parameter name: Platform identifier that Huawei assigns to the partner. The specific value is allocated by Huawei; for how to obtain it, see "How to obtain the value of xaccountType".| |Parameter constraints and description: Platform identifier that Huawei assigns to the partner. The specific value is allocated by Huawei; for how to obtain it, see "How to obtain the value of xaccountType".|
:param xaccount_type: The xaccount_type of this ApplyIndividualRealnameAuthsReq.
:type: str
"""
self._xaccount_type = xaccount_type
@property
def bank_card_info(self):
"""Gets the bank_card_info of this ApplyIndividualRealnameAuthsReq.
:return: The bank_card_info of this ApplyIndividualRealnameAuthsReq.
:rtype: BankCardInfoV2
"""
return self._bank_card_info
@bank_card_info.setter
def bank_card_info(self, bank_card_info):
"""Sets the bank_card_info of this ApplyIndividualRealnameAuthsReq.
:param bank_card_info: The bank_card_info of this ApplyIndividualRealnameAuthsReq.
:type: BankCardInfoV2
"""
self._bank_card_info = bank_card_info
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApplyIndividualRealnameAuthsReq):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
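
# Illustrative construction of the request model above (a sketch only: every value
# below is a placeholder, not real customer data, and the attachment names merely
# follow the ordering rules described in the verified_file_url docstring):
#
#     req = ApplyIndividualRealnameAuthsReq(
#         customer_id="<customer-id>",
#         identify_type=0,                 # 0: individual certificate verification
#         verified_type=0,                 # 0: ID card, which needs 3 attachments
#         verified_file_url=["front.jpg", "back.jpg", "handheld.jpg"],
#         name="<name>",
#         verified_number="<certificate-number>",
#         change_type=-1,                  # -1: first-time real-name verification
#         xaccount_type="<platform-id>",
#     )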
| 45.003096 | 1,131 | 0.717735 | 1,489 | 14,536 | 6.826729 | 0.137005 | 0.021249 | 0.131038 | 0.064535 | 0.751303 | 0.693261 | 0.656468 | 0.566945 | 0.54245 | 0.530841 | 0 | 0.030339 | 0.190493 | 14,536 | 322 | 1,132 | 45.142857 | 0.833517 | 0.585099 | 0 | 0.091549 | 0 | 0 | 0.076778 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.169014 | false | 0 | 0.035211 | 0 | 0.338028 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38d42eb30da6ce6e341aef340a29b43528065a7b | 626 | py | Python | tests/test_pack_data.py | derekmerck/endpoint | 5b74f0b3303bbf419a6c9f71e9a4a156583bf51d | [
"MIT"
] | null | null | null | tests/test_pack_data.py | derekmerck/endpoint | 5b74f0b3303bbf419a6c9f71e9a4a156583bf51d | [
"MIT"
] | null | null | null | tests/test_pack_data.py | derekmerck/endpoint | 5b74f0b3303bbf419a6c9f71e9a4a156583bf51d | [
"MIT"
] | null | null | null | from datetime import datetime
from pprint import pprint
from cryptography.fernet import Fernet
from libsvc.utils import pack_data, unpack_data
def pack_data_test():
fkey = Fernet.generate_key()
data = {"today": datetime.today(),
"dog": "cat",
"red": "blue"}
p = pack_data(data, fkey, fields=["today", "dog"])
print(p.decode("utf8"))
u = unpack_data(p, fkey)
pprint(u)
assert u["dog"] == "cat"
today = datetime.fromisoformat(u["today"]).date()
assert today == datetime.today().date()
assert "red" not in u
if __name__ == "__main__":
pack_data_test()
| 21.586207 | 54 | 0.627796 | 82 | 626 | 4.585366 | 0.426829 | 0.085106 | 0.06383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002058 | 0.223642 | 626 | 28 | 55 | 22.357143 | 0.771605 | 0 | 0 | 0 | 1 | 0 | 0.083067 | 0 | 0 | 0 | 0 | 0 | 0.157895 | 1 | 0.052632 | false | 0 | 0.210526 | 0 | 0.263158 | 0.157895 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38d56c4e68352399b46ecce8e483eb237d03b4c0 | 1,604 | py | Python | iris_sdk/models/portin.py | NumberAI/python-bandwidth-iris | 0e05f79d68b244812afb97e00fd65b3f46d00aa3 | [
"MIT"
] | 2 | 2020-04-13T13:47:59.000Z | 2022-02-23T20:32:41.000Z | iris_sdk/models/portin.py | bandwidthcom/python-bandwidth-iris | dbcb30569631395041b92917252d913166f7d3c9 | [
"MIT"
] | 5 | 2020-09-18T20:59:24.000Z | 2021-08-25T16:51:42.000Z | iris_sdk/models/portin.py | bandwidthcom/python-bandwidth-iris | dbcb30569631395041b92917252d913166f7d3c9 | [
"MIT"
] | 5 | 2018-12-12T14:39:50.000Z | 2020-11-17T21:42:29.000Z | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from future.builtins import super
from iris_sdk.models.activation_status import ActivationStatus
from iris_sdk.models.base_resource import BaseResource
from iris_sdk.models.data.portin import PortInData
from iris_sdk.models.history import History
from iris_sdk.models.loas import Loas
from iris_sdk.models.notes import Notes
from iris_sdk.models.totals import Totals
XML_NAME_PORTIN = "LnpOrderResponse"
XML_NAME_PORTIN_SAVE = "LnpOrder"
XPATH_PORTIN = "/{}"
class PortIn(BaseResource, PortInData):
"""Local number portability order"""
_node_name = XML_NAME_PORTIN
_node_name_save = XML_NAME_PORTIN_SAVE
_xpath = XPATH_PORTIN
@property
def activation_status(self):
return self._activation_status
@property
def history(self):
return self._history
@property
def id(self):
return self.order_id
@id.setter
def id(self, id):
self.order_id = id
@property
def loas(self):
return self._loas
@property
def notes(self):
return self._notes
@property
def totals(self):
return self._totals
def __init__(self, parent=None, client=None):
super().__init__(parent, client)
PortInData.__init__(self)
self._activation_status = ActivationStatus(self)
self._history = History(self)
self._loas = Loas(self, client)
self._notes = Notes(self, client)
self._totals = Totals(self, client)
def save(self):
return self._post_data() | 25.460317 | 64 | 0.703865 | 203 | 1,604 | 5.256158 | 0.256158 | 0.052484 | 0.072165 | 0.111528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.214464 | 1,604 | 63 | 65 | 25.460317 | 0.846825 | 0.031796 | 0 | 0.12766 | 0 | 0 | 0.017442 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.191489 | false | 0 | 0.191489 | 0.148936 | 0.617021 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
38e5c75c5a33d505486249e9804993dae69406b3 | 20,561 | py | Python | util/job_launching/procman.py | LauXy/accel-sim-framework | 4c2bf09a79d6b57bb10fe1898700930a5dd5531f | [
"BSD-2-Clause"
] | 88 | 2020-07-15T22:07:38.000Z | 2022-03-29T03:52:55.000Z | util/job_launching/procman.py | LauXy/accel-sim-framework | 4c2bf09a79d6b57bb10fe1898700930a5dd5531f | [
"BSD-2-Clause"
] | 68 | 2020-08-12T13:44:59.000Z | 2022-03-28T19:34:56.000Z | util/job_launching/procman.py | LauXy/accel-sim-framework | 4c2bf09a79d6b57bb10fe1898700930a5dd5531f | [
"BSD-2-Clause"
] | 54 | 2020-07-24T21:05:56.000Z | 2022-03-31T13:28:23.000Z | #!/usr/bin/env python
# 2020 (c) Tim Rogers, Purdue University
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution. Neither the name of
# The University of British Columbia nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# This file implements ProcMan:
# A simple process manager that allows you to queue up tasks and launch them
# once the previous tasks are done. This is useful if you want to launch
# a few hundred tests, but only have 40 cores, you can queue them all up
# using the ProcMan, then let it continuously run 40 at a time.
# This is essentially a poor-man's torque, slurm, condor, etc for a single node.
# As the goal of proc-man is a dependency-free single-node version of slurm/torque
# it is used in a similar way. Replace:
# sbatch <file describing task>
# qsub <file describing task>
# with
# ./procman.py <file describing task>
# After all the processes you want are submitted, start the procman using:
# ./procman.py -S
# This will create a new process that launches and manages all the tasks you previously queued
# To avoid inter-process synchronization issues, procman operates using a
# producer/consumer model and once a procman has been started, new work cannot
# be added to that procman. However, more than one procman can run at once. That is,
# you can call:
# ./procman.py mybashA.sh
# ...
# ./procman.py mybashZ.sh
# ./procman.py -S
# ./procman.py mybash1.sh
# ...
# ./procman.py mybash26.sh
# ./procman.py -S
# And it will work, launching 2 procmans that loosely co-ordinate resource usage.
# By default procman will attempt to launch as many jobs as there are cores on the machine
# this can be changed with the "-c <numCores>" option.
#
# Some other useful commands:
# ./procman.py -s # launches a self-test to confirm that procman is working (takes 1-2 mins)
# ./procman.py -p # prints the state of all procmans and their jobs
# ./procman.py -k # kill all the jobs procman is running
# ./procman.py --help # prints all the options available
#
# NOTE: procman only works when jobs are submitted from one process. i.e.
# the user cannot spawn 2 processes and have each process concurrently
# attempt to enque work. Supporting such a system adds more complication
# and is not a common case. Also procman is designed for managing one user's
# processes and knows nothing about what other users are doing on the machine.
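#
# Programmatic use is also possible. A minimal sketch (the script name, output files
# and core count below are made up for illustration; selfTest() further down follows
# the same pattern):
#
#     import time
#     from procman import ProcMan, Job
#
#     pm = ProcMan(4)                          # run at most 4 jobs at a time
#     pm.queueJob(Job(outF="out.txt", errF="err.txt",
#                     workingDir=".", command="./my_task.sh"))
#     while not pm.complete():
#         pm.tick()                            # launch new jobs, reap finished ones
#         time.sleep(5)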
from optparse import OptionParser
import pickle
import subprocess
from subprocess import Popen, PIPE
import common
import os, signal
import stat
import time
import psutil
import shutil
import datetime
import re
import socket
import sys
import glob
import copy
this_directory = os.path.dirname(os.path.realpath(__file__)) + "/"
procManStateFolder = os.path.join(this_directory, "procman")
procManStateFile = os.path.join(procManStateFolder, "procman.{0}.pickle".format(socket.gethostname().strip()))
class Job:
def __init__(self, outF, errF, workingDir, command):
self.outF = outF
self.errF = errF
self.workingDir = workingDir
self.command = command
self.procId = None
self.POpenObj = None
self.maxVmSize = 0
self.runningTime = 0
self.status = "WAITING_TO_RUN"
self.name = None
self.id = None
self.hostname = "UNKNOWN"
def string(self):
return "status={0}: [name={8},procId={1},maxVmSize={2},runningTime={3},outF={4}," \
"errF={5},workingDir={6},command={7}]"\
.format(self.status,
self.procId,
self.maxVmSize,
self.runningTime,
self.outF,
self.errF,
self.workingDir,
self.command,
self.name)
def __str__(self):
return self.string()
def __repr__(self):
return self.string()
class ProcMan:
    def __init__(self, jobLimit, pickleFile=procManStateFile):
        # A single constructor with a default pickle file; Python does not overload,
        # so the previous second __init__ silently shadowed the first one.
        self.initialize(jobLimit, pickleFile)
def saveState(self):
pickle.dump(self, open(self.pickleFile, "w+"))
def clear(self):
if not self.mutable or len(self.activeJobs) > 0 or len(self.completeJobs) > 0:
sys.exit("ProcMans that have been started should not be cleared")
del self.queuedJobs [:]
def initialize(self, jobLimit, pickleFile):
self.queuedJobs = []
self.activeJobs = {}
self.completeJobs = {}
self.jobLimit = jobLimit
self.nextJobId = 1
self.tickingProcess = None
self.mutable = True
self.pickleFile = pickleFile
def queueJob(self, job):
if not self.mutable:
sys.exit("This ProcMan has already been started. No new jobs can be queued.")
job.id = self.nextJobId
self.queuedJobs.append(job)
self.nextJobId += 1
return job.id
def spawnProcMan(self, sleepTime):
if not self.mutable:
sys.exit("This ProcMan has already been started. No new spawning can occur.")
shutil.copy(self.pickleFile, self.pickleFile + ".tmp")
p = Popen([__file__,"-f", self.pickleFile + ".tmp", "-t", str(sleepTime)],
cwd=this_directory
)
print "ProcMan spawned [pid={0}]".format(p.pid)
def killJobs(self):
print "Killing {0} jobs".format(len(self.activeJobs))
for jid, activeJob in self.activeJobs.iteritems():
try:
p = psutil.Process(activeJob.procId)
except (psutil.NoSuchProcess,psutil.AccessDenied) as e:
print e
continue
for child in p.children(recursive=True):
os.kill(child.pid,9)
os.kill(activeJob.procId,9)
def tick(self):
if self.tickingProcess == None:
self.tickingProcess = os.getpid()
self.pickleFile = self.pickleFile + ".{0}".format(self.tickingProcess)
elif self.tickingProcess != os.getpid():
sys.exit("To support concurrent ProcMans in different processes, each procman can only be ticked by one process")
self.mutable = False
# test jobs for completion
jobsMoved = set()
for jid, activeJob in self.activeJobs.iteritems():
jobActive = True
            # For an active session we need to poll, or else the child process is
            # never reaped. If ProcMan was launched with just a file (and did not
            # launch the processes itself), we can simply send the CONT signal to
            # check whether the process is still alive.
if activeJob.POpenObj != None:
activeJob.POpenObj.poll()
try:
os.kill(activeJob.procId,signal.SIGCONT)
except OSError:
jobActive = False
if jobActive:
try:
p = psutil.Process(activeJob.procId)
mem = p.memory_info().vms
for child in p.children(recursive=True):
mem += child.memory_info().vms
activeJob.maxVmSize = max(mem, activeJob.maxVmSize)
activeJob.runningTime = \
datetime.datetime.now() \
- datetime.datetime.fromtimestamp(p.create_time())
activeJob.runningTime = str(activeJob.runningTime).split('.')[0]
except (psutil.NoSuchProcess,psutil.AccessDenied) as e:
print e
else:
activeJob.status = "COMPLETE_NO_OTHER_INFO"
self.completeJobs[activeJob.id] = activeJob
jobsMoved.add(activeJob.id)
for jobId in jobsMoved:
del self.activeJobs[jobId]
othersCores = self.getCPUCountFromOtherProcMans()
# launch new jobs when old ones complete
while (len(self.activeJobs) + othersCores) < self.jobLimit and len(self.queuedJobs) > 0:
newJob = self.queuedJobs.pop(0)
newJob.POpenObj = Popen(newJob.command,
stdout=open(newJob.outF,"w+"),
stderr=open(newJob.errF,"w+"),
cwd=newJob.workingDir)
newJob.procId = newJob.POpenObj.pid
newJob.hostname = socket.gethostname().strip()
newJob.status = "RUNNING"
self.activeJobs[newJob.id] = newJob
def getCPUCountFromOtherProcMans(self):
othersCores = 0
for pickleFile in glob.glob(os.path.join(os.path.dirname(self.pickleFile),"*pickle*")):
if pickleFile != self.pickleFile:
otherProcMan = pickle.load(open(pickleFile))
othersCores += len(otherProcMan.activeJobs)
return othersCores
def getState(self):
string = "queuedJobs={0}, activeJobs={1}, completeJobs={2}\n"\
.format(len(self.queuedJobs), len(self.activeJobs), len(self.completeJobs))
string += "\nqueuedJobs:\n"
for job in self.queuedJobs:
string += "\t{0}\n".format(job)
string += "\nactiveJobs:\n"
for jid,job in self.activeJobs.iteritems():
string += "\t{0}\n".format(job)
string += "\ncompleteJobs:\n"
for jid,job in self.completeJobs.iteritems():
string += "\t{0}\n".format(job)
return string
def getJob(self, jobId):
if jobId in self.activeJobs:
return self.activeJobs[jobId]
elif jobId in self.completeJobs:
return self.completeJobs[jobId]
else:
for job in self.queuedJobs:
if jobId == job.id:
return job
return None
def complete(self):
return len(self.queuedJobs) == 0 and len(self.activeJobs) == 0
def selfTest():
testPath = os.path.join(this_directory, "test")
if not os.path.isdir(testPath):
os.makedirs(testPath)
jobScript = os.path.join(testPath, "testScript.sh")
open(jobScript,"w+").write("#!/bin/bash\nsleep 20s")
st = os.stat(jobScript)
os.chmod(jobScript, st.st_mode | stat.S_IEXEC)
print "Starting synchronous selfTest"
procMan = ProcMan(4)
for i in range(5):
procMan.queueJob(
Job(
outF=os.path.join(testPath, "out.{0}.txt".format(i)),
errF=os.path.join(testPath, "err.{0}.txt".format(i)),
workingDir=testPath,
command=jobScript
)
)
print procMan.getState()
while not procMan.complete():
procMan.tick()
print procMan.getState()
time.sleep(3)
print "Passed synchronous selfTest"
print "Starting asynchronous selfTest"
for i in range(int(psutil.cpu_count()*1.2)):
jobScript = os.path.join(testPath, "testSlurm.{0}.sh".format(i))
open(jobScript,"w+").write("#!/bin/bash\n"\
"#SBATCH -J test.{0}\n".format(i) +\
"#SBATCH --output={0}\n".format(os.path.join(testPath, "out.{0}.txt".format(i))) +\
"#SBATCH --error={0}\n".format(os.path.join(testPath, "out.{0}.txt".format(i))) +\
"sleep 20s")
st = os.stat(jobScript)
os.chmod(jobScript, st.st_mode | stat.S_IEXEC)
out, err = subprocess.Popen([os.path.join(this_directory, "procman.py"),\
jobScript], stdout=PIPE).communicate()
if err != None:
sys.exit(err)
print "Queued Job {0}".format(out)
print "Starting Jobs"
subprocess.Popen([os.path.join(this_directory, "procman.py"),\
"-S", "-t", "5"], stdout=PIPE)
out = ""
while out != "Nothing Active":
time.sleep(1)
out, err = subprocess.Popen([os.path.join(this_directory, "procman.py"),\
"-p"], stdout=PIPE).communicate()
out = out.strip()
if err != None:
sys.exit(err)
print out
print "Asynchronous test passed"
print "Starting multi ProcMan test"
JOBS_PER_PROCMAN = int(psutil.cpu_count()*1.2) / 4
for j in range(4):
for i in range(JOBS_PER_PROCMAN):
jobNum = j*JOBS_PER_PROCMAN + i
jobScript = os.path.join(testPath, "testSlurm.{0}.sh".format(jobNum))
open(jobScript,"w+").write("#!/bin/bash\n"\
"#SBATCH -J test.{0}\n".format(jobNum) +\
"#SBATCH --output={0}\n".format(os.path.join(testPath, "out.{0}.txt".format(jobNum))) +\
"#SBATCH --error={0}\n".format(os.path.join(testPath, "out.{0}.txt".format(jobNum))) +\
"sleep 20s")
st = os.stat(jobScript)
os.chmod(jobScript, st.st_mode | stat.S_IEXEC)
out, err = subprocess.Popen([os.path.join(this_directory, "procman.py"),\
jobScript], stdout=PIPE).communicate()
if err != None:
sys.exit(err)
print "ProcMan {0}: Queued Job {0}".format(j, out)
print "ProcMan {0}: Starting Jobs".format(j)
subprocess.Popen([os.path.join(this_directory, "procman.py"),\
"-S", "-t", "5"], stdout=PIPE)
out = ""
while out != "Nothing Active":
time.sleep(1)
out, err = subprocess.Popen([os.path.join(this_directory, "procman.py"),\
"-p"], stdout=PIPE).communicate()
out = out.strip()
if err != None:
sys.exit(err)
print out
print "Multi-ProcMan test passed"
shutil.rmtree(testPath)
def main():
parser = OptionParser()
parser.add_option("-s", "--selfTest", dest="selfTest",
help="launched the selftester.", action="store_true")
parser.add_option("-f", "--file", dest="file",
help="File with the processes to manage.", default=procManStateFile)
parser.add_option("-t", "--sleepTime", dest="sleepTime",
help="Tune how often. ProcMan looks for completed jobs",
type=int, default=30)
parser.add_option("-c", "--cores", dest="cores",
help="how many cores to use",
type=int, default=psutil.cpu_count())
parser.add_option("-S", "--start", dest="start",action="store_true",
help="Just spawn the manager")
parser.add_option("-p", "--printState", dest="printState",action="store_true",
help="Print the state of the manager")
parser.add_option("-k", "--kill", dest="kill",action="store_true",
help="Kill all managed processes")
parser.add_option("-j", "--procManForJob", dest="procManForJob",default=None, type=int,
help="Return the path of the pickle file for the ProcMan managing this job." )
(options, args) = parser.parse_args()
if options.selfTest:
selfTest()
elif options.kill:
procmanfiles = glob.glob(options.file + ".*")
for f in procmanfiles:
print "Killing active jobs in Procman: {0}".format(os.path.basename(f))
procMan = pickle.load(open(f))
procMan.killJobs()
elif options.printState:
numProcMans = 0
numQueued = 0
numActive = 0
numComplete = 0
procmanfiles = glob.glob(options.file + ".*")
if len(procmanfiles) == 0:
print "Nothing Active"
else:
for f in procmanfiles:
numProcMans += 1
procMan = pickle.load(open(f))
numQueued += len(procMan.queuedJobs)
numActive += len(procMan.activeJobs)
numComplete += len(procMan.completeJobs)
print "Procman: {0}".format(os.path.basename(f))
print procMan.getState()
print "Total Procmans={0}, Total Queued={1}, Total Running={2}, Total Complete={3}"\
.format(numProcMans, numQueued, numActive, numComplete)
elif options.start:
if not os.path.exists(options.file):
sys.exit("Nothing to start {0} does not exist".format(options.file))
procMan = pickle.load(open(options.file))
procMan.spawnProcMan(options.sleepTime)
procMan.clear()
procMan.saveState()
elif options.procManForJob != None:
procmanfiles = glob.glob(options.file + ".*")
for f in procmanfiles:
procMan = pickle.load(open(f))
j = procMan.getJob(options.procManForJob)
if j != None:
print procMan.pickleFile
break
elif len(args) == 1:
# To make this work the same as torque and slurm - if you just give it one argument,
# we assume it's a pointer to a job file you want to submit.
if os.path.exists(options.file):
procMan = pickle.load(open(options.file))
if not procMan.mutable:
sys.exit("Error - this procman has already started")
else:
procMan = ProcMan(options.cores)
exec_file = args[0]
st = os.stat(exec_file)
os.chmod(exec_file, st.st_mode | stat.S_IEXEC)
# slurmToJob
job = Job("","",os.getcwd(),exec_file)
job.id = procMan.queueJob(job)
contents = ""
for line in open(exec_file).readlines():
if line.startswith("#SBATCH"):
nameMatch = re.match(r"#SBATCH -J (.*)", line.strip())
if nameMatch:
job.name = nameMatch.group(1)
outFMatch = re.match(r"#SBATCH --output=(.*)", line.strip())
if outFMatch:
job.outF = outFMatch.group(1)
errFMatch = re.match(r"#SBATCH --error=(.*)", line.strip())
if errFMatch:
job.errF = errFMatch.group(1)
line = re.sub(r"\$SLURM_JOB_ID", str(job.id), line)
contents += line
with open(exec_file, "w+") as f:
f.write(contents)
job.outF = re.sub("\%j", str(job.id), job.outF)
job.errF = re.sub("\%j", str(job.id), job.errF)
procMan.saveState()
print job.id
else:
options.file = common.file_option_test( options.file, "", this_directory )
if options.file == "":
sys.exit("Please specify the file containing the processes to manage with -f.")
procMan = pickle.load(open(options.file))
        if procMan.tickingProcess != None:
sys.exit("This procman is already running {0}".format(os.path.basename(options.file)))
procMan.pickleFile = options.file
os.remove(options.file)
if len(procMan.queuedJobs) > 0:
while not procMan.complete():
procMan.tick()
procMan.saveState()
time.sleep(options.sleepTime)
os.remove(procMan.pickleFile)
if __name__ == '__main__':
if not os.path.exists(procManStateFolder):
os.makedirs(procManStateFolder)
main()
| 41.453629 | 127 | 0.594961 | 2,472 | 20,561 | 4.910194 | 0.217233 | 0.014335 | 0.015653 | 0.013347 | 0.23134 | 0.199044 | 0.176306 | 0.146976 | 0.139891 | 0.113198 | 0 | 0.007356 | 0.292544 | 20,561 | 495 | 128 | 41.537374 | 0.8271 | 0.1943 | 0 | 0.239247 | 0 | 0.002688 | 0.132893 | 0.007157 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.008065 | 0.043011 | null | null | 0.072581 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38e6e2acda50235d67dfac9f5ec598d66c8ddcc1 | 2,005 | py | Python | nabu/processing/tfreaders/numpy_float_array_as_tfrecord_reader.py | Darleen2019/Nabu-MSSS | 5e862cbf846d45b8a317f87588533f3fde9f0726 | [
"MIT"
] | 18 | 2017-10-16T13:12:46.000Z | 2022-02-15T01:20:00.000Z | nabu/processing/tfreaders/numpy_float_array_as_tfrecord_reader.py | Darleen2019/Nabu-MSSS | 5e862cbf846d45b8a317f87588533f3fde9f0726 | [
"MIT"
] | null | null | null | nabu/processing/tfreaders/numpy_float_array_as_tfrecord_reader.py | Darleen2019/Nabu-MSSS | 5e862cbf846d45b8a317f87588533f3fde9f0726 | [
"MIT"
] | 9 | 2017-10-03T18:10:10.000Z | 2020-11-13T08:26:31.000Z | '''@file numpy_float_array_as_tfrecord_reader.py
contains the NumpyFloatArrayAsTfrecordReader class'''
import os
import numpy as np
import tensorflow as tf
import tfreader
import pdb
class NumpyFloatArrayAsTfrecordReader(tfreader.TfReader):
'''reader for numpy float arrays'''
def _read_metadata(self, datadirs):
'''read the input dimension
Args:
datadir: the directory where the metadata was written
Returns:
the metadata as a dictionary
'''
metadata = dict()
#read the non-time dimensions of the data
with open(os.path.join(datadirs[0], 'nontime_dims')) as fid:
metadata['nontime_dims'] = fid.read().strip().split(',')
metadata['nontime_dims'] = map(int,metadata['nontime_dims'])
for datadir in datadirs:
with open(os.path.join(datadir, 'nontime_dims')) as fid:
nontime_dims=fid.read().strip().split(',')
nontime_dims=map(int,nontime_dims)
if metadata['nontime_dims'] != nontime_dims:
raise Exception(
'all reader dimensions must be the same')
return metadata
def _create_features(self):
'''
creates the information about the features
Returns:
A dict mapping feature keys to FixedLenFeature, VarLenFeature,
and SparseFeature values
'''
return {'data': tf.FixedLenFeature([], dtype=tf.string)}
def _process_features(self, features):
'''process the read features
features:
A dict mapping feature keys to Tensor and SparseTensor values
Returns:
a pair of tensor and sequence length
'''
data = tf.decode_raw(features['data'], tf.float32)
resh_dims = [-1] + self.metadata['nontime_dims']
data = tf.reshape(data, resh_dims)
sequence_length = tf.shape(data)[0]
return data, sequence_length
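
# Expected on-disk metadata, sketched for clarity (the directory name and sizes are
# made up): each data directory handed to _read_metadata contains a plain-text file
# called 'nontime_dims' holding the comma-separated sizes of the non-time axes, e.g.
#
#     $ cat fbank/nontime_dims
#     129,2
#
# so every array read from the TFRecords is reshaped to [-1, 129, 2] and the first
# (time) dimension is returned as the sequence length.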
| 30.378788 | 78 | 0.613965 | 226 | 2,005 | 5.327434 | 0.411504 | 0.100498 | 0.078904 | 0.023256 | 0.11794 | 0.08804 | 0 | 0 | 0 | 0 | 0 | 0.003531 | 0.293766 | 2,005 | 65 | 79 | 30.846154 | 0.846751 | 0.01995 | 0 | 0 | 0 | 0 | 0.108197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.185185 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
38e9981ca280b378dc3584ec5d46dac7194e4dbd | 643 | py | Python | apps/telegram_bot/bot/filters.py | alena-kono/bot-valley | e8403f75bcd4e4b3f84bbe8c00a53d763e7c9776 | [
"MIT"
] | null | null | null | apps/telegram_bot/bot/filters.py | alena-kono/bot-valley | e8403f75bcd4e4b3f84bbe8c00a53d763e7c9776 | [
"MIT"
] | 1 | 2022-03-01T12:18:11.000Z | 2022-03-01T12:18:11.000Z | apps/telegram_bot/bot/filters.py | alena-kono/bot-valley | e8403f75bcd4e4b3f84bbe8c00a53d763e7c9776 | [
"MIT"
] | null | null | null | import telegram
from telegram.ext.filters import MessageFilter
from apps.telegram_bot.preferences import global_preferences
class CryptoCurrencyFilter(MessageFilter):
"""A custom MessageFilter that filters telegram text messages by
the condition of entering the list of BUTTONS_CRYPTO_CURRENCIES_FROM.
"""
def filter(self, message: telegram.Message) -> bool:
currencies_str = global_preferences.get("buttons__crypto_currencies_from")
        currencies_str = currencies_str.replace(" ", "")  # str.replace returns a new string; reassign it
currencies_list = currencies_str.split(",")
if message.text in currencies_list:
return True
return False
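
# Usage sketch (illustrative: assumes a python-telegram-bot Dispatcher and a
# `convert_crypto` callback defined elsewhere in the bot):
#
#     from telegram.ext import MessageHandler
#     dispatcher.add_handler(MessageHandler(CryptoCurrencyFilter(), convert_crypto))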
| 33.842105 | 82 | 0.735614 | 73 | 643 | 6.273973 | 0.547945 | 0.085153 | 0.100437 | 0.117904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.194401 | 643 | 18 | 83 | 35.722222 | 0.88417 | 0.203733 | 0 | 0 | 0 | 0 | 0.066398 | 0.062374 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.636364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
c7fbbec52e6a6987d6dd31cf694220a3b9b2bd09 | 301 | py | Python | web crawler functions/get_next_target.py | akshaynagpal/python_web_crawler | a74af25db4c9f819105621868a6a9a7337a2a770 | [
"MIT"
] | 1 | 2022-03-06T21:00:45.000Z | 2022-03-06T21:00:45.000Z | web crawler functions/get_next_target.py | akshaynagpal/python_web_crawler | a74af25db4c9f819105621868a6a9a7337a2a770 | [
"MIT"
] | null | null | null | web crawler functions/get_next_target.py | akshaynagpal/python_web_crawler | a74af25db4c9f819105621868a6a9a7337a2a770 | [
"MIT"
] | null | null | null | def get_next_target(page):
    """Return (url, end_pos) for the first '<a href=' link found in page,
    or (None, 0) if there is no further link."""
    start_link = page.find('<a href=')
if start_link == -1:
return None,0
else:
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
| 27.363636 | 51 | 0.58804 | 43 | 301 | 3.860465 | 0.44186 | 0.162651 | 0.156627 | 0.216867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018433 | 0.27907 | 301 | 10 | 52 | 30.1 | 0.746544 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a0340d6bee8c182f82274f58caefa62290e5be1 | 461 | py | Python | configs/retinanet/retinanet_r50_fpn_1x_bdd100k.py | XDong18/mmdetection | 482ada168a644f350d51d85783d309677441eeb0 | [
"Apache-2.0"
] | null | null | null | configs/retinanet/retinanet_r50_fpn_1x_bdd100k.py | XDong18/mmdetection | 482ada168a644f350d51d85783d309677441eeb0 | [
"Apache-2.0"
] | null | null | null | configs/retinanet/retinanet_r50_fpn_1x_bdd100k.py | XDong18/mmdetection | 482ada168a644f350d51d85783d309677441eeb0 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/bdd100k_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# model
model = dict(
bbox_head=dict(
num_classes=10, # bdd100k class number
)
)
# data loader
data = dict(
    samples_per_gpu=4,  # TODO samples per gpu
workers_per_gpu=2,
)
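# The effective batch size is samples_per_gpu multiplied by the number of GPUs used for
# training; the schedule_1x learning rate above (lr=0.01) generally assumes a fixed total
# batch size, so the linear scaling rule can be applied if that total changes.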
| 20.954545 | 72 | 0.665944 | 64 | 461 | 4.453125 | 0.6875 | 0.063158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060686 | 0.177874 | 461 | 21 | 73 | 21.952381 | 0.691293 | 0.149675 | 0 | 0 | 0 | 0 | 0.365285 | 0.357513 | 0 | 0 | 0 | 0.047619 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a0e1ca9345564e019c758529a1cb3634a3a9fae | 296 | py | Python | ergoservice/dashboard/models.py | damirmedakovic/ergoapp | 38dec32aa2dbdd3ec27a8e911fc233dff7c7c499 | [
"MIT"
] | null | null | null | ergoservice/dashboard/models.py | damirmedakovic/ergoapp | 38dec32aa2dbdd3ec27a8e911fc233dff7c7c499 | [
"MIT"
] | 5 | 2020-06-05T23:32:29.000Z | 2021-06-10T18:58:29.000Z | ergoservice/dashboard/models.py | damirmedakovic/ergoapp | 38dec32aa2dbdd3ec27a8e911fc233dff7c7c499 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Case(models.Model):
name = models.CharField(max_length =100)
email = models.EmailField(max_length=100, unique=True)
message = models.CharField(max_length=500, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
| 22.769231 | 55 | 0.773649 | 43 | 296 | 5.186047 | 0.674419 | 0.121076 | 0.161435 | 0.215247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034615 | 0.121622 | 296 | 12 | 56 | 24.666667 | 0.823077 | 0.081081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a112329bea0c737e5460d8489595b646350f0f7 | 732 | py | Python | logtf_analyser/tests/test_convert_id3_to_id64.py | cob16/logtf_analyzer | d7398da4fcf0dc64e2dfa7078e036a8189c81b77 | [
"MIT"
] | null | null | null | logtf_analyser/tests/test_convert_id3_to_id64.py | cob16/logtf_analyzer | d7398da4fcf0dc64e2dfa7078e036a8189c81b77 | [
"MIT"
] | null | null | null | logtf_analyser/tests/test_convert_id3_to_id64.py | cob16/logtf_analyzer | d7398da4fcf0dc64e2dfa7078e036a8189c81b77 | [
"MIT"
] | null | null | null | from unittest import TestCase
from parameterized import parameterized
from logtf_analyser.utils import convert_id3_to_id64
class TestConvert_id3_to_id64(TestCase):
def test_convert_id3_to_id64(self):
id64 = convert_id3_to_id64("[U:1:22202]")
self.assertEqual(id64, 76561197960287930)
@parameterized.expand([
("missing prefix", "22202]"),
("missing suffix", "[U:1:22202"),
])
    def test_raise_value_error_if(self, name, value):
        with self.assertRaises(ValueError):
            convert_id3_to_id64(value)
    def test_raise_if_not_string(self):
        # each call gets its own assertRaises block so both cases are actually checked
        with self.assertRaises(TypeError):
            convert_id3_to_id64(122434234)
        with self.assertRaises(TypeError):
            convert_id3_to_id64(True)
| 28.153846 | 52 | 0.693989 | 92 | 732 | 5.184783 | 0.434783 | 0.073375 | 0.132075 | 0.201258 | 0.083857 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121317 | 0.211749 | 732 | 25 | 53 | 29.28 | 0.705373 | 0 | 0 | 0 | 0 | 0 | 0.075137 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a11e07673a89b3a3b7bcf3915b416ec6d3dad8d | 5,716 | py | Python | apps/blog/editer_user.py | jhjguxin/blogserver | 7873e1a94d05859650ce88d50e241456f4ab43ed | [
"MIT"
] | 3 | 2015-11-17T16:16:41.000Z | 2018-07-07T13:34:18.000Z | apps/blog/editer_user.py | jhjguxin/blogserver | 7873e1a94d05859650ce88d50e241456f4ab43ed | [
"MIT"
] | null | null | null | apps/blog/editer_user.py | jhjguxin/blogserver | 7873e1a94d05859650ce88d50e241456f4ab43ed | [
"MIT"
] | 1 | 2020-06-08T02:46:07.000Z | 2020-06-08T02:46:07.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.forms import PasswordResetForm,PasswordChangeForm,AdminPasswordChangeForm
#from django.contrib.auth.models import User
from dynamicresponse.response import *
from forms import *
from models import *
from django.views.decorators.csrf import csrf_exempt
import pdb
"""
def users(request):
users = Users.objects.all()
return render_to_response("users.html", {
'users': users },
RequestContext(request))
def test_js(request):
return render_to_response('test_js.html', {}, RequestContext(request))
"""
"""
@ csrf_exempt
def index_user(request):
"Lists all blog user."
if request.method == 'POST':
user = User.objects.create(title=request.POST.get("title"), reviewer=request.POST.get("reviewer"), email=request.POST.get("email"),content=request.POST.get("content") )
user.save()
form = RegisterForm(request.POST, instance=user)
#users = Users.objects.all()
else:
form = RegisterForm(instance=None)
users = User.objects.all()
#pdb.set_trace()
return SerializeOrRender('blog/index_user.html', { 'users': users }, extra={ 'form': form })
"""
def users_list(request):
"Lists all blog user."
users = User.objects.all()
return SerializeOrRender('blog/users_list.html', { 'users': users })
"""
def delete_user(request, user_id):
"Deletes the blog user."
user = get_object_or_404(User.objects.all(), pk=user_id)
if request.method == 'POST':
user.delete()
return SerializeOrRedirect(reverse('list_users'), {}, status=CR_DELETED)
else:
return SerializeOrRender('blog/delete_user.html', { 'user': user }, status=CR_CONFIRM)
"""
def register(request, user_id=None):
"""Displays, creates or updates a blog users."""
user = None
if user_id:
user = get_object_or_404(User.objects.all(), pk=user_id)
if request.method == 'POST':
form = RegisterForm(request.POST, instance=user)
if form.is_valid():
user = form.save()
return SerializeOrRedirect(reverse('users_list'), { 'user': user })
else:
form = RegisterForm(instance=user)
return SerializeOrRender('blog/user.html', { 'user': user }, extra={ 'form': form })
def u_change(request, user_id=None):
"""Displays, creates or updates a blog users."""
user = None
if user_id:
user = get_object_or_404(User.objects.all(), pk=user_id)
if request.method == 'POST':
form = U_ChangeForm(request.POST, instance=user)
if form.is_valid():
user = form.save()
return SerializeOrRedirect(reverse('users_list'), { 'user': user })
else:
form = U_ChangeForm(instance=user)
return SerializeOrRender('blog/user.html', { 'user': user }, extra={ 'form': form })
def passwordchange(request, user_id=None):
password_change_form=PasswordChangeForm
user = None
if user_id:
        user = get_object_or_404(User, pk=user_id)  # pass the model class, not an instance, to get_object_or_404
if request.method == 'POST':
form = PasswordChangeForm(user, request.POST)
if form.is_valid():
user = form.save()
return SerializeOrRedirect(reverse('list_users'), { 'user': user })
else:
form = password_change_form(user)
return SerializeOrRender('blog/user.html', { 'user': user }, extra={ 'form': form })
"""
def passwordchange(request, user_id=None):
"Displays, creates or updates a blog users."
user = None
if user_id:
user = get_object_or_404(User.objects.all(), pk=user_id)
olduser=User.objects.get(id=user_id)
if request.method == 'POST':
form = U_PasswordChangeForm(request.POST, instance=user)
if form.is_valid():
user = form.save()
return SerializeOrRedirect(reverse('list_users'), { 'user': user })
# else:
# form = U_PasswordChangeForm(instance=user)
return SerializeOrRender('blog/user.html', { 'user': user }, extra={ 'form': form })
"""
"""
def passwordchange(request, is_admin_site=False, template_name='blog/user.html',
email_template_name='registration/password_reset_email.html',
password_reset_form=PasswordResetForm, token_generator=default_token_generator,
post_reset_redirect=None):
if post_reset_redirect is None:
post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done')
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {}
opts['use_https'] = request.is_secure()
opts['token_generator'] = token_generator
if is_admin_site:
opts['domain_override'] = request.META['HTTP_HOST']
else:
opts['email_template_name'] = email_template_name
if not Site._meta.installed:
opts['domain_override'] = RequestSite(request).domain
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
return render_to_response(template_name, {
'form': form,
}, context_instance=RequestContext(request))
"""
| 33.426901 | 172 | 0.646956 | 671 | 5,716 | 5.33532 | 0.183308 | 0.026816 | 0.02933 | 0.037151 | 0.425698 | 0.386872 | 0.355307 | 0.355307 | 0.337989 | 0.337989 | 0 | 0.00431 | 0.228831 | 5,716 | 170 | 173 | 33.623529 | 0.807849 | 0.03359 | 0 | 0.471698 | 0 | 0 | 0.067845 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0.09434 | 0.226415 | 0 | 0.433962 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2a15f841c908a1dfe49e0cb148931bd74de7aa9f | 841 | py | Python | Source/Tools/BindTool/writer.py | ssinai1/rbfx | e69a7093d153667e3d8dd3270449d3d594c1c1a8 | [
"MIT"
] | 441 | 2018-12-26T14:50:23.000Z | 2021-11-05T03:13:27.000Z | Source/Tools/BindTool/writer.py | ssinai1/rbfx | e69a7093d153667e3d8dd3270449d3d594c1c1a8 | [
"MIT"
] | 221 | 2018-12-29T17:40:23.000Z | 2021-11-06T21:41:55.000Z | Source/Tools/BindTool/writer.py | ssinai1/rbfx | e69a7093d153667e3d8dd3270449d3d594c1c1a8 | [
"MIT"
] | 101 | 2018-12-29T13:08:10.000Z | 2021-11-02T09:58:37.000Z |
class InterfaceWriter(object):
def __init__(self, output_path):
self._output_path_template = output_path + '/_{key}_{subsystem}.i'
self._fp = {
'pre': {},
'post': {},
}
def _write(self, key, subsystem, text):
subsystem = subsystem.lower()
fp = self._fp[key].get(subsystem)
if fp is None:
self._fp[key][subsystem] = fp = open(self._output_path_template.format(key=key, subsystem=subsystem), 'w+')
fp.write(text)
fp.write('\n')
def write_pre(self, subsystem, text):
self._write('pre', subsystem, text)
def write_post(self, subsystem, text):
self._write('post', subsystem, text)
def close(self):
for fp_map in self._fp.values():
for fp in fp_map.values():
fp.close()
| 30.035714 | 119 | 0.567182 | 102 | 841 | 4.431373 | 0.303922 | 0.143805 | 0.09292 | 0.097345 | 0.115044 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.290131 | 841 | 27 | 120 | 31.148148 | 0.757119 | 0 | 0 | 0 | 0 | 0 | 0.046429 | 0.025 | 0 | 0 | 0 | 0 | 0 | 1 | 0.227273 | false | 0 | 0 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a1b32d6549e142ab87a8c6dd798f43bdbbe5e9f | 685 | py | Python | gitzer/director/migrations/0001_initial.py | IgnisDa/Gitzer | 6810ee7ca6b9d86d7d87745342850a8be58d1865 | [
"Apache-2.0"
] | 2 | 2021-01-24T07:44:28.000Z | 2021-01-30T22:12:43.000Z | gitzer/director/migrations/0001_initial.py | IgnisDa/Gitzer | 6810ee7ca6b9d86d7d87745342850a8be58d1865 | [
"Apache-2.0"
] | null | null | null | gitzer/director/migrations/0001_initial.py | IgnisDa/Gitzer | 6810ee7ca6b9d86d7d87745342850a8be58d1865 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.5 on 2021-01-16 08:15
from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Repository',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('path', models.CharField(help_text='The absolute path to the local git repository.', max_length=500)),
                ('created_on', models.DateTimeField(auto_now_add=True, help_text='The date and time this repository was logged on.')),
            ],
        ),
    ]
| 29.782609 | 134 | 0.617518 | 80 | 685 | 5.175 | 0.7125 | 0.038647 | 0.05314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035857 | 0.267153 | 685 | 22 | 135 | 31.136364 | 0.788845 | 0.065693 | 0 | 0 | 1 | 0 | 0.191223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a1c70b9d1e40d06c382598343320ee23443054f | 2,668 | py | Python | model_based/svms/test_model_compression.py | vohoaiviet/tag-image-retrieval | 0a257560581f702cd394f3f28c9e0f6202827ce8 | [
"MIT"
] | 50 | 2015-11-04T15:53:09.000Z | 2022-01-03T14:46:17.000Z | model_based/svms/test_model_compression.py | vohoaiviet/tag-image-retrieval | 0a257560581f702cd394f3f28c9e0f6202827ce8 | [
"MIT"
] | 2 | 2018-03-07T09:51:50.000Z | 2018-10-13T11:05:13.000Z | model_based/svms/test_model_compression.py | vohoaiviet/tag-image-retrieval | 0a257560581f702cd394f3f28c9e0f6202827ce8 | [
"MIT"
] | 17 | 2015-10-26T03:41:49.000Z | 2021-08-23T08:11:05.000Z |
import sys
import os
import time

from basic.common import ROOT_PATH, checkToSkip, makedirsforfile
from basic.util import readImageSet
from simpleknn.bigfile import BigFile, StreamFile
from basic.annotationtable import readConcepts, readAnnotationsFrom
from basic.metric import getScorer

if __name__ == "__main__":
    try:
        rootpath = sys.argv[1]
    except:
        rootpath = ROOT_PATH

    metric = 'AP'
    feature = "dsift"

    trainCollection = 'voc2008train'
    trainAnnotationName = 'conceptsvoc2008train.txt'
    testCollection = 'voc2008val'
    testAnnotationName = 'conceptsvoc2008val.txt'
    testset = testCollection

    modelName = 'fik50'
    modelName = 'fastlinear'

    if 'fastlinear' == modelName:
        from fastlinear.fastlinear import fastlinear_load_model as load_model
    else:
        from fiksvm.fiksvm import fiksvm_load_model as load_model

    scorer = getScorer(metric)

    imset = readImageSet(testCollection, testset, rootpath=rootpath)
    concepts = readConcepts(testCollection, testAnnotationName, rootpath=rootpath)

    feat_dir = os.path.join(rootpath, testCollection, "FeatureData", feature)
    feat_file = BigFile(feat_dir)
    _renamed, _vectors = feat_file.read(imset)

    nr_of_images = len(_renamed)
    nr_of_concepts = len(concepts)

    mAP = 0.0
    models = [None] * len(concepts)

    for i, concept in enumerate(concepts):
        model_file_name = os.path.join(rootpath, trainCollection, 'Models', trainAnnotationName, feature, modelName, '%s.model' % concept)
        model1 = load_model(model_file_name)
        (pA, pB) = model1.get_probAB()

        model2 = load_model(model_file_name)
        model2.add_fastsvm(model1, 0.8, 1)

        names, labels = readAnnotationsFrom(testCollection, testAnnotationName, concept, rootpath=rootpath)
        name2label = dict(zip(names, labels))

        ranklist1 = [(_id, model1.predict(_vec)) for _id, _vec in zip(_renamed, _vectors)]
        ranklist2 = [(_id, model2.predict(_vec)) for _id, _vec in zip(_renamed, _vectors)]

        model_file_name = os.path.join(rootpath, trainCollection, 'Models', 'bag' + trainAnnotationName, feature, modelName, '%s.model' % concept)
        model3 = load_model(model_file_name)
        ranklist3 = [(_id, model3.predict(_vec)) for _id, _vec in zip(_renamed, _vectors)]

        print concept,
        for ranklist in [ranklist1, ranklist2, ranklist3]:
            ranklist.sort(key=lambda v: v[1], reverse=True)
            sorted_labels = [name2label[x[0]] for x in ranklist if x[0] in name2label]
            print '%.3f' % scorer.score(sorted_labels),
        print ''
| 31.388235 | 141 | 0.687031 | 298 | 2,668 | 5.942953 | 0.365772 | 0.035573 | 0.036702 | 0.030491 | 0.23546 | 0.175607 | 0.1214 | 0.1214 | 0.1214 | 0 | 0 | 0.02202 | 0.217016 | 2,668 | 84 | 142 | 31.761905 | 0.825754 | 0 | 0 | 0 | 0 | 0 | 0.057743 | 0.017248 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.181818 | null | null | 0.054545 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a291bb3879db1c009a0ad3fab6dd256c7e79e33 | 1,646 | py | Python | gemini/bin/gemini_python.py | codes1gn/gemini | 4b173ea583f2578244d1d0fb482ccb77818f7558 | [
"MIT"
] | null | null | null | gemini/bin/gemini_python.py | codes1gn/gemini | 4b173ea583f2578244d1d0fb482ccb77818f7558 | [
"MIT"
] | null | null | null | gemini/bin/gemini_python.py | codes1gn/gemini | 4b173ea583f2578244d1d0fb482ccb77818f7558 | [
"MIT"
] | null | null | null | import sys
import traceback
import copy
import importlib

from gemini.gemini_compiler import *
from gemini.utils import *


def main(argv=sys.argv[1:]):
    print('gemini compiler entry point')
    filename = copy.deepcopy(argv[0])
    arguments = copy.deepcopy(argv[1:])
    compiler = GeminiCompiler()
    src_code = read_src(filename)
    assert 'gemini_python' in sys.argv[0]

    # step 1, parse src code
    compiler.parse(src_code, filename=filename)
    compiler.dump(pretty=True, prefix='src_parse')
    assert 1, 'step 1 parse src'

    # patch, fix import gemini
    # _plugin = importlib.import_module('gemini.plugins.bert_plugin')
    # sys.modules['gemini'] = _plugin

    # step 2, parse modules
    compiler.parse_modules()
    compiler.dump(pretty=True, prefix='parse_module')
    assert 1, 'step 2 parse module'

    # TODO(albert) construct config, use dummy string instead
    config = Configuration()
    print(config)
    compiler.apply_model_parallel(config)
    compiler.dump(pretty=True, prefix='apply_{}_passes'.format(config.mode))
    assert 1, 'step 3 apply sharding mode'

    use_ast = False
    # TODO(albert) have bug when not use_ast
    if not use_ast:
        try:
            compiler.compile_and_run(use_ast=False)
            print('try run src success')
        except Exception:
            print('try run src fail')
            traceback.print_exc()
    else:
        try:
            compiler.compile_and_run(use_ast=True)
            print('try run ast success')
        except Exception:
            print('try run ast fail')
            traceback.print_exc()


if __name__ == '__main__':
    main()
| 27.433333 | 76 | 0.655529 | 212 | 1,646 | 4.924528 | 0.358491 | 0.028736 | 0.042146 | 0.063218 | 0.201149 | 0.12069 | 0.057471 | 0 | 0 | 0 | 0 | 0.009623 | 0.242406 | 1,646 | 59 | 77 | 27.898305 | 0.827586 | 0.157959 | 0 | 0.146341 | 0 | 0 | 0.156023 | 0 | 0 | 0 | 0 | 0.016949 | 0.097561 | 1 | 0.02439 | false | 0.02439 | 0.146341 | 0 | 0.170732 | 0.195122 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a2949f385ec93a057a10d2a09d0bd12596b5253 | 1,535 | py | Python | veselin-angelov-ekatte/create_database.py | veselin-angelov/training-projects | f02a29eed57d60720fe66e76af483d2b1f689bfd | [
"Apache-2.0"
] | null | null | null | veselin-angelov-ekatte/create_database.py | veselin-angelov/training-projects | f02a29eed57d60720fe66e76af483d2b1f689bfd | [
"Apache-2.0"
] | null | null | null | veselin-angelov-ekatte/create_database.py | veselin-angelov/training-projects | f02a29eed57d60720fe66e76af483d2b1f689bfd | [
"Apache-2.0"
] | null | null | null | from helpers import create_connection, execute_query
connection = create_connection(
    "postgres", "postgres", "admin", "127.0.0.1", "5432"
)

create_database_query = "CREATE DATABASE ekatte"
execute_query(connection, create_database_query)

connection = create_connection(
    "ekatte", "postgres", "admin", "127.0.0.1", "5432"
)

create_area_query = '''
CREATE TABLE IF NOT EXISTS "areas" (
    "id" serial NOT NULL UNIQUE,
    "name" TEXT NOT NULL UNIQUE,
    "code" TEXT NOT NULL UNIQUE,
    CONSTRAINT "area_pk" PRIMARY KEY ("code")
);
'''

create_municipality_query = '''
CREATE TABLE IF NOT EXISTS "municipalities" (
    "id" serial NOT NULL UNIQUE,
    "name" TEXT NOT NULL,
    "code" TEXT NOT NULL UNIQUE,
    "area_code" TEXT NOT NULL,
    CONSTRAINT "municipalities_pk" PRIMARY KEY ("code"),
    CONSTRAINT "municipalities_fk0" FOREIGN KEY ("area_code") REFERENCES "areas"("code")
);
'''

create_settlements_query = '''
CREATE TABLE IF NOT EXISTS "settlements" (
    "id" serial NOT NULL,
    "ekatte" TEXT NOT NULL UNIQUE,
    "type" TEXT NOT NULL,
    "name" TEXT NOT NULL,
    "municipality_code" TEXT NOT NULL,
    CONSTRAINT "settlements_pk" PRIMARY KEY ("id"),
    CONSTRAINT "settlements_fk0" FOREIGN KEY ("municipality_code") REFERENCES "municipalities"("code")
);
'''

execute_query(connection, create_area_query)
execute_query(connection, create_municipality_query)
execute_query(connection, create_settlements_query)
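The helpers module used above is not part of this file; the following is a minimal sketch of what create_connection and execute_query could look like, assuming psycopg2 is the PostgreSQL driver in use.
# helpers.py -- hypothetical sketch, assuming psycopg2; not the project's actual code.
import psycopg2

def create_connection(db_name, db_user, db_password, db_host, db_port):
    connection = psycopg2.connect(dbname=db_name, user=db_user, password=db_password,
                                  host=db_host, port=db_port)
    # Autocommit so statements like CREATE DATABASE can run outside a transaction.
    connection.autocommit = True
    return connection

def execute_query(connection, query):
    cursor = connection.cursor()
    cursor.execute(query)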
| 31.326531 | 106 | 0.672964 | 180 | 1,535 | 5.555556 | 0.216667 | 0.084 | 0.099 | 0.14 | 0.369 | 0.211 | 0.13 | 0.13 | 0.072 | 0 | 0 | 0.018257 | 0.214984 | 1,535 | 48 | 107 | 31.979167 | 0.811618 | 0 | 0 | 0.341463 | 0 | 0 | 0.676873 | 0.029316 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.02439 | 0 | 0.02439 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a29f26fbea0a3dc55f5442506c8f8e9d6c38803 | 1,053 | py | Python | api/model.py | thomashusken/img-class | 0ceead70f4e30face409069fb49d02fb927abc03 | [
"MIT"
] | null | null | null | api/model.py | thomashusken/img-class | 0ceead70f4e30face409069fb49d02fb927abc03 | [
"MIT"
] | 2 | 2022-01-13T01:20:18.000Z | 2022-03-11T23:50:06.000Z | api/model.py | thomashusken/img-class | 0ceead70f4e30face409069fb49d02fb927abc03 | [
"MIT"
] | null | null | null | from torchvision import models
import json
import numpy as np
import torch
from collections import OrderedDict
from operator import itemgetter
import os


def return_top_5(processed_image):
    # inception = models.inception_v3(pretrained=True)
    inception = models.inception_v3()
    inception.load_state_dict(torch.load("data/inception_v3_google-1a9a5a14.pth"))
    inception.eval()
    result = inception(processed_image)

    # load imagenet classes
    class_idx = json.load(open('data/imagenet_class_index.json'))
    idx2label = [class_idx[str(k)][1] for k in range(len(class_idx))]
    result_idx = result.sort()[1][0][-5:]

    # exponentiate and get probabilities
    exps = np.exp(result.detach().numpy()[0])
    exps_sum = np.sum(exps)
    softmax = [np.round((j / exps_sum) * 100, 2) for j in exps]

    out = []
    for idx in result_idx:
        out.append((idx2label[idx], softmax[idx]))
    # out = {k: v for k, v in dict(out).items()}
    result = OrderedDict(sorted(dict(out).items(), key=itemgetter(1), reverse=True))
return result | 31.909091 | 84 | 0.698955 | 152 | 1,053 | 4.717105 | 0.453947 | 0.046025 | 0.066946 | 0.072524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02411 | 0.17284 | 1,053 | 33 | 85 | 31.909091 | 0.799082 | 0.1415 | 0 | 0 | 0 | 0 | 0.074444 | 0.074444 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.304348 | 0 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
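A hedged caller sketch for return_top_5: the image path and preprocessing chain below are illustrative assumptions (Inception v3 expects 299x299 normalized inputs), not part of the original API.
# Hypothetical usage; "cat.jpg" and the transform chain are only for illustration.
from PIL import Image
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize(299),
    transforms.CenterCrop(299),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
img = preprocess(Image.open("cat.jpg")).unsqueeze(0)  # add a batch dimension
print(return_top_5(img))  # OrderedDict of (label, probability%) pairs, best first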
2a2ef0f35e15ed911d8da568d2fb240bfd2a6fd8 | 5,551 | py | Python | ppobyter/marketplace/abilities.py | graatje/highscoresbot | 26207b8191ed6c9a3d7ecd49fea482e6d3603c36 | [
"MIT"
] | null | null | null | ppobyter/marketplace/abilities.py | graatje/highscoresbot | 26207b8191ed6c9a3d7ecd49fea482e6d3603c36 | [
"MIT"
] | null | null | null | ppobyter/marketplace/abilities.py | graatje/highscoresbot | 26207b8191ed6c9a3d7ecd49fea482e6d3603c36 | [
"MIT"
] | null | null | null | abilities = {1: 'Stench', 2: 'Drizzle', 3: 'Speed Boost', 4: 'Battle Armor', 5: 'Sturdy', 6: 'Damp', 7: 'Limber',
8: 'Sand Veil', 9: 'Static', 10: 'Volt Absorb', 11: 'Water Absorb', 12: 'Oblivious', 13: 'Cloud Nine',
14: 'Compound Eyes', 15: 'Insomnia', 16: 'Color Change', 17: 'Immunity', 18: 'Flash Fire',
19: 'Shield Dust', 20: 'Own Tempo', 21: 'Suction Cups', 22: 'Intimidate', 23: 'Shadow Tag',
24: 'Rough Skin', 25: 'Wonder Guard', 26: 'Levitate', 27: 'Effect Spore', 28: 'Synchronize',
29: 'Clear Body', 30: 'Natural Cure', 31: 'Lightning Rod', 32: 'Serene Grace', 33: 'Swift Swim',
34: 'Chlorophyll', 35: 'Illuminate', 36: 'Trace', 37: 'Huge Power', 38: 'Poison Point', 39: 'Inner Focus',
40: 'Magma Armor', 41: 'Water Veil', 42: 'Magnet Pull', 43: 'Soundproof', 44: 'Rain Dish',
45: 'Sand Stream', 46: 'Pressure', 47: 'Thick Fat', 48: 'Early Bird', 49: 'Flame Body',
50: 'Run Away', 51: 'Keen Eye', 52: 'Hyper Cutter', 53: 'Pickup', 54: 'Truant', 55: 'Hustle',
56: 'Cute Charm', 57: 'Plus', 58: 'Minus', 59: 'Forecast', 60: 'Sticky Hold', 61: 'Shed Skin',
62: 'Guts', 63: 'Marvel Scale', 64: 'Liquid Ooze', 65: 'Overgrow', 66: 'Blaze', 67: 'Torrent',
68: 'Swarm', 69: 'Rock Head', 70: 'Drought', 71: 'Arena Trap', 72: 'Vital Spirit', 73: 'White Smoke',
74: 'Pure Power', 75: 'Shell Armor', 76: 'Air Lock', 77: 'Tangled Feet', 78: 'Motor Drive', 79: 'Rivalry',
80: 'Steadfast', 81: 'Snow Cloak', 82: 'Gluttony', 83: 'Anger Point', 84: 'Unburden', 85: 'Heatproof',
86: 'Simple', 87: 'Dry Skin', 88: 'Download', 89: 'Iron Fist', 90: 'Poison Heal', 91: 'Adaptability',
92: 'Skill Link', 93: 'Hydration', 94: 'Solar Power', 95: 'Quick Feet', 96: 'Normalize', 97: 'Sniper',
98: 'Magic Guard', 99: 'No Guard', 100: 'Stall', 101: 'Technician', 102: 'Leaf Guard', 103: 'Klutz',
104: 'Mold Breaker', 105: 'Super Luck', 106: 'Aftermath', 107: 'Anticipation', 108: 'Forewarn',
109: 'Unaware', 110: 'Tinted Lens', 111: 'Filter', 112: 'Slow Start', 113: 'Scrappy',
114: 'Storm Drain', 115: 'Ice Body', 116: 'Solid Rock', 117: 'Snow Warning', 118: 'Honey Gather',
119: 'Frisk', 120: 'Reckless', 121: 'Multitype', 122: 'Flower Gift', 123: 'Bad Dreams', 124: 'Pickpocket',
125: 'Sheer Force', 126: 'Contrary', 127: 'Unnerve', 128: 'Defiant', 129: 'Defeatist', 130: 'Cursed Body',
131: 'Healer', 132: 'Friend Guard', 133: 'Weak Armor', 134: 'Heavy Metal', 135: 'Light Metal',
136: 'Multiscale', 137: 'Toxic Boost', 138: 'Flare Boost', 139: 'Harvest', 140: 'Telepathy', 141: 'Moody',
142: 'Overcoat', 143: 'Poison Touch', 144: 'Regenerator', 145: 'Big Pecks', 146: 'Sand Rush',
147: 'Wonder Skin', 148: 'Analytic', 149: 'Illusion', 150: 'Imposter', 151: 'Infiltrator', 152: 'Mummy',
153: 'Moxie', 154: 'Justified', 155: 'Rattled', 156: 'Magic Bounce', 157: 'Sap Sipper', 158: 'Prankster',
159: 'Sand Force', 160: 'Iron Barbs', 161: 'Zen Mode', 162: 'Victory Star', 163: 'Turboblaze',
164: 'Teravolt', 165: 'Aroma Veil', 166: 'Flower Veil', 167: 'Cheek Pouch', 168: 'Protean',
169: 'Fur Coat', 170: 'Magician', 171: 'Bulletproof', 172: 'Competitive', 173: 'Strong Jaw',
174: 'Refrigerate', 175: 'Sweet Veil', 176: 'Stance Change', 177: 'Gale Wings', 178: 'Mega Launcher',
179: 'Grass Pelt', 180: 'Symbiosis', 181: 'Tough Claws', 182: 'Pixilate', 183: 'Gooey', 184: 'Aerilate',
185: 'Parental Bond', 186: 'Dark Aura', 187: 'Fairy Aura', 188: 'Aura Break', 189: 'Primordial Sea',
190: 'Desolate Land', 191: 'Delta Stream', 192: 'Stamina', 193: 'Wimp Out', 194: 'Emergency Exit',
195: 'Water Compaction', 196: 'Merciless', 197: 'Shields Down', 198: 'Stakeout', 199: 'Water Bubble',
200: 'Steelworker', 201: 'Berserk', 202: 'Slush Rush', 203: 'Long Reach', 204: 'Liquid Voice',
205: 'Triage', 206: 'Galvanize', 207: 'Surge Surfer', 208: 'Schooling', 209: 'Disguise',
210: 'Battle Bond', 211: 'Power Construct', 212: 'Corrosion', 213: 'Comatose', 214: 'Queenly Majesty',
215: 'Innards Out', 216: 'Dancer', 217: 'Battery', 218: 'Fluffy', 219: 'Dazzling', 220: 'Soul-Heart',
221: 'Tangling Hair', 222: 'Receiver', 223: 'Power of Alchemy', 224: 'Beast Boost', 225: 'RKS System',
226: 'Electric Surge', 227: 'Psychic Surge', 228: 'Misty Surge', 229: 'Grassy Surge',
230: 'Full Metal Body', 231: 'Shadow Shield', 232: 'Prism Armor', 233: 'Neuroforce', 234: 'Intrepid Sword',
235: 'Dauntless Shield', 236: 'Libero', 237: 'Ball Fetch', 238: 'Cotton Down', 239: 'Propeller Tail',
240: 'Mirror Armor', 241: 'Gulp Missile', 242: 'Stalwart', 243: 'Steam Engine', 244: 'Punk Rock',
245: 'Sand Spit', 246: 'Ice Scales', 247: 'Ripen', 248: 'Ice Face', 249: 'Power Spot', 250: 'Mimicry',
251: 'Screen Cleaner', 252: 'Steely Spirit', 253: 'Perish Body', 254: 'Wandering Spirit',
255: 'Gorilla Tactics', 256: 'Neutralizing Gas', 257: 'Pastel Veil', 258: 'Hunger Switch',
259: 'Quick Draw', 260: 'Unseen Fist', 261: 'Curious Medicine', 262: 'Transistor', 263: "Dragon's Maw",
264: 'Chilling Neigh', 265: 'Grim Neigh', 266: 'As One', 267: 'As One'}
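A small helper one might pair with this lookup table; it is illustrative only and not part of the original module. Duplicate names (e.g. 'As One') resolve to the first matching id.
# Illustrative helper: case-insensitive reverse lookup from ability name to id.
def ability_id(name):
    for key, ability_name in abilities.items():
        if ability_name.lower() == name.lower():
            return key
    return None

# ability_id('Levitate') -> 26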
| 108.843137 | 120 | 0.566384 | 699 | 5,551 | 4.497854 | 0.888412 | 0.003181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.164374 | 0.240497 | 5,551 | 50 | 121 | 111.02 | 0.581357 | 0 | 0 | 0 | 0 | 0 | 0.469465 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a326bdb7af710f5433ff63b32a58d49b24260c8 | 3,951 | py | Python | strictdoc/backend/sdoc/grammar/grammar.py | BenGardiner/strictdoc | c927872a8f080ca1f86cdb7e99767fb25398dbd5 | [
"Apache-2.0"
] | null | null | null | strictdoc/backend/sdoc/grammar/grammar.py | BenGardiner/strictdoc | c927872a8f080ca1f86cdb7e99767fb25398dbd5 | [
"Apache-2.0"
] | 3 | 2022-02-21T10:57:01.000Z | 2022-02-21T13:22:23.000Z | strictdoc/backend/sdoc/grammar/grammar.py | BenGardiner/strictdoc | c927872a8f080ca1f86cdb7e99767fb25398dbd5 | [
"Apache-2.0"
] | null | null | null | STRICTDOC_GRAMMAR = r"""
Document[noskipws]:
'[DOCUMENT]' '\n'
// NAME: is deprecated. Both documents and sections now have TITLE:.
(('NAME: ' name = /.*$/ '\n') | ('TITLE: ' title = /.*$/ '\n')?)
(config = DocumentConfig)?
('\n' grammar = DocumentGrammar)?
free_texts *= SpaceThenFreeText
section_contents *= SectionOrRequirement
;
ReservedKeyword[noskipws]:
'DOCUMENT' | 'GRAMMAR'
;
DocumentGrammar[noskipws]:
'[GRAMMAR]' '\n'
'ELEMENTS:' '\n'
elements += GrammarElement
;
GrammarElement[noskipws]:
'- TAG: ' tag = RequirementType '\n'
' FIELDS:' '\n'
fields += GrammarElementField
;
GrammarElementField[noskipws]:
GrammarElementFieldString |
GrammarElementFieldSingleChoice |
GrammarElementFieldMultipleChoice |
GrammarElementFieldTag
;
GrammarElementFieldString[noskipws]:
' - TITLE: ' title=FieldName '\n'
' TYPE: String' '\n'
' REQUIRED: ' (required = BooleanChoice) '\n'
;
GrammarElementFieldSingleChoice[noskipws]:
' - TITLE: ' title=FieldName '\n'
' TYPE: SingleChoice'
'(' ((options = ChoiceOption) (options *= ChoiceOptionXs)) ')' '\n'
' REQUIRED: ' (required = BooleanChoice) '\n'
;
GrammarElementFieldMultipleChoice[noskipws]:
' - TITLE: ' title=FieldName '\n'
' TYPE: MultipleChoice'
'(' ((options = ChoiceOption) (options *= ChoiceOptionXs)) ')' '\n'
' REQUIRED: ' (required = BooleanChoice) '\n'
;
GrammarElementFieldTag[noskipws]:
' - TITLE: ' title=FieldName '\n'
' TYPE: Tag' '\n'
' REQUIRED: ' (required = BooleanChoice) '\n'
;
BooleanChoice[noskipws]:
('True' | 'False')
;
DocumentConfig[noskipws]:
('VERSION: ' version = /.*$/ '\n')?
('NUMBER: ' number = /.*$/ '\n')?
('OPTIONS:' '\n'
(' MARKUP: ' (markup = MarkupChoice) '\n')?
(' AUTO_LEVELS: ' (auto_levels = AutoLevelsChoice) '\n')?
)?
;
MarkupChoice[noskipws]:
'RST' | 'Text' | 'HTML'
;
AutoLevelsChoice[noskipws]:
'On' | 'Off'
;
Section[noskipws]:
'[SECTION]'
'\n'
('UID: ' uid = /.+$/ '\n')?
('LEVEL: ' level = /.*/ '\n')?
'TITLE: ' title = /.*$/ '\n'
free_texts *= SpaceThenFreeText
section_contents *= SectionOrRequirement
'\n'
'[/SECTION]'
'\n'
;
SectionOrRequirement[noskipws]:
'\n' (Section | Requirement | CompositeRequirement)
;
SpaceThenRequirement[noskipws]:
'\n' (Requirement | CompositeRequirement)
;
SpaceThenFreeText[noskipws]:
'\n' (FreeText)
;
ReservedKeyword[noskipws]:
'DOCUMENT' | 'GRAMMAR' | 'SECTION' | 'FREETEXT'
;
Requirement[noskipws]:
'[' !CompositeRequirementTagName requirement_type = RequirementType ']' '\n'
fields *= RequirementField
;
CompositeRequirementTagName[noskipws]:
'COMPOSITE_'
;
RequirementType[noskipws]:
!ReservedKeyword /[A-Z]+(_[A-Z]+)*/
;
RequirementField[noskipws]:
(
field_name = 'REFS' ':' '\n'
(field_value_references += Reference)
) |
(
field_name = FieldName ':'
(
((' ' field_value = SingleLineString | field_value = '') '\n') |
(' ' (field_value_multiline = MultiLineString) '\n')
)
)
;
CompositeRequirement[noskipws]:
'[COMPOSITE_' requirement_type = RequirementType ']' '\n'
fields *= RequirementField
requirements *= SpaceThenRequirement
'\n'
'[/COMPOSITE_REQUIREMENT]' '\n'
;
ChoiceOption[noskipws]:
/[\w\/-]+( *[\w\/-]+)*/
;
ChoiceOptionXs[noskipws]:
/, /- ChoiceOption
;
RequirementStatus[noskipws]:
'Draft' | 'Active' | 'Deleted';
RequirementComment[noskipws]:
'COMMENT: ' (
comment_single = SingleLineString | comment_multiline = MultiLineString
) '\n'
;
FreeText[noskipws]:
'[FREETEXT]' '\n'
parts+=TextPart
FreeTextEnd
;
FreeTextEnd: /^/ '[/FREETEXT]' '\n';
TextPart[noskipws]:
(InlineLink | NormalString)
;
NormalString[noskipws]:
(!SpecialKeyword !FreeTextEnd /(?ms)./)*
;
SpecialKeyword:
InlineLinkStart // more keywords are coming later
;
InlineLinkStart: '[LINK: ';
InlineLink[noskipws]:
InlineLinkStart value = /[^\]]*/ ']'
;
"""
| 20.365979 | 78 | 0.638826 | 310 | 3,951 | 8.067742 | 0.329032 | 0.02399 | 0.028788 | 0.043183 | 0.22391 | 0.19912 | 0.056777 | 0.056777 | 0.056777 | 0 | 0 | 0 | 0.176158 | 3,951 | 193 | 79 | 20.471503 | 0.768356 | 0 | 0 | 0.141935 | 0 | 0.006452 | 0.992913 | 0.258669 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a4151beaa3590019a94a7fe55fcae4fc19b8066 | 1,826 | py | Python | IMfunctions.py | KartoffelCheetah/personal-website-001 | c0700687ef90cc9a8a26c13af158fc91ee30a647 | [
"MIT"
] | null | null | null | IMfunctions.py | KartoffelCheetah/personal-website-001 | c0700687ef90cc9a8a26c13af158fc91ee30a647 | [
"MIT"
] | 7 | 2017-10-04T15:22:23.000Z | 2018-02-08T18:31:21.000Z | IMfunctions.py | KartoffelCheetah/personal-website-001 | c0700687ef90cc9a8a26c13af158fc91ee30a647 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.5
#-*- coding:utf-8 -*-
"""Functions handling images."""
import subprocess
import os

# supported ImageMagick formats?


def getDateTimeOriginal(filePath):
    """If image has DateTimeOriginal then returns it as a string. If it's missing or there isn't EXIF at all then returns empty string."""
    return subprocess.check_output(['identify', '-format', '%[EXIF:DateTimeOriginal]', filePath], timeout=3).decode('utf8').strip()


def getDimensions(filePath):
    """Returns dimensions of image in a tuple."""
    x, y = subprocess.check_output(['identify', '-format', '%[w]x%[h]', filePath], timeout=3).decode('utf8').strip().split('x')
    return int(x), int(y)


def createThumbnail(originPath, destinationPath, size='200x200'):
    """Creates a thumbnail image out of originPath in destinationPath of size (which defaults to maxWidth:200, maxHeight:200) and keeps ratio. The function creates the directory if it does not exist already. Returns the subprocess's response."""
    print('Create thumbnail -> %s' % destinationPath)
    destDirPath = os.path.split(destinationPath)[0]
    if not os.path.exists(destDirPath):
        os.makedirs(destDirPath)
    return subprocess.run(['convert', originPath, '-auto-orient', '-thumbnail', size, destinationPath], timeout=3)


def createRotatedImage(originPath, destinationPath):
    """Creates a rotated image out of originPath in destinationPath. EXIF orientation will be adjusted with the rotation. The function creates the directory if it does not exist already. Returns the subprocess's response."""
    print('Create rotatedImage -> %s' % destinationPath)
    destDirPath = os.path.split(destinationPath)[0]
    if not os.path.exists(destDirPath):
        os.makedirs(destDirPath)
    return subprocess.run(['convert', originPath, '-auto-orient', destinationPath], timeout=3)
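A short usage sketch for these helpers; the file paths are made up, and ImageMagick's identify/convert binaries are assumed to be on PATH.
# Hypothetical usage; 'photo.jpg' and the output directories are illustrative.
if __name__ == '__main__':
    print(getDateTimeOriginal('photo.jpg'))                  # '' if no EXIF date
    print(getDimensions('photo.jpg'))                        # e.g. (4032, 3024)
    createThumbnail('photo.jpg', 'thumbs/photo.jpg')         # max 200x200, ratio kept
    createRotatedImage('photo.jpg', 'rotated/photo.jpg')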
| 58.903226 | 234 | 0.727273 | 229 | 1,826 | 5.790393 | 0.454148 | 0.024133 | 0.031674 | 0.043741 | 0.508296 | 0.455505 | 0.352941 | 0.352941 | 0.352941 | 0.352941 | 0 | 0.014668 | 0.141292 | 1,826 | 30 | 235 | 60.866667 | 0.830995 | 0.382256 | 0 | 0.315789 | 0 | 0 | 0.158182 | 0.021818 | 0 | 0 | 0 | 0 | 0 | 1 | 0.210526 | false | 0 | 0.105263 | 0 | 0.526316 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a4164567c21ed5336f207306a71b8f7e94eae41 | 18,415 | py | Python | plugins/modules/oci_resource_manager_template.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 108 | 2020-05-19T20:46:10.000Z | 2022-03-25T14:10:01.000Z | plugins/modules/oci_resource_manager_template.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 90 | 2020-06-14T22:07:11.000Z | 2022-03-07T05:40:29.000Z | plugins/modules/oci_resource_manager_template.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 42 | 2020-08-30T23:09:12.000Z | 2022-03-25T16:58:01.000Z | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_resource_manager_template
short_description: Manage a Template resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a Template resource in Oracle Cloud Infrastructure
- For I(state=present), creates a private template in the specified compartment.
- "This resource has the following action operations in the M(oracle.oci.oci_resource_manager_template_actions) module: change_compartment."
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing this template.
- Required for create using I(state=present).
type: str
display_name:
description:
- The template's display name. Avoid entering confidential information.
- Required for create using I(state=present).
- Required for update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
description:
description:
- Description of the template. Avoid entering confidential information.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
long_description:
description:
- Detailed description of the template. This description is displayed in the Console page listing templates when the template is expanded. Avoid
entering confidential information.
- This parameter is updatable.
type: str
logo_file_base64_encoded:
description:
- "Base64-encoded logo to use as the template icon.
Template icon file requirements: PNG format, 50 KB maximum, 110 x 110 pixels."
- This parameter is updatable.
type: str
template_config_source:
description:
- ""
- Required for create using I(state=present).
- This parameter is updatable.
type: dict
suboptions:
template_config_source_type:
description:
- Specifies the `configSourceType` for uploading the Terraform configuration.
- This parameter is updatable.
type: str
choices:
- "ZIP_UPLOAD"
required: true
zip_file_base64_encoded:
description:
- ""
- This parameter is updatable.
- Applicable when template_config_source_type is 'ZIP_UPLOAD'
type: str
freeform_tags:
description:
- "Free-form tags associated with the resource. Each tag is a key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
- This parameter is updatable.
type: dict
template_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the template.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
state:
description:
- The state of the Template.
- Use I(state=present) to create or update a Template.
- Use I(state=absent) to delete a Template.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create template
oci_resource_manager_template:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name: display_name_example
description: description_example
template_config_source:
# required
template_config_source_type: ZIP_UPLOAD
zip_file_base64_encoded: zip_file_base64_encoded_example
# optional
long_description: long_description_example
logo_file_base64_encoded: logo_file_base64_encoded_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update template
oci_resource_manager_template:
# required
template_id: "ocid1.template.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: display_name_example
description: description_example
long_description: long_description_example
logo_file_base64_encoded: logo_file_base64_encoded_example
template_config_source:
# required
template_config_source_type: ZIP_UPLOAD
zip_file_base64_encoded: zip_file_base64_encoded_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update template using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_resource_manager_template:
# required
display_name: display_name_example
# optional
description: description_example
long_description: long_description_example
logo_file_base64_encoded: logo_file_base64_encoded_example
template_config_source:
# required
template_config_source_type: ZIP_UPLOAD
zip_file_base64_encoded: zip_file_base64_encoded_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Delete template
oci_resource_manager_template:
# required
template_id: "ocid1.template.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
- name: Delete template using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_resource_manager_template:
# required
display_name: display_name_example
state: absent
"""
RETURN = """
template:
description:
- Details of the Template resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- Unique identifier (L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm)) for the template.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing this template.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
category_id:
description:
- Unique identifier for the category where the template is located.
Possible values are `0` (Quick Starts), `1` (Service), `2` (Architecture), and `3` (Private).
returned: on success
type: str
sample: "ocid1.category.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- Human-readable name of the template.
returned: on success
type: str
sample: display_name_example
description:
description:
- Brief description of the template.
returned: on success
type: str
sample: description_example
long_description:
description:
- Detailed description of the template. This description is displayed in the Console page listing templates when the template is expanded. Avoid
entering confidential information.
returned: on success
type: str
sample: long_description_example
is_free_tier:
description:
- whether the template will work for free tier tenancy.
returned: on success
type: bool
sample: true
time_created:
description:
- "The date and time at which the template was created.
Format is defined by RFC3339.
Example: `2020-11-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
template_config_source:
description:
- ""
returned: on success
type: complex
contains:
template_config_source_type:
description:
- The type of configuration source to use for the template configuration.
returned: on success
type: str
sample: ZIP_UPLOAD
lifecycle_state:
description:
- The current lifecycle state of the template.
returned: on success
type: str
sample: ACTIVE
freeform_tags:
description:
- "Free-form tags associated with the resource. Each tag is a key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"category_id": "ocid1.category.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"description": "description_example",
"long_description": "long_description_example",
"is_free_tier": true,
"time_created": "2013-10-20T19:20:30+01:00",
"template_config_source": {
"template_config_source_type": "ZIP_UPLOAD"
},
"lifecycle_state": "ACTIVE",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
    oci_common_utils,
    oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
    OCIResourceHelperBase,
    get_custom_class,
)

try:
    from oci.resource_manager import ResourceManagerClient
    from oci.resource_manager.models import CreateTemplateDetails
    from oci.resource_manager.models import UpdateTemplateDetails

    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False


class TemplateHelperGen(OCIResourceHelperBase):
    """Supported operations: create, update, get, list and delete"""

    def get_module_resource_id_param(self):
        return "template_id"

    def get_module_resource_id(self):
        return self.module.params.get("template_id")

    def get_get_fn(self):
        return self.client.get_template

    def get_resource(self):
        return oci_common_utils.call_with_backoff(
            self.client.get_template, template_id=self.module.params.get("template_id"),
        )

    def get_required_kwargs_for_list(self):
        return dict()

    def get_optional_kwargs_for_list(self):
        optional_list_method_params = ["compartment_id", "template_id", "display_name"]

        return dict(
            (param, self.module.params[param])
            for param in optional_list_method_params
            if self.module.params.get(param) is not None
            and (
                self._use_name_as_identifier()
                or (
                    not self.module.params.get("key_by")
                    or param in self.module.params.get("key_by")
                )
            )
        )

    def list_resources(self):
        required_kwargs = self.get_required_kwargs_for_list()
        optional_kwargs = self.get_optional_kwargs_for_list()
        kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
        return oci_common_utils.list_all_resources(self.client.list_templates, **kwargs)

    def get_create_model_class(self):
        return CreateTemplateDetails

    def get_exclude_attributes(self):
        return ["logo_file_base64_encoded"]

    def create_resource(self):
        create_details = self.get_create_model()
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.create_template,
            call_fn_args=(),
            call_fn_kwargs=dict(create_template_details=create_details,),
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.CREATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.CREATE_OPERATION_KEY,
            ),
        )

    def get_update_model_class(self):
        return UpdateTemplateDetails

    def update_resource(self):
        update_details = self.get_update_model()
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.update_template,
            call_fn_args=(),
            call_fn_kwargs=dict(
                template_id=self.module.params.get("template_id"),
                update_template_details=update_details,
            ),
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.UPDATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.UPDATE_OPERATION_KEY,
            ),
        )

    def delete_resource(self):
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.delete_template,
            call_fn_args=(),
            call_fn_kwargs=dict(template_id=self.module.params.get("template_id"),),
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.DELETE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.DELETE_OPERATION_KEY,
            ),
        )


TemplateHelperCustom = get_custom_class("TemplateHelperCustom")


class ResourceHelper(TemplateHelperCustom, TemplateHelperGen):
    pass


def main():
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=True, supports_wait=True
    )
    module_args.update(
        dict(
            compartment_id=dict(type="str"),
            display_name=dict(aliases=["name"], type="str"),
            description=dict(type="str"),
            long_description=dict(type="str"),
            logo_file_base64_encoded=dict(type="str"),
            template_config_source=dict(
                type="dict",
                options=dict(
                    template_config_source_type=dict(
                        type="str", required=True, choices=["ZIP_UPLOAD"]
                    ),
                    zip_file_base64_encoded=dict(type="str"),
                ),
            ),
            freeform_tags=dict(type="dict"),
            defined_tags=dict(type="dict"),
            template_id=dict(aliases=["id"], type="str"),
            state=dict(type="str", default="present", choices=["present", "absent"]),
        )
    )

    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_helper = ResourceHelper(
        module=module,
        resource_type="template",
        service_client_class=ResourceManagerClient,
        namespace="resource_manager",
    )

    result = dict(changed=False)

    if resource_helper.is_delete_using_name():
        result = resource_helper.delete_using_name()
    elif resource_helper.is_delete():
        result = resource_helper.delete()
    elif resource_helper.is_update_using_name():
        result = resource_helper.update_using_name()
    elif resource_helper.is_update():
        result = resource_helper.update()
    elif resource_helper.is_create():
        result = resource_helper.create()

    module.exit_json(**result)


if __name__ == "__main__":
    main()
| 38.686975 | 160 | 0.645072 | 2,048 | 18,415 | 5.546387 | 0.15625 | 0.016639 | 0.025442 | 0.025883 | 0.597148 | 0.557972 | 0.467207 | 0.446694 | 0.4209 | 0.404085 | 0 | 0.011198 | 0.272604 | 18,415 | 475 | 161 | 38.768421 | 0.836805 | 0.023948 | 0 | 0.461722 | 0 | 0.04067 | 0.654771 | 0.104276 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033493 | false | 0.002392 | 0.019139 | 0.021531 | 0.088517 | 0.002392 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a419057ed32d0d72a2ab5830767804b84e6235c | 2,259 | py | Python | test.py | alexanderAustin/PythonGame | 9a3da340ffa426d3c6d59b5c2eb3f2a68792164f | [
"Apache-2.0"
] | null | null | null | test.py | alexanderAustin/PythonGame | 9a3da340ffa426d3c6d59b5c2eb3f2a68792164f | [
"Apache-2.0"
] | null | null | null | test.py | alexanderAustin/PythonGame | 9a3da340ffa426d3c6d59b5c2eb3f2a68792164f | [
"Apache-2.0"
] | null | null | null | # This was built from the tutorial https://www.raywenderlich.com/24252/beginning-game-programming-for-teens-with-python
import pygame, math, random
from pygame.locals import *
import pyganim

# 2 - Initialize the game
pygame.init()
width, height = 640, 480
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('PyGame - Testing')

rootImg = "resources/images/basic_game/"
rootAud = "resources/audio/basic_game/"

player = pygame.image.load(rootImg + "dude.png")
grass = pygame.image.load(rootImg + "grass.png")
castle = pygame.image.load(rootImg + "castle.png").convert_alpha()
# cow = pygame.image.load("resources/images/animals/cow/cow_front.png")  # subject to change

# Used https://github.com/asweigart/pyganim/tree/master/examples
# http://www.pygame.org/project-Pyganim+sprite+animation+module-2106-.html
# for the sprite sheets
cows = pyganim.getImagesFromSpriteSheet(
    filename="resources/images/animals/cow/cow_front.png",
    rows=4, cols=2,
    scale=2)
cframes = list(zip(cows, [100] * len(cows)))
cowObj = pyganim.PygAnimation(cframes)
cowObj.play()

cowsr = pyganim.getImagesFromSpriteSheet(
    filename="resources/images/animals/cow/cow_rear.png",
    rows=3, cols=3,
    scale=2)
crframes = list(zip(cowsr, [100] * len(cowsr)))
# crframes = crframes.pop()  # remove blank frame
print crframes
cowrObj = pyganim.PygAnimation(crframes)
cowrObj.play()

# 4 - keep looping through
running = 1
while running:
    # 5 - clear the screen before drawing it again
    screen.fill(0)

    # 6 - draw the screen elements
    for x in range(width/grass.get_width()+1):
        for y in range(height/grass.get_height()+1):
            screen.blit(grass, (x*100, y*100))
    cowObj.blit(screen, (200, 20))
    cowrObj.blit(screen, (50, 200))
    # screen.blit(castle, (100,100))

    # 7 - update the screen
    pygame.display.flip()

    # 8 - loop through the events
    for event in pygame.event.get():
        # check if the event is the X button
        if event.type == pygame.QUIT:
            # if it is quit the game
            pygame.quit()
            exit(0)

while 1:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit(0)
pygame.display.flip() | 30.945205 | 119 | 0.683046 | 315 | 2,259 | 4.866667 | 0.438095 | 0.03392 | 0.039139 | 0.043053 | 0.174821 | 0.147423 | 0.116112 | 0.08741 | 0 | 0 | 0 | 0.033586 | 0.182824 | 2,259 | 73 | 120 | 30.945205 | 0.796858 | 0.296591 | 0 | 0.26087 | 0 | 0 | 0.11514 | 0.087786 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.065217 | null | null | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a4c8d2566e58f41d640706fecad5ae122f31804 | 2,061 | py | Python | test/test_phones.py | tnurtdinov-st/python_traning | a5204b073101a9c7870b3219ba7ceff321cf6b41 | [
"Apache-2.0"
] | null | null | null | test/test_phones.py | tnurtdinov-st/python_traning | a5204b073101a9c7870b3219ba7ceff321cf6b41 | [
"Apache-2.0"
] | null | null | null | test/test_phones.py | tnurtdinov-st/python_traning | a5204b073101a9c7870b3219ba7ceff321cf6b41 | [
"Apache-2.0"
] | null | null | null | import re
from random import randrange


def test_phones_on_homepage(app):
    contact_from_homepage = app.contact.get_contact_list()[0]
    contact_from_editpage = app.contact.get_contact_info_from_edit_page(0)
    assert contact_from_homepage.all_phones_from_homepage == merge_phones_like_on_homepage(contact_from_editpage)


def test_phones_on_view_page(app):
    contact_from_viewpage = app.contact.get_contact_from_viewpage(0)
    contact_from_editpage = app.contact.get_contact_info_from_edit_page(0)
    assert contact_from_viewpage.homephone == contact_from_editpage.homephone
    assert contact_from_viewpage.mobilephone == contact_from_editpage.mobilephone
    assert contact_from_viewpage.workphone == contact_from_editpage.workphone
    assert contact_from_viewpage.phone2 == contact_from_editpage.phone2


def test_random_person_info(app):
    contact = app.contact.get_contact_list()
    index = randrange(len(contact))
    contact_from_homepage = app.contact.get_contact_list()[index]
    contact_from_editpage = app.contact.get_contact_info_from_edit_page(index)
    assert contact_from_homepage.firstname == contact_from_editpage.firstname
    assert contact_from_homepage.lastname == contact_from_editpage.lastname
    assert contact_from_homepage.id == contact_from_editpage.id
    assert contact_from_homepage.address1 == contact_from_editpage.address1
    assert contact_from_homepage.all_phones_from_homepage == merge_phones_like_on_homepage(contact_from_editpage)
    assert contact_from_homepage.all_emails == merge_emails_like_on_homepage(contact_from_editpage)


def merge_phones_like_on_homepage(contact):
    return "\n".join(filter(lambda x: x != "", map(lambda x: clear(x), filter(lambda x: x is not None, [contact.homephone, contact.mobilephone, contact.workphone, contact.phone2]))))


def merge_emails_like_on_homepage(contact):
    return "\n".join(filter(lambda x: x != "", map(lambda x: clear(x), filter(lambda x: x is not None, [contact.email1, contact.email2, contact.email3]))))


def clear(s):
return re.sub("[() -]", "", s) | 55.702703 | 181 | 0.802523 | 290 | 2,061 | 5.3 | 0.182759 | 0.207547 | 0.173064 | 0.091087 | 0.500976 | 0.482759 | 0.439167 | 0.413793 | 0.35784 | 0.35784 | 0 | 0.006518 | 0.106744 | 2,061 | 37 | 182 | 55.702703 | 0.828354 | 0 | 0 | 0.133333 | 0 | 0 | 0.00485 | 0 | 0 | 0 | 0 | 0 | 0.366667 | 1 | 0.2 | false | 0 | 0.066667 | 0.1 | 0.366667 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a5062017f7cc51174da9372c129642a7f1b6002 | 992 | py | Python | iotkit/led.py | dhrg/iot-edison | d1a4eed659891ecbebe45e4e2fcf55c7828d01e7 | [
"CC-BY-4.0"
] | null | null | null | iotkit/led.py | dhrg/iot-edison | d1a4eed659891ecbebe45e4e2fcf55c7828d01e7 | [
"CC-BY-4.0"
] | null | null | null | iotkit/led.py | dhrg/iot-edison | d1a4eed659891ecbebe45e4e2fcf55c7828d01e7 | [
"CC-BY-4.0"
] | null | null | null | import mraa # For accessing the GPIO
import time    # For sleeping between blinks

global led


def init_led(pin):
    global led
    led = mraa.Gpio(pin)         # Get the LED pin object
    led.dir(mraa.DIR_OUT)        # Set the direction as output
    led.write(0)


def write_led(signal):
    global led
    led.write(signal)


def main():
    pin = 5                      # we are using D5 pin
    led = mraa.Gpio(pin)         # Get the LED pin object
    led.dir(mraa.DIR_OUT)        # Set the direction as output
    ledState = False             # LED is off to begin with
    led.write(ledState)

    # One infinite loop coming up
    while True:
        if ledState == False:
            # LED is off, turn it on
            led.write(1)
            ledState = True      # LED is on
        else:
            led.write(0)
            ledState = False
        print "LED is: %s" % (ledState)
        # Wait for some time
        time.sleep(1)


if __name__ == '__main__':
    main()
del led | 24.8 | 61 | 0.556452 | 138 | 992 | 3.913043 | 0.427536 | 0.074074 | 0.044444 | 0.051852 | 0.340741 | 0.262963 | 0.262963 | 0.262963 | 0.262963 | 0.262963 | 0 | 0.009509 | 0.363911 | 992 | 40 | 62 | 24.8 | 0.846276 | 0.279234 | 0 | 0.37931 | 0 | 0 | 0.025605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.068966 | null | null | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a52a6d200cba48f11a709eb7672ab1b68768cc1 | 218 | py | Python | cogs/anime.py | TheLastNever/discord_bot | b4bb5513a70ebd0f6f794b035f498de3ae6f9a8f | [
"MIT"
] | null | null | null | cogs/anime.py | TheLastNever/discord_bot | b4bb5513a70ebd0f6f794b035f498de3ae6f9a8f | [
"MIT"
] | null | null | null | cogs/anime.py | TheLastNever/discord_bot | b4bb5513a70ebd0f6f794b035f498de3ae6f9a8f | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands


class anime(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self._last_member = None


def setup(bot):
bot.add_cog(anime(bot)) | 14.533333 | 32 | 0.637615 | 30 | 218 | 4.4 | 0.566667 | 0.106061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.266055 | 218 | 15 | 33 | 14.533333 | 0.825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a6147abbf7955e413192c373cc1c16dc8668901 | 5,315 | py | Python | amalia/simulation/PoissonSimulation.py | Aganonce/AMALIA-lite | a9c854b45cc6486763349c262e44ee4d27d6bfac | [
"MIT"
] | null | null | null | amalia/simulation/PoissonSimulation.py | Aganonce/AMALIA-lite | a9c854b45cc6486763349c262e44ee4d27d6bfac | [
"MIT"
] | null | null | null | amalia/simulation/PoissonSimulation.py | Aganonce/AMALIA-lite | a9c854b45cc6486763349c262e44ee4d27d6bfac | [
"MIT"
] | 2 | 2021-05-21T07:55:54.000Z | 2021-09-23T12:58:50.000Z | import logging
from tools.EventGeneration import convert_date, generate_random_time, generate_random_node_id

logger = logging.getLogger(__name__.split('.')[-1])

from features.ResponseTypeFeature import ResponseTypeFeature
from features.ReplayTimeSeriesFeature import ReplayTimeSeriesFeature
import tools.Cache as Cache

import random
import pandas as pd

import warnings
from scipy.sparse import SparseEfficiencyWarning
warnings.simplefilter('ignore', SparseEfficiencyWarning)

random.seed(1234)


class PoissonSimulation:
    '''
    Simple event simulation. Given a replay of base events
    and probabilities of responses, generate arbitrary single-layer
    event cascades.

    Parameters
    ----------
    Parameters here
    '''

    def __init__(self, cfg, generate_replies=None, **kwargs):
        self.start_date = cfg.get("limits.start_date", type=convert_date)
        self.end_date = cfg.get("limits.end_date", type=convert_date)
        self.time_delta = cfg.get("limits.time_delta", type=pd.Timedelta).total_seconds()
        if generate_replies is None:
            self.generate_replies = cfg.get("poisson_simulation.generate_replies", True)
        else:
            self.generate_replies = generate_replies
        self.cfg = cfg

    @Cache.amalia_cache
    def compute(self, dfs, train_dfs=None):
        # Retrieve replay time-series feature and response type feature
        ts = ReplayTimeSeriesFeature(self.cfg).compute(dfs)
        responses = ResponseTypeFeature(self.cfg).compute(dfs)
        res = []
        platforms = dfs.get_platforms()
        logger.warning('Very slow for dense data generation. Use ParallelPoissonSimulation to reduce runtime.')
        for platform in platforms:
            ts = ts[platform]
            responses = responses[platform]
            node_map = dfs.get_node_map(platform)
            # For all users that have a nonzero row in their ts, generate events
            logger.info('Generating new events.')
            nonzero_rows, __ = ts.nonzero()
            res = res + _generate_base_event(ts, node_map, nonzero_rows, self.start_date, responses, self.generate_replies, platform)
        # Return a pandas DataFrame sorted by time
        # Feed into the output module for actual result generation
        res = pd.DataFrame(res)
        if len(res) == 0:
            logger.error('PoissonSimulation produced no events. Terminating.')
            raise ValueError('PoissonSimulation produced no events.')
        return res.sort_values(by=['nodeTime']).reset_index(drop=True)


def _generate_base_event(ts, node_map, nonzero_rows, start_date, responses, generate_replies, platform):
    res = []
    for root_user_id in nonzero_rows:
        ts_row = ts.getrow(root_user_id)
        __, events = ts_row.nonzero()
        # For each user, get event counts and the time index in which those events occurred
        event_counts = [ts_row.getcol(event).toarray()[0][0] for event in events]
        for i in range(len(event_counts)):
            for j in range(event_counts[i]):
                # Generate the base event
                current_day_time = int(start_date + events[i] * 86400)
                root_event_id = generate_random_node_id()
                res.append({'nodeID': root_event_id, 'nodeUserID': node_map[root_user_id], 'parentID': root_event_id,
                            'rootID': root_event_id, 'actionType': 'tweet', 'nodeTime': current_day_time,
                            'platform': platform})
                # Generate responses to the base event
                if generate_replies:
                    generated_responses = _generate_responses(root_event_id, root_user_id, current_day_time, responses,
                                                              node_map, platform)
                    # if len(generated_responses) == 0:
                    #     msg = 'Root user ID ' + str(root_user_id) + ' generated no responses.'
                    #     logger.warning(msg)
                    res = res + generated_responses
    return res


def _generate_responses(root_event_id, root_user_id, current_day_time, responses, node_map, platform):
    res = []
    # For each event type generate responses using associated probabilities
    for response_type in responses:
        # Get the user response probabilities for the given event type and root user id
        response_row = responses[response_type].getrow(root_user_id)
        # If the probability is below some threshold, zero it out
        # Have the users associated with the nonzero indices generate an event
        response_row[response_row < random.random()] = 0
        __, acting_indices = response_row.nonzero()
        # Generate random timestamps and find the associated user id for each new event
        time_stamps = [generate_random_time(current_day_time) for x in acting_indices]
        node_user_ids = [node_map[x] for x in acting_indices]
        res = res + [{'nodeID': generate_random_node_id(), 'nodeUserID': node_user_id, 'parentID': root_event_id,
                      'rootID': root_event_id, 'actionType': response_type, 'nodeTime': node_time,
                      'platform': platform} for
                     node_user_id, node_time in zip(node_user_ids, time_stamps)]
    return res
| 40.572519 | 133 | 0.660207 | 641 | 5,315 | 5.24181 | 0.282371 | 0.021429 | 0.026786 | 0.017857 | 0.123214 | 0.098214 | 0.098214 | 0.098214 | 0.07619 | 0.07619 | 0 | 0.003809 | 0.259078 | 5,315 | 130 | 134 | 40.884615 | 0.849416 | 0.194544 | 0 | 0.069444 | 1 | 0 | 0.096973 | 0.014191 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.125 | 0 | 0.236111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a67605a117277819275759c2973c2e8735861bc | 2,497 | py | Python | ui/_alert.py | isbm/pybug | 406e47d0b75961d8b8a984b02053a9182f96c56d | [
"MIT"
] | null | null | null | ui/_alert.py | isbm/pybug | 406e47d0b75961d8b8a984b02053a9182f96c56d | [
"MIT"
] | null | null | null | ui/_alert.py | isbm/pybug | 406e47d0b75961d8b8a984b02053a9182f96c56d | [
"MIT"
] | 1 | 2020-03-04T10:11:41.000Z | 2020-03-04T10:11:41.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'xml/alert.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_Dialog(object):
    def setupUi(self, Dialog):
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(287, 171)
        Dialog.setWindowOpacity(0.8)
        Dialog.setAutoFillBackground(False)
        Dialog.setSizeGripEnabled(False)
        Dialog.setModal(True)
        self.verticalLayout_2 = QtGui.QVBoxLayout(Dialog)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.label = QtGui.QLabel(Dialog)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout.addWidget(self.label)
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.okButton = QtGui.QPushButton(Dialog)
        self.okButton.setObjectName(_fromUtf8("okButton"))
        self.horizontalLayout.addWidget(self.okButton)
        self.verticalLayout_2.addLayout(self.horizontalLayout)

        self.retranslateUi(Dialog)
        QtCore.QObject.connect(self.okButton, QtCore.SIGNAL(_fromUtf8("clicked()")), Dialog.on_alert)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        Dialog.setWindowTitle(_translate("Dialog", "Alert", None))
        self.label.setText(_translate("Dialog", "TextLabel", None))
        self.okButton.setText(_translate("Dialog", "Close", None))
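# --- Editor-added usage sketch (not produced by pyuic4) ---
# Generated Ui_* classes are conventionally mixed into a real QDialog that
# provides the slot referenced above (on_alert). The dialog below is a
# hypothetical example, not part of the original file.
if __name__ == "__main__":
    import sys

    class AlertDialog(QtGui.QDialog, Ui_Dialog):
        def __init__(self, parent=None):
            super(AlertDialog, self).__init__(parent)
            self.setupUi(self)

        def on_alert(self):
            # slot wired to okButton's clicked() signal in setupUi
            self.accept()

    app = QtGui.QApplication(sys.argv)
    dialog = AlertDialog()
    dialog.label.setText("Something went wrong")
    dialog.show()
    sys.exit(app.exec_())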
| 40.934426 | 103 | 0.716059 | 256 | 2,497 | 6.894531 | 0.371094 | 0.091785 | 0.045326 | 0.063456 | 0.129178 | 0.092918 | 0.092918 | 0.092918 | 0.092918 | 0.092918 | 0 | 0.019932 | 0.176211 | 2,497 | 60 | 104 | 41.616667 | 0.838114 | 0.073288 | 0 | 0.130435 | 1 | 0 | 0.048135 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.021739 | 0.065217 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a68fb1e3fb5aa04aab2b0e5a4bef92bf6a0aa37 | 689 | py | Python | scripts/client_lidar.py | Paulllit/projet_proto_lidar | a84c80261c81ab08e365c575c416863c06355b71 | [
"BSD-2-Clause"
] | null | null | null | scripts/client_lidar.py | Paulllit/projet_proto_lidar | a84c80261c81ab08e365c575c416863c06355b71 | [
"BSD-2-Clause"
] | null | null | null | scripts/client_lidar.py | Paulllit/projet_proto_lidar | a84c80261c81ab08e365c575c416863c06355b71 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import rospy
import math
from sensor_msgs.msg import LaserScan  # import the lidar's laser scan messages
import time
from rplidar_ros.srv import *
class Client():
    vel = None
    angle_est = None

    # Init
    def __init__(self):
        rospy.init_node('client_lidar')

    def listen_angle(self):
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            self.angle_est = rospy.ServiceProxy('angle', angle)
            self.angle_est()
            print(self.angle_est().angle_mes)
            r.sleep()  # throttle the polling loop to 10 Hz
########################################################## Main ######################################################
if __name__ == "__main__":
l=Client()
l.listen_angle()
rospy.spin()
| 23.758621 | 118 | 0.56894 | 83 | 689 | 4.433735 | 0.566265 | 0.086957 | 0.097826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003515 | 0.174165 | 689 | 28 | 119 | 24.607143 | 0.643234 | 0.088534 | 0 | 0 | 0 | 0 | 0.048828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.25 | null | null | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a6d411eb9f892a9e63182fd8d2cb98e424999b0 | 3,021 | py | Python | main.py | maxc0d3r/google-workspace-manager | d2a4e561ae8278455cd5b0c03283a413ebeaff54 | [
"MIT"
] | null | null | null | main.py | maxc0d3r/google-workspace-manager | d2a4e561ae8278455cd5b0c03283a413ebeaff54 | [
"MIT"
] | null | null | null | main.py | maxc0d3r/google-workspace-manager | d2a4e561ae8278455cd5b0c03283a413ebeaff54 | [
"MIT"
] | null | null | null | """
Usage:
    main.py domains list
    main.py domains add [--body=<request_body>] [--file=<input_file>]
    main.py users list [--domain=<domain_name>]
    main.py users get [--email=<email>]
    main.py users add [--body=<request_body>] [--file=<input_file>]
    main.py users update [--email=<email>] [--body=<request_body>] [--file=<input_file>]
    main.py (-h | --help)
    main.py (-V | --version)

Options:
    -h --help                  Show this screen
    -V --version               Show version
    --body=<request_body>      Request body in JSON format
    --domain=<domain_name>     Domain name
    --file=<input_file>        JSON file
"""
from docopt import docopt
import csv
import os
import json
import random
import string
import gwm.directory.domains
import gwm.directory.users
import gwm.directory.groups
from gwm.directory.domains import list_domains, add_domain, delete_domain
from gwm.directory.users import list_users, add_user, delete_user, get_user, update_user
APP_VERSION='0.0.0'
def generate_password():
    source = string.ascii_letters + string.digits
    password = ''.join((random.choice(source) for i in range(10)))
    return password
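# Editor note: random.choice is not a cryptographically secure source. If this
# helper is used for real account passwords, the stdlib ``secrets`` module is
# the usual alternative, e.g. (sketch, same 10-character length assumed):
#     import secrets
#     password = ''.join(secrets.choice(source) for _ in range(10))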
def main(args):
    customer_id = os.getenv('CUSTOMER_ID')
    if args['domains']:
        if args['list']:
            domains = list_domains(customer_id)
            print(json.dumps(domains, indent=1))
        elif args['add']:
            if args['--file']:
                with open(args['--file'], 'r') as f:
                    body = json.load(f)
                    response = add_domain(customer_id, body)
            else:
                response = add_domain(customer_id, json.loads(args['--body']))
            print(json.dumps(response, indent=1))
        elif args['get']:
            pass
        elif args['delete']:
            pass
    elif args['users']:
        if args['list']:
            users = list_users(args['--domain'])
            print(json.dumps(users, indent=1))
        elif args['add']:
            if args['--file']:
                with open(args['--file'], 'r') as f:
                    body = json.load(f)
                    response = add_user(body)
            else:
                request_body = json.loads(args['--body'])
                response = add_user(request_body)
            print(json.dumps(response, indent=1))
        elif args['get']:
            response = get_user(args['--email'])
            print(json.dumps(response, indent=1))
        elif args['update']:
            if args['--file']:
                with open(args['--file'], 'r') as f:
                    body = json.load(f)
                    response = update_user(args['--email'], body)
            else:
                request_body = json.loads(args['--body'])
                response = update_user(args['--email'], request_body)
            print(json.dumps(response, indent=1))
    else:
        pass


if __name__ == "__main__":
    arguments = docopt(__doc__, version='Google Workspace Manager {}'.format(APP_VERSION))
    main(arguments)
| 34.329545 | 90 | 0.568355 | 367 | 3,021 | 4.536785 | 0.217984 | 0.059459 | 0.05045 | 0.045045 | 0.392793 | 0.344144 | 0.344144 | 0.344144 | 0.273273 | 0.22042 | 0 | 0.005123 | 0.289308 | 3,021 | 87 | 91 | 34.724138 | 0.770377 | 0.209202 | 0 | 0.4375 | 0 | 0 | 0.076018 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0.09375 | 0.171875 | 0 | 0.21875 | 0.09375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2a6f14b3af74899266a2956f8a6a90c52813e8dd | 6,105 | py | Python | BMES_exps/BMES.py | volgachen/Chinese-Tokenization | 467e08da6fe271b6e33258d5aa6682c0405a3f32 | [
"Apache-2.0"
] | null | null | null | BMES_exps/BMES.py | volgachen/Chinese-Tokenization | 467e08da6fe271b6e33258d5aa6682c0405a3f32 | [
"Apache-2.0"
] | null | null | null | BMES_exps/BMES.py | volgachen/Chinese-Tokenization | 467e08da6fe271b6e33258d5aa6682c0405a3f32 | [
"Apache-2.0"
] | 1 | 2020-07-12T10:38:34.000Z | 2020-07-12T10:38:34.000Z | from collections import Counter
from math import log
from tqdm import tqdm
import re
from evaluation import evaluateSet
def build_model(train_set):
    hmm_model = {i: Counter() for i in 'SBME'}
    trans = {'SS': 0,
             'SB': 0,
             'BM': 0,
             'BE': 0,
             'MM': 0,
             'ME': 0,
             'ES': 0,
             'EB': 0
             }
    with open(train_set, 'r', encoding='utf-8') as f:
        cha = []
        tag = []
        for l in f:
            l = l.split()
            if (len(l) == 0):
                cha += " "
                tag += " "
            else:
                cha += l[0]
                tag += l[1]
    for i in range(len(tag)):
        if tag[i] != ' ':
            hmm_model[tag[i]][cha[i]] += int(1)
            if i + 1 < len(tag) and tag[i + 1] != ' ':
                trans[tag[i] + tag[i + 1]] += 1
    s_ = trans['SS'] + trans['SB']
    trans['SS'] /= s_
    trans['SB'] /= s_
    b_ = trans['BM'] + trans['BE']
    trans['BM'] /= b_
    trans['BE'] /= b_
    m_ = trans['MM'] + trans['ME']
    trans['MM'] /= m_
    trans['ME'] /= m_
    e_ = trans['ES'] + trans['EB']
    trans['ES'] /= e_
    trans['EB'] /= e_
    log_total = {i: log(sum(hmm_model[i].values())) for i in 'SBME'}
    trans = {i: log(j) for i, j in trans.items()}
    return hmm_model, trans, log_total
def viterbi(nodes):
    paths = nodes[0]
    for l in range(1, len(nodes)):
        paths_ = paths
        paths = {}
        for i in nodes[l]:
            nows = {}
            for j in paths_:
                if j[-1] + i in trans:
                    nows[j + i] = paths_[j] + nodes[l][i] + trans[j[-1] + i]
            k = list(nows.values()).index(max(nows.values()))
            paths[list(nows.keys())[k]] = list(nows.values())[k]
    return list(paths.keys())[list(paths.values()).index(max(list(paths.values())))]
def hmm_cut(s):
    nodes = [{i: log(j[t] + 1) - log_total[i] for i, j in hmm_model.items()} for t in s]
    tags = viterbi(nodes)
    words = [s[0]]
    for i in range(1, len(s)):
        if tags[i] in ['B', 'S']:
            words.append(s[i])
        else:
            words[-1] += s[i]
    return words
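# Editor-added example (illustrative): once build_model() has populated the
# module-level hmm_model / trans / log_total, hmm_cut segments a raw sentence
# by Viterbi-decoding its BMES tag sequence, e.g.
#     hmm_model, trans, log_total = build_model("BMES_corpus/rmrb_BMES.txt")
#     hmm_cut("今天天气不错")   # -> ['今天', '天气', '不错']  (output illustrative)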
def changenum(ustring):
    rstr = ""
    for uchar in ustring:
        unic = ord(uchar)
        if unic == 12288:
            unic = 32
        elif (65296 <= unic <= 65305) or (65345 <= unic <= 65370) or (65313 <= unic <= 65338):
            unic -= 65248
        rstr += chr(unic)
    # replace every number with 0
    rstr = re.sub(r"\d+\.?\d*", "0", rstr)
    # replace every English word with 1
    rstr = re.sub(r"[a-zA-Z]+\/", "1/", rstr)
    return rstr
if __name__ == '__main__':
    print("Train Set: PKU; Test Set: Weibo, w/o re-replacement")
    hmm_model, trans, log_total = build_model("BMES_corpus/rmrb_BMES.txt")
    # load test set without number and english replace
    nlpcc_f = open('data/nlpcc2016-wordseg-dev.dat', 'r', encoding='utf-8')
    lines = nlpcc_f.readlines()
    lines = [line.strip().split() for line in lines]
    nlpcc_f.close()
    # Test with Simple 2-gram model
    results = []
    for line in tqdm(lines):
        ori_line = ''.join(line)
        res = hmm_cut(ori_line)
        results.append(res)
    evaluateSet(results, lines)

    print("Train Set: PKU; Test Set: Weibo, w/ re-replacement")
    hmm_model, trans, log_total = build_model("BMES_corpus/rmrb_BMES_nonum.txt")
    # load test set without number and english replace
    nlpcc_f = open('data/nlpcc2016-wordseg-dev.dat', 'r', encoding='utf-8')
    lines = nlpcc_f.readlines()
    lines = [changenum(line) for line in lines]
    lines = [line.strip().split() for line in lines]
    nlpcc_f.close()
    # Test with Simple 2-gram model
    results = []
    for line in tqdm(lines):
        ori_line = ''.join(line)
        res = hmm_cut(ori_line)
        results.append(res)
    evaluateSet(results, lines)

    print("Train Set: MSR; Test Set: PKU, w/ re-replacement")
    hmm_model, trans, log_total = build_model("BMES_corpus/msr_BMES_nonum.txt")
    # load test set without number and english replace
    nlpcc_f = open('data/nlpcc2016-wordseg-dev.dat', 'r', encoding='utf-8')
    lines = nlpcc_f.readlines()
    lines = [changenum(line) for line in lines]
    lines = [line.strip().split() for line in lines]
    nlpcc_f.close()
    # Test with Simple 2-gram model
    results = []
    for line in tqdm(lines):
        ori_line = ''.join(line)
        res = hmm_cut(ori_line)
        results.append(res)
    evaluateSet(results, lines)

    print("Train Set: PKU; Test Set: PKU, w/ re-replacement")
    hmm_model, trans, log_total = build_model("BMES_corpus/rmrb_BMES_nonum.txt")
    # load test set without number and english replace
    nlpcc_f = open('BMES_corpus/pku_training.utf8', 'r', encoding='utf-8')
    lines = nlpcc_f.readlines()
    lines = [changenum(line) for line in lines]
    lines = [line.strip().split() for line in lines]
    lines = [line for line in lines if len(line)]
    nlpcc_f.close()
    # Test with Simple 2-gram model
    results = []
    for line in tqdm(lines):
        ori_line = ''.join(line)
        res = hmm_cut(ori_line)
        results.append(res)
    evaluateSet(results, lines)

    print("Train Set: MSR; Test Set: MSR, w/ re-replacement")
    hmm_model, trans, log_total = build_model("BMES_corpus/msr_BMES_nonum.txt")
    # load test set without number and english replace
    nlpcc_f = open('BMES_corpus/msr_training.utf8', 'r', encoding='utf-8')
    lines = nlpcc_f.readlines()
    lines = [changenum(line) for line in lines]
    lines = [line.strip().split() for line in lines]
    lines = [line for line in lines if len(line)]
    nlpcc_f.close()
    # Test with Simple 2-gram model
    results = []
    for line in tqdm(lines):
        ori_line = ''.join(line)
        res = hmm_cut(ori_line)
        results.append(res)
    evaluateSet(results, lines)
| 30.989848 | 95 | 0.53923 | 843 | 6,105 | 3.788849 | 0.163701 | 0.035066 | 0.045085 | 0.048215 | 0.650908 | 0.634941 | 0.634941 | 0.634941 | 0.624296 | 0.624296 | 0 | 0.022684 | 0.314005 | 6,105 | 196 | 96 | 31.147959 | 0.739971 | 0.067813 | 0 | 0.402685 | 0 | 0 | 0.122765 | 0.053812 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026846 | false | 0 | 0.033557 | 0 | 0.087248 | 0.033557 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a6f89f48cd8c7b16e844264141418335e7aca6a | 199 | py | Python | code/bqutils/auth.py | victoria-cds-sig/explore_mimiciv | 856705ab1af7ecbfe30d5e8054747b61de8af031 | [
"MIT"
] | 3 | 2020-11-23T04:29:16.000Z | 2021-05-29T06:22:00.000Z | code/bqutils/auth.py | victoria-cds-sig/explore_mimiciv | 856705ab1af7ecbfe30d5e8054747b61de8af031 | [
"MIT"
] | null | null | null | code/bqutils/auth.py | victoria-cds-sig/explore_mimiciv | 856705ab1af7ecbfe30d5e8054747b61de8af031 | [
"MIT"
] | 4 | 2020-11-23T05:18:54.000Z | 2021-05-29T06:23:12.000Z | import google.auth
def get_gcreds(scopes=None):
    if scopes is None:
        scopes = ["https://www.googleapis.com/auth/bigquery"]
    return google.auth.default(
        scopes=scopes)
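# Editor-added usage sketch: google.auth.default() returns a
# (credentials, project_id) tuple, so the helper can seed a BigQuery client.
# Assumes google-cloud-bigquery is installed; the block below is illustrative.
if __name__ == "__main__":
    from google.cloud import bigquery

    credentials, project = get_gcreds()
    client = bigquery.Client(credentials=credentials, project=project)
    print(client.project)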
| 18.090909 | 61 | 0.628141 | 24 | 199 | 5.166667 | 0.666667 | 0.16129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.246231 | 199 | 10 | 62 | 19.9 | 0.826667 | 0 | 0 | 0 | 0 | 0 | 0.203046 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a74193bd4405cdd9a1190cbabe2ed90d97be2b2 | 5,678 | py | Python | views.py | margish100/Api-json | 82b0561f6335708702b997dc8dd1e62c7df5d03d | [
"MIT"
] | null | null | null | views.py | margish100/Api-json | 82b0561f6335708702b997dc8dd1e62c7df5d03d | [
"MIT"
] | null | null | null | views.py | margish100/Api-json | 82b0561f6335708702b997dc8dd1e62c7df5d03d | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from rest_framework.parsers import JSONParser
from .models import Article
from .serializers import ArticleSerializer
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework import mixins
from rest_framework.authentication import SessionAuthentication,TokenAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import viewsets
from django.shortcuts import get_object_or_404
class ArticleAPIViewSet(viewsets.ViewSet):
    def list(self, request):
        articles = Article.objects.all()
        serializer = ArticleSerializer(articles, many=True)
        return Response(serializer.data)

    def create(self, request):
        serializer = ArticleSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def retrieve(self, request, pk=None):
        queryset = Article.objects.all()
        article = get_object_or_404(queryset, pk=pk)
        serializer = ArticleSerializer(article)
        return Response(serializer.data)

    def update(self, request, pk=None):
        article = Article.objects.get(pk=pk)
        serializer = ArticleSerializer(article, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class GenericAPIView(generics.GenericAPIView, mixins.ListModelMixin, mixins.CreateModelMixin,
                     mixins.UpdateModelMixin, mixins.RetrieveModelMixin,
                     mixins.DestroyModelMixin):
    serializer_class = ArticleSerializer
    queryset = Article.objects.all()
    lookup_field = 'id'
    # authentication_classes = [SessionAuthentication, BasicAuthentication]
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request, id=None):
        if id:
            return self.retrieve(request)
        else:
            return self.list(request)

    def post(self, request):
        return self.create(request)

    def put(self, request, id=None):
        return self.update(request, id)

    def delete(self, request, id):
        return self.destroy(request, id)
class ArticleAPIView(APIView):
    def get(self, request):
        articles = Article.objects.all()
        serializer = ArticleSerializer(articles, many=True)
        return Response(serializer.data)

    def post(self, request):
        serializer = ArticleSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ArticleDetails(APIView):
    def get_object(self, id):
        try:
            return Article.objects.get(id=id)
        except Article.DoesNotExist:
            return HttpResponse(status=status.HTTP_404_NOT_FOUND)

    def get(self, request, id):
        article = self.get_object(id)
        serializer = ArticleSerializer(article)
        return Response(serializer.data)

    def put(self, request, id):
        article = self.get_object(id)
        serializer = ArticleSerializer(article, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, id):
        article = self.get_object(id)
        article.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
def article_list(request):
    if request.method == 'GET':
        articles = Article.objects.all()
        serializer = ArticleSerializer(articles, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        serializer = ArticleSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


@api_view(['GET', 'PUT', 'DELETE'])
def article_detail(request, pk):
    try:
        article = Article.objects.get(pk=pk)
    except Article.DoesNotExist:
        return HttpResponse(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'GET':
        serializer = ArticleSerializer(article)
        return Response(serializer.data)
    elif request.method == 'PUT':
        # data = JSONParser().parse(request)
        serializer = ArticleSerializer(article, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        article.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
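# Editor-added wiring sketch -- a hypothetical urls.py for the views above.
# Route prefixes and names are assumptions, not part of this project:
#
#     from django.urls import path
#     from rest_framework.routers import DefaultRouter
#     from . import views
#
#     router = DefaultRouter()
#     router.register('viewset/articles', views.ArticleAPIViewSet, basename='article')
#
#     urlpatterns = router.urls + [
#         path('articles/', views.ArticleAPIView.as_view()),
#         path('articles/<int:id>/', views.ArticleDetails.as_view()),
#         path('generic/articles/<int:id>/', views.GenericAPIView.as_view()),
#         path('fbv/articles/', views.article_list),
#         path('fbv/articles/<int:pk>/', views.article_detail),
#     ]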
| 30.202128 | 105 | 0.66608 | 591 | 5,678 | 6.275804 | 0.165821 | 0.075492 | 0.116473 | 0.09059 | 0.576705 | 0.555406 | 0.540307 | 0.514424 | 0.479105 | 0.479105 | 0 | 0.010566 | 0.249912 | 5,678 | 187 | 106 | 30.363636 | 0.860296 | 0.017964 | 0 | 0.566667 | 0 | 0 | 0.007425 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0.025 | 0.575 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
2a7ffe46901a396481978f7847250513748371b6 | 611 | py | Python | knightmovs_test.py | rcolomina/pythonchess | 1b12ea4a1668da6c47dd39ff16d1e48af33ea2f5 | [
"MIT"
] | null | null | null | knightmovs_test.py | rcolomina/pythonchess | 1b12ea4a1668da6c47dd39ff16d1e48af33ea2f5 | [
"MIT"
] | 2 | 2016-11-01T09:57:36.000Z | 2016-11-01T10:05:50.000Z | knightmovs_test.py | rcolomina/pythonchess | 1b12ea4a1668da6c47dd39ff16d1e48af33ea2f5 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from piece import Piece
from gameNode import GameNode
from knightmovs import *
from functions import *
listPiecesWhite=[]
listPiecesBlack=[]
w1=Piece('Q',[3,1])
w2=Piece('N',[3,2])
w3=Piece('N',[4,2])
w4=Piece('B',[2,3])
listPiecesWhite.append(w1)
listPiecesWhite.append(w2)
listPiecesWhite.append(w3)
listPiecesWhite.append(w4)
b1=Piece('q',[3,4])
listPiecesBlack.append(b1)
gameNode=GameNode(listPiecesWhite,listPiecesBlack,"white")
assert(listTargetsKnight(gameNode,w3)==[[3,4]])
print "Checked knight movements on ",w2.coordenates," which should be ",listTargetsKnight(gameNode,w3)
| 21.821429 | 102 | 0.752864 | 83 | 611 | 5.542169 | 0.433735 | 0.182609 | 0.030435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044405 | 0.07856 | 611 | 27 | 103 | 22.62963 | 0.772647 | 0.026187 | 0 | 0 | 0 | 0 | 0.092593 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 0 | null | null | 0 | 0.210526 | null | null | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a863733ace28ef6349e137d5a4a527f5a173db6 | 423 | py | Python | bootstrapvz/providers/virtualbox/tasks/boot.py | qqshfox/bootstrap-vz | 38fc7c52407d015d3c75867bfea266d0aec6d7e2 | [
"Apache-2.0"
] | 1 | 2016-07-21T15:10:26.000Z | 2016-07-21T15:10:26.000Z | bootstrapvz/providers/virtualbox/tasks/boot.py | qqshfox/bootstrap-vz | 38fc7c52407d015d3c75867bfea266d0aec6d7e2 | [
"Apache-2.0"
] | 3 | 2017-05-10T15:04:10.000Z | 2017-06-02T18:14:50.000Z | bootstrapvz/providers/virtualbox/tasks/boot.py | qqshfox/bootstrap-vz | 38fc7c52407d015d3c75867bfea266d0aec6d7e2 | [
"Apache-2.0"
] | 14 | 2016-12-15T09:29:10.000Z | 2021-01-28T13:06:14.000Z | from bootstrapvz.base import Task
from bootstrapvz.common import phases
from bootstrapvz.common.tasks import grub
class AddVirtualConsoleGrubOutputDevice(Task):
    description = 'Adding `tty0\' as output device for grub'
    phase = phases.system_modification
    successors = [grub.WriteGrubConfig]

    @classmethod
    def run(cls, info):
        info.grub_config['GRUB_CMDLINE_LINUX_DEFAULT'].append('console=tty0')
| 30.214286 | 77 | 0.763593 | 49 | 423 | 6.489796 | 0.693878 | 0.141509 | 0.132075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005587 | 0.153664 | 423 | 13 | 78 | 32.538462 | 0.882682 | 0 | 0 | 0 | 0 | 0 | 0.120567 | 0.061466 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.8 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
2a8a638716da9a921d2678fd212392c39c24f195 | 547 | py | Python | satang_pro_signer/signer.py | thebevrishot/satang-pro-signer | e0d6affdaf3b3bf5a670bda160f8a7d341b41707 | [
"MIT"
] | null | null | null | satang_pro_signer/signer.py | thebevrishot/satang-pro-signer | e0d6affdaf3b3bf5a670bda160f8a7d341b41707 | [
"MIT"
] | null | null | null | satang_pro_signer/signer.py | thebevrishot/satang-pro-signer | e0d6affdaf3b3bf5a670bda160f8a7d341b41707 | [
"MIT"
] | null | null | null | import hashlib
import hmac
from satang_pro_signer import preparer
class Signer:
    def __init__(self, secret: bytes):
        self.secret = secret

    def sign(self, obj) -> bytes:
        parsed = preparer.Preparer(obj).encode()
        msg = bytes(parsed, encoding='utf-8')
        try:
            # better performance
            return hmac.digest(self.secret, msg, 'sha512')
        except AttributeError:
            # compatible with Python 3.6
            m = hmac.new(self.secret, msg, hashlib.sha512)
            return m.digest()
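# Editor-added usage sketch: the payload below is purely illustrative -- any
# object accepted by preparer.Preparer works.
if __name__ == "__main__":
    signer = Signer(secret=b"my-api-secret")
    signature = signer.sign({"nonce": 1234567890, "pair": "btc_thb"})
    print(signature.hex())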
| 26.047619 | 58 | 0.605119 | 64 | 547 | 5.078125 | 0.578125 | 0.123077 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023438 | 0.297989 | 547 | 21 | 59 | 26.047619 | 0.822917 | 0.082267 | 0 | 0 | 0 | 0 | 0.022 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.214286 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
2a8e83999cf4e97fafb8dbeb31077fa54eca387c | 7,826 | py | Python | visualization/POF/utils/keypoint_conversion.py | alvaro-budria/body2hands | 0eba438b4343604548120bdb03c7e1cb2b08bcd6 | [
"BSD-3-Clause"
] | 63 | 2021-05-14T02:55:16.000Z | 2022-03-13T01:51:12.000Z | visualization/POF/utils/keypoint_conversion.py | human2b/body2hands | 8ab4b206dc397c3b326f2b4ec9448c84ee8801fe | [
"BSD-3-Clause"
] | 9 | 2021-06-24T09:59:41.000Z | 2021-12-31T08:15:20.000Z | visualization/POF/utils/keypoint_conversion.py | human2b/body2hands | 8ab4b206dc397c3b326f2b4ec9448c84ee8801fe | [
"BSD-3-Clause"
] | 9 | 2021-05-17T03:33:28.000Z | 2022-02-17T02:30:44.000Z | import numpy as np
import numpy.linalg as nl
from utils.general import connMat
a4_to_main = {
    'body': np.array([1, 0, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 17, 15, 18, 16, 19, 20], dtype=np.int64),  # convert to order of openpose
    '1_body': np.array([1, 0, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 17, 15, 18, 16, 19, 20], dtype=np.int64),  # convert to order of openpose
    '2_body': np.array([1, 0, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 17, 15, 18, 16, 19, 20], dtype=np.int64),  # convert to order of openpose
    'left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),  # convert to order of freiburg
    '1_left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),  # convert to order of freiburg
    '2_left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),  # convert to order of freiburg
    'right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),  # convert to order of freiburg
    '1_right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),  # convert to order of freiburg
    '2_right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),  # convert to order of freiburg
    'openpose_lhand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'openpose_rhand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'openpose_lhand_score': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'openpose_rhand_score': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
}

human36m_to_main = {
    'body': np.array([9, 8, 14, 15, 16, 11, 12, 13, 4, 5, 6, 1, 2, 3, 17, 17, 17, 17, 10, 17], dtype=np.int64)
}

mpi3d_to_main = {
    'body': np.array([6, 5, 14, 15, 16, 9, 10, 11, 23, 24, 25, 18, 19, 20, 28, 28, 28, 28, 7], dtype=np.int64)
}

adam_to_main = {
    'body': np.array([12, 17, 19, 21, 16, 18, 20, 2, 5, 8, 1, 4, 7], dtype=np.int64),
    'select_body_main': np.arange(1, 14, dtype=np.int64)
}

COCO_to_main = {
    'body': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64),
    'body_valid': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64),
    'all_body': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64),
    'all_body_valid': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64)
}

SMPL_to_main = {  # actually COCOPLUS regressor to main
    'body': np.array([14, 12, 8, 7, 6, 9, 10, 11, 2, 1, 0, 3, 4, 5, 16, 15, 18, 17, 13], dtype=np.int64)
}

STB_to_main = {
    'left_hand': np.array([0, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1], dtype=np.int64)
}

MPII_to_main = {
    'body': np.array([16, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5, 16, 16, 16, 16, 9], dtype=np.int64),
    'body_valid': np.array([16, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5, 16, 16, 16, 16, 9], dtype=np.int64)
}

tsimon_to_main = {
    'left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'left_hand_valid': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'right_hand_valid': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
}

GAnerated_to_main = {
    'left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'left_hand_valid': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'left_hand_3d': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'right_hand': np.arange(21, dtype=np.int64),
    'right_hand_valid': np.arange(21, dtype=np.int64),
    'right_hand_3d': np.arange(21, dtype=np.int64)
}
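# Editor note (illustrative): each *_to_main array above is an index map, so a
# dataset's joints can usually be brought into this project's "main" joint
# order with a single fancy-indexing step, e.g. for a (20, 3) COCO-style
# array ``joints`` (a hypothetical variable):
#     joints_main = joints[COCO_to_main['body']]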
std_body_size = 267.807
std_hand_size = (82.2705 + 79.8843) / 2
def compute_size(joint3d, type_str):
    """ use this to compute size for scaling: joints are in main order.
    """
    length = 0.0
    for ic, conn in enumerate(connMat[type_str]):
        if type_str == 'body':
            if ic in (2, 3, 5, 6, 8, 9, 11, 12):
                length += nl.norm(joint3d[conn[0]] - joint3d[conn[1]])
        else:
            assert type_str == 'hand'
            length += nl.norm(joint3d[conn[0]] - joint3d[conn[1]])
    return length
def main_to_a4(joint):
    assert joint.shape[0] == 20
    output = np.zeros((21, joint.shape[1]), dtype=joint.dtype)
    for io, ic in enumerate(a4_to_main['body']):
        output[ic, :] = joint[io, :]
    output[2, :] = (output[6, :] + output[12, :]) / 2
    return output


def main_to_a4_hand(joint):
    assert joint.shape[0] == 21
    output = np.zeros(joint.shape, dtype=joint.dtype)
    output[0] = joint[0]
    for i in (1, 5, 9, 13, 17):
        output[i:i + 4] = joint[i + 3:i - 1:-1]
    return output
def assemble_total_3d(body, lhand, rhand):
    len_b = compute_size(body, 'body')
    if len_b > 0:
        sbody = (std_body_size / len_b) * body
    else:
        sbody = body
    len_l = compute_size(lhand, 'hand')
    if len_l > 0:
        slhand = (std_hand_size / len_l) * lhand
    else:
        slhand = lhand
    len_r = compute_size(rhand, 'hand')
    if len_r > 0:
        srhand = (std_hand_size / len_r) * rhand
    else:
        srhand = rhand
    sbody = main_to_a4(sbody)
    slhand = main_to_a4_hand(slhand)
    srhand = main_to_a4_hand(srhand)
    slhand_invalid = (slhand[:, 0] == 0) * (slhand[:, 1] == 0) * (slhand[:, 2] == 0)
    srhand_invalid = (srhand[:, 0] == 0) * (srhand[:, 1] == 0) * (srhand[:, 2] == 0)
    if not slhand[0].any():
        slhand_invalid[:] = True
    if not srhand[0].any():
        srhand_invalid[:] = True
    lhand_idx_a4 = 5
    rhand_idx_a4 = 11
    shift_lhand = sbody[lhand_idx_a4] - slhand[0]
    shift_rhand = sbody[rhand_idx_a4] - srhand[0]
    slhand += shift_lhand
    srhand += shift_rhand
    slhand[slhand_invalid] = 0
    srhand[srhand_invalid] = 0
    return np.concatenate([sbody, slhand, srhand], axis=0), std_body_size / len_b
def assemble_total_2d(body_2d, lhand_2d, rhand_2d):
    keypoint_list = []
    for i, item in enumerate((body_2d, lhand_2d, rhand_2d)):
        keypoint = item['uv_local']
        keypoint = (keypoint - 184) / item['scale2d'] + item['crop_center2d']
        valid = item['valid']
        keypoint = keypoint * np.stack([valid, valid], axis=1)  # remove those invalid values
        if i == 0:
            keypoint = main_to_a4(keypoint)
        else:
            keypoint = main_to_a4_hand(keypoint)
        keypoint_list.append(keypoint)
    ret = np.concatenate(keypoint_list, axis=0)
    ret[np.isnan(ret)] = 0.0  # nan when the whole joint is zero
    return ret
def main_to_human36m(joint):
    # except 9, 10 in human36m
    out = np.zeros((17, 3), dtype=joint.dtype)
    for im, ih in enumerate(human36m_to_main['body']):
        if ih == 17:  # virtual zero joint
            continue
        out[ih] = np.copy(joint[im, :])
    out[0] = (out[1] + out[4]) / 2  # middle hip
    out[7] = (out[1] + out[4] + out[11] + out[14]) / 4  # abdomen (average of l/r hip, l/r shoulder)
    return out
| 43.966292 | 153 | 0.564528 | 1,480 | 7,826 | 2.884459 | 0.106757 | 0.05739 | 0.098384 | 0.016866 | 0.511595 | 0.473179 | 0.468025 | 0.444601 | 0.430546 | 0.41368 | 0 | 0.201983 | 0.239586 | 7,826 | 177 | 154 | 44.214689 | 0.515376 | 0.067084 | 0 | 0.112676 | 0 | 0 | 0.058525 | 0 | 0 | 0 | 0 | 0 | 0.021127 | 1 | 0.042254 | false | 0 | 0.021127 | 0 | 0.105634 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a91414de57b627b89516d25ee369f8bbf7d2897 | 431 | py | Python | team/migrations/0014_auto_20200608_1855.py | Aleccc/gtcrew | 7e6e7024afdbf48ee796cb1f9a86b913e6843dda | [
"MIT"
] | null | null | null | team/migrations/0014_auto_20200608_1855.py | Aleccc/gtcrew | 7e6e7024afdbf48ee796cb1f9a86b913e6843dda | [
"MIT"
] | 21 | 2019-02-14T02:47:34.000Z | 2022-01-23T02:22:54.000Z | team/migrations/0014_auto_20200608_1855.py | Aleccc/gtcrew | 7e6e7024afdbf48ee796cb1f9a86b913e6843dda | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-06-08 22:55
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):

    dependencies = [
        ('team', '0013_auto_20200608_1824'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='bio',
            field=wagtail.core.fields.RichTextField(blank=True, max_length=1500),
        ),
    ]
| 21.55 | 81 | 0.62413 | 49 | 431 | 5.387755 | 0.816327 | 0.083333 | 0.128788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.110063 | 0.262181 | 431 | 19 | 82 | 22.684211 | 0.720126 | 0.104408 | 0 | 0 | 1 | 0 | 0.096354 | 0.059896 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a920b7ccb1c0d280a100c971c21a949a0ed335a | 408 | py | Python | discussions/migrations/0006_channel_is_deleted.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 32 | 2016-03-25T01:03:13.000Z | 2022-01-15T19:35:42.000Z | discussions/migrations/0006_channel_is_deleted.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 4,858 | 2016-03-03T13:48:30.000Z | 2022-03-29T22:09:51.000Z | discussions/migrations/0006_channel_is_deleted.py | umarmughal824/micromasters | ea92d3bcea9be4601150fc497302ddacc1161622 | [
"BSD-3-Clause"
] | 20 | 2016-08-18T22:07:44.000Z | 2021-11-15T13:35:35.000Z | # Generated by Django 2.1.5 on 2019-03-22 07:29
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('discussions', '0005_timestamped_discussions_models'),
    ]

    operations = [
        migrations.AddField(
            model_name='channel',
            name='is_deleted',
            field=models.BooleanField(default=False),
        ),
    ]
| 21.473684 | 63 | 0.620098 | 42 | 408 | 5.904762 | 0.809524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.063973 | 0.272059 | 408 | 18 | 64 | 22.666667 | 0.771044 | 0.110294 | 0 | 0 | 1 | 0 | 0.174515 | 0.096953 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a97aa991dbbe490d3f89c2502461d05c3f3477d | 3,808 | py | Python | test/test_CustomLayers.py | tomasheiskanen/pro_gan_pytorch | af44df67a2e771207ed96af4c0948980edd9d3d7 | [
"MIT"
] | 2 | 2020-11-19T14:20:51.000Z | 2020-12-10T10:54:39.000Z | test/test_CustomLayers.py | tomasheiskanen/pro_gan_pytorch | af44df67a2e771207ed96af4c0948980edd9d3d7 | [
"MIT"
] | null | null | null | test/test_CustomLayers.py | tomasheiskanen/pro_gan_pytorch | af44df67a2e771207ed96af4c0948980edd9d3d7 | [
"MIT"
] | 2 | 2020-11-19T11:32:46.000Z | 2020-12-10T23:32:18.000Z | import torch as th
from unittest import TestCase
from pro_gan_pytorch import CustomLayers as cL
device = th.device("cuda" if th.cuda.is_available() else "cpu")
class Test_equalized_conv2d(TestCase):
    def setUp(self):
        self.conv_block = cL._equalized_conv2d(21, 3, k_size=(3, 3), pad=1)
        # print the Equalized conv block
        print("\nEqualized conv block:\n%s" % str(self.conv_block))

    def test_forward(self):
        mock_in = th.randn(32, 21, 16, 16).to(device)
        mock_out = self.conv_block(mock_in)
        # check output
        self.assertEqual(mock_out.shape, (32, 3, 16, 16))
        self.assertEqual(th.isnan(mock_out).sum().item(), 0)
        self.assertEqual(th.isinf(mock_out).sum().item(), 0)
        # check the weight's scale
        self.assertAlmostEqual(self.conv_block.weight.data.std(), 1, delta=1e-1)

    def tearDown(self):
        # delete the computational resources
        del self.conv_block


class Test_equalized_deconv2d(TestCase):
    def setUp(self):
        self.deconv_block = cL._equalized_deconv2d(21, 3, k_size=(3, 3), pad=1)
        # print the Equalized conv block
        print("\nEqualized conv block:\n%s" % str(self.deconv_block))

    def test_forward(self):
        mock_in = th.randn(32, 21, 16, 16).to(device)
        mock_out = self.deconv_block(mock_in)
        # check output
        self.assertEqual(mock_out.shape, (32, 3, 16, 16))
        self.assertEqual(th.isnan(mock_out).sum().item(), 0)
        self.assertEqual(th.isinf(mock_out).sum().item(), 0)
        # check the weight's scale
        self.assertAlmostEqual(self.deconv_block.weight.data.std(), 1, delta=1e-1)

    def tearDown(self):
        # delete the computational resources
        del self.deconv_block


class Test_equalized_linear(TestCase):
    def setUp(self):
        self.lin_block = cL._equalized_linear(13, 52)
        # print the Equalized conv block
        print("\nEqualized linear block:\n%s" % str(self.lin_block))

    def test_forward(self):
        # test the forward for the first res block
        mock_in = th.randn(32, 13).to(device)
        mock_out = self.lin_block(mock_in)
        # check output
        self.assertEqual(mock_out.shape, (32, 52))
        self.assertEqual(th.isnan(mock_out).sum().item(), 0)
        self.assertEqual(th.isinf(mock_out).sum().item(), 0)
        # check the weight's scale
        self.assertAlmostEqual(self.lin_block.weight.data.std(), 1, delta=1e-1)

    def tearDown(self):
        # delete the computational resources
        del self.lin_block
class Test_PixelwiseNorm(TestCase):
    def setUp(self):
        self.normalizer = cL.PixelwiseNorm()

    def test_forward(self):
        mock_in = th.randn(1, 13, 1, 1).to(device)
        mock_out = self.normalizer(mock_in)
        # check output
        self.assertEqual(mock_out.shape, mock_in.shape)
        self.assertEqual(th.isnan(mock_out).sum().item(), 0)
        self.assertEqual(th.isinf(mock_out).sum().item(), 0)
        # we cannot guarantee that the norm of the output tensor will always be
        # less than the norm of the input tensor, so no further checks are done

    def tearDown(self):
        # delete the computational resources
        del self.normalizer


class Test_MinibatchStdDev(TestCase):
    def setUp(self):
        self.minStdD = cL.MinibatchStdDev()

    def test_forward(self):
        mock_in = th.randn(1, 13, 16, 16).to(device)
        mock_out = self.minStdD(mock_in)
        # check output
        self.assertEqual(mock_out.shape[1], mock_in.shape[1] + 1)
        self.assertEqual(th.isnan(mock_out).sum().item(), 0)
        self.assertEqual(th.isinf(mock_out).sum().item(), 0)

    def tearDown(self):
        # delete the computational resources
        del self.minStdD
| 30.464 | 82 | 0.642595 | 539 | 3,808 | 4.408163 | 0.187384 | 0.058923 | 0.071549 | 0.058923 | 0.722222 | 0.633838 | 0.633838 | 0.606902 | 0.606902 | 0.525253 | 0 | 0.031648 | 0.236607 | 3,808 | 124 | 83 | 30.709677 | 0.78569 | 0.154149 | 0 | 0.446154 | 0 | 0 | 0.028125 | 0 | 0 | 0 | 0 | 0 | 0.276923 | 1 | 0.230769 | false | 0 | 0.046154 | 0 | 0.353846 | 0.046154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aa48f57729bf1af92cafcf77cd0d806243d6e2f7 | 511 | py | Python | onlinecourse/tests.py | vicbolo78/Week-8-Lab | 39e086c2d8a07875218bce461b5ab1ca63b3b931 | [
"Apache-2.0"
] | null | null | null | onlinecourse/tests.py | vicbolo78/Week-8-Lab | 39e086c2d8a07875218bce461b5ab1ca63b3b931 | [
"Apache-2.0"
] | null | null | null | onlinecourse/tests.py | vicbolo78/Week-8-Lab | 39e086c2d8a07875218bce461b5ab1ca63b3b931 | [
"Apache-2.0"
] | null | null | null | import unittest
from django.test import TestCase
class TestQuestion(unittest.TestCase):
    def test_is_get_score(self, selected_ids):
        all_answers = self.choice_set.filter(is_correct=True).count()
        selected_correct = self.choice_set.filter(is_correct=True, id__in=selected_ids).count()
        if all_answers == selected_correct:
            return True
        else:
            return False


if __name__ == '__main__':
    unittest.main()
| 28.388889 | 112 | 0.688845 | 62 | 511 | 5.306452 | 0.548387 | 0.066869 | 0.079027 | 0.115502 | 0.194529 | 0.194529 | 0.194529 | 0 | 0 | 0 | 0 | 0 | 0.223092 | 511 | 17 | 113 | 30.058824 | 0.828715 | 0 | 0 | 0 | 0 | 0 | 0.015656 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 0 | null | null | 0 | 0.166667 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aa49195189dacb2ef252f543c4131fc6f7dbb7a4 | 1,482 | py | Python | trnsystor/statement/overwritecheck.py | samuelduchesne/pyTrnsys | f2deb5eb340a2814722eead5f8b6278a945c730d | [
"MIT"
] | 5 | 2021-07-20T16:07:06.000Z | 2022-02-09T07:57:21.000Z | trnsystor/statement/overwritecheck.py | samuelduchesne/pyTrnsys | f2deb5eb340a2814722eead5f8b6278a945c730d | [
"MIT"
] | 50 | 2021-02-12T07:36:55.000Z | 2022-03-21T10:40:47.000Z | trnsystor/statement/overwritecheck.py | samuelduchesne/pyTrnsys | f2deb5eb340a2814722eead5f8b6278a945c730d | [
"MIT"
] | 4 | 2019-06-28T17:56:05.000Z | 2020-02-24T16:33:28.000Z | """OverwriteCheck Statement."""
from trnsystor.statement.statement import Statement
class OverwriteCheck(Statement):
    """OverwriteCheck Statement.

    A common error in non standard and user written TRNSYS Type routines is
    to reserve too little space in the global output array. By default, each
    Type is accorded 20 spots in the global TRNSYS output array. However, there
    is no way to prevent the Type from then writing in (for example) the 21st
    spot; the entire global output array is always accessible. By activating the
    OVERWRITE_CHECK statement, the TRNSYS kernel checks to make sure that each
    Type did not write outside its allotted space. As with the NAN_CHECK
    statement, OVERWRITE_CHECK is a time consuming process and should only be
    used as a debugging tool when a simulation is ending in error.
    """

    def __init__(self, n=0):
        """Initialize an OVERWRITE_CHECK object.

        Hint:
            OVERWRITE_CHECK is a time consuming process and should only be used
            as a debugging tool when a simulation is ending in error.

        Args:
            n (int): Is 0 if the OVERWRITE_CHECK feature is not desired or 1 if
                OVERWRITE_CHECK feature is desired.
        """
        super().__init__()
        self.n = int(n)
        self.doc = "The OVERWRITE_CHECK Statement"

    def _to_deck(self):
        """Return deck representation of self."""
        return "OVERWRITE_CHECK {}".format(self.n)
| 39 | 80 | 0.691633 | 210 | 1,482 | 4.790476 | 0.47619 | 0.111332 | 0.050696 | 0.05169 | 0.202783 | 0.202783 | 0.202783 | 0.202783 | 0.202783 | 0.202783 | 0 | 0.006329 | 0.253711 | 1,482 | 37 | 81 | 40.054054 | 0.903255 | 0.703779 | 0 | 0 | 0 | 0 | 0.146875 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.125 | 0 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aa61678d6fbbb6831ab89015e270d094a885eab5 | 9,034 | py | Python | Chapter 16/Lookup/algorithms.py | codered-by-ec-council/Micro-Degree-in-Python-Security | cb16ed78ee38dad32e3909371edec8ff3ce6e6a7 | [
"MIT"
] | 4 | 2020-09-25T05:57:22.000Z | 2021-02-27T14:56:23.000Z | Chapter 16/Lookup/algorithms.py | codered-by-ec-council/Micro-Degree-in-Python-Security | cb16ed78ee38dad32e3909371edec8ff3ce6e6a7 | [
"MIT"
] | 4 | 2021-06-08T23:01:11.000Z | 2022-03-12T00:54:16.000Z | Chapter 16/Lookup/algorithms.py | codered-by-ec-council/Micro-Degree-in-Python-Security | cb16ed78ee38dad32e3909371edec8ff3ce6e6a7 | [
"MIT"
] | 5 | 2020-10-15T10:22:04.000Z | 2021-11-16T22:17:50.000Z | #!/usr/bin/env python3
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
---------------------------------------------------------------------
The first step in creating a cryptographic hash lookup table.
Creates a file of the following format:
[HASH_PART][WORDLIST_OFFSET][HASH_PART][WORDLIST_OFFSET]...
HASH_PART is the first 64 BITS of the hash, right-padded with zeroes if
necessary. WORDLIST_OFFSET is the position of the first character of the
word in the dictionary encoded as a 48-bit LITTLE ENDIAN integer.
'''
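# Editor-added sketch of the record layout described above: 64 bits of hash
# prefix followed by a 48-bit little-endian wordlist offset. The helper name
# and values are illustrative only, not part of the original tool.
def _pack_index_entry_example(digest, wordlist_offset):
    hash_part = digest[:8].ljust(8, b'\x00')               # first 64 bits, zero-padded
    offset_part = wordlist_offset.to_bytes(6, 'little')    # 48-bit little-endian offset
    return hash_part + offset_part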
import sys
import hashlib
from binascii import hexlify, unhexlify
try:
    import passlib
    # from passlib.utils.handlers import MAX_PASSWORD_SIZE
    from passlib.hash import nthash, lmhash, mysql41, oracle10, mysql323
    from passlib.hash import msdcc, msdcc2, postgres_md5
except ImportError:
    err = "\nFailed to import passlib"
    sys.stderr.write(err)
    sys.stderr.flush()
    passlib = None

try:
    import whirlpool
except ImportError:
    sys.stderr.write("\nFailed to import whirlpool")
    sys.stderr.flush()
    whirlpool = None
class BaseAlgorithm(object):
    '''
    Gives us a single interface to passlib and hashlib
    '''

    _data = None

    def __init__(self, data=None):
        self.data = data if data is not None else b''

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        if isinstance(value, str):
            value = value.encode()
        if not isinstance(value, bytes):
            raise TypeError('Data must be bytes')
        self._data = value

    def update(self, data):
        if isinstance(data, str):
            data = data.encode()
        if not isinstance(data, bytes):
            raise TypeError('Data must be bytes')
        self._data += data

    def digest(self):
        raise NotImplementedError()

    def hexdigest(self):
        return hexlify(self.digest())
##########################################################
# > HASHLIB
##########################################################
class Md4(BaseAlgorithm):
    name = 'Message Digest 4'
    key = 'md4'
    hex_length = 32

    def digest(self):
        return hashlib.new('md4', self.data).digest()


class Md5(BaseAlgorithm):
    name = 'Message Digest 5'
    key = 'md5'
    hex_length = 32

    def digest(self):
        return hashlib.md5(self.data).digest()


class Sha1(BaseAlgorithm):
    name = 'Secure Hashing Algorithm 1'
    key = 'sha1'
    hex_length = 40

    def digest(self):
        return hashlib.sha1(self.data).digest()


class Sha224(BaseAlgorithm):
    name = 'Secure Hashing Algorithm 2 (224 bit)'
    key = 'sha2-224'
    hex_length = 56

    def digest(self):
        return hashlib.sha224(self.data).digest()


class Sha256(BaseAlgorithm):
    name = 'Secure Hashing Algorithm 2 (256 bit)'
    key = 'sha2-256'
    hex_length = 64

    def digest(self):
        return hashlib.sha256(self.data).digest()


class Sha384(BaseAlgorithm):
    name = 'Secure Hashing Algorithm 2 (384 bit)'
    key = 'sha2-384'
    hex_length = 96

    def digest(self):
        return hashlib.sha384(self.data).digest()


class Sha512(BaseAlgorithm):
    name = 'Secure Hashing Algorithm 2 (512 bit)'
    key = 'sha2-512'
    hex_length = 128

    def digest(self):
        return hashlib.sha512(self.data).digest()


class Ripemd160(BaseAlgorithm):
    name = "RACE Integrity Primitives Evaluation Message Digest (160 bit)"
    key = "ripemd160"
    hex_length = 40

    def digest(self):
        md = hashlib.new('ripemd160')
        md.update(self._data)
        return md.digest()
##########################################################
# > SHA3
##########################################################
class Sha3_224(BaseAlgorithm):
    name = 'Secure Hashing Algorithm 3 (224 bit)'
    key = 'sha3-224'
    hex_length = 56

    def digest(self):
        return hashlib.sha3_224(self.data).digest()


class Sha3_256(BaseAlgorithm):
    name = 'Secure Hashing Algorithm 3 (256 bit)'
    key = 'sha3-256'
    hex_length = 64

    def digest(self):
        return hashlib.sha3_256(self.data).digest()


class Sha3_384(BaseAlgorithm):
    name = 'Secure Hashing Algorithm 3 (384 bit)'
    key = 'sha3-384'
    hex_length = 96

    def digest(self):
        return hashlib.sha3_384(self._data).digest()


class Sha3_512(BaseAlgorithm):
    name = 'Secure Hashing Algorithm 3 (512 bit)'
    key = 'sha3-512'
    hex_length = 128

    def digest(self):
        return hashlib.sha3_512(self.data).digest()
##########################################################
# > PASSLIB
##########################################################
class Lm(BaseAlgorithm):
    name = 'LM'
    key = 'lm'
    hex_length = 32

    def digest(self):
        return unhexlify(lmhash.encrypt(self.data[:15]))


class Ntlm(BaseAlgorithm):
    name = 'NTLM'
    key = 'ntlm'
    hex_length = 32

    def digest(self):
        return unhexlify(nthash.encrypt(self.data[:127]))


class MySql323(BaseAlgorithm):
    name = 'MySQL v3.2.3'
    key = 'mysql323'
    hex_length = 16

    def digest(self):
        return unhexlify(mysql323.encrypt(self.data[:64]))


class MySql41(BaseAlgorithm):
    ''' Ignore the preceding "*" symbol '''
    name = 'MySQL v4.1'
    key = 'mysql41'
    hex_length = 40

    def digest(self):
        return unhexlify(mysql41.encrypt(self.data[:64])[1:])


class Oracle10(BaseAlgorithm):
    '''
    Base Oracle 10g algorithm, this algorithm is salted with a username.
    Subclasses contain common usernames.
    '''
    hex_length = 16
    _user = ''

    def digest(self):
        return unhexlify(oracle10.encrypt(self.data[:64], user=self._user))


class Oracle10_Sys(Oracle10):
    name = 'Oracle 10g (SYS)'
    key = 'oracle10g-sys'
    _user = 'SYS'


class Oracle10_System(Oracle10):
    name = 'Oracle 10g (SYSTEM)'
    key = 'oracle10g-system'
    _user = 'SYSTEM'


class PostgresMd5(BaseAlgorithm):
    hex_length = 32
    _user = ''

    def digest(self):
        ''' Removes the "md5" prefix '''
        return unhexlify(postgres_md5.encrypt(self._data[:64], user=self._user)[3:])


class PostgresMd5_Root(PostgresMd5):
    name = 'Postgres MD5 (root)'
    key = 'postgres_md5-root'
    _user = 'root'


class PostgresMd5_Postgres(PostgresMd5):
    name = 'Postgres MD5 (postgres)'
    key = 'postgres_md5-postgres'
    _user = 'postgres'


class PostgresMd5_Admin(PostgresMd5):
    name = 'Postgres MD5 (admin)'
    key = 'postgres_md5-admin'
    _user = 'admin'


class Msdcc_Administrator(BaseAlgorithm):
    name = 'MS Domain Cached Credentials'
    key = 'msdcc-administrator'
    hex_length = 32
    _user = "administrator"

    def digest(self):
        return unhexlify(msdcc.encrypt(self._data[:64], user=self._user))


class Msdcc2_Administrator(BaseAlgorithm):
    name = 'MS Domain Cached Credentials v2'
    key = 'msdcc2-administrator'
    hex_length = 32
    _user = "administrator"

    def digest(self):
        return unhexlify(msdcc2.encrypt(self._data[:64], user=self._user))
##########################################################
# > Whirlpool
##########################################################
class Whirlpool(BaseAlgorithm):
    name = "Whirlpool"
    key = "whirlpool"
    hex_length = 128

    def digest(self):
        return whirlpool.new(self._data).digest()
# Base algorithms
algorithms = {
    Md4.key: Md4,
    Md5.key: Md5,
    Sha1.key: Sha1,
    Sha224.key: Sha224,
    Sha256.key: Sha256,
    Sha384.key: Sha384,
    Sha512.key: Sha512,
    Sha3_224.key: Sha3_224,
    Sha3_256.key: Sha3_256,
    Sha3_384.key: Sha3_384,
    Sha3_512.key: Sha3_512
}

if hasattr(hashlib, "algorithms_available"):
    if 'ripemd160' in hashlib.algorithms_available:
        algorithms[Ripemd160.key] = Ripemd160

if passlib is not None:
    algorithms[Lm.key] = Lm
    algorithms[Ntlm.key] = Ntlm
    algorithms[MySql323.key] = MySql323
    algorithms[MySql41.key] = MySql41
    algorithms[Oracle10_Sys.key] = Oracle10_Sys
    algorithms[Oracle10_System.key] = Oracle10_System
    algorithms[Msdcc_Administrator.key] = Msdcc_Administrator
    algorithms[Msdcc2_Administrator.key] = Msdcc2_Administrator
    algorithms[PostgresMd5_Admin.key] = PostgresMd5_Admin
    algorithms[PostgresMd5_Postgres.key] = PostgresMd5_Postgres
    algorithms[PostgresMd5_Root.key] = PostgresMd5_Root

if whirlpool is not None:
    algorithms[Whirlpool.key] = Whirlpool
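# Editor-added usage sketch: every registered class shares the BaseAlgorithm
# interface, so hashing a candidate word is uniform across algorithms.
if __name__ == '__main__':
    md = algorithms['sha2-256']('password123')
    print(md.key, md.hexdigest())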
| 23.464935 | 84 | 0.627629 | 1,080 | 9,034 | 5.157407 | 0.217593 | 0.038779 | 0.051347 | 0.064811 | 0.304668 | 0.267684 | 0.183662 | 0.137163 | 0.097666 | 0.024776 | 0 | 0.053932 | 0.215962 | 9,034 | 384 | 85 | 23.526042 | 0.732458 | 0.155966 | 0 | 0.25 | 0 | 0 | 0.143642 | 0.002957 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122727 | false | 0.027273 | 0.05 | 0.095455 | 0.759091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
aa66b44f404f54a5640fb09e472c1518a47d7552 | 5,142 | py | Python | taxi/migrations/0001_initial.py | alifelan/taxi-unico-web | edcbe8a8a9584350452fcea04a83247c5676b8f6 | [
"MIT"
] | 1 | 2021-05-26T09:06:08.000Z | 2021-05-26T09:06:08.000Z | taxi/migrations/0001_initial.py | alifelan/taxi-unico-web | edcbe8a8a9584350452fcea04a83247c5676b8f6 | [
"MIT"
] | 10 | 2019-04-13T00:08:17.000Z | 2019-05-09T10:43:06.000Z | taxi/migrations/0001_initial.py | alifelan/taxi-unico-web | edcbe8a8a9584350452fcea04a83247c5676b8f6 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2019-05-09 06:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='BusTrip',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('first_departure_date', models.DateTimeField()),
                ('first_arrival_date', models.DateTimeField()),
                ('second_departure_date', models.DateTimeField(blank=True, null=True)),
                ('second_arrival_date', models.DateTimeField(blank=True, null=True)),
                ('round_trip', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('city', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=50)),
                ('address', models.CharField(max_length=50)),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
                ('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='locations', to='taxi.City')),
            ],
        ),
        migrations.CreateModel(
            name='State',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('state', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Taxi',
            fields=[
                ('driver_name', models.CharField(max_length=50)),
                ('email', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('password', models.CharField(max_length=50)),
                ('plate', models.CharField(max_length=15)),
                ('model', models.CharField(max_length=50)),
                ('brand', models.CharField(max_length=50)),
                ('taxi_number', models.IntegerField()),
                ('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taxis', to='taxi.City')),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('name', models.CharField(max_length=50)),
                ('email', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('password', models.CharField(max_length=50)),
                ('card', models.CharField(max_length=16)),
            ],
        ),
        migrations.CreateModel(
            name='TaxiTrip',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('departure_date', models.DateTimeField()),
                ('arrival_date', models.DateTimeField()),
                ('price', models.FloatField(blank=True, null=True)),
                ('taxi_rating', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('user_rating', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('distance_meters', models.FloatField()),
                ('distance_string', models.CharField(max_length=100)),
                ('time_seconds', models.BigIntegerField()),
                ('time_string', models.CharField(max_length=100)),
                ('status', models.CharField(max_length=9)),
                ('bus_trip', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='taxiTrips', to='taxi.BusTrip')),
                ('destination', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='taxiTripsD', to='taxi.Location')),
                ('origin', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='taxiTripsO', to='taxi.Location')),
                ('taxi', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='trips', to='taxi.Taxi')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='taxiTrips', to='taxi.User')),
            ],
        ),
        migrations.AddField(
            model_name='city',
            name='state',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cities', to='taxi.State'),
        ),
        migrations.AddField(
            model_name='bustrip',
            name='destination',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='busTripsD', to='taxi.Location'),
        ),
        migrations.AddField(
            model_name='bustrip',
            name='origin',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='busTripsO', to='taxi.Location'),
        ),
    ]
| 46.745455 | 154 | 0.571373 | 489 | 5,142 | 5.871166 | 0.204499 | 0.088819 | 0.106583 | 0.142111 | 0.62278 | 0.595611 | 0.51132 | 0.445838 | 0.41031 | 0.319749 | 0 | 0.013279 | 0.28238 | 5,142 | 109 | 155 | 47.174312 | 0.76477 | 0.008363 | 0 | 0.460784 | 1 | 0 | 0.123602 | 0.00412 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.019608 | 0.019608 | 0 | 0.058824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aa6a2735bc18f7d76cf04368018e139d17c5dd19 | 1,116 | py | Python | pepdb/cms_pages/migrations/0006_auto_20151024_2019.py | dchaplinsky/pep.org.ua | 8633a65fb657d7f04dbdb12eb8ae705fa6be67e3 | [
"MIT"
] | 7 | 2015-12-21T03:52:46.000Z | 2020-07-24T19:17:23.000Z | pepdb/cms_pages/migrations/0006_auto_20151024_2019.py | dchaplinsky/pep.org.ua | 8633a65fb657d7f04dbdb12eb8ae705fa6be67e3 | [
"MIT"
] | 12 | 2016-03-05T18:11:05.000Z | 2021-06-17T20:20:03.000Z | pepdb/cms_pages/migrations/0006_auto_20151024_2019.py | dchaplinsky/pep.org.ua | 8633a65fb657d7f04dbdb12eb8ae705fa6be67e3 | [
"MIT"
] | 4 | 2016-07-17T20:19:38.000Z | 2021-03-23T12:47:20.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('cms_pages', '0005_auto_20150829_1516'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='body_en',
field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[EN] \u0422\u0435\u043a\u0441\u0442 \u043d\u0430 \u0431\u043b\u0430\u043a\u0438\u0442\u043d\u0456\u0439 \u043f\u0430\u043d\u0435\u043b\u0456'),
),
migrations.AddField(
model_name='homepage',
name='title_en',
field=models.CharField(default='', max_length=255),
),
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[UA] \u0422\u0435\u043a\u0441\u0442 \u043d\u0430 \u0431\u043b\u0430\u043a\u0438\u0442\u043d\u0456\u0439 \u043f\u0430\u043d\u0435\u043b\u0456'),
),
]
| 36 | 228 | 0.648746 | 122 | 1,116 | 5.795082 | 0.45082 | 0.056577 | 0.101839 | 0.089109 | 0.632249 | 0.591231 | 0.480905 | 0.480905 | 0.311174 | 0.311174 | 0 | 0.205245 | 0.214158 | 1,116 | 30 | 229 | 37.2 | 0.600912 | 0.018817 | 0 | 0.333333 | 0 | 0.083333 | 0.324794 | 0.240622 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aa6aeec0b7ed691ded9f765e25e1d736dda9bbf4 | 1,255 | py | Python | autorop/leak/puts.py | Tanson/autorop | 0d2fc71cdcc9649a6006aee641a3808f884d7fc4 | [
"MIT"
] | null | null | null | autorop/leak/puts.py | Tanson/autorop | 0d2fc71cdcc9649a6006aee641a3808f884d7fc4 | [
"MIT"
] | null | null | null | autorop/leak/puts.py | Tanson/autorop | 0d2fc71cdcc9649a6006aee641a3808f884d7fc4 | [
"MIT"
] | null | null | null | from autorop import PwnState, arutil
from pwn import ROP
def puts(state: PwnState) -> PwnState:
"""Leak libc addresses using ``puts``.
This function leaks the libc addresses of ``__libc_start_main`` and ``puts``
using ``puts``, placing them in ``state.leaks``.
Arguments:
state: The current ``PwnState`` with the following set
- ``target``: What we want to exploit.
- ``_elf``: pwntools ``ELF`` of ``state.binary_name``.
- ``overwriter``: Function which writes rop chain to the "right place".
- ``vuln_function``: Name of vulnerable function in binary,
which we can return to repeatedly.
Returns:
Mutated ``PwnState``, with the following updated
- ``target``: The instance of target from which we got a successful leak.
Hopefully it can still be interacted with.
- ``leaks``: Updated with ``"symbol": address`` pairs for each
function address of libc that was leaked.
"""
LEAK_FUNCS = ["__libc_start_main", "puts"]
def leaker(rop: ROP, address: int) -> ROP:
arutil.align_call(rop, "puts", [address])
return rop
return arutil.leak_helper(state, leaker, LEAK_FUNCS)
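# Illustrative note: arutil.leak_helper (provided by autorop's arutil module) is expected to
# send the chain built by ``leaker`` through state.overwriter for each symbol in LEAK_FUNCS
# and to record the leaked libc addresses in state.leaks, as described in the docstring above.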
| 35.857143 | 85 | 0.619124 | 154 | 1,255 | 4.948052 | 0.5 | 0.034121 | 0.034121 | 0.062992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.268526 | 1,255 | 34 | 86 | 36.911765 | 0.830065 | 0.684462 | 0 | 0 | 0 | 0 | 0.076923 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aa707bae2c10ff673808c3e9ddea8f24a1136fa7 | 4,463 | py | Python | train.py | MannyKayy/PlayableVideoGeneration | 14133f94e14a40fc17f283e5b01c168bc45990b6 | [
"MIT"
] | 1 | 2021-06-19T11:34:29.000Z | 2021-06-19T11:34:29.000Z | train.py | MannyKayy/PlayableVideoGeneration | 14133f94e14a40fc17f283e5b01c168bc45990b6 | [
"MIT"
] | null | null | null | train.py | MannyKayy/PlayableVideoGeneration | 14133f94e14a40fc17f283e5b01c168bc45990b6 | [
"MIT"
] | null | null | null | import argparse
import importlib
import os
import torch
import torch.nn as nn
import torchvision
import numpy as np
from dataset.dataset_splitter import DatasetSplitter
from dataset.transforms import TransformsGenerator
from dataset.video_dataset import VideoDataset
from evaluation.action_sampler import OneHotActionSampler, GroundTruthActionSampler
from evaluation.evaluator import Evaluator
from training.trainer import Trainer
from utils.configuration import Configuration
from utils.logger import Logger
torch.backends.cudnn.benchmark = True
if __name__ == "__main__":
# Loads configuration file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, required=True)
arguments = parser.parse_args()
config_path = arguments.config
configuration = Configuration(config_path)
configuration.check_config()
configuration.create_directory_structure()
config = configuration.get_config()
logger = Logger(config)
search_name = config["model"]["architecture"]
model = getattr(importlib.import_module(search_name), 'model')(config)
model.cuda()
datasets = {}
dataset_splits = DatasetSplitter.generate_splits(config)
transformations = TransformsGenerator.get_final_transforms(config)
for key in dataset_splits:
path, batching_config, split = dataset_splits[key]
transform = transformations[key]
datasets[key] = VideoDataset(path, batching_config, transform, split)
# Creates trainer and evaluator
trainer = getattr(importlib.import_module(config["training"]["trainer"]), 'trainer')(config, model, datasets["train"], logger)
# Evaluators will be assigned their specific action samplers to implement the evaluation strategy
evaluator_inferred_actions = getattr(importlib.import_module(config["evaluation"]["evaluator"]), 'evaluator')(config, datasets["validation"], logger, action_sampler=None, logger_prefix="validation_inferred_actions")
evaluator_inferred_actions_onehot = getattr(importlib.import_module(config["evaluation"]["evaluator"]), 'evaluator')(config, datasets["validation"], logger, action_sampler=OneHotActionSampler(), logger_prefix="validation_inferred_actions_onehot")
evaluator_ground_truth_actions = getattr(importlib.import_module(config["evaluation"]["evaluator"]), 'evaluator')(config, datasets["validation"], logger, action_sampler=None, logger_prefix="validation_gt_actions")
# Resume training
try:
trainer.load_checkpoint(model)
except Exception as e:
logger.print(e)
logger.print("- Warning: training without loading saved checkpoint")
model = nn.DataParallel(model)
model.cuda()
logger.get_wandb().watch(model, log='all')
last_save_step = 0
last_eval_step = 0
# Makes the model parallel and train
while trainer.global_step < config["training"]["max_steps"]:
model.train()
trainer.train_epoch(model)
# Saves the model
trainer.save_checkpoint(model)
if trainer.global_step > last_save_step + config["training"]["save_freq"]:
trainer.save_checkpoint(model, f"checkpoint_{trainer.global_step}")
last_save_step = trainer.global_step
model.eval()
# Evaluates the model
if trainer.global_step > last_eval_step + config["evaluation"]["eval_freq"]:
# Evaluates with actions predicted from the model
evaluator_inferred_actions.evaluate(model, trainer.global_step)
# Evaluates with actions predicted from the model in one hot version
# Disabled to improve evaluation time
#evaluator_inferred_actions_onehot.evaluate(model, trainer.global_step)
if config["data"]["ground_truth_available"]:
# Evaluates with ground truth actions translated to the model action space
# Uses the mapping between inferred and ground truth actions to configure the
# ground truth action space -> model action space translation function
action_mapping = evaluator_inferred_actions.get_best_action_mappings()
ground_truth_action_sampler = GroundTruthActionSampler(action_mapping)
evaluator_ground_truth_actions.set_action_sampler(ground_truth_action_sampler)
evaluator_ground_truth_actions.evaluate(model, trainer.global_step)
last_eval_step = trainer.global_step
| 40.572727 | 250 | 0.734484 | 498 | 4,463 | 6.355422 | 0.301205 | 0.03128 | 0.048341 | 0.044234 | 0.242022 | 0.205371 | 0.146288 | 0.120379 | 0.120379 | 0.120379 | 0 | 0.000548 | 0.182164 | 4,463 | 109 | 251 | 40.944954 | 0.866575 | 0.151692 | 0 | 0.030303 | 0 | 0 | 0.113263 | 0.036074 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.30303 | 0 | 0.30303 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
aa7171ed1f15768b83ea9a034a29465b15d2d581 | 1,039 | py | Python | integration-test/291-483-suppress-historical-closed.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | [
"MIT"
] | null | null | null | integration-test/291-483-suppress-historical-closed.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | [
"MIT"
] | null | null | null | integration-test/291-483-suppress-historical-closed.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
import dsl
from shapely.wkt import loads as wkt_loads
from . import FixtureTest
class SuppressHistoricalClosed(FixtureTest):
def test_cartoon_museum(self):
# Cartoon Art Museum (closed)
self.generate_fixtures(dsl.way(368173967, wkt_loads('POINT (-122.400856246311 37.78696485494709)'), {u'name': u'Cartoon Art Museum (closed)', u'gnis:reviewed': u'no', u'addr:state': u'CA', u'ele': u'7',
u'source': u'openstreetmap.org', u'wikidata': u'Q1045990', u'gnis:import_uuid': u'57871b70-0100-4405-bb30-88b2e001a944', u'gnis:feature_id': u'1657282', u'tourism': u'museum', u'gnis:county_name': u'San Francisco'}))
# POI shouldn't be visible early
self.assert_no_matching_feature(
15, 5242, 12664, 'pois',
{'id': 368173967})
# but POI should be present at z17 and marked as closed
self.assert_has_feature(
16, 10485, 25328, 'pois',
{'id': 368173967, 'kind': 'closed', 'min_zoom': 17})
| 43.291667 | 247 | 0.636189 | 141 | 1,039 | 4.588652 | 0.595745 | 0.030912 | 0.049459 | 0.068006 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.156404 | 0.218479 | 1,039 | 23 | 248 | 45.173913 | 0.640394 | 0.130895 | 0 | 0 | 0 | 0 | 0.32294 | 0.040089 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.076923 | false | 0 | 0.307692 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
aa7c9e46ce390c33e4950020c5b8e38f18d9c7b1 | 2,905 | py | Python | pic_carver.py | volf52/black_hat_python | 063c241db473d3ef25782efc17f651c3aa66b4f8 | [
"MIT"
] | null | null | null | pic_carver.py | volf52/black_hat_python | 063c241db473d3ef25782efc17f651c3aa66b4f8 | [
"MIT"
] | null | null | null | pic_carver.py | volf52/black_hat_python | 063c241db473d3ef25782efc17f651c3aa66b4f8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
@author : 'Muhammad Arslan <rslnrkmt2552@gmail.com>'
"""
import re
import zlib
import cv2
from scapy.all import *
pics = "pictues"
faces_dir = "faces"
pcap_file = "bhp.pcap"
def get_http_headers(http_payload):
try:
headers_raw = http_payload[:http_payload.index("\r\n\r\n")+2]
headers = dict(re.findall(r"(?P<name>.*?): (?P<value>.*?)\r\n", headers_raw))
except:
return None
return headers
def extract_images(headers, http_payload):
image = None
image_type = None
try:
if "image" in headers['Content-Type']:
image_type = headers['Content-Type'].split('/')[1]
image = http_payload[http_payload.index('\r\n\r\n') + 4:]
try:
if "Content-Encoding" in headers.keys():
if headers['Content-Encoding'] == 'gzip':
image = zlib.decompress(image, 16+zlib.MAX_WBITS)
elif headers['Content-Encoding'] == "deflate":
image = zlib.decompress(image)
except:
pass
except:
return None, None
return image, image_type
def face_detect(path, filename):
img = cv2.imread(path)
cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
rects = cascade.detectMultiScale(img, 1.3, 4, cv2.cv.CV_HAAR_SCALE_IMAGE, (20, 20))
if len(rects) == 0:
return False
rects[:, 2:] += rects[:, :2]
for x1, y1, x2, y2 in rects:
cv2.rectangle(img, (x1, y1), (x2, y2), (127, 255, 0), 2)
cv2.imwrite("%s/$s-%s" % (faces_dir, pcap_file, filename), img)
return True
def http_assembler(pcap_file):
carved_images = 0
faces_detected = 0
a = rdpcap(pcap_file)
sessions = a.sessions()
for session in sessions:
http_payload = ""
for packet in sessions[session]:
try:
if packet[TCP].dport == 80 or packet[TCP].sport == 80:
http_payload += str(packet[TCP].payload)
except:
pass
headers = get_http_headers(http_payload)
if headers is None:
continue
image, image_type = extract_images(headers, http_payload)
if image is not None and image_type is not None:
file_name = "%s-pic_carver_%d.%s" % (pcap_file, carved_images, image_type)
with open("%s/%s" % (pics, file_name), "wb") as fd:
fd.write(image)
carved_images += 1
try:
result = face_detect("%s/%s" % (pics, file_name), file_name)
if result is True:
faces_detected += 1
except:
pass
return carved_images, faces_detected
carved_images, faces_detected = http_assembler(pcap_file)
print "Extracted: %d images" % carved_images
print "Detected: %d faces" % faces_detected
| 25.9375 | 87 | 0.571084 | 363 | 2,905 | 4.407714 | 0.347107 | 0.06875 | 0.045 | 0.0225 | 0.0875 | 0.03875 | 0.03875 | 0.03875 | 0.03875 | 0 | 0 | 0.024233 | 0.303959 | 2,905 | 111 | 88 | 26.171171 | 0.767062 | 0.006885 | 0 | 0.175676 | 0 | 0 | 0.094681 | 0.010993 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.040541 | 0.054054 | null | null | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aa7e5a273a38c1c336f9c6538edfafbe1859aa62 | 1,536 | py | Python | py/testdir_hosts/test_rf_311M_rows_hosts.py | vkuznet/h2o | e08f7014f228cbaecfb21f57379970e6a3ac0756 | [
"Apache-2.0"
] | null | null | null | py/testdir_hosts/test_rf_311M_rows_hosts.py | vkuznet/h2o | e08f7014f228cbaecfb21f57379970e6a3ac0756 | [
"Apache-2.0"
] | null | null | null | py/testdir_hosts/test_rf_311M_rows_hosts.py | vkuznet/h2o | e08f7014f228cbaecfb21f57379970e6a3ac0756 | [
"Apache-2.0"
] | null | null | null | import unittest, sys, time
sys.path.extend(['.','..','py'])
import h2o_cmd, h2o, h2o_hosts, h2o_browse as h2b, h2o_import as h2i
# Uses your username specific json: pytest_config-<username>.json
# copy pytest_config-simple.json and modify to your needs.
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_311M_rows_hosts(self):
# since we'll be waiting, pop a browser
# h2b.browseTheCloud()
importFolderPath = 'standard'
csvFilename = 'new-poker-hand.full.311M.txt.gz'
csvPathname = importFolderPath + "/" + csvFilename
for trials in range(2):
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='local',
timeoutSecs=500)
print csvFilename, 'parse time:', parseResult['response']['time']
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(None,parseResult['destination_key'])
print "\n" + csvFilename
start = time.time()
RFview = h2o_cmd.runRF(parseResult=parseResult, trees=5, depth=5,
timeoutSecs=600, retryDelaySecs=10.0)
print "RF end on ", csvFilename, 'took', time.time() - start, 'seconds'
if __name__ == '__main__':
h2o.unit_main()
| 35.72093 | 108 | 0.647786 | 180 | 1,536 | 5.338889 | 0.611111 | 0.01873 | 0.052029 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028862 | 0.233073 | 1,536 | 42 | 109 | 36.571429 | 0.786927 | 0.116536 | 0 | 0.068966 | 0 | 0 | 0.137778 | 0.042222 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.172414 | null | null | 0.137931 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aa84562ff31e7d467b614463b77b32138e9f4492 | 372 | py | Python | amktools/util.py | jimbo1qaz/amktools | 25a65d7c9c09a2622065fcacdaed82e1f9d7fb2c | [
"BSD-3-Clause"
] | 2 | 2020-03-14T06:13:03.000Z | 2022-03-03T17:53:51.000Z | amktools/util.py | nyanpasu64/amktools | 25a65d7c9c09a2622065fcacdaed82e1f9d7fb2c | [
"BSD-3-Clause"
] | 14 | 2018-06-19T14:48:58.000Z | 2018-10-28T07:02:27.000Z | amktools/util.py | jimbo1qaz/amktools | 25a65d7c9c09a2622065fcacdaed82e1f9d7fb2c | [
"BSD-3-Clause"
] | null | null | null | from typing import TypeVar, Optional
def ceildiv(n: int, d: int) -> int:
return -(-n // d)
T = TypeVar("T")
def coalesce(*args: Optional[T]) -> T:
if len(args) == 0:
raise TypeError("coalesce expected >=1 argument, got 0")
for arg in args:
if arg is not None:
return arg
raise TypeError("coalesce() called with all None")
| 20.666667 | 64 | 0.594086 | 54 | 372 | 4.092593 | 0.592593 | 0.126697 | 0.199095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011111 | 0.274194 | 372 | 17 | 65 | 21.882353 | 0.807407 | 0 | 0 | 0 | 0 | 0 | 0.185484 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0.090909 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aa84e85a9b53b2b65be434c51bb6eb739b861665 | 3,638 | py | Python | sql_judge/model/load_types.py | r4ulill0/gui_dbjudge | 7c421c6f2fe1281f95e624242338b3e020ee5f28 | [
"MIT"
] | 1 | 2020-12-11T10:45:58.000Z | 2020-12-11T10:45:58.000Z | sql_judge/model/load_types.py | r4ulill0/gui_dbjudge | 7c421c6f2fe1281f95e624242338b3e020ee5f28 | [
"MIT"
] | null | null | null | sql_judge/model/load_types.py | r4ulill0/gui_dbjudge | 7c421c6f2fe1281f95e624242338b3e020ee5f28 | [
"MIT"
] | null | null | null | from PyQt5.QtCore import QAbstractTableModel, QAbstractItemModel
from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot
class LoadTypesProcess(QAbstractTableModel):
def __init__(self):
super().__init__()
self.csv_values = []
self.header_model = HeaderModel()
def index(self, row, column, parent=QModelIndex()):
return self.createIndex(row, column)
def rowCount(self, parent=QModelIndex()):
return len(self.csv_values)
def columnCount(self, parent=QModelIndex()):
count = 0
if len(self.csv_values):
count = len(self.csv_values[0])
return count
def data(self, index, role=Qt.DisplayRole):
if role == Qt.DisplayRole:
return self.csv_values[index.row()][index.column()]
def setData(self, index, value, role=Qt.EditRole):
if index.isValid() and role == Qt.EditRole:
if index.column() >= self.columnCount():
self.insertColumns(index.column(), 1)
if index.row() >= self.rowCount():
self.insertRows(index.row(), 1)
self.csv_values[index.row()][index.column()] = value
self.dataChanged.emit(index, index)
return True
return False
def flags(self, index):
return QAbstractTableModel.flags(self, index) | Qt.ItemIsEditable
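# The structural mutators below follow the Qt convention of bracketing every
# row/column change with matching begin*/end* calls so attached views are notified.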
def insertRows(self, position, rows, index=QModelIndex()):
self.beginInsertRows(index, position, position+rows-1)
for _ in range(rows):
new_row = []
for _ in range(self.columnCount()):
new_row.append("")
self.csv_values.append(new_row)
self.endInsertRows()
return True
def insertColumns(self, position, columns, index=QModelIndex()):
self.beginInsertColumns(index, position, position+columns-1)
for row in self.csv_values:
for _ in range(columns):
row.append("")
self.endInsertColumns()
return True
def removeRows(self, position, rows, index=QModelIndex()):
self.beginRemoveRows(index, position, position+rows-1)
for row in reversed(range(position, position+rows)):
self.csv_values.pop(row)
self.endRemoveRows()
def removeColumns(self, position, columns, index=QModelIndex()):
self.beginRemoveColumns(index, position, position+columns-1)
for row in self.csv_values:
for column in reversed(range(position, position+columns)):
row.pop(column)
self.endRemoveColumns()
class HeaderModel(QAbstractItemModel):
def __init__(self):
super().__init__()
self.values = []
def index(self, row, column, parent=QModelIndex()):
return self.createIndex(row, column)
def columnCount(self, parent=QModelIndex()):
return len(self.values)
def rowCount(self, parent=QModelIndex()):
return 1
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role == Qt.DisplayRole:
return self.values[section]
def setHeaderData(self, section, orientation, value, role=Qt.EditRole):
if role == Qt.EditRole:
self.values[section] = value
def removeColumn(self, column, index=QModelIndex()):
self.beginRemoveColumns(index, column, column)
self.values.pop(column)
self.endRemoveColumns()
def insertColumns(self, column, amount, index=QModelIndex()):
self.beginInsertColumns(index, column, column+amount-1)
for idx in range(amount):
self.values.append(str(self.columnCount()+idx))
self.endInsertColumns()
| 33.685185 | 75 | 0.631116 | 396 | 3,638 | 5.714646 | 0.194444 | 0.030932 | 0.057446 | 0.035793 | 0.47194 | 0.340256 | 0.176757 | 0.150243 | 0.150243 | 0.109589 | 0 | 0.004423 | 0.254261 | 3,638 | 107 | 76 | 34 | 0.829709 | 0 | 0 | 0.256098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.231707 | false | 0 | 0.02439 | 0.073171 | 0.439024 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aa8b120c78b48885a14d17efcfc8523380e3b89e | 388 | py | Python | base/struct_data.py | cateatfish108/AutoTest | 8697aadd4c60c6a7cb435f784fc5c588805067bf | [
"MIT"
] | null | null | null | base/struct_data.py | cateatfish108/AutoTest | 8697aadd4c60c6a7cb435f784fc5c588805067bf | [
"MIT"
] | null | null | null | base/struct_data.py | cateatfish108/AutoTest | 8697aadd4c60c6a7cb435f784fc5c588805067bf | [
"MIT"
] | null | null | null | #coding:utf-8
# Database struct
class DataBase:
url = ""
port = 3306
username = ""
password = ""
database = ""
charset = ""
# Test case info struct
class CaseInfo:
path = ""
case_list = []
# Test case struct
class Case:
url = ""
db_table = ""
case_id = ""
method = ""
data = {}
check_item = {}
status = ""
db_key = {}
check_result = "" | 12.933333 | 21 | 0.474227 | 36 | 388 | 4.944444 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020661 | 0.376289 | 388 | 30 | 21 | 12.933333 | 0.714876 | 0.095361 | 0 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.05 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
aa8f41fdb7d2b4f91adbdaae406e59a5680747b1 | 3,829 | py | Python | camper/handlers/users/edit.py | mrtopf/camper | 7016539f92202bbea608c6d53ce19097d4ad931d | [
"MIT"
] | 13 | 2016-03-13T02:33:39.000Z | 2021-04-01T13:09:12.000Z | camper/handlers/users/edit.py | comlounge/camper | 7016539f92202bbea608c6d53ce19097d4ad931d | [
"MIT"
] | 122 | 2016-03-10T09:28:09.000Z | 2021-09-07T23:49:05.000Z | camper/handlers/users/edit.py | mrtopf/camper | 7016539f92202bbea608c6d53ce19097d4ad931d | [
"MIT"
] | 5 | 2017-01-11T22:00:57.000Z | 2020-04-26T14:03:32.000Z | #encoding=utf8
from starflyer import Handler, redirect, asjson
from camper import BaseForm, db, BaseHandler
from camper import logged_in, is_admin
from wtforms import *
from sfext.babel import T
from camper.handlers.forms import *
import werkzeug.exceptions
from bson import ObjectId
from camper.handlers.images import AssetUploadView
class ProfileImageAssetUploadView(AssetUploadView):
"""custom upload handler for different version"""
variant = "medium_user"
class EditForm(BaseForm):
"""form for adding a barcamp"""
user_id = HiddenField()
fullname = TextField(T(u"Fullname"))
username = TextField(T(u"url name (username)"), [validators.Length(min=3, max=50), validators.Required(), validators.Regexp('^[a-zA-Z0-9_]+$')], description=T("this is the url path of your profile page, should only contain letters and numbers"))
bio = TextAreaField(T(u"About me"))
organisation = TextField(T(u"Organization"), [validators.Length(max=100)], description = T("your school, company, institution (max. 100 characters)"))
twitter = TextField(T(u"Twitter"), [validators.Length(max=100)], description = T("your twitter username"))
facebook = TextField(T(u"Facebook"), [validators.Length(max=255)], description = T("path to your facebook profile (without domain)"))
image = UploadField(T(u"Profile Image (optional)"))
# TODO: maybe change email, too?
def validate_email(form, field):
if form.app.module_map.userbase.users.find({'email' : field.data}).count() > 0:
raise ValidationError(form.handler._('this email address is already taken'))
def validate_username(form, field):
if form.app.module_map.userbase.users.find({'username' : field.data, '_id' : {'$ne': ObjectId(form.data['user_id'])}}).count() > 0:
raise ValidationError(form.handler._('this url path is already taken'))
class ProfileEditView(BaseHandler):
"""shows the profile edit form"""
template = "users/edit.html"
@logged_in()
def get(self):
"""render the view"""
form = EditForm(self.request.form, obj = self.user, config = self.config, app = self.app, handler = self)
if self.user.image:
try:
asset = self.app.module_map.uploader.get(self.user.image)
image = self.url_for("asset", asset_id = asset.variants['medium_user']._id)
except:
image = None
else:
image = None
if self.request.method=="POST":
if form.validate():
self.user.update(form.data)
self.user.save()
self.flash(self._("Your profile has been updated"), category="info")
url = self.url_for("profile", username = self.user.username)
return redirect(url)
else:
self.flash(self._("There have been errors in the form"), category="danger")
return self.render(form = form, user = self.user, image = image)
post = get
class ProfileImageDeleteView(BaseHandler):
"""delete the profile image"""
@asjson()
def json(self, d):
return d
@logged_in()
def delete(self):
"""delete the profile image and return to the profile page"""
asset_id = self.user.image
if asset_id is not None:
asset = self.app.module_map.uploader.remove(asset_id)
self.user.image = None
self.user.save()
self.flash(self._("Your profile image has been deleted"), category="info")
fmt = self.request.form.get("fmt", "html")
if fmt=="html":
url = self.url_for("profile", username = self.user.username)
return redirect(url)
else:
return self.json({"status": "ok"})
| 41.619565 | 254 | 0.630713 | 471 | 3,829 | 5.065817 | 0.348195 | 0.036882 | 0.023051 | 0.018441 | 0.228835 | 0.21207 | 0.187762 | 0.121542 | 0.091366 | 0.091366 | 0 | 0.006885 | 0.241316 | 3,829 | 91 | 255 | 42.076923 | 0.814458 | 0.062418 | 0 | 0.191176 | 0 | 0 | 0.16498 | 0 | 0 | 0 | 0 | 0.010989 | 0 | 1 | 0.073529 | false | 0 | 0.132353 | 0.014706 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aaa2a22421918ddfde6678f1b564035567b5ee57 | 2,073 | py | Python | bouncy.py | kary1806/bouncy | afbd8a2e030cd51c0c8b84062ce7aa2b51f549df | [
"MIT"
] | null | null | null | bouncy.py | kary1806/bouncy | afbd8a2e030cd51c0c8b84062ce7aa2b51f549df | [
"MIT"
] | null | null | null | bouncy.py | kary1806/bouncy | afbd8a2e030cd51c0c8b84062ce7aa2b51f549df | [
"MIT"
] | null | null | null | from itertools import count, tee
class Bouncy:
def __init__(self, porcentage):
"""
print the least number at which the given percentage of numbers are bouncy
:type porcentage: int -> target percentage of bouncy numbers
"""
nums = count(1)
rebound = self.sum_number(map(lambda number: float(self.is_rebound(number)), count(1)))
bouncy = next(
(
number
for number, number_b in zip(nums, rebound)
if number_b / number == (porcentage / 100)
)
)
print(bouncy)
def pairs(self, iterable):
"""
return consecutive overlapping pairs (prev, curr) from the iterable
:type iterable: iterable
"""
# tee() get iterator independent (default 2) with a input
a, b = tee(iterable)
# next() return next element in the secuence
next(b, None)
# zip() return new iterator
return zip(a, b)
def digits(self, number):
"""
return the digits of the number as a list of ints
:type number: int
"""
return list(map(int, str(number)))
def increase(self, number):
"""
return True if each digit is less than or equal to the next (non-decreasing digits)
:type number: int
"""
return all(prev <= curr for prev, curr in self.pairs(self.digits(number)))
def decrease(self, number):
"""
return True if each digit is greater than or equal to the next (non-increasing digits)
:type number: int
"""
return all(prev >= curr for prev, curr in self.pairs(self.digits(number)))
def is_rebound(self, number):
"""
return True if the number is bouncy (neither increasing nor decreasing)
:type number: int
"""
return not self.increase(number) and not self.decrease(number)
def sum_number(self, iterable):
"""
yield the running total of the elements of the iterable
:type iterable: list
"""
total = 0
for element in iterable:
total += element
yield total
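# Example: Bouncy(99) prints the least number at which exactly 99% of the numbers
# up to it are bouncy (1587000, the answer to Project Euler problem 112).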
test = Bouncy(99)
| 28.39726 | 102 | 0.554752 | 252 | 2,073 | 4.52381 | 0.305556 | 0.04386 | 0.057018 | 0.066667 | 0.374561 | 0.350877 | 0.350877 | 0.350877 | 0.350877 | 0.350877 | 0 | 0.006726 | 0.354559 | 2,073 | 72 | 103 | 28.791667 | 0.845291 | 0.315002 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.225806 | false | 0 | 0.032258 | 0 | 0.451613 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aaa40f7d32c94661f35c79a2fb1ee27a71d6e4e9 | 909 | py | Python | pipeline.py | ankitshah009/BioASQ-Rabbit | 1d3073fbcdebb58b91788b6c2ab0ad9380cb2498 | [
"Apache-2.0"
] | 1 | 2019-01-29T13:37:45.000Z | 2019-01-29T13:37:45.000Z | pipeline.py | ankitshah009/BioASQ-Rabbit | 1d3073fbcdebb58b91788b6c2ab0ad9380cb2498 | [
"Apache-2.0"
] | 1 | 2018-08-27T21:02:24.000Z | 2018-08-27T21:02:24.000Z | pipeline.py | ankitshah009/BioASQ-Rabbit | 1d3073fbcdebb58b91788b6c2ab0ad9380cb2498 | [
"Apache-2.0"
] | 8 | 2018-03-26T17:36:39.000Z | 2019-02-28T14:23:25.000Z | #!/usr/bin/env python
import sys
from deiis.rabbit import Message, MessageBus
from deiis.model import Serializer, DataSet, Question
if __name__ == '__main__':
if len(sys.argv) == 1:
print 'Usage: python pipeline.py <data.json>'
exit(1)
# filename = 'data/training.json'
filename = sys.argv[1]
print 'Processing ' + filename
fp = open(filename, 'r')
dataset = Serializer.parse(fp, DataSet)
fp.close()
# The list of services to send the questions to.
pipeline = ['mmr.core', 'tiler.concat', 'results']
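# The route list names the services each question should visit in order;
# publishing to 'expand.none' presumably hands the message to the start of that chain.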
count=0
bus = MessageBus()
for index in range(0,10):
question = dataset.questions[index]
# for question in dataset.questions:
message = Message(body=question, route=pipeline)
bus.publish('expand.none', message)
count = count + 1
print 'Sent {} questions for ranking.'.format(count)
print 'Done.'
| 28.40625 | 56 | 0.641364 | 115 | 909 | 5 | 0.565217 | 0.031304 | 0.027826 | 0.045217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011494 | 0.234323 | 909 | 31 | 57 | 29.322581 | 0.814655 | 0.147415 | 0 | 0 | 0 | 0 | 0.168612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.136364 | null | null | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aaa6b62d2482defbe3a9a248af3f048c7de3d0b9 | 310 | py | Python | other tests/test_6_create_record_label.py | pavelwearevolt/Cross_Edit_TestsAutomatization | 953691244d86c5832fe2a2705841711939a353a5 | [
"Apache-2.0"
] | null | null | null | other tests/test_6_create_record_label.py | pavelwearevolt/Cross_Edit_TestsAutomatization | 953691244d86c5832fe2a2705841711939a353a5 | [
"Apache-2.0"
] | null | null | null | other tests/test_6_create_record_label.py | pavelwearevolt/Cross_Edit_TestsAutomatization | 953691244d86c5832fe2a2705841711939a353a5 | [
"Apache-2.0"
] | null | null | null | __author__ = 'pavelkosicin'
from model.label import Label
def test_create_record_label(app):
app.label.create_recording_artist(Label(name="rl_#1", asap="WB86-8RH31.50UTS-J",
note="Mens autem qui est in festinabat non facere bonum, voluntas in malo reperit."))
| 38.75 | 129 | 0.664516 | 41 | 310 | 4.780488 | 0.829268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033898 | 0.23871 | 310 | 7 | 130 | 44.285714 | 0.79661 | 0 | 0 | 0 | 0 | 0 | 0.358065 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aaadabaa6eb1195381301ba6975765da7236103f | 1,114 | py | Python | src/users/models/componentsschemasmicrosoft_graph_workbooktablesortallof1.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | src/users/models/componentsschemasmicrosoft_graph_workbooktablesortallof1.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | src/users/models/componentsschemasmicrosoft_graph_workbooktablesortallof1.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComponentsschemasmicrosoftGraphWorkbooktablesortallof1(Model):
"""workbookTableSort.
:param fields:
:type fields: list[~users.models.MicrosoftgraphworkbookSortField]
:param match_case:
:type match_case: bool
:param method:
:type method: str
"""
_attribute_map = {
'fields': {'key': 'fields', 'type': '[MicrosoftgraphworkbookSortField]'},
'match_case': {'key': 'matchCase', 'type': 'bool'},
'method': {'key': 'method', 'type': 'str'},
}
def __init__(self, fields=None, match_case=None, method=None):
super(ComponentsschemasmicrosoftGraphWorkbooktablesortallof1, self).__init__()
self.fields = fields
self.match_case = match_case
self.method = method
| 33.757576 | 86 | 0.587074 | 97 | 1,114 | 6.57732 | 0.536082 | 0.08464 | 0.043887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003264 | 0.175045 | 1,114 | 32 | 87 | 34.8125 | 0.690968 | 0.429084 | 0 | 0 | 1 | 0 | 0.174497 | 0.055369 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aab07d8e601e02e0aaa27e2764b313638874d9dd | 591 | py | Python | 24/03/0.py | pylangstudy/201708 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | [
"CC0-1.0"
] | null | null | null | 24/03/0.py | pylangstudy/201708 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | [
"CC0-1.0"
] | 39 | 2017-07-31T22:54:01.000Z | 2017-08-31T00:19:03.000Z | 24/03/0.py | pylangstudy/201708 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | [
"CC0-1.0"
] | null | null | null | #!python3.6
import difflib
from pprint import pprint
import sys
text1 = ''' 1. Beautiful is better than ugly.
2. Explicit is better than implicit.
3. Simple is better than complex.
4. Complex is better than complicated.
'''.splitlines(keepends=True)
text2 = ''' 1. Beautiful is better than ugly.
3. Simple is better than complex.
4. Complicated is better than complex.
5. Flat is better than nested.
'''.splitlines(keepends=True)
d = difflib.Differ();
result = list(d.compare(text1, text2))
print('-----')
pprint(result)
print('-----')
print(sys.stdout.writelines(result))
| 25.695652 | 46 | 0.707276 | 84 | 591 | 4.97619 | 0.440476 | 0.15311 | 0.229665 | 0.136364 | 0.253589 | 0.253589 | 0.129187 | 0 | 0 | 0 | 0 | 0.028169 | 0.159052 | 591 | 22 | 47 | 26.863636 | 0.812877 | 0.01692 | 0 | 0.315789 | 0 | 0 | 0.534483 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.157895 | 0 | 0.157895 | 0.263158 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aab254d87e54d35e023480f5b36f3f53979b98fb | 8,635 | py | Python | signalworks/tracking/multitrack.py | lxkain/tracking | 00ed9a0b31c4880687a42df3bf9651e68e0c4360 | [
"MIT"
] | 2 | 2019-04-09T17:28:34.000Z | 2019-06-05T10:05:11.000Z | signalworks/tracking/multitrack.py | lxkain/tracking | 00ed9a0b31c4880687a42df3bf9651e68e0c4360 | [
"MIT"
] | 11 | 2019-04-19T23:03:38.000Z | 2019-11-22T17:59:07.000Z | signalworks/tracking/multitrack.py | lxkain/tracking | 00ed9a0b31c4880687a42df3bf9651e68e0c4360 | [
"MIT"
] | 3 | 2019-05-01T16:02:32.000Z | 2019-06-25T18:05:39.000Z | import copy
import json
import os
from collections import UserDict
from signalworks.tracking import Event, Partition, TimeValue, Value, Wave
class MultiTrack(UserDict):
"""
A dictionary containing time-synchronous tracks of equal duration and fs
"""
def __init__(self, mapping=None):
if mapping is None:
mapping = UserDict()
UserDict.__init__(self, mapping)
if __debug__: # long assert - TODO: do this on mapping, and then assign
self.check()
def check(self):
if len(self) > 1:
for i, (key, track) in enumerate(self.items()):
if track.fs != self.fs:
raise AssertionError(
f"all fs' must be equal, track #{i} ('{key}) does not match track #1"
)
if track.duration != next(iter(self.values())).duration:
raise AssertionError(
f"all durations must be equal, track #{i} ('{key}'') does not match track #1"
)
def get_fs(self):
if len(self):
return next(iter(self.values())).fs
else:
return 0 # or raise?
def set_fs(self, fs):
raise Exception("Cannot change fs, try resample()")
fs = property(get_fs, set_fs, doc="sampling frequency")
def get_duration(self):
if len(self):
if __debug__: # long assert - TODO: do this on mapping, and then assign
self.check()
return next(iter(self.values())).duration
else:
return 0
def set_duration(self, duration):
raise Exception("The duration cannot be set, it is derived from its conents")
duration = property(
get_duration, set_duration, doc="duration, as defined by its content"
)
def __eq__(self, other):
# excluding wav from comparison as long as wav writing/reading is erroneous
if (set(self.keys()) - {"wav"}) != (set(other.keys()) - {"wav"}):
return False
for k in self.keys():
if k != "wav" and self[k] != other[k]:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __setitem__(self, key, value):
if len(self):
if value.duration != self.duration:
raise AssertionError("duration does not match")
if value.fs != self.fs:
raise AssertionError("fs does not match")
UserDict.__setitem__(self, key, value)
def __str__(self):
s = ""
for key, track in self.items():
s += "%s: %s\n" % (key, track)
return s
def __add__(self, other):
if self is other:
other = copy.deepcopy(other)
obj = type(self)()
for k in self: # .iterkeys():
obj[k] = self[k] + other[k]
return obj
def resample(self, fs):
multiTrack = type(self)()
for key, track in self.items():
multiTrack[key] = track.resample(fs)
return multiTrack
def crossfade(self, other, length):
"""
append multiTrack to self, using a crossfade of a specified length in samples
"""
assert type(self) == type(other)
assert self.keys() == other.keys()
assert self.fs == other.fs
assert isinstance(length, int)
assert length > 0
assert other.duration >= length
assert self.duration >= length
multiTrack = type(self)()
for key, _ in self.items():
multiTrack[key] = self[key].crossfade(other[key], length)
return multiTrack
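# Illustrative usage (assuming mt1 and mt2 are MultiTracks with identical keys and fs):
#   joined = mt1.crossfade(mt2, length=1024)  # per-track 1024-sample crossfaded join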
def select(self, a, b, keys=None):
assert a >= 0
assert a < b # or a <= b?
assert b <= self.duration
"""return a new multitrack object with all track views from time a to b"""
if keys is None:
keys = self.keys()
multiTrack = type(self)()
for key in keys:
multiTrack[key] = self[key].select(a, b)
return multiTrack
# TODO: should this be deprecated in favor of / should this call - the more general time_warp function?
def scale_duration(self, factor):
if factor != 1:
for t in self.values():
if isinstance(t, Partition):
t.time *= (
factor
) # last time parameter IS duration, so no worries about duration
elif isinstance(t, TimeValue) or isinstance(t, Event):
if factor > 1: # make room for expanded times
t.duration = int(t.duration * factor)
t.time *= factor
else:
t.time *= factor
t.duration = int(t.duration * factor)
else:
raise NotImplementedError # wave?
def time_warp(self, x, y):
"""in-place"""
for track in iter(self.values()):
track.time_warp(x, y)
default_suffix = ".mtt"
@classmethod
def read(cls, name):
"""Loads info about stored tracks from name, adding extension if missing,
and loads tracks by calling read(<name without extension>) for them.
"""
name_wo_ext = os.path.splitext(name)[
0
] # TODO: upgrade all path stuff to pathlib
if name == name_wo_ext:
name += cls.default_suffix
with open(name, "rb") as mtt_file:
track_infos = json.load(mtt_file)
self = cls()
for track_type_name, track_info_list in track_infos:
track_type = globals()[track_type_name]
track_info: UserDict = UserDict(track_info_list)
track = track_type.read(name_wo_ext, **track_info)
self[track_info["track_name"]] = track
return self
@classmethod
def read_edf(cls, path):
raise NotImplementedError
# TODO: adapt
# the following is copied from elsewhere and won't work as is
import pyedflib
with pyedflib.EdfReader(str(path)) as f:
labels = f.getSignalLabels()
for label in labels:
index = labels.index(label)
wav = Wave(f.readSignal(index), f.getSampleFrequency(index))
wav.label = label
wav.path = f.with_name(f.stem + "-" + label + ".wav")
wav.min = f.getPhysicalMinimum(index)
wav.max = f.getPhysicalMaximum(index)
wav.unit = f.getPhysicalDimension(index)
# self.add_view(wav, panel_index=panel_index, y_min=wav.min, y_max=wav.max)
@classmethod
def read_xdf(cls, path):
raise NotImplementedError
import openxdf
# TODO: below is a place holder and needs to be finalize
xdf = openxdf.OpenXDF(path)
signals = openxdf.Signal(xdf, path.with_suffix(".nkamp"))
# TODO: automate this, why are the xdf.header names different from signals.list_channels?
for label in ["ECG", "Chin"]:
# logger.info(f'reading {label} channel')
sig = signals.read_file(label)[label]
wav = Wave(sig.ravel(), 200)
wav.label = label
# wav.path = file.with_name(file.stem + '-' + label + '.wav')
wav.min = -3200
wav.max = 3200
wav.unit = "1"
# self.add_view(wav, panel_index=panel_index, y_min=wav.min, y_max=wav.max)
def write(self, name):
"""Saves info about stored tracks to name, adding extension if missing,
and calls write(<name without extension>) for the contained tracks.
Note!: not saving wav as long as wav writing/reading is erroneous
"""
name_wo_ext = os.path.splitext(name)[0]
if name == name_wo_ext:
name += self.default_suffix
track_infos = [] # list of dicts storing track info
for track_name, track in sorted(self.items()):
if track_name == "wav":
continue
track_info = {
"track_name": track_name,
"fs": int(track.get_fs()),
"duration": int(track.get_duration()),
}
if type(track) == Value:
track_info.update({"value_type": type(track.get_value()).__name__})
track.write(name_wo_ext, **track_info)
track_infos.append((type(track).__name__, sorted(track_info.items())))
with open(name, "wt") as mtt_file:
json.dump(track_infos, mtt_file)
| 36.901709 | 107 | 0.555067 | 1,047 | 8,635 | 4.451767 | 0.233047 | 0.019309 | 0.011586 | 0.008367 | 0.236645 | 0.145462 | 0.091826 | 0.091826 | 0.064364 | 0.064364 | 0 | 0.004046 | 0.341633 | 8,635 | 233 | 108 | 37.060086 | 0.815831 | 0.168848 | 0 | 0.216667 | 0 | 0 | 0.058672 | 0 | 0 | 0 | 0 | 0.012876 | 0.077778 | 1 | 0.111111 | false | 0 | 0.038889 | 0.005556 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aab57c61fdbffbd48b08ceac3432b1c6895bbeba | 1,027 | py | Python | python/solutii/ingrid_stoleru/Cursor.py | broascaiulian/labs | 068c7f440c7a29cb6a3e1dbb8e4bb7dfaff5a050 | [
"MIT"
] | null | null | null | python/solutii/ingrid_stoleru/Cursor.py | broascaiulian/labs | 068c7f440c7a29cb6a3e1dbb8e4bb7dfaff5a050 | [
"MIT"
] | null | null | null | python/solutii/ingrid_stoleru/Cursor.py | broascaiulian/labs | 068c7f440c7a29cb6a3e1dbb8e4bb7dfaff5a050 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# *-* coding: UTF-8 *-*
"""Solutia problemei Cursor"""
DIRECTIONS = {" stanga ": [-1, 0], " dreapta ": [1, 0],
" jos ": [0, -1], " sus ": [0, 1]}
def distanta(string, pozitie):
"""Determinarea distantei"""
directie, valoare = string.split()
directie = directie.lower()
if directie in DIRECTIONS:
directie = DIRECTIONS[directie]
pozitie[0] = pozitie[0]+directie[0]*int(valoare)
pozitie[1] = pozitie[1]+directie[1]*int(valoare)
def main():
"""Apelarea functiei"""
try:
fisier = open("Cursor_Date", "r")
mesaje = fisier.read()
fisier.close()
except IOError:
print "Nu am putut obține coordonatele."
return
pozitie = [0, 0]
for linie in mesaje.splitlines():
if linie:
distanta(linie, pozitie)
print pozitie
rezultat = (pozitie[0]**2 + pozitie[1]**2) ** 0.5
print rezultat
if __name__ == "__main__":
main()
| 26.333333 | 57 | 0.542356 | 113 | 1,027 | 4.849558 | 0.504425 | 0.058394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032078 | 0.30185 | 1,027 | 38 | 58 | 27.026316 | 0.732218 | 0.040896 | 0 | 0 | 0 | 0 | 0.091435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aab5dd1420b09051fc9fe578384a7add1adbe417 | 1,932 | py | Python | hoods/models.py | badruu/neighborhood | 85d30f7451f921c533dc4463aad76ed2d39f8023 | [
"MIT"
] | null | null | null | hoods/models.py | badruu/neighborhood | 85d30f7451f921c533dc4463aad76ed2d39f8023 | [
"MIT"
] | 6 | 2021-03-19T01:10:18.000Z | 2022-03-11T23:49:18.000Z | hoods/models.py | badruu/neighborhood | 85d30f7451f921c533dc4463aad76ed2d39f8023 | [
"MIT"
] | null | null | null | from django.db import models
import datetime
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
class Hoods(models.Model):
name = models.CharField(max_length = 100)
location = models.CharField(max_length = 100)
image = models.ImageField(upload_to = 'images/', default = 'default.jpg')
description = models.TextField(max_length = 300, default = 'No description')
population = models.IntegerField(default = '0')
admin = models.ForeignKey(User, on_delete = models.CASCADE)
timestamp = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.name
def create_hood(self):
self.save()
def delete_hood(self):
self.delete()
def find_neighbourhood(hoods_id):
neighbourhood = Hoods.objects.get(id = hoods_id)
return neighbourhood
def update_hood(self, item, value):
self.update(item = value)
def update_occupants(self, value):
self.update(population = value)
class Business(models.Model):
name = models.CharField(max_length = 100)
user = models.ForeignKey(User, on_delete = models.CASCADE)
hood_id = models.ForeignKey(Hoods, on_delete = models.CASCADE)
email_address = models.EmailField(max_length=254)
timestamp = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.name
def create_business(self):
self.save()
def delete_business(self):
self.delete()
def find_business(business_id):
business = Business.objects.get(id = business_id)
return business
def update_business(self, item, value):
self.update(item = value)
@classmethod
def search_business(cls, name):
businesses = cls.objects.filter(name__icontains=name).all()
return businesses | 31.16129 | 80 | 0.699793 | 233 | 1,932 | 5.652361 | 0.32618 | 0.037965 | 0.041002 | 0.05467 | 0.378891 | 0.294609 | 0.294609 | 0.183751 | 0.11997 | 0.11997 | 0 | 0.010383 | 0.202381 | 1,932 | 62 | 81 | 31.16129 | 0.844257 | 0 | 0 | 0.291667 | 0 | 0 | 0.017072 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.125 | 0.041667 | 0.770833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aab991f211f7427de19a3a6c9a2b406d03220528 | 3,887 | py | Python | plugin.video.mrstealth.serialu.net/uppod.py | mrstealth/kodi-isengard | 2f37ba5320c1618fbe635f5683e7329a63195c16 | [
"MIT"
] | null | null | null | plugin.video.mrstealth.serialu.net/uppod.py | mrstealth/kodi-isengard | 2f37ba5320c1618fbe635f5683e7329a63195c16 | [
"MIT"
] | null | null | null | plugin.video.mrstealth.serialu.net/uppod.py | mrstealth/kodi-isengard | 2f37ba5320c1618fbe635f5683e7329a63195c16 | [
"MIT"
] | null | null | null | #-------------------------------------------------------------------------------
# Uppod decoder
#-------------------------------------------------------------------------------
import urllib2
import cookielib
def decode(param):
try:
#-- define variables
loc_3 = [0,0,0,0]
loc_4 = [0,0,0]
loc_2 = ''
#-- define hash parameters for decoding
dec = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
hash1 = ["0", "5", "u", "w", "6", "n", "H", "o", "B", "p", "N", "M", "D", "R", "z", "G", "V", "e", "i", "3", "m", "W", "U", "7", "g", "="]
hash2 = ["c", "T", "I", "4", "Q", "Z", "v", "Y", "y", "X", "k", "b", "8", "a", "J", "d", "1", "x", "L", "t", "l", "2", "f", "s", "9", "h"]
#-- decode
for i in range(0, len(hash1)):
re1 = hash1[i]
re2 = hash2[i]
param = param.replace(re1, '___')
param = param.replace(re2, re1)
param = param.replace('___', re2)
i = 0
while i < len(param):
j = 0
while j < 4 and i+j < len(param):
loc_3[j] = dec.find(param[i+j])
j = j + 1
loc_4[0] = (loc_3[0] << 2) + ((loc_3[1] & 48) >> 4);
loc_4[1] = ((loc_3[1] & 15) << 4) + ((loc_3[2] & 60) >> 2);
loc_4[2] = ((loc_3[2] & 3) << 6) + loc_3[3];
j = 0
while j < 3:
if loc_3[j + 1] == 64 or loc_4[j] == 0:
break
loc_2 += unichr(loc_4[j])
j = j + 1
i = i + 4;
except:
loc_2 = ''
return loc_2
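# In effect, the loop above undoes a character substitution between hash1 and hash2
# and then performs standard base64 decoding by hand using the `dec` alphabet.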
def decodeSourceURL(uhash):
print "*** Got uppod uhash: %s" % uhash
return decode(uhash)
def getDecodedHashFromSourceURL(url, referer):
print "*** Decoded source URL: %s" % url
# NOTE: set cookie
cj = cookielib.MozillaCookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
# Accept text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
# Accept-Encoding gzip, deflate
# Accept-Language de-de,de;q=0.8,en-us;q=0.5,en;q=0.3
# Connection keep-alive
# Cookie SERIALU=cd640e59142f39cc54ed65461dd60e10; MarketGidStorage=%7B%220%22%3A%7B%22svspr%22%3A%22%22%2C%22svsds%22%3A3%2C%22TejndEEDj%22%3A%22MTM4MDU1NzM0NTY2NTQ0OTk0NTMz%22%7D%2C%22C44994%22%3A%7B%22page%22%3A3%2C%22time%22%3A1380557356398%7D%7D; amcu_n=2; advmaker_pop=1
# DNT 1
# Host serialu.net
# Referer http://serialu.net/media/stil-nov/uppod.swf
# User-Agent Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:24.0) Gecko/20100101 Firefox/24.0
request = urllib2.Request(url, None)
request.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
request.add_header('Accept-Encoding', 'gzip, deflate')
request.add_header('Accept-Language', 'de-de,de;q=0.8,en-us;q=0.5,en;q=0.3')
request.add_header('Connection', 'keep-alive')
# request.add_header('Cookie', 'SERIALU=cd640e59142f39cc54ed65461dd60e10; MarketGidStorage=%7B%220%22%3A%7B%22svspr%22%3A%22%22%2C%22svsds%22%3A3%2C%22TejndEEDj%22%3A%22MTM4MDU1NzM0NTY2NTQ0OTk0NTMz%22%7D%2C%22C44994%22%3A%7B%22page%22%3A3%2C%22time%22%3A1380557356398%7D%7D; amcu_n=2; advmaker_pop=1')
request.add_header('DNT', 1)
request.add_header('Host', 'serialu.net')
request.add_header('Referer', 'http://serialu.net/media/stil-nov/uppod.swf')
request.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:24.0) Gecko/20100101 Firefox/24.0')
return urllib2.urlopen(request).read()
| 44.170455 | 306 | 0.516337 | 521 | 3,887 | 3.777351 | 0.303263 | 0.010163 | 0.073171 | 0.033537 | 0.454268 | 0.430894 | 0.430894 | 0.430894 | 0.430894 | 0.389228 | 0 | 0.128088 | 0.270903 | 3,887 | 87 | 307 | 44.678161 | 0.565984 | 0.303576 | 0 | 0.113208 | 0 | 0.056604 | 0.191391 | 0.062644 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.037736 | null | null | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aabc2c45a2f070f9b91c1f8410ef7d7691faf98d | 183 | py | Python | localtalk/application.py | mattcollie/LocalTalk | d17765243cd23d09024544a763a18226be16c50c | [
"MIT"
] | null | null | null | localtalk/application.py | mattcollie/LocalTalk | d17765243cd23d09024544a763a18226be16c50c | [
"MIT"
] | null | null | null | localtalk/application.py | mattcollie/LocalTalk | d17765243cd23d09024544a763a18226be16c50c | [
"MIT"
] | null | null | null | from localtalk import create_app, create_server
app = create_app()
server = create_server()
# server.start()
if __name__ == '__main__':
app.run(debug=True, host='localhost')
| 15.25 | 47 | 0.715847 | 24 | 183 | 4.958333 | 0.625 | 0.151261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153005 | 183 | 11 | 48 | 16.636364 | 0.767742 | 0.076503 | 0 | 0 | 0 | 0 | 0.10241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aabdff6b46e83b814599086ebf3ca4b5caeb3757 | 2,070 | py | Python | biokeypy/moduleForShowingJudges.py | zacandcheese/biokeypy | d421e8be0b407fd1df395c79ffde409ca80066e2 | [
"MIT"
] | null | null | null | biokeypy/moduleForShowingJudges.py | zacandcheese/biokeypy | d421e8be0b407fd1df395c79ffde409ca80066e2 | [
"MIT"
] | null | null | null | biokeypy/moduleForShowingJudges.py | zacandcheese/biokeypy | d421e8be0b407fd1df395c79ffde409ca80066e2 | [
"MIT"
] | null | null | null | #moduleForShowingJudges
#cmd /K "$(FULL_CURRENT_PATH)"
#cd ~/Documents/GitHub/Keyboard-Biometric-Project/Project_Tuples
#sudo python -m pip install statistics
#python analyzeData.py
"""
Author: Zachary Nowak and Matthew Nowak
Date: 3/09/2018
Program Description: This code can record the
Press Time and Flight Time of a tuple as a user
types a passage and it saves a matrix to a file.
"""
__version__ = '1.0'
__author__ = 'Zachary Nowak'
"""STANDARD LIBRARY IMPORTS"""
import json
import platform
import os
"""LOCAL LIBRARY IMPORTS"""
import moduleForSavingTimelines as ST
import moduleForRecordingWithGUI as GUI
import moduleForCreatingPasswordSentence as PS
import moduleForDeconstructingTimelines as DT
import moduleForAuthenticatingUsers as AU
import moduleForFindingTuples as FT
import moduleForGettingSentence as GS
import moduleForPlotting as P
"""FOLDER IMPORTS"""
infile = "data/451.txt"# passage for training people.
#tupleList = FT.allPeople()
tupleList = ["his", "the","ing"]
location = ""
if(platform.system() == "Windows"):#WINDOWS
name = input("What is your name: ")
while(not(location in ["y","n","z","c"])):
location = input("Is this training data?(y/n) ")
if(location == "n"):
location = "Applying/"
passage = ("The thing likes learning his history.There the thing sings.This is what the thing sings.").split(".")
elif(location == "z"):
os.chdir("judgeslib")
P.plot(tupleList)
elif(location == "c"):
os.chdir("judgeslib")
DT.clearAll()
else:
location = "Database/"
passages = open(infile,"r").read().split(".")
passage2 = passages[1].split(",")
passage = passages + passage2
passage.remove(passages[1])
"""TYPE THE PASSAGE AND RECORD THE TIME LINE"""
pressTimeLine,pressCharTimeLine,releaseTimeLine,releaseCharTimeLine = GUI.start_recording(passage)
os.chdir("judgeslib/")
ST.saveTimeLine(pressTimeLine,pressCharTimeLine,name,location)
DT.userSummary(name,location)
if(location == "Applying/"):
#AU.newData(tupleList)
print("Now to verify")
AU.verify(tupleList,name)
#IMPLIMENT MATPLOTLIB
#IMPLIMENT CLEAR FEATURE
| 27.236842 | 114 | 0.746377 | 265 | 2,070 | 5.784906 | 0.550943 | 0.015656 | 0.031311 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00882 | 0.123672 | 2,070 | 76 | 115 | 27.236842 | 0.836273 | 0.24058 | 0 | 0.047619 | 0 | 0 | 0.180294 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.166667 | 0.261905 | 0 | 0.261905 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
aad089a6f4a448fc23d035f432e9858d598d7704 | 1,982 | py | Python | user/forms.py | apuc/django-rest-framework | 863f2dcca5f2a677ac0e477fc704cc54cd9a53f8 | [
"MIT"
] | null | null | null | user/forms.py | apuc/django-rest-framework | 863f2dcca5f2a677ac0e477fc704cc54cd9a53f8 | [
"MIT"
] | 6 | 2021-03-30T14:08:14.000Z | 2021-09-08T02:21:23.000Z | user/forms.py | apuc/django-rest-framework | 863f2dcca5f2a677ac0e477fc704cc54cd9a53f8 | [
"MIT"
] | null | null | null | from crispy_forms import layout
from crispy_forms.helper import FormHelper
from django.conf import settings
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django import forms
from .models import UserProfile
class RegisterForm(UserCreationForm):
username = forms.CharField(label='Username', max_length=45)
email = forms.EmailField(label='Email')
password1 = forms.CharField(
min_length=settings.MIN_PASSWORD_LENGTH,
label='Password',
strip=False,
help_text=f'Enter {settings.MIN_PASSWORD_LENGTH} digits and chars',
widget=forms.PasswordInput()
)
password2 = forms.CharField(
min_length=settings.MIN_PASSWORD_LENGTH,
label='Repeat the password',
strip=False,
widget=forms.PasswordInput()
)
photo = forms.ImageField(required=False)
class Meta:
model = UserProfile
fields = (
'username',
'email',
'password1',
'password2',
'photo'
)
def crispy_init(self):
"""Initialize crispy-forms helper."""
self.helper = FormHelper()
self.helper.form_id = 'id-RegistrationForm'
self.helper.form_class = 'form-group'
self.helper.form_method = 'post'
self.helper.form_action = reverse_lazy('user:api-register')
self.helper.layout = layout.Layout(
layout.Field('username'),
layout.Field('email'),
layout.Field('password1'),
layout.Field('password2'),
layout.Field('photo'),
layout.Div(
layout.Submit(
'submit',
'Register',
css_class='btn-success my-2 px-4'
),
css_class='text-center'
)
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.crispy_init()
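# Illustrative usage in a view (a minimal sketch; the surrounding request handling
# is hypothetical):
#   form = RegisterForm(request.POST or None)
#   form.crispy_init()  # attach the crispy-forms helper before rendering
#   if form.is_valid():
#       form.save()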
| 30.492308 | 75 | 0.592331 | 200 | 1,982 | 5.72 | 0.395 | 0.052448 | 0.048951 | 0.065559 | 0.092657 | 0.092657 | 0.092657 | 0.092657 | 0.092657 | 0 | 0 | 0.00722 | 0.301211 | 1,982 | 64 | 76 | 30.96875 | 0.818773 | 0.015641 | 0 | 0.105263 | 0 | 0 | 0.13419 | 0.015424 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035088 | false | 0.22807 | 0.122807 | 0 | 0.280702 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
aad22ce0ae134c7841d5d6eb61bc2075cbcc5f71 | 3,187 | py | Python | wadi.py | sensepost/wadi | 7d29ee53d63425029c653fb7c20b4ff4c15f289b | [
"CC0-1.0"
] | 137 | 2015-10-23T14:58:42.000Z | 2021-11-18T09:59:16.000Z | wadi.py | sensepost/wadi | 7d29ee53d63425029c653fb7c20b4ff4c15f289b | [
"CC0-1.0"
] | 11 | 2015-10-31T06:51:50.000Z | 2022-02-20T20:22:04.000Z | wadi.py | sensepost/wadi | 7d29ee53d63425029c653fb7c20b4ff4c15f289b | [
"CC0-1.0"
] | 62 | 2015-10-23T14:58:49.000Z | 2021-11-18T09:18:13.000Z | import sys
import os
from multiprocessing import Process, Queue, Manager
from threading import Timer
from wadi_harness import Harness
from wadi_debug_win import Debugger
import time
import hashlib
def test(msg):
while True:
print 'Process 2:' + msg
#print msg
def test2():
print 'Process 1'
time.sleep(2)
while True:
print 'Process 1'
def run_harness(t):
harness = Harness(sys.argv[1],sys.argv[2],t)
harness.run()
def run_debugger(q):
debugger = Debugger(q)
debugger.run_Browser('IE')
def timeout_debug(dp):
print '[*] Terminating Debugger Process PID: %d' % dp.pid
dp.terminate()
class wadi():
def __init__(self, args=None):
if args:
self.args = args
else:
pass
def writeTestCases(self,tcases,msg):
self.msg = msg[0]
self.code = msg[1]
self.add = msg[2]
self.testcases = tcases
self.hash = hashlib.md5()
self.b = self.code+self.add
self.hash.update(self.b)
self.dgst = self.hash.hexdigest()
self.path = "./"+self.dgst
if os.path.exists(self.path):
print "[*] Duplicate Crash: %s" % self.dgst
else:
os.makedirs(self.path)
f = open(self.path + "/" +self.dgst+".crash","w+b")
f.write(self.msg)
f.close()
print "[*] Written Crash file to: %s" % self.dgst+".crash"
for i in range(10):
self.tcase = self.testcases.pop()
f2 = open(self.path+"/"+self.dgst+"_"+str(i)+".html","w+b")
f2.write(self.tcase)
f2.close()
print "[*] Written testcases to %s" % self.path+"/"+self.dgst+str(i)+".html"
print "[*] Last TestCase Folder '%s'" % self.dgst
def close(self):
sys.exit()
def run(self):
self.queue = Manager().list()
self.tcases = Manager().list()
self.server_pid = None
self.debugger_pid = None
self.init = 0
while True:
if not self.server_pid:
self.server_process = Process(target=run_harness, args=(self.tcases,))
self.server_process.start()
self.server_pid = self.server_process.pid
print '[*] Running Server Process %s ' % (self.server_pid,)
#self.server_pid =
if not self.debugger_pid:
self.debugger_process = Process(target=run_debugger,args=(self.queue,))
self.debugger_process.start()
self.debugger_pid = self.debugger_process.pid
timer = Timer(120.0,timeout_debug,(self.debugger_process,))
timer.daemon = True
timer.start()
if not self.debugger_process.is_alive():
print "[*] Debugger Process %s exited" % self.debugger_pid
timer.cancel()
self.lenq = len(self.queue)
self.lentc = len(self.tcases)
if self.lenq:
self.msg = self.queue.pop()
#self.msg = self.queue.get()
print "[*] Wooops Crash !!!!"
print "[*] %s" % self.msg[0]
else:
print "[*] No Crashes"
#if not self.tcases.empty():
if self.lentc and self.lenq:
#self.tc = self.tcases.get()
self.writeTestCases(self.tcases, self.msg)
else:
print "[*] No TestCases"
self.debugger_pid = None
else:
pass
if __name__ == '__main__':
#try:
w = wadi()
w.run()
#except:
# w.close()
| 24.898438 | 81 | 0.612488 | 439 | 3,187 | 4.350797 | 0.257403 | 0.062827 | 0.034031 | 0.033508 | 0.116754 | 0.092147 | 0.025131 | 0 | 0 | 0 | 0 | 0.00908 | 0.239724 | 3,187 | 127 | 82 | 25.094488 | 0.779199 | 0.040477 | 0 | 0.142857 | 0 | 0 | 0.115371 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.020408 | 0.081633 | null | null | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aad37494decad9fd0ad1fb72dcfce3587fe45cdf | 1,033 | py | Python | rick_and_morty_app/views.py | esalcedo94/final_project | 7dce4fae8248d820698220d3289bfb49bd96b2cd | [
"MIT"
] | null | null | null | rick_and_morty_app/views.py | esalcedo94/final_project | 7dce4fae8248d820698220d3289bfb49bd96b2cd | [
"MIT"
] | 4 | 2021-03-19T01:50:05.000Z | 2021-09-22T18:52:13.000Z | rick_and_morty_app/views.py | esalcedo94/final_project | 7dce4fae8248d820698220d3289bfb49bd96b2cd | [
"MIT"
] | null | null | null | # from django.shortcuts import render, redirect, get_object_or_404
from .forms import CharacterForm
from rick_and_morty_app.models import Character
from django.views.generic import ListView, CreateView, UpdateView, DetailView, DeleteView
from django.urls import reverse_lazy # new
# Create your views here.
class HomePageView(ListView):
model = Character
template_name = 'character_list.html'
class CreateCharacterView(CreateView):
model = Character
form_class = CharacterForm
template_name = 'character_form.html'
success_url = reverse_lazy('character_list')
class CharacterDetailView(DetailView):
model = Character
template_name = 'character_details.html'
class CharacterUpdate(UpdateView):
model = Character
fields = ['name', 'lastEpisode']
template_name = 'character_update.html'
success_url = reverse_lazy('character_list')
class DeleteCharacter(DeleteView):
model = Character
template_name = 'character_delete.html'
success_url = reverse_lazy('character_list')
| 31.30303 | 89 | 0.771539 | 117 | 1,033 | 6.581197 | 0.435897 | 0.090909 | 0.136364 | 0.101299 | 0.297403 | 0.161039 | 0.161039 | 0.111688 | 0 | 0 | 0 | 0.003421 | 0.151016 | 1,033 | 32 | 90 | 32.28125 | 0.874572 | 0.089061 | 0 | 0.333333 | 0 | 0 | 0.169691 | 0.068303 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
aad397b94b0cb0be8ca7c28476744dda7ab4e655 | 339 | py | Python | samples/contacts/pathUtils.py | Trevol/Mask_RCNN | 18308082e2c5fd5b4df5d6e40f009b3ebd66c26d | [
"MIT"
] | null | null | null | samples/contacts/pathUtils.py | Trevol/Mask_RCNN | 18308082e2c5fd5b4df5d6e40f009b3ebd66c26d | [
"MIT"
] | null | null | null | samples/contacts/pathUtils.py | Trevol/Mask_RCNN | 18308082e2c5fd5b4df5d6e40f009b3ebd66c26d | [
"MIT"
] | null | null | null | import os, sys
def mrcnnPath():
filePath = os.path.dirname(os.path.realpath(__file__))
return os.path.abspath(os.path.join(filePath, os.pardir, os.pardir))
def currentFilePath(file=None):
file = file if file else __file__
return os.path.dirname(os.path.realpath(file))
def mrcnnToPath():
sys.path.append(mrcnnPath()) | 28.25 | 72 | 0.719764 | 49 | 339 | 4.816327 | 0.408163 | 0.152542 | 0.110169 | 0.127119 | 0.262712 | 0.262712 | 0.262712 | 0 | 0 | 0 | 0 | 0 | 0.141593 | 339 | 12 | 73 | 28.25 | 0.810997 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.111111 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aadc6caacc7ccc064997d62040d5601470ca380f | 1,656 | py | Python | remove_svn_folder.py | sdenisen/test | 709bcdd16d0f83f03b04e0bc7918bb3785993c59 | [
"Unlicense"
] | null | null | null | remove_svn_folder.py | sdenisen/test | 709bcdd16d0f83f03b04e0bc7918bb3785993c59 | [
"Unlicense"
] | 2 | 2017-08-24T18:55:37.000Z | 2017-08-24T18:59:06.000Z | remove_svn_folder.py | sdenisen/test | 709bcdd16d0f83f03b04e0bc7918bb3785993c59 | [
"Unlicense"
] | null | null | null | __author__ = 'Sergey'
import shutil
import os
import stat
def read_all_directory_path(root_folder, final_directory_list=[], folder_for_remove='.svn'):
under_files_and_folders = os.listdir(root_folder)
if os.path.split(root_folder)[1] == folder_for_remove:
final_directory_list.append(root_folder)
return final_directory_list
if len(under_files_and_folders) == 0:
final_directory_list.append(root_folder)
return final_directory_list
for dir in under_files_and_folders:
sub_path = root_folder + "\\" + dir
if os.path.isfile(sub_path):
continue
read_all_directory_path(sub_path, final_directory_list)
def change_file_attributes_in_folder(folder):
list_files_and_directories = os.listdir(folder)
for item in list_files_and_directories:
sub_path = folder + "\\" + item
if os.path.isfile(sub_path):
os.chmod(sub_path, stat.S_IWRITE)
os.unlink(sub_path)
else:
change_file_attributes_in_folder(sub_path)
directory_list = []
root = "D:\\_svn_repo\\trunk"
remove_dir = ".svn"
# 1 - read all directories recursively and store to a variable list of directory.
read_all_directory_path(root, directory_list)
# 2 - remove directory from the variable.
for directory in directory_list:
if os.path.split(directory)[1] == remove_dir:
try:
shutil.rmtree(directory)
except WindowsError, e:
if "Access is denied" in e.strerror:
change_file_attributes_in_folder(directory)
shutil.rmtree(directory)
else:
raise
| 29.571429 | 92 | 0.679348 | 219 | 1,656 | 4.780822 | 0.305936 | 0.111748 | 0.103152 | 0.057307 | 0.276982 | 0.150907 | 0.110793 | 0.110793 | 0.110793 | 0.110793 | 0 | 0.003965 | 0.238527 | 1,656 | 55 | 93 | 30.109091 | 0.826328 | 0.07186 | 0 | 0.25 | 0 | 0 | 0.035202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.075 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
aadfff06b29955168bb01b2fd687e7d85bcf363a | 563 | py | Python | apps/flow/run.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 349 | 2020-08-04T10:21:01.000Z | 2022-03-23T08:31:29.000Z | apps/flow/run.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 2 | 2021-01-07T06:17:05.000Z | 2021-04-01T06:01:30.000Z | apps/flow/run.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 70 | 2020-08-24T06:46:14.000Z | 2022-03-25T13:23:27.000Z | from apps.flow.settings import config
if config.SERVER_ENV != 'dev':
from gevent import monkey
monkey.patch_all()
else:
pass
from apps.flow.views.deploy import deploy
from apps.flow.views.flow import flow
from library.api.tFlask import tflask
def create_app():
app = tflask(config)
register_blueprints(app)
return app
def register_blueprints(app):
app.register_blueprint(flow, url_prefix="/v1/flow")
app.register_blueprint(deploy, url_prefix="/v1/deploy")
if __name__ == '__main__':
create_app().run(port=config.PORT)
| 20.107143 | 59 | 0.730018 | 80 | 563 | 4.9125 | 0.4375 | 0.061069 | 0.091603 | 0.086514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004237 | 0.161634 | 563 | 27 | 60 | 20.851852 | 0.82839 | 0 | 0 | 0 | 0 | 0 | 0.05151 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0.055556 | 0.277778 | 0 | 0.444444 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2a9c29a9ca43c2e78dfd11fb0c0783a70a2f2e37 | 10,197 | py | Python | xyz2image.py | hentr/xyz2image | bee976e0062ee893594e56ead1e75cb5d7a4e8af | [
"MIT"
] | null | null | null | xyz2image.py | hentr/xyz2image | bee976e0062ee893594e56ead1e75cb5d7a4e8af | [
"MIT"
] | null | null | null | xyz2image.py | hentr/xyz2image | bee976e0062ee893594e56ead1e75cb5d7a4e8af | [
"MIT"
] | null | null | null | '''
Authors: Trond Henninen(trond.henninen@empa.ch) and Feng Wang
This script is for generating normalised Gaussian simulated annular dark-field scanning transmission electron microscopy (ADF-STEM) images from input atomic coordinates.
For rapidly generating a large dataset, it approximates a contrast similar to multislice simulated images by convolving a 2D Gaussian with the atomic coordinates.
This is a decent approximation for up to 10 overlapping atoms, as the contrast is linearly additive for such thin lattices.
Optimized for rapidly generating data with multiprocessing, so can generate millions of images per hour with a desktop processor.
Inputs .xyz files and outputs a .tif image and compressed arrays (.npz) for fast save/load data e.g. for machine learning.
The input coodinates gets blurred by a 3D gaussian and has the z-dimension flattened to make the 2d image.
xyz2image.xyz2image converts just one file, while xyz2image.folder_parallellized converts all .xyz files in the folder.
Keep in mind, dimensions of the xyz coordinates, gauss_sigma and padxyz are all in ångström [å].
TODO: implement binary_2d and direct image output
TODO: convert binary_radius to [å] instead if pixels
TODO: set a parameter for voxsize (e.g. 9 pm), and calculate
Variables:
'folder' is the folder path where to look for .xyz files (default: '.' meaning current folder).
'gauss_sigma' is the Gaussian sigma in ångström [å] (default: 0.4).
'edgesize' is the size in pixels of the output square image and voxel cube (default: 128).
'padxyz' is the minimum padding (in [å]) added around the atomic coordinate array, adjusted to make the box cubic (default: 1.5)
'n_rotation' is the number of randomly rotated images (and 3D representations) that is generated for each xyz (1 means original orientation, while 8000 gives convenient files of 10s of MB)
'n_stacks' number of .npz stacks to be generated for each input file
'output_types' which type of outputs are generated (enable by setting the different options to True)
'bitrate' bitrate of output files (should be 8 or 16)
'binary_radius' radius of atoms in binary_3d/binary_2d [pixels]
'frameshift' and 'maxframes' are for .xyz files with many frames:
'frameshift' is which frame it start reading from (default 0)
'maxframes' is how many consecutive frames is read (default 50)
'''
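# Illustrative usage (a minimal sketch; the .xyz file name below is hypothetical and
# the PARAMETERS block further down is assumed to be edited to match your data):
#   import xyz2image
#   xyz2image.xyz2image('cluster_0001.xyz')      # convert a single .xyz file
#   xyz2image.folder_parallellized(xyz2image.P)  # convert every .xyz file found in P.folder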
import os, glob
import numpy as np
from ase.io import read
from scipy.ndimage.filters import gaussian_filter
from random import uniform
import tifffile as tif
import multiprocessing
from multiprocessing import Pool
######################## PARAMETERS ###################################
class Parameters: #use an empty object to store parameters
pass
P = Parameters() # this object also becomes available to the subprocesses in the pool without being passed to xyz2image()
P.output_types = { #set True for which type of output you want
'coordinates_3d':True, #3D coordinates of the rotated cluster
'delta_3d':True, #binary 3D-array with 1 for atom center coordinates and 0 for rest
'delta_2d':False, #binary 2D-array same as the 3D, but Z-coordinate has been collapsed
'gaussian_3d':False, #delta_3d convolved with a 3D gaussian
'gaussian_2d':True, #Simulated image made by delta_2d convolved with a 2D gaussian
'binary_3d':False, #binary 3D-array with 1 for spherical atoms with radius P.binary_radius, and 0 for rest
'binary_2d':False, #NOT YET IMPLEMENTED binary 2D-array same as the 3D, but Z-coordinate has been collapsed
'delta_2d_image':False, #delta_2d is also stored as a .tif stack
'gaussian_2d_image':False, #gaussian_2d is also stored as a .tif stack
'binary_2d_image':False, #NOT YET IMPLEMENTED binary_2d is also stored as a .tif stack
}
P.folder = r'.'
P.output_folder = './npz_stacks'
P.bitrate = 8 #output bitrate, 8 or 16
P.gauss_sigma = 0.4
P.edgesize = 80
P.padxyz = 0.3
P.n_rotation = (1024)*8 #number of rotations in one .npz stack, 8k is a good compromise of speed/filesize/memory consumption
P.n_stacks = 1 # number of .npz stacks will be generated for each input file
P.frameshift = 0
P.maxframes = 50
P.binary_radius = 7 #[pixels] radius of atoms in binary_3d
P.xyzfiles = glob.glob('*.xyz')
######################## /PARAMETERS ###################################
def load(filename):
# for loading numpy compressed ND-arrays (.npz) files
return(np.load(filename))
def xyz2image(file):
print(file)
fname = os.path.splitext(os.path.basename(file))[0] #gets the name of the file without the file extension
t = read(file,index=':')
t2 = t[min(P.frameshift,len(t)-1):min(P.frameshift+P.maxframes,len(t))] #frameshift and maxframes are for handling if multiple frames in the .xyz file
print(file,len(t2))
if not os.path.exists(P.output_folder): #make new folders if they don't exist
os.makedirs(P.output_folder)
if not os.path.exists(f'{P.output_folder}/{fname}'):
os.makedirs(f'{P.output_folder}/{fname}')
coordinates_3d_stack, delta_3d_stack, delta_2d_stack, gaussian_3d_stack = [],[],[],[]
gaussian_2d_stack, binary_3d_stack, binary_2d_stack = [],[],[]
for at in t2: #for handling if multiple frames in the .xyz file
del at[at.numbers == 6] #delete carbon atoms
for rot in range(P.n_rotation):
if rot == 0: # P.n_rotation == 1: #keep the first frame at same viewpoint as the input xyz file
at.rotate(90, 'z')
else: # random rotation
at.euler_rotate(uniform(0,360),uniform(0,360),uniform(0,360))
atoms = at.get_positions()
atoms[:,0] -= min(atoms[:,0]); atoms[:,1] -= min(atoms[:,1]); atoms[:,2] -= min(atoms[:,2])
maxx,maxy,maxz = max(atoms[:,0]),max(atoms[:,1]),max(atoms[:,2])
maxxyz = max(maxx,maxy,maxz)
padx,pady,padz = (maxxyz-maxx)/2+P.padxyz,(maxxyz-maxy)/2+P.padxyz, (maxxyz-maxz)/2+P.padxyz
atoms[:,0] += padx; atoms[:,1] += pady; atoms[:,2] += padz;
edgemax = maxxyz+2*P.padxyz
voxsize = edgemax/(P.edgesize-1)
sigpix = P.gauss_sigma/voxsize
#print(file,' - ',len(atoms),at,', voxel size -',voxsize)
normatoms = np.round(atoms/edgemax*(P.edgesize-1)) #normalize the coordinate box
normatoms = normatoms.astype(int)
delta_3d = np.zeros((P.edgesize,P.edgesize,P.edgesize))#,dtype=bool)
delta_3d[normatoms[:,0],normatoms[:,1],normatoms[:,2]] = 1
delta_2d = np.zeros((P.edgesize,P.edgesize))#,dtype=bool)
delta_2d[normatoms[:,0],normatoms[:,1]] = 1
if P.output_types['coordinates_3d'] == True:
coordinates_3d_stack.append(normatoms)
if P.output_types['delta_3d'] == True:
delta_3d_stack.append(delta_3d)
if P.output_types['delta_2d'] == True or P.output_types['delta_2d_image'] == True:
delta_2d_stack.append(delta_2d)
if P.output_types['gaussian_2d'] == True or P.output_types['gaussian_2d'] == True:
gaussian_2d = gaussian_filter(delta_2d, sigpix)
gaussian_2d /= np.max(gaussian_2d)/(2**P.bitrate-1)
gaussian_2d = gaussian_2d.astype('uint'+str(P.bitrate))
gaussian_2d_stack.append(gaussian_2d)
if P.output_types['gaussian_3d'] == True:
gaussian_3d = gaussian_filter(delta_3d, sigpix)
gaussian_3d /= np.max(gaussian_3d)/(2**P.bitrate-1)
gaussian_3d = gaussian_3d.astype('uint'+str(P.bitrate))
gaussian_3d_stack.append(gaussian_3d)
if P.output_types['binary_3d'] == True:
binary_3d = np.zeros((P.edgesize,P.edgesize,P.edgesize),dtype=bool)
for atom in normatoms:
y,x,z = np.ogrid[ -atom[0]:P.edgesize-atom[0], -atom[1]:P.edgesize-atom[1], -atom[2]:P.edgesize-atom[2] ]
mask = x*x + y*y + z*z <= P.binary_radius**2
binary_3d[mask] = True
binary_3d_stack.append(binary_3d)
# if P.output_types['binary_2d'] == True or P.output_types['binary_2d_image'] == True:
output_stacks = {}
if P.output_types['coordinates_3d'] == True:
output_stacks['coordinates_3d'] = np.asarray(coordinates_3d_stack)
if P.output_types['delta_3d'] == True:
output_stacks['delta_3d'] = np.asarray(delta_3d_stack).astype(bool)
if P.output_types['delta_2d'] == True:
output_stacks['delta_2d']= np.asarray(delta_2d_stack).astype(bool)
if P.output_types['gaussian_2d'] == True:
output_stacks['gaussian_2d'] = np.asarray(gaussian_2d_stack)
if P.output_types['gaussian_3d'] == True:
output_stacks['gaussian_3d'] = np.asarray(gaussian_3d_stack)
if P.output_types['binary_3d'] == True:
output_stacks['binary_3d'] = np.asarray(binary_3d_stack)
simulated_files = len( glob.glob(f'{P.output_folder}/{fname}/*' ))
file_name = f'{P.output_folder}/{fname}/{str(simulated_files+P.n_rotation*len(t2)).zfill(8)}'
if any([P.output_types['delta_3d'],P.output_types['delta_2d'],P.output_types['gaussian_2d'],P.output_types['gaussian_3d'],P.output_types['binary_3d']]):
np.savez_compressed(file_name+'.npz',**output_stacks) #these files can be loaded with np.load
if P.output_types['delta_2d_image'] == True:
tif.imsave(file_name+'delta_2d.tif',np.invert(np.asarray(delta_2d_stack).astype(bool)))
if P.output_types['gaussian_2d_image'] == True:
tif.imsave(file_name+'gaussian_2d.tif',np.asarray(gaussian_2d_stack))
#if P.output_types['binary_2d_image'] == True:
def folder_parallellized(P): #runs all the .xyz files in the folder, parallelized with one file per thread
os.chdir(P.folder)
threads = multiprocessing.cpu_count()
with Pool(threads) as p:
p.map(xyz2image, P.xyzfiles)
if __name__ == '__main__':
for i in range(0,P.n_stacks):
folder_parallellized(P)
#to load .npz files:
#npz = (load(file_name+'.npz'))
#print(npz.files,np.shape(npz['delta_3d']),np.shape(npz['gaussian_2d']))
| 55.721311 | 188 | 0.674806 | 1,534 | 10,197 | 4.348761 | 0.229465 | 0.033578 | 0.044971 | 0.033578 | 0.254385 | 0.208964 | 0.1532 | 0.080498 | 0.069255 | 0.04617 | 0 | 0.027553 | 0.199176 | 10,197 | 182 | 189 | 56.027473 | 0.789371 | 0.412376 | 0 | 0.065041 | 0 | 0.00813 | 0.107033 | 0.026587 | 0 | 0 | 0 | 0.016484 | 0 | 1 | 0.02439 | false | 0.00813 | 0.065041 | 0.00813 | 0.097561 | 0.01626 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2a9f331439bd99a892c7c6363b31983af66fb320 | 3,949 | py | Python | tensorflow/python/debug/lib/debug_events_reader.py | scentini/tensorflow | 204ed332c0886a0e0ab10b22ba8d67b97e1c83c4 | [
"Apache-2.0"
] | 27 | 2020-02-29T04:13:22.000Z | 2022-02-07T21:54:50.000Z | tensorflow/python/debug/lib/debug_events_reader.py | scentini/tensorflow | 204ed332c0886a0e0ab10b22ba8d67b97e1c83c4 | [
"Apache-2.0"
] | 5 | 2020-06-01T18:50:38.000Z | 2021-07-16T07:13:52.000Z | tensorflow/python/debug/lib/debug_events_reader.py | scentini/tensorflow | 204ed332c0886a0e0ab10b22ba8d67b97e1c83c4 | [
"Apache-2.0"
] | 10 | 2020-12-15T03:55:24.000Z | 2021-12-17T23:14:11.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reader class for tfdbg v2 debug events."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import threading
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
class DebugEventsReader(object):
"""Reader class for a tfdbg v2 DebugEvents directory."""
def __init__(self, dump_root):
if not os.path.isdir(dump_root):
raise ValueError("Specified dump_root is not a directory: %s" % dump_root)
metadata_paths = glob.glob(os.path.join(dump_root, "*.metadata"))
if not metadata_paths:
raise ValueError("Cannot find any metadata file in directory: %s" %
dump_root)
elif len(metadata_paths) > 1:
raise ValueError(
"Unexpected: Found multiple (%d) metadata in directory: %s" %
(len(metadata_paths), dump_root))
self._metadata_path = compat.as_bytes(metadata_paths[0])
self._metadata_reader = None
prefix = metadata_paths[0][:-len(".metadata")]
self._source_files_path = compat.as_bytes("%s.source_files" % prefix)
self._stack_frames_path = compat.as_bytes("%s.stack_frames" % prefix)
self._graphs_path = compat.as_bytes("%s.graphs" % prefix)
self._execution_path = compat.as_bytes("%s.execution" % prefix)
self._graph_execution_traces_path = compat.as_bytes(
"%s.graph_execution_traces" % prefix)
self._readers = dict() # A map from file path to reader.
self._readers_lock = threading.Lock()
def _generic_iterator(self, file_path):
"""A helper method that makes an iterator given a debug-events file path."""
# The following code uses the double-checked locking pattern to optimize
# the common case (where the reader is already initialized).
if file_path not in self._readers: # 1st check, without lock.
with self._readers_lock:
if file_path not in self._readers: # 2nd check, with lock.
with errors.raise_exception_on_not_ok_status() as status:
self._readers[file_path] = pywrap_tensorflow.PyRecordReader_New(
compat.as_bytes(file_path), 0, b"", status)
reader = self._readers[file_path]
while True:
try:
reader.GetNext()
except (errors.DataLossError, errors.OutOfRangeError):
# We ignore partial read exceptions, because a record may be truncated.
# PyRecordReader holds the offset prior to the failed read, so retrying
# will succeed.
break
yield debug_event_pb2.DebugEvent.FromString(reader.record())
def metadata_iterator(self):
return self._generic_iterator(self._metadata_path)
def source_files_iterator(self):
return self._generic_iterator(self._source_files_path)
def stack_frames_iterator(self):
return self._generic_iterator(self._stack_frames_path)
def graphs_iterator(self):
return self._generic_iterator(self._graphs_path)
def execution_iterator(self):
return self._generic_iterator(self._execution_path)
def graph_execution_traces_iterator(self):
return self._generic_iterator(self._graph_execution_traces_path)
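# Illustrative usage (a minimal sketch; the dump directory path is hypothetical):
#   reader = DebugEventsReader("/tmp/tfdbg2_dump_root")
#   for debug_event in reader.execution_iterator():
#     print(debug_event.wall_time)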
| 41.135417 | 80 | 0.719423 | 528 | 3,949 | 5.140152 | 0.363636 | 0.05748 | 0.03353 | 0.037583 | 0.160648 | 0.109801 | 0.109801 | 0 | 0 | 0 | 0 | 0.005552 | 0.179033 | 3,949 | 95 | 81 | 41.568421 | 0.831585 | 0.300329 | 0 | 0.033898 | 0 | 0 | 0.088041 | 0.009171 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135593 | false | 0 | 0.169492 | 0.101695 | 0.423729 | 0.016949 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
2aadd1bf492d72d9413391d2877375cb66b76d8f | 1,099 | py | Python | neolearnit/src/main/python/get_vocabulary.py | BBN-E/LearnIt | 4f602f113cac9f4a7213b348a42c0fef23e2739c | [
"Apache-2.0"
] | 5 | 2020-08-29T21:23:05.000Z | 2022-03-24T19:57:44.000Z | neolearnit/src/main/python/get_vocabulary.py | BBN-E/LearnIt | 4f602f113cac9f4a7213b348a42c0fef23e2739c | [
"Apache-2.0"
] | 11 | 2020-03-04T23:03:34.000Z | 2022-02-18T04:04:04.000Z | neolearnit/src/main/python/get_vocabulary.py | BBN-E/LearnIt | 4f602f113cac9f4a7213b348a42c0fef23e2739c | [
"Apache-2.0"
] | 1 | 2021-05-19T11:51:51.000Z | 2021-05-19T11:51:51.000Z | import sys, os, re, codecs, json, glob
import random
from random import randint
from collections import defaultdict
from collections import Counter
from sets import Set
words=Set()
def read_json_data(strJsonFile):
with codecs.open(strJsonFile, 'r', encoding='utf-8') as f:
try:
json_data = json.load(f)
except ValueError as ve:
print "While loading: " + strJsonFile
print str(ve)
sys.exit(1)
return json_data
def main(input_json):
json_data = read_json_data(input_json)
for j in json_data:
for w in j['head']['word'].split(" "):
words.add(w)
for w in j['tail']['word'].split(" "):
words.add(w)
for w in j['sentence'].split(" "):
words.add(w)
for w in words:
print w
## sample instances
#print "writing json..."
#f.close()
#o.write(json.dumps(relation_mentions, sort_keys=True, indent=4, cls=json.JSONEncoder, ensure_ascii=False))
#o.close()
if __name__ == "__main__":
input_json = sys.argv[1]
main(input_json)
| 22.895833 | 111 | 0.604186 | 153 | 1,099 | 4.189542 | 0.490196 | 0.074883 | 0.037442 | 0.032761 | 0.109204 | 0.109204 | 0.109204 | 0.078003 | 0.078003 | 0 | 0 | 0.005 | 0.272066 | 1,099 | 47 | 112 | 23.382979 | 0.79625 | 0.148317 | 0 | 0.1 | 0 | 0 | 0.060345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.2 | null | null | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2ab807f882174850903dfd3ada3ceadfda7a1b90 | 554 | py | Python | ownblock/ownblock/apps/parking/views.py | danjac/ownblock | ac662fb7efb2f04567e2f85638c1250286452611 | [
"MIT"
] | 3 | 2015-06-12T04:42:02.000Z | 2018-10-29T17:09:10.000Z | ownblock/ownblock/apps/parking/views.py | danjac/ownblock | ac662fb7efb2f04567e2f85638c1250286452611 | [
"MIT"
] | null | null | null | ownblock/ownblock/apps/parking/views.py | danjac/ownblock | ac662fb7efb2f04567e2f85638c1250286452611 | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from .models import Vehicle
from .serializers import VehicleSerializer
class VehicleViewSet(viewsets.ModelViewSet):
model = Vehicle
serializer_class = VehicleSerializer
def pre_save(self, obj):
if obj.resident_id is None:
obj.resident = self.request.user
def get_queryset(self):
return super().get_queryset().filter(
resident__apartment__building=self.request.building
).select_related('resident',
'resident__apartment',)
| 25.181818 | 63 | 0.685921 | 58 | 554 | 6.327586 | 0.603448 | 0.059946 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.240072 | 554 | 21 | 64 | 26.380952 | 0.871734 | 0 | 0 | 0 | 0 | 0 | 0.048736 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.214286 | 0.071429 | 0.642857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
2ac69b5d4714157fc71eff7a6a23f99b61269132 | 8,407 | py | Python | train/solver.py | nhonth/DeLF-pytorch | 5577a447a0330b9e976cff56a10fc91669216b8c | [
"MIT"
] | 315 | 2019-01-28T05:45:25.000Z | 2022-03-28T07:58:13.000Z | train/solver.py | nhonth/DeLF-pytorch | 5577a447a0330b9e976cff56a10fc91669216b8c | [
"MIT"
] | 38 | 2019-02-21T06:17:00.000Z | 2021-07-05T12:40:00.000Z | train/solver.py | nhonth/DeLF-pytorch | 5577a447a0330b9e976cff56a10fc91669216b8c | [
"MIT"
] | 74 | 2019-01-28T08:08:10.000Z | 2021-12-20T09:27:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
PyTorch Implementation of training DeLF feature.
Solver for step 1 (finetune local descriptor)
nashory, 2018.04
'''
import os, sys, time
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from utils import Bar, Logger, AverageMeter, compute_precision_top_k, mkdir_p
'''helper functions.
'''
def __cuda__(x):
if torch.cuda.is_available():
return x.cuda()
else:
return x
def __is_cuda__():
return torch.cuda.is_available()
def __to_var__(x, volatile=False):
return Variable(x, volatile=volatile)
def __to_tensor__(x):
return x.data
class Solver(object):
def __init__(self, config, model):
self.state = {k: v for k, v in config._get_kwargs()}
self.config = config
self.epoch = 0 # global epoch.
self.best_acc = 0 # global best accuracy.
self.prefix = os.path.join('repo', config.expr)
# ship model to cuda
self.model = __cuda__(model)
# define criterion and optimizer
self.criterion = nn.CrossEntropyLoss()
if config.optim.lower() in ['rmsprop']:
self.optimizer = optim.RMSprop(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=config.lr,
weight_decay=config.weight_decay)
elif config.optim.lower() in ['sgd']:
self.optimizer = optim.SGD(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=config.lr,
weight_decay=config.weight_decay)
elif config.optim.lower() in ['adam']:
self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=config.lr,
weight_decay=config.weight_decay)
# decay learning rate by a factor of 0.5 every 10 epochs
self.lr_scheduler = optim.lr_scheduler.StepLR(
self.optimizer,
step_size=config.lr_stepsize,
gamma=config.lr_gamma)
# create directory to save result if not exist.
self.ckpt_path = os.path.join(self.prefix, config.stage, 'ckpt')
self.log_path = os.path.join(self.prefix, config.stage, 'log')
self.image_path = os.path.join(self.prefix, config.stage, 'image')
mkdir_p(self.ckpt_path)
mkdir_p(self.log_path)
mkdir_p(self.image_path)
# set logger.
self.logger = {}
self.title = 'DeLF-{}'.format(config.stage.upper())
self.logger['train'] = Logger(os.path.join(self.prefix, config.stage, 'log/train.log'))
self.logger['val'] = Logger(os.path.join(self.prefix, config.stage, 'log/val.log'))
self.logger['train'].set_names(
['epoch','lr', 'loss', 'top1_accu', 'top3_accu', 'top5_accu'])
self.logger['val'].set_names(
['epoch','lr', 'loss', 'top1_accu', 'top3_accu', 'top5_accu'])
def __exit__(self):
self.train_logger.close()
self.val_logger.close()
def __adjust_pixel_range__(self,
x,
range_from=[0,1],
range_to=[-1,1]):
'''
adjust pixel range from <range_from> to <range_to>.
'''
if not range_from == range_to:
scale = float(range_to[1]-range_to[0])/float(range_from[1]-range_from[0])
bias = range_to[0]-range_from[0]*scale
x = x.mul(scale).add(bias)
return x
def __save_checkpoint__(self, state, ckpt='ckpt', filename='checkpoint.pth.tar'):
filepath = os.path.join(ckpt, filename)
torch.save(state, filepath)
def __solve__(self, mode, epoch, dataloader):
'''solve
mode: train / val
'''
batch_timer = AverageMeter()
data_timer = AverageMeter()
prec_losses = AverageMeter()
prec_top1 = AverageMeter()
prec_top3 = AverageMeter()
prec_top5 = AverageMeter()
if mode in ['val']:
pass;
#confusion_matrix = ConfusionMeter()
since = time.time()
bar = Bar('[{}]{}'.format(mode.upper(), self.title), max=len(dataloader))
for batch_idx, (inputs, labels) in enumerate(dataloader):
# measure data loading time
data_timer.update(time.time() - since)
# wrap inputs in variable
if mode in ['train']:
if __is_cuda__():
inputs = inputs.cuda()
labels = labels.cuda(async=True)
inputs = __to_var__(inputs)
labels = __to_var__(labels)
elif mode in ['val']:
if __is_cuda__():
inputs = inputs.cuda()
labels = labels.cuda(async=True)
inputs = __to_var__(inputs, volatile=True)
labels = __to_var__(labels, volatile=False)
# forward
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
# backward + optimize
if mode in ['train']:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# statistics
prec_1, prec_3, prec_5 = compute_precision_top_k(
__to_tensor__(outputs),
__to_tensor__(labels),
top_k=(1,3,5))
batch_size = inputs.size(0)
prec_losses.update(__to_tensor__(loss)[0], batch_size)
prec_top1.update(prec_1[0], batch_size)
prec_top3.update(prec_3[0], batch_size)
prec_top5.update(prec_5[0], batch_size)
# measure elapsed time
batch_timer.update(time.time() - since)
since = time.time()
# progress
log_msg = ('\n[{mode}][epoch:{epoch}][iter:({batch}/{size})]'+
'[lr:{lr}] loss: {loss:.4f} | top1: {top1:.4f} | ' +
'top3: {top3:.4f} | top5: {top5:.4f} | eta: ' +
'(data:{dt:.3f}s),(batch:{bt:.3f}s),(total:{tt:})') \
.format(
mode=mode,
epoch=self.epoch+1,
batch=batch_idx+1,
size=len(dataloader),
lr=self.lr_scheduler.get_lr()[0],
loss=prec_losses.avg,
top1=prec_top1.avg,
top3=prec_top3.avg,
top5=prec_top5.avg,
dt=data_timer.val,
bt=batch_timer.val,
tt=bar.elapsed_td)
print(log_msg)
bar.next()
bar.finish()
# write to logger
self.logger[mode].append([self.epoch+1,
self.lr_scheduler.get_lr()[0],
prec_losses.avg,
prec_top1.avg,
prec_top3.avg,
prec_top5.avg])
# save model
if mode == 'val' and prec_top1.avg > self.best_acc:
print('best_acc={}, new_best_acc={}'.format(self.best_acc, prec_top1.avg))
self.best_acc = prec_top1.avg
state = {
'epoch': self.epoch,
'acc': self.best_acc,
'optimizer': self.optimizer.state_dict(),
}
self.model.write_to(state)
filename = 'bestshot.pth.tar'
self.__save_checkpoint__(state, ckpt=self.ckpt_path, filename=filename)
def train(self, mode, epoch, train_loader, val_loader):
self.epoch = epoch
if mode in ['train']:
self.model.train()
self.lr_scheduler.step()
dataloader = train_loader
else:
assert mode == 'val'
self.model.eval()
dataloader = val_loader
self.__solve__(mode, epoch, dataloader)
| 36.872807 | 102 | 0.512906 | 925 | 8,407 | 4.423784 | 0.227027 | 0.017595 | 0.017107 | 0.017107 | 0.217253 | 0.197214 | 0.170821 | 0.170821 | 0.144428 | 0.124878 | 0 | 0.014954 | 0.371595 | 8,407 | 227 | 103 | 37.035242 | 0.759606 | 0.050077 | 0 | 0.141975 | 0 | 0.012346 | 0.059228 | 0.012469 | 0 | 0 | 0 | 0 | 0.006173 | 0 | null | null | 0.006173 | 0.04321 | null | null | 0.012346 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2ac870289e4a135beaad406a435f626a1f8fb78e | 518 | py | Python | data_sqlalchemy/word.py | natter1/estonian_learner | da7837f0d64f4c1f6a212a9c473252c4b834699a | [
"MIT"
] | null | null | null | data_sqlalchemy/word.py | natter1/estonian_learner | da7837f0d64f4c1f6a212a9c473252c4b834699a | [
"MIT"
] | null | null | null | data_sqlalchemy/word.py | natter1/estonian_learner | da7837f0d64f4c1f6a212a9c473252c4b834699a | [
"MIT"
] | null | null | null | import datetime
from data_sqlalchemy.modelbase import SqlAlchemyBase
import sqlalchemy as sa
class Word(SqlAlchemyBase):
__tablename__ = "words"
# id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
id = sa.Column(sa.String, primary_key=True)
created_date = sa.Column(sa.DateTime, default=datetime.datetime.now, index=True)
hint = sa.String() # not supported by sqlite: nullable=True
def __repr__(self): # for more useful debug messages
return f"<Package {self.id}>"
| 32.375 | 84 | 0.727799 | 69 | 518 | 5.289855 | 0.623188 | 0.065753 | 0.082192 | 0.065753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.169884 | 518 | 15 | 85 | 34.533333 | 0.848837 | 0.258687 | 0 | 0 | 0 | 0 | 0.063158 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0.1 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
2acdbe48c1a63c18df33e8e3cfb8731347ace259 | 466 | py | Python | fbl_handler/test_message_id_extractor.py | wheekey/fbl-handler | 918e2c7ac50033630eebd5741a7ad3e9c56c3930 | [
"MIT"
] | null | null | null | fbl_handler/test_message_id_extractor.py | wheekey/fbl-handler | 918e2c7ac50033630eebd5741a7ad3e9c56c3930 | [
"MIT"
] | null | null | null | fbl_handler/test_message_id_extractor.py | wheekey/fbl-handler | 918e2c7ac50033630eebd5741a7ad3e9c56c3930 | [
"MIT"
] | null | null | null | from unittest import TestCase
from fbl_handler.message_id_extractor import MessageIdExtractor
class TestMessageIdExtractor(TestCase):
def setUp(self):
self.message_id_extractor = MessageIdExtractor()
def test_extract_from_yandex_fbl(self):
with open('files/yandex_fbl.txt', 'r') as file:
file_content = file.read()
self.assertEqual(self.message_id_extractor.extract_from_yandex_fbl(file_content), '1jb6B3-0004MN-Dq')
| 31.066667 | 109 | 0.751073 | 58 | 466 | 5.741379 | 0.534483 | 0.081081 | 0.162162 | 0.132132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017949 | 0.16309 | 466 | 14 | 110 | 33.285714 | 0.835897 | 0 | 0 | 0 | 0 | 0 | 0.07957 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.222222 | false | 0 | 0.222222 | 0 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2ae22ebc1ff5af7ea983e4b1cf1879b8fdb9382c | 9,880 | py | Python | python/plot_meridional_avg_multiple_fields_climo.py | E3SM-Project/a-prime | a8c084ab6f727904a2b38d8a93b9c83e2f978e3f | [
"BSD-3-Clause"
] | 1 | 2017-06-07T13:13:32.000Z | 2017-06-07T13:13:32.000Z | python/plot_meridional_avg_multiple_fields_climo.py | ACME-Climate/a-prime | a8c084ab6f727904a2b38d8a93b9c83e2f978e3f | [
"BSD-3-Clause"
] | 31 | 2017-06-07T00:26:58.000Z | 2018-04-09T17:03:15.000Z | python/plot_meridional_avg_multiple_fields_climo.py | ACME-Climate/a-prime | a8c084ab6f727904a2b38d8a93b9c83e2f978e3f | [
"BSD-3-Clause"
] | 1 | 2018-08-05T23:43:59.000Z | 2018-08-05T23:43:59.000Z | #
# Copyright (c) 2017, UT-BATTELLE, LLC
# All rights reserved.
#
# This software is released under the BSD license detailed
# in the LICENSE file in the top level a-prime directory
#
###Work in Progress: Plot meridional averages for different fields in the same plot.
###07/03/2017
import matplotlib as mpl
#changing the default backend to agg to resolve contouring issue on rhea
mpl.use('Agg')
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy
from netCDF4 import Dataset
from read_monthly_data_ts import read_monthly_data_ts
from get_season_months_index import get_season_months_index
from get_days_in_season_months import get_days_in_season_months
from get_reg_area_avg import get_reg_area_avg
from aggregate_ts_weighted import aggregate_ts_weighted
from get_reg_seasonal_avg import get_reg_seasonal_avg
from get_season_name import get_season_name
from get_reg_avg_climo import get_reg_avg_climo
# assumed local import, following the one-function-per-module pattern used above
from get_reg_meridional_avg_climo import get_reg_meridional_avg_climo
from optparse import OptionParser
import argparse
def plot_meridional_avg_multiple_fields_climo (indir,
casename,
field_names,
interp_grid,
interp_method,
ref_case,
ref_interp_grid,
ref_interp_method,
begin_yr,
end_yr,
begin_month,
end_month,
aggregate,
debug = False):
n_fields = len(field_names)
for i,field_name in enumerate(field_names):
print __name__, 'casename: ', casename
meridional_avg, lon_reg, units = get_reg_meridional_avg_climo (
indir = indir,
casename = casename,
field_name = field_names[i],
interp_grid = interp_grid,
interp_method = interp_method,
begin_yr = begin_yr,
end_yr = end_yr,
begin_month = begin_month,
end_month = end_month,
reg = reg,
debug = debug)
if i == 0:
plot_field = numpy.zeros((n_fields, meridional_avg.shape[0]))
units_list = []
plot_field[i, :] = meridional_avg
units_list.append(units)
if ref_case == 'CERES-EBAF':
if field_name == 'FLNT': field_name_ref = 'FLUT'
if field_name == 'RESTOM': field_name_ref = 'RESTOA'
if field_name == 'FSNT': field_name_ref = 'FSNTOA'
elif ref_case == 'HadISST':
if field_name == 'TS': field_name_ref = 'SST'
else:
field_name_ref = field_name
ref_meridional_avg, lon_reg, ref_units = get_reg_meridional_avg_climo (
indir = ref_case_dir,
casename = ref_case,
field_name = field_name_ref,
interp_grid = ref_interp_grid,
interp_method = ref_interp_method,
begin_yr = begin_yr,
end_yr = end_yr,
begin_month = begin_month,
end_month = end_month,
reg = reg,
debug = debug)
if i == 0: ref_plot_field = numpy.zeros((n_fields, meridional_avg.shape[0]))
ref_plot_field[i, :] = ref_meridional_avg
if debug: print __name__, 'ref_plot_field.shape ', ref_plot_field.shape
if debug: print __name__, 'plot_field: ', plot_field
plot_field_mean = numpy.mean(plot_field, axis = 1)
ref_plot_field_mean = numpy.mean(ref_plot_field, axis = 1)
f, ax = plt.subplots(n_fields, sharex = True, figsize=(8.5,11))
nlon = lon_reg.shape[0]
f.text(0.5, 0.04, 'Longitude', ha='center', fontsize = 24)
season = get_season_name(begin_month, end_month)
plt.suptitle(reg_name + '\n Meridional Avg. ' + season, fontsize = 24)
ref_case_text = ref_case + ' ' + field_name_ref + ' climo'
for i,field_name in enumerate(field_names):
min_plot = min(numpy.amin(plot_field[i, :]), ref_plot_field[i, 0])
max_plot = max(numpy.amax(plot_field[i, :]), ref_plot_field[i, 0])
y_axis_ll = min_plot - 0.5*numpy.std(plot_field[i, :])
y_axis_ul = max_plot + 0.5 * numpy.std(plot_field[i,:])
ax[i].axis([lon_reg[0],lon_reg[-1], y_axis_ll, y_axis_ul])
print 'lon_reg[0],lon_reg[-1], 1.1*min_plot, 1.1*max_plot: ', \
lon_reg[0],lon_reg[-1], 1.1*min_plot, 1.1*max_plot
test_line, = ax[i].plot(lon_reg, plot_field[i, :], color = colors[i], linewidth = 1.0, label = casename)
ref_line, = ax[i].plot(lon_reg, ref_plot_field[i, :], color = 'black', linewidth = 1.0, label = ref_case)
if i == 0:
ax[i].legend(bbox_to_anchor = (1.0,1.5), handles=[ref_line, test_line], fontsize = 10)
ax[i].set_title(field_name, fontsize = 12)
ax[i].text(0.04, 0.5, field_name + ' (' + units_list[i] + ')', va='center', rotation='vertical', fontsize = 16)
ax[i].get_yaxis().get_major_formatter().set_useOffset(False)
ax[i].yaxis.set_major_locator(MaxNLocator(6))
for tick in ax[i].yaxis.get_major_ticks():
tick.label.set_fontsize(10)
for tick in ax[i].xaxis.get_major_ticks():
tick.label.set_fontsize(10)
plt.subplots_adjust(hspace=0.3)
mpl.rcParams['savefig.dpi']=300
outfile = plots_dir + '/' + casename + '_' \
+ 'meridional_avg' + '_' + reg + '_' + season + '.png'
plt.savefig(outfile)
#plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(usage = "python %prog [options]")
parser.add_argument("-d", "--debug", dest = "debug", default = False,
help = "debug option to print some data")
parser.add_argument("--indir", dest = "indir",
help = "filepath to directory model data")
parser.add_argument("-c", "--casename", dest = "casename",
help = "casename of the run")
parser.add_argument("-f", "--field_name", dest = "field_names", nargs = '+',
help = "variable name")
parser.add_argument("--interp_grid", dest = "interp_grid",
help = "variable name")
parser.add_argument("--interp_method", dest = "interp_method",
help = "method used for interpolating the test case e.g. conservative_mapping")
parser.add_argument("--ref_case_dir", dest = "ref_case_dir",
help = "filepath to ref_case directory")
parser.add_argument("--ref_case", dest = "ref_case",
help = "reference casename")
parser.add_argument("--ref_interp_grid", dest = "ref_interp_grid",
help = "name of the interpolated grid of reference case")
parser.add_argument("--ref_interp_method", dest = "ref_interp_method",
help = "method used for interpolating the reference case e.g. conservative_mapping")
parser.add_argument("--begin_yr", dest = "begin_yr", type = int,
help = "begin year")
parser.add_argument("--end_yr", dest = "end_yr", type = int,
help = "end year")
parser.add_argument("--begin_month", dest = "begin_month", type = int,
help = "begin_month", default = 0)
parser.add_argument("--end_month", dest = "end_month", type = int,
help = "end_month", default = 11)
parser.add_argument("--aggregate", dest = "aggregate", type = int,
help = "end_month", default = 1)
parser.add_argument("--reg", dest = "reg", nargs = '+',
help = "regions to be analyzed/plotted")
parser.add_argument("--reg_name", dest = "reg_name", nargs = '+',
help = "names of regions to be placed in plots")
parser.add_argument("--plots_dir", dest = "plots_dir",
help = "filepath to GPCP directory")
args = parser.parse_args()
debug = args.debug
indir = args.indir
casename = args.casename
field_names = args.field_names
interp_grid = args.interp_grid
interp_method = args.interp_method
ref_case_dir = args.ref_case_dir
ref_case = args.ref_case
ref_interp_grid = args.ref_interp_grid
ref_interp_method = args.ref_interp_method
begin_yr = args.begin_yr
end_yr = args.end_yr
begin_month = args.begin_month
end_month = args.end_month
aggregate = args.aggregate
reg = args.reg
reg_name = args.reg_name
plots_dir = args.plots_dir
colors = ['b', 'g', 'r', 'c', 'm', 'y']
x = mpl.get_backend()
print 'backend: ', x
plot_meridional_avg_multiple_fields_climo(
indir = indir,
casename = casename,
field_names = field_names,
interp_grid = interp_grid,
interp_method = interp_method,
ref_case = ref_case,
ref_interp_grid = ref_interp_grid,
ref_interp_method = ref_interp_method,
begin_yr = begin_yr,
end_yr = end_yr,
begin_month = begin_month,
end_month = end_month,
reg = reg,
reg_name = reg_name,
aggregate = aggregate,
debug = debug)
| 37.003745 | 119 | 0.566093 | 1,208 | 9,880 | 4.31457 | 0.194536 | 0.034536 | 0.058711 | 0.020721 | 0.3302 | 0.275326 | 0.233308 | 0.178434 | 0.085572 | 0.085572 | 0 | 0.013196 | 0.332692 | 9,880 | 266 | 120 | 37.142857 | 0.77734 | 0.034514 | 0 | 0.183784 | 0 | 0 | 0.12013 | 0.002415 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.086486 | null | null | 0.032432 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2ae2318f1e53b32601c5c8db2c16277213b2f472 | 1,670 | py | Python | sample/sample.py | dogwood008/python-kabusapi | 7e7a5ac232e037c651b5447b408d8b0b6727c9b0 | [
"MIT"
] | 11 | 2020-10-02T04:27:50.000Z | 2022-01-05T03:41:03.000Z | sample/sample.py | dogwood008/python-kabusapi | 7e7a5ac232e037c651b5447b408d8b0b6727c9b0 | [
"MIT"
] | 1 | 2020-12-15T03:52:45.000Z | 2020-12-15T23:28:04.000Z | sample/sample.py | dogwood008/python-kabusapi | 7e7a5ac232e037c651b5447b408d8b0b6727c9b0 | [
"MIT"
] | 5 | 2021-02-21T16:25:50.000Z | 2022-02-15T08:11:43.000Z | import kabusapi
url = "localhost"
port = "18081" # for testing; production uses 18080
password = "hogehoge"
# Initial setup / obtain a token
api = kabusapi.Context(url, port, password)
# Display the obtained token
print(api.token)
# Initial setup with a token specified (no password needed)
api = kabusapi.Context(url, port, token='fugafuga')
# Place an order (cash buy)
data = {
"Password": "hoge",
"Symbol": 8306, # MUFG
"Exchange": 1,
"SecurityType": 1,
"Side": 2,
"CashMargin": 1,
"MarginTradeType": None,
"DelivType": 1,
"FundType": "02",
"AccountType": 4,
"Qty": 100,
"ClosePositionOrder": None,
"Price": 0,
"ExpireDay": 0,
"FrontOrderType": 10,
}
response = api.sendorder(**data)
# Cancel an order
data = {
"OrderId": "hoge",
"Password": "fuga",
}
response = api.cancelorder(**data)
# Buying power (cash)
response = api.wallet.cash()
# Buying power (cash, by symbol)
data = {
"symbol": 8306,
"exchange": 1,
}
response = api.wallet.cash(**data)
# Buying power (margin)
response = api.wallet.margin()
# Buying power (margin, by symbol)
data = {
"symbol": 8306,
"exchange": 1,
}
response = api.wallet.margin(**data)
# Market price / order book information
data = {
"symbol": 8306,
"exchange": 1,
}
response = api.board(**data)
# Symbol information
data = {
"symbol": 8306,
"exchange": 1,
}
response = api.symbol(**data)
# Order / execution inquiry
response = api.orders()
# Position (balance) inquiry
response = api.positions()
# Register symbols
data = {
"Symbols": [
{"Symbol": 8306, "Exchange": 1, },
{"Symbol": 9433, "Exchange": 1, },
]
}
response = api.register(**data)
# Unregister symbols
data = {
"Symbols": [
{"Symbol": 8306, "Exchange": 1, },
{"Symbol": 9433, "Exchange": 1, },
]
}
response = api.unregister(**data)
# Unregister all symbols
response = api.unregister.all()
| 14.396552 | 51 | 0.575449 | 183 | 1,670 | 5.262295 | 0.42623 | 0.148494 | 0.11215 | 0.11838 | 0.338525 | 0.286604 | 0.286604 | 0.215992 | 0.215992 | 0.124611 | 0 | 0.053822 | 0.232335 | 1,670 | 115 | 52 | 14.521739 | 0.695788 | 0.10479 | 0 | 0.318841 | 0 | 0 | 0.22019 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.057971 | 0.014493 | 0 | 0.014493 | 0.014493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2ae48ee7262087d6a799cdd263fa2699c0692509 | 2,766 | py | Python | tests/runtime/doubling_vector.py | yangdanny97/chocopy-python-compiler | 588cba0cb330bd63f00e06420a32ba47c25c4468 | [
"MIT"
] | 7 | 2021-08-28T18:20:45.000Z | 2022-02-01T07:35:59.000Z | tests/runtime/doubling_vector.py | yangdanny97/chocopy-python-compiler | 588cba0cb330bd63f00e06420a32ba47c25c4468 | [
"MIT"
] | 1 | 2020-05-30T17:57:11.000Z | 2020-05-30T20:44:53.000Z | tests/runtime/doubling_vector.py | yangdanny97/chocopy-python-frontend | d0fb63fc744771640fa4d06076743f42089899c1 | [
"MIT"
] | 2 | 2022-02-05T06:16:16.000Z | 2022-02-24T11:07:09.000Z | # A resizable list of integers
class Vector(object):
# Attributes
items: [int] = None
size: int = 0
# Constructor
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int):
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 16
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
vec:Vector = None
num:int = 0
# Create a vector and populate it with The Numbers
vec = DoublingVector()
for num in [4, 8, 15, 16, 23, 42]:
vec.append(num)
__assert__(vec.capacity() == 8)
__assert__(vec.size == 6)
__assert__(vec.items[0] == 4)
__assert__(vec.items[1] == 8)
__assert__(vec.items[2] == 15)
__assert__(vec.items[3] == 16)
__assert__(vec.items[4] == 23)
__assert__(vec.items[5] == 42)
# extras from doubling
__assert__(vec.items[6] == 15)
__assert__(vec.items[7] == 16)
vec = Vector()
for num in [4, 8, 15, 16, 23, 42]:
vec.append(num)
__assert__(vec.capacity() == 6)
__assert__(vec.size == 6)
__assert__(vec.items[0] == 4)
__assert__(vec.items[1] == 8)
__assert__(vec.items[2] == 15)
__assert__(vec.items[3] == 16)
__assert__(vec.items[4] == 23)
__assert__(vec.items[5] == 42)
vec = vrange(0, 1)
__assert__(vec.capacity() == 1)
__assert__(vec.size == 1)
__assert__(vec.items[0] == 0)
vec = vrange(0, 2)
__assert__(vec.capacity() == 2)
__assert__(vec.size == 2)
__assert__(vec.items[0] == 0)
__assert__(vec.items[1] == 1)
vec = vrange(1, 3)
__assert__(vec.capacity() == 2)
__assert__(vec.size == 2)
__assert__(vec.items[0] == 1)
__assert__(vec.items[1] == 2)
vec = vrange(1, 1)
__assert__(vec.capacity() == 1)
__assert__(vec.size == 0)
vec = vrange(0, -1)
__assert__(vec.capacity() == 1)
__assert__(vec.size == 0)
vec = vrange(1, 100)
__assert__(vec.size == 99)
| 24.696429 | 63 | 0.629067 | 396 | 2,766 | 4.027778 | 0.219697 | 0.19185 | 0.166771 | 0.047022 | 0.397492 | 0.386834 | 0.386834 | 0.386834 | 0.340439 | 0.340439 | 0 | 0.049816 | 0.216197 | 2,766 | 111 | 64 | 24.918919 | 0.685886 | 0.142444 | 0 | 0.423077 | 0 | 0 | 0.016109 | 0 | 0 | 0 | 0 | 0 | 0.435897 | 1 | 0.076923 | false | 0 | 0 | 0.012821 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |