hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6e32d6873fc10d3abae751fb17488f24668f4d82 | 340 | py | Python | krankit/interactions/apps.py | ruankranz/blog | d83adc9035bb71f839e8e1c74a036f99be7f9d18 | [
"MIT"
] | null | null | null | krankit/interactions/apps.py | ruankranz/blog | d83adc9035bb71f839e8e1c74a036f99be7f9d18 | [
"MIT"
] | 1 | 2021-05-11T12:43:52.000Z | 2021-05-11T12:43:52.000Z | krankit/interactions/apps.py | ruankranz/blog | d83adc9035bb71f839e8e1c74a036f99be7f9d18 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class InteractionsConfig(AppConfig):
    """Django application config for the ``krankit.interactions`` app."""

    # Dotted path of the application package.
    name = "krankit.interactions"
    # Translatable, human-readable name shown e.g. in the Django admin.
    verbose_name = _("Interactions")

    def ready(self):
        """Import signal handlers once the app registry is fully loaded.

        The import is performed purely for its side effect of connecting
        signal receivers; a missing signals module is tolerated silently.
        """
        try:
            import krankit.interactions.signals  # noqa F401
        except ImportError:
            pass
| 24.285714 | 60 | 0.682353 |
9e024c0ae8b11574a706c0b3a7b78df7509740aa | 4,056 | py | Python | Social network/SocialNetwork/settings.py | alirezaryahi/Django-Social-Network | c14119762c1075c8efe80f373c763ae3b3a1d726 | [
"MIT"
] | null | null | null | Social network/SocialNetwork/settings.py | alirezaryahi/Django-Social-Network | c14119762c1075c8efe80f373c763ae3b3a1d726 | [
"MIT"
] | null | null | null | Social network/SocialNetwork/settings.py | alirezaryahi/Django-Social-Network | c14119762c1075c8efe80f373c763ae3b3a1d726 | [
"MIT"
] | null | null | null | """
Django settings for SocialNetwork project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '(*y()t_77p-+*uw15^m@#xk@=r%-3-3h*4f&z+&%oyt$oj^&dm'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',  # required by django-allauth (uses SITE_ID below)
    # our apps
    'posts',
    'profiles',
    # all-auth apps
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
]

# django.contrib.sites record used by allauth.
SITE_ID = 1

# allauth: signup requires an email address, one account per address.
ACCOUNT_EMAIL_UNIQUE = True
ACCOUNT_EMAIL_REQUIRED = True

if DEBUG:
    # Discard all outgoing mail while developing.
    EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
    # EMAIL_BACKEND='django.core.mail.backends.smtp.EmailBackend'

# Where users land after a successful login.
LOGIN_REDIRECT_URL = '/posts'

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'SocialNetwork.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Project processors from the profiles app (avatar, invite
                # and friend counters available in every template).
                'profiles.context_processors.profile_pic',
                'profiles.context_processors.invite_number',
                'profiles.context_processors.friends_number',
            ],
        },
    },
]

# ModelBackend first so username/password login keeps working alongside
# the allauth backend.
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
]

WSGI_APPLICATION = 'SocialNetwork.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'

STATICFILES_DIRS = [os.path.join(BASE_DIR, 'assets')]

STATIC_ROOT = os.path.join(BASE_DIR, 'static_cdn', 'static_root')

MEDIA_URL = '/media/'

MEDIA_ROOT = os.path.join(BASE_DIR, 'static_cdn', 'media_root')
| 26.684211 | 91 | 0.698718 |
187ecdc8dd3af3214a6ccdc6dd29e224ad6c19de | 8,365 | py | Python | custom/icds/forms.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | [
"BSD-3-Clause"
] | null | null | null | custom/icds/forms.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | [
"BSD-3-Clause"
] | null | null | null | custom/icds/forms.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | [
"BSD-3-Clause"
] | null | null | null | from django import forms
from django.core.exceptions import ValidationError
from django.forms.widgets import Select
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from crispy_forms import layout as crispy
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from corehq.apps.app_manager.dbaccessors import (
get_brief_apps_in_domain,
get_version_build_id,
)
from corehq.apps.app_manager.exceptions import BuildNotFoundException
from corehq.apps.hqwebapp import crispy as hqcrispy
from corehq.apps.hqwebapp.crispy import HQFormHelper
from custom.icds.models import HostedCCZ, HostedCCZLink
from custom.icds.tasks.data_pulls import run_data_pull
from custom.icds_reports.const import CUSTOM_DATA_PULLS
class HostedCCZLinkForm(forms.ModelForm):
    """ModelForm for creating/updating a HostedCCZLink within a domain.

    On update the password field is removed (and a Delete button added);
    on create the password is entered through a masked widget.
    """

    class Meta:
        model = HostedCCZLink
        exclude = ('domain',)

    def __init__(self, domain, *args, **kwargs):
        super(HostedCCZLinkForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        is_update = bool(self.instance.pk)
        # Primary submit button: label depends on create vs. update.
        self.helper.layout.append(Submit('save', _('Update') if is_update else _('Create')))
        if is_update:
            # Existing links never expose/change their password here.
            del self.fields['password']
            self.helper.layout.append(Submit('delete', _('Delete')))
        else:
            self.fields['password'].widget = forms.PasswordInput()
        # Wrap everything in a titled fieldset.
        self.helper.layout = crispy.Fieldset(_("CCZ Hosting Link"), self.helper.layout)
        self.fields['identifier'].widget.attrs.update({'class': 'text-lowercase'})
        self.instance.domain = domain
class HostedCCZForm(forms.Form):
    """Search for and create hosted CCZ builds.

    One form backs both the search UI (link/app/status filters, seeded
    from the request's query string) and the "Create" action, which
    records a new ``HostedCCZ`` request.
    """
    link_id = forms.ChoiceField(label=ugettext_lazy("Link"), choices=(), required=False)
    app_id = forms.ChoiceField(label=ugettext_lazy("Application"), choices=(), required=True)
    version = forms.IntegerField(label=ugettext_lazy('Version'), required=True, widget=Select(choices=[]))
    profile_id = forms.CharField(label=ugettext_lazy('Application Profile'),
                                 required=False, widget=Select(choices=[]))
    file_name = forms.CharField(label=ugettext_lazy("CCZ File Name"), required=False)
    note = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 3, 'cols': 15}))
    status = forms.ChoiceField(label=ugettext_lazy("Status"),
                               choices=(
                                   ('', ugettext_lazy('Select Status')),
                                   (HostedCCZ.PENDING, ugettext_lazy('Pending')),
                                   (HostedCCZ.BUILDING, ugettext_lazy('Building')),
                                   (HostedCCZ.FAILED, ugettext_lazy('Failed')),
                                   (HostedCCZ.COMPLETED, ugettext_lazy('Completed'))),
                               required=False,
                               help_text=ugettext_lazy("Applicable for search only"))

    def __init__(self, request, domain, email, *args, **kwargs):
        """Populate dynamic choices and build the crispy layout."""
        self.domain = domain
        self.email = email
        super(HostedCCZForm, self).__init__(*args, **kwargs)
        self.fields['link_id'].choices = self.link_choices()
        self.fields['app_id'].choices = self.app_id_choices()
        self.helper = HQFormHelper()
        # Seed initial filter values from the query string so the search
        # selections persist across page loads.
        if request.GET.get('app_id'):
            self.fields['app_id'].initial = request.GET.get('app_id')
        if request.GET.get('link_id'):
            self.fields['link_id'].initial = request.GET.get('link_id')
        if request.GET.get('status'):
            self.fields['status'].initial = request.GET.get('status')
        self.helper.layout = crispy.Layout(
            crispy.Field('link_id', css_class="hqwebapp-select2", data_bind="value: linkId"),
            crispy.Field('app_id', css_class="hqwebapp-select2", data_bind="value: appId"),
            crispy.Field('version', data_bind="value: version"),
            crispy.Field('profile_id', id="build-profile-id-input", data_bind="value: profileId"),
            crispy.Field('file_name'),
            crispy.Field('note'),
            crispy.Field('status', data_bind="value: status"),
            hqcrispy.FormActions(
                crispy.ButtonHolder(
                    crispy.Button(
                        'search',
                        ugettext_lazy("Search"),
                        css_class="btn-default",
                        data_bind="click: search"
                    ),
                    crispy.Button(
                        'clear',
                        ugettext_lazy("Clear"),
                        css_class="btn-default",
                        data_bind="click: clear"
                    ),
                    Submit('submit', ugettext_lazy("Create"))
                )
            )
        )

    def clean_link_id(self):
        # The field is declared required=False for search mode, so presence
        # is enforced here for the create path instead.
        if not self.cleaned_data.get('link_id'):
            self.add_error('link_id', _("Please select link"))
        return self.cleaned_data.get('link_id')

    def app_id_choices(self):
        """Return (app_id, name) choices for all apps in the domain."""
        choices = [(None, _('Select Application'))]
        for app in get_brief_apps_in_domain(self.domain):
            choices.append((app.id, app.name))
        return choices

    def link_choices(self):
        """Return (pk, identifier) choices for the domain's hosting links."""
        choices = [(None, _('Select Link'))]
        for link in HostedCCZLink.objects.filter(domain=self.domain):
            choices.append((link.id, link.identifier))
        return choices

    def _version_exists(self):
        # NOTE(review): get_version_build_id appears to raise
        # BuildNotFoundException for a missing build (handled in clean());
        # a truthy build id means the version exists — confirm upstream.
        return bool(get_version_build_id(self.domain, self.cleaned_data['app_id'],
                                         self.cleaned_data['version']))

    def clean(self):
        # Cross-field validation: the requested app version must have a build.
        if self.cleaned_data.get('app_id') and self.cleaned_data.get('version'):
            try:
                self._version_exists()
            except BuildNotFoundException as e:
                self.add_error('version', e)

    def save(self):
        """Persist a new HostedCCZ request.

        Returns a ``(success, error_message)`` tuple; ``error_message`` is
        None on success and a comma-joined validation message otherwise.
        """
        try:
            HostedCCZ(
                link_id=self.cleaned_data['link_id'], app_id=self.cleaned_data['app_id'],
                version=self.cleaned_data['version'], profile_id=self.cleaned_data['profile_id'],
                file_name=self.cleaned_data['file_name'],
                note=self.cleaned_data['note'],
            ).save(email=self.email)
        except ValidationError as e:
            return False, ','.join(e.messages)
        return True, None
class CustomDataPullForm(forms.Form):
    """Form to trigger a custom ICDS data pull for a given month/location."""

    data_pull = forms.ChoiceField(label=ugettext_lazy("Data Pull"), choices=(
        (pull.slug, pull.name) for pull in CUSTOM_DATA_PULLS.values()
    ))
    month = forms.DateField(required=True, widget=forms.DateInput())
    location_id = forms.CharField(label=ugettext_lazy("Location"), widget=Select(choices=[]), required=False)

    def __init__(self, request, domain, *args, **kwargs):
        self.domain = domain
        super(CustomDataPullForm, self).__init__(*args, **kwargs)
        self.helper = HQFormHelper()
        # Single submit button wrapped in the standard form-actions block.
        actions = hqcrispy.FormActions(
            crispy.ButtonHolder(
                Submit('submit', ugettext_lazy("Submit"))
            )
        )
        self.helper.layout = crispy.Layout(
            crispy.Field('data_pull'),
            crispy.Field('month', id="month_select", css_class="date-picker"),
            crispy.Field('location_id', id='location_search_select'),
            actions,
        )

    def clean_month(self):
        """Only allow the first day of a month to be selected."""
        month = self.cleaned_data['month']
        if month is not None and month.day != 1:
            self.add_error("month", "Only first of month should be selected")
        return month

    def clean_location_id(self):
        """Resolve the submitted location slug to a concrete location id."""
        slug = self.cleaned_data['location_id']
        return self._extract_location_id(slug) if slug else slug

    @staticmethod
    def _extract_location_id(location_id_slug):
        # Delegate slug expansion to the report filter helper.
        from corehq.apps.reports.filters.users import ExpandedMobileWorkerFilter
        matches = ExpandedMobileWorkerFilter.selected_location_ids([location_id_slug])
        if matches:
            return matches[0]
        return None

    def submit(self, domain, email):
        """Queue the data pull as an async task, emailing *email* on completion."""
        run_data_pull.delay(
            self.cleaned_data['data_pull'],
            domain,
            self.cleaned_data['month'],
            self.cleaned_data['location_id'],
            email,
        )
| 44.259259 | 109 | 0.609085 |
3e58e53b3b0593da83813cec6a961f905a4998dd | 2,804 | py | Python | src/ultros/core/storage/config/json.py | UltrosBot/Ultros3K | 3aac86beecf94ff1391ca993eafaaf55e513b965 | [
"Artistic-2.0"
] | 11 | 2016-06-29T11:54:42.000Z | 2020-11-02T00:09:41.000Z | src/ultros/core/storage/config/json.py | UltrosBot/Ultros3K | 3aac86beecf94ff1391ca993eafaaf55e513b965 | [
"Artistic-2.0"
] | 4 | 2016-06-29T12:11:25.000Z | 2017-03-21T15:24:32.000Z | src/ultros/core/storage/config/json.py | UltrosBot/Ultros3K | 3aac86beecf94ff1391ca993eafaaf55e513b965 | [
"Artistic-2.0"
] | null | null | null | # coding=utf-8
"""
Class for JSON-based configurations
"""
import json
from typing import Any, List, Dict
from ultros.core.storage import manager as m
from ultros.core.storage.base import MutableAbstractItemAccessMixin, MutableAbstractDictFunctionsMixin
from ultros.core.storage.config.base import MutableConfigFile
__author__ = "Gareth Coles"
class JSONConfig(MutableConfigFile, MutableAbstractItemAccessMixin, MutableAbstractDictFunctionsMixin):
    """
    Class for JSON-based configurations

    The parsed JSON document is held in ``self.data`` (a dict) and exposed
    through the standard mutable-mapping protocol.
    """

    def __init__(self, owner: Any, manager: "m.StorageManager", path: str, *args: List[Any], **kwargs: Dict[Any, Any]):
        # Backing store for the parsed document; populated by load().
        self.data = {}
        super().__init__(owner, manager, path, *args, **kwargs)

    def load(self):
        """Parse the JSON file at ``self.path`` into ``self.data``."""
        with open(self.path, "r") as fh:
            self.data = json.load(fh)

    def save(self):
        """Serialize ``self.data`` back to ``self.path``.

        Raises RuntimeError when this file is read-only (``mutable`` False).
        """
        if not self.mutable:
            raise RuntimeError("You may not modify a defaults file at runtime - check the mutable attribute!")

        with open(self.path, "w") as fh:
            json.dump(self.data, fh, indent=2)

    def reload(self):
        """Discard in-memory data and re-read the file from disk."""
        self.unload()
        self.load()

    def unload(self):
        """Drop all in-memory data."""
        self.clear()

    # region: Dict functions

    def clear(self):
        return self.data.clear()

    def copy(self):
        return self.data.copy()

    def get(self, key, default=None):
        return self.data.get(key, default)

    def items(self):
        return self.data.items()

    def keys(self):
        return self.data.keys()

    def pop(self, key, default=None):
        # NOTE: unlike dict.pop, a missing key yields None instead of raising.
        return self.data.pop(key, default)

    def popitem(self):
        return self.data.popitem()

    def setdefault(self, key, default=None):
        # Delegate to dict.setdefault instead of re-implementing its logic.
        return self.data.setdefault(key, default)

    def update(self, other):
        return self.data.update(other)

    def values(self):
        return self.data.values()

    # endregion

    # Item access functions

    def __contains__(self, key):
        """
        Wrapper for `dict.__contains__()`
        """
        return self.data.__contains__(key)

    def __delitem__(self, key):
        """
        Wrapper for `dict.__delitem__()`
        """
        del self.data[key]

    def __getitem__(self, key):
        """
        Wrapper for `dict.__getitem__()`
        """
        return self.data.__getitem__(key)

    def __iter__(self):
        """
        Wrapper for `dict.__iter__()`
        """
        return self.data.__iter__()

    def __len__(self):
        """
        Wrapper for `dict.__len__()`
        """
        return self.data.__len__()

    def __setitem__(self, key, value):
        """
        Wrapper for `dict.__setitem__()`
        """
        return self.data.__setitem__(key, value)
96fdc0fd155a8cd4202b4affb07f2cb07211b8d7 | 36,498 | py | Python | python/libsixel/__init__.py | timholy/libsixel | 6a5be8b72d84037b83a5ea838e17bcf372ab1d5f | [
"MIT"
] | 1,938 | 2015-01-26T00:59:18.000Z | 2022-03-30T18:58:49.000Z | python/libsixel/__init__.py | timholy/libsixel | 6a5be8b72d84037b83a5ea838e17bcf372ab1d5f | [
"MIT"
] | 134 | 2015-01-25T10:53:44.000Z | 2022-03-19T20:57:11.000Z | python/libsixel/__init__.py | timholy/libsixel | 6a5be8b72d84037b83a5ea838e17bcf372ab1d5f | [
"MIT"
] | 67 | 2016-02-27T04:55:42.000Z | 2022-02-13T13:29:21.000Z | #!/usr/bin/env python
#
# Copyright (c) 2014-2020 Hayaki Saito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from ctypes import cdll, c_void_p, c_int, c_byte, c_char_p, POINTER, byref, CFUNCTYPE, string_at
from ctypes.util import find_library
# limitations
SIXEL_OUTPUT_PACKET_SIZE = 16384
SIXEL_PALETTE_MIN = 2
SIXEL_PALETTE_MAX = 256
SIXEL_USE_DEPRECATED_SYMBOLS = 1
SIXEL_ALLOCATE_BYTES_MAX = 10248 * 1024 * 128 # up to 128M
SIXEL_WIDTH_LIMIT = 1000000
SIXEL_HEIGHT_LIMIT = 1000000
# loader settings
SIXEL_DEFALUT_GIF_DELAY = 1
# return value
SIXEL_OK = 0x0000
SIXEL_FALSE = 0x1000
# error codes
SIXEL_RUNTIME_ERROR = (SIXEL_FALSE | 0x0100) # runtime error
SIXEL_LOGIC_ERROR = (SIXEL_FALSE | 0x0200) # logic error
SIXEL_FEATURE_ERROR = (SIXEL_FALSE | 0x0300) # feature not enabled
SIXEL_LIBC_ERROR = (SIXEL_FALSE | 0x0400) # errors caused by curl
SIXEL_CURL_ERROR = (SIXEL_FALSE | 0x0500) # errors occures in libc functions
SIXEL_JPEG_ERROR = (SIXEL_FALSE | 0x0600) # errors occures in libjpeg functions
SIXEL_PNG_ERROR = (SIXEL_FALSE | 0x0700) # errors occures in libpng functions
SIXEL_GDK_ERROR = (SIXEL_FALSE | 0x0800) # errors occures in gdk functions
SIXEL_GD_ERROR = (SIXEL_FALSE | 0x0900) # errors occures in gd functions
SIXEL_STBI_ERROR = (SIXEL_FALSE | 0x0a00) # errors occures in stb_image functions
SIXEL_STBIW_ERROR = (SIXEL_FALSE | 0x0b00) # errors occures in stb_image_write functions
SIXEL_INTERRUPTED = (SIXEL_OK | 0x0001) # interrupted by a signal
SIXEL_BAD_ALLOCATION = (SIXEL_RUNTIME_ERROR | 0x0001) # malloc() failed
SIXEL_BAD_ARGUMENT = (SIXEL_RUNTIME_ERROR | 0x0002) # bad argument detected
SIXEL_BAD_INPUT = (SIXEL_RUNTIME_ERROR | 0x0003) # bad input detected
SIXEL_BAD_INTEGER_OVERFLOW = (SIXEL_RUNTIME_ERROR | 0x0004) # integer overflow
SIXEL_NOT_IMPLEMENTED = (SIXEL_FEATURE_ERROR | 0x0001) # feature not implemented
def SIXEL_SUCCEEDED(status):
    """Return True when *status* carries no SIXEL_FALSE (0x1000) error bit."""
    return not (status & 0x1000)
def SIXEL_FAILED(status):
    """Return True when *status* carries the SIXEL_FALSE (0x1000) error bit."""
    return bool(status & 0x1000)
# method for finding the largest dimension for splitting,
# and sorting by that component
SIXEL_LARGE_AUTO = 0x0 # choose automatically the method for finding the largest dimension
SIXEL_LARGE_NORM = 0x1 # simply comparing the range in RGB space
SIXEL_LARGE_LUM = 0x2 # transforming into luminosities before the comparison
# method for choosing a color from the box
SIXEL_REP_AUTO = 0x0 # choose automatically the method for selecting representative color from each box
SIXEL_REP_CENTER_BOX = 0x1 # choose the center of the box
SIXEL_REP_AVERAGE_COLORS = 0x2 # choose the average all the color in the box (specified in Heckbert's paper)
SIXEL_REP_AVERAGE_PIXELS = 0x3 # choose the average all the pixels in the box
# method for diffusing
SIXEL_DIFFUSE_AUTO = 0x0 # choose diffusion type automatically
SIXEL_DIFFUSE_NONE = 0x1 # don't diffuse
SIXEL_DIFFUSE_ATKINSON = 0x2 # diffuse with Bill Atkinson's method
SIXEL_DIFFUSE_FS = 0x3 # diffuse with Floyd-Steinberg method
SIXEL_DIFFUSE_JAJUNI = 0x4 # diffuse with Jarvis, Judice & Ninke method
SIXEL_DIFFUSE_STUCKI = 0x5 # diffuse with Stucki's method
SIXEL_DIFFUSE_BURKES = 0x6 # diffuse with Burkes' method
SIXEL_DIFFUSE_A_DITHER = 0x7 # positionally stable arithmetic dither
SIXEL_DIFFUSE_X_DITHER = 0x8 # positionally stable arithmetic xor based dither
# quality modes
SIXEL_QUALITY_AUTO = 0x0 # choose quality mode automatically
SIXEL_QUALITY_HIGH = 0x1 # high quality palette construction
SIXEL_QUALITY_LOW = 0x2 # low quality palette construction
SIXEL_QUALITY_FULL = 0x3 # full quality palette construction
SIXEL_QUALITY_HIGHCOLOR = 0x4 # high color
# built-in dither
SIXEL_BUILTIN_MONO_DARK = 0x0 # monochrome terminal with dark background
SIXEL_BUILTIN_MONO_LIGHT = 0x1 # monochrome terminal with light background
SIXEL_BUILTIN_XTERM16 = 0x2 # xterm 16color
SIXEL_BUILTIN_XTERM256 = 0x3 # xterm 256color
SIXEL_BUILTIN_VT340_MONO = 0x4 # vt340 monochrome
SIXEL_BUILTIN_VT340_COLOR = 0x5 # vt340 color
SIXEL_BUILTIN_G1 = 0x6 # 1bit grayscale
SIXEL_BUILTIN_G2 = 0x7 # 2bit grayscale
SIXEL_BUILTIN_G4 = 0x8 # 4bit grayscale
SIXEL_BUILTIN_G8 = 0x9 # 8bit grayscale
# offset value of pixelFormat
SIXEL_FORMATTYPE_COLOR = (0)
SIXEL_FORMATTYPE_GRAYSCALE = (1 << 6)
SIXEL_FORMATTYPE_PALETTE = (1 << 7)
# pixelformat type of input image
# NOTE: for compatibility, the value of PIXELFORAMT_COLOR_RGB888 must be 3
SIXEL_PIXELFORMAT_RGB555 = (SIXEL_FORMATTYPE_COLOR | 0x01) # 15bpp
SIXEL_PIXELFORMAT_RGB565 = (SIXEL_FORMATTYPE_COLOR | 0x02) # 16bpp
SIXEL_PIXELFORMAT_RGB888 = (SIXEL_FORMATTYPE_COLOR | 0x03) # 24bpp
SIXEL_PIXELFORMAT_BGR555 = (SIXEL_FORMATTYPE_COLOR | 0x04) # 15bpp
SIXEL_PIXELFORMAT_BGR565 = (SIXEL_FORMATTYPE_COLOR | 0x05) # 16bpp
SIXEL_PIXELFORMAT_BGR888 = (SIXEL_FORMATTYPE_COLOR | 0x06) # 24bpp
SIXEL_PIXELFORMAT_ARGB8888 = (SIXEL_FORMATTYPE_COLOR | 0x10) # 32bpp
SIXEL_PIXELFORMAT_RGBA8888 = (SIXEL_FORMATTYPE_COLOR | 0x11) # 32bpp
SIXEL_PIXELFORMAT_ABGR8888 = (SIXEL_FORMATTYPE_COLOR | 0x12) # 32bpp
SIXEL_PIXELFORMAT_BGRA8888 = (SIXEL_FORMATTYPE_COLOR | 0x13) # 32bpp
SIXEL_PIXELFORMAT_G1 = (SIXEL_FORMATTYPE_GRAYSCALE | 0x00) # 1bpp grayscale
SIXEL_PIXELFORMAT_G2 = (SIXEL_FORMATTYPE_GRAYSCALE | 0x01) # 2bpp grayscale
SIXEL_PIXELFORMAT_G4 = (SIXEL_FORMATTYPE_GRAYSCALE | 0x02) # 4bpp grayscale
SIXEL_PIXELFORMAT_G8 = (SIXEL_FORMATTYPE_GRAYSCALE | 0x03) # 8bpp grayscale
SIXEL_PIXELFORMAT_AG88 = (SIXEL_FORMATTYPE_GRAYSCALE | 0x13) # 16bpp gray+alpha
SIXEL_PIXELFORMAT_GA88 = (SIXEL_FORMATTYPE_GRAYSCALE | 0x23) # 16bpp gray+alpha
SIXEL_PIXELFORMAT_PAL1 = (SIXEL_FORMATTYPE_PALETTE | 0x00) # 1bpp palette
SIXEL_PIXELFORMAT_PAL2 = (SIXEL_FORMATTYPE_PALETTE | 0x01) # 2bpp palette
SIXEL_PIXELFORMAT_PAL4 = (SIXEL_FORMATTYPE_PALETTE | 0x02) # 4bpp palette
SIXEL_PIXELFORMAT_PAL8 = (SIXEL_FORMATTYPE_PALETTE | 0x03) # 8bpp palette
# palette type
SIXEL_PALETTETYPE_AUTO = 0 # choose palette type automatically
SIXEL_PALETTETYPE_HLS = 1 # HLS colorspace
SIXEL_PALETTETYPE_RGB = 2 # RGB colorspace
# policies of SIXEL encoding
SIXEL_ENCODEPOLICY_AUTO = 0 # choose encoding policy automatically
SIXEL_ENCODEPOLICY_FAST = 1 # encode as fast as possible
SIXEL_ENCODEPOLICY_SIZE = 2 # encode to as small sixel sequence as possible
# method for re-sampling
SIXEL_RES_NEAREST = 0 # Use nearest neighbor method
SIXEL_RES_GAUSSIAN = 1 # Use guaussian filter
SIXEL_RES_HANNING = 2 # Use hanning filter
SIXEL_RES_HAMMING = 3 # Use hamming filter
SIXEL_RES_BILINEAR = 4 # Use bilinear filter
SIXEL_RES_WELSH = 5 # Use welsh filter
SIXEL_RES_BICUBIC = 6 # Use bicubic filter
SIXEL_RES_LANCZOS2 = 7 # Use lanczos-2 filter
SIXEL_RES_LANCZOS3 = 8 # Use lanczos-3 filter
SIXEL_RES_LANCZOS4 = 9 # Use lanczos-4 filter
# image format
SIXEL_FORMAT_GIF = 0x0 # read only
SIXEL_FORMAT_PNG = 0x1 # read/write
SIXEL_FORMAT_BMP = 0x2 # read only
SIXEL_FORMAT_JPG = 0x3 # read only
SIXEL_FORMAT_TGA = 0x4 # read only
SIXEL_FORMAT_WBMP = 0x5 # read only with --with-gd configure option
SIXEL_FORMAT_TIFF = 0x6 # read only
SIXEL_FORMAT_SIXEL = 0x7 # read only
SIXEL_FORMAT_PNM = 0x8 # read only
SIXEL_FORMAT_GD2 = 0x9 # read only with --with-gd configure option
SIXEL_FORMAT_PSD = 0xa # read only
SIXEL_FORMAT_HDR = 0xb # read only
# loop mode
SIXEL_LOOP_AUTO = 0 # honer the setting of GIF header
SIXEL_LOOP_FORCE = 1 # always enable loop
SIXEL_LOOP_DISABLE = 2 # always disable loop
# setopt flags
SIXEL_OPTFLAG_INPUT = 'i' # -i, --input: specify input file name.
SIXEL_OPTFLAG_OUTPUT = 'o' # -o, --output: specify output file name.
SIXEL_OPTFLAG_OUTFILE = 'o' # -o, --outfile: specify output file name.
SIXEL_OPTFLAG_7BIT_MODE = '7' # -7, --7bit-mode: for 7bit terminals or printers (default)
SIXEL_OPTFLAG_8BIT_MODE = '8' # -8, --8bit-mode: for 8bit terminals or printers
SIXEL_OPTFLAG_COLORS = 'p' # -p COLORS, --colors=COLORS: specify number of colors
SIXEL_OPTFLAG_MAPFILE = 'm' # -m FILE, --mapfile=FILE: specify set of colors
SIXEL_OPTFLAG_MONOCHROME = 'e' # -e, --monochrome: output monochrome sixel image
SIXEL_OPTFLAG_INSECURE = 'k' # -k, --insecure: allow to connect to SSL sites without certs
SIXEL_OPTFLAG_INVERT = 'i' # -i, --invert: assume the terminal background color
SIXEL_OPTFLAG_HIGH_COLOR = 'I' # -I, --high-color: output 15bpp sixel image
SIXEL_OPTFLAG_USE_MACRO = 'u' # -u, --use-macro: use DECDMAC and DEVINVM sequences
SIXEL_OPTFLAG_MACRO_NUMBER = 'n' # -n MACRONO, --macro-number=MACRONO:
# specify macro register number
SIXEL_OPTFLAG_COMPLEXION_SCORE = 'C' # -C COMPLEXIONSCORE, --complexion-score=COMPLEXIONSCORE:
# specify an number argument for the score of
# complexion correction.
SIXEL_OPTFLAG_IGNORE_DELAY = 'g' # -g, --ignore-delay: render GIF animation without delay
SIXEL_OPTFLAG_STATIC = 'S' # -S, --static: render animated GIF as a static image
SIXEL_OPTFLAG_DIFFUSION = 'd' # -d DIFFUSIONTYPE, --diffusion=DIFFUSIONTYPE:
# choose diffusion method which used with -p option.
# DIFFUSIONTYPE is one of them:
# auto -> choose diffusion type
# automatically (default)
# none -> do not diffuse
# fs -> Floyd-Steinberg method
# atkinson -> Bill Atkinson's method
# jajuni -> Jarvis, Judice & Ninke
# stucki -> Stucki's method
# burkes -> Burkes' method
# a_dither -> positionally stable
# arithmetic dither
# x_dither -> positionally stable
# arithmetic xor based dither
SIXEL_OPTFLAG_FIND_LARGEST = 'f' # -f FINDTYPE, --find-largest=FINDTYPE:
# choose method for finding the largest
# dimension of median cut boxes for
# splitting, make sense only when -p
# option (color reduction) is
# specified
# FINDTYPE is one of them:
# auto -> choose finding method
# automatically (default)
# norm -> simply comparing the
# range in RGB space
# lum -> transforming into
# luminosities before the
# comparison
SIXEL_OPTFLAG_SELECT_COLOR = 's' # -s SELECTTYPE, --select-color=SELECTTYPE
# choose the method for selecting
# representative color from each
# median-cut box, make sense only
# when -p option (color reduction) is
# specified
# SELECTTYPE is one of them:
# auto -> choose selecting
# method automatically
# (default)
# center -> choose the center of
# the box
# average -> calculate the color
# average into the box
# histogram -> similar with average
# but considers color
# histogram
SIXEL_OPTFLAG_CROP = 'c' # -c REGION, --crop=REGION:
# crop source image to fit the
# specified geometry. REGION should
# be formatted as '%dx%d+%d+%d'
SIXEL_OPTFLAG_WIDTH = 'w' # -w WIDTH, --width=WIDTH:
# resize image to specified width
# WIDTH is represented by the
# following syntax
# auto -> preserving aspect
# ratio (default)
# <number>% -> scale width with
# given percentage
# <number> -> scale width with
# pixel counts
# <number>px -> scale width with
# pixel counts
SIXEL_OPTFLAG_HEIGHT = 'h' # -h HEIGHT, --height=HEIGHT:
# resize image to specified height
# HEIGHT is represented by the
# following syntax
# auto -> preserving aspect
# ratio (default)
# <number>% -> scale height with
# given percentage
# <number> -> scale height with
# pixel counts
# <number>px -> scale height with
# pixel counts
SIXEL_OPTFLAG_RESAMPLING = 'r' # -r RESAMPLINGTYPE, --resampling=RESAMPLINGTYPE:
# choose resampling filter used
# with -w or -h option (scaling)
# RESAMPLINGTYPE is one of them:
# nearest -> Nearest-Neighbor
# method
# gaussian -> Gaussian filter
# hanning -> Hanning filter
# hamming -> Hamming filter
# bilinear -> Bilinear filter
# (default)
# welsh -> Welsh filter
# bicubic -> Bicubic filter
# lanczos2 -> Lanczos-2 filter
# lanczos3 -> Lanczos-3 filter
# lanczos4 -> Lanczos-4 filter
SIXEL_OPTFLAG_QUALITY = 'q' # -q QUALITYMODE, --quality=QUALITYMODE:
# select quality of color
# quanlization.
# auto -> decide quality mode
# automatically (default)
# low -> low quality and high
# speed mode
# high -> high quality and low
# speed mode
# full -> full quality and careful
# speed mode
SIXEL_OPTFLAG_LOOPMODE = 'l' # -l LOOPMODE, --loop-control=LOOPMODE:
# select loop control mode for GIF
# animation.
# auto -> honor the setting of
# GIF header (default)
# force -> always enable loop
# disable -> always disable loop
SIXEL_OPTFLAG_PALETTE_TYPE = 't' # -t PALETTETYPE, --palette-type=PALETTETYPE:
# select palette color space type
# auto -> choose palette type
# automatically (default)
# hls -> use HLS color space
# rgb -> use RGB color space
SIXEL_OPTFLAG_BUILTIN_PALETTE = 'b' # -b BUILTINPALETTE, --builtin-palette=BUILTINPALETTE:
# select built-in palette type
# xterm16 -> X default 16 color map
# xterm256 -> X default 256 color map
# vt340mono -> VT340 monochrome map
# vt340color -> VT340 color map
# gray1 -> 1bit grayscale map
# gray2 -> 2bit grayscale map
# gray4 -> 4bit grayscale map
# gray8 -> 8bit grayscale map
SIXEL_OPTFLAG_ENCODE_POLICY = 'E' # -E ENCODEPOLICY, --encode-policy=ENCODEPOLICY:
# select encoding policy
# auto -> choose encoding policy
# automatically (default)
# fast -> encode as fast as possible
# size -> encode to as small sixel
# sequence as possible
SIXEL_OPTFLAG_BGCOLOR = 'B' # -B BGCOLOR, --bgcolor=BGCOLOR:
# specify background color
# BGCOLOR is represented by the
# following syntax
# #rgb
# #rrggbb
# #rrrgggbbb
# #rrrrggggbbbb
# rgb:r/g/b
# rgb:rr/gg/bb
# rgb:rrr/ggg/bbb
# rgb:rrrr/gggg/bbbb
SIXEL_OPTFLAG_PENETRATE = 'P' # -P, --penetrate:
# penetrate GNU Screen using DCS
# pass-through sequence
SIXEL_OPTFLAG_PIPE_MODE = 'D' # -D, --pipe-mode: (deprecated)
# read source images from stdin continuously
SIXEL_OPTFLAG_VERBOSE = 'v' # -v, --verbose: show debugging info
SIXEL_OPTFLAG_VERSION = 'V' # -V, --version: show version and license info
SIXEL_OPTFLAG_HELP = 'H' # -H, --help: show this help
# Locate the libsixel shared library once; find_library() scans the system,
# so avoid calling it twice (the original looked the library up again when
# loading it).
_sixel_library_path = find_library('sixel')
if not _sixel_library_path:
    raise ImportError("libsixel not found.")

# load shared library
_sixel = cdll.LoadLibrary(_sixel_library_path)
# convert error status code int formatted string
def sixel_helper_format_error(status):
    """Return a human-readable message (bytes) for a libsixel status code."""
    fn = _sixel.sixel_helper_format_error
    fn.restype = c_char_p
    fn.argtypes = [c_int]
    return fn(status)
# compute pixel depth from pixelformat
def sixel_helper_compute_depth(pixelformat):
    """Compute the pixel depth (bytes per pixel) for *pixelformat*.

    Bug fix: the argtypes were previously assigned to
    ``_sixel.sixel_encoder_encode`` (a copy/paste slip) instead of
    ``_sixel.sixel_helper_compute_depth``.
    """
    _sixel.sixel_helper_compute_depth.restype = c_int
    _sixel.sixel_helper_compute_depth.argtypes = [c_int]
    return _sixel.sixel_helper_compute_depth(pixelformat)
# create new output context object
def sixel_output_new(fn_write, priv=None, allocator=c_void_p(None)):
    """Create a new libsixel output context.

    *fn_write* is a Python callable ``fn_write(data: bytes, priv)`` invoked
    for each chunk of encoded SIXEL data; *priv* is passed through to it
    unchanged (the C-side priv pointer is always NULL — the Python closure
    carries the state instead). Raises RuntimeError on failure.
    """
    def _fn_write_local(data, size, priv_from_c):
        # Bridge the C callback to the Python callable; the C contract
        # expects the number of bytes consumed to be returned.
        fn_write(string_at(data, size), priv)
        return size
    # C prototype: int (*)(char *data, int size, void *priv)
    sixel_write_function = CFUNCTYPE(c_int, c_char_p, c_int, c_void_p)
    _sixel.sixel_output_new.restype = c_int
    _sixel.sixel_output_new.argtypes = [POINTER(c_void_p), sixel_write_function, c_void_p, c_void_p]
    output = c_void_p(None)
    _fn_write = sixel_write_function(_fn_write_local)
    # NOTE(review): the two assignments below set attributes on the callback
    # *instance* and look redundant (the CFUNCTYPE prototype above already
    # fixes the signature) — confirm before removing.
    _fn_write.restype = c_int
    _fn_write.argtypes = [sixel_write_function, c_void_p, c_void_p]
    status = _sixel.sixel_output_new(byref(output), _fn_write, c_void_p(None), allocator)
    if SIXEL_FAILED(status):
        message = sixel_helper_format_error(status)
        raise RuntimeError(message)
    # Keep a reference to the ctypes callback on the handle so it is not
    # garbage-collected while the C side may still invoke it.
    output.__fn_write = _fn_write
    return output
# increase reference count of output object (thread-unsafe)
def sixel_output_ref(output):
    """Increment the reference count of *output* (not thread-safe)."""
    fn = _sixel.sixel_output_ref
    fn.restype = None
    fn.argtypes = [c_void_p]
    fn(output)
# decrease reference count of output object (thread-unsafe)
def sixel_output_unref(output):
    """Decrement the reference count of *output* (not thread-safe)."""
    fn = _sixel.sixel_output_unref
    fn.restype = None
    fn.argtypes = [c_void_p]
    fn(output)
    # Drop the Python-side keepalive for the write callback.
    # NOTE(review): this clears the callback even when the C-side refcount
    # has not reached zero — confirm no other holders of this output remain.
    output.__fn_write = None
# get 8bit output mode which indicates whether it uses C1 control characters
def sixel_output_get_8bit_availability(output):
    """Get the 8-bit output mode flag (whether C1 control characters are used).

    :param output: output context handle
    :return: the flag value reported by libsixel (int)
    """
    # Fix: this is a getter, but the original declared restype=None and
    # discarded the result, so callers always received None.
    _sixel.sixel_output_get_8bit_availability.restype = c_int
    _sixel.sixel_output_get_8bit_availability.argtypes = [c_void_p]
    return _sixel.sixel_output_get_8bit_availability(output)
# set 8bit output mode state
def sixel_output_set_8bit_availability(output, availability=0):
    """Set the 8-bit output mode state.

    :param output: output context handle
    :param availability: int flag to pass to libsixel (default 0 / disabled —
        TODO confirm against libsixel's own default)

    Fix: argtypes declares two parameters but the original called the C
    function with only one argument, which raises TypeError at runtime.
    The value parameter is added with a default to stay call-compatible.
    """
    _sixel.sixel_output_set_8bit_availability.restype = None
    _sixel.sixel_output_set_8bit_availability.argtypes = [c_void_p, c_int]
    _sixel.sixel_output_set_8bit_availability(output, availability)
# set whether limit arguments of DECGRI('!') to 255
def sixel_output_set_gri_arg_limit(output, value=1):
    """Set whether DECGRI ('!') repeat arguments are limited to 255.

    :param output: output context handle
    :param value: int flag (default 1 / limited — TODO confirm against
        libsixel's own default)

    Fix: argtypes declares two parameters but the original called the C
    function with only one argument, which raises TypeError at runtime.
    """
    _sixel.sixel_output_set_gri_arg_limit.restype = None
    _sixel.sixel_output_set_gri_arg_limit.argtypes = [c_void_p, c_int]
    _sixel.sixel_output_set_gri_arg_limit(output, value)
# set GNU Screen penetration feature enable or disable
def sixel_output_set_penetrate_multiplexer(output, penetrate=0):
    """Enable/disable the GNU Screen DCS pass-through (penetration) feature.

    :param output: output context handle
    :param penetrate: int flag (default 0 / disabled)

    Fix: argtypes declares two parameters but the original called the C
    function with only one argument, which raises TypeError at runtime.
    """
    _sixel.sixel_output_set_penetrate_multiplexer.restype = None
    _sixel.sixel_output_set_penetrate_multiplexer.argtypes = [c_void_p, c_int]
    _sixel.sixel_output_set_penetrate_multiplexer(output, penetrate)
# set whether we skip DCS envelope
def sixel_output_set_skip_dcs_envelope(output, skip=0):
    """Set whether the DCS envelope is skipped in the output.

    :param output: output context handle
    :param skip: int flag (default 0 / do not skip)

    Fix: argtypes declares two parameters but the original called the C
    function with only one argument, which raises TypeError at runtime.
    """
    _sixel.sixel_output_set_skip_dcs_envelope.restype = None
    _sixel.sixel_output_set_skip_dcs_envelope.argtypes = [c_void_p, c_int]
    _sixel.sixel_output_set_skip_dcs_envelope(output, skip)
# set palette type: RGB or HLS
def sixel_output_set_palette_type(output, palettetype=0):
    """Set the palette type (RGB or HLS).

    :param output: output context handle
    :param palettetype: int palette-type constant (default 0 — presumably
        the library's AUTO value; TODO confirm)

    Fix: argtypes declares two parameters but the original called the C
    function with only one argument, which raises TypeError at runtime.
    """
    _sixel.sixel_output_set_palette_type.restype = None
    _sixel.sixel_output_set_palette_type.argtypes = [c_void_p, c_int]
    _sixel.sixel_output_set_palette_type(output, palettetype)
# set encodeing policy: auto, fast or size
def sixel_output_set_encode_policy(output, encode_policy=0):
    """Set the encoding policy: auto, fast or size.

    :param output: output context handle
    :param encode_policy: int policy constant (default 0 — presumably the
        library's AUTO value; TODO confirm)

    Fix: argtypes declares two parameters but the original called the C
    function with only one argument, which raises TypeError at runtime.
    """
    _sixel.sixel_output_set_encode_policy.restype = None
    _sixel.sixel_output_set_encode_policy.argtypes = [c_void_p, c_int]
    _sixel.sixel_output_set_encode_policy(output, encode_policy)
# create dither context object
def sixel_dither_new(ncolors, allocator=None):
    """Create a dither context object with room for *ncolors* colors.

    Raises RuntimeError when libsixel reports a failure.
    """
    fn = _sixel.sixel_dither_new
    fn.restype = c_int
    fn.argtypes = [POINTER(c_void_p), c_int, c_void_p]
    handle = c_void_p(None)
    status = fn(byref(handle), ncolors, allocator)
    if SIXEL_FAILED(status):
        raise RuntimeError(sixel_helper_format_error(status))
    return handle
# get built-in dither context object
def sixel_dither_get(builtin_dither):
    """Return a handle to one of libsixel's built-in dither contexts."""
    fn = _sixel.sixel_dither_get
    fn.restype = c_void_p
    fn.argtypes = [c_int]
    return fn(builtin_dither)
# destroy dither context object
def sixel_dither_destroy(dither):
    """Destroy a dither context object."""
    fn = _sixel.sixel_dither_destroy
    fn.restype = None
    fn.argtypes = [c_void_p]
    return fn(dither)
# increase reference count of dither context object (thread-unsafe)
def sixel_dither_ref(dither):
    """Increment the reference count of *dither* (not thread-safe)."""
    fn = _sixel.sixel_dither_ref
    fn.restype = None
    fn.argtypes = [c_void_p]
    return fn(dither)
# decrease reference count of dither context object (thread-unsafe)
def sixel_dither_unref(dither):
    """Decrement the reference count of *dither* (not thread-safe)."""
    fn = _sixel.sixel_dither_unref
    fn.restype = None
    fn.argtypes = [c_void_p]
    return fn(dither)
# initialize internal palette from specified pixel buffer
def sixel_dither_initialize(dither, data, width, height, pixelformat,
                            method_for_largest=SIXEL_LARGE_AUTO,
                            method_for_rep=SIXEL_REP_AUTO,
                            quality_mode=SIXEL_QUALITY_AUTO):
    """Build the dither's internal palette from the given pixel buffer.

    Raises RuntimeError when libsixel reports a failure.
    """
    fn = _sixel.sixel_dither_initialize
    fn.restype = c_int
    fn.argtypes = [c_void_p, c_char_p, c_int, c_int, c_int,
                   c_int, c_int, c_int]
    status = fn(dither, data, width, height, pixelformat,
                method_for_largest, method_for_rep, quality_mode)
    if SIXEL_FAILED(status):
        raise RuntimeError(sixel_helper_format_error(status))
# set diffusion type, choose from enum methodForDiffuse
def sixel_dither_set_diffusion_type(dither, method_for_diffuse):
    """Choose the error-diffusion method (a methodForDiffuse enum value)."""
    fn = _sixel.sixel_dither_set_diffusion_type
    fn.restype = None
    fn.argtypes = [c_void_p, c_int]
    fn(dither, method_for_diffuse)
# get number of palette colors
def sixel_dither_get_num_of_palette_colors(dither):
    """Return the number of colors in the dither's palette."""
    fn = _sixel.sixel_dither_get_num_of_palette_colors
    fn.restype = c_int
    fn.argtypes = [c_void_p]
    return fn(dither)
# get number of histogram colors */
def sixel_dither_get_num_of_histogram_colors(dither):
    """Return the number of colors in the dither's histogram."""
    fn = _sixel.sixel_dither_get_num_of_histogram_colors
    fn.restype = c_int
    fn.argtypes = [c_void_p]
    return fn(dither)
def sixel_dither_get_palette(dither):
    """Return the dither's palette as a flat list of ints (R, G, B per color).

    Fixes two bugs in the original:
    - On Python 3, iterating the ``c_char_p`` bytes result yields ints, so
      ``ord(c)`` raised TypeError.
    - ``c_char_p`` truncates the palette at the first NUL byte, losing any
      entry with a zero color component.  A ``POINTER(c_ubyte)`` result is
      read for exactly ncolors * 3 bytes instead.
    """
    ncolors = sixel_dither_get_num_of_palette_colors(dither)
    _sixel.sixel_dither_get_palette.restype = POINTER(c_ubyte)
    _sixel.sixel_dither_get_palette.argtypes = [c_void_p]
    cpalette = _sixel.sixel_dither_get_palette(dither)
    return [cpalette[i] for i in range(ncolors * 3)]
def sixel_dither_set_palette(dither, palette):
    """Set the dither's palette from a flat sequence of ints (R, G, B per color).

    Fix: the original built a ``str`` via ``''.join(map(chr, palette))``,
    which ctypes rejects for a ``c_char_p`` argument on Python 3.  Build a
    bytes object instead (works on Python 2 and 3 and allows NUL bytes).
    """
    _sixel.sixel_dither_set_palette.restype = None
    _sixel.sixel_dither_set_palette.argtypes = [c_void_p, c_char_p]
    cpalette = bytes(bytearray(palette))
    _sixel.sixel_dither_set_palette(dither, cpalette)
def sixel_dither_set_complexion_score(dither, score):
    """Set the dither's complexion score."""
    fn = _sixel.sixel_dither_set_complexion_score
    fn.restype = None
    fn.argtypes = [c_void_p, c_int]
    fn(dither, score)
def sixel_dither_set_body_only(dither, bodyonly):
    """Set the dither's body-only flag."""
    fn = _sixel.sixel_dither_set_body_only
    fn.restype = None
    fn.argtypes = [c_void_p, c_int]
    fn(dither, bodyonly)
def sixel_dither_set_optimize_palette(dither, do_opt):
    """Enable/disable palette optimization for the dither."""
    fn = _sixel.sixel_dither_set_optimize_palette
    fn.restype = None
    fn.argtypes = [c_void_p, c_int]
    fn(dither, do_opt)
def sixel_dither_set_pixelformat(dither, pixelformat):
    """Set the pixel format the dither expects."""
    fn = _sixel.sixel_dither_set_pixelformat
    fn.restype = None
    fn.argtypes = [c_void_p, c_int]
    fn(dither, pixelformat)
def sixel_dither_set_transparent(dither, transparent):
    """Set the dither's transparent color index/flag."""
    fn = _sixel.sixel_dither_set_transparent
    fn.restype = None
    fn.argtypes = [c_void_p, c_int]
    fn(dither, transparent)
# convert pixels into sixel format and write it to output context
def sixel_encode(pixels, width, height, depth, dither, output):
    """Encode raw pixel data to SIXEL, writing through *output*'s callback.

    Returns the libsixel status code.
    """
    fn = _sixel.sixel_encode
    fn.restype = c_int
    fn.argtypes = [c_char_p, c_int, c_int, c_int, c_void_p, c_void_p]
    return fn(pixels, width, height, depth, dither, output)
# create encoder object
def sixel_encoder_new(allocator=c_void_p(None)):
    """Create an encoder object; raises RuntimeError on failure."""
    fn = _sixel.sixel_encoder_new
    fn.restype = c_int
    fn.argtypes = [POINTER(c_void_p), c_void_p]
    handle = c_void_p(None)
    status = fn(byref(handle), allocator)
    if SIXEL_FAILED(status):
        raise RuntimeError(sixel_helper_format_error(status))
    return handle
# increase reference count of encoder object (thread-unsafe)
def sixel_encoder_ref(encoder):
    """Increment the reference count of *encoder* (not thread-safe)."""
    fn = _sixel.sixel_encoder_ref
    fn.restype = None
    fn.argtypes = [c_void_p]
    fn(encoder)
# decrease reference count of encoder object (thread-unsafe)
def sixel_encoder_unref(encoder):
    """Decrement the reference count of *encoder* (not thread-safe)."""
    fn = _sixel.sixel_encoder_unref
    fn.restype = None
    fn.argtypes = [c_void_p]
    fn(encoder)
# set an option flag to encoder object
def sixel_encoder_setopt(encoder, flag, arg=None):
    """Set an option flag on the encoder object.

    :param encoder: encoder handle
    :param flag: one of the single-character SIXEL_OPTFLAG_* strings
    :param arg: optional option value; stringified and UTF-8 encoded
    :raises RuntimeError: if libsixel reports a failure status

    Fix: the original tested ``if arg:``, so falsy-but-valid values such as
    ``0`` were passed through unencoded (an int where c_char_p is expected).
    Test against None instead.
    """
    _sixel.sixel_encoder_setopt.restype = c_int
    _sixel.sixel_encoder_setopt.argtypes = [c_void_p, c_int, c_char_p]
    flag = ord(flag)
    if arg is not None:
        arg = str(arg).encode('utf-8')
    status = _sixel.sixel_encoder_setopt(encoder, flag, arg)
    if SIXEL_FAILED(status):
        message = sixel_helper_format_error(status)
        raise RuntimeError(message)
# load source data from specified file and encode it to SIXEL format
def sixel_encoder_encode(encoder, filename):
    """Load an image from *filename* and encode it to SIXEL.

    :param encoder: encoder handle
    :param filename: path to the source image (str or bytes)
    :raises RuntimeError: if libsixel reports a failure status

    Fix: the original used ``locale.getdefaultlocale()``, which is
    deprecated and can return ``(None, None)``, making
    ``filename.encode(None)`` raise TypeError.  ``os.fsencode`` uses the
    platform filesystem encoding and accepts bytes unchanged.
    """
    import os
    _sixel.sixel_encoder_encode.restype = c_int
    _sixel.sixel_encoder_encode.argtypes = [c_void_p, c_char_p]
    status = _sixel.sixel_encoder_encode(encoder, os.fsencode(filename))
    if SIXEL_FAILED(status):
        message = sixel_helper_format_error(status)
        raise RuntimeError(message)
# encode specified pixel data to SIXEL format
def sixel_encoder_encode_bytes(encoder, buf, width, height, pixelformat, palette):
    """Encode an in-memory pixel buffer to SIXEL format.

    :param encoder: encoder handle
    :param buf: buffer of raw pixel data (bytes, bytearray, memoryview, ...)
    :param width: image width in pixels
    :param height: image height in pixels
    :param pixelformat: one of the SIXEL_PIXELFORMAT_* constants
    :param palette: optional flat sequence of palette bytes, or None
    :raises ValueError: for an invalid pixelformat or a too-short buffer
    :raises RuntimeError: if libsixel reports a failure status

    Fixes over the original:
    - error message used ``buf.len`` (AttributeError for bytes) -> ``len(buf)``
    - ``c_void_p.from_buffer_copy(buf)`` fails for any buffer whose size is
      not exactly pointer-sized; use a properly sized ``c_ubyte`` array
    - argtypes were assigned to ``sixel_encoder_encode`` (wrong function)
      and listed the wrong parameters
    - the raw Python ``buf`` was passed to C instead of the prepared ``cbuf``
    - ``cpalettelen = None`` was passed where a ``c_int`` is expected -> 0
    - palette stored as signed ``c_byte`` overflowed for values > 127 ->
      ``c_ubyte``
    """
    depth = sixel_helper_compute_depth(pixelformat)
    if depth <= 0:
        raise ValueError("invalid pixelformat value : %d" % pixelformat)
    if len(buf) < width * height * depth:
        raise ValueError("buf is too short : %d < %d * %d * %d"
                         % (len(buf), width, height, depth))
    buftype = c_ubyte * len(buf)
    if not hasattr(buf, "readonly") or buf.readonly:
        # immutable (bytes) or read-only memoryview: copy into a C array
        cbuf = buftype.from_buffer_copy(buf)
    else:
        # writable buffer: share memory, no copy
        cbuf = buftype.from_buffer(buf)
    if palette:
        cpalettelen = len(palette)
        cpalette = (c_ubyte * cpalettelen)(*palette)
    else:
        cpalettelen = 0
        cpalette = None
    _sixel.sixel_encoder_encode_bytes.restype = c_int
    _sixel.sixel_encoder_encode_bytes.argtypes = [c_void_p, c_void_p, c_int, c_int,
                                                  c_int, c_void_p, c_int]
    status = _sixel.sixel_encoder_encode_bytes(encoder, cbuf, width, height,
                                               pixelformat, cpalette, cpalettelen)
    if SIXEL_FAILED(status):
        message = sixel_helper_format_error(status)
        raise RuntimeError(message)
# create decoder object
def sixel_decoder_new(allocator=c_void_p(None)):
    """Create a decoder object; raises RuntimeError on failure.

    Fix: the original accepted an *allocator* parameter but always passed
    ``c_void_p(None)`` to the C call, silently ignoring the argument.
    """
    _sixel.sixel_decoder_new.restype = c_int
    _sixel.sixel_decoder_new.argtypes = [POINTER(c_void_p), c_void_p]
    decoder = c_void_p(None)
    status = _sixel.sixel_decoder_new(byref(decoder), allocator)
    if SIXEL_FAILED(status):
        message = sixel_helper_format_error(status)
        raise RuntimeError(message)
    return decoder
# increase reference count of decoder object (thread-unsafe)
def sixel_decoder_ref(decoder):
    """Increment the reference count of *decoder* (not thread-safe)."""
    fn = _sixel.sixel_decoder_ref
    fn.restype = None
    fn.argtypes = [c_void_p]
    fn(decoder)
# decrease reference count of decoder object (thread-unsafe)
def sixel_decoder_unref(decoder):
    """Decrement the reference count of *decoder* (not thread-safe)."""
    fn = _sixel.sixel_decoder_unref
    fn.restype = None
    fn.argtypes = [c_void_p]
    fn(decoder)
# set an option flag to decoder object
def sixel_decoder_setopt(decoder, flag, arg=None):
    """Set an option flag on the decoder object.

    :param decoder: decoder handle
    :param flag: one of the single-character SIXEL_OPTFLAG_* strings
    :param arg: optional option value; stringified and UTF-8 encoded
    :raises RuntimeError: if libsixel reports a failure status

    Fix: the original tested ``if arg:``, so falsy-but-valid values such as
    ``0`` were passed through unencoded (an int where c_char_p is expected).
    Test against None instead.
    """
    _sixel.sixel_decoder_setopt.restype = c_int
    _sixel.sixel_decoder_setopt.argtypes = [c_void_p, c_int, c_char_p]
    flag = ord(flag)
    if arg is not None:
        arg = str(arg).encode('utf-8')
    status = _sixel.sixel_decoder_setopt(decoder, flag, arg)
    if SIXEL_FAILED(status):
        message = sixel_helper_format_error(status)
        raise RuntimeError(message)
# load source data from stdin or the file
def sixel_decoder_decode(decoder, infile=None):
    """Decode SIXEL data from *infile*, or from stdin when *infile* is None.

    Raises RuntimeError when libsixel reports a failure.
    """
    fn = _sixel.sixel_decoder_decode
    fn.restype = c_int
    fn.argtypes = [c_void_p]
    if infile:
        sixel_decoder_setopt(decoder, SIXEL_OPTFLAG_INPUT, infile)
    status = fn(decoder)
    if SIXEL_FAILED(status):
        raise RuntimeError(sixel_helper_format_error(status))
| 49.860656 | 114 | 0.59891 |
3517fc833c56bba9b604293b672b44dd8f40498b | 971 | py | Python | finding_file.py | Savioor/sprint2 | a36b434709af6a1c81d1371f74e6c963f0e9daf4 | [
"Apache-2.0"
] | null | null | null | finding_file.py | Savioor/sprint2 | a36b434709af6a1c81d1371f74e6c963f0e9daf4 | [
"Apache-2.0"
] | null | null | null | finding_file.py | Savioor/sprint2 | a36b434709af6a1c81d1371f74e6c963f0e9daf4 | [
"Apache-2.0"
] | null | null | null | import os
import os.path
def _diff(list1, list2):
list_difference = [item for item in list1 if item not in list2]
return list_difference
# Candidate Windows drive letters to probe.
dl = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Snapshot of the drives present at import time; _check_usb() compares
# against (and updates) this module-level baseline.
drives = ['%s:' % d for d in dl if os.path.exists('%s:' % d)]
def _check_usb():
    """Block until a new drive letter appears, then return it (e.g. 'E:').

    Compares the current drive list against the module-level `drives`
    baseline.  When a drive is removed, the baseline is refreshed; when one
    is added, its letter is returned (the baseline is intentionally NOT
    updated, matching the original behavior).

    Fixes over the original:
    - the polling loop had no delay, busy-waiting at 100% CPU; a short
      sleep is added between scans
    - the drive list was recomputed twice per iteration on removal
    """
    import time
    global drives
    while True:
        current = ['%s:' % d for d in dl if os.path.exists('%s:' % d)]
        added = _diff(current, drives)
        if added:
            return added[0]
        removed = _diff(drives, current)
        if removed:
            drives = current
        time.sleep(0.5)  # avoid pegging a CPU core while polling
def find():
    """Wait for a new drive, then list its root-level .txt/.bmp files.

    Only files directly in the drive root are considered (path shorter than
    3 chars, i.e. 'X:').  Results come back ordered smallest-to-largest by
    size, with the first (smallest) entry dropped.
    NOTE(review): the trailing `[1:]` discards the smallest file — confirm
    this is intentional.
    """
    path = _check_usb()
    matches = []
    for root, dirs, files in os.walk(path):
        if len(root) < 3:  # drive root only, e.g. 'E:'
            for name in files:
                if name[-4:] in (".txt", ".bmp"):
                    matches.append(root + "\\" + name)
    matches.sort(key=lambda f: os.stat(f).st_size, reverse=True)
    return list(reversed(matches))[1:]
cdd1c47a5dcf0063cd1e32dded82e0a74c7023dd | 131,233 | py | Python | yandex/cloud/mdb/postgresql/v1/config/postgresql13_pb2.py | ovandriyanov/python-sdk | eec7dc65ef23789388fa46d13087d4a03cdc6e57 | [
"MIT"
] | null | null | null | yandex/cloud/mdb/postgresql/v1/config/postgresql13_pb2.py | ovandriyanov/python-sdk | eec7dc65ef23789388fa46d13087d4a03cdc6e57 | [
"MIT"
] | null | null | null | yandex/cloud/mdb/postgresql/v1/config/postgresql13_pb2.py | ovandriyanov/python-sdk | eec7dc65ef23789388fa46d13087d4a03cdc6e57 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/mdb/postgresql/v1/config/postgresql13.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from yandex.cloud import validation_pb2 as yandex_dot_cloud_dot_validation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/mdb/postgresql/v1/config/postgresql13.proto',
package='yandex.cloud.mdb.postgresql.v1.config',
syntax='proto3',
serialized_options=b'\n)yandex.cloud.api.mdb.postgresql.v1.configZTgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config;postgresql',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n8yandex/cloud/mdb/postgresql/v1/config/postgresql13.proto\x12%yandex.cloud.mdb.postgresql.v1.config\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1dyandex/cloud/validation.proto\"\xd3]\n\x12PostgresqlConfig13\x12\x34\n\x0fmax_connections\x18\x01 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x33\n\x0eshared_buffers\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x31\n\x0ctemp_buffers\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12>\n\x19max_prepared_transactions\x18\x04 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12-\n\x08work_mem\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x39\n\x14maintenance_work_mem\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x38\n\x13\x61utovacuum_work_mem\x18\x07 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x34\n\x0ftemp_file_limit\x18\x08 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x36\n\x11vacuum_cost_delay\x18\t \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x39\n\x14vacuum_cost_page_hit\x18\n \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12:\n\x15vacuum_cost_page_miss\x18\x0b \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12;\n\x16vacuum_cost_page_dirty\x18\x0c \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x36\n\x11vacuum_cost_limit\x18\r \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x41\n\x0e\x62gwriter_delay\x18\x0e \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x0c\xfa\xc7\x31\x08\x31\x30-10000\x12:\n\x15\x62gwriter_lru_maxpages\x18\x0f \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12=\n\x17\x62gwriter_lru_multiplier\x18\x10 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x45\n\x14\x62gwriter_flush_after\x18\x11 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\n\xfa\xc7\x31\x06\x30-2048\x12\x44\n\x13\x62\x61\x63kend_flush_after\x18\x12 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\n\xfa\xc7\x31\x06\x30-2048\x12L\n\x16old_snapshot_threshold\x18\x13 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x0f\xfa\xc7\x31\x0b-1-86400000\x12U\n\twal_level\x18\x14 
\x01(\x0e\x32\x42.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.WalLevel\x12g\n\x12synchronous_commit\x18\x15 \x01(\x0e\x32K.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.SynchronousCommit\x12K\n\x12\x63heckpoint_timeout\x18\x16 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x12\xfa\xc7\x31\x0e\x33\x30\x30\x30\x30-86400000\x12\x42\n\x1c\x63heckpoint_completion_target\x18\x17 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12G\n\x16\x63heckpoint_flush_after\x18\x18 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\n\xfa\xc7\x31\x06\x30-2048\x12\x31\n\x0cmax_wal_size\x18\x19 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x31\n\x0cmin_wal_size\x18\x1a \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12@\n\x1bmax_standby_streaming_delay\x18\x1b \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12>\n\x19\x64\x65\x66\x61ult_statistics_target\x18\x1c \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12k\n\x14\x63onstraint_exclusion\x18\x1d \x01(\x0e\x32M.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.ConstraintExclusion\x12;\n\x15\x63ursor_tuple_fraction\x18\x1e \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12J\n\x13\x66rom_collapse_limit\x18\x1f \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x10\xfa\xc7\x31\x0c\x31-2147483647\x12J\n\x13join_collapse_limit\x18 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x10\xfa\xc7\x31\x0c\x31-2147483647\x12h\n\x13\x66orce_parallel_mode\x18! 
\x01(\x0e\x32K.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.ForceParallelMode\x12_\n\x13\x63lient_min_messages\x18\" \x01(\x0e\x32\x42.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.LogLevel\x12\\\n\x10log_min_messages\x18# \x01(\x0e\x32\x42.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.LogLevel\x12\x63\n\x17log_min_error_statement\x18$ \x01(\x0e\x32\x42.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.LogLevel\x12?\n\x1alog_min_duration_statement\x18% \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x33\n\x0flog_checkpoints\x18& \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\x0flog_connections\x18\' \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x36\n\x12log_disconnections\x18( \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\x0clog_duration\x18) \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12h\n\x13log_error_verbosity\x18* \x01(\x0e\x32K.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.LogErrorVerbosity\x12\x32\n\x0elog_lock_waits\x18+ \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12]\n\rlog_statement\x18, \x01(\x0e\x32\x46.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.LogStatement\x12\x33\n\x0elog_temp_files\x18- \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x13\n\x0bsearch_path\x18. 
\x01(\t\x12\x30\n\x0crow_security\x18/ \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12u\n\x1d\x64\x65\x66\x61ult_transaction_isolation\x18\x30 \x01(\x0e\x32N.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.TransactionIsolation\x12\x36\n\x11statement_timeout\x18\x31 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x31\n\x0clock_timeout\x18\x32 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12H\n#idle_in_transaction_session_timeout\x18\x33 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12[\n\x0c\x62ytea_output\x18\x34 \x01(\x0e\x32\x45.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.ByteaOutput\x12V\n\txmlbinary\x18\x35 \x01(\x0e\x32\x43.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.XmlBinary\x12V\n\txmloption\x18\x36 \x01(\x0e\x32\x43.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.XmlOption\x12;\n\x16gin_pending_list_limit\x18\x37 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x35\n\x10\x64\x65\x61\x64lock_timeout\x18\x38 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12>\n\x19max_locks_per_transaction\x18\x39 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x43\n\x1emax_pred_locks_per_transaction\x18: \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12/\n\x0b\x61rray_nulls\x18; \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x61\n\x0f\x62\x61\x63kslash_quote\x18< \x01(\x0e\x32H.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.BackslashQuote\x12\x35\n\x11\x64\x65\x66\x61ult_with_oids\x18= \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x15\x65scape_string_warning\x18> \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x38\n\x14lo_compat_privileges\x18? 
\x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12?\n\x1boperator_precedence_warning\x18@ \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x15quote_all_identifiers\x18\x41 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12?\n\x1bstandard_conforming_strings\x18\x42 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x38\n\x14synchronize_seqscans\x18\x43 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x15transform_null_equals\x18\x44 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x31\n\rexit_on_error\x18\x45 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\rseq_page_cost\x18\x46 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x36\n\x10random_page_cost\x18G \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x45\n\x16\x61utovacuum_max_workers\x18H \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x08\xfa\xc7\x31\x04\x31-32\x12M\n\x1c\x61utovacuum_vacuum_cost_delay\x18I \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\n\xfa\xc7\x31\x06-1-100\x12O\n\x1c\x61utovacuum_vacuum_cost_limit\x18J \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x0c\xfa\xc7\x31\x08-1-10000\x12J\n\x12\x61utovacuum_naptime\x18K \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x11\xfa\xc7\x31\r1000-86400000\x12H\n\x0f\x61rchive_timeout\x18L \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x12\xfa\xc7\x31\x0e\x31\x30\x30\x30\x30-86400000\x12N\n\x19track_activity_query_size\x18M \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x0e\xfa\xc7\x31\n100-102400\x12\x35\n\x11\x65nable_bitmapscan\x18P \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x32\n\x0e\x65nable_hashagg\x18Q \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\x0f\x65nable_hashjoin\x18R \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x34\n\x10\x65nable_indexscan\x18S \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x38\n\x14\x65nable_indexonlyscan\x18T \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\x0f\x65nable_material\x18U \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x34\n\x10\x65nable_mergejoin\x18V 
\x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\x0f\x65nable_nestloop\x18W \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x32\n\x0e\x65nable_seqscan\x18X \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\x0b\x65nable_sort\x18Y \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x32\n\x0e\x65nable_tidscan\x18Z \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x45\n\x14max_worker_processes\x18[ \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\n\xfa\xc7\x31\x06\x30-1024\x12\x45\n\x14max_parallel_workers\x18\\ \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\n\xfa\xc7\x31\x06\x30-1024\x12P\n\x1fmax_parallel_workers_per_gather\x18] \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\n\xfa\xc7\x31\x06\x30-1024\x12Q\n\x1e\x61utovacuum_vacuum_scale_factor\x18^ \x01(\x0b\x32\x1c.google.protobuf.DoubleValueB\x0b\xfa\xc7\x31\x07\x30.0-1.0\x12R\n\x1f\x61utovacuum_analyze_scale_factor\x18_ \x01(\x0b\x32\x1c.google.protobuf.DoubleValueB\x0b\xfa\xc7\x31\x07\x30.0-1.0\x12\x41\n\x1d\x64\x65\x66\x61ult_transaction_read_only\x18` \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x10\n\x08timezone\x18\x61 \x01(\t\x12:\n\x16\x65nable_parallel_append\x18\x62 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x38\n\x14\x65nable_parallel_hash\x18\x63 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12<\n\x18\x65nable_partition_pruning\x18\x64 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x42\n\x1e\x65nable_partitionwise_aggregate\x18\x65 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12=\n\x19\x65nable_partitionwise_join\x18\x66 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\'\n\x03jit\x18g \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12N\n max_parallel_maintenance_workers\x18h \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x07\xfa\xc7\x31\x03>=0\x12\x41\n\x1dparallel_leader_participation\x18i \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12^\n!vacuum_cleanup_index_scale_factor\x18j 
\x01(\x0b\x32\x1c.google.protobuf.DoubleValueB\x15\xfa\xc7\x31\x11\x30.0-10000000000.0\x12N\n\x1blog_transaction_sample_rate\x18k \x01(\x0b\x32\x1c.google.protobuf.DoubleValueB\x0b\xfa\xc7\x31\x07\x30.0-1.0\x12`\n\x0fplan_cache_mode\x18l \x01(\x0e\x32G.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.PlanCacheMode\x12I\n\x18\x65\x66\x66\x65\x63tive_io_concurrency\x18m \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\n\xfa\xc7\x31\x06\x30-1000\x12M\n\x14\x65\x66\x66\x65\x63tive_cache_size\x18n \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x12\xfa\xc7\x31\x0e\x30-549755813888\x12r\n\x18shared_preload_libraries\x18o \x03(\x0e\x32P.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.SharedPreloadLibraries\x12U\n\x1d\x61uto_explain_log_min_duration\x18p \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x11\xfa\xc7\x31\r-1-2147483647\x12<\n\x18\x61uto_explain_log_analyze\x18q \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12<\n\x18\x61uto_explain_log_buffers\x18r \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12;\n\x17\x61uto_explain_log_timing\x18s \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12=\n\x19\x61uto_explain_log_triggers\x18t \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12<\n\x18\x61uto_explain_log_verbose\x18u \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x46\n\"auto_explain_log_nested_statements\x18v \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12K\n\x18\x61uto_explain_sample_rate\x18w \x01(\x0b\x32\x1c.google.protobuf.DoubleValueB\x0b\xfa\xc7\x31\x07\x30.0-1.0\x12<\n\x18pg_hint_plan_enable_hint\x18x \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x42\n\x1epg_hint_plan_enable_hint_table\x18y \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12p\n\x18pg_hint_plan_debug_print\x18z \x01(\x0e\x32N.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.PgHintPlanDebugPrint\x12\x66\n\x1apg_hint_plan_message_level\x18{ \x01(\x0e\x32\x42.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.LogLevel\x12I\n\x13hash_mem_multiplier\x18| 
\x01(\x0b\x32\x1c.google.protobuf.DoubleValueB\x0e\xfa\xc7\x31\n0.0-1000.0\x12W\n\x19logical_decoding_work_mem\x18~ \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x17\xfa\xc7\x31\x13\x36\x35\x35\x33\x36-1099511627776\x12K\n\x1amaintenance_io_concurrency\x18\x7f \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\n\xfa\xc7\x31\x06\x30-1000\x12U\n\x16max_slot_wal_keep_size\x18\x80\x01 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x17\xfa\xc7\x31\x13-1-2251799812636672\x12L\n\rwal_keep_size\x18\x81\x01 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x17\xfa\xc7\x31\x13-1-2251799812636672\x12<\n\x17\x65nable_incremental_sort\x18\x82\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12[\n\"autovacuum_vacuum_insert_threshold\x18\x83\x01 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x11\xfa\xc7\x31\r-1-2147483647\x12[\n%autovacuum_vacuum_insert_scale_factor\x18\x84\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValueB\r\xfa\xc7\x31\t0.0-100.0\x12P\n\x17log_min_duration_sample\x18\x85\x01 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x11\xfa\xc7\x31\r-1-2147483647\x12M\n\x19log_statement_sample_rate\x18\x86\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValueB\x0b\xfa\xc7\x31\x07\x30.0-1.0\x12Q\n\x18log_parameter_max_length\x18\x87\x01 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x11\xfa\xc7\x31\r-1-2147483647\x12Z\n!log_parameter_max_length_on_error\x18\x88\x01 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x11\xfa\xc7\x31\r-1-2147483647\x12\x39\n\x14pg_qualstats_enabled\x18\x89\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x41\n\x1cpg_qualstats_track_constants\x18\x8a\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x36\n\x10pg_qualstats_max\x18\x8b\x01 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12>\n\x19pg_qualstats_resolve_oids\x18\x8c\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12?\n\x18pg_qualstats_sample_rate\x18\x8d\x01 
\x01(\x0b\x32\x1c.google.protobuf.DoubleValue\"S\n\x08WalLevel\x12\x19\n\x15WAL_LEVEL_UNSPECIFIED\x10\x00\x12\x15\n\x11WAL_LEVEL_REPLICA\x10\x01\x12\x15\n\x11WAL_LEVEL_LOGICAL\x10\x02\"\xd6\x01\n\x11SynchronousCommit\x12\"\n\x1eSYNCHRONOUS_COMMIT_UNSPECIFIED\x10\x00\x12\x19\n\x15SYNCHRONOUS_COMMIT_ON\x10\x01\x12\x1a\n\x16SYNCHRONOUS_COMMIT_OFF\x10\x02\x12\x1c\n\x18SYNCHRONOUS_COMMIT_LOCAL\x10\x03\x12#\n\x1fSYNCHRONOUS_COMMIT_REMOTE_WRITE\x10\x04\x12#\n\x1fSYNCHRONOUS_COMMIT_REMOTE_APPLY\x10\x05\"\x9a\x01\n\x13\x43onstraintExclusion\x12$\n CONSTRAINT_EXCLUSION_UNSPECIFIED\x10\x00\x12\x1b\n\x17\x43ONSTRAINT_EXCLUSION_ON\x10\x01\x12\x1c\n\x18\x43ONSTRAINT_EXCLUSION_OFF\x10\x02\x12\"\n\x1e\x43ONSTRAINT_EXCLUSION_PARTITION\x10\x03\"\x92\x01\n\x11\x46orceParallelMode\x12#\n\x1f\x46ORCE_PARALLEL_MODE_UNSPECIFIED\x10\x00\x12\x1a\n\x16\x46ORCE_PARALLEL_MODE_ON\x10\x01\x12\x1b\n\x17\x46ORCE_PARALLEL_MODE_OFF\x10\x02\x12\x1f\n\x1b\x46ORCE_PARALLEL_MODE_REGRESS\x10\x03\"\x92\x02\n\x08LogLevel\x12\x19\n\x15LOG_LEVEL_UNSPECIFIED\x10\x00\x12\x14\n\x10LOG_LEVEL_DEBUG5\x10\x01\x12\x14\n\x10LOG_LEVEL_DEBUG4\x10\x02\x12\x14\n\x10LOG_LEVEL_DEBUG3\x10\x03\x12\x14\n\x10LOG_LEVEL_DEBUG2\x10\x04\x12\x14\n\x10LOG_LEVEL_DEBUG1\x10\x05\x12\x11\n\rLOG_LEVEL_LOG\x10\x06\x12\x14\n\x10LOG_LEVEL_NOTICE\x10\x07\x12\x15\n\x11LOG_LEVEL_WARNING\x10\x08\x12\x13\n\x0fLOG_LEVEL_ERROR\x10\t\x12\x13\n\x0fLOG_LEVEL_FATAL\x10\n\x12\x13\n\x0fLOG_LEVEL_PANIC\x10\x0b\"\x99\x01\n\x11LogErrorVerbosity\x12#\n\x1fLOG_ERROR_VERBOSITY_UNSPECIFIED\x10\x00\x12\x1d\n\x19LOG_ERROR_VERBOSITY_TERSE\x10\x01\x12\x1f\n\x1bLOG_ERROR_VERBOSITY_DEFAULT\x10\x02\x12\x1f\n\x1bLOG_ERROR_VERBOSITY_VERBOSE\x10\x03\"\x8a\x01\n\x0cLogStatement\x12\x1d\n\x19LOG_STATEMENT_UNSPECIFIED\x10\x00\x12\x16\n\x12LOG_STATEMENT_NONE\x10\x01\x12\x15\n\x11LOG_STATEMENT_DDL\x10\x02\x12\x15\n\x11LOG_STATEMENT_MOD\x10\x03\x12\x15\n\x11LOG_STATEMENT_ALL\x10\x04\"\xe6\x01\n\x14TransactionIsolation\x12%\n!TRANSACTION_ISOLATION_UNSPECIFIED\x10\x00\x12*\n&TR
ANSACTION_ISOLATION_READ_UNCOMMITTED\x10\x01\x12(\n$TRANSACTION_ISOLATION_READ_COMMITTED\x10\x02\x12)\n%TRANSACTION_ISOLATION_REPEATABLE_READ\x10\x03\x12&\n\"TRANSACTION_ISOLATION_SERIALIZABLE\x10\x04\"[\n\x0b\x42yteaOutput\x12\x1c\n\x18\x42YTEA_OUTPUT_UNSPECIFIED\x10\x00\x12\x14\n\x10\x42YTEA_OUTPUT_HEX\x10\x01\x12\x18\n\x14\x42YTEA_OUTPUT_ESCAPED\x10\x02\"R\n\tXmlBinary\x12\x1a\n\x16XML_BINARY_UNSPECIFIED\x10\x00\x12\x15\n\x11XML_BINARY_BASE64\x10\x01\x12\x12\n\x0eXML_BINARY_HEX\x10\x02\"X\n\tXmlOption\x12\x1a\n\x16XML_OPTION_UNSPECIFIED\x10\x00\x12\x17\n\x13XML_OPTION_DOCUMENT\x10\x01\x12\x16\n\x12XML_OPTION_CONTENT\x10\x02\"\x9a\x01\n\x0e\x42\x61\x63kslashQuote\x12\x1f\n\x1b\x42\x41\x43KSLASH_QUOTE_UNSPECIFIED\x10\x00\x12\x13\n\x0f\x42\x41\x43KSLASH_QUOTE\x10\x01\x12\x16\n\x12\x42\x41\x43KSLASH_QUOTE_ON\x10\x02\x12\x17\n\x13\x42\x41\x43KSLASH_QUOTE_OFF\x10\x03\x12!\n\x1d\x42\x41\x43KSLASH_QUOTE_SAFE_ENCODING\x10\x04\"\x99\x01\n\rPlanCacheMode\x12\x1f\n\x1bPLAN_CACHE_MODE_UNSPECIFIED\x10\x00\x12\x18\n\x14PLAN_CACHE_MODE_AUTO\x10\x01\x12%\n!PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN\x10\x02\x12&\n\"PLAN_CACHE_MODE_FORCE_GENERIC_PLAN\x10\x03\"\xd0\x01\n\x14PgHintPlanDebugPrint\x12(\n$PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED\x10\x00\x12 \n\x1cPG_HINT_PLAN_DEBUG_PRINT_OFF\x10\x01\x12\x1f\n\x1bPG_HINT_PLAN_DEBUG_PRINT_ON\x10\x02\x12%\n!PG_HINT_PLAN_DEBUG_PRINT_DETAILED\x10\x03\x12$\n PG_HINT_PLAN_DEBUG_PRINT_VERBOSE\x10\x04\"\xed\x01\n\x16SharedPreloadLibraries\x12(\n$SHARED_PRELOAD_LIBRARIES_UNSPECIFIED\x10\x00\x12)\n%SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN\x10\x01\x12)\n%SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN\x10\x02\x12(\n$SHARED_PRELOAD_LIBRARIES_TIMESCALEDB\x10\x03\x12)\n%SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS\x10\x04\"\x8f\x02\n\x15PostgresqlConfigSet13\x12S\n\x10\x65\x66\x66\x65\x63tive_config\x18\x01 \x01(\x0b\x32\x39.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13\x12N\n\x0buser_config\x18\x02 
\x01(\x0b\x32\x39.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13\x12Q\n\x0e\x64\x65\x66\x61ult_config\x18\x03 \x01(\x0b\x32\x39.yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13B\x81\x01\n)yandex.cloud.api.mdb.postgresql.v1.configZTgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config;postgresqlb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,yandex_dot_cloud_dot_validation__pb2.DESCRIPTOR,])
# Generated descriptor for the nested enum PostgresqlConfig13.WalLevel
# (PostgreSQL wal_level setting).  protoc output - do not edit by hand.
_POSTGRESQLCONFIG13_WALLEVEL = _descriptor.EnumDescriptor(
  name='WalLevel',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.WalLevel',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='WAL_LEVEL_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='WAL_LEVEL_REPLICA', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='WAL_LEVEL_LOGICAL', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=9706,
  serialized_end=9789,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_WALLEVEL)
# Generated descriptor for the nested enum PostgresqlConfig13.SynchronousCommit
# (PostgreSQL synchronous_commit setting).  protoc output - do not edit by hand.
_POSTGRESQLCONFIG13_SYNCHRONOUSCOMMIT = _descriptor.EnumDescriptor(
  name='SynchronousCommit',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.SynchronousCommit',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='SYNCHRONOUS_COMMIT_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SYNCHRONOUS_COMMIT_ON', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SYNCHRONOUS_COMMIT_OFF', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SYNCHRONOUS_COMMIT_LOCAL', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SYNCHRONOUS_COMMIT_REMOTE_WRITE', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SYNCHRONOUS_COMMIT_REMOTE_APPLY', index=5, number=5,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=9792,
  serialized_end=10006,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_SYNCHRONOUSCOMMIT)
# Generated descriptor for the nested enum PostgresqlConfig13.ConstraintExclusion
# (PostgreSQL constraint_exclusion setting).  protoc output - do not edit by hand.
_POSTGRESQLCONFIG13_CONSTRAINTEXCLUSION = _descriptor.EnumDescriptor(
  name='ConstraintExclusion',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.ConstraintExclusion',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='CONSTRAINT_EXCLUSION_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CONSTRAINT_EXCLUSION_ON', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CONSTRAINT_EXCLUSION_OFF', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CONSTRAINT_EXCLUSION_PARTITION', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=10009,
  serialized_end=10163,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_CONSTRAINTEXCLUSION)
# Generated descriptor for the nested enum PostgresqlConfig13.ForceParallelMode
# (PostgreSQL force_parallel_mode setting).  protoc output - do not edit by hand.
_POSTGRESQLCONFIG13_FORCEPARALLELMODE = _descriptor.EnumDescriptor(
  name='ForceParallelMode',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.ForceParallelMode',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='FORCE_PARALLEL_MODE_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='FORCE_PARALLEL_MODE_ON', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='FORCE_PARALLEL_MODE_OFF', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='FORCE_PARALLEL_MODE_REGRESS', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=10166,
  serialized_end=10312,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_FORCEPARALLELMODE)
# Generated descriptor for the nested enum PostgresqlConfig13.LogLevel
# (shared by the log/client_min_messages-style settings).  protoc output -
# do not edit by hand.
_POSTGRESQLCONFIG13_LOGLEVEL = _descriptor.EnumDescriptor(
  name='LogLevel',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.LogLevel',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='LOG_LEVEL_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_LEVEL_DEBUG5', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_LEVEL_DEBUG4', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_LEVEL_DEBUG3', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_LEVEL_DEBUG2', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_LEVEL_DEBUG1', index=5, number=5,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_LEVEL_LOG', index=6, number=6,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_LEVEL_NOTICE', index=7, number=7,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_LEVEL_WARNING', index=8, number=8,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_LEVEL_ERROR', index=9, number=9,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_LEVEL_FATAL', index=10, number=10,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_LEVEL_PANIC', index=11, number=11,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=10315,
  serialized_end=10589,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_LOGLEVEL)
# Generated descriptor for the nested enum PostgresqlConfig13.LogErrorVerbosity
# (PostgreSQL log_error_verbosity setting).  protoc output - do not edit by hand.
_POSTGRESQLCONFIG13_LOGERRORVERBOSITY = _descriptor.EnumDescriptor(
  name='LogErrorVerbosity',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.LogErrorVerbosity',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='LOG_ERROR_VERBOSITY_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_ERROR_VERBOSITY_TERSE', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_ERROR_VERBOSITY_DEFAULT', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_ERROR_VERBOSITY_VERBOSE', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=10592,
  serialized_end=10745,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_LOGERRORVERBOSITY)
# Generated descriptor for the nested enum PostgresqlConfig13.LogStatement
# (PostgreSQL log_statement setting).  protoc output - do not edit by hand.
_POSTGRESQLCONFIG13_LOGSTATEMENT = _descriptor.EnumDescriptor(
  name='LogStatement',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.LogStatement',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='LOG_STATEMENT_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_STATEMENT_NONE', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_STATEMENT_DDL', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_STATEMENT_MOD', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LOG_STATEMENT_ALL', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=10748,
  serialized_end=10886,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_LOGSTATEMENT)
# Generated descriptor for the nested enum PostgresqlConfig13.TransactionIsolation
# (PostgreSQL default_transaction_isolation setting).  protoc output - do not
# edit by hand.
_POSTGRESQLCONFIG13_TRANSACTIONISOLATION = _descriptor.EnumDescriptor(
  name='TransactionIsolation',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.TransactionIsolation',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='TRANSACTION_ISOLATION_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TRANSACTION_ISOLATION_READ_UNCOMMITTED', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TRANSACTION_ISOLATION_READ_COMMITTED', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TRANSACTION_ISOLATION_REPEATABLE_READ', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TRANSACTION_ISOLATION_SERIALIZABLE', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=10889,
  serialized_end=11119,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_TRANSACTIONISOLATION)
# Generated descriptor for the nested enum PostgresqlConfig13.ByteaOutput
# (PostgreSQL bytea_output setting).  protoc output - do not edit by hand.
_POSTGRESQLCONFIG13_BYTEAOUTPUT = _descriptor.EnumDescriptor(
  name='ByteaOutput',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.ByteaOutput',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='BYTEA_OUTPUT_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='BYTEA_OUTPUT_HEX', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='BYTEA_OUTPUT_ESCAPED', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=11121,
  serialized_end=11212,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_BYTEAOUTPUT)
# Generated descriptor for the nested enum PostgresqlConfig13.XmlBinary
# (PostgreSQL xmlbinary setting).  protoc output - do not edit by hand.
_POSTGRESQLCONFIG13_XMLBINARY = _descriptor.EnumDescriptor(
  name='XmlBinary',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.XmlBinary',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='XML_BINARY_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='XML_BINARY_BASE64', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='XML_BINARY_HEX', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=11214,
  serialized_end=11296,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_XMLBINARY)
# Generated descriptor for the nested enum PostgresqlConfig13.XmlOption
# (PostgreSQL xmloption setting).  protoc output - do not edit by hand.
_POSTGRESQLCONFIG13_XMLOPTION = _descriptor.EnumDescriptor(
  name='XmlOption',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.XmlOption',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='XML_OPTION_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='XML_OPTION_DOCUMENT', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='XML_OPTION_CONTENT', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=11298,
  serialized_end=11386,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_XMLOPTION)
# Enum descriptor for the nested enum PostgresqlConfig13.BackslashQuote
# (UNSPECIFIED=0, BACKSLASH_QUOTE=1, ON=2, OFF=3, SAFE_ENCODING=4).
# Generated by protoc — values and offsets must stay in sync with the .proto.
_POSTGRESQLCONFIG13_BACKSLASHQUOTE = _descriptor.EnumDescriptor(
  name='BackslashQuote',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.BackslashQuote',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='BACKSLASH_QUOTE_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='BACKSLASH_QUOTE', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='BACKSLASH_QUOTE_ON', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='BACKSLASH_QUOTE_OFF', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='BACKSLASH_QUOTE_SAFE_ENCODING', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=11389,
  serialized_end=11543,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_BACKSLASHQUOTE)
# Enum descriptor for the nested enum PostgresqlConfig13.PlanCacheMode
# (UNSPECIFIED=0, AUTO=1, FORCE_CUSTOM_PLAN=2, FORCE_GENERIC_PLAN=3).
# Generated by protoc — values and offsets must stay in sync with the .proto.
_POSTGRESQLCONFIG13_PLANCACHEMODE = _descriptor.EnumDescriptor(
  name='PlanCacheMode',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.PlanCacheMode',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='PLAN_CACHE_MODE_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='PLAN_CACHE_MODE_AUTO', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='PLAN_CACHE_MODE_FORCE_GENERIC_PLAN', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=11546,
  serialized_end=11699,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_PLANCACHEMODE)
# Enum descriptor for the nested enum PostgresqlConfig13.PgHintPlanDebugPrint
# (UNSPECIFIED=0, OFF=1, ON=2, DETAILED=3, VERBOSE=4).
# Generated by protoc — values and offsets must stay in sync with the .proto.
_POSTGRESQLCONFIG13_PGHINTPLANDEBUGPRINT = _descriptor.EnumDescriptor(
  name='PgHintPlanDebugPrint',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.PgHintPlanDebugPrint',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='PG_HINT_PLAN_DEBUG_PRINT_OFF', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='PG_HINT_PLAN_DEBUG_PRINT_ON', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='PG_HINT_PLAN_DEBUG_PRINT_DETAILED', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='PG_HINT_PLAN_DEBUG_PRINT_VERBOSE', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=11702,
  serialized_end=11910,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_PGHINTPLANDEBUGPRINT)
# Enum descriptor for the nested enum PostgresqlConfig13.SharedPreloadLibraries
# (UNSPECIFIED=0, AUTO_EXPLAIN=1, PG_HINT_PLAN=2, TIMESCALEDB=3, PG_QUALSTATS=4).
# Generated by protoc — values and offsets must stay in sync with the .proto.
_POSTGRESQLCONFIG13_SHAREDPRELOADLIBRARIES = _descriptor.EnumDescriptor(
  name='SharedPreloadLibraries',
  full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.SharedPreloadLibraries',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='SHARED_PRELOAD_LIBRARIES_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SHARED_PRELOAD_LIBRARIES_TIMESCALEDB', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=11913,
  serialized_end=12150,
)
_sym_db.RegisterEnumDescriptor(_POSTGRESQLCONFIG13_SHAREDPRELOADLIBRARIES)
_POSTGRESQLCONFIG13 = _descriptor.Descriptor(
name='PostgresqlConfig13',
full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='max_connections', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.max_connections', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='shared_buffers', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.shared_buffers', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='temp_buffers', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.temp_buffers', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_prepared_transactions', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.max_prepared_transactions', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='work_mem', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.work_mem', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='maintenance_work_mem', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.maintenance_work_mem', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='autovacuum_work_mem', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.autovacuum_work_mem', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='temp_file_limit', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.temp_file_limit', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='vacuum_cost_delay', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.vacuum_cost_delay', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='vacuum_cost_page_hit', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.vacuum_cost_page_hit', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='vacuum_cost_page_miss', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.vacuum_cost_page_miss', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='vacuum_cost_page_dirty', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.vacuum_cost_page_dirty', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='vacuum_cost_limit', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.vacuum_cost_limit', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bgwriter_delay', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.bgwriter_delay', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\01010-10000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bgwriter_lru_maxpages', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.bgwriter_lru_maxpages', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bgwriter_lru_multiplier', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.bgwriter_lru_multiplier', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bgwriter_flush_after', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.bgwriter_flush_after', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0060-2048', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='backend_flush_after', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.backend_flush_after', index=17,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0060-2048', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='old_snapshot_threshold', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.old_snapshot_threshold', index=18,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\013-1-86400000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='wal_level', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.wal_level', index=19,
number=20, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='synchronous_commit', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.synchronous_commit', index=20,
number=21, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='checkpoint_timeout', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.checkpoint_timeout', index=21,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\01630000-86400000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='checkpoint_completion_target', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.checkpoint_completion_target', index=22,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='checkpoint_flush_after', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.checkpoint_flush_after', index=23,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0060-2048', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_wal_size', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.max_wal_size', index=24,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='min_wal_size', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.min_wal_size', index=25,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_standby_streaming_delay', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.max_standby_streaming_delay', index=26,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='default_statistics_target', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.default_statistics_target', index=27,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='constraint_exclusion', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.constraint_exclusion', index=28,
number=29, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cursor_tuple_fraction', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.cursor_tuple_fraction', index=29,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='from_collapse_limit', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.from_collapse_limit', index=30,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0141-2147483647', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='join_collapse_limit', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.join_collapse_limit', index=31,
number=32, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0141-2147483647', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='force_parallel_mode', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.force_parallel_mode', index=32,
number=33, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='client_min_messages', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.client_min_messages', index=33,
number=34, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_min_messages', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_min_messages', index=34,
number=35, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_min_error_statement', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_min_error_statement', index=35,
number=36, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_min_duration_statement', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_min_duration_statement', index=36,
number=37, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_checkpoints', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_checkpoints', index=37,
number=38, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_connections', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_connections', index=38,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_disconnections', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_disconnections', index=39,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_duration', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_duration', index=40,
number=41, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_error_verbosity', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_error_verbosity', index=41,
number=42, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_lock_waits', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_lock_waits', index=42,
number=43, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_statement', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_statement', index=43,
number=44, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_temp_files', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_temp_files', index=44,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='search_path', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.search_path', index=45,
number=46, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='row_security', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.row_security', index=46,
number=47, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='default_transaction_isolation', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.default_transaction_isolation', index=47,
number=48, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='statement_timeout', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.statement_timeout', index=48,
number=49, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='lock_timeout', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.lock_timeout', index=49,
number=50, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='idle_in_transaction_session_timeout', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.idle_in_transaction_session_timeout', index=50,
number=51, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bytea_output', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.bytea_output', index=51,
number=52, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='xmlbinary', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.xmlbinary', index=52,
number=53, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='xmloption', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.xmloption', index=53,
number=54, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='gin_pending_list_limit', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.gin_pending_list_limit', index=54,
number=55, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='deadlock_timeout', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.deadlock_timeout', index=55,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_locks_per_transaction', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.max_locks_per_transaction', index=56,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_pred_locks_per_transaction', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.max_pred_locks_per_transaction', index=57,
number=58, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='array_nulls', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.array_nulls', index=58,
number=59, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='backslash_quote', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.backslash_quote', index=59,
number=60, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='default_with_oids', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.default_with_oids', index=60,
number=61, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='escape_string_warning', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.escape_string_warning', index=61,
number=62, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='lo_compat_privileges', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.lo_compat_privileges', index=62,
number=63, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='operator_precedence_warning', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.operator_precedence_warning', index=63,
number=64, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='quote_all_identifiers', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.quote_all_identifiers', index=64,
number=65, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='standard_conforming_strings', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.standard_conforming_strings', index=65,
number=66, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='synchronize_seqscans', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.synchronize_seqscans', index=66,
number=67, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='transform_null_equals', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.transform_null_equals', index=67,
number=68, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='exit_on_error', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.exit_on_error', index=68,
number=69, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='seq_page_cost', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.seq_page_cost', index=69,
number=70, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='random_page_cost', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.random_page_cost', index=70,
number=71, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='autovacuum_max_workers', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.autovacuum_max_workers', index=71,
number=72, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0041-32', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='autovacuum_vacuum_cost_delay', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.autovacuum_vacuum_cost_delay', index=72,
number=73, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\006-1-100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='autovacuum_vacuum_cost_limit', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.autovacuum_vacuum_cost_limit', index=73,
number=74, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\010-1-10000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='autovacuum_naptime', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.autovacuum_naptime', index=74,
number=75, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\r1000-86400000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='archive_timeout', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.archive_timeout', index=75,
number=76, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\01610000-86400000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='track_activity_query_size', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.track_activity_query_size', index=76,
number=77, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\n100-102400', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_bitmapscan', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_bitmapscan', index=77,
number=80, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_hashagg', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_hashagg', index=78,
number=81, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_hashjoin', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_hashjoin', index=79,
number=82, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_indexscan', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_indexscan', index=80,
number=83, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_indexonlyscan', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_indexonlyscan', index=81,
number=84, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_material', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_material', index=82,
number=85, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_mergejoin', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_mergejoin', index=83,
number=86, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_nestloop', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_nestloop', index=84,
number=87, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_seqscan', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_seqscan', index=85,
number=88, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_sort', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_sort', index=86,
number=89, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_tidscan', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_tidscan', index=87,
number=90, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_worker_processes', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.max_worker_processes', index=88,
number=91, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0060-1024', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_parallel_workers', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.max_parallel_workers', index=89,
number=92, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0060-1024', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_parallel_workers_per_gather', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.max_parallel_workers_per_gather', index=90,
number=93, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0060-1024', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='autovacuum_vacuum_scale_factor', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.autovacuum_vacuum_scale_factor', index=91,
number=94, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0070.0-1.0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='autovacuum_analyze_scale_factor', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.autovacuum_analyze_scale_factor', index=92,
number=95, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0070.0-1.0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='default_transaction_read_only', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.default_transaction_read_only', index=93,
number=96, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timezone', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.timezone', index=94,
number=97, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_parallel_append', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_parallel_append', index=95,
number=98, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_parallel_hash', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_parallel_hash', index=96,
number=99, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_partition_pruning', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_partition_pruning', index=97,
number=100, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_partitionwise_aggregate', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_partitionwise_aggregate', index=98,
number=101, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_partitionwise_join', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_partitionwise_join', index=99,
number=102, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='jit', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.jit', index=100,
number=103, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_parallel_maintenance_workers', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.max_parallel_maintenance_workers', index=101,
number=104, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\003>=0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='parallel_leader_participation', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.parallel_leader_participation', index=102,
number=105, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='vacuum_cleanup_index_scale_factor', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.vacuum_cleanup_index_scale_factor', index=103,
number=106, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0210.0-10000000000.0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_transaction_sample_rate', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_transaction_sample_rate', index=104,
number=107, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0070.0-1.0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='plan_cache_mode', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.plan_cache_mode', index=105,
number=108, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_io_concurrency', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.effective_io_concurrency', index=106,
number=109, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0060-1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cache_size', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.effective_cache_size', index=107,
number=110, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0160-549755813888', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='shared_preload_libraries', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.shared_preload_libraries', index=108,
number=111, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='auto_explain_log_min_duration', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.auto_explain_log_min_duration', index=109,
number=112, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\r-1-2147483647', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='auto_explain_log_analyze', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.auto_explain_log_analyze', index=110,
number=113, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='auto_explain_log_buffers', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.auto_explain_log_buffers', index=111,
number=114, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='auto_explain_log_timing', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.auto_explain_log_timing', index=112,
number=115, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='auto_explain_log_triggers', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.auto_explain_log_triggers', index=113,
number=116, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='auto_explain_log_verbose', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.auto_explain_log_verbose', index=114,
number=117, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='auto_explain_log_nested_statements', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.auto_explain_log_nested_statements', index=115,
number=118, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='auto_explain_sample_rate', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.auto_explain_sample_rate', index=116,
number=119, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0070.0-1.0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pg_hint_plan_enable_hint', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.pg_hint_plan_enable_hint', index=117,
number=120, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pg_hint_plan_enable_hint_table', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.pg_hint_plan_enable_hint_table', index=118,
number=121, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pg_hint_plan_debug_print', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.pg_hint_plan_debug_print', index=119,
number=122, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pg_hint_plan_message_level', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.pg_hint_plan_message_level', index=120,
number=123, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='hash_mem_multiplier', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.hash_mem_multiplier', index=121,
number=124, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\n0.0-1000.0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='logical_decoding_work_mem', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.logical_decoding_work_mem', index=122,
number=126, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\02365536-1099511627776', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='maintenance_io_concurrency', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.maintenance_io_concurrency', index=123,
number=127, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0060-1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_slot_wal_keep_size', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.max_slot_wal_keep_size', index=124,
number=128, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\023-1-2251799812636672', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='wal_keep_size', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.wal_keep_size', index=125,
number=129, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\023-1-2251799812636672', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enable_incremental_sort', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.enable_incremental_sort', index=126,
number=130, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='autovacuum_vacuum_insert_threshold', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.autovacuum_vacuum_insert_threshold', index=127,
number=131, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\r-1-2147483647', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='autovacuum_vacuum_insert_scale_factor', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.autovacuum_vacuum_insert_scale_factor', index=128,
number=132, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\t0.0-100.0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_min_duration_sample', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_min_duration_sample', index=129,
number=133, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\r-1-2147483647', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_statement_sample_rate', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_statement_sample_rate', index=130,
number=134, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0070.0-1.0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_parameter_max_length', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_parameter_max_length', index=131,
number=135, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\r-1-2147483647', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_parameter_max_length_on_error', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.log_parameter_max_length_on_error', index=132,
number=136, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\r-1-2147483647', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pg_qualstats_enabled', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.pg_qualstats_enabled', index=133,
number=137, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pg_qualstats_track_constants', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.pg_qualstats_track_constants', index=134,
number=138, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pg_qualstats_max', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.pg_qualstats_max', index=135,
number=139, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pg_qualstats_resolve_oids', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.pg_qualstats_resolve_oids', index=136,
number=140, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pg_qualstats_sample_rate', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13.pg_qualstats_sample_rate', index=137,
number=141, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_POSTGRESQLCONFIG13_WALLEVEL,
_POSTGRESQLCONFIG13_SYNCHRONOUSCOMMIT,
_POSTGRESQLCONFIG13_CONSTRAINTEXCLUSION,
_POSTGRESQLCONFIG13_FORCEPARALLELMODE,
_POSTGRESQLCONFIG13_LOGLEVEL,
_POSTGRESQLCONFIG13_LOGERRORVERBOSITY,
_POSTGRESQLCONFIG13_LOGSTATEMENT,
_POSTGRESQLCONFIG13_TRANSACTIONISOLATION,
_POSTGRESQLCONFIG13_BYTEAOUTPUT,
_POSTGRESQLCONFIG13_XMLBINARY,
_POSTGRESQLCONFIG13_XMLOPTION,
_POSTGRESQLCONFIG13_BACKSLASHQUOTE,
_POSTGRESQLCONFIG13_PLANCACHEMODE,
_POSTGRESQLCONFIG13_PGHINTPLANDEBUGPRINT,
_POSTGRESQLCONFIG13_SHAREDPRELOADLIBRARIES,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=12150,
)
_POSTGRESQLCONFIGSET13 = _descriptor.Descriptor(
name='PostgresqlConfigSet13',
full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet13',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='effective_config', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet13.effective_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_config', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet13.user_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='default_config', full_name='yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet13.default_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=12153,
serialized_end=12424,
)
_POSTGRESQLCONFIG13.fields_by_name['max_connections'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['shared_buffers'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['temp_buffers'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['max_prepared_transactions'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['work_mem'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['maintenance_work_mem'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_work_mem'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['temp_file_limit'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['vacuum_cost_delay'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['vacuum_cost_page_hit'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['vacuum_cost_page_miss'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['vacuum_cost_page_dirty'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['vacuum_cost_limit'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['bgwriter_delay'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['bgwriter_lru_maxpages'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['bgwriter_lru_multiplier'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['bgwriter_flush_after'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['backend_flush_after'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['old_snapshot_threshold'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['wal_level'].enum_type = _POSTGRESQLCONFIG13_WALLEVEL
_POSTGRESQLCONFIG13.fields_by_name['synchronous_commit'].enum_type = _POSTGRESQLCONFIG13_SYNCHRONOUSCOMMIT
_POSTGRESQLCONFIG13.fields_by_name['checkpoint_timeout'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['checkpoint_completion_target'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['checkpoint_flush_after'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['max_wal_size'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['min_wal_size'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['max_standby_streaming_delay'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['default_statistics_target'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['constraint_exclusion'].enum_type = _POSTGRESQLCONFIG13_CONSTRAINTEXCLUSION
_POSTGRESQLCONFIG13.fields_by_name['cursor_tuple_fraction'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['from_collapse_limit'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['join_collapse_limit'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['force_parallel_mode'].enum_type = _POSTGRESQLCONFIG13_FORCEPARALLELMODE
_POSTGRESQLCONFIG13.fields_by_name['client_min_messages'].enum_type = _POSTGRESQLCONFIG13_LOGLEVEL
_POSTGRESQLCONFIG13.fields_by_name['log_min_messages'].enum_type = _POSTGRESQLCONFIG13_LOGLEVEL
_POSTGRESQLCONFIG13.fields_by_name['log_min_error_statement'].enum_type = _POSTGRESQLCONFIG13_LOGLEVEL
_POSTGRESQLCONFIG13.fields_by_name['log_min_duration_statement'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['log_checkpoints'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['log_connections'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['log_disconnections'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['log_duration'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['log_error_verbosity'].enum_type = _POSTGRESQLCONFIG13_LOGERRORVERBOSITY
_POSTGRESQLCONFIG13.fields_by_name['log_lock_waits'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['log_statement'].enum_type = _POSTGRESQLCONFIG13_LOGSTATEMENT
_POSTGRESQLCONFIG13.fields_by_name['log_temp_files'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['row_security'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['default_transaction_isolation'].enum_type = _POSTGRESQLCONFIG13_TRANSACTIONISOLATION
_POSTGRESQLCONFIG13.fields_by_name['statement_timeout'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['lock_timeout'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['idle_in_transaction_session_timeout'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['bytea_output'].enum_type = _POSTGRESQLCONFIG13_BYTEAOUTPUT
_POSTGRESQLCONFIG13.fields_by_name['xmlbinary'].enum_type = _POSTGRESQLCONFIG13_XMLBINARY
_POSTGRESQLCONFIG13.fields_by_name['xmloption'].enum_type = _POSTGRESQLCONFIG13_XMLOPTION
_POSTGRESQLCONFIG13.fields_by_name['gin_pending_list_limit'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['deadlock_timeout'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['max_locks_per_transaction'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['max_pred_locks_per_transaction'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['array_nulls'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['backslash_quote'].enum_type = _POSTGRESQLCONFIG13_BACKSLASHQUOTE
_POSTGRESQLCONFIG13.fields_by_name['default_with_oids'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['escape_string_warning'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['lo_compat_privileges'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['operator_precedence_warning'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['quote_all_identifiers'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['standard_conforming_strings'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['synchronize_seqscans'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['transform_null_equals'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['exit_on_error'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['seq_page_cost'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['random_page_cost'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_max_workers'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_vacuum_cost_delay'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_vacuum_cost_limit'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_naptime'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['archive_timeout'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['track_activity_query_size'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_bitmapscan'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_hashagg'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_hashjoin'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_indexscan'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_indexonlyscan'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_material'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_mergejoin'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_nestloop'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_seqscan'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_sort'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_tidscan'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['max_worker_processes'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['max_parallel_workers'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['max_parallel_workers_per_gather'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_vacuum_scale_factor'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_analyze_scale_factor'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['default_transaction_read_only'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_parallel_append'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_parallel_hash'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_partition_pruning'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_partitionwise_aggregate'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_partitionwise_join'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['jit'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['max_parallel_maintenance_workers'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['parallel_leader_participation'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['vacuum_cleanup_index_scale_factor'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['log_transaction_sample_rate'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['plan_cache_mode'].enum_type = _POSTGRESQLCONFIG13_PLANCACHEMODE
_POSTGRESQLCONFIG13.fields_by_name['effective_io_concurrency'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['effective_cache_size'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['shared_preload_libraries'].enum_type = _POSTGRESQLCONFIG13_SHAREDPRELOADLIBRARIES
_POSTGRESQLCONFIG13.fields_by_name['auto_explain_log_min_duration'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['auto_explain_log_analyze'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['auto_explain_log_buffers'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['auto_explain_log_timing'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['auto_explain_log_triggers'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['auto_explain_log_verbose'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['auto_explain_log_nested_statements'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['auto_explain_sample_rate'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['pg_hint_plan_enable_hint'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['pg_hint_plan_enable_hint_table'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['pg_hint_plan_debug_print'].enum_type = _POSTGRESQLCONFIG13_PGHINTPLANDEBUGPRINT
_POSTGRESQLCONFIG13.fields_by_name['pg_hint_plan_message_level'].enum_type = _POSTGRESQLCONFIG13_LOGLEVEL
_POSTGRESQLCONFIG13.fields_by_name['hash_mem_multiplier'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['logical_decoding_work_mem'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['maintenance_io_concurrency'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['max_slot_wal_keep_size'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['wal_keep_size'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['enable_incremental_sort'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_vacuum_insert_threshold'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_vacuum_insert_scale_factor'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['log_min_duration_sample'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['log_statement_sample_rate'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13.fields_by_name['log_parameter_max_length'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['log_parameter_max_length_on_error'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['pg_qualstats_enabled'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['pg_qualstats_track_constants'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['pg_qualstats_max'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_POSTGRESQLCONFIG13.fields_by_name['pg_qualstats_resolve_oids'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_POSTGRESQLCONFIG13.fields_by_name['pg_qualstats_sample_rate'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_POSTGRESQLCONFIG13_WALLEVEL.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_SYNCHRONOUSCOMMIT.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_CONSTRAINTEXCLUSION.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_FORCEPARALLELMODE.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_LOGLEVEL.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_LOGERRORVERBOSITY.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_LOGSTATEMENT.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_TRANSACTIONISOLATION.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_BYTEAOUTPUT.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_XMLBINARY.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_XMLOPTION.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_BACKSLASHQUOTE.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_PLANCACHEMODE.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_PGHINTPLANDEBUGPRINT.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIG13_SHAREDPRELOADLIBRARIES.containing_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIGSET13.fields_by_name['effective_config'].message_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIGSET13.fields_by_name['user_config'].message_type = _POSTGRESQLCONFIG13
_POSTGRESQLCONFIGSET13.fields_by_name['default_config'].message_type = _POSTGRESQLCONFIG13
DESCRIPTOR.message_types_by_name['PostgresqlConfig13'] = _POSTGRESQLCONFIG13
DESCRIPTOR.message_types_by_name['PostgresqlConfigSet13'] = _POSTGRESQLCONFIGSET13
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PostgresqlConfig13 = _reflection.GeneratedProtocolMessageType('PostgresqlConfig13', (_message.Message,), {
'DESCRIPTOR' : _POSTGRESQLCONFIG13,
'__module__' : 'yandex.cloud.mdb.postgresql.v1.config.postgresql13_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13)
})
_sym_db.RegisterMessage(PostgresqlConfig13)
PostgresqlConfigSet13 = _reflection.GeneratedProtocolMessageType('PostgresqlConfigSet13', (_message.Message,), {
'DESCRIPTOR' : _POSTGRESQLCONFIGSET13,
'__module__' : 'yandex.cloud.mdb.postgresql.v1.config.postgresql13_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet13)
})
_sym_db.RegisterMessage(PostgresqlConfigSet13)
DESCRIPTOR._options = None
_POSTGRESQLCONFIG13.fields_by_name['bgwriter_delay']._options = None
_POSTGRESQLCONFIG13.fields_by_name['bgwriter_flush_after']._options = None
_POSTGRESQLCONFIG13.fields_by_name['backend_flush_after']._options = None
_POSTGRESQLCONFIG13.fields_by_name['old_snapshot_threshold']._options = None
_POSTGRESQLCONFIG13.fields_by_name['checkpoint_timeout']._options = None
_POSTGRESQLCONFIG13.fields_by_name['checkpoint_flush_after']._options = None
_POSTGRESQLCONFIG13.fields_by_name['from_collapse_limit']._options = None
_POSTGRESQLCONFIG13.fields_by_name['join_collapse_limit']._options = None
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_max_workers']._options = None
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_vacuum_cost_delay']._options = None
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_vacuum_cost_limit']._options = None
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_naptime']._options = None
_POSTGRESQLCONFIG13.fields_by_name['archive_timeout']._options = None
_POSTGRESQLCONFIG13.fields_by_name['track_activity_query_size']._options = None
_POSTGRESQLCONFIG13.fields_by_name['max_worker_processes']._options = None
_POSTGRESQLCONFIG13.fields_by_name['max_parallel_workers']._options = None
_POSTGRESQLCONFIG13.fields_by_name['max_parallel_workers_per_gather']._options = None
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_vacuum_scale_factor']._options = None
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_analyze_scale_factor']._options = None
_POSTGRESQLCONFIG13.fields_by_name['max_parallel_maintenance_workers']._options = None
_POSTGRESQLCONFIG13.fields_by_name['vacuum_cleanup_index_scale_factor']._options = None
_POSTGRESQLCONFIG13.fields_by_name['log_transaction_sample_rate']._options = None
_POSTGRESQLCONFIG13.fields_by_name['effective_io_concurrency']._options = None
_POSTGRESQLCONFIG13.fields_by_name['effective_cache_size']._options = None
_POSTGRESQLCONFIG13.fields_by_name['auto_explain_log_min_duration']._options = None
_POSTGRESQLCONFIG13.fields_by_name['auto_explain_sample_rate']._options = None
_POSTGRESQLCONFIG13.fields_by_name['hash_mem_multiplier']._options = None
_POSTGRESQLCONFIG13.fields_by_name['logical_decoding_work_mem']._options = None
_POSTGRESQLCONFIG13.fields_by_name['maintenance_io_concurrency']._options = None
_POSTGRESQLCONFIG13.fields_by_name['max_slot_wal_keep_size']._options = None
_POSTGRESQLCONFIG13.fields_by_name['wal_keep_size']._options = None
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_vacuum_insert_threshold']._options = None
_POSTGRESQLCONFIG13.fields_by_name['autovacuum_vacuum_insert_scale_factor']._options = None
_POSTGRESQLCONFIG13.fields_by_name['log_min_duration_sample']._options = None
_POSTGRESQLCONFIG13.fields_by_name['log_statement_sample_rate']._options = None
_POSTGRESQLCONFIG13.fields_by_name['log_parameter_max_length']._options = None
_POSTGRESQLCONFIG13.fields_by_name['log_parameter_max_length_on_error']._options = None
# @@protoc_insertion_point(module_scope)
| 70.028282 | 18,234 | 0.799883 |
b219c7dfd3411460e794d90cd648936a82d18d82 | 1,868 | py | Python | seq2annotation/trainer/cli.py | lanSeFangZhou/seq2annotation | a824520d46f0b3d70268fae422976a5ce1b3f4ce | [
"Apache-2.0"
] | 90 | 2018-11-29T07:05:16.000Z | 2021-11-22T11:32:58.000Z | seq2annotation/trainer/cli.py | howl-anderson/seq2annotation | 9069614bb9ee0bea2ec2b3e711914b067e9003bb | [
"Apache-2.0"
] | 50 | 2019-06-27T07:11:18.000Z | 2022-02-10T00:01:02.000Z | seq2annotation/trainer/cli.py | lanSeFangZhou/seq2annotation | a824520d46f0b3d70268fae422976a5ce1b3f4ce | [
"Apache-2.0"
] | 23 | 2019-01-03T14:57:15.000Z | 2022-03-08T07:50:33.000Z | from typing import Any
from deliverable_model.request import Request
from deliverable_model.response import Response
from deliverable_model.utils import create_dir_if_needed
from ioflow.configure import read_configure
from ioflow.corpus import get_corpus_processor
from seq2annotation.input import build_input_func, generate_tagset
from seq2annotation.model import Model
from seq2annotation.trainer.estimator_utils import export_as_deliverable_model
from deliverable_model.converter_base import ConverterBase
from seq2annotation_for_deliverable.main import (
ConverterForRequest,
ConverterForResponse,
)
def main():
raw_config = read_configure()
model = Model(raw_config)
config = model.get_default_config()
config.update(raw_config)
corpus = get_corpus_processor(config)
corpus.prepare()
train_data_generator_func = corpus.get_generator_func(corpus.TRAIN)
eval_data_generator_func = corpus.get_generator_func(corpus.EVAL)
corpus_meta_data = corpus.get_meta_info()
config["tags_data"] = generate_tagset(corpus_meta_data["tags"])
# train and evaluate model
train_input_func = build_input_func(train_data_generator_func, config)
eval_input_func = (
build_input_func(eval_data_generator_func, config)
if eval_data_generator_func
else None
)
evaluate_result, export_results, final_saved_model = model.train_and_eval_then_save(
train_input_func, eval_input_func, config
)
export_as_deliverable_model(
create_dir_if_needed(config["deliverable_model_dir"]),
tensorflow_saved_model=final_saved_model,
converter_for_request=ConverterForRequest(),
converter_for_response=ConverterForResponse(),
addition_model_dependency=["micro_toolkit", "seq2annotation_for_deliverable"],
)
if __name__ == "__main__":
main()
| 32.77193 | 88 | 0.784797 |
bc0e72f2f74e9bc0ffbe33fe08f2d9251d2b1f04 | 28,501 | py | Python | brainfck/brainfck.py | GitAcrown/RedAppsv2 | a3a1fb5a5c659ce6e54e62503012a79a71763d47 | [
"MIT"
] | 1 | 2022-03-07T01:54:10.000Z | 2022-03-07T01:54:10.000Z | brainfck/brainfck.py | GitAcrown/RedAppsv2 | a3a1fb5a5c659ce6e54e62503012a79a71763d47 | [
"MIT"
] | null | null | null | brainfck/brainfck.py | GitAcrown/RedAppsv2 | a3a1fb5a5c659ce6e54e62503012a79a71763d47 | [
"MIT"
] | null | null | null | import asyncio
import logging
import yaml
import random
import os
import string
import time
from datetime import datetime
from fuzzywuzzy import process
import discord
from redbot.core.data_manager import cog_data_path
from redbot.core.utils.menus import start_adding_reactions, menu, DEFAULT_CONTROLS
from typing import Union, Tuple
from redbot.core import Config, commands, checks, errors
from redbot.core.utils.chat_formatting import box, humanize_number
from tabulate import tabulate
logger = logging.getLogger("red.RedAppsv2.brainfck")
class BrainfckError(Exception):
"""Erreurs liées au module Brainfck"""
class InvalidFile(BrainfckError):
"""Soulevée lorsque le fichier .yaml est mal formatté (clés manquantes)"""
class InvalidID(BrainfckError):
"""Soulevée lorsqu'un fichier avec le même ID a été déjà chargé"""
class ContentError(BrainfckError):
"""Soulevée lorsque la clef 'content' contient trop ou trop peu de questions"""
class ReaderError(BrainfckError):
"""Soulevée lorsqu'il y a une erreur de lecture du fichier"""
class Brainfck(commands.Cog):
"""Mesurez-vous aux autres dans une série de quiz customisables !"""
def __init__(self, bot):
super().__init__()
self.bot = bot
self.config = Config.get_conf(self, identifier=736144321857978388, force_registration=True)
default_global = {"Global_Leaderboard": {},
"Packs_Leaderboard": {},
"Sessions": {}}
default_user = {"stats": {"w": 0, "d": 0, "l": 0},
"receive_lb_notifs": False}
self.config.register_global(**default_global)
self.config.register_user(**default_user)
self.packs = cog_data_path(self) / "packs"
self.packs.mkdir(exist_ok=True, parents=True)
self.loaded_packs = {}
def read_pack_file(self, path: str) -> Tuple[str, dict]:
"""Extraire un Pack de questions depuis un fichier .yaml"""
try:
with open(path, 'rt', encoding='utf8') as f:
pack = yaml.safe_load(f)
except Exception as e:
logger.info(msg=f"Erreur dans la lecture du fichier yaml : {e}", exc_info=True)
raise ReaderError("Erreur lors de la lecture du fichier : `{}`".format(e))
if all([i in pack for i in ("id", "name", "description", "author_id", "content")]):
if len(pack['id']) > 10:
raise InvalidFile("L'ID du pack est trop long (<= 10 caractères)")
delay = pack.get('custom_delay', 10)
if delay < 5:
delay = 5
color = pack.get('color', None)
if color:
if self.format_color(color):
color = int(self.format_color(color, "0x"), base=16)
else:
color = None
new = {"name": pack['name'],
"description": pack['description'],
"author": pack['author_id'],
"pack_thumbnail": pack.get('thumbnail', None),
"content": {},
"delay": delay,
"color": color}
for q in pack['content']:
if 'good' in pack['content'][q] and 'bad' in pack['content'][q]:
if len(pack['content'][q]['bad']) >= 3:
add_q = {'image': pack['content'][q].get('image', None),
'good': pack['content'][q]['good'],
'bad': pack['content'][q]['bad'],
'show': pack['content'][q].get('show', '')}
new['content'][q] = add_q
if len(new['content']) < 15:
raise ContentError("Le pack ne contient pas assez de questions valides (< 15)")
return pack['id'], new
raise InvalidFile("Le pack n'est pas formatté correctement, il manque des champs obligatoires (v. exemple)")
def filespaths(self, directory):
paths = []
for dirpath, _, filenames in os.walk(directory):
for f in filenames:
if f.endswith(".yaml"):
paths.append(os.path.abspath(os.path.join(dirpath, f)))
return paths
def load_packs(self):
self.loaded_packs = {}
for path in self.filespaths(str(self.packs)):
pid, content = self.read_pack_file(path)
self.loaded_packs[pid] = content
return self.loaded_packs
    async def reset_sessions_for(self, packid):
        """Delete every stored challenge session that references *packid*.

        Returns the pre-deletion snapshot of all sessions (the dict is not
        refreshed after the ``clear_raw`` calls).
        """
        sessions = await self.config.Sessions()
        for sess in sessions:
            if sessions[sess]['pack_id'] == packid:
                await self.config.Sessions.clear_raw(sess)
        return sessions
def get_random_pack(self):
if self.loaded_packs:
return random.choice([i for i in self.loaded_packs])
return None
def format_color(self, color: str, prefixe: str = None):
"""Vérifie que la couleur donnée est un hexadécimal et renvoie la couleur avec ou sans préfixe (0x ou #)"""
if len(color) >= 6:
color = color[-6:]
try:
int(color, base=16)
return color.upper() if not prefixe else prefixe + color.upper()
except ValueError:
return None
return None
    @commands.command(name="brainfck", aliases=["bf", "quiz"])
    @commands.max_concurrency(1, commands.BucketType.user)
    async def brainfck_play(self, ctx, theme_invite: str = None):
        """Play a Brainfck quiz

        <theme_invite> = Pack identifier or invitation code
        Passing nothing displays the list of available themes"""
        emcolor = await ctx.embed_color()
        # Custom confirm/cancel emojis owned by the bot.
        confirm, cancel = self.bot.get_emoji(812451214037221439), self.bot.get_emoji(812451214179434551)
        if not self.loaded_packs:
            self.load_packs()
        # --- No argument: paginate the list of available themes and return. ---
        if not theme_invite:
            txt = ""
            em = discord.Embed(title="Liste des thèmes disponibles", color=emcolor)
            page = 1
            for p in self.loaded_packs:
                nb = len(self.loaded_packs[p]['content'])
                chunk = f"• `{p}` : {self.loaded_packs[p]['description']} (#{nb})\n"
                # Flush a page whenever the embed description would exceed ~2000 chars.
                if len(txt + chunk) < 2000:
                    txt += chunk
                else:
                    em.description = txt
                    txt = chunk
                    em.set_footer(text=f"Page #{page}")
                    await ctx.send(embed=em)
                    page += 1
            if txt:
                em.description = txt
                em.set_footer(text=f"Page #{page}")
                await ctx.send(embed=em)
            else:
                await ctx.send("**Aucun thème n'est disponible**")
            return
        sessions = await self.config.Sessions()
        # The argument may be either a pack id or an existing invite code.
        packid = theme_invite.upper() if theme_invite.upper() in self.loaded_packs else None
        invite = theme_invite if theme_invite in sessions else None
        # --- Challenge mode: the argument matched a stored session invite. ---
        if invite:
            sess_author = self.bot.get_user(int(sessions[invite]['author']))
            if ctx.author == sess_author:
                return await ctx.send(f"**Impossible de jouer** • Vous êtes l'auteur de ce défi, vous ne pouvez pas vous défier vous-même !")
            sess_pack_id = sessions[invite]['pack_id']
            sess_players = sessions[invite]['leaderboard']
            # NOTE(review): leaderboard keys come back from JSON config as strings;
            # `sess_players[ctx.author.id]` below indexes with the *int* id and
            # likely raises KeyError for players stored with string keys — confirm.
            if ctx.author.id in [int(us) for us in sess_players]:
                return await ctx.send(f"**Impossible d'y rejouer** • Votre score ({sess_players[ctx.author.id]} points)"
                                      f" figure déjà dans le classement pour cette partie !")
            theme_invite = self.loaded_packs[sess_pack_id]
            packid = sess_pack_id
            packname = theme_invite['name']
            emcolor = theme_invite['color'] if theme_invite['color'] else emcolor
            em = discord.Embed(color=emcolor)
            em.set_footer(text="Accepter | Annuler")
            em.add_field(name=packname, value=theme_invite['description'])
            if theme_invite['pack_thumbnail']:
                em.set_thumbnail(url=theme_invite['pack_thumbnail'])
            if sess_author:
                desc = f"**{sess_author.name}** vous a défié sur ***{packname}***"
                em.description = desc
                em.set_author(name=sess_author, icon_url=sess_author.avatar_url)
            else:
                desc = f"Un joueur inconnu vous a défié sur ***{packname}***"
                em.description = desc
                em.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar_url)
            conf = await ctx.send(embed=em)
            start_adding_reactions(conf, [confirm, cancel])
            try:
                react, ruser = await self.bot.wait_for("reaction_add",
                                                       check=lambda m,
                                                       u: u == ctx.author and m.message.id == conf.id,
                                                       timeout=30)
            except asyncio.TimeoutError:
                return await conf.delete()
            if react.emoji == cancel:
                return await conf.delete()
        # --- Free-play mode: the argument matched a loaded pack id. ---
        elif packid:
            theme_invite = self.loaded_packs[packid]
            emcolor = theme_invite['color'] if theme_invite['color'] else emcolor
            em = discord.Embed(color=emcolor, description=theme_invite['description'], title=theme_invite['name'])
            em.set_footer(text="Jouer | Annuler")
            if theme_invite['pack_thumbnail']:
                em.set_thumbnail(url=theme_invite['pack_thumbnail'])
            conf = await ctx.send(embed=em)
            start_adding_reactions(conf, [confirm, cancel])
            try:
                react, ruser = await self.bot.wait_for("reaction_add",
                                                       check=lambda m,
                                                       u: u == ctx.author and m.message.id == conf.id,
                                                       timeout=30)
            except asyncio.TimeoutError:
                return await conf.delete()
            if react.emoji == cancel:
                return await conf.delete()
        else:
            return await ctx.send("**Identifiant de thème ou code de partie invalide** • Consultez la liste des thèmes avec `;bf` ou vérifiez que l'invitation donnée est correcte (Attention aux 'O'/0)")
        # Challenge replays reuse the stored seed so both players get the same questions.
        seed = sessions[invite]['seed'] if invite else random.randint(1, 999999)
        rng = random.Random(seed)
        pack = theme_invite
        await ctx.send("**La partie va commencer ...**")
        await asyncio.sleep(3)
        manche = 1
        pts = 0
        letters = [i for i in '🇦🇧🇨🇩']
        present_session = {'author': ctx.author.id,
                           'pack_id': packid,
                           'answers': {},
                           'score': 0,
                           'seed': seed,
                           'leaderboard': {}}
        qlist = list(pack['content'].keys())
        timelimit = pack['delay']
        # --- Main game loop: 6 rounds, the 6th being worth double points. ---
        while manche <= 6:
            question = rng.choice(qlist)
            qlist.remove(question)
            good = pack['content'][question]['good']
            bad = rng.sample(pack['content'][question]['bad'], 3)
            reps = [good] + bad
            rng.shuffle(reps)
            if manche != 6:
                em = discord.Embed(title=f"{pack['name']} • Question #{manche}",
                                   description=box(question), color=emcolor)
                em.set_footer(text="Préparez-vous ...")
            else:
                em = discord.Embed(title=f"{pack['name']} • Question #{manche} (BONUS)",
                                   description=box(question), color=emcolor)
                em.set_footer(text="Préparez-vous ... (x2 points)")
            if pack['content'][question]['image']:
                em.set_image(url=pack['content'][question]['image'])
            start = await ctx.send(embed=em)
            # Reading time scales with the question length before answers are revealed.
            await asyncio.sleep((0.075 * len(question)) + 1)
            rtxt = ""
            rdict = {}
            for rep in reps:
                rindex = reps.index(rep)
                rtxt += f"{letters[rindex]} → {rep}\n"
                rdict[letters[rindex]] = rep
            em.add_field(name="Réponses possibles", value=rtxt)
            em.set_footer(text=f"Répondez avec les emojis ci-dessous | {str(timelimit)}s")
            await start.edit(embed=em)
            start_adding_reactions(start, letters)
            starttime = time.time()
            try:
                react, ruser = await self.bot.wait_for("reaction_add",
                                                       check=lambda m,
                                                       u: u == ctx.author and m.message.id == start.id,
                                                       timeout=timelimit)
            except asyncio.TimeoutError:
                react, ruser = None, None
            finally:
                timescore = time.time() - starttime
            # Scoring: up to 100 points per round, linearly decreasing over 10 s.
            if timescore > 10:
                timescore = 10
            roundscore = round((10 - timescore) * 10)
            if manche != 6:
                end = discord.Embed(title=f"{pack['name']} • Question #{manche}",
                                    description=box(question), color=emcolor)
            else:
                end = discord.Embed(title=f"{pack['name']} • Question #{manche} (BONUS)",
                                    description=box(question), color=emcolor)
                roundscore *= 2
            reptxt = ""
            waittime = 5
            if react:
                if rdict.get(react.emoji, None) == good:
                    present_session['answers'][question] = {'answer': good,
                                                            'time': timescore}
                    pts += roundscore
                    reptxt += random.choice((f"Bravo ! La bonne réponse était **{good}** !",
                                             f"Bien joué ! La réponse était évidemment **{good}** !",
                                             f"Bonne réponse ! Il fallait répondre **{good}**"))
                else:
                    present_session['answers'][question] = {'answer': rdict[react.emoji],
                                                            'time': timescore}
                    reptxt += random.choice((f"Dommage ! La bonne réponse était **{good}** !",
                                             f"Manqué ! La réponse était **{good}** !",
                                             f"Mauvaise réponse ! Il fallait répondre **{good}**"))
                end.set_footer(text=f"Vous avez répondu en {round(timescore, 2)}s | Score actuel = {pts}")
            else:
                present_session['answers'][question] = {'answer': None,
                                                        'time': timescore}
                reptxt += random.choice((f"Une absence ? La bonne réponse était **{good}** !",
                                         f"Aucune réponse ? La réponse était **{good}** !"))
                end.set_footer(text=f"Vous n'avez pas répondu | Score actuel = {pts}")
            # In challenge mode, also show how the challenger answered this question.
            if invite:
                waittime += 3
                reptxt += "\n"
                sess_author = self.bot.get_user(int(sessions[invite]['author']))
                sess_rep = sessions[invite]['answers'][question]['answer']
                if sess_rep == None:
                    sess_rep = "[Aucune réponse]"
                sess_time = round(sessions[invite]['answers'][question]['time'], 2)
                is_good = "(Bonne réponse)" if sess_rep == good else "(Mauvaise réponse)"
                advname = sess_author.name if sess_author else "Votre adversaire"
                reptxt += f"***{advname}*** a répondu *{sess_rep}* {is_good} en {sess_time}s"
            end.add_field(name="Réponse", value=reptxt)
            if pack['content'][question].get('show', False):
                end.add_field(name="Détails", value=pack['content'][question]['show'])
                waittime += 0.03 * len(pack['content'][question]['show'])
            await start.edit(embed=end)
            manche += 1
            await asyncio.sleep(waittime)
        present_session['score'] = pts
        result = discord.Embed(title=f"{pack['name']} • Fin de la partie", color=emcolor)
        # --- Challenge mode: compare with the challenger, store score, DM a notification. ---
        if invite:
            sess_author = self.bot.get_user(int(sessions[invite]['author']))
            dvname = sess_author.name if sess_author else "Votre adversaire"
            sess_score = sessions[invite]['score']
            sessions[invite]['leaderboard'][ctx.author.id] = pts
            if pts > sess_score:
                result.description = f"Bravo, vous avez battu **{dvname}** !\n" \
                                     f"- __Votre score__ : {pts}\n" \
                                     f"- Son score : {sess_score}"
                notifdesc = f"**{ctx.author.name}** a participé à votre défi [{invite}] sur le thème ***{pack['name']}*** et a gagné :\n" \
                            f"- Son score : {pts}\n" \
                            f"- __Votre score__ : {sess_score}"
            elif pts == sess_score:
                result.description = f"Vous avez fait égalité avec **{dvname}** !\n" \
                                     f"- Vos scores : {pts}"
                notifdesc = f"**{ctx.author.name}** a participé à votre défi [{invite}] sur le thème ***{pack['name']}*** et a fait le même score que vous (égalité) :\n" \
                            f"- Vos scores : {pts}"
            else:
                result.description = f"Vous avez perdu face à **{dvname}** !\n" \
                                     f"- __Votre score__ : {pts}\n" \
                                     f"- Son score : {sess_score}"
                notifdesc = f"**{ctx.author.name}** a participé à votre défi [{invite}] sur le thème ***{pack['name']}*** et a perdu :\n" \
                            f"- Son score : {pts}\n" \
                            f"- __Votre score__ : {sess_score}"
            await self.config.Sessions.set_raw(invite, value=sessions[invite])
            result.set_footer(text=f"Votre score a été enregistré au leaderboard de ce défi. Consultez-le avec \";bfl {invite}\"")
            # NOTE(review): the per-user `receive_lb_notifs` setting is never consulted
            # before DMing the challenge author — confirm whether that is intended.
            notif = discord.Embed(description=notifdesc, color=await ctx.embed_color())
            notif.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)
            if pack['pack_thumbnail']:
                notif.set_thumbnail(url=pack['pack_thumbnail'])
            notif.set_footer(text="Notification de défi Brainfck")
            try:
                await sess_author.send(embed=notif)
            except:
                pass
        # --- Free-play mode: store a brand new session and hand out its invite code. ---
        else:
            newinvite = lambda: "&" + str(''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(5)))
            sessinvite = newinvite()
            # NOTE(review): this tests the lambda *object* against the session keys,
            # so the collision check can never trigger; it likely should read
            # `while sessinvite in sessions:` — confirm and fix.
            while newinvite in sessions:
                sessinvite = newinvite()
            await self.config.Sessions.set_raw(sessinvite, value=present_session)
            if pts >= 500: encour = " Excellent !"
            elif pts >= 350: encour = " Bien joué !"
            elif pts >= 200: encour = " Pas mal."
            else:
                encour = ""
            result.description = f"Vous avez fait un score de **{pts} points**.{encour}"
            result.add_field(name="Code de la partie", value=box(sessinvite))
            result.set_footer(text="Partagez ce code pour défier d'autres personnes sur ce thème !")
        await ctx.send(embed=result)
@commands.command(name='bfleaderboard', aliases=['bfl'])
async def brainfck_leaderboard(self, ctx, invite: str):
"""Affiche le leaderboard sur une partie (défi)"""
sessions = await self.config.Sessions()
if not self.loaded_packs:
self.load_packs()
if invite in sessions:
lb = sessions[invite]['leaderboard']
if lb:
pack_id = sessions[invite]['pack_id']
auteur = self.bot.get_user(int(sessions[invite]['author']))
autname = auteur if auteur else "Inconnu"
pack = self.loaded_packs.get(pack_id, None)
sess_score = sessions[invite]['score']
packname = pack['name'] if pack else f"SUPPR:{pack_id}"
embeds = []
tabl = []
for u in lb:
if len(tabl) < 20:
tabl.append((self.bot.get_user(int(u)) if self.bot.get_user(int(u)) else str(u), lb[u]))
else:
em = discord.Embed(title=f"Partie [{invite}] sur le thème \"{packname}\"",
color=await ctx.embed_color())
em.description = box(tabulate(tabl, headers=("Pseudo", "Score")))
em.set_footer(text=f"Auteur du défi : {autname} | Score : {sess_score}")
embeds.append(em)
tabl = []
if tabl:
em = discord.Embed(title=f"Partie [{invite}] sur le thème \"{packname}\"",
color=await ctx.embed_color())
em.description = box(tabulate(tabl, headers=("Nom", "Score")))
em.set_footer(text=f"Auteur : {autname} | Score : {sess_score}")
embeds.append(em)
if embeds:
return await menu(ctx, embeds, DEFAULT_CONTROLS)
return await ctx.send(f"**Aucun score** • Il n'y a aucun score à afficher pour ce code de partie")
else:
await ctx.send(f"**Code invalide** • Vérifiez que le code donné corresponde à un code de partie valide")
@commands.command(name="brainfcknotif", aliases=['bfnotif'])
async def brainfck_allow_notifs(self, ctx):
"""Active/Désactive la réception d'une notification quand quelqu'un termine votre défi"""
base = await self.config.user(ctx.author).receive_lb_notifs()
if base:
await self.config.user(ctx.author).receive_lb_notifs.set(False)
await ctx.send("**Notifications désactivées** • Vous ne recevrez plus de notifications lorsqu'un membre termine un de vos défis")
else:
await self.config.user(ctx.author).receive_lb_notifs.set(True)
await ctx.send(
"**Notifications activées** • Vous recevrez des notifications lorsqu'un membre termine un de vos défis")
    @commands.command(name="brainfckexemple", aliases=['bfex'])
    async def brainfck_file_example(self, ctx):
        """Show a tutorial for creating your own Brainfck theme file"""
        # The tutorial text is a single YAML example rendered in a code block;
        # leading dots stand for indentation (explained at the bottom of the text).
        txt = "Les fichiers thème sont des fichiers en **.yaml** qui suivent le format de cet exemple :\n```yaml\n" \
              "# ----- Paramètres obligatoires -----\n" \
              "id: QJ2021 # Identifiant de votre thème, 10 caractères max\n" \
              "name: Quiz Janvier 2021 # Nom de votre thème\n" \
              "description: Questions de culture générale tirées du Grand Quiz de l'Appart de Janvier 2021 # Courte description du thème\n" \
              "author_id: 172376505354158080 # Votre ID\n\n" \
              "# ----- Paramètres Optionnels -----\n" \
              "custom_delay: 8 # Modifier le délai pour répondre aux questions (min. 5s), par défaut 10\n" \
              "thumbnail: https://i.imgur.com/JpNIjwm.png # Ajouter une image pour représenter son thème\n" \
              "color: ffa8be # Couleur personnalisée du thème, en HEX sans prefixe\n\n" \
              "# ----- Contenu du thème -----\n" \
              "content:\n" \
              ". Quel pays a pour capitale Taipei ?: # Question\n" \
              ".. image: https://www.guidesulysse.com/images/destinations/iStock-861177234.jpg # Optionnel: Image à afficher s'il y en a une\n" \
              ".. good: Taïwan # Bonne réponse\n" \
              ".. bad: # Mauvaises réponses : min. 3\n" \
              "... - Corée du Nord\n" \
              "... - Japon\n" \
              "... - Corée du Sud\n" \
              "... - Vietnam\n" \
              ".. show: > # Optionnel: Message à ajouter après avoir répondu\n" \
              "... Taipei est la capitale politique, culturelle et économique *de facto* de l'île de Taïwan.\n\n" \
              "# . = Indentation```"
        em = discord.Embed(title="Créer un fichier thème pour Brainfck", color=await ctx.embed_color(), description=txt)
        em.add_field(name="Notes importantes", value="- Les fichiers doivent contenir (dans 'content') au moins 15 questions valides pour être accepté\n"
                                                     "- Mettre à jour un fichier de thème efface tous les codes de parties liées à celui-ci\n"
                                                     "- Les fichiers sont à enregistrer en YAML, formattage UTF-8\n"
                                                     "Conseil : Même s'il est possible d'utiliser un simple Bloc-notes, utilisez un logiciel tel que Notepad++ pour éditer des fichiers YAML plus facilement")
        await ctx.send(embed=em)
    @commands.group(name="brainfckset", aliases=['bfset'])
    @checks.is_owner()
    async def _brainfuck_settings(self, ctx):
        """Manage Brainfck settings (owner-only command group; subcommands do the work)"""
@_brainfuck_settings.command()
async def getfile(self, ctx, name: str):
"""Charge sur Discord un Pack de questions"""
name += ".yaml"
path = self.packs / name
try:
await ctx.send("Voici votre fichier :", files=[discord.File(path)])
except:
await ctx.send("**Fichier introuvable**")
async def save_file(self, msg: discord.Message):
filename = msg.attachments[0].filename
file_path = "{}/{}".format(str(self.packs), filename)
await msg.attachments[0].save(file_path)
self.load_packs()
return file_path
@_brainfuck_settings.command()
async def addfile(self, ctx):
"""Ajoute un fichier aux packs"""
files = ctx.message.attachments
if files:
path = await self.save_file(ctx.message)
await ctx.send("**Fichier sauvegardé** • Chemin = `{}`".format(path))
else:
await ctx.send("**Erreur** • Aucun fichier attaché au message")
@_brainfuck_settings.command()
async def deletefile(self, ctx, name: str):
"""Supprime un fichier .yaml des packs"""
name += ".yaml"
path = self.packs / name
try:
os.remove(str(path))
await ctx.send("**Fichier supprimé**")
self.load_packs()
except Exception as e:
logger.error(msg=f"Fichier non supprimé ({path})", exc_info=True)
await ctx.send(f"**Erreur** • Impossible de supprimer le fichier : `{e}`")
@_brainfuck_settings.command()
async def files(self, ctx):
"""Liste les fichiers dispos pour le Quiz"""
arr_txt = [x for x in os.listdir(str(self.packs)) if x.endswith(".yaml")]
if arr_txt:
em = discord.Embed(title="Fichiers Brainfck disponibles", description="\n".join([f"*{n}*" for n in arr_txt]))
await ctx.send(embed=em)
else:
await ctx.send(f"**Vide** • Aucun fichier n'est disponible")
@_brainfuck_settings.command()
async def reload(self, ctx):
"""Recharge manuellement la liste des packs chargés"""
try:
self.load_packs()
except Exception as e:
await ctx.send(f"**Erreur** : `{e}`")
raise
else:
await ctx.send("**Pack de questions rechargés**")
@_brainfuck_settings.command()
async def resetsess(self, ctx, packid: str):
"""Reset les sessions d'un pack"""
if packid in self.loaded_packs:
await self.reset_sessions_for(packid)
await ctx.send(f"**Reset des sessions de {packid} effectué**")
else:
await ctx.send("**Le pack demandé n'est pas chargé**")
| 47.981481 | 206 | 0.531315 |
6ef0453ab1eb43cae0885a7c010bae8ed5a79a56 | 4,641 | py | Python | geoopt/manifolds/siegel/siegel.py | leonMatzner/geoopt | 4a7058e43bf78ab5012b862076a74bec175df221 | [
"Apache-2.0"
] | 438 | 2019-03-05T11:24:03.000Z | 2022-03-31T14:46:42.000Z | geoopt/manifolds/siegel/siegel.py | leonMatzner/geoopt | 4a7058e43bf78ab5012b862076a74bec175df221 | [
"Apache-2.0"
] | 98 | 2019-03-07T21:38:24.000Z | 2022-03-25T10:48:45.000Z | geoopt/manifolds/siegel/siegel.py | leonMatzner/geoopt | 4a7058e43bf78ab5012b862076a74bec175df221 | [
"Apache-2.0"
] | 58 | 2019-04-13T04:52:16.000Z | 2022-03-14T09:26:00.000Z | from abc import ABC
from typing import Union, Tuple, Optional
import torch
from ..base import Manifold
from geoopt import linalg as lalg
from ..siegel import csym_math as sm
from .vvd_metrics import SiegelMetricType, SiegelMetricFactory
class SiegelManifold(Manifold, ABC):
    """Abstract Manifold to work on Siegel spaces.
    The implementation is aimed to work with realization of the Siegel space as
    spaces of complex symmetric matrices.
    References
    ----------
    - Federico López, Beatrice Pozzetti, Steve Trettel, Michael Strube, Anna Wienhard.
      "Symmetric Spaces for Graph Embeddings: A Finsler-Riemannian Approach", 2021.
    Parameters
    ----------
    metric: SiegelMetricType
        one of Riemannian, Finsler One, Finsler Infinity, Finsler metric of minimum entropy, or learnable weighted sum.
    rank: int
        Rank of the space. Only mandatory for "fmin" and "wsum" metrics.
    """
    __scaling__ = Manifold.__scaling__.copy()  # inherit the base manifold's scaling table
    name = "Siegel Space"
    ndim = 2  # points are matrices, so the last two dims are manifold dims
    reversible = False
    def __init__(
        self, metric: SiegelMetricType = SiegelMetricType.RIEMANNIAN, rank: int = None
    ):
        super().__init__()
        # Factory resolves the metric enum (plus rank, when needed) to a metric object.
        self.metric = SiegelMetricFactory.get(metric, rank)
    def dist(
        self, z1: torch.Tensor, z2: torch.Tensor, *, keepdim=False
    ) -> torch.Tensor:
        """
        Compute distance between two points on the manifold according to the specified metric.
        Calculates the distance for the Upper Half Space Manifold (UHSM)
        It is implemented here since the way to calculate distances in the Bounded Domain Manifold
        requires mapping the points to the UHSM, and then applying this formula.
        Parameters
        ----------
        z1 : torch.Tensor
            point on the manifold
        z2 : torch.Tensor
            point on the manifold
        keepdim : bool, optional
            keep the last dim?, by default False
        Returns
        -------
        torch.Tensor
            distance between two points
        """
        # with Z1 = X + iY, define Z3 = sqrt(Y)^-1 (Z2 - X) sqrt(Y)^-1
        x, y = z1.real, z1.imag
        inv_sqrt_y = lalg.sym_inv_sqrtm1(y).type_as(z1)
        z3 = inv_sqrt_y @ (z2 - x) @ inv_sqrt_y
        # Map into the bounded domain, where the Takagi eigenvalues live in [0, 1].
        w = sm.inverse_cayley_transform(z3)
        evalues = sm.takagi_eigvals(w)  # evalues are in ascending order e1 < e2 < en
        # assert 0 <= evalues <= 1
        eps = sm.EPS[evalues.dtype]
        assert torch.all(evalues >= 0 - eps), f"Eigenvalues: {evalues}"
        assert torch.all(evalues <= 1.01), f"Eigenvalues: {evalues}"
        # Vector-valued distance: v_i = log((1 + e_i) / (1 - e_i))
        # clamp avoids division by zero when an eigenvalue reaches 1.
        vvd = (1 + evalues) / (1 - evalues).clamp(min=eps)
        vvd = torch.log(vvd)
        # The configured metric collapses the vector-valued distance to a scalar.
        res = self.metric.compute_metric(vvd)
        return res
    def retr(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        # always assume u is scaled properly
        # First-order retraction: step then project back onto symmetric matrices.
        approx = x + u
        return self.projx(approx)
    def _check_matrices_are_symmetric(
        self, x: torch.Tensor, *, atol: float = 1e-5, rtol: float = 1e-5
    ):
        """Check that matrices are symmetric.
        Parameters
        ----------
        x : torch.Tensor
            point on the manifold
        atol : float
            absolute tolerance for allclose
        rtol : float
            relative tolerance for allclose
        Returns
        -------
        boolean
            whether the points in x are complex symmetric or not
        """
        return sm.is_complex_symmetric(x, atol, rtol)
    def projx(self, x: torch.Tensor) -> torch.Tensor:
        # Project onto the manifold by symmetrizing.
        return lalg.sym(x)
    def proju(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        # Tangent projection delegates to the Euclidean->Riemannian gradient map.
        return self.egrad2rgrad(x, u)
    def transp(self, x: torch.Tensor, y: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
        # Identity transport: tangent vectors are carried over unchanged.
        return v
    def expmap(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        raise NotImplementedError
    def logmap(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        raise NotImplementedError
    def _check_vector_on_tangent(
        self, x: torch.Tensor, u: torch.Tensor, *, atol=1e-5, rtol=1e-5
    ) -> Union[Tuple[bool, Optional[str]], bool]:
        # Tangent vectors must be symmetric in the last two dimensions.
        ok = torch.allclose(u, u.transpose(-1, -2), atol=atol, rtol=rtol)
        if not ok:
            return (
                False,
                "u is not symmetric (u != u.transpose) with atol={}, rtol={}".format(
                    atol, rtol
                ),
            )
        return True, None
    def extra_repr(self) -> str:
        return f"metric={type(self.metric).__name__}"
| 33.388489 | 119 | 0.605688 |
4cfa878696398f8647e5278b78520966f030dd4d | 1,157 | py | Python | neutron/tests/unit/mlnx/test_agent_scheduler.py | petrutlucian94/neutron | 44976d12bbe72331e536d92bb46e35a8835a75ce | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/mlnx/test_agent_scheduler.py | petrutlucian94/neutron | 44976d12bbe72331e536d92bb46e35a8835a75ce | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/mlnx/test_agent_scheduler.py | petrutlucian94/neutron | 44976d12bbe72331e536d92bb46e35a8835a75ce | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.tests.unit.mlnx import test_mlnx_plugin
from neutron.tests.unit.openvswitch import test_agent_scheduler
class MlnxAgentSchedulerTestCase(
    test_agent_scheduler.OvsAgentSchedulerTestCase):
    # Re-run the OVS agent-scheduler suite against the Mellanox plugin.
    plugin_str = test_mlnx_plugin.PLUGIN_NAME
    l3_plugin = None
class MlnxL3AgentNotifierTestCase(
    test_agent_scheduler.OvsL3AgentNotifierTestCase):
    # Re-run the OVS L3-agent-notifier suite against the Mellanox plugin.
    plugin_str = test_mlnx_plugin.PLUGIN_NAME
    l3_plugin = None
class MlnxDhcpAgentNotifierTestCase(
    test_agent_scheduler.OvsDhcpAgentNotifierTestCase):
    # Re-run the OVS DHCP-agent-notifier suite against the Mellanox plugin.
    # NOTE(review): unlike the two sibling classes above, no l3_plugin
    # override is set here — confirm whether the base class needs one.
    plugin_str = test_mlnx_plugin.PLUGIN_NAME
04ac537f3752eafe0297c40bdc4328e23236c3ff | 3,066 | py | Python | tools/low_cpu_check.py | Incubaid/arakoon | 43a8d0b26e4876ef91d9657149f105c7e57e0cb0 | [
"Apache-2.0"
] | 41 | 2015-02-11T03:23:36.000Z | 2020-12-27T12:13:52.000Z | tools/low_cpu_check.py | Incubaid/arakoon | 43a8d0b26e4876ef91d9657149f105c7e57e0cb0 | [
"Apache-2.0"
] | 36 | 2015-01-04T16:58:51.000Z | 2020-11-12T12:05:37.000Z | tools/low_cpu_check.py | Incubaid/arakoon | 43a8d0b26e4876ef91d9657149f105c7e57e0cb0 | [
"Apache-2.0"
] | 7 | 2015-07-10T08:04:01.000Z | 2021-09-28T08:09:23.000Z | import bz2
import struct
import binascii
import os
import time
import sys
def sn_from(buf, offset):
    """Read a signed 64-bit integer (native byte order) at *offset*; return (value, next_offset)."""
    (value,) = struct.unpack_from("q", buf, offset)
    return value, offset + 8
def int32_from(buf, offset):
    """Read an unsigned 32-bit integer (native byte order) at *offset*; return (value, next_offset)."""
    (value,) = struct.unpack_from("I", buf, offset)
    return value, offset + 4
def string_from(buf, offset):
    """Read a 32-bit length-prefixed byte string at *offset*; return (value, next_offset)."""
    size = struct.unpack_from("I", buf, offset)[0]
    start = offset + 4
    end = start + size
    return buf[start:end], end
def test(sn,prev):
    """Assert that entry numbers are contiguous: each sn must equal prev or prev + 1."""
    if sn == prev + 1 or sn == prev:
        pass
    else:
        raise Exception("%i <-> %i" % (sn,prev))
def do_entry(inflated, offset):
    """Decode one tlog entry (sn, crc, command) at *offset* and return ((sn, crc, cmd), next_offset).

    Sleeps for as long as the decode took, halving effective CPU usage.
    """
    started = time.time()
    sn, pos = sn_from(inflated, offset)
    crc, pos = int32_from(inflated, pos)
    cmd, pos = string_from(inflated, pos)
    # Throttle: idle for the same duration the parsing consumed.
    time.sleep(time.time() - started)
    return (sn, crc, cmd), pos
def do_chunk(prev_i, chunk):
    """Decompress one .tlf bz2 chunk and validate every entry in it.

    *prev_i* is the last entry number seen before this chunk; returns the
    last entry number of the chunk.  Sleeps as long as decompression took
    (CPU throttling), then relies on do_entry/test for per-entry checks.
    """
    t0 = time.time()
    inflated = bz2.decompress(chunk)
    t1 = time.time()
    delay = t1 - t0
    time.sleep(delay)
    too = len(inflated)
    offset = 0
    prev = prev_i
    while offset < too:
        #print "\t",binascii.hexlify(inflated[offset: offset+16])
        (sn,crc,cmd),o2 = do_entry(inflated, offset)
        test(sn,prev)
        #print sn
        offset = o2
        prev = sn
    #print prev_i,prev
    return prev
def do_tlc_chunk(prev, chunk):
    """Decompress one .tlc bz2 chunk and validate every entry in it.

    Same throttled walk as do_chunk, but for the .tlc container layout;
    returns the last entry number seen.
    """
    t0 = time.time()
    inflated = bz2.decompress(chunk)
    t1 = time.time()
    delay = t1 - t0
    time.sleep(delay)
    offset = 0
    too = len(inflated)
    while offset < too:
        (sn,crc,cmd), o2 = do_entry(inflated, offset)
        test(sn,prev)
        offset = o2
        prev = sn
    return prev
def do_tlf(first, canonical) :
    """Walk a .tlf file: each record is (last_sn: int64, bz2 chunk: length-prefixed string).

    Starting from entry number *first*, checks that each chunk's declared
    last entry number matches what do_chunk actually decoded; returns the
    final entry number.
    """
    f = open(canonical,'rb')
    all = f.read()
    f.close()
    offset = 0
    too = len(all)
    while offset < too:
        last_i,o2 = sn_from(all,offset)
        chunk, o3 = string_from(all, o2)
        new_f = do_chunk(first, chunk)
        # The stored last-entry marker must agree with the decoded chunk.
        assert last_i == new_f
        offset = o3
        first = new_f
    return first
def do_tlc(first, canonical):
    """Walk a .tlc file: each record is (n_entries: uint32, bz2 chunk: length-prefixed string).

    Starting from entry number *first*, validates every chunk via
    do_tlc_chunk and returns the final entry number.
    """
    f = open(canonical,'rb')
    all = f.read()
    f.close()
    offset = 0
    too = len(all)
    while offset < too:
        n_entries,o2 = int32_from(all,offset)
        chunk,o3 = string_from(all,o2)
        new_f = do_tlc_chunk(first, chunk)
        offset = o3
        first = new_f
    return first
# NOTE: Python 2 only (`print` statement, list-returning filter, sort(cmp)).
def do_dir(dn):
    """Validate every .tlf/.tlc file in directory *dn*, in numeric filename order.

    Each file named N.tlf/N.tlc is expected to cover exactly entries
    N*100000 .. N*100000 + 99999.
    """
    fns = filter(lambda f: f.endswith(".tlf") or f.endswith(".tlc"),
                 os.listdir(dn))
    # Sort by the numeric prefix of the filename (e.g. "010.tlc" -> 10).
    def n_from(e): return int(e[:e.index('.')])
    def cmp(a,b): return n_from(a) - n_from(b)
    fns.sort(cmp)
    for fn in fns:
        canonical = "%s/%s" % (dn,fn)
        first = int(fn[:fn.index('.')]) * 100000
        if fn.endswith(".tlf"):
            last = do_tlf(first, canonical)
        else:
            last = do_tlc(first, canonical)
        # Every file must contain exactly 100000 entries.
        assert first + 99999 == last
        print fn, "ok"
#do_tlc(500000,'/tmp/010.tlc')
# CLI entry point (Python 2): expects the tlog directory as the only argument.
if __name__ == '__main__':
    if len(sys.argv) <2:
        print "python",sys.argv[0], "<path_to_tlog_dir>"
        sys.exit(-1)
    else:
        do_dir(sys.argv[1])
| 23.227273 | 69 | 0.561644 |
c7612a0fd660718393bd2e406b13851d8a5120ad | 28,858 | py | Python | pyvista/utilities/helpers.py | JevinJ/pyvista | c9be18ed209de3f80e1a70ef01eef3355b3616ce | [
"MIT"
] | null | null | null | pyvista/utilities/helpers.py | JevinJ/pyvista | c9be18ed209de3f80e1a70ef01eef3355b3616ce | [
"MIT"
] | null | null | null | pyvista/utilities/helpers.py | JevinJ/pyvista | c9be18ed209de3f80e1a70ef01eef3355b3616ce | [
"MIT"
] | null | null | null | """Supporting functions for polydata and grid objects."""
import collections.abc
import enum
import logging
import signal
import sys
import warnings
from threading import Thread
import threading
import traceback
import numpy as np
import scooby
import vtk
import vtk.util.numpy_support as nps
import pyvista
from .fileio import from_meshio
class FieldAssociation(enum.Enum):
    """Represents which type of vtk field a scalar or vector array is associated with."""

    POINT = vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS  # per-point data
    CELL = vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS  # per-cell data
    NONE = vtk.vtkDataObject.FIELD_ASSOCIATION_NONE  # free-floating field data
    ROW = vtk.vtkDataObject.FIELD_ASSOCIATION_ROWS  # table row data
def get_vtk_type(typ):
    """Look up the VTK type id for a given python data type.

    Corrects for the string type mapping issue (nps maps strings to
    VTK_CHAR, id 3; VTK expects VTK_STRING, id 13).

    Returns
    -------
    int : the integer type id specified in vtkType.h
    """
    vtk_type = nps.get_vtk_array_type(typ)
    return 13 if vtk_type == 3 else vtk_type
def vtk_bit_array_to_char(vtkarr_bint):
    """Cast vtk bit array to a char array (bit arrays have no direct numpy mapping)."""
    vtkarr = vtk.vtkCharArray()
    vtkarr.DeepCopy(vtkarr_bint)
    return vtkarr
def vtk_id_list_to_array(vtk_id_list):
    """Convert a vtkIdList to a NumPy array."""
    count = vtk_id_list.GetNumberOfIds()
    return np.array([vtk_id_list.GetId(idx) for idx in range(count)])
def convert_string_array(arr, name=None):
    """Convert a numpy array of strings to a vtkStringArray or vice versa.

    Note that this is terribly inefficient - inefficient support
    is better than no support :). If you have ideas on how to make this faster,
    please consider opening a pull request.
    """
    if isinstance(arr, np.ndarray):
        vtkarr = vtk.vtkStringArray()
        ########### OPTIMIZE ###########
        # One InsertNextValue call per element; O(n) python-level loop.
        for val in arr:
            vtkarr.InsertNextValue(val)
        ################################
        if isinstance(name, str):
            vtkarr.SetName(name)
        return vtkarr
    # Otherwise it is a vtk array and needs to be converted back to numpy
    ############### OPTIMIZE ###############
    # Again element-by-element; result is a fixed-width unicode array.
    nvalues = arr.GetNumberOfValues()
    return np.array([arr.GetValue(i) for i in range(nvalues)], dtype='|U')
    ########################################
def convert_array(arr, name=None, deep=0, array_type=None):
    """Convert a NumPy array to a vtkDataArray or vice versa.

    Parameters
    -----------
    arr : ndarray or vtkDataArray
        A numpy array or vtkDataArray to convert.
    name : str
        The name of the data array for VTK.
    deep : bool
        If input is a numpy array, deep-copy the values.
    array_type : int, optional
        VTK array type id to force when converting numpy -> VTK.

    Returns
    -------
    vtkDataArray or ndarray
        The converted array: a ``vtkDataArray`` when the input is a NumPy
        ``ndarray``, or a NumPy ``ndarray`` when the input is a VTK array.
        Returns ``None`` when ``arr`` is ``None``.
    """
    if arr is None:
        return
    if isinstance(arr, np.ndarray):
        # Object arrays are coerced to byte strings before conversion.
        if arr.dtype is np.dtype('O'):
            arr = arr.astype('|S')
        arr = np.ascontiguousarray(arr)
        if arr.dtype.type in (np.str_, np.bytes_):
            # This handles strings
            vtk_data = convert_string_array(arr)
        else:
            # This will handle numerical data
            arr = np.ascontiguousarray(arr)
            vtk_data = nps.numpy_to_vtk(num_array=arr, deep=deep, array_type=array_type)
        if isinstance(name, str):
            vtk_data.SetName(name)
        return vtk_data
    # Otherwise input must be a vtkDataArray
    if not isinstance(arr, (vtk.vtkDataArray, vtk.vtkBitArray, vtk.vtkStringArray)):
        raise TypeError(f'Invalid input array type ({type(arr)}).')
    # Handle booleans
    if isinstance(arr, vtk.vtkBitArray):
        arr = vtk_bit_array_to_char(arr)
    # Handle string arrays
    if isinstance(arr, vtk.vtkStringArray):
        return convert_string_array(arr)
    # Convert from vtkDataArray to NumPy
    return nps.vtk_to_numpy(arr)
def is_pyvista_dataset(obj):
    """Return True if the Object is a PyVista wrapped dataset."""
    return isinstance(obj, (pyvista.Common, pyvista.MultiBlock))
def point_array(mesh, name):
    """Return the named point-data array of a vtk object as a NumPy array."""
    return convert_array(mesh.GetPointData().GetAbstractArray(name))
def field_array(mesh, name):
    """Return the named field-data array of a vtk object as a NumPy array."""
    return convert_array(mesh.GetFieldData().GetAbstractArray(name))
def cell_array(mesh, name):
    """Return the named cell-data array of a vtk object as a NumPy array."""
    return convert_array(mesh.GetCellData().GetAbstractArray(name))
def row_array(data_object, name):
    """Return the named row-data array of a vtk table as a NumPy array."""
    return convert_array(data_object.GetRowData().GetAbstractArray(name))
def parse_field_choice(field):
    """Return the id of the given field.

    Accepts either a :class:`FieldAssociation` (returned unchanged) or a
    string alias such as ``'cell'``, ``'point'``, ``'field'`` or ``'row'``.
    """
    if isinstance(field, FieldAssociation):
        return field
    if not isinstance(field, str):
        raise ValueError(f'Data field ({field}) not supported.')
    # Map every accepted spelling onto its association.
    aliases = {
        'cell': FieldAssociation.CELL, 'c': FieldAssociation.CELL,
        'cells': FieldAssociation.CELL,
        'point': FieldAssociation.POINT, 'p': FieldAssociation.POINT,
        'points': FieldAssociation.POINT,
        'field': FieldAssociation.NONE, 'f': FieldAssociation.NONE,
        'fields': FieldAssociation.NONE,
        'row': FieldAssociation.ROW, 'r': FieldAssociation.ROW,
    }
    key = field.strip().lower()
    if key not in aliases:
        raise ValueError(f'Data field ({key}) not supported.')
    return aliases[key]
def get_array(mesh, name, preference='cell', info=False, err=False):
    """Search point, cell and field data for an array.

    Parameters
    ----------
    mesh : vtk data object
        Dataset (or ``vtk.vtkTable``) to search.
    name : str
        The name of the array to get the range.
    preference : str, optional
        When scalars is specified, this is the preferred array type to
        search for in the dataset. Must be either ``'point'``,
        ``'cell'``, or ``'field'``
    info : bool
        Return info about the array rather than the array itself.
    err : bool
        Boolean to control whether to throw an error if array is not present.
    """
    # Tables only carry row data; handle them separately.
    if isinstance(mesh, vtk.vtkTable):
        arr = row_array(mesh, name)
        if arr is None and err:
            raise KeyError(f'Data array ({name}) not present in this dataset.')
        return (arr, FieldAssociation.ROW) if info else arr
    # Collect the candidate arrays from every association.
    candidates = {
        FieldAssociation.POINT: point_array(mesh, name),
        FieldAssociation.CELL: cell_array(mesh, name),
        FieldAssociation.NONE: field_array(mesh, name),
    }
    preference = parse_field_choice(preference)
    n_found = sum(value is not None for value in candidates.values())
    if n_found > 1:
        # Ambiguous name: the preference decides which array wins.
        if preference not in candidates:
            raise ValueError(f'Data field ({preference}) not supported.')
        arr = candidates[preference]
        return (arr, preference) if info else arr
    # At most one match: pick the first non-None in point/cell/field order.
    arr = None
    field = None
    for assoc in (FieldAssociation.POINT, FieldAssociation.CELL, FieldAssociation.NONE):
        if candidates[assoc] is not None:
            arr = candidates[assoc]
            field = assoc
            break
    else:
        if err:
            raise KeyError(f'Data array ({name}) not present in this dataset.')
    return (arr, field) if info else arr
def vtk_points(points, deep=True):
    """Convert numpy points to a vtkPoints object."""
    # numpy_to_vtk requires C-contiguous memory.
    contiguous = points if points.flags['C_CONTIGUOUS'] else np.ascontiguousarray(points)
    vtk_pts = vtk.vtkPoints()
    vtk_pts.SetData(nps.numpy_to_vtk(contiguous, deep=deep))
    return vtk_pts
def line_segments_from_points(points):
    """Generate non-connected line segments from points.

    Assumes points are ordered as line segments and an even number of
    points are given.

    Parameters
    ----------
    points : np.ndarray
        Points representing line segments. An even number must be given as
        every two vertices represent a single line segment. For example, two
        line segments would be represented as:

        np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])

    Returns
    -------
    lines : pyvista.PolyData
        PolyData with lines and cells.

    Examples
    --------
    This example plots two line segments at right angles to each other.

    >>> import pyvista
    >>> import numpy as np
    >>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
    >>> lines = pyvista.line_segments_from_points(points)
    >>> lines.plot() # doctest:+SKIP

    """
    if len(points) % 2 != 0:
        raise ValueError("An even number of points must be given to define each segment.")
    # Assuming ordered points, create array defining line order
    n_points = len(points)
    n_lines = n_points // 2
    # Each VTK line cell is encoded as [2, start_index, end_index].
    lines = np.c_[(2 * np.ones(n_lines, np.int_),
                   np.arange(0, n_points-1, step=2),
                   np.arange(1, n_points+1, step=2))]
    poly = pyvista.PolyData()
    poly.points = points
    poly.lines = lines
    return poly
def lines_from_points(points, close=False):
    """Make a connected line set given an array of points.

    Parameters
    ----------
    points : np.ndarray
        Points representing the vertices of the connected segments. For
        example, two line segments would be represented as:

        np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])

    close : bool, optional
        If True, close the line segments into a loop

    Returns
    -------
    lines : pyvista.PolyData
        PolyData with lines and cells.
    """
    n_points = len(points)
    # One 2-point line cell per consecutive pair: [2, i, i + 1].
    cells = np.full((n_points - 1, 3), 2, dtype=np.int_)
    cells[:, 1] = np.arange(0, n_points - 1, dtype=np.int_)
    cells[:, 2] = np.arange(1, n_points, dtype=np.int_)
    if close:
        # Append a segment from the last point back to the first.
        cells = np.append(cells, [[2, n_points - 1, 0],], axis=0)
    poly = pyvista.PolyData()
    poly.points = points
    poly.lines = cells
    return poly
def make_tri_mesh(points, faces):
    """Construct a ``pyvista.PolyData`` mesh using points and faces arrays.

    Construct a mesh from an Nx3 array of points and an Mx3 array of
    triangle indices, resulting in a mesh with N vertices and M
    triangles. This function does not require the standard VTK
    "padding" column and simplifies mesh creation.

    Parameters
    ----------
    points : np.ndarray
        Array of points with shape (N, 3) storing the vertices of the
        triangle mesh.

    faces : np.ndarray
        Array of indices with shape (M, 3) containing the triangle
        indices.

    Returns
    -------
    tri_mesh : pyvista.PolyData
        PolyData instance containing the triangle mesh.

    Examples
    --------
    This example discretizes the unit square into a triangle mesh with
    nine vertices and eight faces.

    >>> import numpy as np
    >>> import pyvista as pv
    >>> points = np.array([[0, 0, 0], [0.5, 0, 0], [1, 0, 0], [0, 0.5, 0],
    ...                    [0.5, 0.5, 0], [1, 0.5, 0], [0, 1, 0], [0.5, 1, 0],
    ...                    [1, 1, 0]])
    >>> faces = np.array([[0, 1, 4], [4, 7, 6], [2, 5, 4], [4, 5, 8],
    ...                   [0, 4, 3], [3, 4, 6], [1, 2, 4], [4, 8, 7]])
    >>> tri_mesh = pv.make_tri_mesh(points, faces)
    >>> tri_mesh.plot(show_edges=True) # doctest:+SKIP

    """
    # Validate ndim too: a 1-D array would otherwise raise IndexError on
    # ``shape[1]`` instead of the documented ValueError.
    if points.ndim != 2 or points.shape[1] != 3:
        raise ValueError("Points array should have shape (N, 3).")
    if faces.ndim != 2 or faces.shape[1] != 3:
        raise ValueError("Face array should have shape (M, 3).")
    # Prepend the VTK cell-size column: each cell is [3, i0, i1, i2].
    cells = np.empty((faces.shape[0], 4), dtype=faces.dtype)
    cells[:, 0] = 3
    cells[:, 1:] = faces
    return pyvista.PolyData(points, cells)
def vector_poly_data(orig, vec):
    """Create a vtkPolyData object composed of vectors.

    ``orig`` holds the vector origins and ``vec`` the vector components;
    both are coerced to (n, 3) arrays. The result carries the vectors as
    the active ``'vectors'`` array and their lengths as the active
    ``'mag'`` scalars.
    """
    # shape, dimension checking
    if not isinstance(orig, np.ndarray):
        orig = np.asarray(orig)
    if not isinstance(vec, np.ndarray):
        vec = np.asarray(vec)
    if orig.ndim != 2:
        orig = orig.reshape((-1, 3))
    elif orig.shape[1] != 3:
        raise ValueError('orig array must be 3D')
    if vec.ndim != 2:
        vec = vec.reshape((-1, 3))
    elif vec.shape[1] != 3:
        raise ValueError('vec array must be 3D')
    # Build the point set and one vertex cell per point.
    origin_points = vtk.vtkPoints()
    origin_points.SetData(nps.numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
    n_points = orig.shape[0]
    vert_cells = np.empty((n_points, 2), dtype=pyvista.ID_TYPE)
    vert_cells[:, 0] = 1
    vert_cells[:, 1] = np.arange(n_points, dtype=pyvista.ID_TYPE)
    cell_array = pyvista.utilities.cells.CellArray(vert_cells, n_points)
    pdata = vtk.vtkPolyData()
    pdata.SetPoints(origin_points)
    pdata.SetVerts(cell_array)
    # Attach the vectors and make them active.
    vectors_vtk = nps.numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
    vectors_vtk.SetName('vectors')
    pdata.GetPointData().AddArray(vectors_vtk)
    pdata.GetPointData().SetActiveVectors('vectors')
    # Attach the vector magnitudes and make them the active scalars.
    magnitudes = (vec * vec).sum(1)**0.5
    mag_vtk = nps.numpy_to_vtk(np.ascontiguousarray(magnitudes), deep=True)
    mag_vtk.SetName('mag')
    pdata.GetPointData().AddArray(mag_vtk)
    pdata.GetPointData().SetActiveScalars('mag')
    return pyvista.PolyData(pdata)
def trans_from_matrix(matrix): # pragma: no cover
    """Convert a vtk matrix to a numpy.ndarray.

    DEPRECATED: Please use ``array_from_vtkmatrix``.
    """
    # import needs to happen here to prevent a circular import
    from pyvista.core.errors import DeprecationError
    # Always raises: this function exists only to steer callers to the
    # replacement API.
    raise DeprecationError('DEPRECATED: Please use ``array_from_vtkmatrix``.')
def array_from_vtkmatrix(matrix):
    """Convert a vtk matrix to a ``numpy.ndarray``.

    Parameters
    ----------
    matrix : vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4
        The vtk matrix to be converted to a ``numpy.ndarray``.
        Returned ndarray has shape (3, 3) or (4, 4) as appropriate.
    """
    if isinstance(matrix, vtk.vtkMatrix3x3):
        order = 3
    elif isinstance(matrix, vtk.vtkMatrix4x4):
        order = 4
    else:
        raise TypeError('Expected vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4 input,'
                        f' got {type(matrix).__name__} instead.')
    # Pull every element through the VTK accessor into a float array.
    return np.array([[matrix.GetElement(i, j) for j in range(order)]
                     for i in range(order)])
def vtkmatrix_from_array(array):
    """Convert a ``numpy.ndarray`` or array-like to a vtk matrix.

    Parameters
    ----------
    array : numpy.ndarray or array-like
        The array or array-like to be converted to a vtk matrix.
        Shape (3, 3) gets converted to a ``vtk.vtkMatrix3x3``, shape (4, 4)
        gets converted to a ``vtk.vtkMatrix4x4``. No other shapes are valid.
    """
    array = np.asarray(array)
    builders = {(3, 3): vtk.vtkMatrix3x3, (4, 4): vtk.vtkMatrix4x4}
    if array.shape not in builders:
        raise ValueError(f'Invalid shape {array.shape}, must be (3, 3) or (4, 4).')
    matrix = builders[array.shape]()
    # Copy every element into the VTK matrix.
    for i, row_values in enumerate(array):
        for j, value in enumerate(row_values):
            matrix.SetElement(i, j, value)
    return matrix
def is_meshio_mesh(mesh):
    """Test if passed object is instance of ``meshio.Mesh``."""
    try:
        from meshio import Mesh
    except ImportError:
        # meshio is optional; without it nothing can be a meshio mesh.
        return False
    return isinstance(mesh, Mesh)
def wrap(dataset):
    """Wrap any given VTK data object to its appropriate PyVista data object.

    Other formats that are supported include:

    * 2D :class:`numpy.ndarray` of XYZ vertices
    * 3D :class:`numpy.ndarray` representing a volume. Values will be scalars.
    * 3D :class:`trimesh.Trimesh` mesh.

    Parameters
    ----------
    dataset : :class:`numpy.ndarray`, :class:`trimesh.Trimesh`, or VTK object
        Dataset to wrap.

    Returns
    -------
    wrapped_dataset : pyvista class
        The `pyvista` wrapped dataset.

    Examples
    --------
    Wrap a numpy array representing a random point cloud

    >>> import numpy as np
    >>> import pyvista
    >>> points = np.random.random((10, 3))
    >>> cloud = pyvista.wrap(points)

    Wrap a Trimesh object

    >>> import trimesh
    >>> import pyvista
    >>> points = [[0, 0, 0], [0, 0, 1], [0, 1, 0]]
    >>> faces = [[0, 1, 2]]
    >>> tmesh = trimesh.Trimesh(points, faces=faces, process=False)
    >>> mesh = pyvista.wrap(tmesh)

    Wrap a VTK object

    >>> import pyvista
    >>> import vtk
    >>> points = vtk.vtkPoints()
    >>> p = [1.0, 2.0, 3.0]
    >>> vertices = vtk.vtkCellArray()
    >>> pid = points.InsertNextPoint(p)
    >>> _ = vertices.InsertNextCell(1)
    >>> _ = vertices.InsertCellPoint(pid)
    >>> point = vtk.vtkPolyData()
    >>> _ = point.SetPoints(points)
    >>> _ = point.SetVerts(vertices)
    >>> mesh = pyvista.wrap(point)
    """
    if dataset is None:
        return None
    if hasattr(dataset, 'GetClassName'):
        # A VTK data object: map its class name onto the PyVista wrapper.
        # The table is built lazily here so non-VTK inputs never touch it.
        wrappers = {
            'vtkUnstructuredGrid': pyvista.UnstructuredGrid,
            'vtkRectilinearGrid': pyvista.RectilinearGrid,
            'vtkStructuredGrid': pyvista.StructuredGrid,
            'vtkPolyData': pyvista.PolyData,
            'vtkImageData': pyvista.UniformGrid,
            'vtkStructuredPoints': pyvista.UniformGrid,
            'vtkMultiBlockDataSet': pyvista.MultiBlock,
            'vtkTable': pyvista.Table,
            # 'vtkParametricSpline': pyvista.Spline,
        }
        key = dataset.GetClassName()
        try:
            return wrappers[key](dataset)
        except KeyError:
            logging.warning(f'VTK data type ({key}) is not currently supported by pyvista.')
            return dataset  # if not supported just passes the VTK data object
    if isinstance(dataset, np.ndarray):
        if dataset.ndim == 1 and dataset.shape[0] == 3:
            # A single XYZ point.
            return pyvista.PolyData(dataset)
        if dataset.ndim == 2 and dataset.shape[1] == 3:
            # An (n, 3) point cloud.
            return pyvista.PolyData(dataset)
        if dataset.ndim == 3:
            # A volume: flatten the values into the active scalars.
            mesh = pyvista.UniformGrid(dataset.shape)
            mesh['values'] = dataset.ravel(order='F')
            mesh.active_scalars_name = 'values'
            return mesh
        # Debug print removed; report the offending shape in the error instead.
        raise NotImplementedError(
            f'NumPy array with shape {dataset.shape} could not be converted to PyVista.')
    if is_meshio_mesh(dataset):
        return from_meshio(dataset)
    if dataset.__class__.__name__ == 'Trimesh':
        # trimesh doesn't pad faces
        n_face = dataset.faces.shape[0]
        faces = np.empty((n_face, 4), dataset.faces.dtype)
        faces[:, 1:] = dataset.faces
        faces[:, 0] = 3
        return pyvista.PolyData(np.asarray(dataset.vertices), faces)
    raise NotImplementedError(f'Type ({type(dataset)}) not able to be wrapped into a PyVista mesh.')
def image_to_texture(image):
    """Convert ``vtkImageData`` (:class:`pyvista.UniformGrid`) to a ``vtkTexture``."""
    texture = pyvista.Texture(image)
    return texture
def numpy_to_texture(image):
    """Convert a NumPy image array to a vtk.vtkTexture."""
    texture = pyvista.Texture(image)
    return texture
def is_inside_bounds(point, bounds):
    """Check if a point is inside a set of bounds.

    This is implemented through recursion so that this is N-dimensional.
    """
    # Promote a bare scalar to a 1-D point.
    if isinstance(point, (int, float)):
        point = [point]
    if isinstance(point, (np.ndarray, collections.abc.Sequence)) and not isinstance(point, collections.deque):
        # bounds must supply a (lower, upper) pair for every coordinate.
        if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0:
            raise ValueError('Bounds mismatch point dimensionality')
        return is_inside_bounds(collections.deque(point), collections.deque(bounds))
    if not isinstance(point, collections.deque):
        raise TypeError(f'Unknown input data type ({type(point)}).')
    if len(point) < 1:
        # All coordinates consumed and every one was inside.
        return True
    coordinate = point.popleft()
    low = bounds.popleft()
    high = bounds.popleft()
    if not (low <= coordinate <= high):
        return False
    return is_inside_bounds(point, bounds)
def fit_plane_to_points(points, return_meta=False):
    """Fit a plane to a set of points.

    Parameters
    ----------
    points : np.ndarray
        Size n by 3 array of points to fit a plane through

    return_meta : bool
        If true, also returns the center and normal used to generate the plane
    """
    data = np.array(points)
    center = data.mean(axis=0)
    # SVD of the centered points: the first two right-singular vectors span
    # the best-fit plane, so their cross product is its normal.
    _, _, vh = np.linalg.svd(data - center)
    normal = np.cross(vh[0], vh[1])
    plane = pyvista.Plane(center=center, direction=normal)
    return (plane, center, normal) if return_meta else plane
def raise_not_matching(scalars, mesh):
    """Raise exception about inconsistencies."""
    # Tables are sized by rows; datasets by points/cells.
    if isinstance(mesh, vtk.vtkTable):
        raise ValueError(f'Number of scalars ({scalars.size}) must match number of rows ({mesh.n_rows}).')
    raise ValueError(f'Number of scalars ({scalars.size}) '
                     f'must match either the number of points ({mesh.n_points}) '
                     f'or the number of cells ({mesh.n_cells}).')
def generate_plane(normal, origin):
    """Return a vtk.vtkPlane."""
    plane = vtk.vtkPlane()
    # NORMAL MUST HAVE MAGNITUDE OF 1
    unit_normal = normal / np.linalg.norm(normal)
    plane.SetNormal(unit_normal)
    plane.SetOrigin(origin)
    return plane
def try_callback(func, *args):
    """Wrap a given callback in a try statement.

    Exceptions raised by the callback are logged as warnings instead of
    propagating, so a broken callback cannot crash the caller.
    """
    try:
        func(*args)
    except Exception:
        exc_type, exc_value, exc_tb = sys.exc_info()
        # Drop the frame for try_callback itself; keep the callback's frames.
        frames = traceback.extract_tb(exc_tb)[1:]
        detail = ''.join(traceback.format_list(frames)
                         + traceback.format_exception_only(exc_type, exc_value))
        message = ('Encountered issue in callback (most recent call last):\n'
                   + detail.rstrip('\n'))
        logging.warning(message)
    return
def check_depth_peeling(number_of_peels=100, occlusion_ratio=0.0):
    """Check if depth peeling is available.

    Attempts to use depth peeling to see if it is available for the current
    environment. Returns ``True`` if depth peeling is available and has been
    successfully leveraged, otherwise ``False``.
    """
    # A translucent sphere is the minimal scene that exercises peeling.
    sphere = vtk.vtkSphereSource()
    sphere_mapper = vtk.vtkPolyDataMapper()
    sphere_mapper.SetInputConnection(sphere.GetOutputPort())
    sphere_actor = vtk.vtkActor()
    sphere_actor.SetMapper(sphere_mapper)
    # requires opacity < 1
    sphere_actor.GetProperty().SetOpacity(0.5)
    renderer = vtk.vtkRenderer()
    render_window = vtk.vtkRenderWindow()
    render_window.AddRenderer(renderer)
    render_window.SetOffScreenRendering(True)
    render_window.SetAlphaBitPlanes(True)
    render_window.SetMultiSamples(0)
    renderer.AddActor(sphere_actor)
    renderer.SetUseDepthPeeling(True)
    renderer.SetMaximumNumberOfPeels(number_of_peels)
    renderer.SetOcclusionRatio(occlusion_ratio)
    render_window.Render()
    return renderer.GetLastRenderingUsedDepthPeeling() == 1
def threaded(fn):
    """Call a function using a thread.

    Returns a wrapper that launches ``fn`` in a new thread and returns the
    started :class:`threading.Thread` so callers can ``join()`` it.
    """
    import functools

    @functools.wraps(fn)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        thread = Thread(target=fn, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return wrapper
class conditional_decorator:
    """Conditional decorator for methods.

    Applies the stored decorator to a function only when the stored
    condition is truthy; otherwise the function is returned untouched.
    """

    def __init__(self, dec, condition):
        """Store the decorator and the condition controlling its use."""
        self.decorator = dec
        self.condition = condition

    def __call__(self, func):
        """Return ``func``, decorated only when the condition holds."""
        if self.condition:
            return self.decorator(func)
        # Condition not met: hand the function back unchanged.
        return func
class ProgressMonitor():
    """A standard class for monitoring the progress of a VTK algorithm.

    This must be use in a ``with`` context and it will block keyboard
    interrupts from happening until the exit event as interrupts will crash
    the kernel if the VTK algorithm is still executing.
    """

    def __init__(self, algorithm, message="", scaling=100):
        """Initialize observer.

        ``algorithm`` is the VTK algorithm to observe and ``message`` labels
        the progress bar.  ``scaling`` is accepted but not used anywhere in
        this class.
        """
        try:
            # Probe only: verifies tqdm is importable; the name is re-imported
            # in __enter__ where it is actually used.
            from tqdm import tqdm
        except ImportError:
            raise ImportError("Please install `tqdm` to monitor algorithms.")
        self.event_type = vtk.vtkCommand.ProgressEvent
        self.progress = 0.0
        self._last_progress = self.progress
        self.algorithm = algorithm
        self.message = message
        # Set by handler() when SIGINT arrives mid-run; checked in __call__.
        self._interrupt_signal_received = False
        self._old_progress = 0
        self._old_handler = None
        self._progress_bar = None

    def handler(self, sig, frame):
        """Pass signal to custom interrupt handler.

        Records the SIGINT instead of raising, so the running VTK algorithm
        can be aborted cleanly on the next progress event.
        """
        self._interrupt_signal_received = (sig, frame)
        logging.debug('SIGINT received. Delaying KeyboardInterrupt until '
                      'VTK algorithm finishes.')

    def __call__(self, obj, event, *args):
        """Call progress update callback.

        On an event occurrence, this function executes.
        """
        if self._interrupt_signal_received:
            # A SIGINT arrived earlier: ask the algorithm to stop.
            obj.AbortExecuteOn()
        else:
            # Advance the bar by the delta since the last event.
            progress = obj.GetProgress()
            step = progress - self._old_progress
            self._progress_bar.update(step)
            self._old_progress = progress

    def __enter__(self):
        """Enter event for ``with`` context.

        Installs the SIGINT handler (main thread only), creates the tqdm bar
        and registers this object as a VTK progress observer.
        """
        from tqdm import tqdm
        # check if in main thread
        if threading.current_thread().__class__.__name__ == '_MainThread':
            self._old_handler = signal.signal(signal.SIGINT, self.handler)
        self._progress_bar = tqdm(total=1, leave=True,
                                  bar_format='{l_bar}{bar}[{elapsed}<{remaining}]')
        self._progress_bar.set_description(self.message)
        self.algorithm.AddObserver(self.event_type, self)
        return self._progress_bar

    def __exit__(self, type, value, traceback):
        """Exit event for ``with`` context.

        Finalizes the bar, detaches the observer and restores the original
        SIGINT handler.
        """
        self._progress_bar.total = 1
        self._progress_bar.refresh()
        self._progress_bar.close()
        self.algorithm.RemoveObservers(self.event_type)
        if threading.current_thread().__class__.__name__ == '_MainThread':
            signal.signal(signal.SIGINT, self._old_handler)
def abstract_class(cls_):
    """Decorate a class, overriding __new__.

    Prevents the decorated class from being instantiated directly, similar
    to abc.ABCMeta, but without requiring an abstract method. Subclasses
    may still be instantiated.
    """
    def _guarded_new(cls, *args, **kwargs):
        # Only the decorated class itself is forbidden; subclasses pass.
        if cls is cls_:
            raise TypeError(f'{cls.__name__} is an abstract class and may not be instantiated.')
        return object.__new__(cls)

    cls_.__new__ = _guarded_new
    return cls_
def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):
    """Rotate points angle (in deg) about an axis.

    Returns the rotated copy, or ``None`` when ``inplace=True`` (the input
    array is modified in place).
    """
    axis = axis.lower()
    if axis not in ('x', 'y', 'z'):
        raise ValueError('invalid axis. Must be either "x", "y", or "z"')
    # Work on a copy unless the caller asked for in-place modification.
    if not inplace:
        points = points.copy()
    # Convert degrees to radians when requested.
    if deg:
        angle *= np.pi / 180
    cos_t = np.cos(angle)
    sin_t = np.sin(angle)
    if axis == 'x':
        new_y = points[:, 1] * cos_t - points[:, 2] * sin_t
        new_z = points[:, 1] * sin_t + points[:, 2] * cos_t
        points[:, 1] = new_y
        points[:, 2] = new_z
    elif axis == 'y':
        new_x = points[:, 0] * cos_t + points[:, 2] * sin_t
        new_z = -points[:, 0] * sin_t + points[:, 2] * cos_t
        points[:, 0] = new_x
        points[:, 2] = new_z
    else:
        new_x = points[:, 0] * cos_t - points[:, 1] * sin_t
        new_y = points[:, 0] * sin_t + points[:, 1] * cos_t
        points[:, 0] = new_x
        points[:, 1] = new_y
    if not inplace:
        return points
| 32.243575 | 110 | 0.622808 |
039c0ff62e22ab614c6961d3a82fe1fa46f5e18e | 127 | py | Python | __init__.py | munasaber/djlib | 2066353ff718a6fe30dd8897f635ac0f4616b948 | [
"MIT"
] | null | null | null | __init__.py | munasaber/djlib | 2066353ff718a6fe30dd8897f635ac0f4616b948 | [
"MIT"
] | null | null | null | __init__.py | munasaber/djlib | 2066353ff718a6fe30dd8897f635ac0f4616b948 | [
"MIT"
] | null | null | null | from . import clex
from . import mc
from . import casmcalls
from . import vasputils
from . import voltage
from .djlib import *
| 18.142857 | 23 | 0.755906 |
65c9968621cc82c96799c6059ed2551c70dfc1c5 | 6,446 | py | Python | data_preprocessing.py | hwRG/FastSpeech2-Pytorch-old-man_city | c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573 | [
"MIT"
] | null | null | null | data_preprocessing.py | hwRG/FastSpeech2-Pytorch-old-man_city | c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573 | [
"MIT"
] | null | null | null | data_preprocessing.py | hwRG/FastSpeech2-Pytorch-old-man_city | c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573 | [
"MIT"
] | null | null | null | ### Data Preprocessing
## 1. Json to Transcript
## 2. Aligner
## 3. Text Replace
from jamo import h2j
import json
import os, re, tqdm
import unicodedata
from tqdm import tqdm
import hparams as hp
# Dataset name from hyper-parameters; all derived paths hang off it.
name = hp.dataset
# Remember where we started so outputs can be moved back here later.
first_dir = os.getcwd()
# Pipeline file names and directories (relative to dataset/<name>).
transcript = name + '_transcript.txt'
dict_name = name + '_korean_dict.txt'
data_dir = 'wavs'
json_label_dir = 'label'
def change_name(base_dir, format):
    """Rename speaker directories and their audio files to digits-only names.

    Walks ``base_dir``/<speaker>/<file> and strips every non-digit character
    from both directory and file names, renaming them on disk.  ``format``
    is the file extension (e.g. ``'wav'``) whose length is used to re-attach
    the suffix.  NOTE: the parameter shadows the builtin ``format``.
    """
    print('Change', format, 'name')
    cnt = 0
    speaker_table = os.listdir(base_dir)
    new_speaker_table = []
    for speaker in speaker_table:
        # chdir into base_dir only once, before the very first rename.
        if cnt == 0:
            os.chdir(base_dir)
        new_speaker_name = re.sub(r'[^0-9]', '', speaker)
        overlap = 1
        # Resolve digit-name collisions by overwriting the leading digit
        # with an incrementing counter.
        while new_speaker_name in new_speaker_table:
            print(new_speaker_name, 'is dangerous')
            new_speaker_name = str(overlap) + new_speaker_name[1:]
            overlap += 1
        new_speaker_table.append(re.sub(r'[^0-9]', '', new_speaker_name))
        print(new_speaker_name, 'ok')
        temp = 0
        for wav in os.listdir(speaker):
            # chdir into the speaker directory only once per speaker.
            if temp == 0:
                os.chdir(speaker)
            new_wav_name = re.sub(r'[^0-9]', '', wav)
            # The new wav name must be used as-is.
            if new_wav_name[:len(new_speaker_name)] != wav:
                if new_wav_name[:len(new_speaker_name)] == new_speaker_name:
                    new_wav_name = new_wav_name + wav[-(len(format)+1):]
                else:
                    new_wav_name = new_speaker_name + new_wav_name + wav[-(len(format)+1):]
                os.rename(wav, new_wav_name)
            temp+=1; cnt +=1
        # NOTE(review): if a speaker directory is empty, the chdir(speaker)
        # above never ran, so this '../' would step out of base_dir — confirm.
        os.chdir('../')
        os.rename(speaker, new_speaker_name)
    print(cnt,'All Done', end='\n\n')
    os.chdir('../')
def json_to_transcripts():
    """Build the pipe-separated transcript file from the JSON label files.

    Reads every <speaker>/<file>.json under ``json_label_dir``, extracts the
    STT text and recording time, and writes one line per utterance to
    ``transcript`` in the format:
    ``<spk>/<file>.wav|<text>|<text>|<decomposed text>|<duration>|None``.
    """
    speakers = os.listdir(json_label_dir)
    speakers.sort()
    print(len(speakers), "speaker's are Sorted.")
    os.chdir(json_label_dir)
    utterance_text = []
    cnt = 1
    for speaker in speakers:
        for file in os.listdir(speaker):
            # Progress ping every 1000 files.
            if cnt % 1000 == 0:
                print(cnt, 'Done')
            utterance_set = []
            with open(os.path.join(speaker, file)) as f:
                json_data = json.load(f)
            # Swap the .json suffix for .wav to reference the audio file.
            utterance_set.append(file[:-4] + 'wav')
            utterance_set.append(line_replace(json_data['발화정보']['stt']))
            # NFD-decompose Hangul syllables into jamo for the aligner.
            sep_text = unicodedata.normalize('NFD', line_replace(json_data['발화정보']['stt']))
            utterance_set.append(sep_text)
            utterance_set.append(round(float(json_data['발화정보']['recrdTime']), 1))
            utterance_text.append(utterance_set)
            cnt += 1
    print(cnt-1, 'All Done')
    os.chdir('../')
    with open(transcript, "w") as file:
        for utt in utterance_text:
            file.write(utt[0][:6] + '/' + utt[0] + '|' + utt[1] + '|' + utt[1] + '|' + utt[2] + '|' + str(utt[3]) + '|' + 'None\n')
def line_replace(line):
    """Strip annotation tags and punctuation from a transcript line."""
    # Ordered removal table: longer tag forms must precede their prefixes
    # (e.g. '(SP:)' before '(SP:'), and ')' must go before '(' so that
    # partially-matched tags are cleaned up.  The empty-string entries
    # mirror the original no-op replacements and change nothing.
    removals = (
        '(SP:)', '(SP:',
        '(SN:)', '(SN:',
        '(NO:)', '(NO:',
        'spn',
        '', '', '', '',
        '毛',
        ')', '(', '"', '.', '[', ',', '!', '?', ']', '.',
    )
    for token in removals:
        line = line.replace(token, '')
    # Collapse double spaces left behind by the removals.
    return line.replace('  ', ' ')
def aligner():
    """Write per-utterance ``.lab`` text files and a jamo dictionary.

    Reads the pipe-separated ``transcript`` file, cleans each script, writes
    it to ``<data_dir>/<id>.lab``, then builds ``dict_name`` mapping each
    whitespace-separated jamo token to its space-joined characters.
    """
    filters = '([.,!?])"'
    file_list = []
    with open(transcript, 'r', encoding='utf-8') as f:
        for line in f.readlines():
            temp = line.split('|')
            # Column 0 is the relative wav path, column 3 the decomposed text.
            file_dir, script = temp[0], temp[3]
            script = re.sub(re.compile(filters), '', script)
            script = line_replace(script)  # !!! the core stripping happens here
            #file_dir = file_dir.split('/')  # if splitting per folder
            fn = file_dir[:-3] + 'lab'
            file_dir = os.path.join(data_dir, fn)
            #print(file_dir)
            with open(file_dir, 'w', encoding='utf-8') as f:
                f.write(script)
            file_list.append(os.path.join(file_dir))
    jamo_dict = {}
    for file_name in tqdm(file_list):
        sentence = open(file_name, 'r', encoding='utf-8').readline()
        # Decompose Hangul syllables to jamo, split on spaces.
        jamo = h2j(sentence).split(' ')
        for i, s in enumerate(jamo):
            if s not in jamo_dict:
                jamo_dict[s] = ' '.join(jamo[i])
    with open(dict_name, 'w', encoding='utf-8') as f:
        for key in jamo_dict.keys():
            content = '{}\t{}\n'.format(key, jamo_dict[key])
            f.write(content)
    print("Aligner Done\n")
def mfa_train():
    """Train Montreal Forced Aligner models by shelling out to the ``mfa`` CLI.

    Trains a g2p model from the jamo dictionary, generates a pronunciation
    lexicon, trains the acoustic model, then zips the resulting TextGrids
    and moves the archive back to ``first_dir``.
    """
    print("MFA Training Start.. \n")
    # Grapheme-to-phoneme model from the jamo dictionary.
    os.system('mfa train_g2p ' + dict_name + ' ' + name + '_korean.zip --clear')
    print("MFA train_g2p Done\n")
    # Generate the pronunciation lexicon for the corpus.
    os.system('mfa g2p ' + name + '_korean.zip ' + data_dir + ' ' + name + '_korean.txt')
    print("MFA g2p Done\n")
    os.system('mfa train ' + data_dir + ' ' + name + '_korean.txt ./textgrids --clean')
    # NOTE(review): assumes MFA wrote alignments under ~/Documents/MFA with
    # this exact layout — confirm for the installed MFA version.
    os.system('mv ~/Documents/MFA/wavs_train_acoustic_model/sat_2_ali/textgrids ./')
    os.system('zip -r textgrids.zip textgrids')
    os.system('mv textgrids.zip ' + first_dir)  # move it to the main dir
    print("MFA Training Done! \n")
def lab_separate():
    """Move generated ``.lab`` files from ``wavs/<spk>/`` into ``lab/<spk>/``.

    Creates a parallel ``lab`` directory tree and relocates every ``.lab``
    file, leaving audio files in place.  Uses :func:`shutil.move` instead of
    shelling out to ``mv`` so it is portable and safe for odd file names.
    """
    import shutil
    speaker_list = os.listdir('wavs')
    os.mkdir('lab')
    for speaker in speaker_list:
        os.mkdir(os.path.join('lab', speaker))
        lab_list = os.listdir(os.path.join('wavs', speaker))
        for lab in lab_list:
            if lab[-3:] == 'lab':
                shutil.move(os.path.join('wavs', speaker, lab),
                            os.path.join('lab', speaker))
if __name__ == '__main__':
    # Work from the dataset root so all relative paths below resolve.
    os.chdir('dataset/' + hp.dataset)
    # Normalise speaker/wav file names to digits only.
    change_name('wavs', 'wav')
    #change_name('label', 'json')
    #json_to_transcripts()
    # Build .lab files and the jamo dictionary, then run MFA training
    # and separate the .lab files into their own tree.
    aligner()
    mfa_train()
    lab_separate()
5e02ae6b1c8cb7febbb96c9e1913b3c3300398b4 | 257 | py | Python | src/main/resources/assets/openpython/opos/v1.1/lib/micropython/gettext.py | fossabot/OpenPython | 8fe3f794f2a6c543d96c1ef5c097ffa18f90b680 | [
"PSF-2.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 1,556 | 2015-01-18T01:10:21.000Z | 2022-03-31T23:27:33.000Z | unix-ffi/gettext/gettext.py | Li-Lian1069/micropython-lib | 1dfca5ad343b2841965df6c4e59f92d6d94a24bd | [
"PSF-2.0"
] | 414 | 2015-01-01T09:01:22.000Z | 2022-03-31T15:08:24.000Z | unix-ffi/gettext/gettext.py | Li-Lian1069/micropython-lib | 1dfca5ad343b2841965df6c4e59f92d6d94a24bd | [
"PSF-2.0"
] | 859 | 2015-02-05T13:23:00.000Z | 2022-03-28T02:28:16.000Z | import ffilib
# FFI handle to the C standard library.
libc = ffilib.libc()
# char *gettext(const char *msgid) — "s" return, "s" argument.
gettext_ = libc.func("s", "gettext", "s")
# char *ngettext(const char *msgid, const char *msgid_plural, unsigned long n).
ngettext_ = libc.func("s", "ngettext", "ssL")
def gettext(message):
    """Return the localized translation of *message* via libc gettext."""
    return gettext_(message)
def ngettext(singular, plural, n):
    """Return the plural-aware translation for count *n* via libc ngettext."""
    return ngettext_(singular, plural, n)
| 17.133333 | 45 | 0.677043 |
b567af380fd6ac6c9951e289cdba0dc6da54a2fb | 538 | py | Python | python/hail/typecheck/__init__.py | maccum/hail | e9e8a40bb4f0c2337e5088c26186a4da4948bed2 | [
"MIT"
] | null | null | null | python/hail/typecheck/__init__.py | maccum/hail | e9e8a40bb4f0c2337e5088c26186a4da4948bed2 | [
"MIT"
] | null | null | null | python/hail/typecheck/__init__.py | maccum/hail | e9e8a40bb4f0c2337e5088c26186a4da4948bed2 | [
"MIT"
] | null | null | null | from .check import *
# Public API re-exported from .check; keep this list in sync with the
# definitions there.
__all__ = ['TypeChecker',
           'typecheck',
           'typecheck_method',
           'anytype',
           'nullable',
           'sequenceof',
           'tupleof',
           'sized_tupleof',
           'dictof',
           'setof',
           'oneof',
           'exactly',
           'numeric',
           'char',
           'lazy',
           'enumeration',
           'identity',
           'transformed',
           'func_spec',
           'table_key_type',
           'TypecheckFailure',
           ]
f4b9660567e98f61f12d7c85bdcb71e46cd81e71 | 1,078 | py | Python | bitcoinaddress/key/seed.py | Arsi44/bitcoinaddress | 5a87cb81a072a8325d62c26ca109e3eb5f82270f | [
"MIT"
] | null | null | null | bitcoinaddress/key/seed.py | Arsi44/bitcoinaddress | 5a87cb81a072a8325d62c26ca109e3eb5f82270f | [
"MIT"
] | null | null | null | bitcoinaddress/key/seed.py | Arsi44/bitcoinaddress | 5a87cb81a072a8325d62c26ca109e3eb5f82270f | [
"MIT"
] | null | null | null | # Bitcoin Address v0.1
# Copyright (c) 2020 - https://github.com/fortesp/bitcoinaddress
# This software is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present distribution,
# or http://opensource.org/licenses/MIT.
import os
import time
from random import randrange
class Seed:
    """Entropy container used to derive Bitcoin keys.

    ``entropy`` is the hex/decimal string the key derivation consumes; when
    not supplied, a fresh value is generated from the OS CSPRNG mixed with
    a large random integer and a microsecond timestamp.
    """

    def __init__(self, entropy=None, entropy_seed=None):
        # ``entropy_seed`` is stored for API compatibility but is not used
        # by ``random()`` (its original branches were identical).
        self.entropy_seed = entropy_seed
        self.entropy = entropy
        if self.entropy is None:
            self.generate()

    def generate(self):
        """Populate ``self.entropy`` with fresh randomness."""
        self.entropy = self.random()

    @staticmethod
    def of(entropy=None):
        """Alternate constructor: build a Seed from an explicit entropy string."""
        return Seed(entropy)

    def random(self):
        """Return a new entropy string.

        From the bitcoin project: mixes 32 CSPRNG bytes (hex), a random
        256-bit integer and the current time in microseconds.  The original
        if/else on ``entropy_seed`` had two byte-identical branches, so the
        dead check has been removed.
        """
        return str(os.urandom(32).hex()) + str(randrange(2 ** 256)) + str(int(time.time() * 1000000))

    def __str__(self):
        return self.entropy
| 29.135135 | 105 | 0.641002 |
947b9204f4667c39296401f3df28429eae6e7d8a | 1,538 | py | Python | http_request_randomizer/requests/parsers/js/UnPacker.py | alsrua7222/HTTP_Request_Randomizer | 9ab14148becf58e39292e479629ef08a265bd6a3 | [
"MIT"
] | null | null | null | http_request_randomizer/requests/parsers/js/UnPacker.py | alsrua7222/HTTP_Request_Randomizer | 9ab14148becf58e39292e479629ef08a265bd6a3 | [
"MIT"
] | null | null | null | http_request_randomizer/requests/parsers/js/UnPacker.py | alsrua7222/HTTP_Request_Randomizer | 9ab14148becf58e39292e479629ef08a265bd6a3 | [
"MIT"
] | null | null | null | import re
import requests
import logging
logger = logging.getLogger(__name__)
class JsUnPacker(object):
    """
    It takes the javascript file's url which contains the port numbers for
    the encrypted strings. The file has to be unpacked to a readable form just like
    http://matthewfl.com/unPacker.html does. Then we create a dictionary for
    every key:port pair.
    """
    # TODO: it might not be necessary to unpack the js code
    def __init__(self, js_file_url, headers=None):
        logger.info("JS UnPacker init path: {}".format(js_file_url))
        r = requests.get(js_file_url, headers=headers)
        encrypted = r.text.strip()
        # Keep only the packed payload: the argument list of the eval(...(...))
        # wrapper, i.e. everything after '}(' minus the trailing ')'.
        encrypted = '(' + encrypted.split('}(')[1][:-1]
        # SECURITY: eval() executes an expression assembled from REMOTE,
        # untrusted content. A hostile server controls this string; consider
        # parsing the p/a/c/k arguments instead of evaluating them.
        unpacked = eval('self.unpack' +encrypted)  # string of the js code in unpacked form
        # Pair each CSS-class key with the port number passed next to it.
        matches = re.findall(r".*?\('\.([a-zA-Z0-9]{1,6})'\).*?\((\d+)\)", unpacked)
        self.ports = dict((key, port) for key, port in matches)
        logger.debug('portmap: '+str(self.ports))

    def baseN(self, num, b, numerals="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"):
        """Return *num* rendered in base *b* using the given numeral alphabet."""
        return ((num == 0) and numerals[0]) or (self.baseN(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])

    def unpack(self, p, a, c, k, e=None, d=None):
        """Reverse Dean Edwards' p.a.c.k.e.r: substitute each base-`a` token
        in *p* with its replacement from the keyword table *k*."""
        while c:
            c -= 1
            if k[c]:
                p = re.sub("\\b" + self.baseN(c, a) + "\\b", k[c], p)
        return p

    def get_port(self, key):
        """Return the port mapped to *key*; raises KeyError when absent."""
        return self.ports[key]

    def get_ports(self):
        """Return the full key -> port mapping."""
        return self.ports
| 36.619048 | 122 | 0.615085 |
8f37fd30e84d2548d9bed8371f68b035be4433e2 | 3,387 | py | Python | setup.py | JAvito-GC/Linux-Utils | 41d8905063380f0e27475063ffaaf1a9edca6867 | [
"MIT"
] | 4 | 2018-10-20T15:49:07.000Z | 2020-12-03T03:44:52.000Z | setup.py | JAvito-GC/Linux-Utils | 41d8905063380f0e27475063ffaaf1a9edca6867 | [
"MIT"
] | null | null | null | setup.py | JAvito-GC/Linux-Utils | 41d8905063380f0e27475063ffaaf1a9edca6867 | [
"MIT"
] | 4 | 2017-10-18T12:49:42.000Z | 2022-03-09T16:21:09.000Z | #!/usr/bin/env python
# Setup script for the `linux-utils' package.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: February 9, 2020
# URL: https://linux-utils.readthedocs.io
"""
Setup script for the ``linux-utils`` package.
**python setup.py install**
Install from the working directory into the current Python environment.
**python setup.py sdist**
Build a source distribution archive.
**python setup.py bdist_wheel**
Build a wheel distribution archive.
"""
# Standard library modules.
import codecs
import os
import re
# De-facto standard solution for Python packaging.
from setuptools import find_packages, setup
def get_contents(*args):
    """Get the contents of a file relative to the source distribution directory."""
    path = get_absolute_path(*args)
    with codecs.open(path, 'r', 'UTF-8') as handle:
        return handle.read()
def get_version(*args):
    """Extract the version number from a Python module."""
    source = get_contents(*args)
    # Collect all __dunder__ = '...' assignments; the dict keeps the last
    # occurrence of each key, and a missing 'version' raises KeyError.
    matches = re.findall('__([a-z]+)__ = [\'"]([^\'"]+)', source)
    return dict(matches)['version']
def get_requirements(*args):
    """Get requirements from pip requirement files."""
    with open(get_absolute_path(*args)) as handle:
        # Strip full-line and trailing comments, drop blank lines, and
        # remove all internal whitespace from each requirement.
        uncommented = (re.sub(r'^#.*|\s#.*', '', line) for line in handle)
        requirements = set(
            re.sub(r'\s+', '', line)
            for line in uncommented
            if line and not line.isspace()
        )
    return sorted(requirements)
def get_absolute_path(*args):
    """Transform relative pathnames into absolute pathnames."""
    base_directory = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_directory, *args)
# Package metadata for linux-utils; the version, long description and
# dependency list are read from the source tree at build time via the
# helper functions defined above.
setup(name="linux-utils",
      version=get_version('linux_utils', '__init__.py'),
      description="Linux system administration tools for Python",
      long_description=get_contents('README.rst'),
      url='https://linux-utils.readthedocs.io',
      author="Peter Odding",
      author_email='peter@peterodding.com',
      license='MIT',
      packages=find_packages(),
      # Console-script fallbacks for systems without cryptdisks_start/stop.
      entry_points=dict(console_scripts=[
          'cryptdisks-start-fallback = linux_utils.cli:cryptdisks_start_cli',
          'cryptdisks-stop-fallback = linux_utils.cli:cryptdisks_stop_cli',
      ]),
      install_requires=get_requirements('requirements.txt'),
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: MIT License',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: Implementation :: CPython',
          'Programming Language :: Python :: Implementation :: PyPy',
          'Topic :: Software Development',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: System :: Systems Administration',
          'Topic :: Terminals',
          'Topic :: Utilities',
      ])
| 34.561224 | 83 | 0.638323 |
124b3e43be7262b0a03a6aae2614a12e0459aff3 | 295 | py | Python | checkout/api/urls.py | shukranjs/E-commerce-project | 047a509c78d9dc9ba65910349383df1e3d7228bd | [
"MIT"
] | 2 | 2021-07-07T07:19:44.000Z | 2021-08-19T19:20:14.000Z | checkout/api/urls.py | Emrahgs/E-commerce-project | 4c0ee3444701c3c1782e6b6cf75b7c63aee32371 | [
"MIT"
] | null | null | null | checkout/api/urls.py | Emrahgs/E-commerce-project | 4c0ee3444701c3c1782e6b6cf75b7c63aee32371 | [
"MIT"
] | null | null | null | from django.urls import path
from checkout.api.views import OrderItemAPIView, OrderItemDetailAPIView
urlpatterns = [
path('order-item/', OrderItemAPIView.as_view(), name='order_item_api'),
path('order-item/<int:pk>', OrderItemDetailAPIView.as_view(), name='order_item_detail_api'),
] | 42.142857 | 96 | 0.762712 |
05b5c4ba861d0c860d6013aadae21c8b3f781e5e | 2,740 | py | Python | 0x0C-python-almost_a_circle/tests/test_models/test_base.py | Trice254/alx-higher_level_programming | b49b7adaf2c3faa290b3652ad703914f8013c67c | [
"MIT"
] | null | null | null | 0x0C-python-almost_a_circle/tests/test_models/test_base.py | Trice254/alx-higher_level_programming | b49b7adaf2c3faa290b3652ad703914f8013c67c | [
"MIT"
] | null | null | null | 0x0C-python-almost_a_circle/tests/test_models/test_base.py | Trice254/alx-higher_level_programming | b49b7adaf2c3faa290b3652ad703914f8013c67c | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
Contains tests for Base class
"""
import unittest
import json
from models import base
Base = base.Base
class TestBase(unittest.TestCase):
    """check functionality of Base class"""

    # NOTE(review): every test method below is prefixed with '_' instead of
    # 'test_', so unittest discovery will not run any of them.  Several
    # depend on Base's shared id counter and on execution order (_no_id
    # expects id 1, _no_id_after_set expects id 2), so blindly renaming them
    # to 'test_*' would break them — confirm whether they were deliberately
    # disabled before re-enabling.

    def _too_many_args(self):
        """testing too many args to init"""
        with self.assertRaises(TypeError):
            b = Base(1, 1)

    def _no_id(self):
        """Testing id as None"""
        b = Base()
        self.assertEqual(b.id, 1)

    def _id_set(self):
        """Testing id as not None"""
        b98 = Base(98)
        self.assertEqual(b98.id, 98)

    def _no_id_after_set(self):
        """Testing id as None after not None"""
        b2 = Base()
        self.assertEqual(b2.id, 2)

    def _nb_private(self):
        """Testing nb_objects as a private instance attribute"""
        b = Base(3)
        with self.assertRaises(AttributeError):
            print(b.nb_objects)
        with self.assertRaises(AttributeError):
            print(b.__nb_objects)

    def _to_json_string(self):
        """Testing regular to json string"""
        # Reset the private counter so ids below are deterministic.
        Base._Base__nb_objects = 0
        d1 = {"id": 9, "width": 5, "height": 6, "x": 7, "y": 8}
        d2 = {"id": 2, "width": 2, "height": 3, "x": 4, "y": 0}
        json_s = Base.to_json_string([d1, d2])
        self.assertTrue(type(json_s) is str)
        d = json.loads(json_s)
        self.assertEqual(d, [d1, d2])

    def _empty_to_json_string(self):
        """Test for passing empty list"""
        json_s = Base.to_json_string([])
        self.assertTrue(type(json_s) is str)
        self.assertEqual(json_s, "[]")

    def _None_to_json_String(self):
        """testting None to a json"""
        json_s = Base.to_json_string(None)
        self.assertTrue(type(json_s) is str)
        self.assertEqual(json_s, "[]")

    def _from_json_string(self):
        """Tests normal from_json_string"""
        json_str = '[{"id": 9, "width": 5, "height": 6, "x": 7, "y": 8}, \
{"id": 2, "width": 2, "height": 3, "x": 4, "y": 0}]'
        json_l = Base.from_json_string(json_str)
        self.assertTrue(type(json_l) is list)
        self.assertEqual(len(json_l), 2)
        self.assertTrue(type(json_l[0]) is dict)
        self.assertTrue(type(json_l[1]) is dict)
        self.assertEqual(json_l[0],
                         {"id": 9, "width": 5, "height": 6, "x": 7, "y": 8})
        self.assertEqual(json_l[1],
                         {"id": 2, "width": 2, "height": 3, "x": 4, "y": 0})

    def _frjs_empty(self):
        """Tests from_json_string empty string"""
        self.assertEqual([], Base.from_json_string(""))

    def _frjs_None(self):
        """Testing from_json_string none string"""
        self.assertEqual([], Base.from_json_string(None))
cfa5f4a105163dec690b3190fab235197e72fb63 | 19,026 | py | Python | geode/exact/test_circle.py | Haider-BA/geode | b9ebbc0c61acd17ceb21200dba0d52546a3dbff2 | [
"BSD-3-Clause"
] | 1 | 2021-06-19T13:12:35.000Z | 2021-06-19T13:12:35.000Z | geode/exact/test_circle.py | Haider-BA/geode | b9ebbc0c61acd17ceb21200dba0d52546a3dbff2 | [
"BSD-3-Clause"
] | null | null | null | geode/exact/test_circle.py | Haider-BA/geode | b9ebbc0c61acd17ceb21200dba0d52546a3dbff2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from __future__ import division
from geode import *
import sys
def random_circle_arcs(n,k):
    '''Generate n random circular arc polygons with k arcs per contour.

    Returns a geode Nested array of CircleArc records; each record has an
    endpoint field 'x' and a curvature-like field 'q' in (-1.5, 1.5).
    '''
    # View as a recarray so the CircleArc fields can be filled by attribute.
    arcs = empty((n,k),dtype=CircleArc).view(recarray)
    # Per-contour random center plus per-arc jitter around it.
    arcs.x = random.randn(n,1,2)+.5*random.randn(n,k,2)
    arcs.q = random.uniform(-1.5,1.5,size=n*k).reshape(n,k)
    return Nested(arcs)
def draw_circle_arcs(arcs,n=100,label=False,full=False,dots=False,jitter=None):
    '''Plot circular arc polygons with matplotlib.

    arcs   : Nested array of CircleArc contours
    n      : samples per arc when discretizing
    label  : annotate each arc with its flat index
    full   : draw the complete circle of each arc (dashed) instead of the arc
    dots   : mark arc endpoints
    jitter : optional uniform noise added to points so coincident curves are
             distinguishable
    '''
    import pylab
    for p,poly in enumerate(arcs):
        X = poly['x']
        q = poly['q']
        # Next endpoint of each arc (contours are closed).
        Xn = roll(X,-1,axis=0)
        l = magnitudes(Xn-X)
        # Arc center from chord midpoint plus perpendicular offset derived
        # from the q parameterization.
        center = .5*(X+Xn)+.25*(1/q-q).reshape(-1,1)*rotate_left_90(Xn-X)
        if 0:
            print 'draw %d: x0 %s, x1 %s, center %s'%(0,X[0],Xn[0],center[0])
        radius = .25*l*(1/q+q)
        # NOTE(review): unconditional debug print left in — confirm intended.
        print 'radius = %s'%radius
        assert allclose(magnitudes(X-center),abs(radius))
        # Subtended angle: full circle, or 4*atan(q) for the arc itself.
        theta = array([2*pi]) if full else 4*atan(q)
        points = center.reshape(-1,1,2)+Rotation.from_angle(theta[:,None]*arange(n+1)/n)*(X-center).reshape(-1,1,2)
        if dots:
            pylab.plot(X[:,0],X[:,1],'.')
        if full:
            for i,pp in enumerate(points):
                pylab.plot(pp[:,0],pp[:,1],'--')
                pylab.plot(center[i,0],center[i,1],'+')
                if label:
                    pylab.annotate(str(arcs.offsets[p]+i),center[i])
        else:
            if label:
                for i in xrange(len(poly)):
                    pylab.annotate(str(arcs.offsets[p]+i),points[i,n//2])
            points = concatenate([points.reshape(-1,2),[points[-1,-1]]])
            if jitter is not None:
                points += jitter*random.uniform(-1,1,points.shape) # Jitter if you want to be able to differentiate concident points:
            pylab.plot(points[:,0],points[:,1])
def subplot_arcs(arcs,subplot_index=111,title=None,full=True,label=True,dots=True,jitter=None):
    '''Draw arcs into the given pylab subplot (optionally with their full
    circles underneath) with an equal-aspect axis.'''
    import pylab
    ax = pylab.subplot(subplot_index)
    if title is not None:
        pylab.title(title)
    if full:
        draw_circle_arcs(arcs,full=True,label=label,jitter=jitter)
    draw_circle_arcs(arcs,label=label,dots=dots,jitter=jitter)
    ax.set_aspect('equal')
def test_circle_reverse():
    '''Reversing an arc polygon should negate its signed area.'''
    random.seed(1240405)
    for k in [2,3,4,10,100]:
        for i in range(10):
            arcs = random_circle_arcs(1 + (i%2),k)
            area_before = circle_arc_area(arcs)
            reverse_arcs(arcs)
            area_after = circle_arc_area(arcs)
            # area(reversed) == -area(original) up to numerical tolerance
            assert abs(area_before + area_after) < 1e-7
def test_circle_quantize():
    '''Quantizing arcs to the exact representation and back should nearly
    preserve both endpoints (x) and curvature parameters (q).'''
    random_circle_quantize_test(12312) # Test quantization for complete circles
    random.seed(37130)
    arcs0 = random_circle_arcs(1,100)
    #arcs0 = random_circle_arcs(20,5)
    arcs1 = circle_arc_quantize_test(arcs0)
    assert all(arcs0.offsets==arcs1.offsets)
    # Relative endpoint error and worst-case q error across all arcs.
    ex = relative_error(arcs0.flat['x'],arcs1.flat['x'])
    i = argmax(abs(arcs0.flat['q']-arcs1.flat['q']))
    q0,q1 = arcs0.flat['q'][i],arcs1.flat['q'][i]
    eq = abs(q0-q1)
    comparison_str = 'ex = %g, eq = %g (%d: %g to %g)'%(ex,eq,i,q0,q1)
    print comparison_str
    show_results = False # Enable this if you want comparisons between expected and actual results
    if show_results and not (ex<1e-6 and eq<3e-5):
        plot_args = dict(full=False, label=True, dots=True)
        import pylab
        pylab.suptitle(comparison_str)
        subplot_arcs(arcs0, 121, "Before Quantization", **plot_args)
        subplot_arcs(arcs1, 122, "After Quantization", **plot_args)
        pylab.show()
    assert ex<1e-6 and eq<3e-5 #This threshold is pretty agressive and might not work for many seeds
def to_arcs(py_arcs):
    '''Convert nested python lists of ((x,y),q) tuples into a Nested array
    of CircleArc records (the format the geode CSG routines consume).

    Improvement: replaces the manual append loop with a list comprehension;
    behavior and output are unchanged.
    '''
    arrays = [asarray([((a[0][0],a[0][1]),a[1]) for a in contour],dtype=CircleArc)
              for contour in py_arcs]
    return Nested(arrays, dtype=CircleArc)
def test_circles():
# Test quantization routine
random.seed(37130)
# We don't know how to write a unit test, so use a regression test instead. Each entry is indexed by (k,n,i):
known = {(3,1,0):[]
,(3,1,1):[[[[-3.010773,0.755762],-0.336175],[[-2.858849,1.309345],0.589384],[[-2.848044,0.356576],0.698345]]]
,(3,1,2):[[[[-2.724896,-0.718846],-0.034550],[[-1.800262,-0.054708],1.110004]]]
,(3,1,3):[[[[-0.303861,1.024886],0.142447],[[0.656238,0.432930],-0.216057],[[0.573075,0.905945],0.297362]]]
,(3,1,4):[[[[-0.522519,-0.584078],-0.000456],[[0.181312,-1.825772],0.333828],[[0.316874,-0.423081],1.205249]]]
,(3,1,5):[[[[-2.301922,1.591784],0.590868],[[-2.248770,0.515245],-0.404141]]]
,(3,1,6):[[[[1.991475,-0.087749],0.058138],[[2.252511,-0.129968],0.079171]]]
,(3,1,7):[[[[1.686977,0.901313],-0.825506],[[2.173907,0.323295],-0.889160],[[2.394244,-0.216802],1.083369]]]
,(3,1,8):[[[[0.826107,-0.051135],-0.001554],[[0.828592,-0.052081],-0.000613],[[0.829624,-0.051610],-0.010869]]]
,(3,1,9):[]
,(3,2,0):[]
,(3,2,1):[[[[-2.216575,0.262133],0.840664],[[-1.715437,0.378521],-0.836815],[[-1.520104,1.476167],1.110229]],[[[0.733133,0.198975],0.201225],[[1.350660,0.190027],1.056223],[[0.828611,0.498085],-0.799089]]]
,(3,2,2):[[[[0.426560,0.796812],-0.015221],[[0.438715,0.801127],-0.008692],[[0.427843,0.808990],0.002491]]]
,(3,2,3):[[[[-1.189061,0.319839],-0.024296],[[-1.156225,0.315736],-0.008650],[[-1.155983,0.335169],0.046473]],[[[-1.149088,-0.354165],0.676161],[[-0.874696,-0.170949],-0.014745],[[-0.901609,-0.153892],-0.247980]],[[[-0.979996,0.312666],0.037660],[[-0.977415,0.224464],-0.105190],[[-0.898515,0.106663],0.308622]]]
,(3,2,4):[[[[-1.724795,0.024468],-0.061043],[[-1.619382,0.251626],-0.019812],[[-1.664982,0.292379],-0.095631],[[-1.716633,0.262827],0.098298]],[[[-1.178215,-0.486595],0.548680],[[-0.543385,0.328433],0.000952],[[-0.553205,0.331992],-0.225385],[[-1.164077,0.076010],-0.137576]]]
,(3,3,0):[[[[-2.666048,-1.426976],0.383100],[[-2.301528,-1.981148],0.693661],[[-1.673099,-2.802806],0.925914]],[[[1.887855,-2.119635],0.141599],[[3.836737,-2.084426],1.436873],[[2.200843,-1.110354],0.453311]]]
,(3,3,1):'boring'
,(3,3,2):[[[[-0.510294,0.704167],-0.028318],[[-0.093683,0.411961],0.901914],[[-0.136030,-0.807826],0.161745],[[0.439782,-0.251437],0.009192],[[0.155160,0.013840],0.068953],[[0.272982,0.088631],-0.016613],[[0.478865,-0.127765],0.072894],[[0.443719,-0.243846],0.022231],[[0.490841,-0.141275],-0.040178],[[0.923357,-0.718623],0.338586],[[1.273104,-0.350055],-0.306153],[[0.492852,-0.136300],0.087512],[[0.583693,0.295422],-0.189865],[[0.344185,0.162598],1.105698]],[[[-0.040488,0.369307],-0.016236],[[0.179752,0.177727],-0.203408]],[[[0.442339,-0.253911],0.480081],[[0.897114,-0.721975],0.015420]]]
,(3,3,3):[[[[-1.185738,-0.402147],0.126455],[[-0.880949,-1.409412],-0.123348],[[-0.850696,-0.873056],0.063155],[[-0.718194,-1.066745],1.362333],[[0.381933,0.324818],1.294009],[[0.290114,0.431820],0.512688],[[-0.953336,-0.432871],-0.054159]],[[[0.496306,0.333617],0.030982],[[0.994209,0.177878],0.113508],[[1.353632,0.193640],0.958044]]]
,(3,10,0):[[[[-1.453289,-2.809749],0.667486],[[-0.893038,-2.812135],1.490685],[[-1.150855,-2.549945],1.322488]],[[[-0.429733,-0.417243],0.109782],[[-0.409798,-0.636888],-0.092850],[[-0.281360,-0.786052],0.092081],[[-0.407264,-0.644607],0.147577],[[-0.237065,-0.883907],-0.214415],[[-0.279956,-1.320125],0.220890],[[-0.234773,-0.885674],0.136238],[[0.017467,-0.987308],-0.022269],[[0.588381,-0.410025],0.065205],[[-0.208725,-0.386703],-0.118615],[[-0.082548,-0.127479],0.005456],[[-0.237814,-0.081107],0.186024],[[-0.426253,-0.394363],-0.190440],[[-0.253192,-0.391536],0.014528]],[[[-0.213546,-0.410876],-0.481085],[[-0.167188,-0.775884],-0.152764]],[[[-0.007673,0.775326],0.176070],[[0.613992,0.413336],-0.089050],[[0.658953,0.505964],0.030758],[[0.680313,0.403523],0.060150],[[0.932777,0.404811],-0.022780],[[0.946646,0.441288],0.037220],[[0.950331,0.407172],0.137119],[[1.469238,0.635559],-0.123964],[[1.337842,1.336018],-0.159037],[[1.592977,0.755306],0.409897],[[1.507176,2.231095],1.039402],[[0.734075,2.406343],0.983069]],[[[0.223366,-1.242424],-0.235979],[[0.559076,-1.256569],0.859877],[[0.832823,-1.400939],0.379925]],[[[0.657113,-1.107158],0.214171],[[1.088048,-1.123091],1.005347]],[[[0.856394,-0.109806],-0.474681],[[1.257573,-1.012384],0.696716]],[[[1.494908,-0.128537],0.271431],[[1.717105,0.067760],-0.178489]]]
,(3,10,1):[[[[-2.503355,0.135105],-0.235193],[[-2.262554,-0.052479],1.248027],[[-2.484257,0.160738],0.000804]],[[[-2.058643,-0.230671],-0.319316],[[-1.559567,0.037099],-0.347047],[[-1.520080,0.972124],-0.127287],[[-0.884079,0.884983],-0.582577],[[-0.773175,1.214582],-0.071359],[[-0.573586,1.135969],-0.110738],[[-0.538833,1.055704],0.033427],[[-0.551665,1.122991],-0.154137],[[-0.253933,0.779053],-0.018736],[[-0.251264,0.721719],0.186409],[[0.102279,0.980313],0.222817],[[0.212778,0.686069],-0.055145],[[-0.251409,0.698373],-0.572340],[[-1.404591,0.053789],0.068171],[[-1.448225,0.009452],-0.216132],[[-1.163233,-0.276318],0.185753],[[-0.399292,-0.271834],-0.035356],[[-0.375776,-0.228709],-0.082833],[[1.403343e-05,0.003181],0.167021],[[0.273520,0.638108],0.483418],[[0.821076,0.829935],-0.066828],[[0.276247,0.692359],0.113827],[[0.180329,1.160739],0.011219],[[0.186372,1.187336],0.012633],[[0.174799,1.172644],0.395583],[[-1.178649,1.707578],0.434007]],[[[0.171906,-1.799055],0.264830],[[0.957447,-1.409122],-0.242764]],[[[0.563410,-0.477915],0.194847],[[1.259138,-0.127758],-0.183250],[[1.807019,-0.905868],0.396841],[[1.437020,0.158107],0.364968],[[1.034103,1.432217],0.148412],[[1.197739,0.255950],-0.006023],[[1.198297,0.238953],0.199504],[[0.583267,0.100560],-0.109097],[[1.140365,-0.056058],-0.271876]],[[[1.854054,0.808627],0.251675],[[1.945366,0.092593],-0.350994],[[2.320178,0.071094],0.115192]]]
,(3,10,2):[[[[-1.850146,2.031636],1.193297],[[-0.984657,1.698693],1.014384],[[-1.128600,1.713286],0.260469]],[[[-1.615670,0.262574],-0.144464],[[-1.466613,-0.348527],-0.121168],[[-1.356335,-0.012708],1.464144]],[[[-0.905989,1.906895],-0.162077],[[-0.764319,1.916432],0.020880],[[-0.785940,2.080328],-0.126287]],[[[-0.021940,0.793038],0.346693],[[0.391923,0.269867],0.038725],[[0.357187,0.077516],0.098515],[[0.564597,0.257287],0.130665],[[0.824858,0.351844],-0.048783],[[0.841263,0.228062],-0.126355],[[0.532853,0.027745],0.198267],[[0.775957,-0.076660],-0.292794],[[0.184168,-0.435120],1.496861],[[0.199749,-0.990425],0.447133],[[1.049210,0.050447],0.404356],[[1.009394,0.530790],-0.062213],[[0.938907,0.361636],0.018593],[[0.903196,0.418342],0.059189],[[0.980460,0.519300],-0.038557],[[0.984967,0.550321],0.155798],[[0.784737,0.616728],0.003545],[[0.773293,0.636896],-0.023078],[[0.736807,0.601684],0.009748],[[0.709403,0.623688],0.040277],[[0.710757,0.736803],0.003148],[[0.699102,0.753780],-0.007487],[[0.656040,0.773481],0.017096],[[0.601999,0.706070],-0.021003],[[0.642533,0.670598],0.014970],[[0.596151,0.698178],0.051963],[[0.463728,0.472149],-0.046316],[[0.362389,0.464862],0.037989],[[0.192825,0.810718],0.059983]],[[[-0.021763,0.807847],-0.077177],[[0.171174,0.846831],0.023279],[[0.038768,1.042495],0.113888]],[[[0.703049,0.574277],0.009093],[[0.706748,0.599587],-0.006917],[[0.717442,0.585423],-0.008281]],[[[1.031037,0.640434],-0.013174],[[1.038349,0.648279],-0.001011],[[1.032066,0.649494],-0.003083]]]
,(3,10,3):[[[[-2.688785,-1.212539],0.769230],[[-1.080329,-1.143351],-0.203676],[[-1.388706,-0.914265],-0.008203],[[-1.174639,-0.834228],-0.019157],[[-1.203638,-0.781211],0.146600],[[-1.424285,-0.839841],-0.258863],[[-1.355535,-0.368259],0.053953],[[-1.279858,-0.530940],-0.078995],[[-1.272768,-0.283315],-0.016662],[[-1.246601,-0.263668],-0.079393],[[-1.188952,-0.189290],-0.058420],[[-1.110709,0.066915],-0.011862],[[-1.086477,0.095440],0.002787],[[-1.093293,0.104767],-0.197982],[[-0.440406,0.689701],-0.009223],[[-0.449116,0.725582],0.309172],[[-1.292900,0.308640],0.918747]],[[[-0.729724,1.978493],0.470730],[[0.183745,2.094192],-0.207443]],[[[1.205548,1.853441],0.540626],[[2.324679,1.474149],0.316197],[[1.687294,1.499578],0.745920]]]
,(3,10,4):[[[[-1.713445,-1.244464],0.034561],[[-1.599586,-1.287285],0.081800],[[-1.306104,-1.085377],-0.227450],[[-1.512949,-0.621891],0.781910]],[[[-1.441501,1.437139],0.431596],[[-0.731085,1.545868],-0.191798],[[-1.250495,1.640124],-0.818925]],[[[-1.093375,-0.800970],0.235370],[[-1.076625,0.176586],0.400043]],[[[-0.802951,0.619093],-0.206489],[[-0.157361,-0.008610],-0.110921],[[-0.309586,-0.120498],0.067734],[[-0.108911,-0.152166],-0.105902],[[-0.086171,-0.627787],-0.037552],[[0.083356,-0.591277],0.045093],[[0.121198,-0.469432],0.629025],[[0.254361,-0.067518],0.227726],[[0.645948,0.454210],0.190258],[[1.166980,0.824583],1.155420],[[1.355540,1.073012],0.196773],[[0.615545,0.856931],0.009870],[[0.604889,0.884693],0.121660],[[0.107467,0.510668],0.501837]],[[[-0.296379,-1.164791],0.108890],[[-0.142394,-1.395791],0.138141],[[-0.295848,-1.160402],0.001700]],[[[-0.168680,-0.903918],0.115345],[[0.047980,-0.664735],0.086158],[[-0.128552,-0.799981],-0.024519]],[[[1.467030,-0.182280],0.285611],[[1.978747,-1.307214],-0.252715],[[1.872244,-0.268930],-0.282824]]]
,(3,10,5):[[[[-3.060150,-0.355061],0.224263],[[-2.431354,-0.431142],0.460216],[[-1.440838,-0.498447],1.370885]],[[[0.020557,0.738515],-0.019147],[[0.131563,0.767062],-0.045649],[[0.182079,0.960629],-0.050604],[[0.274621,1.016848],-0.076444],[[0.271610,1.154057],0.482941]],[[[0.416269,1.119232],-0.034160],[[0.433523,1.064196],-0.368116],[[1.018625,0.688291],-0.000390],[[1.020787,0.687399],1.023208]],[[[0.957602,0.711869],-0.001475],[[0.965901,0.708841],0.013196],[[0.992423,0.735853],-0.023427]],[[[2.197525,1.038404],-0.056303],[[2.274004,0.763559],0.147383]]]
}
def arc_error(correct,arcs):
e = 0
for cs,xs in zip(correct,arcs):
for c,x in zip(cs,xs):
e = max(e,maxabs(c[0]-x['x']),abs(c[1]-x['q']))
return e
# Test CSG
k = 3
plot_args = dict(full=False, label=True, dots=True)
for n in 1,2,3,10,40,100:
for i in xrange({1:10,2:5,3:4,10:6,40:20,100:10}[n]):
correct = known.get((k,n,i))
if correct=='boring':
continue
print '(k,n,i) (%d,%d,%d)'%(k,n,i)
random.seed(18183181+1000*k+10*n+i)
arcs0 = canonicalize_circle_arcs(random_circle_arcs(n,k))
circle_arc_quantize_test(arcs0);
if (k,n,i)==None: # Enable to visualize before union
print
print 'arcs0 = %s'%compact_str(arcs0)
import pylab
pylab.suptitle('k %d, n %d, i %d'%(k,n,i))
subplot_arcs(arcs0,**plot_args)
pylab.show()
arcs1 = canonicalize_circle_arcs(circle_arc_union(arcs0))
error = 0 if n>=40 else inf if correct is None else arc_error(correct,arcs1)
if error>2e-5:
print 'error = %f' % error
print 'expected area = %f' % circle_arc_area(to_arcs(correct))
print 'result area = %f' % circle_arc_area(arcs1)
print 'arcs0 = %s'%compact_str(arcs0)
print '\narcs1 = %s'%compact_str(arcs1)
print '\ncorrect = %s'%compact_str(correct)
if 0: # Enable this if you want comparisons between expected and actual results
import pylab
pylab.suptitle('k %d, n %d, i %d, error %g'%(k,n,i,error))
subplot_arcs(arcs0, 121, "Input to union", **plot_args)
subplot_arcs(arcs1, 122, "Output of union", **plot_args)
pylab.figure()
subplot_arcs(arcs0, 121, "Before Quantization", **plot_args)
subplot_arcs(circle_arc_quantize_test(arcs0), 122, "After Quantization", **plot_args)
pylab.figure()
subplot_arcs(to_arcs(correct), 121, "Expected", **plot_args)
subplot_arcs(arcs1, 122, "Returned", **plot_args)
pylab.show()
assert False
# Check extremely degenerate situations
if n==40 and i<2:
area = circle_arc_area(arcs1)
assert allclose(area,circle_arc_area(circle_arc_union(arcs1,arcs1)))
assert allclose(area,circle_arc_area(circle_arc_intersection(arcs1,arcs1)))
def test_single_circle(show_results=False):
    '''Exercise degenerate single-circle CSG handling over many seeds and
    circle counts; the real checks live inside single_circle_handling_test.'''
    seed = 151193
    max_count = 10
    for count in range(max_count):
        # count 0 is deterministic, so only one trial is needed there.
        num_trials = 1 if count == 0 else 10
        for trial in range(num_trials):
            input_arcs, union_arcs, overlap_arcs = single_circle_handling_test(seed + trial*max_count + count, count)
            if show_results:
                import pylab
                plot_args = dict(full=False, label=True, dots=True)
                pylab.suptitle('seed %d, count %d'%(seed, count))
                subplot_arcs(input_arcs, 121, "Input arcs", **plot_args)
                subplot_arcs(union_arcs, 122, "Output of union", **plot_args)
                pylab.figure()
                subplot_arcs(input_arcs, 121, "Input arcs", **plot_args)
                subplot_arcs(overlap_arcs, 122, "Output of overlaps", **plot_args)
                pylab.show()
def test_offsets():
    '''Positive offsets of arc polygons must monotonically grow area, both
    for single offsets and for successive offset shells.'''
    random.seed(441424)
    arcs0 = circle_arc_union(random_circle_arcs(10,10))
    print "Offsetting arcs"
    arcs1 = offset_arcs(arcs0, 0.1)
    assert circle_arc_area(arcs1) > circle_arc_area(arcs0)
    print "Offsetting arcs with shells"
    shells = offset_shells(arcs0, 0.2, 10)
    # Check that we have monotonically increasing area
    prev_area, prev_arcs = 0, []
    for arcs in [arcs0, arcs1] + shells:
        area = circle_arc_area(arcs)
        if not area > prev_area:
            error = "Positive offset caused decrease in area from %g to %g" % (prev_area, area)
            print error
            if 0:
                import pylab
                pylab.suptitle(error)
                subplot_arcs(prev_arcs, 121, "Previous shell", full=False)
                subplot_arcs(arcs, 122, "After offset", full=False)
                pylab.show()
            assert False
        prev_area, prev_arcs = area, arcs
    print "Offsetting of open arcs"
    arcs4 = offset_open_arcs(arcs0, 0.001) # Mostly this just ensures we don't hit any asserts
    assert circle_arc_area(arcs4) > 0 # We should at least have a positive area
def test_negative_offsets(seed=7056389):
    '''Inset followed by equal offset should recover the original shape when
    the shape has no features smaller than the offset distance.'''
    print "Testing negative offset"
    random.seed(seed)
    d = 0.4
    # Offset inward then outward would normally erode sharp features, but we can use a positive offset to generate a shape with no sharp features
    arcs0 = offset_arcs(random_circle_arcs(10,10), d*1.5) # Generate random arcs and ensure features big enough to not disappear if we inset/offset again
    inset = offset_arcs(arcs0, -d)
    reset = offset_arcs(inset, d)
    arcs0_area = circle_arc_area(arcs0)
    inset_area = circle_arc_area(inset)
    reset_area = circle_arc_area(reset)
    assert inset_area < arcs0_area # Offset by negative amount should reduce area
    area_error = abs(arcs0_area - reset_area)
    assert area_error < 2e-6
    # xor input arcs and result after inset/offset to get difference
    delta = split_arcs_by_parity(Nested.concatenate(arcs0,reset))
    # We expect thin features around edges of input arcs, but a small negative offset should erase everything
    squeezed_delta = offset_arcs(delta,-1e-6)
    assert len(squeezed_delta) == 0
    # Check that a large negative offset leaves nothing
    empty_arcs = offset_arcs(random_circle_arcs(10,10), -100.)
    assert len(empty_arcs) == 0
if __name__=='__main__':
    # NOTE(review): test_circle_reverse is defined above but never invoked
    # here — confirm whether that omission is intentional.
    test_offsets()
    test_negative_offsets()
    test_circle_quantize()
    test_single_circle()
    test_circles()
| 72.896552 | 1,527 | 0.641648 |
7083d968b6bb96522f1a98cee725544f9efa5e12 | 1,203 | py | Python | segmentation/libs/utils/metric.py | LvJC/CONTA | 5337911a8fb35eadfcedf8ab18b192bff556e626 | [
"MIT"
] | 121 | 2020-09-26T00:48:50.000Z | 2021-06-24T20:45:22.000Z | segmentation/libs/utils/metric.py | LvJC/CONTA | 5337911a8fb35eadfcedf8ab18b192bff556e626 | [
"MIT"
] | 23 | 2020-09-28T16:50:13.000Z | 2021-04-10T16:40:37.000Z | segmentation/libs/utils/metric.py | LvJC/CONTA | 5337911a8fb35eadfcedf8ab18b192bff556e626 | [
"MIT"
] | 17 | 2020-09-29T10:22:12.000Z | 2021-06-09T09:34:50.000Z | # Originally written by wkentaro
# https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py
import numpy as np
def _fast_hist(label_true, label_pred, n_class):
    """Build an (n_class x n_class) confusion matrix for one flat label pair.

    Rows index ground-truth classes, columns predicted classes; ground-truth
    labels outside [0, n_class) are ignored.
    """
    valid = (label_true >= 0) & (label_true < n_class)
    # Encode (truth, prediction) pairs as a single index and count them.
    merged = n_class * label_true[valid].astype(int) + label_pred[valid]
    counts = np.bincount(merged, minlength=n_class ** 2)
    return counts.reshape(n_class, n_class)
def scores(label_trues, label_preds, n_class):
    """Compute segmentation metrics over paired ground-truth/prediction maps.

    Returns overall pixel accuracy, mean per-class accuracy, frequency
    weighted IoU, mean IoU over classes present in the ground truth, and the
    per-class IoU dictionary.
    """
    hist = np.zeros((n_class, n_class))
    for truth, pred in zip(label_trues, label_preds):
        truth, pred = truth.flatten(), pred.flatten()
        # Confusion-matrix accumulation (ground-truth labels outside
        # [0, n_class) are ignored).
        valid = (truth >= 0) & (truth < n_class)
        hist += np.bincount(
            n_class * truth[valid].astype(int) + pred[valid],
            minlength=n_class ** 2,
        ).reshape(n_class, n_class)

    total = hist.sum()
    true_per_class = hist.sum(axis=1)
    pred_per_class = hist.sum(axis=0)
    diagonal = np.diag(hist)

    acc = diagonal.sum() / total
    acc_cls = np.nanmean(diagonal / true_per_class)
    iu = diagonal / (true_per_class + pred_per_class - diagonal)
    present = true_per_class > 0  # only average IoU over classes present in GT
    mean_iu = np.nanmean(iu[present])
    freq = true_per_class / total
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()

    return {
        "Pixel Accuracy": acc,
        "Mean Accuracy": acc_cls,
        "Frequency Weighted IoU": fwavacc,
        "Mean IoU": mean_iu,
        "Class IoU": dict(zip(range(n_class), iu)),
    }
| 35.382353 | 78 | 0.625104 |
8b91c10853089cd8fb7a285d7c3a6ea5010e43b8 | 13,815 | py | Python | tests/contrib/molten/test_molten.py | zhammer/dd-trace-py | 4c30f6e36bfa34a63cd9b6884677c977f76d2a01 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-04-28T21:35:01.000Z | 2021-04-28T21:35:01.000Z | tests/contrib/molten/test_molten.py | zhammer/dd-trace-py | 4c30f6e36bfa34a63cd9b6884677c977f76d2a01 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/contrib/molten/test_molten.py | zhammer/dd-trace-py | 4c30f6e36bfa34a63cd9b6884677c977f76d2a01 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import molten
from molten.testing import TestClient
from ddtrace import Pin
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.ext import errors, http
from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID
from ddtrace.contrib.molten import patch, unpatch
from ddtrace.contrib.molten.patch import MOLTEN_VERSION
from ...base import BaseTracerTestCase
from ...utils import assert_span_http_status_code, assert_is_measured
# NOTE: Type annotations required by molten otherwise parameters cannot be coerced
def hello(name: str, age: int) -> str:
    """Route handler for /hello/{name}/{age}: return a greeting string."""
    greeting = 'Hello {} year old named {}!'.format(age, name)
    return greeting
def molten_client(headers=None, params=None):
    """Build a one-route molten app and issue GET /hello/Jim/24 through the
    molten TestClient, forwarding optional headers and query params."""
    app = molten.App(routes=[molten.Route('/hello/{name}/{age}', hello)])
    client = TestClient(app)
    uri = app.reverse_uri('hello', name='Jim', age=24)
    return client.request('GET', uri, headers=headers, params=params)
class TestMolten(BaseTracerTestCase):
""""Ensures Molten is properly instrumented."""
TEST_SERVICE = 'molten-patch'
    def setUp(self):
        """Patch molten and point its Pin at the test tracer before each test."""
        super(TestMolten, self).setUp()
        patch()
        Pin.override(molten, tracer=self.tracer)
def tearDown(self):
super(TestMolten, self).setUp()
unpatch()
    def test_route_success(self):
        """ Tests request was a success with the expected span tags """
        response = molten_client()
        spans = self.tracer.writer.pop()
        self.assertEqual(response.status_code, 200)
        # TestResponse from TestClient is wrapper around Response so we must
        # access data property
        self.assertEqual(response.data, '"Hello 24 year old named Jim!"')
        # The first span is the root molten.request span.
        span = spans[0]
        assert_is_measured(span)
        self.assertEqual(span.service, 'molten')
        self.assertEqual(span.name, 'molten.request')
        self.assertEqual(span.span_type, 'web')
        self.assertEqual(span.resource, 'GET /hello/{name}/{age}')
        self.assertEqual(span.get_tag('http.method'), 'GET')
        self.assertEqual(span.get_tag(http.URL), 'http://127.0.0.1:8000/hello/Jim/24')
        assert_span_http_status_code(span, 200)
        # Query string is not tagged unless trace_query_string is enabled.
        assert http.QUERY_STRING not in span.meta
        # See test_resources below for specifics of this difference
        if MOLTEN_VERSION >= (0, 7, 2):
            self.assertEqual(len(spans), 18)
        else:
            self.assertEqual(len(spans), 16)
        # test override of service name
        Pin.override(molten, service=self.TEST_SERVICE)
        response = molten_client()
        spans = self.tracer.writer.pop()
        self.assertEqual(spans[0].service, 'molten-patch')
    def test_route_success_query_string(self):
        """With trace_query_string enabled, the root span carries the raw
        query string in the http.query.string tag."""
        with self.override_http_config('molten', dict(trace_query_string=True)):
            response = molten_client(params={'foo': 'bar'})
        spans = self.tracer.writer.pop()
        self.assertEqual(response.status_code, 200)
        # TestResponse from TestClient is wrapper around Response so we must
        # access data property
        self.assertEqual(response.data, '"Hello 24 year old named Jim!"')
        span = spans[0]
        assert_is_measured(span)
        self.assertEqual(span.service, 'molten')
        self.assertEqual(span.name, 'molten.request')
        self.assertEqual(span.resource, 'GET /hello/{name}/{age}')
        self.assertEqual(span.get_tag('http.method'), 'GET')
        self.assertEqual(span.get_tag(http.URL), 'http://127.0.0.1:8000/hello/Jim/24')
        assert_span_http_status_code(span, 200)
        self.assertEqual(span.get_tag(http.QUERY_STRING), 'foo=bar')
    def test_analytics_global_on_integration_default(self):
        """
        When making a request,
        when the integration-level analytics settings are left at their
        defaults but trace search is enabled globally,
        we expect the root span to carry the analytics tag with rate 1.0.
        """
        with self.override_global_config(dict(analytics_enabled=True)):
            response = molten_client()
            self.assertEqual(response.status_code, 200)
            # TestResponse from TestClient is wrapper around Response so we must
            # access data property
            self.assertEqual(response.data, '"Hello 24 year old named Jim!"')

        root_span = self.get_root_span()
        root_span.assert_matches(
            name='molten.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 1.0},
        )
    def test_analytics_global_on_integration_on(self):
        """
        When making a request,
        when integration-level trace search is enabled with sample rate 0.5
        and trace search is also enabled globally,
        we expect the root span to carry the analytics tag with rate 0.5.
        """
        with self.override_global_config(dict(analytics_enabled=True)):
            with self.override_config('molten', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
                response = molten_client()
                self.assertEqual(response.status_code, 200)
                # TestResponse from TestClient is wrapper around Response so we must
                # access data property
                self.assertEqual(response.data, '"Hello 24 year old named Jim!"')

        root_span = self.get_root_span()
        root_span.assert_matches(
            name='molten.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5},
        )
    def test_analytics_global_off_integration_default(self):
        """
        When making a request,
        when the integration-level analytics settings are left at their
        defaults and trace search is disabled globally,
        we expect the root span to carry no analytics tag.
        """
        with self.override_global_config(dict(analytics_enabled=False)):
            response = molten_client()
            self.assertEqual(response.status_code, 200)
            # TestResponse from TestClient is wrapper around Response so we must
            # access data property
            self.assertEqual(response.data, '"Hello 24 year old named Jim!"')

        root_span = self.get_root_span()
        self.assertIsNone(root_span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
    def test_analytics_global_off_integration_on(self):
        """
        When making a request
        When the integration's analytics is enabled with an explicit sample rate
            and trace search (analytics) is disabled globally
        We expect the root span to carry the integration's sample rate (0.5)
        """
        with self.override_global_config(dict(analytics_enabled=False)):
            with self.override_config('molten', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
                response = molten_client()
                self.assertEqual(response.status_code, 200)
                # TestResponse from TestClient is wrapper around Response so we must
                # access data property
                self.assertEqual(response.data, '"Hello 24 year old named Jim!"')

                root_span = self.get_root_span()
                root_span.assert_matches(
                    name='molten.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5},
                )
def test_route_failure(self):
app = molten.App(routes=[molten.Route('/hello/{name}/{age}', hello)])
client = TestClient(app)
response = client.get('/goodbye')
spans = self.tracer.writer.pop()
self.assertEqual(response.status_code, 404)
span = spans[0]
assert_is_measured(span)
self.assertEqual(span.service, 'molten')
self.assertEqual(span.name, 'molten.request')
self.assertEqual(span.resource, 'GET 404')
self.assertEqual(span.get_tag(http.URL), 'http://127.0.0.1:8000/goodbye')
self.assertEqual(span.get_tag('http.method'), 'GET')
assert_span_http_status_code(span, 404)
    def test_route_exception(self):
        """An exception raised in a route handler marks the root span as errored,
        while the error message tag is set only on the route-function span."""
        def route_error() -> str:
            raise Exception('Error message')
        app = molten.App(routes=[molten.Route('/error', route_error)])
        client = TestClient(app)
        response = client.get('/error')
        spans = self.tracer.writer.pop()
        self.assertEqual(response.status_code, 500)
        span = spans[0]
        assert_is_measured(span)
        route_error_span = spans[-1]
        self.assertEqual(span.service, 'molten')
        self.assertEqual(span.name, 'molten.request')
        self.assertEqual(span.resource, 'GET /error')
        self.assertEqual(span.error, 1)
        # error tags only set for route function span and not root span
        self.assertIsNone(span.get_tag(errors.ERROR_MSG))
        self.assertEqual(route_error_span.get_tag(errors.ERROR_MSG), 'Error message')
    def test_resources(self):
        """ Tests request has expected span resources """
        molten_client()
        spans = self.tracer.writer.pop()

        # `can_handle_parameter` appears twice since two parameters are in request
        # TODO[tahir]: missing ``resolve` method for components
        expected = [
            'GET /hello/{name}/{age}',
            'molten.middleware.ResponseRendererMiddleware',
            'molten.components.HeaderComponent.can_handle_parameter',
            'molten.components.CookiesComponent.can_handle_parameter',
            'molten.components.QueryParamComponent.can_handle_parameter',
            'molten.components.RequestBodyComponent.can_handle_parameter',
            'molten.components.RequestDataComponent.can_handle_parameter',
            'molten.components.SchemaComponent.can_handle_parameter',
            'molten.components.UploadedFileComponent.can_handle_parameter',
            'molten.components.HeaderComponent.can_handle_parameter',
            'molten.components.CookiesComponent.can_handle_parameter',
            'molten.components.QueryParamComponent.can_handle_parameter',
            'molten.components.RequestBodyComponent.can_handle_parameter',
            'molten.components.RequestDataComponent.can_handle_parameter',
            'molten.components.SchemaComponent.can_handle_parameter',
            'molten.components.UploadedFileComponent.can_handle_parameter',
            'tests.contrib.molten.test_molten.hello',
            'molten.renderers.JSONRenderer.render'
        ]

        # Addition of `UploadedFileComponent` in 0.7.2 changes expected spans
        if MOLTEN_VERSION < (0, 7, 2):
            expected = [
                r
                for r in expected
                if not r.startswith('molten.components.UploadedFileComponent')
            ]

        # one span per expected resource, in order
        self.assertEqual(len(spans), len(expected))
        self.assertEqual([s.resource for s in spans], expected)
    def test_distributed_tracing(self):
        """ Tests whether span IDs are propagated when distributed tracing is on """
        # Default: distributed tracing enabled
        response = molten_client(headers={
            HTTP_HEADER_TRACE_ID: '100',
            HTTP_HEADER_PARENT_ID: '42',
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), 'Hello 24 year old named Jim!')

        spans = self.tracer.writer.pop()
        span = spans[0]
        self.assertEqual(span.name, 'molten.request')
        # trace/parent ids inherited from the propagation headers
        self.assertEqual(span.trace_id, 100)
        self.assertEqual(span.parent_id, 42)

        # Explicitly enable distributed tracing
        with self.override_config('molten', dict(distributed_tracing=True)):
            response = molten_client(headers={
                HTTP_HEADER_TRACE_ID: '100',
                HTTP_HEADER_PARENT_ID: '42',
            })
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.json(), 'Hello 24 year old named Jim!')

            spans = self.tracer.writer.pop()
            span = spans[0]
            self.assertEqual(span.name, 'molten.request')
            self.assertEqual(span.trace_id, 100)
            self.assertEqual(span.parent_id, 42)

        # Now without tracing on
        with self.override_config('molten', dict(distributed_tracing=False)):
            response = molten_client(headers={
                HTTP_HEADER_TRACE_ID: '100',
                HTTP_HEADER_PARENT_ID: '42',
            })
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.json(), 'Hello 24 year old named Jim!')

            spans = self.tracer.writer.pop()
            span = spans[0]
            self.assertEqual(span.name, 'molten.request')
            # propagation headers ignored: fresh ids are generated
            self.assertNotEqual(span.trace_id, 100)
            self.assertNotEqual(span.parent_id, 42)
def test_unpatch_patch(self):
""" Tests unpatch-patch cycle """
unpatch()
self.assertIsNone(Pin.get_from(molten))
molten_client()
spans = self.tracer.writer.pop()
self.assertEqual(len(spans), 0)
patch()
# Need to override Pin here as we do in setUp
Pin.override(molten, tracer=self.tracer)
self.assertTrue(Pin.get_from(molten) is not None)
molten_client()
spans = self.tracer.writer.pop()
self.assertTrue(len(spans) > 0)
def test_patch_unpatch(self):
""" Tests repatch-unpatch cycle """
# Already call patch in setUp
self.assertTrue(Pin.get_from(molten) is not None)
molten_client()
spans = self.tracer.writer.pop()
self.assertTrue(len(spans) > 0)
# Test unpatch
unpatch()
self.assertTrue(Pin.get_from(molten) is None)
molten_client()
spans = self.tracer.writer.pop()
self.assertEqual(len(spans), 0)
def test_patch_idempotence(self):
""" Tests repatching """
# Already call patch in setUp but patch again
patch()
molten_client()
spans = self.tracer.writer.pop()
self.assertTrue(len(spans) > 0)
| 43.171875 | 117 | 0.646833 |
d363b1ee09b25b3b6f70f89ed5229b45491692a0 | 6,619 | py | Python | venv/Lib/site-packages/nuitka/utils/ModuleNames.py | patmloi/PalettePal | 66c6528a990c8bd6159fad128b2aca559f3ea0a4 | [
"MIT"
] | 5,421 | 2018-09-24T08:04:06.000Z | 2022-03-31T20:02:37.000Z | venv/Lib/site-packages/nuitka/utils/ModuleNames.py | matthijsvanvliet/raytracing-python | 73d692b47330ab94eedde579a51063e3a907e92b | [
"MIT"
] | 1,348 | 2018-09-22T13:41:00.000Z | 2022-03-31T22:33:40.000Z | venv/Lib/site-packages/nuitka/utils/ModuleNames.py | matthijsvanvliet/raytracing-python | 73d692b47330ab94eedde579a51063e3a907e92b | [
"MIT"
] | 396 | 2018-09-28T15:37:03.000Z | 2022-03-29T10:52:09.000Z | # Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Module names are common string type, which deserves special operations.
These are used in Nuitka for module and package names in most places, and
allow to easily make checks on them.
"""
import fnmatch
import os
class ModuleName(str):
    """A dotted Python module name with package-aware helpers.

    Behaves like ``str`` but rejects a few raw string APIs (``split``,
    ``startswith``, ``endswith``) in favor of explicit, intention-revealing
    methods.
    """

    def __init__(self, value):
        # Relative path fragments must never appear in a module name.
        assert ".." not in str(value), value

        # No-op on the immutable str base; kept for symmetry.
        str.__init__(value)

    @staticmethod
    def makeModuleNameInPackage(module_name, package_name):
        """Create a module name in a package.

        Args:
            - module_name (str or ModuleName) module name to put below the package
            - package_name (str or ModuleName or None) package to put below

        Returns:
            Module name "package_name.module_name" or if "package_name" is None
            then simply "module_name".

        Notes:
            Prefer this factory function over manually duplicating the pattern
            behind it.
        """
        if package_name is None:
            return ModuleName(module_name)

        return ModuleName("%s.%s" % (package_name, module_name))

    def __repr__(self):
        return "<ModuleName %s>" % str(self)

    def asString(self):
        """Get a simply str value.

        Notes:
            This should only be used to create constant values for code
            generation, there is no other reason to lower the type of
            these values otherwise.
        """
        return str(self)

    def asPath(self):
        """Get the relative filesystem path for this module name."""
        return str(self).replace(".", os.path.sep)

    def getPackageName(self):
        """Get the package name if any.

        Returns:
            ModuleName of the containing package or None if already
            top level.
        """
        return self.splitModuleBasename()[0]

    def getTopLevelPackageName(self):
        """Get the top level package name.

        Returns:
            ModuleName of the top level name.
        """
        # Walk up the package chain iteratively until no parent remains.
        current = self
        while True:
            parent = current.getPackageName()
            if parent is None:
                return current
            current = parent

    def getBasename(self):
        """Get leaf name of the module without package part.

        Returns:
            ModuleName without package.
        """
        return self.splitModuleBasename()[1]

    def splitModuleBasename(self):
        """Split a module into package name and module name."""
        if "." not in self:
            return None, self

        head, _sep, tail = str(self).rpartition(".")
        return ModuleName(head), ModuleName(tail)

    def splitPackageName(self):
        """Split a module into the top level package name and remaining module name."""
        if "." not in self:
            return None, self

        head, _sep, tail = str(self).partition(".")
        return ModuleName(head), ModuleName(tail)

    def hasNamespace(self, package_name):
        """Check if this name is the given package or contained in it."""
        return self == package_name or self.isBelowNamespace(package_name)

    def hasOneOfNamespaces(self, *package_names):
        """Check if a module name is below one of many namespaces.

        Args:
            - package_names: Star argument that allows also lists and tuples

        Returns:
            bool - module name is below one of the packages.
        """
        return any(
            self.hasOneOfNamespaces(*candidate)
            if type(candidate) in (tuple, list)
            else self.hasNamespace(candidate)
            for candidate in package_names
        )

    def isBelowNamespace(self, package_name):
        assert type(package_name) in (str, ModuleName), package_name

        # Avoid startswith on these.
        return str(self).startswith(package_name + ".")

    def getChildNamed(self, *args):
        """Get a module name below this one, extended by the given parts."""
        return ModuleName(".".join([self] + list(args)))

    def matchesToShellPatterns(self, patterns):
        """Match a module name to a list of patterns

        Args:
            patterns:
                List of patterns that comply with fnmatch.fnmatch description
                or also is below the package. So "*.tests" will matches to also
                "something.tests.MyTest", thereby allowing to match whole
                packages with one pattern only.
        Returns:
            Tuple of two values, where the first value is the result, second value
            explains which pattern matched and how.
        """
        own_name = self.asString()

        for pattern in patterns:
            if self == pattern:
                return True, "is exact match of %r" % pattern
            if self.isBelowNamespace(pattern):
                return True, "is package content of %r" % pattern
            if fnmatch.fnmatch(own_name, pattern):
                return True, "matches pattern %r" % pattern
            if fnmatch.fnmatch(own_name, pattern + ".*"):
                return True, "is package content of match to pattern %r" % pattern

        return False, None

    # Reject APIs being used. TODO: Maybe make this a decorator for reuse.
    # TODO: Add rsplit and subscript operations too.
    for _func_name in ("split", "startswith", "endswith"):
        code = """\
def %(func_name)s(*args, **kwargs):
    from nuitka.Errors import NuitkaCodeDeficit
    raise NuitkaCodeDeficit('''
Do not use %(func_name)s on ModuleName objects, use e.g.
.hasNamespace(),
.getBasename(),
.getTopLevelPackageName()
.hasOneOfNamespaces

Check API documentation of nuitka.utils.ModuleNames.ModuleName
''')
""" % {
            "func_name": _func_name
        }

        exec(code)  # Avoid code duplication, pylint: disable=exec-used
| 31.975845 | 87 | 0.618976 |
e1abf5448da57a663bed5092015fff0940ab95af | 41,910 | py | Python | larch/wxxas/xasnorm_panel.py | dryabov/xraylarch | 0c376a31f057a066ae15976d5f7215e96ac47b91 | [
"BSD-2-Clause"
] | 1 | 2019-11-29T20:51:55.000Z | 2019-11-29T20:51:55.000Z | larch/wxxas/xasnorm_panel.py | maurov/xraylarch | b76f2ce29b6d183f69a7586ea8daccbe0a89ace3 | [
"BSD-2-Clause"
] | null | null | null | larch/wxxas/xasnorm_panel.py | maurov/xraylarch | b76f2ce29b6d183f69a7586ea8daccbe0a89ace3 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""
XANES Normalization panel
"""
import os
import time
import wx
import numpy as np
from functools import partial
from xraydb import guess_edge, atomic_number
from lmfit.printfuncs import gformat
from larch.math import index_of
from larch.xafs.xafsutils import guess_energy_units
from larch.wxlib import (BitmapButton, FloatCtrl, FloatSpin, get_icon,
SimpleText, pack, Button, HLine, Choice, Check,
GridPanel, CEN, RIGHT, LEFT, plotlabels)
from larch.wxlib.plotter import last_cursor_pos
from .xas_dialogs import EnergyUnitsDialog
from .taskpanel import TaskPanel, autoset_fs_increment
from larch.xray import atomic_symbols
ATSYMS = ['?'] + atomic_symbols
EDGES = ['K', 'L3', 'L2', 'L1', 'M5']
np.seterr(all='ignore')
PLOTOPTS_1 = dict(style='solid', linewidth=3, marker='None', markersize=4)
PLOTOPTS_2 = dict(style='short dashed', linewidth=2, zorder=3,
marker='None', markersize=4)
PLOTOPTS_D = dict(style='solid', linewidth=2, zorder=2,
side='right', marker='None', markersize=4)
PlotOne_Choices = {'Raw \u03BC(E)': 'mu',
'Normalized \u03BC(E)': 'norm',
'\u03BC(E) + Pre-/Post-edge': 'prelines',
'Flattened \u03BC(E)': 'flat',
'\u03BC(E) + MBACK \u03BC(E)': 'mback_norm',
'MBACK + Poly Normalized': 'mback_poly',
'd\u03BC(E)/dE': 'dmude',
'Raw \u03BC(E) + d\u03BC(E)/dE': 'mu+dmude',
'Normalized \u03BC(E) + d\u03BC(E)/dE': 'norm+dnormde',
'd^2\u03BC(E)/dE^2': 'd2mude',
'Normalized \u03BC(E) + d^2\u03BC(E)/dE^2': 'norm+d2normde',
}
PlotSel_Choices = {'Raw \u03BC(E)': 'mu',
'Normalized \u03BC(E)': 'norm',
'Flattened \u03BC(E)': 'flat',
'd\u03BC(E)/dE (raw)': 'dmude',
'd\u03BC(E)/dE (normalized)': 'dnormde',
'd^2\u03BC(E)/dE^2': 'd2normde'}
Plot_EnergyRanges = {'full E range': None,
'E0 -20:+80eV': (-20, 80),
'E0 -30:+120eV': (-30, 120),
'E0 -50:+250eV': (-50, 250),
'E0 -100:+500eV': (-100, 500)}
PlotOne_Choices_nonxas = {'Raw Data': 'mu',
'Scaled Data': 'norm',
'Derivative': 'dmude',
'Data + Derivative': 'norm+dmude'}
PlotSel_Choices_nonxas = {'Raw Data': 'mu',
'Scaled Data': 'norm',
'Derivative': 'dmude'}
Nnorm_choices = {None:'auto', 0:'constant', 1:'linear', 2:'quadratic', 3:'cubic'}
Nnorm_names = {'auto':None, 'constant':0, 'linear':1, 'quadratic':2, 'cubic':3}
defaults = dict(e0=0, edge_step=None, auto_step=True, auto_e0=True,
show_e0=True, pre1=None, pre2=None, norm1=None, norm2=None,
norm_method='polynomial', edge='K', atsym='?',
nvict=0, nnorm=None, scale=1, energy_ref=None)
def is_xasgroup(dgroup):
    """Return True if the data group's datatype marks it as XAS data
    (any value starting with 'xa'); groups without a datatype default
    to 'raw' and are not XAS."""
    datatype = getattr(dgroup, 'datatype', 'raw')
    return datatype.startswith('xa')
class XASNormPanel(TaskPanel):
"""XAS normalization Panel"""
    def __init__(self, parent, controller=None, **kws):
        """Initialize the normalization task panel; configuration is stored
        per-group under the 'xasnorm_config' attribute."""
        TaskPanel.__init__(self, parent, controller,
                           configname='xasnorm_config',
                           title='XAS Normalization',
                           config=defaults, **kws)
    def build_display(self):
        """Create all widgets and lay out the panel grid.

        NOTE(review): layout depends on the exact order of panel.Add calls
        (newrow/dcol grid placement) -- do not reorder.
        """
        panel = self.panel
        self.wids = {}
        self.last_plot_type = 'one'

        # plot-mode selectors for single-group / multi-group plots
        self.plotone_op = Choice(panel, choices=list(PlotOne_Choices.keys()),
                                 action=self.onPlotOne, size=(200, -1))
        self.plotsel_op = Choice(panel, choices=list(PlotSel_Choices.keys()),
                                 action=self.onPlotSel, size=(200, -1))
        self.plot_erange = Choice(panel, choices=list(Plot_EnergyRanges.keys()),
                                  action=self.onPlotEither, size=(120, -1))
        self.plot_erange.SetSelection(0)
        self.plotone_op.SetSelection(1)
        self.plotsel_op.SetSelection(1)

        plot_one = Button(panel, 'Plot Current Group', size=(170, -1),
                          action=self.onPlotOne)
        plot_sel = Button(panel, 'Plot Selected Groups', size=(170, -1),
                          action=self.onPlotSel)

        # sub-panel holding the two E0 checkboxes side by side
        e0panel = wx.Panel(panel)
        self.wids['auto_e0'] = Check(e0panel, default=True, label='auto?',
                                     action=self.onSet_XASE0)
        self.wids['showe0'] = Check(e0panel, default=True, label='show?',
                                    action=self.onSet_XASE0)
        sx = wx.BoxSizer(wx.HORIZONTAL)
        sx.Add(self.wids['auto_e0'], 0, LEFT, 4)
        sx.Add(self.wids['showe0'], 0, LEFT, 4)
        pack(e0panel, sx)

        self.wids['energy_ref'] = Choice(panel, choices=['None'],
                                         action=self.onEnergyRef, size=(200, -1))

        self.wids['auto_step'] = Check(panel, default=True, label='auto?',
                                       action=self.onNormMethod)
        self.wids['nvict'] = Choice(panel, choices=('0', '1', '2', '3'),
                                    size=(100, -1), action=self.onNormMethod,
                                    default=0)
        self.wids['nnorm'] = Choice(panel, choices=list(Nnorm_choices.values()),
                                    size=(100, -1), action=self.onNormMethod,
                                    default=0)

        # float-spin controls for pre-edge / normalization energy ranges
        opts = {'size': (100, -1), 'digits': 2, 'increment': 5.0,
                'action': self.onSet_Ranges}
        xas_pre1 = self.add_floatspin('pre1', value=defaults['pre1'], **opts)
        xas_pre2 = self.add_floatspin('pre2', value=defaults['pre2'], **opts)
        xas_norm1 = self.add_floatspin('norm1', value=defaults['norm1'], **opts)
        xas_norm2 = self.add_floatspin('norm2', value=defaults['norm2'], **opts)

        opts = {'digits': 3, 'increment': 0.1, 'value': 0}
        plot_voff = self.add_floatspin('plot_voff', with_pin=False,
                                       size=(80, -1),
                                       action=self.onVoffset, **opts)

        xas_e0 = self.add_floatspin('e0', action=self.onSet_XASE0Val, **opts)
        xas_step = self.add_floatspin('step', action=self.onSet_XASStep,
                                      with_pin=False, min_val=0.0, **opts)

        opts['value'] = 1.0
        scale = self.add_floatspin('scale', action=self.onSet_Scale, **opts)

        self.wids['norm_method'] = Choice(panel, choices=('polynomial', 'mback'), # , 'area'),
                                          size=(120, -1), action=self.onNormMethod)
        self.wids['norm_method'].SetSelection(0)
        self.wids['atsym'] = Choice(panel, choices=ATSYMS, size=(75, -1))
        self.wids['edge'] = Choice(panel, choices=EDGES, size=(60, -1))

        self.wids['is_frozen'] = Check(panel, default=False, label='Freeze Group',
                                       action=self.onFreezeGroup)

        saveconf = Button(panel, 'Save as Default Settings', size=(200, -1),
                          action=self.onSaveConfigBtn)

        use_auto = Button(panel, 'Use Default Settings',
                          size=(200, -1),
                          action=self.onAutoNorm)
        copy_auto = Button(panel, 'Copy',
                           size=(60, -1), action=self.onCopyAuto)

        def CopyBtn(name):
            # helper: 'Copy' button that pushes parameter `name` to selected groups
            return Button(panel, 'Copy', size=(60, -1),
                          action=partial(self.onCopyParam, name))

        add_text = self.add_text
        HLINEWID = 575
        panel.Add(SimpleText(panel, 'XAS Pre-edge subtraction and Normalization',
                             size=(350, -1), **self.titleopts), style=LEFT, dcol=4)

        # row: multi-group plot controls
        panel.Add(plot_sel, newrow=True)
        panel.Add(self.plotsel_op, dcol=3)
        panel.Add(SimpleText(panel, 'Y Offset:'), style=RIGHT)
        panel.Add(plot_voff, style=RIGHT)

        # row: single-group plot controls
        panel.Add(plot_one, newrow=True)
        panel.Add(self.plotone_op, dcol=3)
        panel.Add(self.plot_erange, dcol=1)

        panel.Add(HLine(panel, size=(HLINEWID, 3)), dcol=6, newrow=True)

        add_text('Non-XAS Data Scale:')
        panel.Add(scale, dcol=2)
        panel.Add(SimpleText(panel, 'Copy to Selected Groups:'),
                  style=RIGHT, dcol=3)

        panel.Add(HLine(panel, size=(HLINEWID, 3)), dcol=6, newrow=True)

        add_text('XAS Data:')
        panel.Add(use_auto, dcol=4)
        panel.Add(copy_auto, dcol=1, style=RIGHT)

        add_text('Element and Edge: ', newrow=True)
        panel.Add(self.wids['atsym'])
        panel.Add(self.wids['edge'], dcol=3)
        panel.Add(CopyBtn('atsym'), dcol=1, style=RIGHT)

        add_text('Energy Reference Group: ')
        panel.Add(self.wids['energy_ref'], dcol=4)
        panel.Add(CopyBtn('energy_ref'), dcol=1, style=RIGHT)

        add_text('E0 : ')
        panel.Add(xas_e0)
        panel.Add(e0panel, dcol=3)
        panel.Add(CopyBtn('xas_e0'), dcol=1, style=RIGHT)

        add_text('Edge Step: ')
        panel.Add(xas_step)
        panel.Add(self.wids['auto_step'], dcol=3)
        panel.Add(CopyBtn('xas_step'), dcol=1, style=RIGHT)

        panel.Add((5, 5), newrow=True)
        panel.Add(HLine(panel, size=(HLINEWID, 3)), dcol=6, newrow=True)

        add_text('Pre-edge range: ')
        panel.Add(xas_pre1)
        add_text(' : ', newrow=False)
        panel.Add(xas_pre2, dcol=2)
        panel.Add(CopyBtn('xas_pre'), dcol=1, style=RIGHT)

        panel.Add(SimpleText(panel, 'Victoreen order:'), newrow=True)
        panel.Add(self.wids['nvict'], dcol=4)

        panel.Add((5, 5), newrow=True)
        panel.Add(HLine(panel, size=(HLINEWID, 3)), dcol=6, newrow=True)

        add_text('Normalization method: ')
        panel.Add(self.wids['norm_method'], dcol=4)
        panel.Add(CopyBtn('xas_norm'), dcol=1, style=RIGHT)

        add_text('Normalization range: ')
        panel.Add(xas_norm1)
        add_text(' : ', newrow=False)
        panel.Add(xas_norm2, dcol=2)

        panel.Add(SimpleText(panel, 'Polynomial Type:'), newrow=True)
        panel.Add(self.wids['nnorm'], dcol=4)

        panel.Add(HLine(panel, size=(HLINEWID, 3)), dcol=6, newrow=True)
        panel.Add((5, 5), newrow=True)

        panel.Add(self.wids['is_frozen'], newrow=True)
        panel.Add(saveconf, dcol=5)
        panel.Add((5, 5), newrow=True)
        panel.Add(HLine(panel, size=(HLINEWID, 3)), dcol=6, newrow=True)

        panel.pack()

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add((5, 5), 0, LEFT, 3)
        sizer.Add(panel, 0, LEFT, 3)
        sizer.Add((5, 5), 0, LEFT, 3)
        pack(self, sizer)
    def get_config(self, dgroup=None):
        """custom get_config to possibly inherit from Athena settings

        Priority: values stored on the group's xasnorm_config, then values
        inherited from Athena bkg_params/mback_params, then guesses from e0.
        The (possibly updated) config dict is stored back on the group.
        """
        if dgroup is None:
            dgroup = self.controller.get_group()
        if dgroup is None:
            return self.get_defaultconfig()
        if hasattr(dgroup, self.configname):
            conf = getattr(dgroup, self.configname)
        else:
            conf = self.get_defaultconfig()
        if hasattr(dgroup, 'bkg_params'): # from Athena
            for attr in ('e0', 'pre1', 'pre2', 'nnorm'):
                conf[attr] = getattr(dgroup.bkg_params, attr, conf[attr])
            # Athena uses 'nor1'/'nor2' for the normalization range
            for attr, aattr in (('norm1', 'nor1'), ('norm2', 'nor2')):
                conf[attr] = getattr(dgroup.bkg_params, aattr, conf[attr])
            # Athena 'fixstep' is 1.0 when the step is fixed -> auto_step off
            conf['auto_step'] = (float(getattr(dgroup.bkg_params, 'fixstep', 0.0))< 0.5)
            conf['edge_step'] = getattr(dgroup.bkg_params, 'step', conf['edge_step'])

        if conf['edge_step'] is None:
            conf['edge_step'] = getattr(dgroup, 'edge_step', conf['edge_step'])

        conf['atsym'] = getattr(dgroup, 'atsym', conf['atsym'])
        conf['edge'] = getattr(dgroup,'edge', conf['edge'])
        conf['energy_ref'] = getattr(dgroup,'energy_ref', conf['energy_ref'])
        # a group is its own energy reference by default
        if conf['energy_ref'] in (None, 'None'):
            conf['energy_ref'] = dgroup.groupname

        # guess element/edge from e0 when still undetermined
        if hasattr(dgroup, 'e0') and conf['atsym'] == '?':
            atsym, edge = guess_edge(dgroup.e0)
            conf['atsym'] = atsym
            conf['edge'] = edge

        if hasattr(dgroup, 'mback_params'):
            conf['atsym'] = getattr(dgroup.mback_params, 'atsym', conf['atsym'])
            conf['edge'] = getattr(dgroup.mback_params, 'edge', conf['edge'])

        setattr(dgroup, self.configname, conf)
        return conf
    def fill_form(self, dgroup):
        """fill in form from a data group

        Widget events are suppressed via skip_process while values are
        pushed into the controls; XAS and non-XAS groups enable different
        widget subsets.
        """
        opts = self.get_config(dgroup)
        self.skip_process = True
        if is_xasgroup(dgroup):
            # restore the full XAS plot choices if a non-XAS group was shown
            if self.plotone_op.GetCount() != len(PlotOne_Choices.keys()):
                self.plotone_op.SetChoices(list(PlotOne_Choices.keys()))
                self.plotone_op.SetSelection(1)
            if self.plotsel_op.GetCount() != len(PlotSel_Choices.keys()):
                self.plotsel_op.SetChoices(list(PlotSel_Choices.keys()))
                self.plotsel_op.SetSelection(1)

            groupnames = list(self.controller.file_groups.keys())
            self.wids['energy_ref'].SetChoices(groupnames)
            eref = opts.get('energy_ref', dgroup.groupname)
            # energy_ref may be stored as display name or group name
            for key, val in self.controller.file_groups.items():
                if eref in (val, key):
                    self.wids['energy_ref'].SetStringSelection(key)

            self.wids['e0'].SetValue(opts['e0'])
            edge_step = opts.get('edge_step', None)
            if edge_step is None:
                edge_step = 1.0

            if hasattr(dgroup, 'e0') and opts['atsym'] == '?':
                atsym, edge = guess_edge(dgroup.e0)
                opts['atsym'] = atsym
                opts['edge'] = edge

            self.wids['step'].SetValue(edge_step)
            # match spin increment to the magnitude of the step
            autoset_fs_increment(self.wids['step'], edge_step)
            for attr in ('pre1', 'pre2', 'norm1', 'norm2'):
                val = opts.get(attr, None)
                if val is not None:
                    self.wids[attr].SetValue(val)

            self.set_nnorm_widget(opts.get('nnorm'))

            self.wids['nvict'].SetSelection(opts['nvict'])
            self.wids['showe0'].SetValue(opts['show_e0'])
            self.wids['auto_e0'].SetValue(opts['auto_e0'])
            self.wids['auto_step'].SetValue(opts['auto_step'])
            self.wids['edge'].SetStringSelection(opts['edge'].title())
            self.wids['atsym'].SetStringSelection(opts['atsym'].title())
            self.wids['norm_method'].SetStringSelection(opts['norm_method'].lower())
            for attr in ('pre1', 'pre2', 'norm1', 'norm2', 'nnorm', 'edge',
                         'atsym', 'step', 'norm_method'):
                self.wids[attr].Enable()
            self.wids['scale'].Disable()
        else:
            # non-XAS data: only the simple scale control applies
            self.plotone_op.SetChoices(list(PlotOne_Choices_nonxas.keys()))
            self.plotsel_op.SetChoices(list(PlotSel_Choices_nonxas.keys()))
            self.wids['scale'].SetValue(opts['scale'])
            for attr in ('pre1', 'pre2', 'norm1', 'norm2', 'nnorm', 'edge',
                         'atsym', 'step', 'norm_method'):
                self.wids[attr].Disable()
            self.wids['scale'].Enable()

        frozen = opts.get('is_frozen', False)
        if hasattr(dgroup, 'is_frozen'):
            frozen = dgroup.is_frozen
        self.wids['is_frozen'].SetValue(frozen)
        self._set_frozen(frozen)
        # re-enable event processing only after pending wx events settle
        wx.CallAfter(self.unset_skip_process)
def set_nnorm_widget(self, nnorm=None):
if nnorm is None:
nnorm_str = 'auto'
else:
try:
nnorm = int(nnorm)
except ValueError:
nnorm = None
nnorm_str = Nnorm_choices.get(nnorm, 'auto')
self.wids['nnorm'].SetStringSelection(nnorm_str)
    def unset_skip_process(self):
        """Re-enable reprocessing after fill_form has finished (via wx.CallAfter)."""
        self.skip_process = False
    def read_form(self):
        "read form, return dict of values"
        form_opts = {}
        form_opts['e0'] = self.wids['e0'].GetValue()
        form_opts['edge_step'] = self.wids['step'].GetValue()
        # range values of 0 mean "unset" and are returned as None
        for attr in ('pre1', 'pre2', 'norm1', 'norm2'):
            val = self.wids[attr].GetValue()
            if val == 0: val = None
            form_opts[attr] = val
        # map displayed polynomial label back to numeric order (None = auto)
        form_opts['nnorm'] = Nnorm_names.get(self.wids['nnorm'].GetStringSelection(), None)
        form_opts['nvict'] = int(self.wids['nvict'].GetSelection())
        form_opts['plotone_op'] = self.plotone_op.GetStringSelection()
        form_opts['plotsel_op'] = self.plotsel_op.GetStringSelection()
        form_opts['plot_voff'] = self.wids['plot_voff'].GetValue()
        form_opts['show_e0'] = self.wids['showe0'].IsChecked()
        form_opts['auto_e0'] = self.wids['auto_e0'].IsChecked()
        form_opts['auto_step'] = self.wids['auto_step'].IsChecked()
        form_opts['norm_method'] = self.wids['norm_method'].GetStringSelection().lower()
        form_opts['edge'] = self.wids['edge'].GetStringSelection().title()
        form_opts['atsym'] = self.wids['atsym'].GetStringSelection().title()
        form_opts['scale'] = self.wids['scale'].GetValue()
        form_opts['energy_ref'] = self.wids['energy_ref'].GetStringSelection()
        return form_opts
    def onNormMethod(self, evt=None):
        """Handle a change of normalization method; for 'mback' an element/edge
        is required, so guess one from e0 if still undetermined, then reprocess."""
        method = self.wids['norm_method'].GetStringSelection().lower()
        self.update_config({'norm_method': method})
        if method.startswith('mback'):
            dgroup = self.controller.get_group()
            cur_elem = self.wids['atsym'].GetStringSelection()
            # 'H' is the placeholder first entry: element not yet chosen
            if hasattr(dgroup, 'e0') and cur_elem == 'H':
                atsym, edge = guess_edge(dgroup.e0)
                self.wids['edge'].SetStringSelection(edge)
                self.wids['atsym'].SetStringSelection(atsym)
                self.update_config({'edge': edge, 'atsym': atsym})
        time.sleep(0.01)
        wx.CallAfter(self.onReprocess)
def _set_frozen(self, frozen):
try:
dgroup = self.controller.get_group()
dgroup.is_frozen = frozen
except:
pass
for wattr in ('e0', 'step', 'pre1', 'pre2', 'norm1', 'norm2',
'nvict', 'nnorm', 'showe0', 'auto_e0', 'auto_step',
'norm_method', 'edge', 'atsym'):
self.wids[wattr].Enable(not frozen)
    def onFreezeGroup(self, evt=None):
        """Handle the 'Freeze Group' checkbox: (un)freeze the current group."""
        self._set_frozen(evt.IsChecked())
    def onEnergyRef(self, evt=None):
        """Handle selection of the energy-reference group: store the selected
        group's internal name in the current group's configuration."""
        dgroup = self.controller.get_group()
        eref = self.wids['energy_ref'].GetStringSelection()
        # map displayed file name to the internal group name
        gname = self.controller.file_groups[eref]
        dgroup.xasnorm_config['energy_ref'] = gname
        self.update_config({'energy_ref': gname}, dgroup=dgroup)
def onPlotEither(self, evt=None):
if self.last_plot_type == 'multi':
self.onPlotSel(evt=evt)
else:
self.onPlotOne(evt=evt)
    def onPlotOne(self, evt=None):
        """Plot the currently selected group and remember 'one' as last plot mode."""
        self.last_plot_type = 'one'
        self.plot(self.controller.get_group())
    def onVoffset(self, evt=None):
        """Handle a change of the vertical plot offset: redraw the multi-group plot."""
        time.sleep(0.01)
        wx.CallAfter(self.onPlotSel)
    def onPlotSel(self, evt=None):
        """Plot all checked groups together, stacked with a vertical offset."""
        newplot = True
        self.last_plot_type = 'multi'
        group_ids = self.controller.filelist.GetCheckedStrings()
        if len(group_ids) < 1:
            return
        # use the last checked group to decide XAS vs non-XAS plot choices
        last_id = group_ids[-1]

        groupname = self.controller.file_groups[str(last_id)]
        dgroup = self.controller.get_group(groupname)

        plot_choices = PlotSel_Choices
        if not is_xasgroup(dgroup):
            plot_choices = PlotSel_Choices_nonxas

        ytitle = self.plotsel_op.GetStringSelection()
        yarray_name = plot_choices[ytitle]
        ylabel = getattr(plotlabels, yarray_name, ytitle)

        # 'norm' may be backed by a method-specific array
        if yarray_name == 'norm':
            norm_method = self.wids['norm_method'].GetStringSelection().lower()
            if norm_method.startswith('mback'):
                yarray_name = 'norm_mback'
                ylabel = "%s (MBACK)" % ylabel
            elif norm_method.startswith('area'):
                yarray_name = 'norm_area'
                ylabel = "%s (Area)" % ylabel

        voff = self.wids['plot_voff'].GetValue()
        for ix, checked in enumerate(group_ids):
            # each successive trace is shifted up by ix * voff
            yoff = ix * voff
            groupname = self.controller.file_groups[str(checked)]
            dgroup = self.controller.get_group(groupname)
            plot_yarrays = [(yarray_name, PLOTOPTS_1, dgroup.filename)]
            if dgroup is not None:
                dgroup.plot_extras = []
                # delay_draw: draw once after all traces have been added
                self.plot(dgroup, title='', new=newplot, multi=True,
                          yoff=yoff, plot_yarrays=plot_yarrays,
                          with_extras=False, delay_draw=True)
                newplot = False

        ppanel = self.controller.get_display(stacked=False).panel
        ppanel.conf.show_legend=True
        ppanel.conf.draw_legend()
        ppanel.unzoom_all()
def onAutoNorm(self, evt=None):
dgroup = self.controller.get_group()
try:
norm2 = max(dgroup.energy) - dgroup.e0
norm1 = 5.0*int(norm2/15.0)
nnorm = 2
if (norm2-norm1 < 350): nnorm = 1
if (norm2-norm1 < 50): nnorm = 0
except:
nnorm = None
self.wids['auto_step'].SetValue(1)
self.wids['auto_e0'].SetValue(1)
self.wids['nvict'].SetSelection(0)
self.wids['pre1'].SetValue(0)
self.wids['pre2'].SetValue(0)
self.wids['norm1'].SetValue(0)
self.wids['norm2'].SetValue(0)
if nnorm is not None:
self.set_nnorm_widget(nnorm)
self.wids['norm_method'].SetSelection(0)
self.onReprocess()
    def onCopyAuto(self, evt=None):
        """Apply automatic default normalization settings to all checked
        groups (except the current and frozen ones) and reprocess them."""
        opts = dict(pre1=0, pre2=0, nvict=0, norm1=0, norm2=0,
                    norm_method='polynomial', nnorm=2, auto_e0=1,
                    auto_step=1)
        for checked in self.controller.filelist.GetCheckedStrings():
            groupname = self.controller.file_groups[str(checked)]
            grp = self.controller.get_group(groupname)
            if grp != self.controller.group and not grp.is_frozen:
                self.update_config(opts, dgroup=grp)
                self.fill_form(grp)
                self.process(grp, force=True)
    def onSaveConfigBtn(self, evt=None):
        """Save the current form values as the default normalization settings."""
        conf = self.get_config()
        conf.update(self.read_form())
        self.set_defaultconfig(conf)
    def onCopyParam(self, name=None, evt=None):
        """Copy one named set of parameters from the current group to all
        checked, unfrozen groups and reprocess them.

        Args:
            name (str): which parameter set to copy ('xas_e0', 'xas_step',
                'xas_pre', 'atsym', 'xas_norm', or 'energy_ref').
        """
        conf = self.get_config()
        form = self.read_form()
        conf.update(form)
        dgroup = self.controller.get_group()
        self.update_config(conf)
        self.fill_form(dgroup)
        opts = {}
        name = str(name)

        def copy_attrs(*args):
            # collect the config entries to be pushed to the other groups
            for a in args:
                opts[a] = conf[a]
        if name == 'xas_e0':
            copy_attrs('e0', 'show_e0', 'auto_e0')
        elif name == 'xas_step':
            copy_attrs('edge_step', 'auto_step')
        elif name == 'xas_pre':
            copy_attrs('pre1', 'pre2', 'nvict')
        elif name == 'atsym':
            copy_attrs('atsym', 'edge')
        elif name == 'xas_norm':
            copy_attrs('norm_method', 'nnorm', 'norm1', 'norm2')
        elif name == 'energy_ref':
            copy_attrs('energy_ref')

        for checked in self.controller.filelist.GetCheckedStrings():
            groupname = self.controller.file_groups[str(checked)]
            grp = self.controller.get_group(groupname)
            if grp != self.controller.group and not grp.is_frozen:
                self.update_config(opts, dgroup=grp)
                self.fill_form(grp)
                self.process(grp, force=True)
def onSet_XASE0(self, evt=None, value=None):
"handle setting auto e0 / show e0"
auto_e0 = self.wids['auto_e0'].GetValue()
self.update_config({'e0': self.wids['e0'].GetValue(),
'auto_e0':self.wids['auto_e0'].GetValue()})
time.sleep(0.01)
wx.CallAfter(self.onReprocess)
    def onSet_XASE0Val(self, evt=None, value=None):
        """Handle a manual edit of e0: disable auto-e0 and reprocess."""
        # a hand-entered e0 turns automatic determination off
        self.wids['auto_e0'].SetValue(0)
        self.update_config({'e0': self.wids['e0'].GetValue(),
                            'auto_e0':self.wids['auto_e0'].GetValue()})
        time.sleep(0.01)
        wx.CallAfter(self.onReprocess)
    def onSet_XASStep(self, evt=None, value=None):
        """Handle a manual edit of the edge step: force it positive, disable
        auto-step, and reprocess."""
        edge_step = self.wids['step'].GetValue()
        # negative steps are not meaningful; show the absolute value
        if edge_step < 0:
            self.wids['step'].SetValue(abs(edge_step))
        self.wids['auto_step'].SetValue(0)
        self.update_config({'edge_step': abs(edge_step), 'auto_step': False})
        # keep the spin increment proportional to the step size
        autoset_fs_increment(self.wids['step'], abs(edge_step))
        time.sleep(0.01)
        wx.CallAfter(self.onReprocess)
def onSet_Scale(self, evt=None, value=None):
"handle setting non-XAFS scale value"
self.update_config({'scale': self.wids['scale'].GetValue()})
time.sleep(0.01)
wx.CallAfter(self.onReprocess)
def onSet_Ranges(self, evt=None, **kws):
conf = {}
for attr in ('pre1', 'pre2', 'norm1', 'norm2'):
conf[attr] = self.wids[attr].GetValue()
self.update_config(conf)
time.sleep(0.01)
wx.CallAfter(self.onReprocess)
    def onSelPoint(self, evt=None, opt='__', relative_e0=True, win=None):
        """
        get last selected point from a specified plot window
        and fill in the value for the widget defined by `opt`.

        by default it finds the latest cursor position from the
        cursor history of the first 20 plot windows.
        """
        # NOTE(review): `relative_e0` is never read in this body; the
        # pre/norm branch always subtracts e0 — confirm whether the
        # parameter is vestigial.
        if opt not in self.wids:
            return None
        _x, _y = last_cursor_pos(win=win, _larch=self.larch)
        if _x is None:
            return
        e0 = self.wids['e0'].GetValue()
        if opt == 'e0':
            # picking e0 directly also disables auto-e0
            self.wids['e0'].SetValue(_x)
            self.wids['auto_e0'].SetValue(0)
        elif opt in ('pre1', 'pre2', 'norm1', 'norm2'):
            # range widgets are stored relative to e0
            self.wids[opt].SetValue(_x-e0)
        time.sleep(0.01)
        wx.CallAfter(self.onReprocess)
def onReprocess(self, evt=None, value=None, **kws):
"handle request reprocess"
if self.skip_process:
return
try:
dgroup = self.controller.get_group()
except TypeError:
return
if not hasattr(dgroup, self.configname):
return
form = self.read_form()
self.process(dgroup=dgroup)
self.onPlotEither()
def make_dnormde(self, dgroup):
form = dict(group=dgroup.groupname)
self.larch_eval("{group:s}.dnormde={group:s}.dmude/{group:s}.edge_step".format(**form))
self.larch_eval("{group:s}.d2normde={group:s}.d2mude/{group:s}.edge_step".format(**form))
    def process(self, dgroup=None, force_mback=False, force=False, **kws):
        """ handle process (pre-edge/normalize) of XAS data from XAS form

        Builds and runs larch commands (pre_edge, mback_norm) for the
        group, then pushes the results back into the widgets and config.
        `force` overrides the skip_process guard; `force_mback` forces
        mback normalization regardless of the selected method.
        """
        # skip_process guards against re-entrant processing while the
        # widgets are being updated programmatically
        if self.skip_process and not force:
            return
        if dgroup is None:
            dgroup = self.controller.get_group()
        if dgroup is None:
            return
        self.skip_process = True
        conf = self.get_config(dgroup)
        dgroup.custom_plotopts = {}
        form = self.read_form()
        form['group'] = dgroup.groupname
        # refresh the energy-reference choice list from current file groups
        groupnames = list(self.controller.file_groups.keys())
        self.wids['energy_ref'].SetChoices(groupnames)
        eref = conf.get('energy_ref', dgroup.groupname)
        for key, val in self.controller.file_groups.items():
            if eref in (val, key):
                self.wids['energy_ref'].SetStringSelection(key)
        # non-XAS data: only apply a simple scale factor, no pre-edge work
        if not is_xasgroup(dgroup):
            self.skip_process = False
            dgroup.mu = dgroup.ydat * 1.0
            opts = {'group': dgroup.groupname, 'scale': conf.get('scale', 1.0)}
            self.larch_eval("{group:s}.scale = {scale:.8f}".format(**opts))
            self.larch_eval("{group:s}.norm = {scale:.8f}*{group:s}.ydat".format(**opts))
            return
        # ensure energies are in eV, prompting for a unit conversion if not
        en_units = getattr(dgroup, 'energy_units', None)
        if en_units is None:
            en_units = guess_energy_units(dgroup.energy)
        if en_units != 'eV':
            mono_dspace = getattr(dgroup, 'mono_dspace', 1)
            dlg = EnergyUnitsDialog(self.parent, dgroup.energy,
                                    unitname=en_units,
                                    dspace=mono_dspace)
            res = dlg.GetResponse()
            dlg.Destroy()
            if res.ok:
                en_units = res.units
                dgroup.mono_dspace = res.dspace
                dgroup.xdat = dgroup.energy = res.energy
        dgroup.energy_units = en_units
        e0 = form['e0']
        edge_step = form['edge_step']
        # build the argument list for the larch pre_edge() command;
        # manual e0 is used only when it lies inside the energy range
        copts = [dgroup.groupname]
        if not form['auto_e0']:
            if e0 < max(dgroup.energy) and e0 > min(dgroup.energy):
                copts.append("e0=%.4f" % float(e0))
        if not form['auto_step']:
            copts.append("step=%s" % gformat(float(edge_step)))
        for attr in ('pre1', 'pre2', 'nvict', 'nnorm', 'norm1', 'norm2'):
            if form[attr] is None:
                copts.append("%s=None" % attr)
            else:
                copts.append("%s=%.2f" % (attr, form[attr]))
        self.larch_eval("pre_edge(%s)" % (', '.join(copts)))
        # keep a copy of the polynomial-normalized data
        self.larch_eval("{group:s}.norm_poly = 1.0*{group:s}.norm".format(**form))
        # choose normalization method: polynomial (default), mback, or area
        norm_method = form['norm_method'].lower()
        form['normmeth'] = 'poly'
        if force_mback or norm_method.startswith('mback'):
            form['normmeth'] = 'mback'
            copts = [dgroup.groupname]
            copts.append("z=%d" % atomic_number(form['atsym']))
            copts.append("edge='%s'" % form['edge'])
            for attr in ('pre1', 'pre2', 'nvict', 'nnorm', 'norm1', 'norm2'):
                if form[attr] is None:
                    copts.append("%s=None" % attr)
                else:
                    copts.append("%s=%.2f" % (attr, form[attr]))
            self.larch_eval("mback_norm(%s)" % (', '.join(copts)))
            if form['auto_step']:
                norm_expr = """{group:s}.norm = 1.0*{group:s}.norm_{normmeth:s}
            {group:s}.edge_step = 1.0*{group:s}.edge_step_{normmeth:s}"""
                self.larch_eval(norm_expr.format(**form))
            else:
                # manual step: rescale the mback norm to the user's step
                norm_expr = """{group:s}.norm = 1.0*{group:s}.norm_{normmeth:s}
            {group:s}.norm *= {group:s}.edge_step_{normmeth:s}/{edge_step:.8f}"""
                self.larch_eval(norm_expr.format(**form))
        if norm_method.startswith('area'):
            form['normmeth'] = 'area'
            expr = """{group:s}.norm = 1.0*{group:s}.norm_{normmeth:s}
            {group:s}.edge_step = 1.0*{group:s}.edge_step_{normmeth:s}"""
            self.larch_eval(expr.format(**form))
        self.make_dnormde(dgroup)
        # push processed results back into the widgets and configuration
        if form['auto_e0']:
            self.wids['e0'].SetValue(dgroup.e0)
        if form['auto_step']:
            self.wids['step'].SetValue(dgroup.edge_step)
            autoset_fs_increment(self.wids['step'], dgroup.edge_step)
        self.wids['atsym'].SetStringSelection(dgroup.atsym)
        self.wids['edge'].SetStringSelection(dgroup.edge)
        self.set_nnorm_widget(dgroup.pre_edge_details.nnorm)
        for attr in ('e0', 'edge_step'):
            conf[attr] = getattr(dgroup, attr)
        for attr in ('pre1', 'pre2', 'norm1', 'norm2'):
            conf[attr] = val = getattr(dgroup.pre_edge_details, attr, None)
            if val is not None:
                self.wids[attr].SetValue(val)
        if hasattr(dgroup, 'mback_params'): # from mback
            conf['atsym'] = getattr(dgroup.mback_params, 'atsym')
            conf['edge'] = getattr(dgroup.mback_params, 'edge')
        self.update_config(conf, dgroup=dgroup)
        # re-enable processing after pending UI events settle
        wx.CallAfter(self.unset_skip_process)
    def get_plot_arrays(self, dgroup):
        """Set the plot metadata (plot_yarrays, axis labels, plot_extras)
        on `dgroup` according to the current plot-selection widgets,
        reprocessing the group first if required attributes are missing.
        """
        lab = plotlabels.norm
        if dgroup is None:
            return
        dgroup.plot_y2label = None
        dgroup.plot_xlabel = plotlabels.energy
        dgroup.plot_yarrays = [('norm', PLOTOPTS_1, lab)]
        # non-XAS data: compute simple derivatives/scaled arrays and
        # pick arrays from the non-XAS plot choices, then return early
        if not is_xasgroup(dgroup):
            pchoice = PlotOne_Choices_nonxas[self.plotone_op.GetStringSelection()]
            dgroup.plot_xlabel = 'x'
            dgroup.plot_ylabel = 'y'
            dgroup.plot_yarrays = [('ydat', PLOTOPTS_1, 'ydat')]
            dgroup.dmude = np.gradient(dgroup.ydat)/np.gradient(dgroup.xdat)
            dgroup.d2mude = np.gradient(dgroup.dmude)/np.gradient(dgroup.xdat)
            if not hasattr(dgroup, 'scale'):
                dgroup.scale = 1.0
            dgroup.norm = dgroup.ydat*dgroup.scale
            if pchoice == 'dmude':
                dgroup.plot_ylabel = 'dy/dx'
                dgroup.plot_yarrays = [('dmude', PLOTOPTS_1, 'dy/dx')]
            elif pchoice == 'd2mude':
                dgroup.plot_ylabel = 'd2y/dx2'
                dgroup.plot_yarrays = [('d2mude', PLOTOPTS_1, 'd2y/dx')]
            elif pchoice == 'norm':
                dgroup.plot_ylabel = 'scaled y'
                dgroup.plot_yarrays = [('norm', PLOTOPTS_1, 'y/scale')]
            elif pchoice == 'norm+dnormde':
                lab = plotlabels.norm
                dgroup.plot_y2label = 'dy/dx'
                dgroup.plot_yarrays = [('ydat', PLOTOPTS_1, 'y'),
                                       ('dnormde', PLOTOPTS_D, 'dy/dx')]
            elif pchoice == 'norm+d2normde':
                lab = plotlabels.norm
                dgroup.plot_y2label = 'd2y/dx2'
                dgroup.plot_yarrays = [('ydat', PLOTOPTS_1, 'y'),
                                       ('d2normde', PLOTOPTS_D, 'd2y/dx')]
            return
        # XAS data: select arrays/labels for the chosen plot mode
        req_attrs = ['e0', 'norm', 'dmude', 'd2mude', 'pre_edge']
        pchoice = PlotOne_Choices[self.plotone_op.GetStringSelection()]
        if pchoice in ('mu', 'norm', 'flat', 'dmude', 'd2mude'):
            lab = getattr(plotlabels, pchoice)
            dgroup.plot_yarrays = [(pchoice, PLOTOPTS_1, lab)]
        elif pchoice == 'prelines':
            dgroup.plot_yarrays = [('mu', PLOTOPTS_1, plotlabels.mu),
                                   ('pre_edge', PLOTOPTS_2, 'pre edge'),
                                   ('post_edge', PLOTOPTS_2, 'post edge')]
        elif pchoice == 'preedge':
            lab = r'pre-edge subtracted $\mu$'
            dgroup.pre_edge_sub = dgroup.norm * dgroup.edge_step
            dgroup.plot_yarrays = [('pre_edge_sub', PLOTOPTS_1, lab)]
        elif pchoice == 'mu+dmude':
            lab = plotlabels.mu
            lab2 = plotlabels.dmude
            dgroup.plot_yarrays = [('mu', PLOTOPTS_1, lab),
                                   ('dmude', PLOTOPTS_D, lab2)]
            dgroup.plot_y2label = lab2
        elif pchoice == 'mu+d2mude':
            lab = plotlabels.mu
            lab2 = plotlabels.d2mude
            dgroup.plot_yarrays = [('mu', PLOTOPTS_1, lab),
                                   ('d2mude', PLOTOPTS_D, lab2)]
            dgroup.plot_y2label = lab2
        elif pchoice == 'norm+dnormde':
            lab = plotlabels.norm
            lab2 = plotlabels.dmude + ' (normalized)'
            dgroup.plot_yarrays = [('norm', PLOTOPTS_1, lab),
                                   ('dnormde', PLOTOPTS_D, lab2)]
            dgroup.plot_y2label = lab2
        elif pchoice == 'norm+d2normde':
            lab = plotlabels.norm
            lab2 = plotlabels.d2mude + ' (normalized)'
            dgroup.plot_yarrays = [('norm', PLOTOPTS_1, lab),
                                   ('d2normde', PLOTOPTS_D, lab2)]
            dgroup.plot_y2label = lab2
        elif pchoice == 'mback_norm':
            req_attrs.append('mback_norm')
            lab = r'$\mu$'
            # mback arrays are computed lazily: force an mback process
            if not hasattr(dgroup, 'mback_mu'):
                self.process(dgroup=dgroup, force_mback=True)
            dgroup.plot_yarrays = [('mu', PLOTOPTS_1, lab),
                                   ('mback_mu', PLOTOPTS_2, r'tabulated $\mu(E)$')]
        elif pchoice == 'mback_poly':
            req_attrs.append('mback_norm')
            lab = plotlabels.norm
            if not hasattr(dgroup, 'mback_mu'):
                self.process(dgroup=dgroup, force_mback=True)
            dgroup.plot_yarrays = [('norm_mback', PLOTOPTS_1, 'mback'),
                                   ('norm_poly', PLOTOPTS_2, 'polynomial')]
        elif pchoice == 'area_norm':
            dgroup.plot_yarrays = [('norm_area', PLOTOPTS_1, 'area'),
                                   ('norm_poly', PLOTOPTS_2, 'polynomial')]
        dgroup.plot_ylabel = lab
        # reprocess if any required attribute is missing from the group
        needs_proc = False
        for attr in req_attrs:
            needs_proc = needs_proc or (not hasattr(dgroup, attr))
        if needs_proc:
            self.process(dgroup=dgroup, force=True)
        # first y-array doubles as the source for the e0 marker position
        y4e0 = dgroup.ydat = getattr(dgroup, dgroup.plot_yarrays[0][0], dgroup.mu)
        dgroup.plot_extras = []
        if self.wids['showe0'].IsChecked():
            ie0 = index_of(dgroup.energy, dgroup.e0)
            dgroup.plot_extras.append(('marker', dgroup.e0, y4e0[ie0], {}))
    def plot(self, dgroup, title=None, plot_yarrays=None, yoff=0,
             delay_draw=False, multi=False, new=True, zoom_out=True,
             with_extras=True, **kws):
        """Plot the arrays selected for `dgroup` on the plot panel.

        `new` starts a fresh plot (otherwise overplot); `multi` uses the
        multi-group plot selection; `yoff` offsets all y values;
        `with_extras` draws markers/vlines from dgroup.plot_extras.
        """
        if self.skip_plotting:
            return
        ppanel = self.controller.get_display(stacked=False).panel
        viewlims = ppanel.get_viewlimits()
        plotcmd = ppanel.oplot
        if new:
            plotcmd = ppanel.plot
        erange = Plot_EnergyRanges[self.plot_erange.GetStringSelection()]
        self.controller.set_plot_erange(erange)
        groupname = getattr(dgroup, 'groupname', None)
        if groupname is None:
            return
        if not hasattr(dgroup, 'xdat'):
            print("Cannot plot group ", groupname)
            # NOTE(review): execution falls through after this message;
            # a `return` here looks intended — confirm.
        # (re)process if any of the arrays needed for plotting is missing
        if ((getattr(dgroup, 'plot_yarrays', None) is None or
             getattr(dgroup, 'energy', None) is None or
             getattr(dgroup, 'mu', None) is None or
             getattr(dgroup, 'e0', None) is None or
             getattr(dgroup, 'dmude', None) is None or
             getattr(dgroup, 'd2mude', None) is None or
             getattr(dgroup, 'norm', None) is None)):
            self.process(dgroup=dgroup)
        self.get_plot_arrays(dgroup)
        if plot_yarrays is None and hasattr(dgroup, 'plot_yarrays'):
            plot_yarrays = dgroup.plot_yarrays
        popts = kws
        path, fname = os.path.split(dgroup.filename)
        if 'label' not in popts:
            popts['label'] = dgroup.plot_ylabel
        # keep current view limits unless the new data lies outside them
        zoom_out = (zoom_out or min(dgroup.xdat) >= viewlims[1] or
                    max(dgroup.xdat) <= viewlims[0] or
                    min(dgroup.ydat) >= viewlims[3] or
                    max(dgroup.ydat) <= viewlims[2])
        if not zoom_out:
            popts['xmin'] = viewlims[0]
            popts['xmax'] = viewlims[1]
            popts['ymin'] = viewlims[2]
            popts['ymax'] = viewlims[3]
        # an explicit energy range (relative to e0) overrides view limits
        if erange is not None and hasattr(dgroup, 'e0'):
            popts['xmin'] = dgroup.e0 + erange[0]
            popts['xmax'] = dgroup.e0 + erange[1]
        popts['xlabel'] = dgroup.plot_xlabel
        popts['ylabel'] = dgroup.plot_ylabel
        if getattr(dgroup, 'plot_y2label', None) is not None:
            popts['y2label'] = dgroup.plot_y2label
        plot_choices = PlotSel_Choices
        if not is_xasgroup(dgroup):
            plot_choices = PlotSel_Choices_nonxas
        if multi:
            # multi-group plotting uses the "plot selected" choice widget
            ylabel = self.plotsel_op.GetStringSelection()
            yarray_name = plot_choices[ylabel]
            if is_xasgroup(dgroup):
                ylabel = getattr(plotlabels, yarray_name, ylabel)
            popts['ylabel'] = ylabel
        plot_extras = None
        if new:
            if title is None:
                title = fname
            plot_extras = getattr(dgroup, 'plot_extras', None)
        popts['title'] = title
        popts['delay_draw'] = delay_draw
        if hasattr(dgroup, 'custom_plotopts'):
            popts.update(dgroup.custom_plotopts)
        popts['show_legend'] = len(plot_yarrays) > 1
        narr = len(plot_yarrays) - 1
        for i, pydat in enumerate(plot_yarrays):
            yaname, yopts, yalabel = pydat
            # print(" PLOT :: ", i, pydat)
            popts.update(yopts)
            if yalabel is not None:
                popts['label'] = yalabel
            # only the final trace triggers a redraw (unless delayed)
            popts['delay_draw'] = delay_draw or (i != narr)
            # lazily compute arrays that may not exist yet
            if yaname in ('dnormde', 'd2normde') and not hasattr(dgroup, yaname):
                self.make_dnormde(dgroup)
            if yaname == 'norm_mback' and not hasattr(dgroup, yaname):
                self.process(dgroup=dgroup, force=True, force_mback=True)
            plotcmd(dgroup.xdat, getattr(dgroup, yaname)+yoff, **popts)
            plotcmd = ppanel.oplot
        # draw extra annotations (e0 marker, vertical lines)
        if with_extras and plot_extras is not None:
            axes = ppanel.axes
            for etype, x, y, opts in plot_extras:
                if etype == 'marker':
                    xpopts = {'marker': 'o', 'markersize': 6,
                              'label': '_nolegend_',
                              'markerfacecolor': 'red',
                              'markeredgecolor': '#884444'}
                    xpopts.update(opts)
                    axes.plot([x], [y], **xpopts)
                elif etype == 'vline':
                    xpopts = {'ymin': 0, 'ymax': 1.0,
                              'label': '_nolegend_',
                              'color': '#888888'}
                    xpopts.update(opts)
                    axes.axvline(x, **xpopts)
        if not popts['delay_draw']:
            ppanel.canvas.draw()
| 40.610465 | 97 | 0.552732 |
028786be203cfab487178af46b72361a573dcec0 | 5,114 | py | Python | main.py | sagnik403/Marksheet-Generater-tkinter | a1c999f932060e6f3799c28a271ee9360df5c623 | [
"MIT"
] | null | null | null | main.py | sagnik403/Marksheet-Generater-tkinter | a1c999f932060e6f3799c28a271ee9360df5c623 | [
"MIT"
] | null | null | null | main.py | sagnik403/Marksheet-Generater-tkinter | a1c999f932060e6f3799c28a271ee9360df5c623 | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import messagebox
from PIL import Image, ImageDraw, ImageFont
import math
# ====================================================================================================================
# NOTE: the original `global m/p/c/t/a` statements here were removed as
# dead code -- a `global` declaration at module level is a no-op, since
# names assigned at module scope are already global.
# functions
def calc():
m = float(math1.get())
p = float(physics1.get())
c = float(chemistry1.get())
t = (m+p+c)
a = t/3
total1.insert(0,t)
avg1.insert(0,a)
if (a>=95):
grade1.insert(0,"O")
elif (a>=90 and a<95):
grade1.insert(0,"A+")
elif (a>=80 and a<90):
grade1.insert(0,"A")
elif (a>=70 and a<80):
grade1.insert(0,"B+")
elif (a>=60 and a<70):
grade1.insert(0,"B")
elif (a>=50 and a<60):
grade1.insert(0,"C")
elif (a>=40 and a<50):
grade1.insert(0,"P")
else:
grade1.insert(0,"Fail")
def delete():
math1.delete(0,'end')
physics1.delete(0,'end')
chemistry1.delete(0,'end')
total1.delete(0,'end')
avg1.delete(0,'end')
grade1.delete(0,'end')
t1.delete(0,'end')
t2.delete(0,'end')
t3.delete(0,'end')
def mg():
    """Render the entered student data and marks onto the 'ms.png'
    marksheet template, save the result named after the student, and
    open it in the default image viewer."""
    name = str(t1.get())
    class1 = str(t2.get())
    roll = str(t3.get())
    m1 = str(math1.get())
    p1 = str(physics1.get())
    c1 = str(chemistry1.get())
    totalm = str(total1.get())
    avgm = str(avg1.get())
    gradem = str(grade1.get())
    # template image expected in the working directory
    image = Image.open('ms.png')
    draw = ImageDraw.Draw(image)
    # fixed (x, y) pixel positions matching the ms.png template layout
    points1 = 120,65
    points2 = 120,115
    points3 = 120,155
    points4 = 115,265
    points5 = 180,265
    points6 = 250,265
    points7 = 415,265
    points8 = 480,265
    points9 = 770,265
    font1 = ImageFont.truetype("arial.ttf", 20)
    draw.text(points1,name,"black",font=font1)
    draw.text(points2,class1,"black",font=font1)
    draw.text(points3,roll,"black",font=font1)
    draw.text(points4,m1,"black",font=font1)
    draw.text(points5,p1,"black",font=font1)
    draw.text(points6,c1,"black",font=font1)
    draw.text(points7,totalm,"black",font=font1)
    draw.text(points8,avgm,"black",font=font1)
    draw.text(points9,gradem,"black",font=font1)
    # NOTE(review): hard-coded absolute Windows path -- breaks on any
    # other machine; consider making the output directory configurable.
    image.save(rf'C:\Users\User\Desktop\marksheet generater\marksheets\{t1.get()}.png')
    image.show()
# ====================================================================================================================
# Main window setup: fixed 800x500 window with an orange background.
win = Tk()
win.title("Marksheet Generater")
win.geometry("800x500")
win.iconbitmap(r"C:\Users\Public\Pictures\Sample Pictures\Treetog-Junior-Monitor-desktop.ico")
win.maxsize(800,500)
win.minsize(800,500)
win['bg'] = "dark orange"
# labels and texts
# NOTE(review): `Label(...).grid(...)` returns None, so l1/l2/l3 (and the
# other chained .grid()/.place() names below) hold None, not the widget.
l1 = Label(win,text="Student Name",font=("verdana",12,"bold"),borderwidth=5).grid(row=0,column=0,padx=20,pady=25)
t1 = Entry(win,borderwidth=7,width=20,font=("verdana 10 bold"))
t1.grid(row=0,column=1,padx=20,pady=25)
l2 = Label(win,text="Student Class",font=("verdana",12,"bold"),borderwidth=5).grid(row=1,column=0,padx=20,pady=25)
t2 = Entry(win,borderwidth=7,width=20,font=("verdana 10 bold"))
t2.grid(row=1,column=1,padx=20,pady=25)
l3 = Label(win,text="Student Roll",font=("verdana",12,"bold"),borderwidth=5).grid(row=2,column=0,padx=20,pady=25)
t3 = Entry(win,borderwidth=7,width=20,font=("verdana 10 bold"))
t3.grid(row=2,column=1,padx=20,pady=25)
# marks space: one entry per subject, absolutely positioned
heading = Label(win,text="Marks",font=("verdana",18,"bold"),fg="gold",bg="dark orange",borderwidth=5).place(x=575,y=0)
math = Label(win,text="Math",font=("verdana",12,"bold"),borderwidth=5).place(x=475,y=60)
math1 = Entry(win,borderwidth=7,width=15,font=("verdana 10 bold"))
math1.place(x=590,y=60)
physics = Label(win,text="Physics",font=("verdana",12,"bold"),borderwidth=5).place(x=475,y=120)
physics1 = Entry(win,borderwidth=7,width=15,font=("verdana 10 bold"))
physics1.place(x=590,y=120)
chemistry = Label(win,text="Chemistry",font=("verdana",12,"bold"),borderwidth=5).place(x=475,y=180)
chemistry1 = Entry(win,borderwidth=7,width=15,font=("verdana 10 bold"))
chemistry1.place(x=590,y=180)
# result space: total / average / grade output entries
total = Label(win,text="Total",font=("verdana",12,"bold"),borderwidth=5).place(x=80,y=300)
total1 = Entry(win,borderwidth=7,width=20,font=("verdana 10 bold"))
total1.place(x=200,y=300)
avg = Label(win,text="Avarage",font=("verdana",12,"bold"),borderwidth=5).place(x=80,y=360)
avg1 = Entry(win,borderwidth=7,width=20,font=("verdana 10 bold"))
avg1.place(x=200,y=360)
grade = Label(win,text="Grade",font=("verdana",12,"bold"),borderwidth=5).place(x=80,y=420)
grade1 = Entry(win,borderwidth=7,width=20,font=("verdana 10 bold"))
grade1.place(x=200,y=420)
# buttons wired to calc() / mg() / delete() defined above
calculate = Button(win,text="Calculate",width=12,borderwidth=5,font=("verdana 8 bold"),command=calc).place(x=600,y=260)
generate = Button(win,text="Generate",width=12,borderwidth=5,font=("verdana 8 bold"),command=mg).place(x=600,y=300)
clear = Button(win,text="Clear",width=12,borderwidth=5,font=("verdana 8 bold"),command=delete).place(x=600,y=340)
win.mainloop()
| 31.9625 | 120 | 0.609308 |
dc7ecb8b63a7e25bfd2378167dcb21e9fe36a327 | 13,356 | py | Python | tests/document/test_dynamic.py | shellcodesniper/mongoengine | d76cb345be98045cde0fa078569cc8021c0d0162 | [
"MIT"
] | null | null | null | tests/document/test_dynamic.py | shellcodesniper/mongoengine | d76cb345be98045cde0fa078569cc8021c0d0162 | [
"MIT"
] | null | null | null | tests/document/test_dynamic.py | shellcodesniper/mongoengine | d76cb345be98045cde0fa078569cc8021c0d0162 | [
"MIT"
] | null | null | null | import unittest
import pytest
from mongoengine import *
from tests.utils import MongoDBTestCase
__all__ = ("TestDynamicDocument",)
class TestDynamicDocument(MongoDBTestCase):
def setUp(self):
super(TestDynamicDocument, self).setUp()
class Person(DynamicDocument):
name = StringField()
meta = {"allow_inheritance": True}
Person.drop_collection()
self.Person = Person
def test_simple_dynamic_document(self):
"""Ensures simple dynamic documents are saved correctly"""
p = self.Person()
p.name = "James"
p.age = 34
assert p.to_mongo() == {"_cls": "Person", "name": "James", "age": 34}
assert p.to_mongo().keys() == ["_cls", "name", "age"]
p.save()
assert p.to_mongo().keys() == ["_id", "_cls", "name", "age"]
assert self.Person.objects.first().age == 34
# Confirm no changes to self.Person
assert not hasattr(self.Person, "age")
def test_dynamic_document_parse_values_in_constructor_like_document_do(self):
class ProductDynamicDocument(DynamicDocument):
title = StringField()
price = FloatField()
class ProductDocument(Document):
title = StringField()
price = FloatField()
product = ProductDocument(title="Blabla", price="12.5")
dyn_product = ProductDynamicDocument(title="Blabla", price="12.5")
assert product.price == dyn_product.price == 12.5
def test_change_scope_of_variable(self):
"""Test changing the scope of a dynamic field has no adverse effects"""
p = self.Person()
p.name = "Dean"
p.misc = 22
p.save()
p = self.Person.objects.get()
p.misc = {"hello": "world"}
p.save()
p = self.Person.objects.get()
assert p.misc == {"hello": "world"}
def test_delete_dynamic_field(self):
"""Test deleting a dynamic field works"""
self.Person.drop_collection()
p = self.Person()
p.name = "Dean"
p.misc = 22
p.save()
p = self.Person.objects.get()
p.misc = {"hello": "world"}
p.save()
p = self.Person.objects.get()
assert p.misc == {"hello": "world"}
collection = self.db[self.Person._get_collection_name()]
obj = collection.find_one()
assert sorted(obj.keys()) == ["_cls", "_id", "misc", "name"]
del p.misc
p.save()
p = self.Person.objects.get()
assert not hasattr(p, "misc")
obj = collection.find_one()
assert sorted(obj.keys()) == ["_cls", "_id", "name"]
def test_reload_after_unsetting(self):
p = self.Person()
p.misc = 22
p.save()
p.update(unset__misc=1)
p.reload()
def test_reload_dynamic_field(self):
self.Person.objects.delete()
p = self.Person.objects.create()
p.update(age=1)
assert len(p._data) == 3
assert sorted(p._data.keys()) == ["_cls", "id", "name"]
p.reload()
assert len(p._data) == 4
assert sorted(p._data.keys()) == ["_cls", "age", "id", "name"]
def test_fields_without_underscore(self):
"""Ensure we can query dynamic fields"""
Person = self.Person
p = self.Person(name="Dean")
p.save()
raw_p = Person.objects.as_pymongo().get(id=p.id)
assert raw_p == {"_cls": u"Person", "_id": p.id, "name": u"Dean"}
p.name = "OldDean"
p.newattr = "garbage"
p.save()
raw_p = Person.objects.as_pymongo().get(id=p.id)
assert raw_p == {
"_cls": u"Person",
"_id": p.id,
"name": "OldDean",
"newattr": u"garbage",
}
def test_fields_containing_underscore(self):
"""Ensure we can query dynamic fields"""
class WeirdPerson(DynamicDocument):
name = StringField()
_name = StringField()
WeirdPerson.drop_collection()
p = WeirdPerson(name="Dean", _name="Dean")
p.save()
raw_p = WeirdPerson.objects.as_pymongo().get(id=p.id)
assert raw_p == {"_id": p.id, "_name": u"Dean", "name": u"Dean"}
p.name = "OldDean"
p._name = "NewDean"
p._newattr1 = "garbage" # Unknown fields won't be added
p.save()
raw_p = WeirdPerson.objects.as_pymongo().get(id=p.id)
assert raw_p == {"_id": p.id, "_name": u"NewDean", "name": u"OldDean"}
def test_dynamic_document_queries(self):
"""Ensure we can query dynamic fields"""
p = self.Person()
p.name = "Dean"
p.age = 22
p.save()
assert 1 == self.Person.objects(age=22).count()
p = self.Person.objects(age=22)
p = p.get()
assert 22 == p.age
def test_complex_dynamic_document_queries(self):
class Person(DynamicDocument):
name = StringField()
Person.drop_collection()
p = Person(name="test")
p.age = "ten"
p.save()
p1 = Person(name="test1")
p1.age = "less then ten and a half"
p1.save()
p2 = Person(name="test2")
p2.age = 10
p2.save()
assert Person.objects(age__icontains="ten").count() == 2
assert Person.objects(age__gte=10).count() == 1
def test_complex_data_lookups(self):
"""Ensure you can query dynamic document dynamic fields"""
p = self.Person()
p.misc = {"hello": "world"}
p.save()
assert 1 == self.Person.objects(misc__hello="world").count()
def test_three_level_complex_data_lookups(self):
"""Ensure you can query three level document dynamic fields"""
self.Person.objects.create(misc={"hello": {"hello2": "world"}})
assert 1 == self.Person.objects(misc__hello__hello2="world").count()
def test_complex_embedded_document_validation(self):
"""Ensure embedded dynamic documents may be validated"""
class Embedded(DynamicEmbeddedDocument):
content = URLField()
class Doc(DynamicDocument):
pass
Doc.drop_collection()
doc = Doc()
embedded_doc_1 = Embedded(content="http://mongoengine.org")
embedded_doc_1.validate()
embedded_doc_2 = Embedded(content="this is not a url")
with pytest.raises(ValidationError):
embedded_doc_2.validate()
doc.embedded_field_1 = embedded_doc_1
doc.embedded_field_2 = embedded_doc_2
with pytest.raises(ValidationError):
doc.validate()
def test_inheritance(self):
"""Ensure that dynamic document plays nice with inheritance"""
class Employee(self.Person):
salary = IntField()
Employee.drop_collection()
assert "name" in Employee._fields
assert "salary" in Employee._fields
assert Employee._get_collection_name() == self.Person._get_collection_name()
joe_bloggs = Employee()
joe_bloggs.name = "Joe Bloggs"
joe_bloggs.salary = 10
joe_bloggs.age = 20
joe_bloggs.save()
assert 1 == self.Person.objects(age=20).count()
assert 1 == Employee.objects(age=20).count()
joe_bloggs = self.Person.objects.first()
assert isinstance(joe_bloggs, Employee)
def test_embedded_dynamic_document(self):
"""Test dynamic embedded documents"""
class Embedded(DynamicEmbeddedDocument):
pass
class Doc(DynamicDocument):
pass
Doc.drop_collection()
doc = Doc()
embedded_1 = Embedded()
embedded_1.string_field = "hello"
embedded_1.int_field = 1
embedded_1.dict_field = {"hello": "world"}
embedded_1.list_field = ["1", 2, {"hello": "world"}]
doc.embedded_field = embedded_1
assert doc.to_mongo() == {
"embedded_field": {
"_cls": "Embedded",
"string_field": "hello",
"int_field": 1,
"dict_field": {"hello": "world"},
"list_field": ["1", 2, {"hello": "world"}],
}
}
doc.save()
doc = Doc.objects.first()
assert doc.embedded_field.__class__ == Embedded
assert doc.embedded_field.string_field == "hello"
assert doc.embedded_field.int_field == 1
assert doc.embedded_field.dict_field == {"hello": "world"}
assert doc.embedded_field.list_field == ["1", 2, {"hello": "world"}]
def test_complex_embedded_documents(self):
"""Test complex dynamic embedded documents setups"""
class Embedded(DynamicEmbeddedDocument):
pass
class Doc(DynamicDocument):
pass
Doc.drop_collection()
doc = Doc()
embedded_1 = Embedded()
embedded_1.string_field = "hello"
embedded_1.int_field = 1
embedded_1.dict_field = {"hello": "world"}
embedded_2 = Embedded()
embedded_2.string_field = "hello"
embedded_2.int_field = 1
embedded_2.dict_field = {"hello": "world"}
embedded_2.list_field = ["1", 2, {"hello": "world"}]
embedded_1.list_field = ["1", 2, embedded_2]
doc.embedded_field = embedded_1
assert doc.to_mongo() == {
"embedded_field": {
"_cls": "Embedded",
"string_field": "hello",
"int_field": 1,
"dict_field": {"hello": "world"},
"list_field": [
"1",
2,
{
"_cls": "Embedded",
"string_field": "hello",
"int_field": 1,
"dict_field": {"hello": "world"},
"list_field": ["1", 2, {"hello": "world"}],
},
],
}
}
doc.save()
doc = Doc.objects.first()
assert doc.embedded_field.__class__ == Embedded
assert doc.embedded_field.string_field == "hello"
assert doc.embedded_field.int_field == 1
assert doc.embedded_field.dict_field == {"hello": "world"}
assert doc.embedded_field.list_field[0] == "1"
assert doc.embedded_field.list_field[1] == 2
embedded_field = doc.embedded_field.list_field[2]
assert embedded_field.__class__ == Embedded
assert embedded_field.string_field == "hello"
assert embedded_field.int_field == 1
assert embedded_field.dict_field == {"hello": "world"}
assert embedded_field.list_field == ["1", 2, {"hello": "world"}]
def test_dynamic_and_embedded(self):
"""Ensure embedded documents play nicely"""
class Address(EmbeddedDocument):
city = StringField()
class Person(DynamicDocument):
name = StringField()
Person.drop_collection()
Person(name="Ross", address=Address(city="London")).save()
person = Person.objects.first()
person.address.city = "Lundenne"
person.save()
assert Person.objects.first().address.city == "Lundenne"
person = Person.objects.first()
person.address = Address(city="Londinium")
person.save()
assert Person.objects.first().address.city == "Londinium"
person = Person.objects.first()
person.age = 35
person.save()
assert Person.objects.first().age == 35
def test_dynamic_embedded_works_with_only(self):
"""Ensure custom fieldnames on a dynamic embedded document are found by qs.only()"""
class Address(DynamicEmbeddedDocument):
city = StringField()
class Person(DynamicDocument):
address = EmbeddedDocumentField(Address)
Person.drop_collection()
Person(
name="Eric", address=Address(city="San Francisco", street_number="1337")
).save()
assert Person.objects.first().address.street_number == "1337"
assert (
Person.objects.only("address__street_number").first().address.street_number
== "1337"
)
def test_dynamic_and_embedded_dict_access(self):
"""Ensure embedded dynamic documents work with dict[] style access"""
class Address(EmbeddedDocument):
city = StringField()
class Person(DynamicDocument):
name = StringField()
Person.drop_collection()
Person(name="Ross", address=Address(city="London")).save()
person = Person.objects.first()
person.attrval = "This works"
person["phone"] = "555-1212" # but this should too
# Same thing two levels deep
person["address"]["city"] = "Lundenne"
person.save()
assert Person.objects.first().address.city == "Lundenne"
assert Person.objects.first().phone == "555-1212"
person = Person.objects.first()
person.address = Address(city="Londinium")
person.save()
assert Person.objects.first().address.city == "Londinium"
person = Person.objects.first()
person["age"] = 35
person.save()
assert Person.objects.first().age == 35
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 30.354545 | 92 | 0.572701 |
230bbdcbefbf4a205a4192753726e17d511107fa | 6,080 | py | Python | doc/source/conf.py | n-piipel/pygraphviz | 37208d1135e69bda35f8d5d9038be24857dd878f | [
"BSD-3-Clause"
] | null | null | null | doc/source/conf.py | n-piipel/pygraphviz | 37208d1135e69bda35f8d5d9038be24857dd878f | [
"BSD-3-Clause"
] | null | null | null | doc/source/conf.py | n-piipel/pygraphviz | 37208d1135e69bda35f8d5d9038be24857dd878f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Sphinx documentation build configuration file, created by
# sphinx-quickstart.py on Sat Mar 8 21:47:50 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os, re
from datetime import date
# If your extensions are in another directory, add it here.
#sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.abspath('../sphinxext'))
#sys.path.append(os.path.abspath('../sphinxext/numpyext'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.addons.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath','sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'PyGraphviz'
# NOTE(review): relies on 'date' being imported earlier in this file — confirm.
copyright = '2004-{}, PyGraphviz Developers'.format(date.today().year)
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
import pygraphviz
version =pygraphviz.__version__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# NOTE(review): 'unused_docs' was replaced by 'exclude_patterns' in Sphinx 1.0;
# presumably ignored by modern Sphinx — confirm target Sphinx version.
unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'sphinxdoc.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'contents.html'
# Custom sidebar templates, maps page names to templates.
#html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# templates.
#html_additional_pages = {'index': 'index.html'}
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
html_use_opensearch = 'http://pygraphviz.github.io'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyGraphviz'
# NOTE(review): pngmath_* options belong to sphinx.ext.pngmath, but the
# extensions list above uses sphinx.ext.imgmath — confirm this is still read.
pngmath_use_preview = True
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [('index', 'pygraphviz.tex', 'PyGraphviz Documentation',
   'PyGraphviz Developers', 'manual', 1)]
#latex_use_parts = True
# Additional stuff for the LaTeX preamble.
latex_elements = {
    'fontpkg': '\\usepackage{palatino}'
}
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# Extension interface
# -------------------
from sphinx import addnodes
dir_sig_re = re.compile(r'\.\. ([^:]+)::(.*)$')
def parse_directive(env, sig, signode):
    """Parse a directive signature ('.. name:: args') for an object description.

    Appends desc nodes to *signode* and returns the name used for indexing.
    """
    if not sig.startswith('.'):
        # Bare directive name: render it wrapped in directive markup.
        decorated = '.. %s::' % sig
        signode += addnodes.desc_name(decorated, decorated)
        return sig
    match = dir_sig_re.match(sig)
    if match is None:
        # Unrecognised form: show the raw signature verbatim.
        signode += addnodes.desc_name(sig, sig)
        return sig
    name, args = match.groups()
    decorated = '.. %s::' % name
    signode += addnodes.desc_name(decorated, decorated)
    signode += addnodes.desc_addname(args, args)
    return name
def parse_role(env, sig, signode):
    """Render a role signature as ':name:' and return it as the entry name."""
    decorated = ':%s:' % sig
    signode += addnodes.desc_name(decorated, decorated)
    return sig
event_sig_re = re.compile(r'([a-zA-Z-]+)\s*\((.*)\)')
def parse_event(env, sig, signode):
    """Parse an event signature 'name(arg, ...)' into name + parameter nodes."""
    match = event_sig_re.match(sig)
    if match is None:
        # Not of the form 'name(args)': emit the signature unchanged.
        signode += addnodes.desc_name(sig, sig)
        return sig
    name, args = match.groups()
    signode += addnodes.desc_name(name, name)
    params = addnodes.desc_parameterlist()
    for raw in args.split(','):
        cleaned = raw.strip()
        params += addnodes.desc_parameter(cleaned, cleaned)
    signode += params
    return name
def setup(app):
    """Sphinx extension hook: trim module-docstring headers and register
    the extra object types ('directive', 'role', 'confval', 'event')
    used throughout these docs."""
    from sphinx.ext.autodoc import cut_lines
    # Drop the first 4 lines of every module docstring (title boilerplate).
    app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
    app.add_object_type('directive', 'dir', 'pair: %s; directive', parse_directive)
    app.add_object_type('role', 'role', 'pair: %s; role', parse_role)
    # 'confval' uses Sphinx's default signature parsing (no custom parser).
    app.add_object_type('confval', 'confval', 'pair: %s; configuration value')
    app.add_object_type('event', 'event', 'pair: %s; event', parse_event)
| 30.862944 | 83 | 0.692763 |
19ec7da5451ca8cbfb5821f946a1fe7a9acea2b4 | 1,600 | py | Python | setup.py | moh2236945/pytorch_classification | 8816f08af327e06208b348a78d9c63c133b6a628 | [
"MIT"
] | 1 | 2020-06-22T14:35:28.000Z | 2020-06-22T14:35:28.000Z | setup.py | moh2236945/pytorch_classification | 8816f08af327e06208b348a78d9c63c133b6a628 | [
"MIT"
] | null | null | null | setup.py | moh2236945/pytorch_classification | 8816f08af327e06208b348a78d9c63c133b6a628 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from os import path
from io import open
# Directory containing this setup.py; used to locate README.md.
here = path.abspath(path.dirname(__file__))
# Long description shown on PyPI is the project README.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='pytorch_classification',
    version='0.0.34',
    description='Image classification models for PyTorch',
    license='MIT',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/moh2236945/pytorch_classification',
    author='Mohamed Ahmed',
    author_email='engmohamedelshrbeny@gmail.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering :: Image Recognition',
    ],
    keywords='machine-learning deep-learning neuralnetwork image-classification pytorch imagenet cifar svhn vgg resnet '
             'pyramidnet diracnet densenet condensenet wrn drn dpn darknet fishnet espnetv2 xdensnet squeezenet '
             'squeezenext shufflenet menet mobilenet igcv3 mnasnet darts xception inception polynet nasnet pnasnet ror '
             'proxylessnas dianet efficientnet mixnet image-segmentation voc ade20k cityscapes coco pspnet deeplabv3 '
             'fcn',
    packages=find_packages(exclude=['datasets', 'metrics', 'others', '*.others', 'others.*', '*.others.*']),
    include_package_data=True,
    install_requires=['numpy', 'requests'],
)
| 44.444444 | 120 | 0.703125 |
6a3a4bc423941f4bf5873c0138c2037bcd00d67b | 410 | py | Python | collectors/models.py | zxyctn/PhenObs | c5ed2e2fdd6a1bee5085c1336dfba31bf9e6abdf | [
"BSD-3-Clause"
] | null | null | null | collectors/models.py | zxyctn/PhenObs | c5ed2e2fdd6a1bee5085c1336dfba31bf9e6abdf | [
"BSD-3-Clause"
] | 44 | 2021-10-19T15:59:57.000Z | 2022-03-23T14:39:30.000Z | collectors/models.py | zxyctn/PhenObs | c5ed2e2fdd6a1bee5085c1336dfba31bf9e6abdf | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.db import models
from gardens.models import Garden
class Collector(models.Model):
    """Links a site user (one-to-one) to the gardens they collect for."""
    User = settings.AUTH_USER_MODEL
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # NOTE(review): ArrayField's base_field must be a Field *instance*;
    # ``Garden.garden_id`` looks like a model attribute — confirm this
    # actually migrates (e.g. should be base_field=models.IntegerField()).
    gardens = ArrayField(base_field=Garden.garden_id, verbose_name="Garden")
    def __str__(self):
        # Bug fix: __str__ must return a str; returning the related User
        # instance directly raises TypeError in the admin and templates.
        return str(self.user)
| 27.333333 | 76 | 0.773171 |
076ac874cba720785900b4bb2b298cab84485004 | 6,171 | py | Python | benchmarks/f3_wrong_hints_permutations/scaling_software_termination/11-2Nested_false-termination_34.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints_permutations/scaling_software_termination/11-2Nested_false-termination_34.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints_permutations/scaling_software_termination/11-2Nested_false-termination_34.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
                                              FNode]:
    """Encode the program ``while x >= 0: x += y; y += 1`` symbolically.

    Returns (symbols, init, trans, fairness) where pc = 0 is the loop
    head, pc = 1 performs x += y, pc = 2 performs y += 1, and pc = -1
    is the terminated sink. The fairness condition pc != -1 asks for
    non-terminating (infinite) runs.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    # Primed (next-state) copies of each symbol.
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    symbols = frozenset([pc, x, y])
    m_1 = mgr.Int(-1)
    n_locs = 3
    max_int = n_locs
    ints = []
    pcs = []
    x_pcs = []
    # Pre-build integer constants and pc/pc' equality predicates per location.
    for idx in range(n_locs):
        num = mgr.Int(idx)
        ints.append(num)
        pcs.append(mgr.Equals(pc, num))
        x_pcs.append(mgr.Equals(x_pc, num))
    # No-op here: max_int == n_locs, so this range is empty.
    for idx in range(n_locs, max_int):
        num = mgr.Int(idx)
        ints.append(num)
    pcend = mgr.Equals(pc, m_1)
    x_pcend = mgr.Equals(x_pc, m_1)
    init = pcs[0]
    cfg = []
    # pc = 0 & (x >= 0) -> pc' = 1
    cond = mgr.GE(x, ints[0])
    cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
    # pc = 0 & !(x >= 0) -> pc' = -1
    cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
    # pc = 1 -> pc' = 2
    cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
    # pc = 2 -> pc' = 0
    cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
    # pc = -1 -> pc' = -1
    cfg.append(mgr.Implies(pcend, x_pcend))
    trans = []
    same_x = mgr.Equals(x_x, x)
    same_y = mgr.Equals(x_y, y)
    same = mgr.And(same_x, same_y)
    # pc = 0 -> same
    trans.append(mgr.Implies(pcs[0], same))
    # pc = 1 -> x' = x + y & same_y
    trans.append(mgr.Implies(pcs[1],
                             mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
                                     same_y)))
    # pc = 2 -> same_x & y' = y + 1
    trans.append(mgr.Implies(pcs[2],
                             mgr.And(same_x,
                                     mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
    # pc = end -> same
    trans.append(mgr.Implies(pcend, same))
    trans = mgr.And(*cfg, *trans)
    fairness = mgr.Not(mgr.Equals(pc, m_1))
    return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Build the set of (deliberately partly wrong) search hints.

    Each Hint is a small automaton over a subset of {pc, x, y}: its
    Locations carry region/assumption predicates and set_progress gives
    the successor location index plus the transition predicate.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    symbs = frozenset([pc, x, y])
    # Shared integer constants.
    m_100 = mgr.Int(-100)
    m_1 = mgr.Int(-1)
    i_0 = mgr.Int(0)
    i_1 = mgr.Int(1)
    i_2 = mgr.Int(2)
    # NOTE(review): i_4 is never used below — left for parity with sibling
    # benchmark files, presumably; confirm before removing.
    i_4 = mgr.Int(4)
    i_20 = mgr.Int(20)
    # Primed (next-state) symbols.
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    res = []
    # h_pc0: two-location pc hint.
    loc0 = Location(env, mgr.Equals(pc, i_1))
    loc0.set_progress(1, mgr.GT(x_pc, mgr.Plus(pc, i_1)))
    loc1 = Location(env, mgr.GT(pc, i_2))
    loc1.set_progress(0, mgr.Equals(x_pc, i_1))
    h_pc = Hint("h_pc0", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0, loc1])
    res.append(h_pc)
    # h_y2: y grows by x, then resets to -100.
    loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(x, y)))
    loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
    loc1.set_progress(0, mgr.Equals(x_y, m_100))
    h_y = Hint("h_y2", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1])
    res.append(h_y)
    # h_x2: x alternates between x + y and y.
    loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(x, y)))
    loc1 = Location(env, mgr.GE(x, i_2), mgr.GE(y, i_1))
    loc1.set_progress(0, mgr.Equals(x_x, y))
    h_x = Hint("h_x2", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1])
    res.append(h_x)
    # h_y3: like h_y2 but multiplicative.
    loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
    loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
    loc1.set_progress(0, mgr.Equals(x_y, m_100))
    h_y = Hint("h_y3", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1])
    res.append(h_y)
    # h_pc3 / h_pc2: pc increases, then returns via pc/pc (== 1 when pc != 0).
    loc0 = Location(env, mgr.LE(pc, i_1))
    loc0.set_progress(1, mgr.GT(x_pc, pc))
    loc1 = Location(env, mgr.LE(pc, i_2))
    loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
    h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0, loc1])
    res.append(h_pc)
    loc0 = Location(env, mgr.Equals(pc, i_1))
    loc0.set_progress(1, mgr.GT(x_pc, pc))
    loc1 = Location(env, mgr.GE(pc, i_2))
    loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
    h_pc = Hint("h_pc2", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0, loc1])
    res.append(h_pc)
    # h_y1: single self-looping location with a stutter transition.
    stutter = mgr.Equals(x_y, y)
    loc = Location(env, mgr.TRUE(), mgr.LE(x, i_20), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_y, mgr.Plus(x, y)))
    h_y = Hint("h_y1", env, frozenset([y]), symbs)
    h_y.set_locs([loc])
    res.append(h_y)
    # h_pc1: trivial, unconstrained hint.
    loc0 = Location(env, mgr.TRUE())
    loc0.set_progress(0, mgr.TRUE())
    h_pc = Hint("h_pc1", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0])
    res.append(h_pc)
    # h_y4: three-location cycle on y.
    loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
    loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
    loc1.set_progress(2, mgr.GE(x_y, i_20))
    loc2 = Location(env, mgr.TRUE())
    loc2.set_progress(0, mgr.And(mgr.GE(x_y, m_100), mgr.LE(x_y, i_0)))
    h_y = Hint("h_y4", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1, loc2])
    res.append(h_y)
    # h_y0: single location, y' = 1 + y, with stutter.
    stutter = mgr.Equals(x_y, y)
    loc = Location(env, mgr.TRUE(), mgr.LE(x, i_20), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_y, mgr.Plus(i_1, y)))
    h_y = Hint("h_y0", env, frozenset([y]), symbs)
    h_y.set_locs([loc])
    res.append(h_y)
    # h_x5: x squared, then forced below -x^2.
    loc0 = Location(env, mgr.LE(x, i_0))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, x)))
    loc1 = Location(env, mgr.GE(x, i_0))
    loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
    h_x = Hint("h_x5", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1])
    res.append(h_x)
    return frozenset(res)
| 30.399015 | 77 | 0.584022 |
d670d0130ff4d04f852a5a1b4f394cee750adfde | 4,842 | py | Python | scripts/inference.py | lsheiba/pixel2style2pixel | a5bb5c2031615e2425b2a93442f7d31e54db1b84 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | 53 | 2021-01-22T08:52:02.000Z | 2022-03-30T13:58:57.000Z | scripts/inference.py | rebotnix/pixel2style2pixel | ffa934820eb7cbd728a520377fca1ab7128a7b27 | [
"MIT"
] | 7 | 2021-09-26T16:33:21.000Z | 2021-12-13T09:05:19.000Z | scripts/inference.py | rebotnix/pixel2style2pixel | ffa934820eb7cbd728a520377fca1ab7128a7b27 | [
"MIT"
] | 24 | 2021-09-12T21:41:26.000Z | 2022-02-18T15:48:04.000Z | import os
from argparse import Namespace
from tqdm import tqdm
import time
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
import sys
sys.path.append(".")
sys.path.append("..")
from configs import data_configs
from datasets.inference_dataset import InferenceDataset
from utils.common import tensor2im, log_input_image
from options.test_options import TestOptions
from models.psp import pSp
def run():
    """Run pSp inference over the configured test dataset.

    Writes per-image outputs to <exp_dir>/inference_results, optional
    side-by-side comparisons to <exp_dir>/inference_coupled, and mean/std
    runtime stats to <exp_dir>/stats.txt.
    """
    test_opts = TestOptions().parse()
    if test_opts.resize_factors is not None:
        assert len(test_opts.resize_factors.split(',')) == 1, "When running inference, provide a single downsampling factor!"
        out_path_results = os.path.join(test_opts.exp_dir, 'inference_results',
                                        'downsampling_{}'.format(test_opts.resize_factors))
        out_path_coupled = os.path.join(test_opts.exp_dir, 'inference_coupled',
                                        'downsampling_{}'.format(test_opts.resize_factors))
    else:
        out_path_results = os.path.join(test_opts.exp_dir, 'inference_results')
        out_path_coupled = os.path.join(test_opts.exp_dir, 'inference_coupled')
    os.makedirs(out_path_results, exist_ok=True)
    os.makedirs(out_path_coupled, exist_ok=True)
    # update test options with options used during training
    ckpt = torch.load(test_opts.checkpoint_path, map_location='cpu')
    opts = ckpt['opts']
    opts.update(vars(test_opts))
    # Older checkpoints predate the 'learn_in_w' option; default it off.
    if 'learn_in_w' not in opts:
        opts['learn_in_w'] = False
    opts = Namespace(**opts)
    net = pSp(opts)
    net.eval()
    net.cuda()
    print('Loading dataset for {}'.format(opts.dataset_type))
    dataset_args = data_configs.DATASETS[opts.dataset_type]
    transforms_dict = dataset_args['transforms'](opts).get_transforms()
    dataset = InferenceDataset(root=opts.data_path,
                               transform=transforms_dict['transform_inference'],
                               opts=opts)
    # drop_last=True keeps every batch full, matching the fixed-size
    # per-batch loop over opts.test_batch_size below.
    dataloader = DataLoader(dataset,
                            batch_size=opts.test_batch_size,
                            shuffle=False,
                            num_workers=int(opts.test_workers),
                            drop_last=True)
    if opts.n_images is None:
        opts.n_images = len(dataset)
    global_i = 0
    global_time = []
    for input_batch in tqdm(dataloader):
        if global_i >= opts.n_images:
            break
        with torch.no_grad():
            input_cuda = input_batch.cuda().float()
            tic = time.time()
            result_batch = run_on_batch(input_cuda, net, opts)
            toc = time.time()
            global_time.append(toc - tic)
        for i in range(opts.test_batch_size):
            result = tensor2im(result_batch[i])
            im_path = dataset.paths[global_i]
            # Save a coupled (side-by-side) image when requested, and also
            # every 100th image as a periodic visual sanity check.
            if opts.couple_outputs or global_i % 100 == 0:
                input_im = log_input_image(input_batch[i], opts)
                resize_amount = (256, 256) if opts.resize_outputs else (1024, 1024)
                if opts.resize_factors is not None:
                    # for super resolution, save the original, down-sampled, and output
                    source = Image.open(im_path)
                    res = np.concatenate([np.array(source.resize(resize_amount)),
                                          np.array(input_im.resize(resize_amount, resample=Image.NEAREST)),
                                          np.array(result.resize(resize_amount))], axis=1)
                else:
                    # otherwise, save the original and output
                    res = np.concatenate([np.array(input_im.resize(resize_amount)),
                                          np.array(result.resize(resize_amount))], axis=1)
                Image.fromarray(res).save(os.path.join(out_path_coupled, os.path.basename(im_path)))
            im_save_path = os.path.join(out_path_results, os.path.basename(im_path))
            Image.fromarray(np.array(result)).save(im_save_path)
            global_i += 1
    stats_path = os.path.join(opts.exp_dir, 'stats.txt')
    result_str = 'Runtime {:.4f}+-{:.4f}'.format(np.mean(global_time), np.std(global_time))
    print(result_str)
    with open(stats_path, 'w') as f:
        f.write(result_str)
def run_on_batch(inputs, net, opts):
    """Run the pSp network over one batch of input images.

    When ``opts.latent_mask`` is None the whole batch goes through the
    network in one forward pass. Otherwise each image is processed
    individually with a freshly sampled random style vector injected at
    the masked latent indices (mixed with ``opts.mix_alpha``).
    """
    if opts.latent_mask is None:
        # Plain forward pass over the whole batch.
        return net(inputs, randomize_noise=False, resize=opts.resize_outputs)
    mask = [int(idx) for idx in opts.latent_mask.split(",")]
    outputs = []
    for image in inputs:
        # Sample a random latent code and map it through the network to
        # obtain the style vector to inject.
        random_code = np.random.randn(1, 512).astype('float32')
        _, injected = net(torch.from_numpy(random_code).to("cuda"),
                          input_code=True,
                          return_latents=True)
        # Forward the single image with the injected style vector.
        outputs.append(net(image.unsqueeze(0).to("cuda").float(),
                           latent_mask=mask,
                           inject_latent=injected,
                           alpha=opts.mix_alpha,
                           resize=opts.resize_outputs))
    return torch.cat(outputs, dim=0)
# Script entry point: parse TestOptions from the CLI and run inference.
if __name__ == '__main__':
    run()
| 36.134328 | 120 | 0.678439 |
6a7cfaa8bca8e600a7ce58afed1ef31234baf327 | 1,236 | py | Python | TextDetect.py | ieee820/text-detection | 07c9c63a69bc78a9bd85495512641e15ee1c5423 | [
"BSD-3-Clause"
] | null | null | null | TextDetect.py | ieee820/text-detection | 07c9c63a69bc78a9bd85495512641e15ee1c5423 | [
"BSD-3-Clause"
] | null | null | null | TextDetect.py | ieee820/text-detection | 07c9c63a69bc78a9bd85495512641e15ee1c5423 | [
"BSD-3-Clause"
] | 1 | 2019-12-02T02:25:47.000Z | 2019-12-02T02:25:47.000Z | import os,sys
import numpy as np
import cv2
# author: qzane@live.com
# reference: http://stackoverflow.com/a/23565051
# further reading: http://docs.opencv.org/master/da/d56/group__text__detect.html#gsc.tab=0
def text_detect(img, ele_size=(8, 2)):
    """Detect candidate text regions in an image.

    :param img: BGR or grayscale ndarray.
    :param ele_size: size of the rectangular structuring element used to
        merge neighbouring character strokes into text lines.
    :return: list of padded bounding boxes as (x1, y1, x2, y2) corners.
    """
    if len(img.shape) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Horizontal gradient highlights the vertical strokes of characters.
    img_sobel = cv2.Sobel(img, cv2.CV_8U, 1, 0)  # same as default,None,3,1,0,cv2.BORDER_DEFAULT
    # Otsu's method picks the binarization threshold automatically.
    img_threshold = cv2.threshold(img_sobel, 0, 255,
                                  cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    element = cv2.getStructuringElement(cv2.MORPH_RECT, ele_size)
    img_threshold = cv2.morphologyEx(img_threshold[1], cv2.MORPH_CLOSE, element)
    # Bug fix: cv2.findContours returns (image, contours, hierarchy) in
    # OpenCV 3.x but (contours, hierarchy) in 2.4 and 4.x, so indexing [1]
    # selected the hierarchy on 2.4/4.x; [-2] is the contour list in
    # every version.
    found = cv2.findContours(img_threshold, 0, 1)
    contours = found[-2]
    # Keep contours with more than 100 points as likely text lines
    # (point count, not area — intentional per original heuristic).
    rects = [cv2.boundingRect(c) for c in contours if c.shape[0] > 100]
    # Pad each box by ~8-10% of its size on every side.
    return [(int(x - w * 0.08), int(y - h * 0.08),
             int(x + w * 1.1), int(y + h * 1.1))
            for (x, y, w, h) in rects]
def main(inputFile):
    """Draw detected text boxes on *inputFile* and save a '-rect' copy.

    The annotated image is written next to the input as '<name>-rect<ext>'.
    """
    # Bug fix: splitting on every '.' broke paths with dots in directory
    # names and files without an extension; splitext handles both and
    # yields the same name for the common 'name.ext' case.
    root, ext = os.path.splitext(inputFile)
    outputFile = root + '-rect' + ext
    print(outputFile)
    img = cv2.imread(inputFile)
    if img is None:
        # cv2.imread silently returns None for missing/unreadable files;
        # fail with a clear message instead of an AttributeError later.
        raise IOError('could not read image: %s' % inputFile)
    for box in text_detect(img):
        cv2.rectangle(img, box[:2], box[2:], (0, 0, 255))
    cv2.imwrite(outputFile, img)
if __name__ == '__main__':
    main(sys.argv[1])
| 38.625 | 107 | 0.68123 |
08c9bf0b77edb866c40c8772bffdad090c73db40 | 181 | py | Python | hospitalmanagement/wsgi.py | Shahriar075/HospitalManagement_Django | 687df49dd19323aeb91b2155ac74a5b7bb1eb9cd | [
"MIT"
] | null | null | null | hospitalmanagement/wsgi.py | Shahriar075/HospitalManagement_Django | 687df49dd19323aeb91b2155ac74a5b7bb1eb9cd | [
"MIT"
] | null | null | null | hospitalmanagement/wsgi.py | Shahriar075/HospitalManagement_Django | 687df49dd19323aeb91b2155ac74a5b7bb1eb9cd | [
"MIT"
] | null | null | null |
import os
from django.core.wsgi import get_wsgi_application
# Default to the project settings module unless the environment already
# provides DJANGO_SETTINGS_MODULE.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hospitalmanagement.settings')
# WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| 18.1 | 78 | 0.828729 |
b5182750607a4439fef3a4a005399cae07289d60 | 9,517 | py | Python | microservices/http/client.py | viatoriche/microservices | 3510563edd15dc6131b8a948d6062856cd904ac7 | [
"MIT"
] | 18 | 2016-04-04T03:01:37.000Z | 2020-08-18T03:03:40.000Z | microservices/http/client.py | viatoriche/microservices | 3510563edd15dc6131b8a948d6062856cd904ac7 | [
"MIT"
] | 7 | 2016-05-06T14:23:16.000Z | 2019-11-20T11:16:35.000Z | microservices/http/client.py | viatoriche/microservices | 3510563edd15dc6131b8a948d6062856cd904ac7 | [
"MIT"
] | 5 | 2016-05-06T08:20:40.000Z | 2019-07-13T01:34:38.000Z | import requests
import six
import six.moves.urllib.parse as urlparse
from six.moves.urllib.parse import urlencode
from microservices.helpers.logs import InstanceLogger
from microservices.utils import get_logger
@six.python_2_unicode_compatible
class ResponseError(Exception):
    def __init__(self, response, description, *args, **kwargs):
        """HTTP response error.

        Stores the failing response and a description, and exposes the
        response's ``status_code`` and ``content`` for convenience.

        :param response: requests.Response instance
        :param description: str (or exception) describing the error
        """
        self.response = response
        self.description = description
        self.status_code = response.status_code
        self.content = response.content
        super(ResponseError, self).__init__(*args, **kwargs)
    def __repr__(self):  # pragma: no cover
        return 'Error status code: {}. Description: {}'.format(
            self.response.status_code, self.description)
    def __str__(self):  # pragma: no cover
        return self.__repr__()
    # Python 2 only: six maps __str__ to __unicode__; decode the bytes repr.
    def __unicode__(self):  # pragma: no cover
        return self.__str__().decode()
class Resource(object):
    """A partial URL path bound to a client.

    Any attribute access (``res.get``, ``res.post`` ...) yields a callable
    that issues that HTTP method against the accumulated path segments.
    """
    def __init__(self, client, resources):
        """
        :param client: owning Client instance
        :param resources: sequence of path segments, e.g. ['one', 'two']
        """
        self.client = client
        self.resources = resources
        self.logger = client.logger
    def __getattr__(self, item):
        def call(*resources, **kwargs):
            return self.request(item, *resources, **kwargs)
        return call
    def request(self, method, *resources, **kwargs):
        # Prepend this resource's own path segments before delegating.
        full_path = tuple(self.resources) + resources
        return self.client.request(method, *full_path, **kwargs)
    def resource(self, *resources):
        """Return a child Resource extended with *resources* segments."""
        return Resource(self.client, tuple(self.resources) + resources)
@six.python_2_unicode_compatible
class Client(object):
    """Small REST client over ``requests`` with resource-path chaining.

    Unknown attributes are treated as HTTP method names, so
    ``client.get('users', 1)`` issues ``GET <endpoint>/users/1/``.
    """
    # Class-level defaults; overridable per-instance via __init__.
    ok_statuses = (200, 201, 202,)
    to_none_statuses = (404,)
    def __init__(self, endpoint, ok_statuses=None, to_none_statuses=None,
                 empty_to_none=True, close_slash=True,
                 logger=None, name=None, keep_blank_values=True):
        """Create a client
        :param endpoint: str, ex. http://localhost:5000 or http://localhost:5000/api/
        :param ok_statuses: default - (200, 201, 202, ), status codes for "ok"
        :param to_none_statuses: statuses, for generate None as response, default - (404, )
        :param empty_to_none: boolean, default - True, if True - empty response will be generate None response (empty str, empty list, empty dict)
        :param close_slash: boolean, url += '/', if url.endswith != '/', default - True
        :param logger: logger instance
        :param name: name for client
        :type name: str
        """
        if name is None:
            name = '<client: {}>'.format(endpoint)
        if logger is None:
            logger = get_logger(__name__)
        self.logger = InstanceLogger(self, logger)
        if endpoint.endswith('/'):
            endpoint = endpoint[:-1]
        if ok_statuses is not None:
            self.ok_statuses = ok_statuses
        if to_none_statuses is not None:
            self.to_none_statuses = to_none_statuses
        self.empty_to_none = empty_to_none
        self.close_slash = close_slash
        # Split the endpoint once; scheme+netloc stay in self.endpoint while
        # path/query/params/fragment are stored separately and re-merged in
        # url_for() on every request.
        parsed_url = urlparse.urlparse(endpoint)
        endpoint = self.get_endpoint_from_parsed_url(parsed_url)
        self.keep_blank_values = keep_blank_values
        self.endpoint = endpoint
        self.path = parsed_url.path
        self.query = urlparse.parse_qs(parsed_url.query,
                                       keep_blank_values=self.keep_blank_values)
        self.fragment = parsed_url.fragment
        self.params = parsed_url.params
        self.name = name
        self.logger.debug(
            'Client built, endpoint: "%s", path: "%s", query: %s, params: %s, fragment: %s',
            self.endpoint, self.path,
            self.query, self.params, self.fragment)
    def __str__(self):
        return self.name
    @staticmethod
    def get_endpoint_from_parsed_url(parsed_url):
        # Keep only the first two components (scheme, netloc); blank out the
        # rest, then unparse back to a bare "scheme://netloc" endpoint.
        url_list = [(lambda: x if e < 2 else '')() for e, x in
                    enumerate(list(parsed_url))]
        return urlparse.urlunparse(url_list)
    def build_resource(self, resources):
        """Build uri from list
        :param resources: ['one', 'two', 'three']
        :return: one/two/three
        """
        resource = '/'.join(resources)
        self.logger.debug('Resource "%s" built from %s', resource, resources)
        return resource
    def url_for(self, resource='', query=None, params='', fragment='',
                keep_blank_values=None):
        """Generate url for resource
        Use endpoint for generation
        Ex. resource = 'one/two/three'
        result - http://localhost:5000/api/one/two/three/
        if endpoint == http://localhost:5000/api/
        :param resource: str
        :param query: dict for generate query string {a: 1, b: 2} -> ?a=1&b=2, or string
        :param params: params for last path url
        :param fragment: #fragment
        :return: str, url
        """
        parsed_url = list(urlparse.urlparse(self.endpoint))
        if resource:
            path = self.path + '/' + resource
        else:
            path = self.path
        if self.close_slash:
            if not path.endswith('/'):
                path += '/'
        # Fall back to the params/fragment captured from the endpoint URL.
        if not params:
            params = self.params
        if not fragment:
            fragment = self.fragment
        parsed_url[2] = path
        parsed_url[3] = params
        parsed_url[5] = fragment
        if self.query:
            parsed_url[4] = urlencode(self.query, doseq=1)
        if query is not None:
            if keep_blank_values is None:
                keep_blank_values = self.keep_blank_values
            if isinstance(query, six.string_types):
                query = urlparse.parse_qs(query,
                                          keep_blank_values=keep_blank_values)
            # Per-request query is merged over the endpoint's base query.
            req_query = dict(self.query)
            req_query.update(query)
            req_query = urlencode(req_query, doseq=1)
            parsed_url[4] = req_query
        url = urlparse.urlunparse(parsed_url)
        self.logger.debug('Url %s built for resource "%s"', url, resource)
        return url
    def handle_response(self, response, response_key=None):
        """Handler for response object
        :param response: requests.response obj
        :param response_key: key for dict in response obj
        :return object, result for response, python obj
        """
        status_code = response.status_code
        try:
            result = response.json()
        except Exception as e:
            self.logger.exception(e)
            raise ResponseError(response, e)
        # NOTE(review): when the decoded body is falsy (e.g. {} on a 500),
        # the status-code check below is skipped entirely and the falsy
        # body is returned without raising — confirm this is intended.
        if result:
            if response_key is not None and status_code in self.ok_statuses:
                if response_key in result:
                    result = result[response_key]
                else:
                    raise ResponseError(response, 'Response key not found!')
            elif response_key is not None and status_code in self.to_none_statuses:
                result = None
            elif status_code not in self.ok_statuses and status_code not in self.to_none_statuses:
                raise ResponseError(response,
                                    'Status code {} not in ok_statuses {}'.format(
                                        status_code, self.ok_statuses))
        # Optionally collapse empty payloads ('', [], {}) to None.
        if response_key is not None and self.empty_to_none and result is not None and not result:
            result = None
        return result
    def __getattr__(self, method):
        # Unknown attribute == HTTP method name (get/post/...).
        return lambda *resources, **kwargs: self.request(method, *resources,
                                                         **kwargs)
    def request(self, method, *resources, **kwargs):
        # Pop client-specific kwargs; everything left is forwarded to
        # requests.request() unchanged.
        method = method.upper()
        response_key = kwargs.pop('response_key', None)
        key = kwargs.pop('key', None)
        if key is not None:
            response_key = key
        query = kwargs.pop('query', None)
        data = kwargs.pop('data', None)
        fragment = kwargs.pop('fragment', '')
        params = kwargs.pop('params', '')
        keep_blank_values = kwargs.pop('keep_blank_values', None)
        timeout = kwargs.pop('timeout', 60)
        resource = self.build_resource(resources)
        content_type = kwargs.pop('content_type', 'json')
        if data is not None:
            if 'json' in content_type:
                kwargs['json'] = data
            if content_type == 'body':
                kwargs['data'] = data
        url = self.url_for(resource, query, params=params,
                           fragment=fragment,
                           keep_blank_values=keep_blank_values)
        self.logger.info('Request %s for %s', method, url)
        response = requests.request(method, url, timeout=timeout, **kwargs)
        return self.handle_response(response, response_key=response_key)
    def resource(self, *resources):
        """Generate Resource object with resources
        :param resources: 'one', 'two', 'three'
        :return: Resource with /one/two/three endpoint
        """
        return Resource(self, resources)
| 38.068 | 146 | 0.595251 |
5fa152cfcd4858545a467af278956d8a844f2435 | 5,756 | py | Python | src/zc/monitor/__init__.py | stevepiercy/zc.monitor | e69c32da1c50d055c83ff60e2e4fcb7ca2460ca1 | [
"ZPL-2.1"
] | null | null | null | src/zc/monitor/__init__.py | stevepiercy/zc.monitor | e69c32da1c50d055c83ff60e2e4fcb7ca2460ca1 | [
"ZPL-2.1"
] | null | null | null | src/zc/monitor/__init__.py | stevepiercy/zc.monitor | e69c32da1c50d055c83ff60e2e4fcb7ca2460ca1 | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2005-2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Zope 3 Monitor Server
"""
import errno, logging, os, stat, traceback, socket
import zope.component
import zc.monitor.interfaces
# Sentinel objects: commands return one of these to switch the Server's
# session mode (see Server.handle_input).
INTERACTIVE_MARKER = object()
QUIT_MARKER = object()
MORE_MARKER = object()
# NOTE: this module uses Python-2-only syntax ('except Exception, v',
# 'print >>') and must not be run under Python 3 as-is.
class Server:
    """NGI connection handler that dispatches monitor commands.

    Each input line is split into a command name plus arguments; the
    command is looked up as an IMonitorPlugin utility. The mode sentinel
    returned by a command controls whether the connection stays open.
    """
    # Remembered (name, args) pair so a blank line can repeat the last command.
    last_command = None
    def __init__(self, connection):
        import zc.ngi.adapters
        # Wrap the raw connection so handle_input receives whole lines.
        connection = zc.ngi.adapters.Lines(connection)
        self.connection = connection
        connection.set_handler(self)
        self.mode = QUIT_MARKER
    def handle_input(self, connection, data):
        args = data.strip().split()
        if self.mode is MORE_MARKER:
            # Multi-line command in progress: keep feeding the same command.
            command_name = self.last_command[0]
        elif not args:
            # Blank line repeats the previous command, if any.
            if self.last_command is not None:
                command_name, args = self.last_command
            else:
                return
        else:
            command_name = args.pop(0)
            self.last_command = (command_name, args)
        command = zope.component.queryUtility(
            zc.monitor.interfaces.IMonitorPlugin,
            command_name)
        if command is None:
            connection.write(
                'Invalid command %r\nTry "help".\n' % command_name)
        else:
            try:
                res = command(connection, *args)
            except Exception, v:
                # Report the traceback to the client rather than dying.
                traceback.print_exc(100, connection)
                print >> connection, "%s: %s\n" % (v.__class__.__name__, v)
            else:
                # Commands may switch the session mode by returning a marker.
                if res in (INTERACTIVE_MARKER, QUIT_MARKER, MORE_MARKER):
                    self.mode = res
            # In the default (quit) mode, close after a single command.
            if self.mode is QUIT_MARKER:
                connection.write(zc.ngi.END_OF_DATA)
    def handle_close(self, connection, reason):
        pass  # Don't care
# testing support: the most recent listener, exposed for tests.
last_listener = None
def start(address):
    """start monitor server.
    Returns the listener address (which may be different from the
    given address) if monitor server started; returns False if the
    port is already in use; and raises an exception otherwise.

    *address* may be an int port, an (host, port) tuple, or a
    Unix-domain socket path (Python 2 'basestring').
    """
    import zc.ngi.async
    ourAddress = None
    if isinstance(address, int):
        #a port is passed as int
        ourAddress = ('', address)
    elif isinstance(address, tuple):
        #an (address, port) tuple is passed
        ourAddress = address
    elif isinstance(address, basestring):
        #a unix domain socket string is passed
        ourAddress = address
        # Remove a stale socket file left over from a previous run.
        if os.path.exists(ourAddress):
            m = os.stat(ourAddress)
            if stat.S_ISSOCK(m.st_mode):
                os.unlink(ourAddress)
    try:
        global last_listener
        last_listener = zc.ngi.async.listener(ourAddress, Server)
    except socket.error, e:
        if e.args[0] == errno.EADDRINUSE:
            # Don't kill the process just because somebody else has our port.
            # This might be a zopectl debug or some other innocuous problem.
            # (Existing Unix-domain sockets are removed before binding, so
            # this doesn't work that way for those. Use a separate offline
            # configuration in that case.)
            logging.warning(
                'unable to start zc.monitor server because the address %s '\
                'is in use.',
                ourAddress)
            return False
        else:
            raise
    return last_listener.address
# default commands
def interactive(connection):
    """Switch the monitor session into interactive mode.

    The monitor normally drops the connection after one command; in
    interactive mode it stays open until the "quit" command is issued,
    and an empty line re-runs the previous command.
    """
    message = 'Interactive mode on. Use "quit" To exit.\n'
    connection.write(message)
    return INTERACTIVE_MARKER
def quit(connection):
    """End the monitor session.

    Mostly relevant in interactive mode (see the "interactive" command),
    since the default mode closes after every command anyway.
    """
    farewell = 'Goodbye.\n'
    connection.write(farewell)
    return QUIT_MARKER
def help(connection, command_name=None):
    """Get help about server commands
    By default, a list of commands and summaries is printed. Provide
    a command name to get detailed documentation for a command.
    """
    if command_name is None:
        # Summary line = first line of each plugin's docstring ('?' if none).
        # str() forces a byte string on Python 2 before writing.
        connection.write(str(
            "Supported commands:\n  "
            + '\n  '.join(sorted(
                "%s -- %s" % (name, (u.__doc__ or '?').split('\n', 1)[0])
                for (name, u) in
                zope.component.getUtilitiesFor(
                    zc.monitor.interfaces.IMonitorPlugin)))
            + '\n'))
    else:
        # Detailed help: dump the named plugin's full docstring.
        command = zope.component.getUtility(
            zc.monitor.interfaces.IMonitorPlugin,
            command_name)
        connection.write("Help for %s:\n\n%s\n"
                         % (command_name, command.__doc__)
                         )
def register(command, name=None):
    """Register *command* as a monitor plugin utility.

    When *name* is omitted, the command's function name is used as the
    plugin name.
    """
    plugin_name = command.__name__ if name is None else name
    zope.component.provideUtility(
        command, zc.monitor.interfaces.IMonitorPlugin, plugin_name)
def register_basics():
    """Register the built-in monitor commands: help, interactive, quit."""
    for command in (help, interactive, quit):
        register(command)
| 33.08046 | 78 | 0.601633 |
e170584d863234c8e6b2e7883ae6871893865c83 | 44,156 | py | Python | dmtree/decision_tree.py | jdvelasq/hardDecisions | 54f886e82784c4061200d843841ef600b0ac366b | [
"MIT"
] | null | null | null | dmtree/decision_tree.py | jdvelasq/hardDecisions | 54f886e82784c4061200d843841ef600b0ac366b | [
"MIT"
] | null | null | null | dmtree/decision_tree.py | jdvelasq/hardDecisions | 54f886e82784c4061200d843841ef600b0ac366b | [
"MIT"
] | 1 | 2021-07-17T19:27:54.000Z | 2021-07-17T19:27:54.000Z | """
Decision Tree Model
==============================================================================
"""
# import math
from typing import Any, List
# import numpy as np
class DecisionTree:
    """Creates and evaluates a decision tree model.

    Nodes are stored as plain dictionaries in ``self.data``; a node's
    position in that list is its ``id``, which branch tuples use to
    reference the next node in the tree.
    """

    def __init__(self):
        """Decision tree constructor."""
        # Flat list of node dictionaries; a node's index is its id.
        self.data: List = []
        # self.tree: List = []
        # self.globals = {}
        # self.utility_function = None
        # self.inv_utility_function = None
        # self.R: float = 0

    def display_nodes(self) -> None:
        """Display all the data nodes in the decision tree.

        Prints one section per node, in creation order, showing its
        type, name and branch table (chance/value/next-node).

        Raises:
            ValueError: when a node carries an unknown ``type``.
        """

        def display_decision_node(node):
            # Renders a DECISION node: name plus a value/next-node table.
            txt = []
            txt.append("   Type: " + node.get("type"))
            txt[-1] += (
                " - Maximum Payoff" if node.get("max") is True else " - Minimum Payoff"
            )
            txt.append("   Name: " + node.get("tag"))
            txt.append("   Branches:")
            txt.append("                         Value  Next Node")
            for (outcome, next_node) in node.get("branches"):
                txt.append(
                    "                  {:12.3f}  {:d}".format(outcome, next_node)
                )
            txt.append("")
            return txt

        def display_chance_node(node):
            # Renders a CHANCE node: name plus a chance/value/next-node table.
            txt = []
            txt.append("   Type: " + node.get("type"))
            txt.append("   Name: " + node.get("tag"))
            txt.append("   Branches:")
            txt.append("         Chance         Value  Next Node")
            for (prob, outcome, next_node) in node.get("branches"):
                txt.append(
                    "          {:5.2f}  {:12.3f}  {:d}".format(
                        prob, outcome, next_node
                    )
                )
            txt.append("")
            return txt

        def display_terminal_node(node):
            # Renders a TERMINAL node.  Bug fix: when a user expression was
            # supplied, print it (as terminal_node's doctest expects) instead
            # of the old "(User fn)" placeholder.
            txt = []
            txt.append("   Type: " + node.get("type"))
            if node.get("expr") is None:
                txt.append("   Expr: (cumulative)")
            else:
                txt.append("   Expr: " + str(node.get("expr")))
            txt.append("")
            return txt

        txt = []
        for index, node in enumerate(self.data):
            txt.append("Node {:d}".format(index))
            node_type = node.get("type")
            if node_type == "DECISION":
                txt += display_decision_node(node)
            elif node_type == "CHANCE":
                txt += display_chance_node(node)
            elif node_type == "TERMINAL":
                txt += display_terminal_node(node)
            else:
                # Bug fix: nodes are dicts, so look up the name with
                # .get("tag") -- the old `node.tag` raised AttributeError.
                raise ValueError(
                    "Node type unknown: {}, {}".format(node.get("tag"), node_type)
                )
        print("\n".join(txt))

    def terminal_node(
        self,
        expr: Any = None,
    ) -> None:
        """Creates a decision tree's terminal node.

        Args:

        :param expr:
            It is a valid python code used for computing the value of the
            terminal node in the tree. The name of the nodes can be used in the
            expression. When the value is `None`, the expression is created as
            a sum of the names of the branches in the tree.

        The following example creates a simple terminal node.

        >>> tree = DecisionTree()
        >>> tree.terminal_node(expr='python code')
        >>> tree.display_nodes()  # doctest: +NORMALIZE_WHITESPACE
        Node 0
           Type: TERMINAL
           Expr: python code
        <BLANKLINE>
        """
        # The node's id is its index in self.data: len() is evaluated
        # before the append, so it equals the new node's position.
        self.data.append(
            {
                "type": "TERMINAL",
                "expr": expr,
                "id": len(self.data),
            }
        )
# def chance_node(
# self,
# name: str = None,
# branches: List = None,
# ignore: bool = False,
# ) -> None:
# """Creates a decisions tree's internal chance node.
# :param name:
# A valid name for variablesl in Python.
# :param branches:
# A list of tuples, where each tuple contains the
# corresponding information of each branch in the node. Each tuple
# has the probability, the value of the branch and the index of
# the next node.
# :param ignore:
# When it is `True`, the name of the node is not used
# for creating the default expression for the terminal nodes in
# the path containing this node.
# The following example creates a tree with a chance node in the root with
# four branches finished in the same terminal node.
# >>> tree = DecisionTree()
# >>> tree.chance_node(
# ... name="ChanceNode",
# ... branches=[
# ... (20.0, 100, 1),
# ... (30.0, 200, 1),
# ... (50.0, 300, 1),
# ... ],
# ... )
# >>> tree.terminal_node()
# >>> tree.display_nodes() # doctest: +NORMALIZE_WHITESPACE
# Node 0
# Type: CHANCE
# Name: ChanceNode
# Branches:
# Chance Value Next Node
# 20.00 100.000 1
# 30.00 200.000 1
# 50.00 300.000 1
# <BLANKLINE>
# Node 1
# Type: TERMINAL
# Expr: (cumulative)
# <BLANKLINE>
# """
# self.data.append(
# {
# "tag": name,
# "type": "CHANCE",
# "branches": branches,
# "ignore": ignore,
# "id": len(self.data),
# }
# )
# def decision_node(
# self,
# name: str = None,
# branches: List = None,
# max: bool = True,
# ignore: bool = False,
# ) -> None:
# """Creates a decisions tree's internal decision node.
# :param name:
# A valid name for variablesl in Python.
# :param branches:
# A list of tuples, where each tuple contains the corresponding
# information of each branch in the node. Each tuple has the value
# of the branch and the index of the next node.
# :param max:
# When it is `True`, selects the branch with the maximum expected value.
# :param ignore:
# When it is `True`, the name of the node is not used for creating
# the default expression for the terminal nodes in the path
# containing this node.
# The following example creates a tree with a decision node in the root with
# four branches finished in the same terminal node.
# >>> tree = DecisionTree()
# >>> tree.decision_node(
# ... name='DecisionNode',
# ... branches=[
# ... (100, 1),
# ... (200, 1),
# ... (300, 1),
# ... (400, 1),
# ... ],
# ... max=True,
# ... )
# >>> tree.terminal_node()
# >>> tree.display_nodes() # doctest: +NORMALIZE_WHITESPACE
# Node 0
# Type: DECISION - Maximum Payoff
# Name: DecisionNode
# Branches:
# Value Next Node
# 100.000 1
# 200.000 1
# 300.000 1
# 400.000 1
# <BLANKLINE>
# Node 1
# Type: TERMINAL
# Expr: (cumulative)
# <BLANKLINE>
# """
# self.data.append(
# {
# "tag": name,
# "type": "DECISION",
# "branches": branches,
# "max": max,
# "ignore": ignore,
# "id": len(self.data),
# }
# )
# def build_tree(self) -> None:
# """Builds the decision tree using the information in the variables."""
# def get_current_branch(id):
# for var_id, var_branch in self.stack:
# if var_id == id:
# return var_branch
# return None
# def find_value(data):
# if isinstance(data, tuple):
# id, values = data
# return find_value(values[get_current_branch(id)])
# return data
# def new_branch():
# self.tree.append(
# {
# "ExpVal": None,
# "sel_strategy": None,
# "id": len(self.tree),
# }
# )
# return (len(self.tree) - 1, self.tree[-1])
# def set_branch_data(this_branch, this_node, path):
# def set_terminal():
# this_branch["type"] = this_node.get("type")
# if this_branch.get("ignore", True) is False:
# path.append(this_branch.get("tag"))
# this_branch["expr"] = (
# "+".join(path)
# if this_node.get("expr") is None
# else this_node.get("expr")
# )
# def set_decision():
# #
# this_branch["type"] = this_node.get("type")
# this_branch["forced_branch_idx"] = None
# this_branch["next_branches"] = []
# this_branch["max"] = this_node.get("max")
# if this_branch.get("ignore", True) is False:
# path.append(this_branch.get("tag"))
# #
# for idx, (value, next_node) in enumerate(this_node.get("branches")):
# #
# self.stack.append((this_node["id"], idx))
# #
# next_branch_id, next_branch = new_branch()
# this_branch["next_branches"].append(next_branch_id)
# next_branch["ignore"] = this_node.get("ignore")
# next_branch["tag"] = this_node.get("tag")
# next_branch["value"] = find_value(value)
# #
# set_branch_data(
# this_branch=next_branch,
# this_node=self.data[next_node],
# path=path.copy(),
# )
# #
# self.stack.pop()
# def set_chance():
# this_branch["type"] = this_node.get("type")
# this_branch["forced_branch_idx"] = None
# this_branch["next_branches"] = []
# if this_branch.get("ignore", True) is False:
# path.append(this_branch.get("tag"))
# for idx, (prob, value, next_node) in enumerate(
# this_node.get("branches")
# ):
# self.stack.append((this_node["id"], idx))
# next_branch_id, next_branch = new_branch()
# this_branch["next_branches"].append(next_branch_id)
# next_branch["ignore"] = this_node.get("ignore")
# next_branch["tag"] = this_node.get("tag")
# next_branch["value"] = find_value(value)
# next_branch["prob"] = find_value(prob)
# set_branch_data(
# this_branch=next_branch,
# this_node=self.data[next_node],
# path=path.copy(),
# )
# self.stack.pop()
# ####
# if this_node.get("type") == "DECISION":
# set_decision()
# elif this_node.get("type") == "CHANCE":
# set_chance()
# elif this_node.get("type") == "TERMINAL":
# set_terminal()
# else:
# pass
# ###
# self.stack = []
# self.tree = []
# path = []
# _, this_branch = new_branch()
# set_branch_data(
# this_branch=this_branch, this_node=self.data[0], path=path.copy()
# )
# del self.stack
# def evaluate(self, locals=locals()):
# """Evalute the tree. First, the cumulative probabilities in all nodes
# are calculated. Finally, the algorithm computes the expected values.
# Args:
# None.
# Returns:
# None.
# """
# def compute_expected_values():
# """computes expected values."""
# def compute_branch_expvalue(this_branch):
# if this_branch.get("type") == "DECISION":
# #
# if "tag" in this_branch.keys():
# self.globals[this_branch["tag"]] = this_branch["value"]
# ismax = this_branch["max"]
# expval = None
# exputl = None
# CE = None
# #
# if self.utility_function is None:
# for branch_idx, branch_id in enumerate(
# this_branch["next_branches"]
# ):
# compute_branch_expvalue(this_branch=self.tree[branch_id])
# if this_branch["forced_branch_idx"] is None:
# if expval is None:
# expval = self.tree[branch_id].get("ExpVal")
# this_branch["opt_branch_idx"] = branch_idx
# if ismax is True and expval < self.tree[branch_id].get(
# "ExpVal"
# ):
# expval = self.tree[branch_id].get("ExpVal")
# this_branch["opt_branch_idx"] = branch_idx
# if ismax is False and expval > self.tree[branch_id].get(
# "ExpVal"
# ):
# expval = self.tree[branch_id].get("ExpVal")
# this_branch["opt_branch_idx"] = branch_idx
# else:
# if branch_idx == this_branch["forced_branch_idx"]:
# expval = self.tree[branch_id].get("ExpVal")
# this_branch["opt_branch_idx"] = branch_idx
# this_branch["ExpVal"] = expval
# else:
# for branch_idx, branch_id in enumerate(
# this_branch["next_branches"]
# ):
# compute_branch_expvalue(this_branch=self.tree[branch_id])
# if this_branch["forced_branch_idx"] is None:
# if expval is None:
# expval = self.tree[branch_id].get("ExpVal")
# exputl = self.tree[branch_id].get("ExpUtl")
# CE = self.tree[branch_id].get("CE")
# this_branch["opt_branch_idx"] = branch_idx
# if exputl < self.tree[branch_id].get("ExpUtl"):
# expval = self.tree[branch_id].get("ExpVal")
# exputl = self.tree[branch_id].get("ExpUtl")
# CE = self.tree[branch_id].get("CE")
# this_branch["opt_branch_idx"] = branch_idx
# else:
# if branch_idx == this_branch["forced_branch_idx"]:
# expval = self.tree[branch_id].get("ExpVal")
# exputl = self.tree[branch_id].get("ExpUtl")
# CE = self.tree[branch_id].get("CE")
# this_branch["opt_branch_idx"] = branch_idx
# this_branch["ExpVal"] = expval
# this_branch["ExpUtl"] = exputl
# this_branch["CE"] = CE
# if this_branch.get("type") == "CHANCE":
# self.globals[this_branch["tag"]] = this_branch["value"]
# expval = 0
# exputl = 0
# CE = None
# #
# if self.utility_function is None:
# if this_branch["forced_branch_idx"] is None:
# for branch_id in this_branch["next_branches"]:
# compute_branch_expvalue(
# this_branch=self.tree[branch_id]
# )
# expval += (
# self.tree[branch_id].get("ExpVal")
# * self.tree[branch_id].get("prob")
# / 100
# )
# else:
# for branch_idx, branch_id in enumerate(
# this_branch["next_branches"]
# ):
# if branch_idx == this_branch["forced_branch_idx"]:
# compute_branch_expvalue(
# this_branch=self.tree[branch_id]
# )
# expval += self.tree[branch_id].get("ExpVal")
# else:
# compute_branch_expvalue(
# this_branch=self.tree[branch_id]
# )
# expval += 0
# this_branch["ExpVal"] = expval
# else:
# if this_branch["forced_branch_idx"] is None:
# for branch_id in this_branch["next_branches"]:
# compute_branch_expvalue(
# this_branch=self.tree[branch_id]
# )
# expval += (
# self.tree[branch_id].get("ExpVal")
# * self.tree[branch_id].get("prob")
# / 100
# )
# exputl += (
# self.tree[branch_id].get("ExpUtl")
# * self.tree[branch_id].get("prob")
# / 100
# )
# else:
# for branch_idx, branch_id in enumerate(
# this_branch["next_branches"]
# ):
# if branch_idx == this_branch["forced_branch_idx"]:
# compute_branch_expvalue(
# this_branch=self.tree[branch_id]
# )
# expval += self.tree[branch_id].get("ExpVal")
# exputl += self.tree[branch_id].get("ExpUtl")
# else:
# compute_branch_expvalue(
# this_branch=self.tree[branch_id]
# )
# expval += 0
# exputl += 0
# this_branch["ExpVal"] = expval
# this_branch["ExpUtl"] = exputl
# this_branch["CE"] = self.inv_utility_function(exputl)
# # if this_branch.get('type') == 'TERMINAL':
# # var = this_branch['tag']
# # value = this_branch['value']
# # self.globals[var] = value
# # glb = self.globals.copy()
# # glb.update(locals().copy())
# # # this_branch['ExpVal'] = eval(this_branch['expr'], self.globals.copy())
# # this_branch['ExpVal'] = eval(this_branch['expr'], glb.copy())
# #
# # if self.utility_function is not None:
# # this_branch['ExpUtl'] = self.utility_function(this_branch['ExpVal'])
# # this_branch['CE'] = this_branch['ExpVal']
# if this_branch.get("type") == "TERMINAL":
# var = this_branch["tag"]
# value = this_branch["value"]
# self.globals[var] = value
# #
# # globals = globals()
# # self.globals.copy()
# # for var in self.globals:
# # eval(var + ' = ' + str(self.globals[var]))
# #
# this_branch["ExpVal"] = eval(
# this_branch["expr"], self.globals.copy(), locals.copy()
# )
# if self.utility_function is not None:
# this_branch["ExpUtl"] = self.utility_function(
# this_branch["ExpVal"]
# )
# this_branch["CE"] = this_branch["ExpVal"]
# compute_branch_expvalue(this_branch=self.tree[0])
# def compute_path_probabilities():
# """Computes the probabilities in all tree branches."""
# def compute_branch_prob(this_branch, probability, sel_strategy):
# if this_branch["type"] == "DECISION":
# this_branch["sel_strategy"] = sel_strategy
# if sel_strategy is True:
# for branch_idx, branch_id in enumerate(
# this_branch["next_branches"]
# ):
# if branch_idx == this_branch["opt_branch_idx"]:
# compute_branch_prob(
# this_branch=self.tree[branch_id],
# probability=probability,
# sel_strategy=True,
# )
# else:
# compute_branch_prob(
# this_branch=self.tree[branch_id],
# probability=0,
# sel_strategy=False,
# )
# else:
# if sel_strategy is True:
# current_prob = probability
# else:
# current_prob = 0
# for branch_id in this_branch["next_branches"]:
# compute_branch_prob(
# this_branch=self.tree[branch_id],
# probability=current_prob,
# sel_strategy=False,
# )
# if this_branch["type"] == "CHANCE":
# this_branch["sel_strategy"] = sel_strategy
# if this_branch["forced_branch_idx"] is None:
# for branch_id in this_branch["next_branches"]:
# prob = self.tree[branch_id]["prob"]
# compute_branch_prob(
# this_branch=self.tree[branch_id],
# probability=probability * prob / 100,
# sel_strategy=sel_strategy,
# )
# else:
# for branch_idx, branch_id in enumerate(
# this_branch["next_branches"]
# ):
# if branch_idx == this_branch["forced_branch_idx"]:
# prob = self.tree[branch_id]["prob"]
# prob = 100
# compute_branch_prob(
# this_branch=self.tree[branch_id],
# probability=probability * prob / 100,
# sel_strategy=True,
# )
# else:
# prob = self.tree[branch_id]["prob"]
# prob = 0
# compute_branch_prob(
# this_branch=self.tree[branch_id],
# probability=probability * prob / 100,
# sel_strategy=False,
# )
# if this_branch["type"] == "TERMINAL":
# this_branch["sel_strategy"] = sel_strategy
# this_branch["PathProb"] = probability * 100
# #
# compute_branch_prob(
# this_branch=self.tree[0], probability=1.0, sel_strategy=True
# )
# for branch in self.tree:
# if "RiskProfile" in branch.keys():
# del branch["RiskProfile"]
# self.cumvalue = 0
# compute_expected_values()
# compute_path_probabilities()
# def display_tree(self, maxdeep=None, selected_strategy=False):
# r"""Prints the tree as a text diagram.
# Args:
# maxdeep (int, None): maximum deep of tree to print.
# selected_strategy (bool): When it is `True`, only the
# optimal (or forced branches) in the tree are displayed.
# Returns:
# None.
# The following example creates a decision tree with a unique decision
# node at the root of the tree. When the tree has not been evaluated,
# this function shows only the number of the branch and the name and
# value of the variable representing the type of node.
# >>> tree = DecisionTree()
# >>> tree.decision_node(name='DecisionNode',
# ... branches=[(100, 1),
# ... (200, 1),
# ... (300, 1),
# ... (400, 1)],
# ... max=True)
# >>> tree.terminal_node()
# >>> tree.build_tree()
# >>> tree.display_tree() # doctest: +NORMALIZE_WHITESPACE
# |
# | #0
# \-------[D]
# |
# | #1
# | DecisionNode=100
# +-------[T] DecisionNode
# |
# | #2
# | DecisionNode=200
# +-------[T] DecisionNode
# |
# | #3
# | DecisionNode=300
# +-------[T] DecisionNode
# |
# | #4
# | DecisionNode=400
# \-------[T] DecisionNode
# When the tree is evaluated, additional information is displayed for
# each branch. `PathProb` is the path probability for the corresponding
# branch of the tree. `ExpVal` is the expected value of the node.
# `(selected strategy)` indicates the branches corresponding to the
# optimal (or forced) decision strategy.
# >>> tree.evaluate()
# >>> tree.display_tree() # doctest: +NORMALIZE_WHITESPACE
# |
# | #0
# | ExpVal=400.00
# | (selected strategy)
# \-------[D]
# |
# | #1
# | DecisionNode=100
# | PathProb=0.00
# | ExpVal=100.00
# +-------[T] DecisionNode
# |
# | #2
# | DecisionNode=200
# | PathProb=0.00
# | ExpVal=200.00
# +-------[T] DecisionNode
# |
# | #3
# | DecisionNode=300
# | PathProb=0.00
# | ExpVal=300.00
# +-------[T] DecisionNode
# |
# | #4
# | DecisionNode=400
# | PathProb=100.00
# | ExpVal=400.00
# | (selected strategy)
# \-------[T] DecisionNode
# The parameter `selected_strategy` are used to print the branches of
# tree in the optimal decision strategy. This option allows the user
# to analyze the sequence of optimal decisions.
# >>> tree.display_tree(selected_strategy=True) # doctest: +NORMALIZE_WHITESPACE
# |
# | #0
# | ExpVal=400.00
# | (selected strategy)
# \-------[D]
# |
# | #4
# | DecisionNode=400
# | PathProb=100.00
# | ExpVal=400.00
# | (selected strategy)
# \-------[T] DecisionNode
# """
# def print_branch(prefix, this_branch, is_node_last_branch):
# print(prefix + "|")
# type = this_branch.get("type")
# if "id" in this_branch.keys():
# print(prefix + "| #" + str(this_branch.get("id")))
# ## prints the name and value of the variable
# if "tag" in this_branch.keys():
# var = this_branch["tag"]
# if "value" in this_branch.keys():
# txt = "| " + var + "=" + str(this_branch["value"])
# else:
# txt = "| " + var
# print(prefix + txt)
# ## prints the probability
# if "prob" in this_branch.keys():
# txt = "| Prob={:1.2f}".format(this_branch["prob"])
# print(prefix + txt)
# ## prints the cumulative probability
# if type == "TERMINAL" and "PathProb" in this_branch.keys():
# txt = "| PathProb={:1.2f}".format(this_branch["PathProb"])
# print(prefix + txt)
# if "ExpVal" in this_branch.keys() and this_branch["ExpVal"] is not None:
# txt = "| ExpVal={:1.2f}".format(this_branch["ExpVal"])
# print(prefix + txt)
# if "ExpUtl" in this_branch.keys() and this_branch["ExpUtl"] is not None:
# txt = "| ExpUtl={:1.2f}".format(this_branch["ExpUtl"])
# print(prefix + txt)
# if "CE" in this_branch.keys() and this_branch["CE"] is not None:
# txt = "| CE={:1.2f}".format(this_branch["CE"])
# print(prefix + txt)
# if "RiskProfile" in this_branch.keys() and type != "TERMINAL":
# print(prefix + "| Risk Profile:")
# print(prefix + "| Value Prob")
# for key in sorted(this_branch["RiskProfile"]):
# txt = "| {:8.2f} {:5.2f}".format(
# key, this_branch["RiskProfile"][key]
# )
# print(prefix + txt)
# if (
# "sel_strategy" in this_branch.keys()
# and this_branch["sel_strategy"] is True
# ):
# txt = "| (selected strategy)"
# print(prefix + txt)
# if (
# "forced_branch_idx" in this_branch.keys()
# and this_branch["forced_branch_idx"] is not None
# ):
# txt = "| (forced branch = {:1d})".format(
# this_branch["forced_branch_idx"]
# )
# print(prefix + txt)
# next_branches = (
# this_branch["next_branches"]
# if "next_branches" in this_branch.keys()
# else None
# )
# if is_node_last_branch is True:
# if type == "DECISION":
# txt = r"\-------[D]"
# if type == "CHANCE":
# txt = r"\-------[C]"
# if type == "TERMINAL":
# txt = r"\-------[T] {:s}".format(this_branch["expr"])
# else:
# if type == "DECISION":
# txt = "+-------[D]"
# if type == "CHANCE":
# txt = "+-------[C]"
# if type == "TERMINAL":
# txt = "+-------[T] {:s}".format(this_branch["expr"])
# print(prefix + txt)
# if maxdeep is not None and self.current_deep == maxdeep:
# return
# self.current_deep += 1
# if next_branches is not None:
# if selected_strategy is True and type == "DECISION":
# optbranch = this_branch["opt_branch_idx"]
# if is_node_last_branch is True:
# print_branch(
# prefix + " " * 9,
# self.tree[next_branches[optbranch]],
# is_node_last_branch=True,
# )
# else:
# print_branch(
# prefix + "|" + " " * 8,
# self.tree[next_branches[optbranch]],
# is_node_last_branch=True,
# )
# else:
# for next_branch_idx, next_branch_id in enumerate(next_branches):
# is_last_tree_branch = (
# True if next_branch_idx == len(next_branches) - 1 else False
# )
# if is_node_last_branch is True:
# print_branch(
# prefix + " " * 9,
# self.tree[next_branch_id],
# is_node_last_branch=is_last_tree_branch,
# )
# else:
# print_branch(
# prefix + "|" + " " * 8,
# self.tree[next_branch_id],
# is_node_last_branch=is_last_tree_branch,
# )
# self.current_deep -= 1
# self.current_deep = 0
# print_branch(prefix="", this_branch=self.tree[0], is_node_last_branch=True)
# def compute_risk_profile(self):
# r"""Computes the risk profile for the selected strategy.
# In the following example, a decision tree with a decision node in the
# root followed by a chance node is created and evaluated.
# >>> tree = DecisionTree()
# >>> tree.decision_node(name='DecisionNode',
# ... branches=[(100, 1),
# ... (200, 1)],
# ... max=True)
# >>> tree.chance_node(name='ChanceNode',
# ... branches=[(25, 300, 2),
# ... (50, 400, 2),
# ... (25, 500, 2)])
# >>> tree.terminal_node()
# >>> tree.build_tree()
# >>> tree.evaluate()
# Next, the risk profile for the branches corresponding to the sequence of
# optimal decisions is computed.
# >>> tree.compute_risk_profile()
# >>> tree.display_tree() # doctest: +NORMALIZE_WHITESPACE
# |
# | #0
# | ExpVal=600.00
# | Risk Profile:
# | Value Prob
# | 500.00 25.00
# | 600.00 50.00
# | 700.00 25.00
# | (selected strategy)
# \-------[D]
# |
# | #1
# | DecisionNode=100
# | ExpVal=500.00
# +-------[C]
# | |
# | | #2
# | | ChanceNode=300
# | | Prob=25.00
# | | PathProb=0.00
# | | ExpVal=400.00
# | +-------[T] DecisionNode+ChanceNode
# | |
# | | #3
# | | ChanceNode=400
# | | Prob=50.00
# | | PathProb=0.00
# | | ExpVal=500.00
# | +-------[T] DecisionNode+ChanceNode
# | |
# | | #4
# | | ChanceNode=500
# | | Prob=25.00
# | | PathProb=0.00
# | | ExpVal=600.00
# | \-------[T] DecisionNode+ChanceNode
# |
# | #5
# | DecisionNode=200
# | ExpVal=600.00
# | Risk Profile:
# | Value Prob
# | 500.00 25.00
# | 600.00 50.00
# | 700.00 25.00
# | (selected strategy)
# \-------[C]
# |
# | #6
# | ChanceNode=300
# | Prob=25.00
# | PathProb=25.00
# | ExpVal=500.00
# | (selected strategy)
# +-------[T] DecisionNode+ChanceNode
# |
# | #7
# | ChanceNode=400
# | Prob=50.00
# | PathProb=50.00
# | ExpVal=600.00
# | (selected strategy)
# +-------[T] DecisionNode+ChanceNode
# |
# | #8
# | ChanceNode=500
# | Prob=25.00
# | PathProb=25.00
# | ExpVal=700.00
# | (selected strategy)
# \-------[T] DecisionNode+ChanceNode
# Risk profile values can be acceced using the `risk_profile` variable
# of the nodes in the optimal sequence of decisions. In the following code
# the risk profile is obtained for the root node. Risk profile is retuned
# as a dictionary where the keys are the expected values and the values
# stored in the dictionary are the probabilities of the corresponding
# expected values.
# >>> tree.tree[0]['RiskProfile'] # doctest: +NORMALIZE_WHITESPACE
# {500: 25.0, 600: 50.0, 700: 25.0}
# """
# def collect(this_branch):
# if this_branch["sel_strategy"] is False:
# return
# if this_branch["type"] == "DECISION":
# for branch_id in this_branch["next_branches"]:
# collect(this_branch=self.tree[branch_id])
# next_opt_branch = this_branch["next_branches"][
# this_branch["opt_branch_idx"]
# ]
# this_branch["RiskProfile"] = self.tree[next_opt_branch]["RiskProfile"]
# if this_branch["type"] == "CHANCE":
# for branch_id in this_branch["next_branches"]:
# collect(this_branch=self.tree[branch_id])
# this_branch["RiskProfile"] = {}
# for branch_id in this_branch["next_branches"]:
# next_branch = self.tree[branch_id]["RiskProfile"]
# for key in next_branch.keys():
# if key in this_branch["RiskProfile"].keys():
# this_branch["RiskProfile"][key] += next_branch[key]
# else:
# this_branch["RiskProfile"][key] = next_branch[key]
# if this_branch["type"] == "TERMINAL":
# this_branch["RiskProfile"] = {
# this_branch["ExpVal"]: this_branch["PathProb"]
# }
# collect(this_branch=self.tree[0])
# def exponential_utility_fcn(self, x):
# """Computes the exponential utility function defined as `1 - exp(-x/R)`."""
# return 1 - math.exp(-x / self.R)
# def inv_exponential_utility_fcn(self, u):
# """Computes the inverse exponential utility function defined as `-R * log(1 - U)`."""
# return -1.0 * self.R * math.log(1 - u)
# def logarithmic_utility_fcn(self, x):
# """Computes the logarithmic utility function defined as `log(x + R)`."""
# return math.log(x + self.R)
# def inv_logarithmic_utility_fcn(self, u):
# """Computes the inverse logarithmic utility function defined as `exp(U) - R`."""
# return math.exp(u) - self.R
# def square_root_utility_fcn(self, x):
# """Computes the square root utility function defined as `sqrt(x + R)`."""
# return math.sqrt(x + self.R)
# def inv_square_root_utility_fcn(self, u):
# """Computes the inverse square root utility function defined as `U**2 - R`."""
# return math.pow(u, 2) - self.R
# def use_utility_function(
# self,
# exponential=None,
# logarithmic=None,
# square_root=None,
# R=None,
# ):
# """This function specify the use of utility functions for all
# internal computations in the decision tree.
# Args:
# exponential (logical, None): When it is True, the exponential utility
# function is used for computing the expected utility in the nodes
# of the tree.
# logarithmic (logical, None): When it is True, the logarithmic utility
# function is used for computing the expected utility in the nodes
# of the tree.
# square_root (logical, None): When it is True, the square root utility
# function is used for computing the expected utility in the nodes
# of the tree.
# R (float): Value of the R parameter of the utility function.
# Returns:
# None.
# """
# self.utility_function = None
# self.inv_utility_function = None
# self.R = 0
# if exponential is True:
# self.utility_function = self.exponential_utility_fcn
# self.inv_utility_function = self.inv_exponential_utility_fcn
# self.R = R
# return
# if logarithmic is True:
# self.utility_function = self.logarithmic_utility_fcn
# self.inv_utility_function = self.inv_logarithmic_utility_fcn
# self.R = R
# return
# if square_root is True:
# self.utility_function = self.square_root_utility_fcn
# self.inv_utility_function = self.inv_square_root_utility_fcn
# self.R = R
# return
# def force_branch(self, branch_id, branch_idx=None):
# self.tree[branch_id]["forced_branch_idx"] = branch_idx
# if __name__ == "__main__":
# import doctest
# doctest.testmod()
| 40.178344 | 96 | 0.416048 |
f51d71290166cea2ae5aa2963770bfa0c5c6f560 | 78,746 | py | Python | python/ccxt/hbtc.py | EdwinSchrubb/ccxt | b134ce6ffad54c69ceaa872b07c71ca07e7d3a1a | [
"MIT"
] | 1 | 2021-10-16T17:00:03.000Z | 2021-10-16T17:00:03.000Z | python/ccxt/hbtc.py | EdwinSchrubb/ccxt | b134ce6ffad54c69ceaa872b07c71ca07e7d3a1a | [
"MIT"
] | 2 | 2020-05-12T12:53:48.000Z | 2020-07-05T12:59:52.000Z | python/ccxt/hbtc.py | EdwinSchrubb/ccxt | b134ce6ffad54c69ceaa872b07c71ca07e7d3a1a | [
"MIT"
] | 3 | 2020-04-01T05:56:19.000Z | 2020-06-24T10:17:13.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import BadResponse
from ccxt.base.errors import NullResponse
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import TICK_SIZE
class hbtc(Exchange):
    def describe(self):
        """Return the static exchange description: endpoints, API routes, fees, timeframes and error-code mapping."""
        return self.deep_extend(super(hbtc, self).describe(), {
            'id': 'hbtc',
            'name': 'HBTC',
            'countries': ['CN'],
            'rateLimit': 2000,
            'version': 'v1',
            'has': {
                'CORS': False,
                'fetchTime': True,
                'fetchBidAsk': True,
                'fetchBidsAsks': True,
                'fetchTickers': True,
                'fetchTicker': True,
                'fetchDepositAddress': False,
                'fetchOHLCV': True,
                'fetchOrder': True,
                'fetchOrders': False,
                'fetchOpenOrders': True,
                'fetchClosedOrders': True,
                'fetchTradingLimits': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'withdraw': True,
                'fetchCurrencies': False,
                'fetchDeposits': True,
                'fetchWithdrawals': True,
                'fetchAccounts': True,
                'fetchLedger': True,
            },
            'timeframes': {
                '1m': '1m',
                '3m': '3m',
                '5m': '5m',
                '15m': '15m',
                '30m': '30m',
                '1h': '1h',
                '2h': '2h',
                '4h': '4h',
                '6h': '6h',
                '8h': '8h',
                '12h': '12h',
                '1d': '1d',
                '3d': '3d',
                '1w': '1w',
                '1M': '1M',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/51840849/80134449-70663300-85a7-11ea-8942-e204cdeaab5d.jpg', # exchange logo
                'api': {
                    'quote': 'https://api.hbtc.com/openapi/quote', # market data API endpoint
                    'contract': 'https://api.hbtc.com/openapi/contract', # contract API endpoint
                    'option': 'https://api.hbtc.com/openapi/option', # option API endpoint
                    'public': 'https://api.hbtc.com/openapi', # public API endpoint
                    'private': 'https://api.hbtc.com/openapi', # private API endpoint
                    'zendesk': 'https://hbtc.zendesk.com/hc/en-us',
                },
                'www': 'https://www.hbtc.com', # company homepage
                'referral': 'https://www.hbtc.com/register/O2S8NS', # referral link
                'doc': 'https://github.com/bhexopen/BHEX-OpenApi/tree/master/doc', # OpenAPI documentation
                'fees': 'https://hbtc.zendesk.com/hc/zh-cn/articles/360009274694', # fee schedule
            },
            'api': {
                'public': {
                    'get': [
                        'ping',
                        'time',
                        'brokerInfo', # query current broker trading rules and symbol information
                        'getOptions',
                    ],
                },
                'quote': {
                    'get': [
                        'depth', # get order book depth
                        'depth/merged',
                        'trades', # get most recent public trades
                        'klines', # get kline/candlestick data
                        'ticker/24hr', # get 24-hour price change data
                        'ticker/price',
                        'ticker/bookTicker',
                        'contract/index', # get contract underlying index price
                        'contract/depth', # get contract order book depth
                        'contract/depth/merged',
                        'contract/trades', # get most recent contract trades
                        'contract/klines', # get contract kline data
                        'contract/ticker/24hr',
                        'option/index',
                        'option/depth',
                        'option/depth/merged',
                        'option/trades',
                        'option/klines',
                        'option/ticker/24hr',
                    ],
                },
                'contract': {
                    'get': [
                        # public
                        'insurance',
                        'fundingRate', # get funding rate information
                        # private
                        'openOrders', # query current contract open orders
                        'historyOrders', # query contract order history
                        'getOrder', # query contract order details
                        'myTrades', # query contract trade history
                        'positions', # query current contract positions
                        'account', # query contract account information
                    ],
                    'post': [
                        'order', # create a contract order
                        'modifyMargin', # modify margin
                    ],
                    'delete': [
                        'order/cancel', # cancel a contract order
                        'order/batchCancel',
                    ],
                },
                'option': {
                    'get': [
                        'openOrders',
                        'positions',
                        'historyOrders',
                        # 'getOrder',
                        'myTrades',
                        'settlements',
                        'account',
                    ],
                    'post': [
                        'order',
                    ],
                    'delete': [
                        'order/cancel',
                    ],
                },
                'private': {
                    'get': [
                        'order', # query an order
                        'openOrders', # query open orders
                        'historyOrders', # query order history
                        'account', # get current account information
                        'myTrades', # query trade history
                        'depositOrders',
                        'withdrawalOrders',
                        'withdraw/detail',
                        'balance_flow',
                    ],
                    'post': [
                        'order', # create a new order
                        'order/test',
                        'userDataStream',
                        'subAccount/query',
                        'transfer',
                        'user/transfer',
                        'withdraw',
                    ],
                    'put': [
                        'userDataStream',
                    ],
                    'delete': [
                        'order', # cancel an order
                        'userDataStream',
                    ],
                },
            },
            'precisionMode': TICK_SIZE,
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'maker': 0.001,
                    'taker': 0.001,
                },
            },
            'exceptions': {
                'exact': {
                    # general server or network errors
                    '-1000': ExchangeError, # An unknown error occured while processing the request
                    '-1001': ExchangeError, # Internal error, unable to process your request. Please try again
                    '-1002': AuthenticationError, # You are not authorized to execute self request. Request need API Key included in. We suggest that API Key be included in any request
                    '-1003': RateLimitExceeded, # Too many requests, please use the websocket for live updates
                    '-1004': BadRequest,
                    '-1005': PermissionDenied,
                    '-1006': BadResponse, # An unexpected response was received from the message bus. Execution status unknown. OPEN API server find some exception in execute request.Please report to Customer service
                    '-1007': RequestTimeout, # Timeout waiting for response from backend server. Send status unknown, execution status unknown
                    '-1014': InvalidOrder, # Unsupported order combination
                    '-1015': RateLimitExceeded, # Reach the rate limit.Please slow down your request speed
                    '-1016': ExchangeNotAvailable, # This service is no longer available
                    '-1020': NotSupported, # This operation is not supported
                    '-1021': BadRequest, # Timestamp for self request is outside of the recvWindow
                    '-1022': AuthenticationError, # Signature for self request is not valid
                    # request issues
                    '-1100': BadRequest, # Illegal characters found in a parameter
                    '-1101': BadRequest, # Too many parameters sent for self endpoint
                    '-1102': BadRequest, # A mandatory parameter was not sent, was empty/null, or malformed
                    '-1103': BadRequest, # An unknown parameter was sent
                    '-1104': BadRequest, # Not all sent parameters were read
                    '-1105': BadRequest, # A parameter was empty
                    '-1106': BadRequest, # A parameter was sent when not required
                    '-1111': BadRequest, # Precision is over the maximum defined for self asset
                    '-1112': NullResponse, # No orders on book for symbol
                    '-1114': InvalidOrder, # TimeInForce parameter sent when not required
                    '-1115': InvalidOrder, # Invalid timeInForce
                    '-1116': InvalidOrder, # Invalid orderType
                    '-1117': InvalidOrder, # Invalid side
                    '-1118': InvalidOrder, # New client order ID was empty
                    '-1119': InvalidOrder, # Original client order ID was empty
                    '-1120': BadRequest, # Invalid interval
                    '-1121': BadSymbol, # Invalid symbol
                    '-1125': AuthenticationError, # This listenKey does not exist
                    '-1127': BadRequest, # Lookup interval is too big
                    '-1128': BadRequest, # Combination of optional parameters invalid
                    '-1130': BadRequest, # Invalid data sent for a parameter
                    '-1131': InsufficientFunds,
                    '-1132': InvalidOrder, # Order price too high
                    '-1133': InvalidOrder, # Order price lower than the minimum,please check general broker info
                    '-1134': InvalidOrder, # Order price decimal too long,please check general broker info
                    '-1135': InvalidOrder, # Order quantity too large
                    '-1136': InvalidOrder, # Order quantity lower than the minimum
                    '-1137': InvalidOrder, # Order quantity decimal too long
                    '-1138': InvalidOrder, # Order price exceeds permissible range
                    '-1139': InvalidOrder, # Order has been filled
                    '-1140': InvalidOrder, # Transaction amount lower than the minimum
                    '-1141': InvalidOrder, # Duplicate clientOrderId
                    '-1142': InvalidOrder, # Order has been canceled
                    '-1143': OrderNotFound, # Cannot be found on order book
                    '-1144': InvalidOrder, # Order has been locked
                    '-1145': InvalidOrder, # This order type does not support cancellation
                    '-1146': RequestTimeout, # Order creation timeout
                    '-1147': RequestTimeout, # Order cancellation timeout
                    '-1149': InvalidOrder, # Create order failed
                    '-1187': InvalidAddress, # Withdrawal address not in whitelist
                    '-2010': InvalidOrder, # NEW_ORDER_REJECTED
                    '-2011': InvalidOrder, # CANCEL_REJECTED
                    '-2013': OrderNotFound, # Order does not exist
                    '-2014': AuthenticationError, # API-key format invalid
                    '-2015': AuthenticationError, # Invalid API-key, IP, or permissions for action
                    '-2016': ExchangeError, # No trading window could be found for the symbol. Try ticker/24hrs instead
                },
            },
            # exchange-specific options
            'options': {
                'fetchTickers': {
                    'method': 'quoteGetTicker24hr',
                },
            },
        })
def fetch_time(self, params={}):
response = self.publicGetTime(params)
#
# {
# "serverTime": 1527777538000
# }
#
return self.safe_integer(response, 'serverTime')
def parse_market(self, market, type='spot'):
filters = self.safe_value(market, 'filters', [])
id = self.safe_string(market, 'symbol')
baseId = self.safe_string(market, 'baseAsset')
quoteId = self.safe_string(market, 'quoteAsset')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
spot = True
future = False
option = False
inverse = False
if type == 'future':
symbol = id
spot = False
future = True
inverse = self.safe_value(market, 'inverse', False)
baseId = self.safe_string(market, 'underlying')
base = self.safe_currency_code(baseId)
elif type == 'option':
symbol = id
spot = False
option = True
amountMin = None
amountMax = None
priceMin = None
priceMax = None
costMin = None
for j in range(0, len(filters)):
filter = filters[j]
filterType = self.safe_string(filter, 'filterType')
if filterType == 'LOT_SIZE':
amountMin = self.safe_float(filter, 'minQty')
amountMax = self.safe_float(filter, 'maxQty')
if filterType == 'PRICE_FILTER':
priceMin = self.safe_float(filter, 'minPrice')
priceMax = self.safe_float(filter, 'maxPrice')
if filterType == 'MIN_NOTIONAL':
costMin = self.safe_float(filter, 'minNotional')
if (costMin is None) and (amountMin is not None) and (priceMin is not None):
costMin = amountMin * priceMin
precision = {
'price': self.safe_float_2(market, 'quotePrecision', 'quoteAssetPrecision'),
'amount': self.safe_float(market, 'baseAssetPrecision'),
}
limits = {
'amount': {
'min': amountMin,
'max': amountMax,
},
'price': {
'min': priceMin,
'max': priceMax,
},
'cost': {
'min': costMin,
'max': None,
},
}
return {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'type': type,
'spot': spot,
'future': future,
'option': option,
'inverse': inverse,
'precision': precision,
'limits': limits,
'info': market,
}
def fetch_markets(self, params={}):
response = self.publicGetBrokerInfo(params)
#
# {
# "timezone":"UTC",
# "serverTime":"1588015885118",
# "brokerFilters":[],
# "symbols":[
# {
# "filters":[
# {"minPrice":"0.01","maxPrice":"100000.00000000","tickSize":"0.01","filterType":"PRICE_FILTER"},
# {"minQty":"0.0005","maxQty":"100000.00000000","stepSize":"0.000001","filterType":"LOT_SIZE"},
# {"minNotional":"5","filterType":"MIN_NOTIONAL"}
# ],
# "exchangeId":"301",
# "symbol":"BTCUSDT",
# "symbolName":"BTCUSDT",
# "status":"TRADING",
# "baseAsset":"BTC",
# "baseAssetPrecision":"0.000001",
# "quoteAsset":"USDT",
# "quotePrecision":"0.01",
# "icebergAllowed":false
# },
# ],
# "options":[
# {
# "filters":[
# {"minPrice":"0.01","maxPrice":"100000.00000000","tickSize":"0.01","filterType":"PRICE_FILTER"},
# {"minQty":"0.01","maxQty":"100000.00000000","stepSize":"0.001","filterType":"LOT_SIZE"},
# {"minNotional":"1","filterType":"MIN_NOTIONAL"}
# ],
# "exchangeId":"301",
# "symbol":"BTC0501CS8500",
# "symbolName":"BTC0501CS8500",
# "status":"TRADING",
# "baseAsset":"BTC0501CS8500",
# "baseAssetPrecision":"0.001",
# "quoteAsset":"BUSDT",
# "quotePrecision":"0.01",
# "icebergAllowed":false
# },
# ],
# "contracts":[
# {
# "filters":[
# {"minPrice":"0.1","maxPrice":"100000.00000000","tickSize":"0.1","filterType":"PRICE_FILTER"},
# {"minQty":"1","maxQty":"100000.00000000","stepSize":"1","filterType":"LOT_SIZE"},
# {"minNotional":"0.000001","filterType":"MIN_NOTIONAL"}
# ],
# "exchangeId":"301",
# "symbol":"BTC-PERP-REV",
# "symbolName":"BTC-PERP-REV",
# "status":"TRADING",
# "baseAsset":"BTC-PERP-REV",
# "baseAssetPrecision":"1",
# "quoteAsset":"USDT",
# "quoteAssetPrecision":"0.1",
# "icebergAllowed":false,
# "inverse":true,
# "index":"BTCUSDT",
# "marginToken":"TBTC",
# "marginPrecision":"0.00000001",
# "contractMultiplier":"1.0",
# "underlying":"TBTC",
# "riskLimits":[
# {"riskLimitId":"200000001","quantity":"1000000.0","initialMargin":"0.01","maintMargin":"0.005"},
# {"riskLimitId":"200000002","quantity":"2000000.0","initialMargin":"0.02","maintMargin":"0.01"},
# {"riskLimitId":"200000003","quantity":"3000000.0","initialMargin":"0.03","maintMargin":"0.015"},
# {"riskLimitId":"200000004","quantity":"4000000.0","initialMargin":"0.04","maintMargin":"0.02"}
# ]
# },
# {
# "filters":[
# {"minPrice":"0.1","maxPrice":"100000.00000000","tickSize":"0.1","filterType":"PRICE_FILTER"},
# {"minQty":"1","maxQty":"100000.00000000","stepSize":"1","filterType":"LOT_SIZE"},
# {"minNotional":"0.000001","filterType":"MIN_NOTIONAL"}
# ],
# "exchangeId":"301",
# "symbol":"BTC-SWAP",
# "symbolName":"BTC-SWAP",
# "status":"TRADING",
# "baseAsset":"BTC-SWAP",
# "baseAssetPrecision":"1",
# "quoteAsset":"USDT",
# "quoteAssetPrecision":"0.1",
# "icebergAllowed":false,
# "inverse":true,
# "index":"BTCUSDT",
# "marginToken":"BTC",
# "marginPrecision":"0.00000001",
# "contractMultiplier":"1.0",
# "underlying":"BTC",
# "riskLimits":[
# {"riskLimitId":"500000001","quantity":"1000000.0","initialMargin":"0.01","maintMargin":"0.005"},
# {"riskLimitId":"500000002","quantity":"2000000.0","initialMargin":"0.02","maintMargin":"0.01"},
# {"riskLimitId":"500000003","quantity":"3000000.0","initialMargin":"0.03","maintMargin":"0.015"},
# {"riskLimitId":"500000004","quantity":"4000000.0","initialMargin":"0.04","maintMargin":"0.02"}
# ]
# },
# {
# "filters":[
# {"minPrice":"0.1","maxPrice":"100000.00000000","tickSize":"0.1","filterType":"PRICE_FILTER"},
# {"minQty":"1","maxQty":"100000.00000000","stepSize":"1","filterType":"LOT_SIZE"},
# {"minNotional":"0.000000001","filterType":"MIN_NOTIONAL"}
# ],
# "exchangeId":"301",
# "symbol":"BTC-PERP-BUSDT",
# "symbolName":"BTC-PERP-BUSDT",
# "status":"TRADING",
# "baseAsset":"BTC-PERP-BUSDT",
# "baseAssetPrecision":"1",
# "quoteAsset":"BUSDT",
# "quoteAssetPrecision":"0.1",
# "icebergAllowed":false,
# "inverse":false,
# "index":"BTCUSDT",
# "marginToken":"BUSDT",
# "marginPrecision":"0.0001",
# "contractMultiplier":"0.0001",
# "underlying":"TBTC",
# "riskLimits":[
# {"riskLimitId":"600000132","quantity":"1000000.0","initialMargin":"0.01","maintMargin":"0.005"},
# {"riskLimitId":"600000133","quantity":"2000000.0","initialMargin":"0.02","maintMargin":"0.01"},
# {"riskLimitId":"600000134","quantity":"3000000.0","initialMargin":"0.03","maintMargin":"0.015"},
# {"riskLimitId":"600000135","quantity":"4000000.0","initialMargin":"0.04","maintMargin":"0.02"}
# ]
# },
# ]
# }
#
result = []
symbols = self.safe_value(response, 'symbols', [])
for i in range(0, len(symbols)):
market = self.parse_market(symbols[i], 'spot')
result.append(market)
options = self.safe_value(response, 'options', [])
for i in range(0, len(options)):
market = self.parse_market(options[i], 'option')
result.append(market)
contracts = self.safe_value(response, 'contracts', [])
for i in range(0, len(contracts)):
market = self.parse_market(contracts[i], 'future')
result.append(market)
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # default 40, max 40
response = self.quoteGetDepth(self.extend(request, params))
#
# {
# "time":1588068913453,
# "bids":[
# ["0.025278","0.0202"],
# ["0.025277","16.1132"],
# ["0.025276","7.9056"],
# ]
# "asks":[
# ["0.025302","5.9999"],
# ["0.025303","34.9151"],
# ["0.025304","92.391"],
# ]
# }
#
timestamp = self.safe_integer(response, 'time')
return self.parse_order_book(response, timestamp)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.quoteGetTicker24hr(self.extend(request, params))
#
# {
# "time":1588069860794,
# "symbol":"BNB0501PS16",
# "bestBidPrice":"0.2129",
# "bestAskPrice":"0.3163",
# "volume":"33547",
# "quoteVolume":"10801.987",
# "lastPrice":"0.2625",
# "highPrice":"0.3918",
# "lowPrice":"0.2625",
# "openPrice":"0.362",
# }
#
return self.parse_ticker(response, market)
def parse_tickers(self, rawTickers, symbols=None):
tickers = []
for i in range(0, len(rawTickers)):
tickers.append(self.parse_ticker(rawTickers[i]))
return self.filter_by_array(tickers, 'symbol', symbols)
def fetch_bid_ask(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.quoteGetTickerBookTicker(self.extend(request, params))
#
# {
# "symbol": "LTCBTC",
# "bidPrice": "4.00000000",
# "bidQty": "431.00000000",
# "askPrice": "4.00000200",
# "askQty": "9.00000000"
# }
#
return self.parse_ticker(response, market)
def fetch_bids_asks(self, symbols=None, params={}):
self.load_markets()
response = self.quoteGetTickerBookTicker(params)
#
# [
# {
# "symbol": "LTCBTC",
# "bidPrice": "4.00000000",
# "bidQty": "431.00000000",
# "askPrice": "4.00000200",
# "askQty": "9.00000000"
# },
# {
# "symbol": "ETHBTC",
# "bidPrice": "0.07946700",
# "bidQty": "9.00000000",
# "askPrice": "100000.00000000",
# "askQty": "1000.00000000"
# },
# ]
#
return self.parse_tickers(response, symbols)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
options = self.safe_value(self.options, 'fetchTickers', {})
defaultMethod = self.safe_string(options, 'method', 'quoteGetTicker24hr')
defaultType = self.safe_string(options, 'type', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = defaultMethod
if type == 'future':
method = 'quoteGetContractTicker24hr'
elif type == 'option':
method = 'quoteGetOptionTicker24hr'
response = getattr(self, method)(query)
#
# [
# {
# "time": 1538725500422,
# "symbol": "ETHBTC",
# "lastPrice": "4.00000200",
# "openPrice": "99.00000000",
# "highPrice": "100.00000000",
# "lowPrice": "0.10000000",
# "volume": "8913.30000000"
# },
# ]
#
return self.parse_tickers(response, symbols)
def fetch_balance(self, params={}):
self.load_markets()
options = self.safe_value(self.options, 'fetchBalance', {})
defaultType = self.safe_string(options, 'type', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = 'privateGetAccount'
if type == 'future':
method = 'contractGetAccount'
elif type == 'option':
method = 'optionGetAccount'
response = getattr(self, method)(query)
#
# spot
#
# {
# 'balances': [
# {
# 'asset': 'ALGO',
# 'free': '0',
# 'locked': '0'
# },
# {
# 'asset': 'BHT',
# 'free': '0',
# 'locked': '0'
# }
# ]
# }
#
# contract
#
# {
# "BUSDT":{
# "total":"1000",
# "availableMargin":"1000",
# "positionMargin":"0",
# "orderMargin":"0",
# "tokenId":"BUSDT"
# },
# "TBTC":{
# "total":"0.5",
# "availableMargin":"0.5",
# "positionMargin":"0",
# "orderMargin":"0",
# "tokenId":"TBTC"
# }
# }
#
# option
#
# {
# "optionAsset":"",
# "balances":[
# {
# "tokenName":"USDT",
# "free":"0.0",
# "locked":"0.0",
# "margin":"0.0"
# },
# {
# "tokenName":"BUSDT",
# "free":"0.0",
# "locked":"0.0",
# "margin":"0.0"
# }
# ]
# }
#
balances = self.safe_value(response, 'balances')
result = {'info': response}
if balances is not None:
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string_2(balance, 'asset', 'tokenName')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'free')
account['used'] = self.safe_float(balance, 'locked')
result[code] = account
else:
currencyIds = list(response.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
balance = response[currencyId]
account = self.account()
account['free'] = self.safe_float(balance, 'availableMargin')
account['total'] = self.safe_float(balance, 'total')
result[code] = account
return self.parse_balance(result)
def fetch_trades(self, symbol, since=None, limit=50, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = self.quoteGetTrades(self.extend(request, params))
#
# [
# {"price":"0.025344","time":1588084082060,"qty":"1","isBuyerMaker":false},
# {"price":"0.02535","time":1588084086021,"qty":"0.553","isBuyerMaker":true},
# {"price":"0.025348","time":1588084097037,"qty":"1","isBuyerMaker":false},
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1587906000000, # open time
# "0.1761", # open
# "0.1761", # high
# "0.1761", # low
# "0.1761", # close
# "0", # base volume
# 0, # close time
# "0", # quote volume
# 0, # number of trades
# "0", # taker buy base asset volume
# "0" # taker buy quote asset volume
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_float(ohlcv, 1),
self.safe_float(ohlcv, 2),
self.safe_float(ohlcv, 3),
self.safe_float(ohlcv, 4),
self.safe_float(ohlcv, 5),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'interval': self.timeframes[timeframe],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit # default 500, max 500
response = self.quoteGetKlines(self.extend(request, params))
#
# [
# [1587906000000,"0.1761","0.1761","0.1761","0.1761","0",0,"0",0,"0","0"],
# [1587906180000,"0.1761","0.1761","0.1761","0.1761","0",0,"0",0,"0","0"],
# [1587906360000,"0.1761","0.1848","0.1761","0.1848","53",0,"9.7944",1,"0","0"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
# if only fromId is set,it will get orders < that fromId in descending order
# if only toId is set, it will get orders > that toId in ascending order
# if fromId is set and toId is set, it will get orders < that fromId and > that toId in descending order
# if fromId is not set and toId it not set, most recent order are returned in descending order
# 'fromId': '43287482374',
# 'toId': '43287482374',
# 'endTime': self.milliseconds(), # optional, spot only
}
defaultType = self.safe_string(self.options, 'type', 'spot')
options = self.safe_value(self.options, 'fetchMyTrades', {})
fetchMyTradesType = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', fetchMyTradesType)
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
type = market['type']
query = self.omit(params, 'type')
if limit is not None:
# spot default 500, max 1000
# futures and options default 20, max 1000
request['limit'] = limit
method = 'privateGetMyTrades'
if type == 'future':
method = 'contractGetMyTrades'
else:
if type == 'option':
method = 'optionGetMyTrades'
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a `symbol` argument for ' + type + ' markets')
market = self.market(symbol)
request['symbol'] = market['id']
# spot only?
if since is not None:
request['startTime'] = since
if since is not None:
request['startTime'] = since
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# "id":"616384027512920576",
# "symbol":"TBTCBUSDT",
# "orderId":"616384027202542080",
# "matchOrderId":"605124954767266560",
# "price":"6826.06",
# "qty":"0.1",
# "commission":"0.682606",
# "commissionAsset":"BUSDT",
# "time":"1588214701982",
# "isBuyer":false,
# "isMaker":false,
# "fee":{
# "feeTokenId":"BUSDT",
# "feeTokenName":"BUSDT",
# "fee":"0.682606"
# }
# }
# ]
#
return self.parse_trades(response, market, since, limit)
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """
        Create an order on a spot, option or futures market.

        Futures orders require 'clientOrderId' in params, sides BUY_OPEN / SELL_OPEN /
        BUY_CLOSE / SELL_CLOSE, types LIMIT or STOP, and a 'leverage' param for opening
        sides.  For spot market buy orders the amount is interpreted as base quantity
        and converted to quote cost using the supplied price(configurable via
        self.options['createMarketBuyOrderRequiresPrice']).
        """
        self.load_markets()
        market = self.market(symbol)
        orderSide = side.upper()
        orderType = type.upper()
        request = {
            'symbol': market['id'],
            # BUY or SELL for spot and options
            'side': orderSide,
            # GTC, FOK, IOC for spot and options
            # GTC, FOK, IOC, LIMIT_MAKER for futures
            # 'timeInForce': 'GTC',
        }
        query = params
        method = 'privatePostOrder'
        if market['type'] == 'future':
            # futures enforce their own side/type vocabulary and mandatory params
            if (orderSide != 'BUY_OPEN') and (orderSide != 'SELL_OPEN') and (orderSide != 'BUY_CLOSE') and (orderSide != 'SELL_CLOSE'):
                raise NotSupported(self.id + ' createOrder() does not support order side ' + side + ' for ' + market['type'] + ' markets, only BUY_OPEN, SELL_OPEN, BUY_CLOSE and SELL_CLOSE are supported')
            if (orderType != 'LIMIT') and (orderType != 'STOP'):
                raise NotSupported(self.id + ' createOrder() does not support order type ' + type + ' for ' + market['type'] + ' markets, only LIMIT and STOP are supported')
            clientOrderId = self.safe_value(params, 'clientOrderId')
            if clientOrderId is None:
                raise ArgumentsRequired(self.id + ' createOrder() requires a clientOrderId parameter for ' + market['type'] + ' markets, supply clientOrderId in the params argument')
            leverage = self.safe_value(params, 'leverage')
            if leverage is None and (orderSide == 'BUY_OPEN' or orderSide == 'SELL_OPEN'):
                raise NotSupported(self.id + ' createOrder() requires a leverage parameter for ' + market['type'] + ' markets if orderSide is BUY_OPEN or SELL_OPEN')
            method = 'contractPostOrder'
            # price is only sent for INPUT priceType(or when no priceType is given)
            priceType = self.safe_string(params, 'priceType')
            if priceType is None:
                request['price'] = self.price_to_precision(symbol, price)
            else:
                request['priceType'] = priceType
                if priceType == 'INPUT':
                    request['price'] = self.price_to_precision(symbol, price)
            request['orderType'] = type.upper()  # LIMIT, STOP
            request['quantity'] = self.amount_to_precision(symbol, amount)
            # request['leverage'] = 1 # not required for closing orders
            request['leverage'] = leverage
            request['clientOrderId'] = clientOrderId
            # optional
            # request['priceType'] = 'INPUT', # INPUT, OPPONENT, QUEUE, OVER, MARKET
            # request['triggerPrice'] = 123.45
        else:
            if market['type'] == 'option':
                method = 'optionPostOrder'
            newClientOrderId = self.safe_value_2(params, 'clientOrderId', 'newClientOrderId')
            if newClientOrderId is not None:
                request['newClientOrderId'] = newClientOrderId
            request['type'] = orderType
            if type == 'limit':
                request['price'] = self.price_to_precision(symbol, price)
                request['quantity'] = self.amount_to_precision(symbol, amount)
            elif type == 'market':
                # for market buy it requires the amount of quote currency to spend
                if side == 'buy':
                    createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
                    if createMarketBuyOrderRequiresPrice:
                        if price is not None:
                            # convert base amount to quote cost for the exchange
                            amount = amount * price
                        else:
                            raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument(the exchange-specific behaviour)")
                    precision = market['precision']['price']
                    request['quantity'] = self.decimal_to_precision(amount, TRUNCATE, precision, self.precisionMode)
                else:
                    request['quantity'] = self.amount_to_precision(symbol, amount)
            # these keys have already been copied into request where relevant
            query = self.omit(query, ['clientOrderId', 'newClientOrderId'])
        response = getattr(self, method)(self.extend(request, query))
        #
        # spot
        #
        # {
        #     "symbol":"TBTCBUSDT",
        #     "orderId":"616376654496877056",
        #     "clientOrderId":"158821382304516955",
        #     "transactTime":"1588213823080",
        #     "price":"0",
        #     "origQty":"1000",
        #     "executedQty":"0",
        #     "status":"NEW",
        #     "timeInForce":"GTC",
        #     "type":"MARKET",
        #     "side":"BUY"
        # }
        #
        # contract
        #
        # {
        #     'time': '1570759718825',
        #     'updateTime': '0',
        #     'orderId': '469961015902208000',
        #     'clientOrderId': '6423344174',
        #     'symbol': 'BTC-PERP-REV',
        #     'price': '8200',
        #     'leverage': '12.08',
        #     'origQty': '5',
        #     'executedQty': '0',
        #     'avgPrice': '0',
        #     'marginLocked': '0.00005047',
        #     'orderType': 'LIMIT',
        #     'side': 'BUY_OPEN',
        #     'fees': [],
        #     'timeInForce': 'GTC',
        #     'status': 'NEW',
        #     'priceType': 'INPUT'
        # }
        #
        return self.parse_order(response, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
clientOrderId = self.safe_value_2(params, 'origClientOrderId', 'clientOrderId')
request = {}
defaultType = self.safe_string(self.options, 'type', 'spot')
options = self.safe_value(self.options, 'cancelOrder', {})
cancelOrderType = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', cancelOrderType)
query = self.omit(params, 'type')
if clientOrderId is not None:
request['origClientOrderId'] = clientOrderId
query = self.omit(query, ['origClientOrderId', 'clientOrderId'])
else:
request['orderId'] = id
method = 'privateDeleteOrder'
orderType = self.safe_string(query, 'orderType')
if orderType is not None:
type = 'future'
if type == 'future':
method = 'contractDeleteOrderCancel'
if orderType is None:
raise ArgumentsRequired(self.id + " cancelOrder() requires an orderType parameter, pass the {'orderType': 'LIMIT'} or {'orderType': 'STOP'} in params argument")
request['orderType'] = orderType
else:
if type == 'option':
method = 'optionDeleteOrderCancel'
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# {
# 'exchangeId': '301',
# 'symbol': 'BHTUSDT',
# 'clientOrderId': '0',
# 'orderId': '499890200602846976',
# 'status': 'CANCELED'
# }
#
# futures
#
# {
# "time":"1588353669383",
# "updateTime":"0",
# "orderId":"617549770304599296",
# "clientOrderId":"test-001",
# "symbol":"BTC-PERP-REV",
# "price":"10000",
# "leverage":"1",
# "origQty":"100",
# "executedQty":"0",
# "avgPrice":"0",
# "marginLocked":"0",
# "orderType":"LIMIT",
# "side":"SELL_OPEN",
# "fees":[],
# "timeInForce":"GTC",
# "status":"CANCELED",
# "priceType":"INPUT",
# }
#
return self.parse_order(response)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
# if orderId is set, it will get orders < that orderId otherwise most recent orders are returned
# 'orderId': '43287482374',
}
defaultType = self.safe_string(self.options, 'type', 'spot')
options = self.safe_value(self.options, 'fetchOpenOrders', {})
fetchOpenOrdersType = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', fetchOpenOrdersType)
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
type = market['type']
query = self.omit(params, 'type')
if limit is not None:
request['limit'] = limit # default 500, max 1000
method = 'privateGetOpenOrders'
if type == 'future':
method = 'contractGetOpenOrders'
elif type == 'option':
method = 'optionGetOpenOrders'
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# 'orderId': '499902955766523648',
# 'clientOrderId': '157432907618453',
# 'exchangeId': '301',
# 'symbol': 'BHTUSDT',
# 'price': '0.01',
# 'origQty': '50',
# 'executedQty': '0',
# 'cummulativeQuoteQty': '0',
# 'avgPrice': '0',
# 'status': 'NEW',
# 'timeInForce': 'GTC',
# 'type': 'LIMIT',
# 'side': 'BUY',
# 'stopPrice': '0.0',
# 'icebergQty': '0.0',
# 'time': '1574329076202',
# 'updateTime': '0',
# 'isWorking': True
# }
# ]
#
# futures
#
# [
# {
# "time":"1588353669383",
# "updateTime":"0",
# "orderId":"617549770304599296",
# "clientOrderId":"test-001",
# "symbol":"BTC-PERP-REV",
# "price":"10000",
# "leverage":"1",
# "origQty":"100",
# "executedQty":"0",
# "avgPrice":"0",
# "marginLocked":"0.01",
# "orderType":"LIMIT",
# "side":"SELL_OPEN",
# "fees":[],
# "timeInForce":"GTC",
# "status":"NEW",
# "priceType":"INPUT"
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
# if orderId is set, it will get orders < that orderId otherwise most recent orders are returned
# 'orderId': '43287482374',
# 'endTime': self.milliseconds(), # optional
}
defaultType = self.safe_string(self.options, 'type', 'spot')
options = self.safe_value(self.options, 'fetchClosedOrders', {})
fetchClosedOrdersType = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', fetchClosedOrdersType)
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
type = market['type']
query = self.omit(params, 'type')
if limit is not None:
request['limit'] = limit # default 500, max 1000
if since is not None:
request['startTime'] = since
method = 'privateGetHistoryOrders'
if type == 'future':
method = 'contractGetHistoryOrders'
elif type == 'option':
method = 'optionGetHistoryOrders'
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# "orderId":"616384027202542080",
# "clientOrderId":"158821470194414688",
# "exchangeId":"301",
# "symbol":"TBTCBUSDT",
# "price":"0",
# "origQty":"0.1",
# "executedQty":"0.1",
# "cummulativeQuoteQty":"682.606",
# "avgPrice":"6826.06",
# "status":"FILLED",
# "timeInForce":"GTC",
# "type":"MARKET",
# "side":"SELL",
# "stopPrice":"0.0",
# "icebergQty":"0.0",
# "time":"1588214701974",
# "updateTime":"0",
# "isWorking":true
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
clientOrderId = self.safe_value_2(params, 'origClientOrderId', 'clientOrderId')
request = {}
defaultType = self.safe_string(self.options, 'type', 'spot')
options = self.safe_value(self.options, 'fetchOrder', {})
fetchOrderType = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', fetchOrderType)
query = self.omit(params, 'type')
if clientOrderId is not None:
request['origClientOrderId'] = clientOrderId
query = self.omit(query, ['origClientOrderId', 'clientOrderId'])
else:
request['orderId'] = id
method = 'privateGetOrder'
if type == 'future':
method = 'contractGetGetOrder'
elif type == 'option':
method = 'optionGetGetOrder'
response = getattr(self, method)(self.extend(request, query))
return self.parse_order(response)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
request = {
# 'fromId': 'string', # if fromId is set, it will get deposits > that fromId, otherwise most recent deposits are returned
}
if code is not None:
currency = self.currency(code)
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
response = self.privateGetDepositOrders(self.extend(request, params))
#
# [
# {
# 'time': '1565769575929',
# 'orderId': '428100569859739648',
# 'token': 'USDT',
# 'address': '',
# 'addressTag': '',
# 'fromAddress': '',
# 'fromAddressTag': '',
# 'quantity': '1100',
# },
# ]
#
return self.parse_transactions(response, currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
request = {
# 'fromId': 'string', # if fromId is set, it will get deposits > that fromId, otherwise most recent deposits are returned
}
if code is not None:
currency = self.currency(code)
request['token'] = currency['id']
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = self.privateGetWithdrawalOrders(self.extend(request, params))
#
# [
# {
# "time":"1536232111669",
# "orderId":"90161227158286336",
# "accountId":"517256161325920",
# "tokenId":"BHC",
# "tokenName":"BHC",
# "address":"0x815bF1c3cc0f49b8FC66B21A7e48fCb476051209",
# "addressExt":"address tag",
# "quantity":"14", # Withdrawal qty
# "arriveQuantity":"14", # Arrived qty
# "statusCode":"PROCESSING_STATUS",
# "status":3,
# "txid":"",
# "txidUrl":"",
# "walletHandleTime":"1536232111669",
# "feeTokenId":"BHC",
# "feeTokenName":"BHC",
# "fee":"0.1",
# "requiredConfirmNum":0, # Required confirmations
# "confirmNum":0, # Confirmations
# "kernelId":"", # BEAM and GRIN only
# "isInternalTransfer": False # True if self transfer is internal
# }
# ]
#
return self.parse_transactions(response, currency, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
clientOrderId = self.safe_string(params, 'clientOrderId', self.uuid())
request = {
'clientOrderId': clientOrderId,
'tokenId': currency['id'],
'address': address, # the withdrawal address must be in current tag list in your PC/APP client
'withdrawQuantity': amount,
# 'chainType': 'OMNI', # OMNI, ERC20, TRC20
}
if tag is not None:
request['addressExt'] = tag
response = self.privatePostWithdraw(self.extend(request, params))
#
# {
# "status": 0,
# "success": True,
# "needBrokerAudit": False, # Whether self request needs broker auit
# "orderId": "423885103582776064" # Id for successful withdrawal
# }
#
return {
'info': response,
'id': self.safe_string(response, 'orderId'),
}
def fetch_accounts(self, params={}):
response = self.privatePostSubAccountQuery(params)
#
# [
# {
# "accountId": "122216245228131",
# "accountName": "createSubAccountByCurl", # sub-account name
# "accountType": 1, # 1 token trading, 2 options, 3 futures
# "accountIndex": 1, # 0 main account, 1 sub-account
# },
# ]
#
result = []
for i in range(0, len(response)):
account = response[i]
accountId = self.safe_string(account, 'accountId')
accountType = self.safe_string(account, 'accountType')
type = accountType
if accountType == '1':
type = 'spot'
elif accountType == '2':
type = 'option'
elif accountType == '3':
type = 'future'
result.append({
'id': accountId,
'type': type,
'currency': None,
'info': account,
})
return result
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'accountType': 1, # spot 1, options 2, futures 3
'accountIndex': 0, # main 0, sub-account 1
'fromFlowId': '', # flowId to start from
'endFlowId': '', # flowId to end with
'endTime': 1588450533040,
}
currency = None
if code is not None:
currency = self.currency(code)
request['tokenId'] = currency['id']
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit # default 500, max 500
response = self.privateGetBalanceFlow(self.extend(request, params))
#
# [
# {
# "id": "539870570957903104",
# "accountId": "122216245228131",
# "tokenId": "BTC",
# "tokenName": "BTC",
# "flowTypeValue": 51,
# "flowType": "USER_ACCOUNT_TRANSFER",
# "flowName": "Transfer",
# "change": "-12.5",
# "total": "379.624059937852365", # after change
# "created": "1579093587214"
# },
# {
# "id": "536072393645448960",
# "accountId": "122216245228131",
# "tokenId": "USDT",
# "tokenName": "USDT",
# "flowTypeValue": 7,
# "flowType": "AIRDROP",
# "flowName": "Airdrop",
# "change": "-2000",
# "total": "918662.0917630848",
# "created": "1578640809195"
# }
# ]
#
return self.parse_ledger(response, currency, since, limit)
    def parse_ledger_entry(self, item, currency=None):
        """
        Parse one raw balance-flow record into a unified ccxt ledger entry.

        Sample item (transfer):
            {"id": "539870570957903104", "accountId": "122216245228131",
             "tokenId": "BTC", "tokenName": "BTC", "flowTypeValue": 51,
             "flowType": "USER_ACCOUNT_TRANSFER", "flowName": "Transfer",
             "change": "-12.5", "total": "379.624059937852365",  # after change
             "created": "1579093587214"}

        :param dict item: raw record from privateGetBalanceFlow
        :param dict currency: optional unified currency used to resolve the code
        :returns dict: unified ledger entry structure
        """
        currencyId = self.safe_string(item, 'tokenId')
        code = self.safe_currency_code(currencyId, currency)
        amount = self.safe_float(item, 'change')  # signed balance delta
        after = self.safe_float(item, 'total')  # balance after the change
        direction = 'out' if (amount < 0) else 'in'
        before = None
        if after is not None and amount is not None:
            # NOTE(review): for 'out' entries difference == change (negative),
            # so before == after + change, i.e. after - |change|; arithmetically
            # 'before' is usually after - change - confirm the intended sign
            # convention against the exchange documentation.
            difference = amount if (direction == 'out') else -amount
            before = self.sum(after, difference)
        timestamp = self.safe_integer(item, 'created')
        type = self.parse_ledger_entry_type(self.safe_string(item, 'flowType'))
        id = self.safe_string(item, 'id')
        account = self.safe_string(item, 'accountId')
        return {
            'id': id,
            'currency': code,
            'account': account,
            'referenceAccount': None,
            'referenceId': None,
            'status': None,
            'amount': amount,
            'before': before,
            'after': after,
            'fee': None,
            'direction': direction,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'type': type,
            'info': item,
        }
def parse_ledger_entry_type(self, type):
types = {
'TRADE': 'trade',
'FEE': 'fee',
'TRANSFER': 'transfer',
'DEPOSIT': 'transaction',
'MAKER_REWARD': 'rebate',
'PNL': 'pnl',
'SETTLEMENT': 'settlement',
'LIQUIDATION': 'liquidation',
'FUNDING_SETTLEMENT': 'settlement',
'USER_ACCOUNT_TRANSFER': 'transfer',
'OTC_BUY_COIN': 'trade',
'OTC_SELL_COIN': 'trade',
'OTC_FEE': 'fee',
'OTC_TRADE': 'trade',
'ACTIVITY_AWARD': 'referral',
'INVITATION_REFERRAL_BONUS': 'referral',
'REGISTER_BONUS': 'referral',
'AIRDROP': 'airdrop',
'MINE_REWARD': 'reward',
}
return self.safe_string(types, type, type)
def parse_transaction_status(self, status):
statuses = {
'BROKER_AUDITING_STATUS': 'pending',
'BROKER_REJECT_STATUS': 'failed',
'AUDITING_STATUS': 'pending',
'AUDIT_REJECT_STATUS': 'failed',
'PROCESSING_STATUS': 'pending',
'WITHDRAWAL_SUCCESS_STATUS': 'ok',
'WITHDRAWAL_FAILURE_STATUS': 'failed',
'BLOCK_MINING_STATUS': 'ok',
}
return self.safe_string(statuses, status, status)
    def parse_transaction(self, transaction, currency=None):
        """
        Parse a deposit (fetchDeposits) or withdrawal (fetchWithdrawals) record
        into a unified ccxt transaction structure.

        Deposits carry: time, orderId, token, address, addressTag, fromAddress,
        fromAddressTag, quantity.
        Withdrawals additionally carry: accountId, tokenId, tokenName,
        addressExt, arriveQuantity, statusCode, status, txid, txidUrl,
        walletHandleTime, feeTokenId, feeTokenName, fee, requiredConfirmNum,
        confirmNum, kernelId, isInternalTransfer.

        :param dict transaction: the raw record
        :param dict currency: optional unified currency used to resolve the code
        :returns dict: unified transaction structure
        """
        id = self.safe_string(transaction, 'orderId')
        address = self.safe_string(transaction, 'address')
        tag = self.safe_string_2(transaction, 'addressExt', 'addressTag')
        # normalize empty tags to None
        if tag is not None:
            if len(tag) < 1:
                tag = None
        addressFrom = self.safe_string(transaction, 'fromAddress')
        tagFrom = self.safe_string(transaction, 'fromAddressTag')
        if tagFrom is not None:
            if len(tagFrom) < 1:
                tagFrom = None
        currencyId = self.safe_string(transaction, 'tokenId')
        code = self.safe_currency_code(currencyId, currency)
        timestamp = self.safe_integer(transaction, 'time')
        txid = self.safe_string(transaction, 'txid')
        if txid == '':
            txid = None
        type = None
        # only withdrawal records carry a statusCode (see the samples above);
        # its absence marks the record as a deposit, which is assumed final
        status = self.parse_transaction_status(self.safe_string(transaction, 'statusCode'))
        if status is None:
            type = 'deposit'
            status = 'ok'
        else:
            type = 'withdrawal'
        amount = self.safe_float(transaction, 'quantity')
        feeCost = self.safe_float(transaction, 'fee')
        fee = None
        if feeCost is not None:
            feeCurrencyId = self.safe_string(transaction, 'feeTokenId')
            feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
            fee = {
                'currency': feeCurrencyCode,
                'cost': feeCost,
            }
        return {
            'info': transaction,
            'id': id,
            'txid': txid,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'addressFrom': addressFrom,
            'address': address,
            'addressTo': address,
            'tagFrom': tagFrom,
            'tag': tag,
            'tagTo': tag,
            'type': type,
            'amount': amount,
            'currency': code,
            'status': status,
            'updated': None,
            'fee': fee,
        }
def parse_ticker(self, ticker, market=None):
#
# fetchTicker, fetchTickers
#
# {
# "time":1588069860794,
# "symbol":"BNB0501PS16",
# "bestBidPrice":"0.2129",
# "bestAskPrice":"0.3163",
# "volume":"33547",
# "quoteVolume":"10801.987",
# "lastPrice":"0.2625",
# "highPrice":"0.3918",
# "lowPrice":"0.2625",
# "openPrice":"0.362",
# }
#
# fetchBidAsk, fetchBidAsks
#
# {
# "symbol": "LTCBTC",
# "bidPrice": "4.00000000",
# "bidQty": "431.00000000",
# "askPrice": "4.00000200",
# "askQty": "9.00000000"
# }
#
symbol = None
marketId = self.safe_string(ticker, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(ticker, 'time')
open = self.safe_float(ticker, 'openPrice')
close = self.safe_float(ticker, 'lastPrice')
change = None
percentage = None
average = None
if (open is not None) and (close is not None):
change = close - open
average = self.sum(open, close) / 2
if (close is not None) and (close > 0):
percentage = (change / open) * 100
quoteVolume = self.safe_float(ticker, 'quoteVolume')
baseVolume = self.safe_float(ticker, 'volume')
vwap = None
if baseVolume is not None and quoteVolume is not None and baseVolume > 0:
vwap = quoteVolume / baseVolume
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'highPrice'),
'low': self.safe_float(ticker, 'lowPrice'),
'bid': self.safe_float_2(ticker, 'bestBidPrice', 'bidPrice'),
'bidVolume': self.safe_float(ticker, 'bidQty'),
'ask': self.safe_float_2(ticker, 'bestAskPrice', 'askPrice'),
'askVolume': self.safe_float(ticker, 'askQty'),
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def parse_trade(self, trade, market):
#
# fetchTrades(public)
#
# {
# "price":"0.025344",
# "time":1588084082060,
# "qty":"1",
# "isBuyerMaker":false
# }
#
# fetchMyTrades(private)
#
# spot
#
# {
# "id":"616384027512920576",
# "symbol":"TBTCBUSDT",
# "orderId":"616384027202542080",
# "matchOrderId":"605124954767266560",
# "price":"6826.06",
# "qty":"0.1",
# "commission":"0.682606",
# "commissionAsset":"BUSDT",
# "time":"1588214701982",
# "isBuyer":false,
# "isMaker":false,
# "fee":{
# "feeTokenId":"BUSDT",
# "feeTokenName":"BUSDT",
# "fee":"0.682606"
# }
# }
#
id = self.safe_string(trade, 'id')
timestamp = self.safe_float(trade, 'time')
type = None
orderId = self.safe_string(trade, 'orderId')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'qty')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
side = None
takerOrMaker = None
if 'isBuyerMaker' in trade:
side = 'sell' if trade['isBuyerMaker'] else 'buy'
else:
isMaker = self.safe_value(trade, 'isMaker')
if isMaker is not None:
takerOrMaker = 'maker' if isMaker else 'taker'
isBuyer = self.safe_value(trade, 'isBuyer')
side = 'buy' if isBuyer else 'sell'
fee = None
feeCost = self.safe_float(trade, 'commission')
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'commissionAsset')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
symbol = None
if (symbol is None) and (market is not None):
symbol = market['symbol']
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'order': orderId,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
    def parse_order(self, order, market=None):
        """
        Parse a createOrder / fetchOrder / fetchOpenOrders / fetchClosedOrders
        payload (spot or inverse future) into a unified ccxt order structure.

        Spot orders carry orderId, clientOrderId, symbol, price, origQty,
        executedQty, cummulativeQuoteQty, avgPrice, status, timeInForce, type,
        side, time/updateTime; createOrder responses use transactTime instead
        of time; futures payloads use orderType instead of type and add
        leverage, marginLocked, fees and priceType.
        """
        id = self.safe_string(order, 'orderId')
        clientOrderId = self.safe_string(order, 'clientOrderId')
        timestamp = self.safe_integer(order, 'time')
        if timestamp is None:
            # createOrder responses carry 'transactTime' instead of 'time'
            timestamp = self.safe_integer(order, 'transactTime')
        symbol = None
        if market is None:
            marketId = self.safe_string(order, 'symbol')
            if marketId is not None:
                marketId = marketId.upper()
                if marketId in self.markets_by_id:
                    market = self.markets_by_id[marketId]
        type = self.safe_string_lower(order, 'type')
        side = self.safe_string_lower(order, 'side')
        price = self.safe_float(order, 'price')
        average = self.safe_float(order, 'avgPrice')
        amount = None
        cost = self.safe_float(order, 'cummulativeQuoteQty')
        filled = None
        remaining = None
        if type is None:
            # futures payloads use 'orderType' instead of 'type'
            type = self.safe_string_lower(order, 'orderType')
            if (market is not None) and market['inverse']:
                # inverse contracts: executedQty measures cost, not amount
                cost = self.safe_float(order, 'executedQty')
                amount = None
            if cost == 0.0:
                filled = 0
        else:
            amount = self.safe_float(order, 'origQty')
            if type == 'market':
                # market orders carry no limit price; market buys quote the
                # spent cost rather than the base amount
                price = None
                if side == 'buy':
                    amount = None
            filled = self.safe_float(order, 'executedQty')
            if filled is not None:
                if amount is not None:
                    remaining = amount - filled
        # an average price of 0 means "not available"
        if average == 0.0:
            average = None
        status = self.parse_order_status(self.safe_string(order, 'status'))
        if market is not None:
            symbol = market['symbol']
        result = {
            'info': order,
            'id': id,
            'clientOrderId': clientOrderId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'average': average,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'status': status,
            'trades': None,
            'fee': None,
            'fees': None,
        }
        # futures payloads may carry a 'fees' list of {fee, feeToken} dicts
        fees = self.safe_value(order, 'fees', [])
        numFees = len(fees)
        if numFees > 0:
            result['fees'] = []
            for i in range(0, len(fees)):
                feeCost = self.safe_float(fees[i], 'fee')
                if feeCost is not None:
                    feeCurrencyId = self.safe_string(fees[i], 'feeToken')
                    feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
                    result['fees'].append({
                        'cost': feeCost,
                        'currency': feeCurrencyCode,
                    })
        return result
def parse_order_status(self, status):
statuses = {
'NEW': 'open',
'CANCELED': 'canceled',
'FILLED': 'closed',
'PENDING_CANCEL': 'canceled',
}
return self.safe_string(statuses, status, status)
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """
        Build the URL, headers and body for an API request. Private endpoints
        get a millisecond timestamp and an HMAC-SHA256 signature over the
        url-encoded parameters, authenticated via the X-BH-APIKEY header.
        """
        url = self.urls['api'][api] + '/' + self.version + '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        # 'insurance' and 'fundingRate' are the only unauthenticated endpoints
        # of the 'contract' API family
        isPublicContract = (api == 'contract') and ((path == 'insurance') or (path == 'fundingRate'))
        if (api == 'public') or (api == 'quote') or isPublicContract:
            # NOTE(review): this branch urlencodes *params* (not *query*), so
            # any path placeholders already imploded into the URL would be
            # repeated in the query string - confirm that is intended.
            if params:
                url += '?' + self.urlencode(params)
        else:
            timestamp = self.milliseconds()
            self.check_required_credentials()
            request = self.extend({
                'timestamp': timestamp,
            }, query)
            # prepare the payload to be signed
            auth = self.urlencode(request)
            signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256)
            request['signature'] = signature
            headers = {
                'X-BH-APIKEY': self.apiKey,
            }
            if method == 'POST':
                body = self.urlencode(request)
                headers = self.extend({
                    'Content-Type': 'application/x-www-form-urlencoded',
                }, headers)
            else:
                # GET/DELETE: the signed parameters travel in the query string
                url += '?' + self.urlencode(request)
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'code' in response:
code = self.safe_string(response, 'code')
if code != '0':
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
raise ExchangeError(feedback)
| 41.5327 | 484 | 0.461839 |
d92c8c64eae36f99ef2a37c65e1c654da24eb270 | 8,191 | py | Python | individual.py | StPluto/Test15 | a09731193cbaac255d303c0ba6335dd2034920da | [
"MIT"
] | null | null | null | individual.py | StPluto/Test15 | a09731193cbaac255d303c0ba6335dd2034920da | [
"MIT"
] | null | null | null | individual.py | StPluto/Test15 | a09731193cbaac255d303c0ba6335dd2034920da | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Выполнить индивидуальное задание 2 лабораторной работы 9, использовав классы данных, а
# также загрузку и сохранение данных в формат XML.
from dataclasses import dataclass, field
import logging
import sys
from typing import List
import xml.etree.ElementTree as ET
# Custom exception class raised when an invalid
# product price is entered.
class IllegalPriceError(Exception):
    """Raised when a product price falls outside the allowed range."""

    def __init__(self, price, message="Illegal price number"):
        super().__init__(message)
        self.price = price
        self.message = message

    def __str__(self):
        return f"{self.price} -> {self.message}"
# Custom exception class raised when the entered
# command is not recognized.
class UnknownCommandError(Exception):
    """Raised when the user enters a command that is not supported."""

    def __init__(self, command, message="Unknown command"):
        super().__init__(message)
        self.command = command
        self.message = message

    def __str__(self):
        return f"{self.command} -> {self.message}"
# NOTE(review): the lowercase class name is unconventional (PEP 8 suggests
# ``Market``), but it is referenced throughout this file, so it stays.
@dataclass(frozen=True)
class markets:
    """An immutable record of one product offered by one shop."""
    shop: str  # shop name
    product: str  # product name
    price: float  # price in rubles; Staff.add() validates the 0..5000 range
@dataclass
class Staff:
    """A mutable collection of market records with XML persistence."""

    market: List[markets] = field(default_factory=lambda: [])

    def add(self, product: str, shop: str, price: float) -> None:
        """
        Add one product record.

        :raises IllegalPriceError: when *price* is outside the [0, 5000] range.
        """
        if price < 0 or price > 5000:
            raise IllegalPriceError(price)
        self.market.append(
            markets(
                shop=shop,
                product=product,
                price=price
            )
        )
        # keep the list ordered by product name for a stable display
        # (loop variable renamed: the original shadowed the `markets` class)
        self.market.sort(key=lambda entry: entry.product)

    def __str__(self) -> str:
        """Render the products as an ASCII table."""
        line = '+-{}-+-{}-+-{}-+-{}-+'.format(
            '-' * 4,
            '-' * 30,
            '-' * 20,
            '-' * 20
        )
        table = [line]
        table.append(
            '| {:^4} | {:^30} | {:^20} | {:^20} |'.format(
                "No",
                "Магазин",
                "Товар",
                "Стоимость в руб."
            )
        )
        table.append(line)
        for idx, entry in enumerate(self.market, 1):
            table.append(
                '| {:>4} | {:<30} | {:<20} | {:>20} |'.format(
                    idx,
                    entry.shop,
                    entry.product,
                    entry.price
                )
            )
        table.append(line)
        return '\n'.join(table)

    def select(self, period: str) -> List[markets]:
        """
        Return all records whose product name contains *period*.

        FIX: the original body read the module-level ``command`` and
        ``product`` globals instead of its own argument, which made the
        method unusable on its own. The parameter keeps its historical
        name for backward compatibility; it is the product-name substring
        to search for.
        """
        return [entry for entry in self.market if period in entry.product]

    def load(self, filename: str) -> None:
        """Replace the current record list with records parsed from an XML file."""
        with open(filename, 'r', encoding='utf8') as fin:
            xml = fin.read()
        parser = ET.XMLParser(encoding="utf8")
        tree = ET.fromstring(xml, parser=parser)
        self.market = []
        for markets_element in tree:
            product, shop, price = None, None, None
            for element in markets_element:
                if element.tag == 'shop':
                    shop = element.text
                elif element.tag == 'product':
                    product = element.text
                elif element.tag == 'price':
                    # FIX: the original parsed element.tag (the literal
                    # string 'price'), which always raised ValueError;
                    # the value lives in element.text.
                    price = float(element.text)
            # skip incomplete records
            if product is not None and shop is not None \
                    and price is not None:
                self.market.append(
                    markets(
                        shop=shop,
                        product=product,
                        price=price
                    )
                )

    def save(self, filename: str) -> None:
        """Serialize the current record list to an XML file."""
        root = ET.Element('market')
        for entry in self.market:
            markets_element = ET.Element('markets')
            shop_element = ET.SubElement(markets_element, 'shop')
            shop_element.text = entry.shop
            product_element = ET.SubElement(markets_element, 'product')
            product_element.text = entry.product
            price_element = ET.SubElement(markets_element, 'price')
            price_element.text = str(entry.price)
            root.append(markets_element)
        tree = ET.ElementTree(root)
        with open(filename, 'wb') as fout:
            tree.write(fout, encoding='utf8', xml_declaration=True)
if __name__ == '__main__':
    # Log every user action to a file.
    logging.basicConfig(
        filename='market.log',
        level=logging.INFO,
        format='%(asctime)s %(levelname)s:%(message)s'
    )
    # The in-memory product list.
    staff = Staff()
    # Infinite command-request loop.
    while True:
        try:
            # Read a command from the terminal.
            command = input(">>> ").lower()
            if command == 'exit':
                break
            elif command == 'add':
                # Ask for the product data.
                shop = input("Название магазина? ")
                product = input("Название товара? ")
                price = int(input("Стоимость товара в руб.? "))
                staff.add(product, shop, price)
                logging.info(
                    f"Добавлен товар: {product}, {shop}, "
                    f"поступивший по {price} цене."
                )
            elif command == 'list':
                # Print the product table.
                print(staff)
                logging.info("Отображен список товаров.")
            elif command.startswith('select '):
                parts = command.split(' ', maxsplit=1)
                selected = staff.select(parts[1])
                if selected:
                    # FIX: the original printed `markets.product` (an attribute
                    # of the dataclass itself) instead of the matched record,
                    # and parsed the command a second time into unused vars.
                    for count, item in enumerate(selected, 1):
                        print(
                            '{:>4}: {}'.format(count, item.product)
                        )
                    logging.info(
                        f"Найден {len(selected)} товар с "
                        f"ценой более {parts[1]} "
                    )
                else:
                    print("Товар не найден.")
                    logging.warning(
                        f"Товар с ценой {parts[1]} не найден."
                    )
            elif command.startswith('load '):
                # Split off the file name and load the data.
                parts = command.split(' ', maxsplit=1)
                staff.load(parts[1])
                logging.info(f"Загружены данные из файла {parts[1]}.")
            elif command.startswith('save '):
                # Split off the file name and save the data.
                parts = command.split(maxsplit=1)
                staff.save(parts[1])
                logging.info(f"Сохранены данные в файл {parts[1]}.")
            elif command == 'help':
                # Print usage help.
                print("Список команд:\n")
                print("add - добавить продукт;")
                print("list - вывести список продуктов;")
                print("load <имя_файла> - загрузить данные из файла;")
                print("save <имя_файла> - сохранить данные в файл;")
                print("select <товар> - информация о товаре;")
                print("help - отобразить справку;")
                print("exit - завершить работу с программой.")
            else:
                raise UnknownCommandError(command)
        except Exception as exc:
            logging.error(f"Ошибка: {exc}")
            print(exc, file=sys.stderr)
2070af6901a641956694818b34ea94304eb1d8e5 | 2,472 | py | Python | tabla/tabla/tests/test_simulator.py | ziqingzeng/public | 4102b3bd42f43b49cf74599492d52d4f755ab7b2 | [
"BSD-3-Clause"
] | 6 | 2021-04-20T06:33:25.000Z | 2022-02-24T06:46:13.000Z | tabla/tabla/tests/test_simulator.py | ziqingzeng/public | 4102b3bd42f43b49cf74599492d52d4f755ab7b2 | [
"BSD-3-Clause"
] | 3 | 2021-04-20T04:28:51.000Z | 2021-05-24T05:14:31.000Z | tabla/tabla/tests/test_simulator.py | ziqingzeng/public | 4102b3bd42f43b49cf74599492d52d4f755ab7b2 | [
"BSD-3-Clause"
] | 4 | 2021-04-08T16:38:46.000Z | 2021-04-30T05:51:30.000Z | import os, sys
try:
import simulation
except ModuleNotFoundError:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../'))
import pytest
from simulation.simulator import Simulator
from simulation.instruction import InstructionLoader
from pathlib import Path
import argparse
BENCH_ROOT = f"{Path(f'{__file__}').parent}/../../compilation_output"
CFG_ROOT = f"{Path(f'{__file__}').parent}/../configs"
def simulate_benchmark(bench_name, cfg_name, debug=False):
    """Run a full simulation of *bench_name* with config *cfg_name* and print stats."""
    bench_path = Path(BENCH_ROOT, bench_name).resolve()
    config_path = Path(CFG_ROOT, cfg_name).resolve()
    sim = Simulator(bench_path, config_path, debug)
    # run the whole simulation to completion
    # (use sim.run_cycles(n) / sim.only_debug_pu(i) for partial debugging runs)
    sim.run()
    sim.print_statistics()
@pytest.mark.parametrize('benchmark, feature_size, pus, pes', [
    # previously exercised: ('linear', [784], 8, 8), ('reco', [54, 54, 3], 8, 8)
    ('svm_wifi', [325, 139], 4, 64),
])
def test_sim(benchmark, feature_size, pus, pes):
    """End-to-end simulation smoke test for one compiled benchmark package."""
    sizes = [str(f) for f in feature_size]
    # svm_wifi packages also encode the PU/PE configuration in their name
    if benchmark == "svm_wifi":
        package_name = f"{benchmark}_{'_'.join(sizes)}_{pus}PU_{pes}PE"
    else:
        package_name = f"{benchmark}_{'_'.join(sizes)}"
    simulate_benchmark(package_name, f"config_{pus}_{pes}.json", debug=False)
if __name__ == '__main__':
    # FIX: the original final line was fused with dataset-residue text
    # ("... | 36.352941 | 103 | 0.649676 |"), which was a syntax error.
    argparser = argparse.ArgumentParser(description='Simulator testing')
    argparser.add_argument('-b', '--benchmark', required=True,
                           help='Name of the benchmark to create. One of "logistic", "linear", "reco",'
                                'or "svm".')
    argparser.add_argument('-fs', '--feature_size', nargs='+', required=True,
                           help='Feature size to use for creating the benchmark')
    argparser.add_argument('-cfg', '--config', nargs='+', required=True,
                           help='PE/PU config')
    args = argparser.parse_args()
    # the config is expected as two values: <PUs> <PEs>
    assert len(args.config) == 2
    pus, pes = args.config
    feature_size = [str(f) for f in args.feature_size]
    # svm_wifi packages also encode the PU/PE configuration in their name
    if args.benchmark == "svm_wifi":
        package_name = f"{args.benchmark}_{'_'.join(feature_size)}_{pus}PU_{pes}PE"
    else:
        package_name = f"{args.benchmark}_{'_'.join(feature_size)}"
    cfg_path = f"config_{pus}_{pes}.json"
    simulate_benchmark(package_name, cfg_path)
simulate_benchmark(package_name, cfg_path) | 36.352941 | 103 | 0.649676 |
5e656272e6300c64758d53545682c93be4e2a7bf | 3,298 | py | Python | synaptor/io/backends/aws.py | nkemnitz/Synaptor | 40618786d5b762eb3877ecac49ff310f3e6f892d | [
"MIT"
] | 1 | 2019-04-08T21:01:59.000Z | 2019-04-08T21:01:59.000Z | synaptor/io/backends/aws.py | nkemnitz/Synaptor | 40618786d5b762eb3877ecac49ff310f3e6f892d | [
"MIT"
] | null | null | null | synaptor/io/backends/aws.py | nkemnitz/Synaptor | 40618786d5b762eb3877ecac49ff310f3e6f892d | [
"MIT"
] | null | null | null | """ AWS IO Functionality """
import os
import re
import subprocess
import cloudvolume # Piggybacking on cloudvolume's secrets
import boto3
from . import utils
REGEXP = re.compile("s3://")
CREDS_FN = cloudvolume.secrets.aws_credentials
def pull_file(remote_path):
    """Download a single s3:// object into the cwd, keeping its basename."""
    bucket, key = parse_remote_path(remote_path)
    client = open_client(bucket)
    client.download_file(bucket, key, os.path.basename(remote_path))
def pull_files(remote_paths, batching_limit=50000, batch_size=1000):
    """
    Download many remote files into the cwd; falls back to batched copies
    when the path list is very large. Returns the local basenames.
    """
    if len(remote_paths) > batching_limit:
        return pull_files_in_batches(remote_paths, batch_size)
    # NOTE(review): this is the AWS backend but it shells out to the gsutil
    # CLI (which can talk to s3:// URLs) - confirm gsutil is a dependency.
    subprocess.call(["gsutil", "-m", "-q", "cp", *remote_paths, "."])
    return [os.path.basename(p) for p in remote_paths]
def pull_files_in_batches(paths, batch_size=1000):
    """
    Download *paths* into the cwd in batches of *batch_size* files.

    Returns the list of local basenames.
    """
    # FIX: the original computed `len(paths) / batch_size + 1`, which is a
    # float in Python 3 and made `range()` raise TypeError; use ceiling
    # integer division instead.
    num_batches = (len(paths) + batch_size - 1) // batch_size
    local_paths = list()
    for i in range(num_batches):
        batch_paths = paths[i * batch_size:(i + 1) * batch_size]
        subprocess.call(["gsutil", "-m", "-q", "cp", *batch_paths, "."])
        local_paths.extend(map(os.path.basename, batch_paths))
    return local_paths
def pull_directory(remote_dir):
    """
    Download every object directly under *remote_dir* into a same-named
    local directory. This will currently break if the remote dir has
    subdirectories. Returns the local file paths.
    """
    bucket, prefix = parse_remote_path(remote_dir)
    client = open_client(bucket)
    remote_keys = keys_under_prefix(client, bucket, prefix)
    local_dir = os.path.basename(utils.check_no_slash(prefix))
    if not os.path.isdir(local_dir):
        os.makedirs(local_dir)
    local_fnames = []
    for remote_key in remote_keys:
        dest = os.path.join(local_dir, os.path.basename(remote_key))
        client.download_file(bucket, remote_key, dest)
        local_fnames.append(dest)
    return local_fnames
def send_file(local_name, remote_path):
    """Upload one local file to the given s3:// destination."""
    bucket, key = parse_remote_path(remote_path)
    open_client(bucket).upload_file(local_name, bucket, key)
def send_files(local_names, remote_dir):
    # Bulk parallel upload via the gsutil CLI.
    # NOTE(review): this is the AWS backend but it shells out to gsutil
    # (which can talk to s3:// URLs) - confirm gsutil is a dependency.
    subprocess.call(["gsutil", "-q", "-m", "cp", *local_names, remote_dir])
def send_directory(local_dir, remote_dir):
    """Upload every file in *local_dir* into a same-named subdirectory of *remote_dir*."""
    bucket, key = parse_remote_path(remote_dir)
    # place the files under <remote_dir>/<basename of local_dir>/
    key = os.path.join(key, os.path.basename(utils.check_no_slash(local_dir)))
    client = open_client(bucket)
    for fname in os.listdir(local_dir):
        client.upload_file(os.path.join(local_dir, fname),
                           bucket,
                           os.path.join(key, fname))
def keys_under_prefix(client, bucket, key):
    """
    Return every object key under the given prefix.

    FIX: the original single `list_objects` call silently truncated results
    at 1000 keys and raised KeyError when no object matched ('Contents' is
    absent from the response). Paginate and default to an empty page.
    """
    prefix = utils.check_slash(key)
    keys = []
    for page in client.get_paginator("list_objects").paginate(Bucket=bucket,
                                                              Prefix=prefix):
        keys.extend(obj["Key"] for obj in page.get("Contents", []))
    return keys
def parse_remote_path(remote_path):
    """Thin wrapper over the utils parser that enforces the s3: scheme."""
    pieces = utils.parse_remote_path(remote_path)
    assert pieces[0] == "s3:", "Mismatched protocol (expected AWS S3)"
    return pieces[1], pieces[2]
def open_client(bucket):
    """Build an S3 client with cloudvolume-managed credentials for *bucket*."""
    creds = CREDS_FN(bucket)
    # NOTE(review): the region is hard-coded to us-east-1 - confirm intended
    return boto3.client(
        "s3",
        aws_access_key_id=creds["AWS_ACCESS_KEY_ID"],
        aws_secret_access_key=creds["AWS_SECRET_ACCESS_KEY"],
        region_name="us-east-1")
| 27.949153 | 78 | 0.681928 |
8aba9fbf2780ef07add8c586ccad77d85ba6461e | 4,248 | py | Python | models.py | tobiasbartel/servicium-instance_manager | 74702ab61481df67c06c6dc7dfd435a4b37126e8 | [
"MIT"
] | null | null | null | models.py | tobiasbartel/servicium-instance_manager | 74702ab61481df67c06c6dc7dfd435a4b37126e8 | [
"MIT"
] | null | null | null | models.py | tobiasbartel/servicium-instance_manager | 74702ab61481df67c06c6dc7dfd435a4b37126e8 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.template.defaultfilters import slugify
from django.db import models
from django.core.validators import validate_comma_separated_integer_list
from django.db import models
from servicecatalog.models import STATE, LIVE, ACCESS_DIRECTION, BOTH, PaymentMethod, Module, Contact
from contact_manager.models import Contact, ContactRole
# Short environment codes stored in Instance.environment (CharField, max_length=2).
DEV = 'd'
INTE = 'i'
QA = 'q'
CTEST = 'ct'
PROD = 'p'

# (code, human-readable label) pairs for CharField(choices=...).
ENVIRONMENT_OPTIONS = (
    (DEV, 'Development'),
    (INTE, 'Integration'),
    (QA, 'Quality Assurance'),
    (CTEST, 'Customer Test'),
    (PROD, 'Production'),
)
class Location(models.Model):
    """A named deployment location referenced by Instance.location."""
    name = models.CharField(max_length=200, unique=True)

    def __unicode__(self):
        return str(self.name)
class InstanceConnectsInstance(models.Model):
    """A (possibly directed) connection between two Instances.

    Through-model for Instance.connected_to_instance.
    """
    from_instance = models.ForeignKey('Instance', related_name='instance_from_relation')
    to_instance = models.ForeignKey('Instance', related_name='instance_to_relation')
    access_direction = models.CharField(choices=ACCESS_DIRECTION, default=BOTH, max_length=2)
    payment_methods = models.ManyToManyField(PaymentMethod, blank=True, default=None)
    comment = models.CharField(max_length=150, default=None, null=True, blank=True)
    # Tri-state: True/False for online/offline links, None when unknown.
    is_online = models.NullBooleanField(default=None, null=True, blank=True)

    class Meta:
        unique_together = ('from_instance', 'to_instance', 'access_direction', 'is_online')

    def __unicode__(self):
        return str("%s %s %s" % (self.from_instance, self.get_access_direction_display(), self.to_instance))
class InstanceConnectsModule(models.Model):
    """A (possibly directed) connection from an Instance to a catalogue Module.

    Through-model for Instance.connected_to_module.
    """
    from_instance = models.ForeignKey('Instance', related_name='from_instance_to_module_relation')
    to_module = models.ForeignKey(Module, related_name='to_module_from_instance_relation')
    access_direction = models.CharField(choices=ACCESS_DIRECTION, default=BOTH, max_length=2)
    payment_methods = models.ManyToManyField(PaymentMethod, blank=True, default=None)
    comment = models.CharField(max_length=150, default=None, null=True, blank=True)
    # Tri-state: True/False for online/offline links, None when unknown.
    is_online = models.NullBooleanField(default=None, null=True, blank=True)

    class Meta:
        unique_together = ('from_instance', 'to_module', 'access_direction', 'is_online')

    def __unicode__(self):
        # Bug fix: ``self.to_module.__unicode__`` was missing the call
        # parentheses, so the bound method's repr was interpolated instead of
        # the module's label.
        return str("%s %s %s" % (self.from_instance, self.get_access_direction_display(), self.to_module.__unicode__()))
class Instance(models.Model):
    """A deployed instance of a catalogue Module in a given environment/location."""
    name = models.CharField(max_length=200, unique=False, blank=True, default='')
    slug = models.SlugField(unique=True, null=True, blank=True)
    module = models.ForeignKey(Module, related_name='instance_of_module')
    environment = models.CharField(max_length=2, choices=ENVIRONMENT_OPTIONS, default=None, blank=False, null=False)
    location = models.ForeignKey('Location', unique=False, blank=None, )
    connected_to_instance = models.ManyToManyField('self', through='InstanceConnectsInstance', symmetrical=False, default=None, blank=True, related_name='instance_on_instance')
    connected_to_module = models.ManyToManyField(Module, through='InstanceConnectsModule', symmetrical=False, default=None, blank=True, related_name='instance_on_module')
    customer_accesable = models.BooleanField(default=False)
    state = models.CharField(max_length=10, choices=STATE, default=LIVE, blank=False)

    class Meta:
        unique_together = ('name', 'module', 'environment', 'location')
        permissions = (
            ("is_owner", "Is Owner"),
        )

    def __unicode__(self):
        # Fall back to a descriptive composite label when no explicit name is
        # set.  (Was ``self.name is not ''`` -- identity comparison against a
        # str literal is fragile and a SyntaxWarning on modern CPython.)
        if self.name:
            return str("%s" % (self.name,))
        else:
            return str("%s (%s, %s)" % (self.module.name, self.get_environment_display(), self.location.name))

    def save(self, *args, **kwargs):
        # Auto-populate the slug on first save from the name, or from the
        # composite label when no name is set.
        # (Was ``self.slug == None or len(self.slug) == 0``.)
        if not self.slug:
            if len(self.name) > 0:
                self.slug = slugify(self.name)
            else:
                self.slug = slugify(self.__unicode__())
        super(Instance, self).save(*args, **kwargs)
class InstanceContact(models.Model):
    """Associates a Contact (in a given role) with an Instance."""
    parent = models.ForeignKey(Instance)
    contact = models.ForeignKey(Contact)
    role = models.ForeignKey(ContactRole)

    class Meta:
        unique_together = ('parent', 'contact', 'role', )
e225bf9dcff3f342d7cbe93eb6aef860b0a8b9e1 | 894 | py | Python | examples/undocumented/python_modular/kernel_wave_modular.py | srgnuclear/shogun | 33c04f77a642416376521b0cd1eed29b3256ac13 | [
"Ruby",
"MIT"
] | 1 | 2015-11-05T18:31:14.000Z | 2015-11-05T18:31:14.000Z | examples/undocumented/python_modular/kernel_wave_modular.py | waderly/shogun | 9288b6fa38e001d63c32188f7f847dadea66e2ae | [
"Ruby",
"MIT"
] | null | null | null | examples/undocumented/python_modular/kernel_wave_modular.py | waderly/shogun | 9288b6fa38e001d63c32188f7f847dadea66e2ae | [
"Ruby",
"MIT"
] | null | null | null | #!/usr/bin/env python
from tools.load import LoadMatrix
from numpy import where
# Load the example train/test feature matrices shipped with the shogun examples.
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
# Each entry is one argument set for kernel_wave_modular: (train, test, theta).
parameter_list=[[traindat,testdat, 1.0],[traindat,testdat, 10.0]]
def kernel_wave_modular (fm_train_real=traindat,fm_test_real=testdat, theta=1.0):
	"""Build Wave-kernel matrices over the example features.

	Returns (km_train, km_test, kernel): the train/train and train/test
	kernel matrices plus the kernel object itself.
	"""
	from modshogun import RealFeatures
	from modshogun import WaveKernel
	from modshogun import EuclideanDistance

	feats_train=RealFeatures(fm_train_real)
	feats_test=RealFeatures(fm_test_real)

	# The Wave kernel is parameterised by theta and an underlying distance.
	distance=EuclideanDistance(feats_train, feats_train)
	kernel=WaveKernel(feats_train, feats_train, theta, distance)
	km_train=kernel.get_kernel_matrix()

	# Re-initialise against the test features for the train/test matrix.
	kernel.init(feats_train, feats_test)
	km_test=kernel.get_kernel_matrix()
	return km_train,km_test,kernel

if __name__=='__main__':
	print('Wave')
	kernel_wave_modular(*parameter_list[0])
29afbea73959713b3f7691c13b904ac28b5d9db9 | 1,252 | py | Python | 00_Code/01_LeetCode/7_ReverseInteger.py | KartikKannapur/Data_Structures_and_Algorithms_Python | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | [
"MIT"
] | 1 | 2017-06-11T04:57:07.000Z | 2017-06-11T04:57:07.000Z | 00_Code/01_LeetCode/7_ReverseInteger.py | KartikKannapur/Data_Structures_and_Algorithms_Python | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | [
"MIT"
] | null | null | null | 00_Code/01_LeetCode/7_ReverseInteger.py | KartikKannapur/Data_Structures_and_Algorithms_Python | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | [
"MIT"
] | null | null | null | """
Given a 32-bit signed integer, reverse digits of an integer.
Example 1:
Input: 123
Output: 321
Example 2:
Input: -123
Output: -321
Example 3:
Input: 120
Output: 21
Note:
Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−2^31, 2^31 − 1]. For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.
"""
class Solution(object):
    def reverse(self, x):
        """Reverse the decimal digits of a 32-bit signed integer.

        :type x: int
        :rtype: int -- the reversed integer, or 0 when the result would
            overflow the signed 32-bit range.
        """
        sign = -1 if x < 0 else 1
        # Reverse the digit characters of |x| and restore the sign.
        result = sign * int(str(abs(x))[::-1])
        # The signed 32-bit range is asymmetric: [-2**31, 2**31 - 1].  The
        # original compared abs(result) > 2**31, which is off by one at the
        # positive boundary; use the exact bounds instead.
        if result < -2 ** 31 or result > 2 ** 31 - 1:
            return 0
        return result
f2d55f19e26ab06e271de69d23c5cee26c083ed6 | 883 | py | Python | cutout.py | ra1nty/CIFAR-100-CS543 | 055164b7ab16b6d13f747d6addf651db60582adb | [
"MIT"
] | 2 | 2019-05-02T08:14:35.000Z | 2019-05-02T18:35:14.000Z | cutout.py | ra1nty/CIFAR-100-CS543 | 055164b7ab16b6d13f747d6addf651db60582adb | [
"MIT"
] | null | null | null | cutout.py | ra1nty/CIFAR-100-CS543 | 055164b7ab16b6d13f747d6addf651db60582adb | [
"MIT"
] | null | null | null | import numpy as np
import torch
class CutOut(object):
    """Randomly zero out ``n`` square patches of side ``length`` in an image tensor.

    Implements the Cutout augmentation: for each of the ``n`` holes a random
    centre is drawn uniformly over the image and a ``length`` x ``length``
    square around it is set to zero (clipped at the image border).
    """

    def __init__(self, n, length):
        # n: number of holes to cut out; length: side length of each square.
        self.n = n
        self.length = length

    def __call__(self, tensor):
        """Apply cutout to a (C, H, W) image tensor and return the result."""
        # Generalised: use the tensor's actual channel count instead of the
        # original hard-coded 3, so grayscale / multi-spectral inputs work too.
        c, h, w = tensor.shape
        mask = np.ones((c, h, w), dtype=np.float32)
        for _ in range(self.n):
            y = np.random.randint(h)
            x = np.random.randint(w)
            # Clip the square to the image bounds.
            y1 = np.clip(y - self.length // 2, 0, h)
            y2 = np.clip(y + self.length // 2, 0, h)
            x1 = np.clip(x - self.length // 2, 0, w)
            x2 = np.clip(x + self.length // 2, 0, w)
            mask[:, y1: y2, x1: x2] = 0.
        return tensor * torch.from_numpy(mask)

    def __repr__(self):
        return self.__class__.__name__ + '(n={0}, length={1})'.format(self.n, self.length)
ce2da37e92de1366741e3255011a09cb33c03511 | 2,764 | py | Python | echidna/settings/http_response_message.py | liyao2598330/echidna | 145c1345ea8ee25cfcc5d3eff867ae06ddea39e8 | [
"MIT"
] | null | null | null | echidna/settings/http_response_message.py | liyao2598330/echidna | 145c1345ea8ee25cfcc5d3eff867ae06ddea39e8 | [
"MIT"
] | null | null | null | echidna/settings/http_response_message.py | liyao2598330/echidna | 145c1345ea8ee25cfcc5d3eff867ae06ddea39e8 | [
"MIT"
] | 1 | 2020-10-19T14:13:41.000Z | 2020-10-19T14:13:41.000Z | """
@author: liyao
@contact: liyao2598330@126.com
@time: 2020/8/14 12:02 下午
"""
from echidna.settings.mood import MOOD
from random import choice
__all__ = ['HttpResponseMessage']
class HttpResponseMessage:
    """Map an HTTP status code to a unified default message and mood emoticon.

    Status code semantics:
    https://developer.mozilla.org/zh-CN/docs/Web/HTTP/Status

    The emoticon half of the result is a random pick, so these examples are
    illustrative only:

        >>> hrm = HttpResponseMessage()
        >>> hrm(200)
        {'msg': 'ok', 'mood': ' ʅ(‾◡◝)'}
        >>> hrm.get_message(200)
        {'msg': 'ok', 'mood': '(ノ ̄ω ̄)ノ'}
    """
    # Per-status default messages; http_default covers every unlisted code.
    http_default = '服务可能发生了一点点小问题,程序员哥哥正在加班处理..'
    http_200 = 'success'
    http_201 = '资源创建成功'
    http_400 = '糟糕,缺少必要的参数'
    http_401 = '糟糕,您的登陆信息失效或异常,可重新登陆后在尝试'
    http_403 = '糟糕,您没有权限访问此资源'
    http_404 = '您请求的资源不存在'
    http_405 = '不支持此方式请求,请以正确的姿势进入'
    http_429 = '请求频率过快,请降低访问频率'
    http_500 = '处理异常,服务器可能抽风了'

    def __call__(self, code: int, *args, **kwargs) -> dict:
        """Return the default message and mood for the given status code.

        :param code: HTTP status code
        :return: e.g. {'msg': 'ok', 'mood': '(ノ ̄ω ̄)ノ'}
        """
        return self.get_message(code)

    @classmethod
    def get_message(cls, code: int) -> dict:
        """Return the default message and mood for the given status code.

        :param code: HTTP status code (must satisfy 200 <= code < 600)
        :return: e.g. {'msg': 'ok', 'mood': '(ノ ̄ω ̄)ノ'}
        """
        assert isinstance(code, int) and 200 <= code < 600, 'code Must be a standard HTTP status code'
        message = getattr(cls, 'http_%s' % code) if hasattr(cls, 'http_%s' % code) else cls.http_default
        return {
            'msg': message,
            'mood': cls.get_http_code_mood(code)
        }

    @classmethod
    def get_http_code_mood(cls, code: int) -> str:
        """Return a mood emoticon matching the given status code.

        :param code: HTTP status code
        :return: an emoticon string
        """
        # NOTE(review): ``cls.random_mood`` accessed on the class yields the
        # property object itself, not a string -- any code missing from this
        # dict (e.g. 302) returns that object.  Also, every get_mood entry is
        # evaluated eagerly on each call.
        return {
            200: cls.get_mood(tag='happy'),
            201: cls.get_mood(tag='laugh'),
            400: cls.get_mood(tag='sorry'),
            401: cls.get_mood(tag='confuse'),
            403: cls.get_mood(tag='cry'),
            404: cls.get_mood(tag='surprise'),
            405: cls.get_mood(tag='sleep'),
            429: cls.get_mood(tag='wtf'),
            500: cls.get_mood(tag='cry'),
        }.get(code, cls.random_mood)

    @property
    def random_mood(self) -> str:
        """Return a random emoticon from a random mood group.

        :return: an emoticon string
        """
        return choice(choice(MOOD)['yan'])

    @staticmethod
    def get_mood(tag: str) -> str:
        """Return a random emoticon from the mood group tagged *tag*.

        Falls back to a fully random emoticon when no group carries the tag.

        :param tag: mood tag, e.g. 'happy'
        :return: an emoticon string
        """
        for mood in MOOD:
            if tag in mood['tag']:
                return choice(mood['yan'])
        return choice(choice(MOOD)['yan'])
1175938c8b8e99dadc7c478491e9cc23646aa277 | 3,715 | py | Python | scripts/glance/check_glance.py | jaimevalero/openstack-monitoring | 234b49aafe4247586cf45346872ff91e125d08ba | [
"Apache-2.0"
] | 1 | 2020-10-01T13:10:45.000Z | 2020-10-01T13:10:45.000Z | scripts/glance/check_glance.py | jaimevalero/openstack-monitoring | 234b49aafe4247586cf45346872ff91e125d08ba | [
"Apache-2.0"
] | null | null | null | scripts/glance/check_glance.py | jaimevalero/openstack-monitoring | 234b49aafe4247586cf45346872ff91e125d08ba | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Keystone monitoring script for Nagios
#
# Copyright © 2012 eNovance <licensing@enovance.com>
#
# Author: Florian Lambert <florian.lambert@enovance.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import argparse
from glanceclient import client as gclient
# Nagios plugin exit codes.
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
STATE_UNKNOWN = 3
def collect_args():
    """Build the command-line argument parser for this glance check.

    Connection options default to the usual OS_* environment variables.
    """
    option_specs = [
        ('--auth_url', dict(metavar='URL', type=str,
                            default=os.getenv('OS_AUTH_URL'),
                            help='Keystone URL')),
        ('--username', dict(metavar='username', type=str,
                            default=os.getenv('OS_USERNAME'),
                            help='username to use for authentication')),
        ('--password', dict(metavar='password', type=str,
                            default=os.getenv('OS_PASSWORD'),
                            help='password to use for authentication')),
        ('--tenant', dict(metavar='tenant', type=str,
                          default=os.getenv('OS_TENANT_NAME'),
                          help='tenant name to use for authentication')),
        ('--req_count', dict(metavar='numberImages', type=str,
                             required=False,
                             help='minimum number of images in glance')),
        ('--req_images', dict(metavar='imagesName', type=str, nargs='+',
                              required=False,
                              help='name of images who must be available')),
        ('--region_name', dict(metavar='region_name', type=str,
                               help='Region to select for authentication')),
    ]
    parser = argparse.ArgumentParser(description='Check an OpenStack glance server.')
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser
def check_glance(c,args):
    """Check glance for a minimum image count and/or named image availability.

    Prints a Nagios-style status line.  Exits CRITICAL when fewer than
    --req_count images exist, WARNING when some --req_images are missing;
    otherwise falls through (implicit OK for the caller).

    NOTE: this module uses Python 2 print statements.
    """
    #Flags resultat
    valid_image = 0
    count = 0

    if args.req_count :
        required_count = int(args.req_count)
        # count flags that at least required_count images were listed.
        if len(c.get_images(**{"limit": required_count})) >= required_count:
            count = 1

    #filters = {}
    #filters['name'] = "Debian GNU/Linux 6.0.4 amd64"
    #filters['container_format'] = "ami"
    if args.req_images :
        required_images = args.req_images
        for image in required_images:
            try:
                if len(c.get_images(**{"filters": {"name": image}})) == 1:
                    valid_image = valid_image + 1
            except :
                # NOTE(review): bare except hides every error (auth, network,
                # API); narrow to the expected glanceclient exception when
                # touching this code.
                pass

    #parameters = {"filters": filters, "limit": limit}
    #images = c.get_images(**parameters)

    if args.req_count and count == 0:
        print "Failed - less than %d images found" % (required_count)
        sys.exit(STATE_CRITICAL)

    if args.req_images and valid_image < len(required_images):
        print "Failed - '%s' %d/%d images found " % (required_images,valid_image,len(required_images))
        sys.exit(STATE_WARNING)

    if args.req_images and args.req_count:
        print "OK - image %s found and enough images >=%d" % (required_images,required_count)
    elif args.req_images:
        print "OK - image %s found" % (required_images)
    elif args.req_count:
        print "OK - more than %d images found" % (count)
    else :
        print "OK - Connection glance established"
if __name__ == '__main__':
    # Parse CLI args, open a glance client and run the check; any unexpected
    # exception maps to Nagios CRITICAL.
    args = collect_args().parse_args()
    try:
        c = gclient.Client('1','')
        sys.exit(check_glance(c,args))
    except Exception as e:
        print str(e)
        sys.exit(STATE_CRITICAL)
| 31.218487 | 97 | 0.693405 |
4cc252f38c5fa3fdc6a1da186ec58e0861484522 | 5,800 | py | Python | sdk/lusid/models/get_recipe_response.py | finbourne/lusid-sdk-python-generated-preview | 9c36c953e8149443a4390ed7f0c04d01211401b6 | [
"MIT"
] | null | null | null | sdk/lusid/models/get_recipe_response.py | finbourne/lusid-sdk-python-generated-preview | 9c36c953e8149443a4390ed7f0c04d01211401b6 | [
"MIT"
] | null | null | null | sdk/lusid/models/get_recipe_response.py | finbourne/lusid-sdk-python-generated-preview | 9c36c953e8149443a4390ed7f0c04d01211401b6 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.4425
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class GetRecipeResponse(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    openapi_types = {
        'href': 'str',
        'value': 'ConfigurationRecipe',
        'links': 'list[Link]'
    }

    attribute_map = {
        'href': 'href',
        'value': 'value',
        'links': 'links'
    }

    required_map = {
        'href': 'optional',
        'value': 'optional',
        'links': 'optional'
    }

    def __init__(self, href=None, value=None, links=None, local_vars_configuration=None):  # noqa: E501
        """GetRecipeResponse - a model defined in OpenAPI

        :param href: The specific Uniform Resource Identifier (URI) for this resource at the requested effective and asAt datetime.
        :type href: str
        :param value:
        :type value: lusid.ConfigurationRecipe
        :param links: Collection of links.
        :type links: list[lusid.Link]

        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration

        self._href = None
        self._value = None
        self._links = None
        self.discriminator = None

        self.href = href
        if value is not None:
            self.value = value
        self.links = links

    @property
    def href(self):
        """Gets the href of this GetRecipeResponse.  # noqa: E501

        The specific Uniform Resource Identifier (URI) for this resource at the requested effective and asAt datetime.  # noqa: E501

        :return: The href of this GetRecipeResponse.  # noqa: E501
        :rtype: str
        """
        return self._href

    @href.setter
    def href(self, href):
        """Sets the href of this GetRecipeResponse.

        The specific Uniform Resource Identifier (URI) for this resource at the requested effective and asAt datetime.  # noqa: E501

        :param href: The href of this GetRecipeResponse.  # noqa: E501
        :type href: str
        """

        self._href = href

    @property
    def value(self):
        """Gets the value of this GetRecipeResponse.  # noqa: E501


        :return: The value of this GetRecipeResponse.  # noqa: E501
        :rtype: lusid.ConfigurationRecipe
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this GetRecipeResponse.


        :param value: The value of this GetRecipeResponse.  # noqa: E501
        :type value: lusid.ConfigurationRecipe
        """

        self._value = value

    @property
    def links(self):
        """Gets the links of this GetRecipeResponse.  # noqa: E501

        Collection of links.  # noqa: E501

        :return: The links of this GetRecipeResponse.  # noqa: E501
        :rtype: list[lusid.Link]
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this GetRecipeResponse.

        Collection of links.  # noqa: E501

        :param links: The links of this GetRecipeResponse.  # noqa: E501
        :type links: list[lusid.Link]
        """

        self._links = links

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}

        def convert(x):
            # Serialise nested generated models via their own to_dict,
            # passing `serialize` through when the signature accepts it.
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # With serialize=True, emit the JSON wire names from attribute_map.
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GetRecipeResponse):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, GetRecipeResponse):
            return True

        return self.to_dict() != other.to_dict()
758ca4c68df95abd76b252c6c9fa73e86cc2b26f | 852 | py | Python | 06.algorithm004-02/week02/06.Tree-BST/leetcode-590.py | custergo/study_algo | fe35f747d396f90a9312e9229cf5ab25234cd4bd | [
"Apache-2.0"
] | 1 | 2020-06-15T02:36:38.000Z | 2020-06-15T02:36:38.000Z | 06.algorithm004-02/week02/06.Tree-BST/leetcode-590.py | custer-go/study_algo | fe35f747d396f90a9312e9229cf5ab25234cd4bd | [
"Apache-2.0"
] | null | null | null | 06.algorithm004-02/week02/06.Tree-BST/leetcode-590.py | custer-go/study_algo | fe35f747d396f90a9312e9229cf5ab25234cd4bd | [
"Apache-2.0"
] | 1 | 2019-10-27T12:27:12.000Z | 2019-10-27T12:27:12.000Z | # Definition for a Node.
class Node(object):
def __init__(self, val, children):
self.val = val
self.children = children
class Solution:
    def postorder(self, root: 'Node') -> List[int]:
        """Iterative post-order traversal of an N-ary tree.

        Visits nodes in root/right/left order using an explicit stack and
        reverses the result, which yields left/right/root -- i.e. post-order.
        """
        if root is None:
            return []
        visited = []
        pending = [root]
        while pending:
            node = pending.pop()
            visited.append(node.val)
            # Push children left-to-right; popping reverses them, and the
            # final reversal restores left-to-right post-order.
            pending.extend(node.children or [])
        return list(reversed(visited))
class Solution:
#https://leetcode-cn.com/problems/n-ary-tree-postorder-traversal/solution/python3-fei-di-gui-jian-ming-shi-xian-by-ma-wen-2/ | 37.043478 | 124 | 0.525822 |
34016fa905235d8cb210eb9d7bbcb3c4fd4dd8b6 | 10,060 | py | Python | tests/components/hassio/test_ingress.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | null | null | null | tests/components/hassio/test_ingress.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | null | null | null | tests/components/hassio/test_ingress.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | null | null | null | """The tests for the hassio component."""
from http import HTTPStatus
from unittest.mock import MagicMock, patch
from aiohttp.hdrs import X_FORWARDED_FOR, X_FORWARDED_HOST, X_FORWARDED_PROTO
import pytest
from homeassistant.components.hassio.const import X_AUTH_TOKEN
@pytest.mark.parametrize(
    "build_type",
    [
        ("a3_vl", "test/beer/ping?index=1"),
        ("core", "index.html"),
        ("local", "panel/config"),
        ("jk_921", "editor.php?idx=3&ping=5"),
        ("fsadjf10312", ""),
    ],
)
async def test_ingress_request_get(hassio_client, build_type, aioclient_mock):
    """GET requests are proxied with the auth token and X-Forwarded headers."""
    aioclient_mock.get(
        f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}",
        text="test",
    )

    resp = await hassio_client.get(
        f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
        headers={"X-Test-Header": "beer"},
    )

    # Check we got right response
    assert resp.status == HTTPStatus.OK
    body = await resp.text()
    assert body == "test"

    # Check we forwarded command
    assert len(aioclient_mock.mock_calls) == 1
    assert aioclient_mock.mock_calls[-1][3][X_AUTH_TOKEN] == "123456"
    assert (
        aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
        == f"/api/hassio_ingress/{build_type[0]}"
    )
    assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
@pytest.mark.parametrize(
    "build_type",
    [
        ("a3_vl", "test/beer/ping?index=1"),
        ("core", "index.html"),
        ("local", "panel/config"),
        ("jk_921", "editor.php?idx=3&ping=5"),
        ("fsadjf10312", ""),
    ],
)
async def test_ingress_request_post(hassio_client, build_type, aioclient_mock):
    """POST requests are proxied with the auth token and X-Forwarded headers."""
    aioclient_mock.post(
        f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}",
        text="test",
    )

    resp = await hassio_client.post(
        f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
        headers={"X-Test-Header": "beer"},
    )

    # Check we got right response
    assert resp.status == HTTPStatus.OK
    body = await resp.text()
    assert body == "test"

    # Check we forwarded command
    assert len(aioclient_mock.mock_calls) == 1
    assert aioclient_mock.mock_calls[-1][3][X_AUTH_TOKEN] == "123456"
    assert (
        aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
        == f"/api/hassio_ingress/{build_type[0]}"
    )
    assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
@pytest.mark.parametrize(
    "build_type",
    [
        ("a3_vl", "test/beer/ping?index=1"),
        ("core", "index.html"),
        ("local", "panel/config"),
        ("jk_921", "editor.php?idx=3&ping=5"),
        ("fsadjf10312", ""),
    ],
)
async def test_ingress_request_put(hassio_client, build_type, aioclient_mock):
    """PUT requests are proxied with the auth token and X-Forwarded headers."""
    aioclient_mock.put(
        f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}",
        text="test",
    )

    resp = await hassio_client.put(
        f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
        headers={"X-Test-Header": "beer"},
    )

    # Check we got right response
    assert resp.status == HTTPStatus.OK
    body = await resp.text()
    assert body == "test"

    # Check we forwarded command
    assert len(aioclient_mock.mock_calls) == 1
    assert aioclient_mock.mock_calls[-1][3][X_AUTH_TOKEN] == "123456"
    assert (
        aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
        == f"/api/hassio_ingress/{build_type[0]}"
    )
    assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
@pytest.mark.parametrize(
    "build_type",
    [
        ("a3_vl", "test/beer/ping?index=1"),
        ("core", "index.html"),
        ("local", "panel/config"),
        ("jk_921", "editor.php?idx=3&ping=5"),
        ("fsadjf10312", ""),
    ],
)
async def test_ingress_request_delete(hassio_client, build_type, aioclient_mock):
    """DELETE requests are proxied with the auth token and X-Forwarded headers."""
    aioclient_mock.delete(
        f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}",
        text="test",
    )

    resp = await hassio_client.delete(
        f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
        headers={"X-Test-Header": "beer"},
    )

    # Check we got right response
    assert resp.status == HTTPStatus.OK
    body = await resp.text()
    assert body == "test"

    # Check we forwarded command
    assert len(aioclient_mock.mock_calls) == 1
    assert aioclient_mock.mock_calls[-1][3][X_AUTH_TOKEN] == "123456"
    assert (
        aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
        == f"/api/hassio_ingress/{build_type[0]}"
    )
    assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
@pytest.mark.parametrize(
    "build_type",
    [
        ("a3_vl", "test/beer/ping?index=1"),
        ("core", "index.html"),
        ("local", "panel/config"),
        ("jk_921", "editor.php?idx=3&ping=5"),
        ("fsadjf10312", ""),
    ],
)
async def test_ingress_request_patch(hassio_client, build_type, aioclient_mock):
    """PATCH requests are proxied with the auth token and X-Forwarded headers."""
    aioclient_mock.patch(
        f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}",
        text="test",
    )

    resp = await hassio_client.patch(
        f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
        headers={"X-Test-Header": "beer"},
    )

    # Check we got right response
    assert resp.status == HTTPStatus.OK
    body = await resp.text()
    assert body == "test"

    # Check we forwarded command
    assert len(aioclient_mock.mock_calls) == 1
    assert aioclient_mock.mock_calls[-1][3][X_AUTH_TOKEN] == "123456"
    assert (
        aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
        == f"/api/hassio_ingress/{build_type[0]}"
    )
    assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
@pytest.mark.parametrize(
    "build_type",
    [
        ("a3_vl", "test/beer/ping?index=1"),
        ("core", "index.html"),
        ("local", "panel/config"),
        ("jk_921", "editor.php?idx=3&ping=5"),
        ("fsadjf10312", ""),
    ],
)
async def test_ingress_request_options(hassio_client, build_type, aioclient_mock):
    """OPTIONS requests are proxied with the auth token and X-Forwarded headers."""
    aioclient_mock.options(
        f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}",
        text="test",
    )

    resp = await hassio_client.options(
        f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
        headers={"X-Test-Header": "beer"},
    )

    # Check we got right response
    assert resp.status == HTTPStatus.OK
    body = await resp.text()
    assert body == "test"

    # Check we forwarded command
    assert len(aioclient_mock.mock_calls) == 1
    assert aioclient_mock.mock_calls[-1][3][X_AUTH_TOKEN] == "123456"
    assert (
        aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
        == f"/api/hassio_ingress/{build_type[0]}"
    )
    assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
@pytest.mark.parametrize(
    "build_type",
    [
        ("a3_vl", "test/beer/ws"),
        ("core", "ws.php"),
        ("local", "panel/config/stream"),
        ("jk_921", "hulk"),
        ("demo", "ws/connection?id=9&token=SJAKWS283"),
    ],
)
async def test_ingress_websocket(hassio_client, build_type, aioclient_mock):
    """WebSocket upgrades are proxied with the auth token and X-Forwarded headers."""
    aioclient_mock.get(f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}")

    # Ignore the connection error: the mock can't provide a full websocket IO
    # infrastructure; we only verify the forwarded request below.
    await hassio_client.ws_connect(
        f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
        headers={"X-Test-Header": "beer"},
    )

    # Check we forwarded command
    assert len(aioclient_mock.mock_calls) == 1
    assert aioclient_mock.mock_calls[-1][3][X_AUTH_TOKEN] == "123456"
    assert (
        aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
        == f"/api/hassio_ingress/{build_type[0]}"
    )
    assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
    assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
async def test_ingress_missing_peername(hassio_client, aioclient_mock, caplog):
    """Test handling of a transport with no peername (X-Forwarded-For unavailable)."""
    aioclient_mock.get(
        "http://127.0.0.1/ingress/lorem/ipsum",
        text="test",
    )

    def get_extra_info(_):
        # Simulate a transport that cannot report the peer address.
        return None

    with patch(
        "aiohttp.web_request.BaseRequest.transport",
        return_value=MagicMock(),
    ) as transport_mock:
        transport_mock.get_extra_info = get_extra_info
        resp = await hassio_client.get(
            "/api/hassio_ingress/lorem/ipsum",
            headers={"X-Test-Header": "beer"},
        )

        assert "Can't set forward_for header, missing peername" in caplog.text

        # Check we got right response
        assert resp.status == HTTPStatus.BAD_REQUEST
e1987b427e86825b40f7e679371c20a2322f3d18 | 3,992 | py | Python | djangox_project/settings.py | MantasReika/djangox | 4b073546f381f97ce9adef3e2b65d5ed4af5981c | [
"MIT"
] | null | null | null | djangox_project/settings.py | MantasReika/djangox | 4b073546f381f97ce9adef3e2b65d5ed4af5981c | [
"MIT"
] | 2 | 2020-02-12T00:52:51.000Z | 2020-06-05T22:01:57.000Z | djangox_project/settings.py | MantasReika/djangox | 4b073546f381f97ce9adef3e2b65d5ed4af5981c | [
"MIT"
] | null | null | null | """
Django settings for djangox_project project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '43)%4yx)aa@a=+_c(fn&kf3g29xax+=+a&key9i=!98zyim=8j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is only valid while DEBUG is True; add real hostnames for prod.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites', # new
    # Third-party
    'allauth', # new
    'allauth.account', # new
    'crispy_forms', # new
    # Local
    'users',
    'pages',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangox_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in a top-level "templates" directory.
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'djangox_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static")
]
# Custom user model defined in the local "users" app.
AUTH_USER_MODEL = 'users.CustomUser'
# Console backend writes outgoing email to stdout (development only).
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)
SITE_ID = 1
# django-allauth: email-only sign-up/login (no username field).
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_SIGNUP_FORM_CLASS = "users.forms.CustomUserCreationForm"
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_UNIQUE_EMAIL = True
CRISPY_TEMPLATE_PACK = 'bootstrap4'
| 25.265823 | 91 | 0.709419 |
b480e9d398ba29f95739eb8d497967aca6ec6cb3 | 3,870 | py | Python | scipy/linalg/tests/test_matmul_toeplitz.py | Ennosigaeon/scipy | 2d872f7cf2098031b9be863ec25e366a550b229c | [
"BSD-3-Clause"
] | 9,095 | 2015-01-02T18:24:23.000Z | 2022-03-31T20:35:31.000Z | scipy/linalg/tests/test_matmul_toeplitz.py | Ennosigaeon/scipy | 2d872f7cf2098031b9be863ec25e366a550b229c | [
"BSD-3-Clause"
] | 11,500 | 2015-01-01T01:15:30.000Z | 2022-03-31T23:07:35.000Z | scipy/linalg/tests/test_matmul_toeplitz.py | Ennosigaeon/scipy | 2d872f7cf2098031b9be863ec25e366a550b229c | [
"BSD-3-Clause"
] | 5,838 | 2015-01-05T11:56:42.000Z | 2022-03-31T23:21:19.000Z | """Test functions for linalg.matmul_toeplitz function
"""
import numpy as np
from scipy.linalg import toeplitz, matmul_toeplitz
from pytest import raises as assert_raises
from numpy.testing import assert_allclose
class TestMatmulToeplitz:
    """Tests for scipy.linalg.matmul_toeplitz.

    Each case builds random first-column/first-row data, multiplies a
    vector or matrix through matmul_toeplitz, and compares against the
    dense toeplitz(c, r) @ x product (see ``do`` below).
    """
    def setup_method(self):
        # Fixed seed keeps every test run deterministic.
        self.rng = np.random.RandomState(42)
        self.tolerance = 1.5e-13
    def test_real(self):
        """Real-valued inputs over a range of sizes/shapes, run via ``do``."""
        cases = []
        # Degenerate 1x1 system.
        n = 1
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n)
        x = self.rng.normal(size=(n, 1))
        cases.append((x, c, r, False))
        n = 2
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n)
        x = self.rng.normal(size=(n, 1))
        cases.append((x, c, r, False))
        # Odd size with check_finite enabled.
        n = 101
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n)
        x = self.rng.normal(size=(n, 1))
        cases.append((x, c, r, True))
        n = 1000
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n)
        x = self.rng.normal(size=(n, 1))
        cases.append((x, c, r, False))
        # Multiple right-hand-side columns (random count in [1, 9]).
        n = 100
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n)
        x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
        cases.append((x, c, r, False))
        # Column-vector-shaped c and r inputs.
        n = 100
        c = self.rng.normal(size=(n, 1))
        r = self.rng.normal(size=(n, 1))
        x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
        cases.append((x, c, r, True))
        # r=None (symmetric-by-default row) and an explicit workers=-1.
        n = 100
        c = self.rng.normal(size=(n, 1))
        r = None
        x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
        cases.append((x, c, r, True, -1))
        # 1-D x input.
        n = 100
        c = self.rng.normal(size=(n, 1))
        r = None
        x = self.rng.normal(size=n)
        cases.append((x, c, r, False))
        # Non-square Toeplitz matrices (len(r) != len(c)).
        n = 101
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n-27)
        x = self.rng.normal(size=(n-27, 1))
        cases.append((x, c, r, True))
        n = 100
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n//4)
        x = self.rng.normal(size=(n//4, self.rng.randint(1, 10)))
        cases.append((x, c, r, True))
        [self.do(*i) for i in cases]
    def test_complex(self):
        """Complex-valued inputs, square and non-square."""
        n = 127
        c = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
        r = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
        x = self.rng.normal(size=(n, 3)) + self.rng.normal(size=(n, 3))*1j
        self.do(x, c, r, False)
        n = 100
        c = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
        r = self.rng.normal(size=(n//2, 1)) +\
            self.rng.normal(size=(n//2, 1))*1j
        x = self.rng.normal(size=(n//2, 3)) +\
            self.rng.normal(size=(n//2, 3))*1j
        self.do(x, c, r, False)
    def test_exceptions(self):
        """Shape mismatches between (c, r) and x must raise ValueError."""
        n = 100
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=2*n)
        x = self.rng.normal(size=n)
        assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
        n = 100
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n)
        x = self.rng.normal(size=n-1)
        assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
        n = 100
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n//2)
        x = self.rng.normal(size=n//2-1)
        assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
    # For toeplitz matrices, matmul_toeplitz() should be equivalent to @.
    def do(self, x, c, r=None, check_finite=False, workers=None):
        if r is None:
            actual = matmul_toeplitz(c, x, check_finite, workers)
        else:
            # NOTE(review): `workers` is not forwarded in this branch, so
            # the (c, r) cases never exercise it — confirm intentional.
            actual = matmul_toeplitz((c, r), x, check_finite)
        desired = toeplitz(c, r) @ x
        assert_allclose(actual, desired,
                        rtol=self.tolerance, atol=self.tolerance)
| 30.714286 | 74 | 0.535401 |
111a3fdfaab229053566eb626313c6a482413ddb | 248 | py | Python | temp.py | suvarnak/Pytorch-Project-DAFSL | 8539b693a56a219cdc5f0549c146342880ee2447 | [
"MIT"
] | null | null | null | temp.py | suvarnak/Pytorch-Project-DAFSL | 8539b693a56a219cdc5f0549c146342880ee2447 | [
"MIT"
] | null | null | null | temp.py | suvarnak/Pytorch-Project-DAFSL | 8539b693a56a219cdc5f0549c146342880ee2447 | [
"MIT"
] | null | null | null | class Palindrome:
@staticmethod
def is_palindrome(word):
reverse = word[::-1]
if reverse.upper() == word.upper():
return True
else:
return False
# Read one word from standard input for the palindrome check below.
word = input()
print(Palindrome.is_palindrome(word)) | 24.8 | 42 | 0.596774 |
b903c6fbf7ea2ada1d68f54f534d047b5edbc267 | 5,028 | py | Python | tests/packages/test_dependency.py | hroncok/poetry-core | bd3ef48d2873eb8a81600d011712edbbea122e58 | [
"MIT"
] | null | null | null | tests/packages/test_dependency.py | hroncok/poetry-core | bd3ef48d2873eb8a81600d011712edbbea122e58 | [
"MIT"
] | null | null | null | tests/packages/test_dependency.py | hroncok/poetry-core | bd3ef48d2873eb8a81600d011712edbbea122e58 | [
"MIT"
] | null | null | null | import pytest
from poetry.core.packages import Dependency
from poetry.core.packages import Package
def test_accepts():
    """A dependency accepts a package whose version satisfies its constraint."""
    dep = Dependency("A", "^1.0")
    pkg = Package("A", "1.4")

    assert dep.accepts(pkg)
def test_accepts_prerelease():
    """Pre-release versions are accepted when the dependency allows them."""
    dep = Dependency("A", "^1.0", allows_prereleases=True)

    assert dep.accepts(Package("A", "1.4-beta.1"))
def test_accepts_python_versions():
    """Compatible python-version markers do not block acceptance."""
    dep = Dependency("A", "^1.0")
    dep.python_versions = "^3.6"

    pkg = Package("A", "1.4")
    pkg.python_versions = "~3.6"

    assert dep.accepts(pkg)
def test_accepts_fails_with_different_names():
    """A package with a different name is rejected even if the version fits."""
    dep = Dependency("A", "^1.0")

    assert not dep.accepts(Package("B", "1.4"))
def test_accepts_fails_with_version_mismatch():
    """A package whose version violates the constraint is rejected.

    The package shares the dependency's name so the rejection can only
    come from the version mismatch (1.4 is outside ~1.0). Previously the
    name also differed ("B"), which made the test pass for the wrong
    reason — name mismatch alone causes rejection (see
    test_accepts_fails_with_different_names).
    """
    dependency = Dependency("A", "~1.0")
    package = Package("A", "1.4")

    assert not dependency.accepts(package)
def test_accepts_fails_with_prerelease_mismatch():
    """A pre-release is rejected when the dependency does not allow them.

    The package shares the dependency's name and its version (1.4-beta.1)
    lies inside ^1.0, so the rejection is caused solely by the pre-release
    flag. Previously the package was named "B", so the name mismatch alone
    would have made the assertion pass regardless of pre-release handling.
    """
    dependency = Dependency("A", "^1.0")
    package = Package("A", "1.4-beta.1")

    assert not dependency.accepts(package)
def test_accepts_fails_with_python_versions_mismatch():
    """Incompatible python-version markers cause rejection.

    Name ("A") and version (1.4 within ^1.0) match on purpose so that the
    only incompatibility is python_versions (~3.5 vs ^3.6). Previously the
    package was named "B", so the test passed for the wrong reason.
    """
    dependency = Dependency("A", "^1.0")
    dependency.python_versions = "^3.6"

    package = Package("A", "1.4")
    package.python_versions = "~3.5"

    assert not dependency.accepts(package)
def test_to_pep_508():
    """PEP 508 rendering of a caret constraint, with and without python markers."""
    dep = Dependency("Django", "^1.23")
    assert dep.to_pep_508() == "Django (>=1.23,<2.0)"

    dep = Dependency("Django", "^1.23")
    dep.python_versions = "~2.7 || ^3.6"
    expected = (
        "Django (>=1.23,<2.0); "
        'python_version >= "2.7" and python_version < "2.8" '
        'or python_version >= "3.6" and python_version < "4.0"'
    )
    assert dep.to_pep_508() == expected
def test_to_pep_508_wilcard():
    """A wildcard constraint renders as the bare package name."""
    # NOTE(review): "wilcard" looks like a typo for "wildcard"; kept
    # because renaming would change the public test id.
    assert Dependency("Django", "*").to_pep_508() == "Django"
def test_to_pep_508_in_extras():
    """`extra == ...` clauses accumulate and combine with python markers."""
    dep = Dependency("Django", "^1.23")

    dep.in_extras.append("foo")
    assert dep.to_pep_508() == 'Django (>=1.23,<2.0); extra == "foo"'

    dep.in_extras.append("bar")
    assert dep.to_pep_508() == (
        'Django (>=1.23,<2.0); extra == "foo" or extra == "bar"'
    )

    dep.python_versions = "~2.7 || ^3.6"
    expected = (
        "Django (>=1.23,<2.0); "
        "("
        'python_version >= "2.7" and python_version < "2.8" '
        'or python_version >= "3.6" and python_version < "4.0"'
        ") "
        'and (extra == "foo" or extra == "bar")'
    )
    assert dep.to_pep_508() == expected
def test_to_pep_508_with_single_version_excluded():
    """An exclusion constraint renders with `!=`."""
    assert Dependency("foo", "!=1.2.3").to_pep_508() == "foo (!=1.2.3)"
@pytest.mark.parametrize(
    "python_versions, marker",
    [
        (">=3.5,<3.5.4", 'python_version >= "3.5" and python_full_version < "3.5.4"'),
        (">=3.5.4,<3.6", 'python_full_version >= "3.5.4" and python_version < "3.6"'),
        ("<3.5.4", 'python_full_version < "3.5.4"'),
        (">=3.5.4", 'python_full_version >= "3.5.4"'),
        ("== 3.5.4", 'python_full_version == "3.5.4"'),
    ],
)
def test_to_pep_508_with_patch_python_version(python_versions, marker):
    """Patch-level python constraints render as `python_full_version` markers."""
    dep = Dependency("Django", "^1.23")
    dep.python_versions = python_versions

    assert dep.to_pep_508() == "Django (>=1.23,<2.0); {}".format(marker)
    assert str(dep.marker) == marker
def test_to_pep_508_tilde():
    """Tilde constraints pin the next minor (or patch) release boundary."""
    cases = [
        ("~1.2.3", "foo (>=1.2.3,<1.3.0)"),
        ("~1.2", "foo (>=1.2,<1.3)"),
        ("~0.2.3", "foo (>=0.2.3,<0.3.0)"),
        ("~0.2", "foo (>=0.2,<0.3)"),
    ]
    for constraint, expected in cases:
        assert Dependency("foo", constraint).to_pep_508() == expected
def test_to_pep_508_caret():
    """Caret constraints pin the next breaking-change boundary."""
    cases = [
        ("^1.2.3", "foo (>=1.2.3,<2.0.0)"),
        ("^1.2", "foo (>=1.2,<2.0)"),
        ("^0.2.3", "foo (>=0.2.3,<0.3.0)"),
        ("^0.2", "foo (>=0.2,<0.3)"),
    ]
    for constraint, expected in cases:
        assert Dependency("foo", constraint).to_pep_508() == expected
def test_to_pep_508_combination():
    """Compound constraints keep both the range and the exclusion."""
    assert Dependency("foo", "^1.2,!=1.3.5").to_pep_508() == "foo (>=1.2,<2.0,!=1.3.5)"
    assert Dependency("foo", "~1.2,!=1.2.5").to_pep_508() == "foo (>=1.2,<1.3,!=1.2.5)"
def test_complete_name():
    """`complete_name` appends sorted extras in brackets."""
    assert Dependency("foo", ">=1.2.3").complete_name == "foo"

    dep = Dependency("foo", ">=1.2.3", extras=["baz", "bar"])
    assert dep.complete_name == "foo[bar,baz]"
| 26.887701 | 86 | 0.615553 |
d0221cdd657afeae4fadb9bc126dfec30845205e | 8,671 | py | Python | docs/conf.py | petrolpost/dicompyler-core | 887d41800630ede8e9118ce873a46130c83a4237 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | petrolpost/dicompyler-core | 887d41800630ede8e9118ce873a46130c83a4237 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | petrolpost/dicompyler-core | 887d41800630ede8e9118ce873a46130c83a4237 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# dicompyler-core documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import dicompylercore
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
              'sphinx.ext.napoleon']
# Heavy/optional dependencies are mocked so autodoc can import the package
# without them being installed on the docs build host.
# NOTE(review): 'numpy', 'dicom' and 'pydicom' are listed twice — the
# duplicates are harmless but could be removed.
autodoc_mock_imports = [
    'numpy', 'dicom', 'pydicom', 'pydicom', 'dicom',
    'PIL', 'numpy', 'matplotlib', 'skimage', 'scipy']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dicompyler-core'
copyright = u'2016-2020, Aditya Panchal'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = dicompylercore.__version__
# The full version, including alpha/beta/rc tags.
release = dicompylercore.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dicompyler-coredoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'dicompyler-core.tex',
     u'dicompyler-core Documentation',
     u'Aditya Panchal', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'dicompyler-core',
     u'dicompyler-core Documentation',
     [u'Aditya Panchal'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'dicompyler-core',
     u'dicompyler-core Documentation',
     u'Aditya Panchal',
     'dicompyler-core',
     'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.857651 | 76 | 0.714796 |
e1d06db681644ce2cb6d8ba7c23283e040156f74 | 13,786 | py | Python | src/python/turicreate/toolkits/activity_classifier/_tf_model_architecture.py | sirahd/turicreate | 386efa4eb5033573ee9120704a8c88a9a6151133 | [
"BSD-3-Clause"
] | null | null | null | src/python/turicreate/toolkits/activity_classifier/_tf_model_architecture.py | sirahd/turicreate | 386efa4eb5033573ee9120704a8c88a9a6151133 | [
"BSD-3-Clause"
] | 3 | 2022-02-15T04:42:24.000Z | 2022-03-12T01:05:15.000Z | src/python/turicreate/toolkits/activity_classifier/_tf_model_architecture.py | sirahd/turicreate | 386efa4eb5033573ee9120704a8c88a9a6151133 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright © 2019 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import tensorflow as _tf
from .._tf_model import TensorFlowModel
import turicreate.toolkits._tf_utils as _utils
import numpy as _np
# Constant parameters for the neural network
CONV_H = 64  # output channels of the 1-D convolution layer
LSTM_H = 200  # hidden-state size of the LSTM cell
DENSE_H = 128  # width of the first dense layer
class ActivityTensorFlowModel(TensorFlowModel):
    def __init__(self, net_params, batch_size, num_features, num_classes, prediction_window, seq_len):
        """Build the TF1 graph (conv -> LSTM -> dense -> softmax), create a
        session, and load the CoreML-format weights from `net_params`.

        Parameters
        ----------
        net_params : dict
            Weight name -> SharedFloatArray; converted to numpy in place.
        batch_size : int
            Fixed batch size baked into the reshape/zero-state ops.
        num_features : int
            Number of sensor channels per time step.
        num_classes : int
            Number of activity classes.
        prediction_window : int
            Time steps aggregated into one prediction (conv stride).
        seq_len : int
            Predictions per sequence.
        """
        # Convert shared buffers to numpy in place (net_params is mutated).
        for key in net_params.keys():
            net_params[key] = _utils.convert_shared_float_array_to_numpy(net_params[key])
        # Suppresses verbosity to only errors
        _tf.compat.v1.logging.set_verbosity(_tf.compat.v1.logging.ERROR)
        _tf.reset_default_graph()
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.seq_len = seq_len
        # Graph inputs (placeholders fed by train()/predict()).
        self.data = _tf.compat.v1.placeholder(_tf.float32, [None, prediction_window*seq_len, num_features])
        self.weight = _tf.compat.v1.placeholder(_tf.float32, [None, seq_len, 1])
        self.target = _tf.compat.v1.placeholder(_tf.int32, [None, seq_len, 1])
        self.is_training = _tf.compat.v1.placeholder(_tf.bool)
        # Reshaping weights
        reshaped_weight = _tf.reshape(self.weight, [self.batch_size, seq_len])
        # One hot encoding target
        reshaped_target = _tf.reshape(self.target, [self.batch_size, seq_len])
        one_hot_target = _tf.one_hot(reshaped_target, depth=self.num_classes, axis=-1)
        # Trainable weights, zero-initialized; real values are assigned
        # afterwards by load_weights().
        self.weights = {
            'conv_weight' : _tf.Variable(_tf.zeros([prediction_window, num_features, CONV_H]), name='conv_weight'),
            'dense0_weight': _tf.Variable(_tf.zeros([LSTM_H, DENSE_H]), name='dense0_weight'),
            'dense1_weight' : _tf.Variable(_tf.zeros([DENSE_H, self.num_classes]), name='dense1_weight')
        }
        # Biases
        self.biases = {
            'conv_bias' : _tf.Variable(_tf.zeros([CONV_H]), name='conv_bias'),
            'dense0_bias': _tf.Variable(_tf.zeros([DENSE_H]), name='dense0_bias'),
            'dense1_bias' : _tf.Variable(_tf.zeros([num_classes]), name='dense1_bias')
        }
        # Convolution: stride == prediction_window collapses each window
        # to one time step.
        conv = _tf.nn.conv1d(self.data, self.weights['conv_weight'], stride=prediction_window, padding='SAME')
        conv = _tf.nn.bias_add(conv, self.biases['conv_bias'])
        conv = _tf.nn.relu(conv)
        dropout = _tf.layers.dropout(conv, rate=0.2, training=self.is_training)
        # Long Short-Term Memory, initialized from the converted CoreML kernel.
        lstm = self.load_lstm_weights_params(net_params)
        cells = _tf.nn.rnn_cell.LSTMCell(num_units=LSTM_H, reuse=_tf.AUTO_REUSE, forget_bias=0.0,
            initializer=_tf.initializers.constant(lstm, verify_shape=True))
        init_state = cells.zero_state(batch_size, _tf.float32)
        # final_state is not used further; only the per-step outputs feed the dense head.
        rnn_outputs, final_state = _tf.nn.dynamic_rnn(cells, dropout, initial_state=init_state)
        # Dense head with batch norm initialized from the CoreML parameters.
        dense = _tf.reshape(rnn_outputs, (-1, LSTM_H))
        dense = _tf.add(_tf.matmul(dense, self.weights['dense0_weight']), self.biases['dense0_bias'])
        dense = _tf.layers.batch_normalization(inputs=dense,
            beta_initializer=_tf.initializers.constant(net_params['bn_beta'], verify_shape=True),
            gamma_initializer=_tf.initializers.constant(net_params['bn_gamma'], verify_shape=True),
            moving_mean_initializer=_tf.initializers.constant(net_params['bn_running_mean'], verify_shape=True),
            moving_variance_initializer=_tf.initializers.constant(net_params['bn_running_var'], verify_shape=True), training=self.is_training )
        dense = _tf.nn.relu(dense)
        dense = _tf.layers.dropout(dense, rate=0.5, training=self.is_training)
        # Output logits reshaped back to (batch, seq_len, num_classes).
        out = _tf.add(_tf.matmul(dense, self.weights['dense1_weight']), self.biases['dense1_bias'])
        out = _tf.reshape(out, (-1, self.seq_len, self.num_classes))
        self.probs = _tf.nn.softmax(out)
        # Per-sequence weight totals used to normalize the loss.
        seq_sum_weights = _tf.reduce_sum(reshaped_weight, axis=1)
        binary_seq_sum_weights = _tf.reduce_sum(_tf.cast(seq_sum_weights > 0, dtype=_tf.float32))
        # Weighted cross-entropy averaged per sequence, then over
        # non-empty sequences (1e-5 guards against division by zero).
        loss = _tf.losses.softmax_cross_entropy(logits=out, onehot_labels=one_hot_target, weights=reshaped_weight, reduction=_tf.losses.Reduction.NONE)
        self.loss_per_seq = _tf.reduce_sum(loss, axis=1) / (seq_sum_weights + 1e-5)
        self.loss_op = _tf.reduce_sum(self.loss_per_seq) / (binary_seq_sum_weights + 1e-5)
        # Optimizer; batch-norm update ops are grouped with the train op so
        # moving statistics are refreshed each step.
        update_ops = _tf.get_collection(_tf.GraphKeys.UPDATE_OPS)
        self.set_learning_rate(1e-3)
        train_op = self.optimizer.minimize(self.loss_op)
        self.train_op = _tf.group([train_op, update_ops])
        # Session
        self.sess = _tf.compat.v1.Session()
        # Initialize all variables
        self.sess.run(_tf.compat.v1.global_variables_initializer())
        self.sess.run(_tf.compat.v1.local_variables_initializer())
        self.load_weights(net_params)
def load_lstm_weights_params(self, net_params):
"""
Function to load lstm weights from the C++ implementation into TensorFlow
Parameters
----------
net_params: Dictionary
Dict with weights from the C++ implementation and its names
Returns
-------
lstm: lstm weights in Tensorflow Format
"""
i2h_i = net_params['lstm_i2h_i_weight']
i2h_f = net_params['lstm_i2h_f_weight']
i2h_c = net_params['lstm_i2h_c_weight']
i2h_o = net_params['lstm_i2h_o_weight']
h2h_i = net_params['lstm_h2h_i_weight']
h2h_f = net_params['lstm_h2h_f_weight']
h2h_c = net_params['lstm_h2h_c_weight']
h2h_o = net_params['lstm_h2h_o_weight']
lstm = _utils.convert_lstm_weight_coreml_to_tf(i2h_i, i2h_c, i2h_f, i2h_o, h2h_i, h2h_c, h2h_f, h2h_o)
return lstm
    def load_weights(self, net_params):
        """
        Function to load weights from the C++ implementation into TensorFlow

        Parameters
        ----------
        net_params: Dictionary
            Dict with weights from the C++ implementation and its names.
            Conv/dense entries are converted to TF layout *in place*.
        """
        # Conv/dense weights need a layout conversion before assignment;
        # bias vectors are assigned as-is. Variables are looked up by the
        # names given to them in __init__ (tensor name = var name + ":0").
        for key in net_params.keys():
            if key in self.weights.keys():
                if key.startswith('conv'):
                    net_params[key] = _utils.convert_conv1d_coreml_to_tf(net_params[key])
                    self.sess.run(_tf.assign(_tf.get_default_graph().get_tensor_by_name(key+":0"), net_params[key]))
                elif key.startswith('dense'):
                    net_params[key] = _utils.convert_dense_coreml_to_tf(net_params[key])
                    self.sess.run(_tf.assign(_tf.get_default_graph().get_tensor_by_name(key+":0"), net_params[key] ))
            elif key in self.biases.keys():
                self.sess.run(_tf.assign(_tf.get_default_graph().get_tensor_by_name(key+":0"), net_params[key]))
        # The four hidden-to-hidden gate biases are fused into the single
        # bias vector of TF's LSTMCell ('rnn/lstm_cell/bias').
        h2h_i_bias = net_params['lstm_h2h_i_bias']
        h2h_c_bias = net_params['lstm_h2h_c_bias']
        h2h_f_bias = net_params['lstm_h2h_f_bias']
        h2h_o_bias = net_params['lstm_h2h_o_bias']
        lstm_bias = _utils.convert_lstm_bias_coreml_to_tf(h2h_i_bias, h2h_c_bias, h2h_f_bias, h2h_o_bias)
        self.sess.run(_tf.assign(_tf.get_default_graph().get_tensor_by_name('rnn/lstm_cell/bias:0'), lstm_bias))
def train(self, feed_dict):
"""
Run session for training with new batch of data (inputs, labels and weights)
Parameters
----------
feed_dict: Dictionary
Dictionary to store a batch of input data, corresponding labels and weights. This is currently
passed from the ac_data_iterator.cpp file when a new batch of data is sent.
Returns
-------
result: Dictionary
Loss per batch and probabilities
"""
for key in feed_dict.keys():
feed_dict[key] = _utils.convert_shared_float_array_to_numpy(feed_dict[key])
feed_dict[key] = _np.squeeze(feed_dict[key], axis=1)
feed_dict[key] = _np.reshape(feed_dict[key], (feed_dict[key].shape[0], feed_dict[key].shape[1], feed_dict[key].shape[2]))
_, loss, probs = self.sess.run([self.train_op, self.loss_per_seq, self.probs],
feed_dict={self.data : feed_dict['input'], self.target : feed_dict['labels'], self.weight : feed_dict['weights'], self.is_training : True})
prob = _np.array(probs)
probabilities = _np.reshape(prob, (prob.shape[0], prob.shape[1]*prob.shape[2]))
result = {'loss' : _np.array(loss), 'output': probabilities }
return result
    def predict(self, feed_dict):
        """
        Run session for predicting with new batch of validation data (inputs, labels and weights) as well as test data (inputs)

        Parameters
        ----------
        feed_dict: Dictionary
            Dictionary to store a batch of input data, corresponding labels and weights. This is currently
            passed from the ac_data_iterator.cpp file when a new batch of data is sent.
            A single-key dict ({'input': ...}) is treated as test data;
            a dict that also has 'labels' and 'weights' as validation data.
            Values are converted to numpy arrays *in place*.

        Returns
        -------
        result: Dictionary
            Loss per batch and probabilities (in case of validation data)
            Probabilities (in case only inputs are provided)
        """
        # Normalize each SharedFloatArray to a 3-D numpy batch in place.
        for key in feed_dict.keys():
            feed_dict[key] = _utils.convert_shared_float_array_to_numpy(feed_dict[key])
            feed_dict[key] = _np.squeeze(feed_dict[key], axis=1)
            feed_dict[key] = _np.reshape(feed_dict[key], (feed_dict[key].shape[0], feed_dict[key].shape[1], feed_dict[key].shape[2]))
        # Test data: only inputs present, so no loss can be computed.
        if len(feed_dict.keys()) == 1:
            probs = self.sess.run(self.probs,
                feed_dict={self.data : feed_dict['input'], self.is_training: False})
            prob = _np.array(probs)
            probabilities = _np.reshape(prob, (prob.shape[0], prob.shape[1]*prob.shape[2]))
            result = { 'output' : probabilities}
        else:
            # Validation data: fetch per-sequence loss alongside probabilities.
            loss, probs= self.sess.run([self.loss_per_seq, self.probs],
                feed_dict={self.data : feed_dict['input'], self.target : feed_dict['labels'], self.weight : feed_dict['weights'], self.is_training: False})
            prob = _np.array(probs)
            probabilities = _np.reshape(prob, (prob.shape[0], prob.shape[1]*prob.shape[2]))
            result = {'loss' : _np.array(loss), 'output': probabilities }
        return result
def export_weights(self):
    """
    Function to store TensorFlow weights back to into a dict in CoreML format to be used
    by the C++ implementation
    Returns
    -------
    tf_export_params: Dictionary
        Dictionary of weights from TensorFlow stored as {weight_name: weight_value}
    """
    tf_export_params = {}
    # Pass 1: trainable variables, converted layer-by-layer into CoreML layout.
    tvars = _tf.trainable_variables()
    tvars_vals = self.sess.run(tvars)
    for var, val in zip(tvars, tvars_vals):
        if 'weight' in var.name:
            # Conv / dense kernels keep their TF name (minus the ':0' suffix).
            if var.name.startswith('conv'):
                tf_export_params[var.name.split(':')[0]] = _utils.convert_conv1d_tf_to_coreml(val)
            elif var.name.startswith('dense'):
                tf_export_params[var.name.split(':')[0]] = _utils.convert_dense_tf_to_coreml(val)
        elif var.name.startswith('rnn/lstm_cell/kernel'):
            # TF fuses the LSTM kernel into one matrix; split it into the eight
            # CoreML input-to-hidden / hidden-to-hidden gate weights.
            i2h_i, i2h_c, i2h_f, i2h_o, h2h_i, h2h_c, h2h_f, h2h_o = _utils.convert_lstm_weight_tf_to_coreml(val, CONV_H)
            tf_export_params['lstm_i2h_i_weight'] = i2h_i
            tf_export_params['lstm_i2h_c_weight'] = i2h_c
            tf_export_params['lstm_i2h_f_weight'] = i2h_f
            tf_export_params['lstm_i2h_o_weight'] = i2h_o
            tf_export_params['lstm_h2h_i_weight'] = h2h_i
            tf_export_params['lstm_h2h_c_weight'] = h2h_c
            tf_export_params['lstm_h2h_f_weight'] = h2h_f
            tf_export_params['lstm_h2h_o_weight'] = h2h_o
        elif var.name.startswith('rnn/lstm_cell/bias'):
            # Fused LSTM bias split into the four per-gate biases.
            h2h_i_bias, h2h_c_bias, h2h_f_bias, h2h_o_bias = _utils.convert_lstm_bias_tf_to_coreml(val)
            tf_export_params['lstm_h2h_i_bias'] = h2h_i_bias
            tf_export_params['lstm_h2h_c_bias'] = h2h_c_bias
            tf_export_params['lstm_h2h_f_bias'] = h2h_f_bias
            tf_export_params['lstm_h2h_o_bias'] = h2h_o_bias
        elif var.name.startswith('batch_normalization'):
            # e.g. 'batch_normalization/gamma:0' -> 'bn_gamma'.
            tf_export_params['bn_'+var.name.split('/')[-1][0:-2]] = _np.array(val)
        else:
            tf_export_params[var.name.split(':')[0]] = _np.array(val)
    # Pass 2: non-trainable batch-norm running statistics.
    tvars = _tf.all_variables()
    tvars_vals = self.sess.run(tvars)
    for var, val in zip(tvars, tvars_vals):
        if 'moving_mean' in var.name:
            tf_export_params['bn_running_mean'] = _np.array(val)
        if 'moving_variance' in var.name:
            tf_export_params['bn_running_var'] = _np.array(val)
    # CoreML expects C-contiguous buffers.
    for layer_name in tf_export_params.keys():
        tf_export_params[layer_name] = _np.ascontiguousarray(tf_export_params[layer_name])
    return tf_export_params
def set_learning_rate(self, lr):
    """Swap in a fresh Adam optimizer configured with the given rate.

    Parameters
    ----------
    lr: float32
        Learning rate
    """
    self.optimizer = _tf.train.AdamOptimizer(learning_rate=lr)
| 45.348684 | 155 | 0.638256 |
bb43f416d8aeeddde09885b0a46ac6bf4e7be1d1 | 725 | py | Python | OneCent/story/views.py | tobias-fyi/challenges | 4b4d2a8c5e24a51e33d78ab4191ebb843b788aca | [
"MIT"
] | null | null | null | OneCent/story/views.py | tobias-fyi/challenges | 4b4d2a8c5e24a51e33d78ab4191ebb843b788aca | [
"MIT"
] | null | null | null | OneCent/story/views.py | tobias-fyi/challenges | 4b4d2a8c5e24a51e33d78ab4191ebb843b788aca | [
"MIT"
] | null | null | null | from django.views.generic import TemplateView, ListView, DetailView
from django.core.paginator import Paginator
from .models import Story
class StoryIndexView(ListView):
    """Paginated list of ``Story`` objects rendered with ``index.html``."""

    model = Story
    context_object_name = "stories"
    paginate_by = 4
    template_name = "index.html"
class StoryDetailView(DetailView):
    """Detail page for a single ``Story``, rendered with ``detail.html``."""

    template_name = "detail.html"
    model = Story
    context_object_name = "story"

    def get_context_data(self, **kwargs):
        """Adds to context a count of Story objects."""
        context = super().get_context_data(**kwargs)
        # Expose the total number of stories to the template.
        # (A leftover debug print of this value was removed.)
        context["count"] = Story.objects.count()
        return context
class AboutView(TemplateView):
    """Static "about" page."""

    template_name = "about.html"
| 25 | 67 | 0.695172 |
d3ace6d4e7b76fdc239e981e61430ce0e2397b57 | 2,552 | py | Python | markSampleLabelFromPhone.py | isoundy000/FGJumperMaster | 10063f167fbba7d9e16375965f7320a3966169f6 | [
"Apache-2.0"
] | null | null | null | markSampleLabelFromPhone.py | isoundy000/FGJumperMaster | 10063f167fbba7d9e16375965f7320a3966169f6 | [
"Apache-2.0"
] | null | null | null | markSampleLabelFromPhone.py | isoundy000/FGJumperMaster | 10063f167fbba7d9e16375965f7320a3966169f6 | [
"Apache-2.0"
] | 1 | 2019-06-23T12:13:01.000Z | 2019-06-23T12:13:01.000Z | '''
从USB摄像头中读取图片并标注 保存标注文件
'''
import cv2
import numpy
from SampleLabel import SampleLabel
# from glob import glob
import os
from ADBHelper import ADBHelper
import math
# Directory where labeled sample images are written.
save_path = "./samples/label/"
# Index file that accumulates one label record per saved sample.
label_filename = "./samples/label/labels.txt"
# Global labeling UI/state shared by the key-handling loop below.
slabel = SampleLabel(save_path, label_filename)
# ADB helper bound to a 1080x1920 phone screen.
adb = ADBHelper(1080, 1920)
def distance2time(distance, ratio=1.53):
    """Convert a jump distance (pixels) into a screen-press duration.

    Parameters
    ----------
    distance : float
        Pixel distance between the chess piece and the target box.
    ratio : float, optional
        Press milliseconds per pixel. Default 1.53 (previously hard-coded);
        exposed as a parameter so it can be tuned per device.

    Returns
    -------
    int
        Press duration — must be an integer for the adb press command.
    """
    return int(distance * ratio)
def cal_distance(pt1, pt2):
    """Return the Euclidean distance between the chess piece and the next box."""
    x1, y1 = pt1
    x2, y2 = pt2
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(math.pow(dx, 2) + math.pow(dy, 2))
def nextImg(slabel):
    '''
    Pull the next screenshot from the phone over ADB and load it into the
    labeler. Returns True when a new image was loaded, False otherwise.

    (Historical note: this originally walked an iterator of image paths —
    see the commented-out lines below — and now grabs live frames instead.)
    '''
    global adb
    try:
        # Leftover from the old file-iterator implementation:
        # img_path = next(img_path_iter)
        # img_name = getImgName(img_path)
        # print("iterated to image")
        # print(img_path)
        img = adb.getScreenShotByADB()
        # Check whether the screenshot was read successfully.
        if img is None:
            return False
        else:
            slabel.updateImg(img, img_name=None)
            # Once loaded, the previous unlabeled file is discarded.
            return True
    except StopIteration:
        # Only the removed iterator path could raise this; effectively dead
        # code now, kept for safety.
        print("遍历结束")
        return False
# Load the first image before entering the key loop.
nextImg(slabel)
# Main annotation loop: blocks on OpenCV key events and dispatches on the key.
# (The closing line of this loop was fused with dataset-export junk in the
# source; it has been restored to plain `''')` here.)
while True:
    keyValue = cv2.waitKey(0)
    # slabel.responseToKeyEvent(k, img=img)
    if keyValue == ord('e'):
        # Exit: tear down windows and persist labels.
        print('销毁窗口并保存')
        slabel.onDestroy()
        break
    elif keyValue == ord('n'):
        # Skip to the next image.
        print("跳过,下一张图片")
        if not nextImg(slabel):
            # Fetching failed — quit the loop.
            break
    elif keyValue == ord('j'):
        # Jump action is ADB-driven and not implemented in this program.
        print("跳")
        print("Jump")
    elif keyValue == ord('c'):
        # Cancel the current annotation by redrawing the frame.
        print("取消标注")
        slabel.updateImg(slabel.img)
    elif keyValue == ord('s'):
        # Save the annotation, trigger the jump, and advance.
        print("保存")
        if slabel.isMarkDone():
            slabel.saveImg()
            slabel.saveLabelInfo()
            slabel.printProcessOnCanvas("Save Done")
            adb.randPressOnScreen(distance2time(cal_distance(slabel.cbox, slabel.fchess)))
            # Automatically load the next image.
            if not nextImg(slabel):
                # Fetching failed — quit the loop.
                break
        else:
            # Annotation incomplete: refuse to save.
            slabel.printProcessOnCanvas("Error: mark undone, could not save")
    elif keyValue == ord('h'):
        print('''
    标注工具-帮助菜单
    ==================================
    键盘 n - next 下一张图片
    键盘 c - cancel 撤销标注
    键盘 s - save 保存
    键盘 j - jump 跳跃
    键盘 h - help 帮助菜单
    键盘 e - exit 保存标记并退出系统
    ''')
ee109e4d2b348c3c6211a94b836eef17969ed5c1 | 484 | py | Python | bkmus_api/bkmus_image.py | mbakija/bkmuseum_xstitch_bot | 07de75a23d48fafae34ebda60a82ba9973386be1 | [
"MIT"
] | 1 | 2020-11-24T05:47:55.000Z | 2020-11-24T05:47:55.000Z | bkmus_api/bkmus_image.py | mbakija/bkmuseum_xstitch_bot | 07de75a23d48fafae34ebda60a82ba9973386be1 | [
"MIT"
] | null | null | null | bkmus_api/bkmus_image.py | mbakija/bkmuseum_xstitch_bot | 07de75a23d48fafae34ebda60a82ba9973386be1 | [
"MIT"
] | null | null | null | # print end of URL for image
# (some objects do not have an image, those are noted as NONE)
# the results are appended to https://d1lfxha3ugu3d4.cloudfront.net/images/opencollection/objects/size4/
# which is the URL for the large-size image the museum makes available to download
import json

# Use a context manager so the JSON file is closed deterministically
# (the original left the handle open for the life of the process).
with open('BKMobjects.json') as f:
    data = json.load(f)

# Print one downloadable large-size image URL per object record.
# Records without an image carry the value NONE and are printed as-is.
for obj in data['object']:
    print('https://d1lfxha3ugu3d4.cloudfront.net/images/opencollection/objects/size4/' + str(obj['primary_image']))
f12a7b471fce43dab6f7226b5ed488a86ddcf65f | 18,008 | py | Python | Convlab/convlab/modules/e2e/multiwoz/Transformer/util.py | Victorwz/tod_as_nlg | dd23adac15e41d6aeca60b31580d97c358f5fed3 | [
"MIT"
] | 6 | 2021-09-07T14:30:22.000Z | 2021-12-29T05:54:18.000Z | Convlab/convlab/modules/e2e/multiwoz/Transformer/util.py | Victorwz/tod_as_nlg | dd23adac15e41d6aeca60b31580d97c358f5fed3 | [
"MIT"
] | null | null | null | Convlab/convlab/modules/e2e/multiwoz/Transformer/util.py | Victorwz/tod_as_nlg | dd23adac15e41d6aeca60b31580d97c358f5fed3 | [
"MIT"
] | 1 | 2021-09-02T15:12:18.000Z | 2021-09-02T15:12:18.000Z | # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import os
import tarfile
import tempfile
import random
import torch
import copy
import re
import json
import time
from tqdm import tqdm
from convlab.modules.util.multiwoz.dbquery import query, dbs
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
# Known misspelling seen in the data and its correction.
Bad_Cases = {'kihinoor': 'kohinoor'}
# BELIEF_BAD_CASES = {"MUL2395": ("meze bar restaurant", "meze bar")}
# All MultiWOZ dialogue domains.
DOMAINS = ['hotel', 'restaurant', 'train', 'taxi', 'attraction', 'police', 'hospital']
DB_PATH = "data/multiwoz/db/"
# Domains that have a backing database (taxi does not).
DB_DOMAIN = ['attraction', 'hospital', 'hotel', 'police', 'restaurant', 'train']
DOMAIN_NEED_BOOKING = ['hotel', 'restaurant', 'train']
# Dialogue-act slot name -> database column name.
USEFUL_SLOT = {'Addr': 'address', 'Phone': 'phone', 'Post': 'postcode', 'Id': 'id', 'Name': 'name'}
# Database columns excluded when serializing a record.
IGNORE_KEY_DB = ['introduction', 'openhours']
# Per-domain whitelist of database columns worth emitting.
USEFUL_SLOT_FOR_DOMAINS = {
    "hotel": ["address", "area", "internet", "parking", "name", "phone", "postcode", "pricerange", "stars", "type"],
    "restaurant": ["address", "area", "food", "name", "phone", "postcode", "pricerange"],
    "train": ["arriveBy", "day", "departure", "destination", "duration", "leaveAt", "price", "trainID"],
    "attraction": ["address", "area", "name", "phone", "postcode", "type", "entrance fee"],
    "hospital": ["department", "phone"],
    "police": ["name", "address", "phone"]
}
# DIALOG_LACK_DOMAIN = {"PMUL4047": "restaurant", "PMUL1454": "attraction", "PMUL4353": "train", "PMUL3040": "train", "PMUL3761": "hotel",
#                       "PMUL3882": "restaurant", "PMUL4115": "train", "SNG01606": "police", "PMUL4415": "hotel", "PMUL3854": "train",
#                       "PMUL3116": "hotel"}
def get_woz_dataset(tokenizer, dataset_path, dataset_cache=None, slice_data=False, mode="train"):
    """Load and tokenize the MultiWOZ dataset for the Transformer e2e model.

    Parameters
    ----------
    tokenizer:
        Tokenizer exposing ``_tokenize`` / ``convert_tokens_to_ids``;
        NOTE(review): may be ``None`` for debugging, but tokenization will
        then fail — confirm intended use.
    dataset_path: str
        Directory containing ``total.json`` / ``val.json`` / ``test.json``.
    dataset_cache: str, optional
        Path of a ``torch.save``d tokenized cache (train mode only).
    slice_data: bool
        Debug switch that restricts the data to a couple of dialogues.
    mode: str
        "train" (train+valid) or "test".

    Returns
    -------
    dict of tokenized dialogues keyed (in train mode) by "train"/"valid".

    NOTE(review): this block was reconstructed from a whitespace-mangled
    source; branch placement at the two sites marked below should be
    confirmed against the upstream repository.
    """
    if dataset_cache and os.path.isfile(dataset_cache) and mode == "train":
        logger.info("Load tokenized dataset from cache at %s", dataset_cache)
        dataset = torch.load(dataset_cache)
    else:
        if mode == "train":
            train_path = os.path.join(dataset_path, 'total.json')
            valid_path = os.path.join(dataset_path, 'val.json')
            with open(train_path, "r", encoding="utf-8") as f:
                train_dataset = json.loads(f.read())
            with open(valid_path, "r", encoding="utf-8") as f:
                valid_dataset = json.loads(f.read())
            if slice_data:
                # Debug mode: keep only a tiny, fixed subset of dialogues.
                dict_slice = lambda adict, start, end: {k: adict[k] for k in list(adict.keys())[start:end]}
                #train_dataset = dict_slice(train_dataset, 2, 5)
                #train_dataset = {"WOZ20664": train_dataset["WOZ20664"]}
                #train_dataset = {"SNG01810": train_dataset["SNG01810"]}
                train_dataset = {"MUL2395": train_dataset["MUL2395"]}
                valid_dataset = dict_slice(valid_dataset, 1, 2)
                #valid_dataset = {"MUL1382": valid_dataset["MUL1382"], "MUL0602": valid_dataset["MUL0602"]}
        elif mode == "test":
            test_path = os.path.join(dataset_path, 'test.json')
            test_dataset = json.load(open(test_path, "r", encoding="utf-8"))
        # Load all the databases for all the domains, and save them into one dictionary named database
        database = dbs

        def convert_act(dialog_act):
            # Serialize each turn's dialogue acts as "<dom-intent> <slot> <value> ," tokens,
            # skipping duplicates within the turn.
            bs = []
            for d in dialog_act:
                tmp = ""
                for k in d.keys():
                    #act = k.lower().replace("-", " ")
                    for slot, value in d[k]:
                        potential = ' {} {} {} ,'.format(k, slot, value)
                        if potential not in tmp:
                            tmp += potential
                print(tmp)  # debug output kept from original
                bs.append(tmp)
            return bs

        def convert_kb_tuple(dom, kb_tuple):
            # Serialize a single DB record as "key value , key value" using the
            # per-domain slot whitelist. Taxi has no DB records.
            if dom == "taxi":
                return ""
            kb = ""
            for k, v in kb_tuple.items():
                if type(v) == str and k in USEFUL_SLOT_FOR_DOMAINS[dom] and v != "?":
                    kb += k + " " + v + " , "
            return kb[:-2]

        def convert_action_slot_kb(kb_results, dialog_act, cur_dom):
            # For each system turn: collect DB records implied by the system
            # acts (plus belief-state query results) and any booking reference.
            db_list = []
            book_list = []
            for i, record in enumerate(dialog_act):
                db_record = []
                ref_tmp = ""
                for dom_intent, slot_val_list in record.items():
                    dom, intent = dom_intent.split('-')
                    # NOTE(review): cur_dom[i] is a string, so this tests the
                    # domain name against a set of its characters and can
                    # never be true — suspected latent bug in the original.
                    if dom.lower() in set(cur_dom[i]) and dom.lower() not in ["general", "booking"]:
                        raise KeyError
                    if intent in ["NoOffer", "NoBook"]:
                        continue
                    # deal with C.B postcode case
                    slot_list = [x[0] for x in slot_val_list]
                    if "Post" in slot_list:
                        if "C.B" in slot_val_list[slot_list.index("Post")][1]:
                            if re.findall('([a-zA-Z]{1}[\. ]?[a-zA-Z]{1}[\. ]+\d{1,2}[, ]+\d{1}[\. ]?[a-zA-Z]{1}[\. ]?[a-zA-Z]{1}|[a-zA-Z]{2}\d{2}[a-zA-Z]{2})', slot_val_list[slot_list.index("Post")][1]):
                                slot_val_list[slot_list.index("Post")][1] = re.sub('[,\. ]', '', slot_val_list[slot_list.index("Post")][1].lower())
                            else:
                                # Postcode was split across two slots: re-join and normalize.
                                post_combine = slot_val_list[slot_list.index("Post")][1] + " , " + slot_val_list[slot_list.index("Post")+1][1]
                                post_combine = normalize(post_combine, tokenizer=None)
                                slot_val_list[slot_list.index("Post")][1] = post_combine
                                print(post_combine)  # debug output kept from original
                    for slot, value in slot_val_list:
                        if slot == "Ref":
                            ref_tmp = " " + value
                        elif slot in USEFUL_SLOT:
                            #db_record = search_db(dom, slot, value)
                            if dom == 'Train' and slot == 'Id':
                                #print("\n" + value + "\n")
                                db_search_res = search_db(cur_dom[i], 'trainID', value)
                            # Dealing wiht special cases, sometimes restaurant inform will add a space to the phone
                            elif slot == 'Phone':
                                value = value.replace(" ", "")
                                db_search_res = search_db(cur_dom[i], USEFUL_SLOT[slot], value)
                            else:
                                #print(cur_dom[i], USEFUL_SLOT[slot], value)
                                db_search_res = search_db(cur_dom[i], USEFUL_SLOT[slot], normalize(value, tokenizer=None))
                            #print(db_search_res)
                            if db_search_res not in db_record and db_search_res != "":
                                #db_record = db_record + "; " + db_search_res if db_record else db_search_res
                                db_record.append(db_search_res)
                # Append belief-state query results that the acts did not already cover.
                for record in kb_results[i]:
                    if record not in db_record:
                        db_record.append(record)
                db_list.append("; ".join(db_record[:3]))
                book_list.append(ref_tmp)
            #print("DB list is", db_list)
            return db_list, book_list

        def search_db(domain, slot, value):
            # Find the first DB record whose `slot` matches `value`
            # (substring match in either direction) and serialize it.
            search_res = ""
            if domain == "taxi" and slot == "phone":
                search_res = "phone {} ".format(value)
                return search_res
            if domain == "police":
                # Police DB has a single station record.
                return convert_kb_tuple('police', database['police'][0])
            for record in database[domain]:
                if slot in record:
                    if str(value).lower() in str(record[slot]).lower() or str(record[slot]).lower() in str(value).lower():
                        search_res += convert_kb_tuple(domain, record)
                        return search_res
            return ""

        def convert_meta(dialog_meta, cur_dom, dialog_act):
            # Per system turn: serialize the belief state ("constraint string")
            # and query the DB with it (trains sorted by time, others sampled).
            cs = []
            #kb = []
            kb_results_list = []
            #dom_dialog_list = []
            for i, d in enumerate(dialog_meta):
                cs_tmp = ""
                dom_this_turn = cur_dom[i]
                constraint = d[dom_this_turn]
                kb_results = query(dom_this_turn, constraint["semi"].items())
                if kb_results and dom_this_turn != 'taxi':
                    if dom_this_turn == 'train':
                        if constraint["semi"]['leaveAt'] not in ["none", "not mentioned", ""]:
                            kb_results = sorted(kb_results, key=lambda k: k['leaveAt'])
                        elif constraint["semi"]['arriveBy']:
                            kb_results = sorted(kb_results, key=lambda k: k['arriveBy'], reverse=True)
                        kb_results = kb_results[:5]
                    else:
                        kb_results = random.sample(kb_results, min(5, len(kb_results)))
                    kb_results_list.append([convert_kb_tuple(dom_this_turn, x) for x in kb_results])
                else:
                    kb_results_list.append([])
                # kb_tmp = ' match {} '.format(len(kb_query_results))
                # keys = [k for k in dialog_act[i].keys()]
                # keys = ''.join(keys)
                # if "NoBook" in keys or "NoOffer" in keys:
                #     kb_tmp = ' match {} '.format(0)
                for slot, value in d[dom_this_turn]['semi'].items():
                    if not value:
                        pass
                    elif value in ["dont care", "don't care", "do n't care", "dontcare"]:
                        cs_tmp += " {} {} {} ,".format(dom_this_turn, slot, "don't care")
                    elif value == "not mentioned" or value == "none":
                        pass
                    elif value == "guest house" or value == "guesthouses":
                        cs_tmp += " {} {} {} ,".format(dom_this_turn, slot,"guesthouse")
                    else:
                        cs_tmp += " {} {} {} ,".format(dom_this_turn, slot, value)
                #kb.append(kb_tmp)
                cs.append(cs_tmp)
            assert len(cs) == len(kb_results_list)
            return cs, kb_results_list

        def normalize(text, tokenizer):
            # Text cleanup: whitespace, B&B spelling, currency, unicode quotes,
            # phone-number and Cambridge-postcode compaction; optionally round-trip
            # through the tokenizer so data matches model vocabulary.
            text = re.sub("\t", " ", text)
            text = re.sub("\n", " ", text)
            # hotel domain pfb30
            text = re.sub(r"b&b", "bed and breakfast", text)
            text = re.sub(r"b and b", "bed and breakfast", text)
            text = re.sub('\$', '', text)
            text = text.replace('/', ' and ')
            # weird unicode bug
            text = re.sub(u"(\u2018|\u2019)", "'", text)
            text = re.sub(u"(\u00a0)", " ", text)
            # remove multiple spaces
            text = re.sub(' +', ' ', text)
            phone = re.findall('\d{5}[ ]?\d{5,6}', text)
            if phone:
                sidx = 0
                for p in phone:
                    sidx = text.find(p, sidx)
                    eidx = sidx + len(p)
                    text = text[:sidx] + re.sub('[ ]', '', p) + text[eidx:]
            # deal with special postcode
            #ms = re.findall('([a-zA-Z]{1}[\. ]?[a-zA-Z]{1}[\. ]?\d{1,2}[, ]+\d{1}[\. ]?[a-zA-Z]{1}[\. ]?[a-zA-Z]{1}|[a-zA-Z]{2}\d{2}[a-zA-Z]{2})',text)
            ms = re.findall('([cC]{1}[\. ]?[bB]{1}[\. ]+\d{1,2}[, ]+\d{1}[\. ]?[a-zA-Z]{1}[\. ]?[a-zA-Z]{1}|[cC]{1}[bB]{1}\d{2}[a-zA-Z]{2})',text)
            if ms:
                sidx = 0
                for m in ms:
                    sidx = text.find(m, sidx)
                    eidx = sidx + len(m)
                    text = text[:sidx] + re.sub('[,\. ]', '', m.lower()) + text[eidx:]
            # if text[0].isdigit() == False:
            text = text[0].upper() + text[1:]
            if tokenizer:
                text = tokenizer.decode(tokenizer.convert_tokens_to_ids(tokenizer._tokenize(text)))
            return text

        def parse_woz_data(data, valid=False):
            # Turn raw dialogues into per-turn training examples with history,
            # candidate response, dialogue policy, constraint string, DB and
            # booking annotations.
            dataset = {}
            doms = ['hotel', 'restaurant', 'train', 'taxi', 'attraction', 'hospital', 'police']
            #sns = set()
            for dia_name in tqdm(data.keys()):
                print(dia_name)  # debug output kept from original
                dialog_info = [t['text'].strip() for t in data[dia_name]['log']]
                dialog_act_meta = [t['dialog_act'] for t in data[dia_name]['log']]
                dialog_act = dialog_act_meta[1::2]
                cur_dom = []
                for t in dialog_act_meta:
                    key_list = [k.lower() for k in t.keys()]
                    keys = ' '.join(key_list)
                    cur_dom_tmp = set()
                    #print(keys)
                    for d in doms:
                        if d in keys:
                            cur_dom_tmp.add(d)
                        if d == 'police':
                            # 'police' is the LAST entry of doms, so this runs on
                            # the final iteration: fall back to 'none' / previous
                            # domain when nothing was detected.
                            # NOTE(review): reconstructed placement — confirm.
                            if len(cur_dom) == 0 and len(cur_dom_tmp) == 0:
                                cur_dom_tmp.add('none')
                            elif len(cur_dom_tmp) == 0:
                                cur_dom_tmp.add(cur_dom[-1])
                    if len(cur_dom_tmp) > 1:
                        tmp = cur_dom_tmp.copy()
                        for dom in cur_dom_tmp:
                            # NOTE(review): literal "{}-request" is never in keys;
                            # likely intended "{}-request".format(dom).
                            if "{}-request" in keys:
                                tmp.remove(dom)
                        cur_dom.append(list(tmp)[0])
                    else:
                        cur_dom.append(list(cur_dom_tmp)[0])
                # Keep only system-turn domains; patch leading 'none' entries.
                cur_dom = cur_dom[1::2]
                if len(cur_dom) > 2:
                    if cur_dom[1] == "none":
                        cur_dom[1] = cur_dom[2]
                    if cur_dom[0] == "none":
                        cur_dom[0] = cur_dom[1]
                if "none" in cur_dom:
                    raise KeyError
                dialog_meta = [t['metadata'] for t in data[dia_name]['log']]
                dialog_meta = dialog_meta[1::2]
                cs, kb_results = convert_meta(dialog_meta, cur_dom, dialog_act)
                db_list, book_list = convert_action_slot_kb(kb_results, dialog_act, cur_dom)
                assert len(cs) == len(db_list)
                assert len(cur_dom) == len(cs)
                dp = convert_act(dialog_act)
                #sns = sns.union(sn)
                dialog_len = len(dialog_info)
                if dialog_len == 0:
                    continue
                utterances = {"utterances": []}
                temp = {"candidates": [], "history": [], "dp": [], "cs": [], "db": [], "book": [], "dom": []}
                #print("dialog len is : ", dialog_len)
                for i in range(dialog_len):
                    if i % 2 == 0:
                        # User turn: extend history, record the gold system
                        # reply as candidate, and attach turn annotations.
                        temp["history"].append(normalize(dialog_info[i], tokenizer))
                        #temp["candidates"].append(random_candidates(data))
                        temp["candidates"].append(normalize(dialog_info[i + 1], tokenizer))
                        if cur_dom[i // 2] == "none":
                            raise KeyError
                        temp["dom"].append(" " + cur_dom[i // 2])
                        if book_list[i // 2] != "":
                            temp["book"].append(book_list[i // 2])
                        temp["dp"].append(dp[i // 2][:-2])
                        if cs[i // 2] != '':
                            temp["cs"].append(cs[i // 2][:-2])
                        if db_list[i // 2] != '':
                            #dbinserted = ' ; '.join([db[i // 2][:-1], db_list[i // 2]])
                            temp["db"].append(" " + db_list[i // 2][:-1])
                        else:
                            print(temp, "\n")  # debug output kept from original
                        utterances["utterances"].append(copy.deepcopy(temp))
                    else:
                        # System turn: fold it into the history and reset the
                        # per-example fields for the next user turn.
                        # NOTE(review): reconstructed branch placement — confirm.
                        temp["history"].append(normalize(dialog_info[i], tokenizer))
                        temp["candidates"] = []
                        temp["dp"] = []
                        temp["cs"] = []
                        temp["db"] = []
                        temp["book"] = []
                        temp["dom"] = []
                dataset[dia_name] = utterances
            return dataset

        if mode == "train":
            train = parse_woz_data(train_dataset)
            valid = parse_woz_data(valid_dataset)
            dataset = {"train": train, "valid": valid}
        elif mode == "test":
            dataset = parse_woz_data(test_dataset)

        def tokenize(obj):
            # Recursively tokenize all strings in the nested dataset structure.
            if isinstance(obj, str):
                return tokenizer.convert_tokens_to_ids(tokenizer._tokenize(obj))
            if isinstance(obj, dict):
                return dict((n, tokenize(o)) for n, o in obj.items())
            return list(tokenize(o) for o in obj)
        dataset = tokenize(dataset)
        if dataset_cache and not slice_data and mode == "train":
            torch.save(dataset, dataset_cache)
    return dataset
if __name__ == "__main__":
    # Ad-hoc smoke run of the dataset builder.
    # NOTE(review): tokenizer=None will fail once tokenization is reached —
    # confirm this entry point is only used for the pre-tokenization steps.
    get_woz_dataset(tokenizer=None, dataset_path="data/multiwoz")
| 43.08134 | 204 | 0.46335 |
0446106613041503146a815385991025df2f4e10 | 118 | py | Python | recipe/test_imports_future.py | cryoem/eman-dependencies-feedstock | 74a2cc4d3abcec69dc3fff20761ce0191192eaea | [
"BSD-3-Clause"
] | null | null | null | recipe/test_imports_future.py | cryoem/eman-dependencies-feedstock | 74a2cc4d3abcec69dc3fff20761ce0191192eaea | [
"BSD-3-Clause"
] | 2 | 2020-04-28T13:38:02.000Z | 2020-09-07T10:57:03.000Z | recipe/test_imports_future.py | cryoem/eman-dependencies-feedstock | 74a2cc4d3abcec69dc3fff20761ce0191192eaea | [
"BSD-3-Clause"
] | 2 | 2020-04-28T13:32:13.000Z | 2020-09-07T10:44:19.000Z | from __future__ import division
from __future__ import print_function
from __future__ import print_function, division
| 29.5 | 47 | 0.881356 |
6f60afa318de36f9992c1a644717c06a0fde9247 | 5,332 | py | Python | tlslite/integration/xmlrpctransport.py | tomato42/tlslite-1 | 4631799cdfac8f90b567d455e698b05d7a917599 | [
"Unlicense"
] | 121 | 2015-05-28T18:14:37.000Z | 2020-11-18T11:23:59.000Z | tlslite/integration/xmlrpctransport.py | tomato42/tlslite-1 | 4631799cdfac8f90b567d455e698b05d7a917599 | [
"Unlicense"
] | 340 | 2015-05-28T15:56:11.000Z | 2020-11-04T11:40:45.000Z | tlslite/integration/xmlrpctransport.py | tomato42/tlslite-1 | 4631799cdfac8f90b567d455e698b05d7a917599 | [
"Unlicense"
] | 60 | 2015-07-10T20:07:02.000Z | 2020-10-22T08:04:20.000Z | # Authors:
# Trevor Perrin
# Kees Bos - Fixes for compatibility with different Python versions
# Martin von Loewis - python 3 port
#
# See the LICENSE file for legal information regarding use of this file.
"""TLS Lite + xmlrpclib."""
try:
import xmlrpclib
import httplib
except ImportError:
# Python 3
from xmlrpc import client as xmlrpclib
from http import client as httplib
from tlslite.integration.httptlsconnection import HTTPTLSConnection
from tlslite.integration.clienthelper import ClientHelper
import tlslite.errors
class XMLRPCTransport(xmlrpclib.Transport, ClientHelper):
    """Handles an HTTPS transaction to an XML-RPC server."""

    # Pre python 2.7, the make_connection returns a HTTP class
    # (probe a throwaway Transport for the 2.7+ `_connection` attribute).
    transport = xmlrpclib.Transport()
    conn_class_is_http = not hasattr(transport, '_connection')
    del(transport)

    def __init__(self, use_datetime=0,
                 username=None, password=None,
                 certChain=None, privateKey=None,
                 checker=None,
                 settings=None,
                 ignoreAbruptClose=False):
        """
        Create a new XMLRPCTransport.

        An instance of this class can be passed to
        :py:class:`xmlrpclib.ServerProxy`
        to use TLS with XML-RPC calls::

            from tlslite import XMLRPCTransport
            from xmlrpclib import ServerProxy

            transport = XMLRPCTransport(user="alice", password="abra123")
            server = ServerProxy("https://localhost", transport)

        For client authentication, use one of these argument
        combinations:

        - username, password (SRP)
        - certChain, privateKey (certificate)

        For server authentication, you can either rely on the
        implicit mutual authentication performed by SRP or
        you can do certificate-based server
        authentication with one of these argument combinations:

        - x509Fingerprint

        Certificate-based server authentication is compatible with
        SRP or certificate-based client authentication.

        The constructor does not perform the TLS handshake itself, but
        simply stores these arguments for later. The handshake is
        performed only when this class needs to connect with the
        server. Thus you should be prepared to handle TLS-specific
        exceptions when calling methods of :py:class:`xmlrpclib.ServerProxy`.
        See the
        client handshake functions in
        :py:class:`~tlslite.tlsconnection.TLSConnection` for details on which
        exceptions might be raised.

        :type username: str
        :param username: SRP username. Requires the
            'password' argument.

        :type password: str
        :param password: SRP password for mutual authentication.
            Requires the 'username' argument.

        :type certChain: ~tlslite.x509certchain.X509CertChain
        :param certChain: Certificate chain for client authentication.
            Requires the 'privateKey' argument. Excludes the SRP arguments.

        :type privateKey: ~tlslite.utils.rsakey.RSAKey
        :param privateKey: Private key for client authentication.
            Requires the 'certChain' argument. Excludes the SRP arguments.

        :type checker: ~tlslite.checker.Checker
        :param checker: Callable object called after handshaking to
            evaluate the connection and raise an Exception if necessary.

        :type settings: ~tlslite.handshakesettings.HandshakeSettings
        :param settings: Various settings which can be used to control
            the ciphersuites, certificate types, and SSL/TLS versions
            offered by the client.

        :type ignoreAbruptClose: bool
        :param ignoreAbruptClose: ignore the TLSAbruptCloseError on
            unexpected hangup.
        """
        # self._connection is new in python 2.7, since we're using it here,
        # we'll add this ourselves too, just in case we're pre-2.7
        self._connection = (None, None)
        xmlrpclib.Transport.__init__(self, use_datetime)
        self.ignoreAbruptClose = ignoreAbruptClose
        ClientHelper.__init__(self,
                              username, password,
                              certChain, privateKey,
                              checker,
                              settings)

    def make_connection(self, host):
        """Make a connection to `host`. Reuse keepalive connections."""
        # return an existing connection if possible. This allows
        # HTTP/1.1 keep-alive.
        if self._connection and host == self._connection[0]:
            http = self._connection[1]
        else:
            # create a HTTPS connection object from a host descriptor
            chost, extra_headers, x509 = self.get_host_info(host)
            http = HTTPTLSConnection(
                chost, None, username=self.username, password=self.password,
                certChain=self.certChain, privateKey=self.privateKey,
                checker=self.checker,
                settings=self.settings,
                ignoreAbruptClose=self.ignoreAbruptClose)
            # store the host argument along with the connection object
            self._connection = host, http
        if not self.conn_class_is_http:
            # Python 2.7+/3: return the connection object directly.
            return http
        # Legacy (pre-2.7) path: wrap the connection in an httplib.HTTP shim.
        http2 = httplib.HTTP()
        http2._setup(http)
        return http2
| 38.359712 | 77 | 0.658477 |
2c16b061081f69a141cda7bad67ffd9f2ae85deb | 65,013 | py | Python | chia/full_node/weight_proof.py | keypool-com/chia-blockchain | 8c96651a78a0ef6694197c0070f4631fc4b1bf45 | [
"Apache-2.0"
] | null | null | null | chia/full_node/weight_proof.py | keypool-com/chia-blockchain | 8c96651a78a0ef6694197c0070f4631fc4b1bf45 | [
"Apache-2.0"
] | null | null | null | chia/full_node/weight_proof.py | keypool-com/chia-blockchain | 8c96651a78a0ef6694197c0070f4631fc4b1bf45 | [
"Apache-2.0"
] | null | null | null | import asyncio
import dataclasses
import logging
import math
import random
from concurrent.futures.process import ProcessPoolExecutor
from typing import Dict, List, Optional, Tuple
from chia.consensus.block_header_validation import validate_finished_header_block
from chia.consensus.block_record import BlockRecord
from chia.consensus.blockchain_interface import BlockchainInterface
from chia.consensus.constants import ConsensusConstants
from chia.consensus.deficit import calculate_deficit
from chia.consensus.full_block_to_block_record import header_block_to_sub_block_record
from chia.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_iters,
is_overflow_block,
)
from chia.consensus.vdf_info_computation import get_signage_point_vdf_info
from chia.types.blockchain_format.classgroup import ClassgroupElement
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.slots import ChallengeChainSubSlot, RewardChainSubSlot
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chia.types.blockchain_format.vdf import VDFInfo
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
from chia.types.header_block import HeaderBlock
from chia.types.weight_proof import (
SubEpochChallengeSegment,
SubEpochData,
SubSlotData,
WeightProof,
SubEpochSegments,
RecentChainData,
)
from chia.util.block_cache import BlockCache
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint32, uint64, uint128
from chia.util.streamable import dataclass_from_dict, recurse_jsonify
log = logging.getLogger(__name__)
class WeightProofHandler:
LAMBDA_L = 100
C = 0.5
MAX_SAMPLES = 20
def __init__(
    self,
    constants: ConsensusConstants,
    blockchain: BlockchainInterface,
):
    # Header hash of the peak the cached proof was built for.
    self.tip: Optional[bytes32] = None
    # Cached weight proof, reused while the tip is unchanged.
    self.proof: Optional[WeightProof] = None
    self.constants = constants
    self.blockchain = blockchain
    # Serializes proof creation so only one build runs at a time.
    self.lock = asyncio.Lock()
async def get_proof_of_weight(self, tip: bytes32) -> Optional[WeightProof]:
    """Return a weight proof for `tip`, reusing the cached proof when possible.

    Returns None when the tip is unknown or the chain is shorter than
    WEIGHT_PROOF_RECENT_BLOCKS.
    """
    tip_rec = self.blockchain.try_block_record(tip)
    if tip_rec is None:
        log.error("unknown tip")
        return None
    if tip_rec.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
        log.debug("chain to short for weight proof")
        return None
    async with self.lock:
        # Cache hit: the stored proof already ends at this tip.
        if self.proof is not None:
            if self.proof.recent_chain_data[-1].header_hash == tip:
                return self.proof
        wp = await self._create_proof_of_weight(tip)
        if wp is None:
            return None
        self.proof = wp
        self.tip = tip
        return wp
def get_sub_epoch_data(self, tip_height: uint32, summary_heights: List[uint32]) -> List[SubEpochData]:
    """Collect SubEpochData for every summary at or below ``tip_height``."""
    collected: List[SubEpochData] = []
    for sub_epoch_n, ses_height in enumerate(summary_heights):
        if ses_height > tip_height:
            break
        ses = self.blockchain.get_ses(ses_height)
        log.debug(f"handle sub epoch summary {sub_epoch_n} at height: {ses_height} ses {ses}")
        collected.append(_create_sub_epoch_data(ses))
    return collected
async def _create_proof_of_weight(self, tip: bytes32) -> Optional[WeightProof]:
    """
    Creates a weight proof object
    """
    assert self.blockchain is not None

    sub_epoch_segments: List[SubEpochChallengeSegment] = []
    tip_rec = self.blockchain.try_block_record(tip)
    if tip_rec is None:
        log.error("failed not tip in cache")
        return None
    log.info(f"create weight proof peak {tip} {tip_rec.height}")
    recent_chain = await self._get_recent_chain(tip_rec.height)
    if recent_chain is None:
        return None
    summary_heights = self.blockchain.get_ses_heights()
    # Start from the genesis block record as the "previous" summary block.
    prev_ses_block = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(uint32(0)))
    if prev_ses_block is None:
        return None
    sub_epoch_data = self.get_sub_epoch_data(tip_rec.height, summary_heights)
    # use second to last ses as seed
    seed = self.get_seed_for_proof(summary_heights, tip_rec.height)
    rng = random.Random(seed)
    # Deterministically choose which weights (and thus sub-epochs) to sample.
    weight_to_check = _get_weights_for_sampling(rng, tip_rec.weight, recent_chain)
    sample_n = 0
    ses_blocks = await self.blockchain.get_block_records_at(summary_heights)
    if ses_blocks is None:
        return None
    for sub_epoch_n, ses_height in enumerate(summary_heights):
        if ses_height > tip_rec.height:
            break
        # if we have enough sub_epoch samples, dont sample
        if sample_n >= self.MAX_SAMPLES:
            log.debug("reached sampled sub epoch cap")
            break
        # sample sub epoch
        # next sub block
        ses_block = ses_blocks[sub_epoch_n]
        if ses_block is None or ses_block.sub_epoch_summary_included is None:
            log.error("error while building proof")
            return None
        if _sample_sub_epoch(prev_ses_block.weight, ses_block.weight, weight_to_check):  # type: ignore
            sample_n += 1
            # Reuse persisted segments if this sub-epoch was built before.
            segments = await self.blockchain.get_sub_epoch_challenge_segments(ses_block.height)
            if segments is None:
                segments = await self.__create_sub_epoch_segments(ses_block, prev_ses_block, uint32(sub_epoch_n))
                if segments is None:
                    log.error(
                        f"failed while building segments for sub epoch {sub_epoch_n}, ses height {ses_height} "
                    )
                    return None
                await self.blockchain.persist_sub_epoch_challenge_segments(ses_block.height, segments)
            log.debug(f"sub epoch {sub_epoch_n} has {len(segments)} segments")
            sub_epoch_segments.extend(segments)
        prev_ses_block = ses_block
    log.debug(f"sub_epochs: {len(sub_epoch_data)}")
    return WeightProof(sub_epoch_data, sub_epoch_segments, recent_chain)
def get_seed_for_proof(self, summary_heights: List[uint32], tip_height) -> bytes32:
    """Seed for sub-epoch sampling: the hash of the second-to-last
    sub-epoch summary at or below ``tip_height``."""
    seen = 0
    chosen = None
    for ses_height in reversed(summary_heights):
        if ses_height <= tip_height:
            seen += 1
        if seen == 2:
            chosen = self.blockchain.get_ses(ses_height)
            break
    assert chosen is not None
    return chosen.get_hash()
async def _get_recent_chain(self, tip_height: uint32) -> Optional[List[HeaderBlock]]:
    """Collect the recent chain of header blocks ending at ``tip_height``.

    Walks backwards from the tip until two sub-epoch summaries have been
    passed, so the result always spans at least the last two sub epochs.

    Returns:
        Header blocks in ascending height order, or None on a failed lookup.
    """
    recent_chain: List[HeaderBlock] = []
    ses_heights = self.blockchain.get_ses_heights()
    min_height = 0
    count_ses = 0
    # find the height just below the second-newest summary at/below the tip
    for ses_height in reversed(ses_heights):
        if ses_height <= tip_height:
            count_ses += 1
        if count_ses == 2:
            min_height = ses_height - 1
            break
    log.debug(f"start {min_height} end {tip_height}")
    headers = await self.blockchain.get_header_blocks_in_range(min_height, tip_height)
    blocks = await self.blockchain.get_block_records_in_range(min_height, tip_height)
    ses_count = 0
    curr_height = tip_height
    blocks_n = 0
    while ses_count < 2:
        if curr_height == 0:
            break
        # add to needed reward chain recent blocks
        header_block = headers.get(self.blockchain.height_to_hash(curr_height))
        # BUGFIX: check for a missing header BEFORE dereferencing it — the
        # original ran the None-check after the lookup, so it never triggered
        if header_block is None:
            log.error("creating recent chain failed")
            return None
        block_rec = blocks[header_block.header_hash]
        recent_chain.insert(0, header_block)
        if block_rec.sub_epoch_summary_included:
            ses_count += 1
        curr_height = uint32(curr_height - 1)  # type: ignore
        blocks_n += 1
    header_block = headers[self.blockchain.height_to_hash(curr_height)]
    recent_chain.insert(0, header_block)
    log.info(
        f"recent chain, "
        f"start: {recent_chain[0].reward_chain_block.height} "
        f"end: {recent_chain[-1].reward_chain_block.height} "
    )
    return recent_chain
async def create_prev_sub_epoch_segments(self):
    """Build and persist challenge segments for the previous sub epoch.

    Does nothing unless at least three sub-epoch summaries exist, since the
    segment creation needs the sub epoch before the one being segmented.
    """
    log.debug("create prev sub_epoch_segments")
    summary_heights = self.blockchain.get_ses_heights()
    if len(summary_heights) < 3:
        return
    sub_epoch_n = len(summary_heights) - 2
    se_block = self.blockchain.height_to_block_record(summary_heights[-2])
    prev_se_block = self.blockchain.height_to_block_record(summary_heights[-3])
    assert prev_se_block.sub_epoch_summary_included is not None
    segments = await self.__create_sub_epoch_segments(se_block, prev_se_block, uint32(sub_epoch_n))
    assert segments is not None
    await self.blockchain.persist_sub_epoch_challenge_segments(se_block.height, segments)
    log.debug("sub_epoch_segments done")
    return
async def __create_sub_epoch_segments(
    self, ses_block: BlockRecord, se_start: BlockRecord, sub_epoch_n: uint32
) -> Optional[List[SubEpochChallengeSegment]]:
    """Create every challenge segment of the sub epoch spanning ``se_start``
    (first block) to ``ses_block`` (summary block).

    Returns None if any block lookup or segment creation fails.
    """
    segments: List[SubEpochChallengeSegment] = []
    # window also covers the two slots before the sub epoch start, which the
    # first segment's VDFs need
    start_height = await self.get_prev_two_slots_height(se_start)
    blocks = await self.blockchain.get_block_records_in_range(
        start_height, ses_block.height + self.constants.MAX_SUB_SLOT_BLOCKS
    )
    header_blocks = await self.blockchain.get_header_blocks_in_range(
        start_height, ses_block.height + self.constants.MAX_SUB_SLOT_BLOCKS
    )
    curr: Optional[HeaderBlock] = header_blocks[se_start.header_hash]
    height = se_start.height
    assert curr is not None
    first = True
    idx = 0
    while curr.height < ses_block.height:
        if blocks[curr.header_hash].is_challenge_block(self.constants):
            log.debug(f"challenge segment {idx}, starts at {curr.height} ")
            # each challenge block starts a new segment; the helper also
            # returns the height at which the segment ends
            seg, height = await self._create_challenge_segment(curr, sub_epoch_n, header_blocks, blocks, first)
            if seg is None:
                log.error(f"failed creating segment {curr.header_hash} ")
                return None
            segments.append(seg)
            idx += 1
            first = False
        else:
            height = height + uint32(1)  # type: ignore
        curr = header_blocks[self.blockchain.height_to_hash(height)]
        if curr is None:
            return None
    log.debug(f"next sub epoch starts at {height}")
    return segments
async def get_prev_two_slots_height(self, se_start: BlockRecord) -> uint32:
    """Return the height of the block two sub-slot boundaries before
    ``se_start`` (or height 0 if the chain is shorter than that).

    Block records are fetched in batches of ``batch_size`` to bound memory.
    """
    # find prev 2 slots height
    slot = 0
    batch_size = 50
    curr_rec = se_start
    blocks = await self.blockchain.get_block_records_in_range(curr_rec.height - batch_size, curr_rec.height)
    end = curr_rec.height
    while slot < 2 and curr_rec.height > 0:
        if curr_rec.first_in_sub_slot:
            slot += 1
        # refetch a fresh batch once we have walked to the edge of this one
        if end - curr_rec.height == batch_size - 1:
            blocks = await self.blockchain.get_block_records_in_range(curr_rec.height - batch_size, curr_rec.height)
            end = curr_rec.height
        curr_rec = blocks[self.blockchain.height_to_hash(uint32(curr_rec.height - 1))]
    return curr_rec.height
async def _create_challenge_segment(
    self,
    header_block: HeaderBlock,
    sub_epoch_n: uint32,
    header_blocks: Dict[bytes32, HeaderBlock],
    blocks: Dict[bytes32, BlockRecord],
    first_segment_in_sub_epoch: bool,
) -> Tuple[Optional[SubEpochChallengeSegment], uint32]:
    """Assemble one challenge segment around the challenge block
    ``header_block``.

    Returns:
        (segment, height where the segment ends), or (None, 0) on failure.
    """
    assert self.blockchain is not None
    sub_slots: List[SubSlotData] = []
    log.debug(f"create challenge segment block {header_block.header_hash} block height {header_block.height} ")
    # VDFs from sub slots before challenge block
    first_sub_slots, first_rc_end_of_slot_vdf = await self.__first_sub_slot_vdfs(
        header_block, header_blocks, blocks, first_segment_in_sub_epoch
    )
    if first_sub_slots is None:
        log.error("failed building first sub slots")
        return None, uint32(0)
    sub_slots.extend(first_sub_slots)
    # the challenge block's own VDFs
    sub_slots.append(
        await _challenge_block_vdfs(
            self.constants,
            header_block,
            blocks[header_block.header_hash],
            blocks,
        )
    )
    # VDFs from the slot after the challenge block to the end of the slot
    log.debug(f"create slot end vdf for block {header_block.header_hash} height {header_block.height} ")
    challenge_slot_end_sub_slots, end_height = await self.__slot_end_vdf(
        uint32(header_block.height + 1), header_blocks, blocks
    )
    if challenge_slot_end_sub_slots is None:
        log.error("failed building slot end ")
        return None, uint32(0)
    sub_slots.extend(challenge_slot_end_sub_slots)
    # only the first segment of a non-genesis sub epoch carries the reward
    # chain end-of-slot VDF
    carries_rc_vdf = first_segment_in_sub_epoch and sub_epoch_n != 0
    rc_vdf = first_rc_end_of_slot_vdf if carries_rc_vdf else None
    return SubEpochChallengeSegment(sub_epoch_n, sub_slots, rc_vdf), end_height
# returns a challenge chain vdf from slot start to signage point
async def __first_sub_slot_vdfs(
    self,
    header_block: HeaderBlock,
    header_blocks: Dict[bytes32, HeaderBlock],
    blocks: Dict[bytes32, BlockRecord],
    first_in_sub_epoch: bool,
) -> Tuple[Optional[List[SubSlotData]], Optional[VDFInfo]]:
    """Collect SubSlotData for every block from the start of the challenge
    block's sub slot up to (but excluding) the challenge block itself.

    Returns:
        (sub slot data list, rc end-of-slot vdf — only set when this is the
        first segment of a sub epoch), or (None, None) on a failed lookup.
    """
    # combine cc vdfs of all reward blocks from the start of the sub slot to end
    header_block_sub_rec = blocks[header_block.header_hash]
    # find slot start
    curr_sub_rec = header_block_sub_rec
    first_rc_end_of_slot_vdf = None
    if first_in_sub_epoch and curr_sub_rec.height > 0:
        # rewind to the block that included the sub epoch summary
        while not curr_sub_rec.sub_epoch_summary_included:
            curr_sub_rec = blocks[curr_sub_rec.prev_hash]
        first_rc_end_of_slot_vdf = self.first_rc_end_of_slot_vdf(header_block, blocks, header_blocks)
    else:
        if header_block_sub_rec.overflow and header_block_sub_rec.first_in_sub_slot:
            # overflow block that starts a slot: rewind across two finished slots
            sub_slots_num = 2
            while sub_slots_num > 0 and curr_sub_rec.height > 0:
                if curr_sub_rec.first_in_sub_slot:
                    assert curr_sub_rec.finished_challenge_slot_hashes is not None
                    sub_slots_num -= len(curr_sub_rec.finished_challenge_slot_hashes)
                curr_sub_rec = blocks[curr_sub_rec.prev_hash]
        else:
            # rewind to the first block of the current sub slot
            while not curr_sub_rec.first_in_sub_slot and curr_sub_rec.height > 0:
                curr_sub_rec = blocks[curr_sub_rec.prev_hash]
    curr = header_blocks[curr_sub_rec.header_hash]
    sub_slots_data: List[SubSlotData] = []
    tmp_sub_slots_data: List[SubSlotData] = []
    # NOTE(review): this re-lookup fetches the same header again — looks redundant
    curr = header_blocks[curr.header_hash]
    while curr.height < header_block.height:
        if curr is None:
            log.error("failed fetching block")
            return None, None
        if curr.first_in_sub_slot:
            # if not blue boxed
            if not blue_boxed_end_of_slot(curr.finished_sub_slots[0]):
                sub_slots_data.extend(tmp_sub_slots_data)
            for idx, sub_slot in enumerate(curr.finished_sub_slots):
                curr_icc_info = None
                if sub_slot.infused_challenge_chain is not None:
                    curr_icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
                sub_slots_data.append(handle_finished_slots(sub_slot, curr_icc_info))
            tmp_sub_slots_data = []
        # per-block entry: only the ip vdfs and signage point index are filled
        ssd = SubSlotData(
            None,
            None,
            None,
            None,
            None,
            curr.reward_chain_block.signage_point_index,
            None,
            None,
            None,
            None,
            curr.reward_chain_block.challenge_chain_ip_vdf,
            curr.reward_chain_block.infused_challenge_chain_ip_vdf,
            curr.total_iters,
        )
        tmp_sub_slots_data.append(ssd)
        curr = header_blocks[self.blockchain.height_to_hash(uint32(curr.height + 1))]
    if len(tmp_sub_slots_data) > 0:
        sub_slots_data.extend(tmp_sub_slots_data)
    # finished slots carried by the challenge block itself
    for idx, sub_slot in enumerate(header_block.finished_sub_slots):
        curr_icc_info = None
        if sub_slot.infused_challenge_chain is not None:
            curr_icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
        sub_slots_data.append(handle_finished_slots(sub_slot, curr_icc_info))
    return sub_slots_data, first_rc_end_of_slot_vdf
def first_rc_end_of_slot_vdf(
    self,
    header_block,
    blocks: Dict[bytes32, BlockRecord],
    header_blocks: Dict[bytes32, HeaderBlock],
) -> Optional[VDFInfo]:
    """Return the reward-chain end-of-slot VDF of the last finished sub slot
    of the nearest ancestor of ``header_block`` that included a sub-epoch
    summary (or of the block at height 0 if none did)."""
    record = blocks[header_block.header_hash]
    # walk back until a summary block or the genesis block is reached
    while not (record.height == 0 or record.sub_epoch_summary_included):
        record = blocks[record.prev_hash]
    boundary_header = header_blocks[record.header_hash]
    last_finished_slot = boundary_header.finished_sub_slots[-1]
    return last_finished_slot.reward_chain.end_of_slot_vdf
async def __slot_end_vdf(
    self, start_height: uint32, header_blocks: Dict[bytes32, HeaderBlock], blocks: Dict[bytes32, BlockRecord]
) -> Tuple[Optional[List[SubSlotData]], uint32]:
    """Collect SubSlotData from ``start_height`` (the block after a challenge
    block) forward until the next challenge block.

    Returns:
        (collected sub slot data, height of the next challenge block).
    """
    # gets all vdfs first sub slot after challenge block to last sub slot
    log.debug(f"slot end vdf start height {start_height}")
    curr = header_blocks[self.blockchain.height_to_hash(start_height)]
    sub_slots_data: List[SubSlotData] = []
    tmp_sub_slots_data: List[SubSlotData] = []
    while not blocks[curr.header_hash].is_challenge_block(self.constants):
        if curr.first_in_sub_slot:
            sub_slots_data.extend(tmp_sub_slots_data)
            # add collected vdfs
            for idx, sub_slot in enumerate(curr.finished_sub_slots):
                prev_rec = blocks[curr.prev_header_hash]
                eos_vdf_iters = prev_rec.sub_slot_iters
                if idx == 0:
                    # first finished slot: subtract the iterations already
                    # consumed by the previous block's infusion point
                    eos_vdf_iters = uint64(prev_rec.sub_slot_iters - prev_rec.ip_iters(self.constants))
                sub_slots_data.append(handle_end_of_slot(sub_slot, eos_vdf_iters))
            tmp_sub_slots_data = []
        tmp_sub_slots_data.append(self.handle_block_vdfs(curr, blocks))
        curr = header_blocks[self.blockchain.height_to_hash(uint32(curr.height + 1))]
    if len(tmp_sub_slots_data) > 0:
        sub_slots_data.extend(tmp_sub_slots_data)
    log.debug(f"slot end vdf end height {curr.height} slots {len(sub_slots_data)} ")
    return sub_slots_data, curr.height
def handle_block_vdfs(self, curr: HeaderBlock, blocks: Dict[bytes32, BlockRecord]):
    """Build a SubSlotData entry for a single block (not an end of slot).

    Populates the block's signage point and infusion point proofs/infos; for a
    non-normalized (not blue-boxed) sp proof the vdf info is rebuilt with the
    recomputed iteration count.
    """
    cc_sp_proof = None
    icc_ip_proof = None
    cc_sp_info = None
    icc_ip_info = None
    block_record = blocks[curr.header_hash]
    if curr.infused_challenge_chain_ip_proof is not None:
        assert curr.reward_chain_block.infused_challenge_chain_ip_vdf
        icc_ip_proof = curr.infused_challenge_chain_ip_proof
        icc_ip_info = curr.reward_chain_block.infused_challenge_chain_ip_vdf
    if curr.challenge_chain_sp_proof is not None:
        assert curr.reward_chain_block.challenge_chain_sp_vdf
        cc_sp_vdf_info = curr.reward_chain_block.challenge_chain_sp_vdf
        if not curr.challenge_chain_sp_proof.normalized_to_identity:
            # non blue-boxed proof: recompute the sp vdf iteration count
            (_, _, _, _, cc_vdf_iters, _,) = get_signage_point_vdf_info(
                self.constants,
                curr.finished_sub_slots,
                block_record.overflow,
                None if curr.height == 0 else blocks[curr.prev_header_hash],
                BlockCache(blocks),
                block_record.sp_total_iters(self.constants),
                block_record.sp_iters(self.constants),
            )
            cc_sp_vdf_info = VDFInfo(
                curr.reward_chain_block.challenge_chain_sp_vdf.challenge,
                cc_vdf_iters,
                curr.reward_chain_block.challenge_chain_sp_vdf.output,
            )
        cc_sp_proof = curr.challenge_chain_sp_proof
        cc_sp_info = cc_sp_vdf_info
    return SubSlotData(
        None,
        cc_sp_proof,
        curr.challenge_chain_ip_proof,
        icc_ip_proof,
        cc_sp_info,
        curr.reward_chain_block.signage_point_index,
        None,
        None,
        None,
        None,
        curr.reward_chain_block.challenge_chain_ip_vdf,
        icc_ip_info,
        curr.total_iters,
    )
def validate_weight_proof_single_proc(self, weight_proof: WeightProof) -> Tuple[bool, uint32]:
    """Validate ``weight_proof`` entirely in the current process.

    Returns:
        (True, fork point height) on success, otherwise (False, 0).
    """
    assert self.blockchain is not None
    # return gracefully on an empty proof; the previous `assert len(...) > 0`
    # made this branch unreachable
    if len(weight_proof.sub_epochs) == 0:
        return False, uint32(0)
    peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height
    log.info(f"validate weight proof peak height {peak_height}")
    summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
    if summaries is None:
        log.warning("weight proof failed sub epoch data validation")
        return False, uint32(0)
    constants, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes = vars_to_bytes(
        self.constants, summaries, weight_proof
    )
    log.info("validate sub epoch challenge segments")
    # use second to last ses as seed for the sampling rng
    seed = summaries[-2].get_hash()
    rng = random.Random(seed)
    if not validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
        log.error("failed weight proof sub epoch sample validation")
        return False, uint32(0)
    if not _validate_sub_epoch_segments(constants, rng, wp_segment_bytes, summary_bytes):
        return False, uint32(0)
    log.info("validate weight proof recent blocks")
    if not _validate_recent_blocks(constants, wp_recent_chain_bytes, summary_bytes):
        return False, uint32(0)
    return True, self.get_fork_point(summaries)
def get_fork_point_no_validations(self, weight_proof: WeightProof) -> Tuple[bool, uint32]:
    """Find the fork point from a weight proof's summaries without running the
    expensive segment / recent-chain validation.

    Returns:
        (True, fork point height) when the summaries validate, else (False, 0).
    """
    log.debug("get fork point skip validations")
    assert self.blockchain is not None
    # return gracefully on an empty proof; the previous `assert len(...) > 0`
    # made this branch unreachable
    if len(weight_proof.sub_epochs) == 0:
        return False, uint32(0)
    summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
    if summaries is None:
        log.warning("weight proof failed to validate sub epoch summaries")
        return False, uint32(0)
    return True, self.get_fork_point(summaries)
async def validate_weight_proof(self, weight_proof: WeightProof) -> Tuple[bool, uint32]:
    """Validate ``weight_proof``, off-loading the heavy segment and
    recent-chain checks to a worker process.

    Returns:
        (True, fork point height) on success, otherwise (False, 0).
    """
    assert self.blockchain is not None
    # return gracefully on an empty proof; the previous `assert len(...) > 0`
    # made this branch unreachable
    if len(weight_proof.sub_epochs) == 0:
        return False, uint32(0)
    peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height
    log.info(f"validate weight proof peak height {peak_height}")
    summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
    if summaries is None:
        log.error("weight proof failed sub epoch data validation")
        return False, uint32(0)
    seed = summaries[-2].get_hash()
    rng = random.Random(seed)
    if not validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
        log.error("failed weight proof sub epoch sample validation")
        return False, uint32(0)
    constants, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes = vars_to_bytes(
        self.constants, summaries, weight_proof
    )
    executor = ProcessPoolExecutor(1)
    try:
        loop = asyncio.get_running_loop()
        segment_validation_task = loop.run_in_executor(
            executor, _validate_sub_epoch_segments, constants, rng, wp_segment_bytes, summary_bytes
        )
        recent_blocks_validation_task = loop.run_in_executor(
            executor, _validate_recent_blocks, constants, wp_recent_chain_bytes, summary_bytes
        )
        valid_recent_blocks = await recent_blocks_validation_task
        if not valid_recent_blocks:
            log.error("failed validating weight proof recent blocks")
            return False, uint32(0)
        valid_segments = await segment_validation_task
        if not valid_segments:
            log.error("failed validating weight proof sub epoch segments")
            return False, uint32(0)
    finally:
        # the executor was previously never shut down (resource leak);
        # wait=False keeps early error returns from blocking the event loop
        executor.shutdown(wait=False)
    return True, self.get_fork_point(summaries)
def get_fork_point(self, received_summaries: List[SubEpochSummary]) -> uint32:
    """Return a safe height to fork from, based on where the received
    sub-epoch summaries diverge from our local ones.

    Two chains can share identical summaries while differing in blocks; this
    only resolves after a full sub epoch, hence the two-summary step back.
    """
    matched_index = 0
    ses_heights = self.blockchain.get_ses_heights()
    for idx, summary_height in enumerate(ses_heights):
        log.debug(f"check summary {idx} height {summary_height}")
        local_ses = self.blockchain.get_ses(summary_height)
        if local_ses is None or local_ses.get_hash() != received_summaries[idx].get_hash():
            break
        matched_index = idx
    if matched_index <= 2:
        return uint32(0)
    # step two summaries back from the last matching one
    return ses_heights[matched_index - 2]
def _get_weights_for_sampling(
    rng: random.Random, total_weight: uint128, recent_chain: List[HeaderBlock]
) -> Optional[List[uint128]]:
    """Draw the cumulative chain weights at which sub epochs will be sampled.

    Returns a sorted list of sampled weights, or None when the derived
    adversary success probability is not positive.
    """
    last_l_weight = recent_chain[-1].reward_chain_block.weight - recent_chain[0].reward_chain_block.weight
    delta = last_l_weight / total_weight
    prob_of_adv_succeeding = 1 - math.log(WeightProofHandler.C, delta)
    if prob_of_adv_succeeding <= 0:
        return None
    queries = -WeightProofHandler.LAMBDA_L * math.log(2, prob_of_adv_succeeding)
    samples = []
    for _ in range(int(queries) + 1):
        # todo check division and type conversions
        q = 1 - delta ** rng.random()
        samples.append(uint128(q * float(total_weight)))
    return sorted(samples)
def _sample_sub_epoch(
start_of_epoch_weight: uint128,
end_of_epoch_weight: uint128,
weight_to_check: List[uint128],
) -> bool:
"""
weight_to_check: List[uint128] is expected to be sorted
"""
if weight_to_check is None:
return True
if weight_to_check[-1] < start_of_epoch_weight:
return False
if weight_to_check[0] > end_of_epoch_weight:
return False
choose = False
for weight in weight_to_check:
if weight > end_of_epoch_weight:
return False
if start_of_epoch_weight < weight < end_of_epoch_weight:
log.debug(f"start weight: {start_of_epoch_weight}")
log.debug(f"weight to check {weight}")
log.debug(f"end weight: {end_of_epoch_weight}")
choose = True
break
return choose
# wp creation methods
def _create_sub_epoch_data(
    sub_epoch_summary: SubEpochSummary,
) -> SubEpochData:
    """Project a SubEpochSummary onto the compact SubEpochData carried in a
    weight proof: reward chain hash, previous-slot overflow block count, and
    the (optional) new sub-slot iterations and difficulty."""
    return SubEpochData(
        sub_epoch_summary.reward_chain_hash,
        sub_epoch_summary.num_blocks_overflow,
        sub_epoch_summary.new_sub_slot_iters,
        sub_epoch_summary.new_difficulty,
    )
async def _challenge_block_vdfs(
    constants: ConsensusConstants,
    header_block: HeaderBlock,
    block_rec: BlockRecord,
    sub_blocks: Dict[bytes32, BlockRecord],
):
    """Build the SubSlotData entry for a challenge block, including its proof
    of space and signage/infusion point proofs.

    For a non-normalized sp proof the vdf info is rebuilt with the recomputed
    iteration count.
    """
    (_, _, _, _, cc_vdf_iters, _,) = get_signage_point_vdf_info(
        constants,
        header_block.finished_sub_slots,
        block_rec.overflow,
        None if header_block.height == 0 else sub_blocks[header_block.prev_header_hash],
        BlockCache(sub_blocks),
        block_rec.sp_total_iters(constants),
        block_rec.sp_iters(constants),
    )
    cc_sp_info = None
    if header_block.reward_chain_block.challenge_chain_sp_vdf:
        cc_sp_info = header_block.reward_chain_block.challenge_chain_sp_vdf
        assert header_block.challenge_chain_sp_proof
        if not header_block.challenge_chain_sp_proof.normalized_to_identity:
            # non blue-boxed proof: use the recomputed iteration count
            cc_sp_info = VDFInfo(
                header_block.reward_chain_block.challenge_chain_sp_vdf.challenge,
                cc_vdf_iters,
                header_block.reward_chain_block.challenge_chain_sp_vdf.output,
            )
    ssd = SubSlotData(
        header_block.reward_chain_block.proof_of_space,
        header_block.challenge_chain_sp_proof,
        header_block.challenge_chain_ip_proof,
        None,
        cc_sp_info,
        header_block.reward_chain_block.signage_point_index,
        None,
        None,
        None,
        None,
        header_block.reward_chain_block.challenge_chain_ip_vdf,
        header_block.reward_chain_block.infused_challenge_chain_ip_vdf,
        block_rec.total_iters,
    )
    return ssd
def handle_finished_slots(end_of_slot: EndOfSubSlotBundle, icc_end_of_slot_info):
    """Build a SubSlotData entry for a finished (end-of-slot) sub slot.

    Only the end-of-slot proofs and vdf infos are populated; all block-level
    fields are None.
    """
    # the proofs are passed through directly — the original wrapped each in a
    # redundant `None if x is None else x`
    return SubSlotData(
        None,
        None,
        None,
        None,
        None,
        None,
        end_of_slot.proofs.challenge_chain_slot_proof,
        end_of_slot.proofs.infused_challenge_chain_slot_proof,
        end_of_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
        icc_end_of_slot_info,
        None,
        None,
        None,
    )
def handle_end_of_slot(
    sub_slot: EndOfSubSlotBundle,
    eos_vdf_iters: uint64,
):
    """Build a SubSlotData entry for an end-of-slot bundle.

    For proofs that are not normalized to identity (not blue boxed) the vdf
    info is rebuilt with ``eos_vdf_iters`` as the iteration count.
    """
    assert sub_slot.infused_challenge_chain
    assert sub_slot.proofs.infused_challenge_chain_slot_proof
    if sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity:
        icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
    else:
        icc_info = VDFInfo(
            sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.challenge,
            eos_vdf_iters,
            sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
        )
    if sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity:
        cc_info = sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf
    else:
        cc_info = VDFInfo(
            sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
            eos_vdf_iters,
            sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.output,
        )
    # (a duplicate assert of infused_challenge_chain_slot_proof was removed;
    # it is already asserted at the top of the function)
    return SubSlotData(
        None,
        None,
        None,
        None,
        None,
        None,
        sub_slot.proofs.challenge_chain_slot_proof,
        sub_slot.proofs.infused_challenge_chain_slot_proof,
        cc_info,
        icc_info,
        None,
        None,
        None,
    )
def compress_segments(full_segment_index, segments: List[SubEpochChallengeSegment]) -> List[SubEpochChallengeSegment]:
    """Return ``segments`` with every segment compressed except the first one
    and the one at ``full_segment_index``.
    """
    compressed_segments = [segments[0]]
    # start=1 keeps idx aligned with positions in `segments`; the original
    # enumerate over segments[1:] was off by one against full_segment_index,
    # so the segment AFTER the chosen one was kept full
    for idx, segment in enumerate(segments[1:], start=1):
        if idx != full_segment_index:
            # remove all redundant values
            segment = compress_segment(segment)
        compressed_segments.append(segment)
    return compressed_segments
def compress_segment(segment: SubEpochChallengeSegment) -> SubEpochChallengeSegment:
    """Return a copy of ``segment`` truncated just after its challenge sub
    slot; everything past the challenge slot is redundant for validation."""
    # find challenge slot
    comp_seg = SubEpochChallengeSegment(segment.sub_epoch_n, [], segment.rc_slot_end_info)
    for slot in segment.sub_slots:
        comp_seg.sub_slots.append(slot)
        if slot.is_challenge():
            break
    # BUGFIX: return the compressed copy — the original returned the untouched
    # input, making the compression a silent no-op
    return comp_seg
# wp validation methods
def _validate_sub_epoch_summaries(
    constants: ConsensusConstants,
    weight_proof: WeightProof,
) -> Tuple[Optional[List[SubEpochSummary]], Optional[List[uint128]]]:
    """Rebuild the sub-epoch summaries from the proof's SubEpochData and check
    them against the recent chain.

    Returns:
        (summaries, per-sub-epoch start weights), or (None, None) on failure.
    """
    last_ses_hash, last_ses_sub_height = _get_last_ses_hash(constants, weight_proof.recent_chain_data)
    if last_ses_hash is None:
        log.warning("could not find last ses block")
        return None, None
    summaries, total, sub_epoch_weight_list = _map_sub_epoch_summaries(
        constants.SUB_EPOCH_BLOCKS,
        constants.GENESIS_CHALLENGE,
        weight_proof.sub_epochs,
        constants.DIFFICULTY_STARTING,
    )
    log.info(f"validating {len(summaries)} sub epochs")
    # validate weight
    if not _validate_summaries_weight(constants, total, summaries, weight_proof):
        log.error("failed validating weight")
        return None, None
    log.debug(f"last ses sub height {last_ses_sub_height}")
    # validate last ses_hash
    if summaries[-1].get_hash() != last_ses_hash:
        log.error(f"failed to validate ses hashes block height {last_ses_sub_height}")
        return None, None
    return summaries, sub_epoch_weight_list
def _map_sub_epoch_summaries(
    sub_blocks_for_se: uint32,
    ses_hash: bytes32,
    sub_epoch_data: List[SubEpochData],
    curr_difficulty: uint64,
) -> Tuple[List[SubEpochSummary], uint128, List[uint128]]:
    """Rebuild the chain of SubEpochSummary objects from compact SubEpochData.

    Each summary is chained to the hash of the previous one, starting from
    ``ses_hash``; a running total weight is accumulated along the way.

    Returns:
        (summaries, total weight after the last complete sub epoch,
         start weight of each sub epoch).
    """
    total_weight: uint128 = uint128(0)
    summaries: List[SubEpochSummary] = []
    sub_epoch_weight_list: List[uint128] = []
    for idx, data in enumerate(sub_epoch_data):
        ses = SubEpochSummary(
            ses_hash,
            data.reward_chain_hash,
            data.num_blocks_overflow,
            data.new_difficulty,
            data.new_sub_slot_iters,
        )
        if idx < len(sub_epoch_data) - 1:
            delta = 0
            if idx > 0:
                delta = sub_epoch_data[idx].num_blocks_overflow
            log.debug(f"sub epoch {idx} start weight is {total_weight+curr_difficulty} ")
            sub_epoch_weight_list.append(uint128(total_weight + curr_difficulty))
            # sub epoch weight = difficulty * block count, where the count is
            # corrected by this and the next sub epoch's overflow blocks
            total_weight = total_weight + uint128(  # type: ignore
                curr_difficulty * (sub_blocks_for_se + sub_epoch_data[idx + 1].num_blocks_overflow - delta)
            )
        # if new epoch update diff and iters
        if data.new_difficulty is not None:
            curr_difficulty = data.new_difficulty
        # add to dict
        summaries.append(ses)
        ses_hash = std_hash(ses)
    # add last sub epoch weight
    sub_epoch_weight_list.append(uint128(total_weight + curr_difficulty))
    return summaries, total_weight, sub_epoch_weight_list
def _validate_summaries_weight(constants: ConsensusConstants, sub_epoch_data_weight, summaries, weight_proof) -> bool:
num_over = summaries[-1].num_blocks_overflow
ses_end_height = (len(summaries) - 1) * constants.SUB_EPOCH_BLOCKS + num_over - 1
curr = None
for block in weight_proof.recent_chain_data:
if block.reward_chain_block.height == ses_end_height:
curr = block
if curr is None:
return False
return curr.reward_chain_block.weight == sub_epoch_data_weight
def _validate_sub_epoch_segments(
    constants_dict: Dict,
    rng: random.Random,
    weight_proof_bytes: bytes,
    summaries_bytes: List[bytes],
):
    """Validate the challenge segments of every sub epoch in the proof.

    Designed to run in a worker process, hence the serialized arguments.
    One segment per sub epoch is sampled (via ``rng``) for full vdf checks.

    Returns True when all segments validate.
    """
    constants, summaries = bytes_to_vars(constants_dict, summaries_bytes)
    sub_epoch_segments: SubEpochSegments = SubEpochSegments.from_bytes(weight_proof_bytes)
    rc_sub_slot_hash = constants.GENESIS_CHALLENGE
    # (a duplicate re-initialization of total_ip_iters was removed)
    total_blocks, total_ip_iters = 0, 0
    total_slot_iters, total_slots = 0, 0
    prev_ses: Optional[SubEpochSummary] = None
    segments_by_sub_epoch = map_segments_by_sub_epoch(sub_epoch_segments.challenge_segments)
    curr_ssi = constants.SUB_SLOT_ITERS_STARTING
    for sub_epoch_n, segments in segments_by_sub_epoch.items():
        prev_ssi = curr_ssi
        curr_difficulty, curr_ssi = _get_curr_diff_ssi(constants, sub_epoch_n, summaries)
        log.debug(f"validate sub epoch {sub_epoch_n}")
        # recreate RewardChainSubSlot for next ses rc_hash
        sampled_seg_index = rng.choice(range(len(segments)))
        if sub_epoch_n > 0:
            rc_sub_slot = __get_rc_sub_slot(constants, segments[0], summaries, curr_ssi)
            prev_ses = summaries[sub_epoch_n - 1]
            rc_sub_slot_hash = rc_sub_slot.get_hash()
        if not summaries[sub_epoch_n].reward_chain_hash == rc_sub_slot_hash:
            log.error(f"failed reward_chain_hash validation sub_epoch {sub_epoch_n}")
            return False
        for idx, segment in enumerate(segments):
            # full vdf validation only for the sampled segment (last flag)
            valid_segment, ip_iters, slot_iters, slots = _validate_segment(
                constants, segment, curr_ssi, prev_ssi, curr_difficulty, prev_ses, idx == 0, sampled_seg_index == idx
            )
            if not valid_segment:
                log.error(f"failed to validate sub_epoch {segment.sub_epoch_n} segment {idx} slots")
                return False
            prev_ses = None
            total_blocks += 1
            total_slot_iters += slot_iters
            total_slots += slots
            total_ip_iters += ip_iters
    return True
def _validate_segment(
    constants: ConsensusConstants,
    segment: SubEpochChallengeSegment,
    curr_ssi: uint64,
    prev_ssi: uint64,
    curr_difficulty: uint64,
    ses: Optional[SubEpochSummary],
    first_segment_in_se: bool,
    sampled: bool,
) -> Tuple[bool, int, int, int]:
    """Validate one challenge segment.

    When ``sampled`` is False only the iteration totals are accumulated; when
    True the challenge block's proof of space and all vdfs after it are checked.

    Returns:
        (valid, ip iterations, slot iterations, slot count); zeros on failure.
    """
    ip_iters, slot_iters, slots = 0, 0, 0
    after_challenge = False
    for idx, sub_slot_data in enumerate(segment.sub_slots):
        if sampled and sub_slot_data.is_challenge():
            after_challenge = True
            required_iters = __validate_pospace(constants, segment, idx, curr_difficulty, ses, first_segment_in_se)
            if required_iters is None:
                return False, uint64(0), uint64(0), uint64(0)
            assert sub_slot_data.signage_point_index is not None
            ip_iters = ip_iters + calculate_ip_iters(  # type: ignore
                constants, curr_ssi, sub_slot_data.signage_point_index, required_iters
            )
            if not _validate_challenge_block_vdfs(constants, idx, segment.sub_slots, curr_ssi):
                log.error(f"failed to validate challenge slot {idx} vdfs")
                return False, uint64(0), uint64(0), uint64(0)
        elif sampled and after_challenge:
            # sub slots after the challenge block get full vdf validation
            if not _validate_sub_slot_data(constants, idx, segment.sub_slots, curr_ssi):
                log.error(f"failed to validate sub slot data {idx} vdfs")
                return False, uint64(0), uint64(0), uint64(0)
        slot_iters = slot_iters + curr_ssi  # type: ignore
        slots = slots + uint64(1)  # type: ignore
    return True, ip_iters, slot_iters, slots
def _validate_challenge_block_vdfs(
    constants: ConsensusConstants,
    sub_slot_idx: int,
    sub_slots: List[SubSlotData],
    ssi: uint64,
) -> bool:
    """Validate the signage point and infusion point vdfs of the challenge
    block at ``sub_slots[sub_slot_idx]``.

    Non-normalized proofs are validated against inputs/iteration counts
    derived from the previous sub slot entry.
    """
    sub_slot_data = sub_slots[sub_slot_idx]
    if sub_slot_data.cc_signage_point is not None and sub_slot_data.cc_sp_vdf_info:
        assert sub_slot_data.signage_point_index
        sp_input = ClassgroupElement.get_default_element()
        if not sub_slot_data.cc_signage_point.normalized_to_identity and sub_slot_idx >= 1:
            is_overflow = is_overflow_block(constants, sub_slot_data.signage_point_index)
            prev_ssd = sub_slots[sub_slot_idx - 1]
            sp_input = sub_slot_data_vdf_input(
                constants, sub_slot_data, sub_slot_idx, sub_slots, is_overflow, prev_ssd.is_end_of_slot(), ssi
            )
        if not sub_slot_data.cc_signage_point.is_valid(constants, sp_input, sub_slot_data.cc_sp_vdf_info):
            log.error(f"failed to validate challenge chain signage point 2 {sub_slot_data.cc_sp_vdf_info}")
            return False
    assert sub_slot_data.cc_infusion_point
    assert sub_slot_data.cc_ip_vdf_info
    ip_input = ClassgroupElement.get_default_element()
    cc_ip_vdf_info = sub_slot_data.cc_ip_vdf_info
    if not sub_slot_data.cc_infusion_point.normalized_to_identity and sub_slot_idx >= 1:
        prev_ssd = sub_slots[sub_slot_idx - 1]
        if prev_ssd.cc_slot_end is None:
            # previous entry is a block in the same slot: chain from its ip
            # output, with iterations equal to the total_iters delta
            assert prev_ssd.cc_ip_vdf_info
            assert prev_ssd.total_iters
            assert sub_slot_data.total_iters
            ip_input = prev_ssd.cc_ip_vdf_info.output
            ip_vdf_iters = uint64(sub_slot_data.total_iters - prev_ssd.total_iters)
            cc_ip_vdf_info = VDFInfo(
                sub_slot_data.cc_ip_vdf_info.challenge, ip_vdf_iters, sub_slot_data.cc_ip_vdf_info.output
            )
    if not sub_slot_data.cc_infusion_point.is_valid(constants, ip_input, cc_ip_vdf_info):
        log.error(f"failed to validate challenge chain infusion point {sub_slot_data.cc_ip_vdf_info}")
        return False
    return True
def _validate_sub_slot_data(
    constants: ConsensusConstants,
    sub_slot_idx: int,
    sub_slots: List[SubSlotData],
    ssi: uint64,
) -> bool:
    """Validate the vdfs of a single SubSlotData entry that follows the
    segment's challenge block.

    End-of-slot entries get their cc/icc slot-end vdfs checked; block entries
    get their icc infusion, cc signage and cc infusion point vdfs checked.
    Intermediate vdfs are skipped entirely if the enclosing slot ends with a
    blue-boxed (normalized-to-identity) proof.
    """
    sub_slot_data = sub_slots[sub_slot_idx]
    assert sub_slot_idx > 0
    prev_ssd = sub_slots[sub_slot_idx - 1]
    if sub_slot_data.is_end_of_slot():
        if sub_slot_data.icc_slot_end is not None:
            input = ClassgroupElement.get_default_element()
            if not sub_slot_data.icc_slot_end.normalized_to_identity and prev_ssd.icc_ip_vdf_info is not None:
                assert prev_ssd.icc_ip_vdf_info
                input = prev_ssd.icc_ip_vdf_info.output
            assert sub_slot_data.icc_slot_end_info
            if not sub_slot_data.icc_slot_end.is_valid(constants, input, sub_slot_data.icc_slot_end_info, None):
                log.error(f"failed icc slot end validation {sub_slot_data.icc_slot_end_info} ")
                return False
        assert sub_slot_data.cc_slot_end_info
        assert sub_slot_data.cc_slot_end
        input = ClassgroupElement.get_default_element()
        if (not prev_ssd.is_end_of_slot()) and (not sub_slot_data.cc_slot_end.normalized_to_identity):
            # chain the slot-end vdf from the previous block's ip output
            assert prev_ssd.cc_ip_vdf_info
            input = prev_ssd.cc_ip_vdf_info.output
        if not sub_slot_data.cc_slot_end.is_valid(constants, input, sub_slot_data.cc_slot_end_info):
            log.error(f"failed cc slot end validation {sub_slot_data.cc_slot_end_info}")
            return False
    else:
        # find end of slot
        idx = sub_slot_idx
        while idx < len(sub_slots) - 1:
            curr_slot = sub_slots[idx]
            if curr_slot.is_end_of_slot():
                # dont validate intermediate vdfs if slot is blue boxed
                assert curr_slot.cc_slot_end
                if curr_slot.cc_slot_end.normalized_to_identity is True:
                    log.debug(f"skip intermediate vdfs slot {sub_slot_idx}")
                    return True
                else:
                    break
            idx += 1
        if sub_slot_data.icc_infusion_point is not None and sub_slot_data.icc_ip_vdf_info is not None:
            input = ClassgroupElement.get_default_element()
            if not prev_ssd.is_challenge() and prev_ssd.icc_ip_vdf_info is not None:
                input = prev_ssd.icc_ip_vdf_info.output
            if not sub_slot_data.icc_infusion_point.is_valid(constants, input, sub_slot_data.icc_ip_vdf_info, None):
                log.error(f"failed icc infusion point vdf validation {sub_slot_data.icc_slot_end_info} ")
                return False
        assert sub_slot_data.signage_point_index is not None
        if sub_slot_data.cc_signage_point:
            assert sub_slot_data.cc_sp_vdf_info
            input = ClassgroupElement.get_default_element()
            if not sub_slot_data.cc_signage_point.normalized_to_identity:
                is_overflow = is_overflow_block(constants, sub_slot_data.signage_point_index)
                input = sub_slot_data_vdf_input(
                    constants, sub_slot_data, sub_slot_idx, sub_slots, is_overflow, prev_ssd.is_end_of_slot(), ssi
                )
            if not sub_slot_data.cc_signage_point.is_valid(constants, input, sub_slot_data.cc_sp_vdf_info):
                log.error(f"failed cc signage point vdf validation {sub_slot_data.cc_sp_vdf_info}")
                return False
        input = ClassgroupElement.get_default_element()
        assert sub_slot_data.cc_ip_vdf_info
        assert sub_slot_data.cc_infusion_point
        cc_ip_vdf_info = sub_slot_data.cc_ip_vdf_info
        if not sub_slot_data.cc_infusion_point.normalized_to_identity and prev_ssd.cc_slot_end is None:
            # previous entry is a block in the same slot: rebuild the ip vdf
            # info with iterations equal to the total_iters delta
            assert prev_ssd.cc_ip_vdf_info
            input = prev_ssd.cc_ip_vdf_info.output
            assert sub_slot_data.total_iters
            assert prev_ssd.total_iters
            ip_vdf_iters = uint64(sub_slot_data.total_iters - prev_ssd.total_iters)
            cc_ip_vdf_info = VDFInfo(
                sub_slot_data.cc_ip_vdf_info.challenge, ip_vdf_iters, sub_slot_data.cc_ip_vdf_info.output
            )
        if not sub_slot_data.cc_infusion_point.is_valid(constants, input, cc_ip_vdf_info):
            log.error(f"failed cc infusion point vdf validation {sub_slot_data.cc_slot_end_info}")
            return False
    return True
def sub_slot_data_vdf_input(
    constants: ConsensusConstants,
    sub_slot_data: SubSlotData,
    sub_slot_idx: int,
    sub_slots: List[SubSlotData],
    is_overflow: bool,
    new_sub_slot: bool,
    ssi: uint64,
) -> ClassgroupElement:
    """Find the input element for this block's challenge-chain signage-point VDF.

    Depending on whether the block is an overflow block and whether it starts a
    new sub slot, the signage point may chain from the infusion-point output of
    an earlier block in `sub_slots`; otherwise the default (identity) classgroup
    element is used.

    NOTE(review): the combination (not is_overflow and new_sub_slot) matches no
    branch and implicitly returns None despite the ClassgroupElement return
    annotation — presumably callers never hit that combination; confirm.
    """
    cc_input = ClassgroupElement.get_default_element()
    # Total iterations at which this block's signage point occurs.
    sp_total_iters = get_sp_total_iters(constants, is_overflow, ssi, sub_slot_data)
    ssd: Optional[SubSlotData] = None
    if is_overflow and new_sub_slot:
        if sub_slot_idx >= 2:
            if sub_slots[sub_slot_idx - 2].cc_slot_end_info is None:
                # Walk backwards for the first block at/after the last slot end
                # whose infusion happened before the signage point.
                for ssd_idx in reversed(range(0, sub_slot_idx - 1)):
                    ssd = sub_slots[ssd_idx]
                    if ssd.cc_slot_end_info is not None:
                        # Crossed a slot end: chain from the block right after it.
                        ssd = sub_slots[ssd_idx + 1]
                        break
                    if not (ssd.total_iters > sp_total_iters):
                        break
                if ssd and ssd.cc_ip_vdf_info is not None:
                    if ssd.total_iters < sp_total_iters:
                        cc_input = ssd.cc_ip_vdf_info.output
        return cc_input
    elif not is_overflow and not new_sub_slot:
        # Non-overflow block continuing the current sub slot.
        for ssd_idx in reversed(range(0, sub_slot_idx)):
            ssd = sub_slots[ssd_idx]
            if ssd.cc_slot_end_info is not None:
                ssd = sub_slots[ssd_idx + 1]
                break
            if not (ssd.total_iters > sp_total_iters):
                break
        assert ssd is not None
        if ssd.cc_ip_vdf_info is not None:
            if ssd.total_iters < sp_total_iters:
                cc_input = ssd.cc_ip_vdf_info.output
        return cc_input
    elif not new_sub_slot and is_overflow:
        # Overflow block inside an existing sub slot: the signage point may lie
        # behind at most one slot end; seeing two means it starts fresh.
        slots_seen = 0
        for ssd_idx in reversed(range(0, sub_slot_idx)):
            ssd = sub_slots[ssd_idx]
            if ssd.cc_slot_end_info is not None:
                slots_seen += 1
                if slots_seen == 2:
                    return ClassgroupElement.get_default_element()
            if ssd.cc_slot_end_info is None and not (ssd.total_iters > sp_total_iters):
                break
        assert ssd is not None
        if ssd.cc_ip_vdf_info is not None:
            if ssd.total_iters < sp_total_iters:
                cc_input = ssd.cc_ip_vdf_info.output
        return cc_input
def _validate_recent_blocks(constants_dict: Dict, recent_chain_bytes: bytes, summaries_bytes: List[bytes]) -> bool:
    """Validate the recent-chain portion of a weight proof.

    Replays the recent chain, tracking difficulty / sub-slot-iters changes from
    the sub-epoch summaries. Only the last `last_blocks_to_validate` blocks
    (once enough sub slots / transaction blocks have been seen) get a full
    header validation; earlier ones get a proof-of-space-only check.
    Returns True when every checked block passes.
    """
    constants, summaries = bytes_to_vars(constants_dict, summaries_bytes)
    recent_chain: RecentChainData = RecentChainData.from_bytes(recent_chain_bytes)
    sub_blocks = BlockCache({})
    first_ses_idx = _get_ses_idx(recent_chain.recent_chain_data)
    # Index of the first summary whose boundary falls inside the recent chain.
    ses_idx = len(summaries) - len(first_ses_idx)
    ssi: uint64 = constants.SUB_SLOT_ITERS_STARTING
    diff: Optional[uint64] = constants.DIFFICULTY_STARTING
    last_blocks_to_validate = 100  # todo remove cap after benchmarks
    # Fast-forward difficulty/ssi through the summaries preceding the recent chain.
    for summary in summaries[:ses_idx]:
        if summary.new_sub_slot_iters is not None:
            ssi = summary.new_sub_slot_iters
        if summary.new_difficulty is not None:
            diff = summary.new_difficulty
    ses_blocks, sub_slots, transaction_blocks = 0, 0, 0
    challenge, prev_challenge = None, None
    tip_height = recent_chain.recent_chain_data[-1].height
    prev_block_record = None
    deficit = uint8(0)
    for idx, block in enumerate(recent_chain.recent_chain_data):
        required_iters = uint64(0)
        overflow = False
        ses = False
        height = block.height
        for sub_slot in block.finished_sub_slots:
            prev_challenge = challenge
            challenge = sub_slot.challenge_chain.get_hash()
            deficit = sub_slot.reward_chain.deficit
            if sub_slot.challenge_chain.subepoch_summary_hash is not None:
                # The slot commits to the next sub-epoch summary: check it matches.
                ses = True
                assert summaries[ses_idx].get_hash() == sub_slot.challenge_chain.subepoch_summary_hash
                ses_idx += 1
            if sub_slot.challenge_chain.new_sub_slot_iters is not None:
                ssi = sub_slot.challenge_chain.new_sub_slot_iters
            if sub_slot.challenge_chain.new_difficulty is not None:
                diff = sub_slot.challenge_chain.new_difficulty
        # Blocks can only be validated once two consecutive challenges are known.
        if (challenge is not None) and (prev_challenge is not None):
            overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
            deficit = get_deficit(constants, deficit, prev_block_record, overflow, len(block.finished_sub_slots))
            log.debug(f"wp, validate block {block.height}")
            if sub_slots > 2 and transaction_blocks > 11 and (tip_height - block.height < last_blocks_to_validate):
                # Close to the tip: full header validation.
                required_iters, error = validate_finished_header_block(
                    constants, sub_blocks, block, False, diff, ssi, ses_blocks > 2
                )
                if error is not None:
                    log.error(f"block {block.header_hash} failed validation {error}")
                    return False
            else:
                # Older block: only the proof of space is checked.
                required_iters = _validate_pospace_recent_chain(
                    constants, block, challenge, diff, overflow, prev_challenge
                )
                if required_iters is None:
                    return False
        curr_block_ses = None if not ses else summaries[ses_idx - 1]
        block_record = header_block_to_sub_block_record(
            constants, required_iters, block, ssi, overflow, deficit, height, curr_block_ses
        )
        log.debug(f"add block {block_record.height} to tmp sub blocks")
        sub_blocks.add_block_record(block_record)
        if block.first_in_sub_slot:
            sub_slots += 1
        if block.is_transaction_block:
            transaction_blocks += 1
        if ses:
            ses_blocks += 1
        prev_block_record = block_record
    return True
def _validate_pospace_recent_chain(
    constants: ConsensusConstants,
    block: HeaderBlock,
    challenge: bytes32,
    diff: uint64,
    overflow: bool,
    prev_challenge: bytes32,
):
    """Verify a recent-chain block's proof of space and derive its required iters.

    Overflow blocks are checked against the previous slot's challenge. Returns
    the required iterations on success, or None when the proof does not verify.
    """
    sp_vdf = block.reward_chain_block.challenge_chain_sp_vdf
    if sp_vdf is None:
        # Edge case of first sp (start of slot), where sp_iters == 0
        cc_sp_hash: bytes32 = challenge
    else:
        cc_sp_hash = sp_vdf.output.get_hash()
    assert cc_sp_hash is not None
    pos_challenge = prev_challenge if overflow else challenge
    quality = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
        constants,
        pos_challenge,
        cc_sp_hash,
    )
    if quality is None:
        log.error(f"could not verify proof of space block {block.height} {overflow}")
        return None
    return calculate_iterations_quality(
        constants.DIFFICULTY_CONSTANT_FACTOR,
        quality,
        block.reward_chain_block.proof_of_space.size,
        diff,
        cc_sp_hash,
    )
def __validate_pospace(
    constants: ConsensusConstants,
    segment: SubEpochChallengeSegment,
    idx: int,
    curr_diff: uint64,
    ses: Optional[SubEpochSummary],
    first_in_sub_epoch: bool,
) -> Optional[uint64]:
    """Verify the proof of space of segment.sub_slots[idx].

    Returns the required iterations for the block, or None when the proof of
    space fails verification.
    """
    # The challenge-chain sub-slot hash this block's slot chains from.
    if first_in_sub_epoch and segment.sub_epoch_n == 0 and idx == 0:
        cc_sub_slot_hash = constants.GENESIS_CHALLENGE
    else:
        cc_sub_slot_hash = __get_cc_sub_slot(segment.sub_slots, idx, ses).get_hash()

    ssd: SubSlotData = segment.sub_slots[idx]
    # Overflow blocks are challenged against the previous slot's end-of-slot VDF.
    if ssd.signage_point_index and is_overflow_block(constants, ssd.signage_point_index):
        prev_slot = segment.sub_slots[idx - 1]
        assert prev_slot.cc_slot_end_info
        challenge = prev_slot.cc_slot_end_info.challenge
    else:
        challenge = cc_sub_slot_hash

    cc_sp_hash = cc_sub_slot_hash if ssd.cc_sp_vdf_info is None else ssd.cc_sp_vdf_info.output.get_hash()

    # validate proof of space
    assert ssd.proof_of_space is not None
    quality = ssd.proof_of_space.verify_and_get_quality_string(
        constants,
        challenge,
        cc_sp_hash,
    )
    if quality is None:
        log.error("could not verify proof of space")
        return None
    return calculate_iterations_quality(
        constants.DIFFICULTY_CONSTANT_FACTOR,
        quality,
        ssd.proof_of_space.size,
        curr_diff,
        cc_sp_hash,
    )
def __get_rc_sub_slot(
    constants: ConsensusConstants,
    segment: SubEpochChallengeSegment,
    summaries: List[SubEpochSummary],
    curr_ssi: uint64,
) -> RewardChainSubSlot:
    """Rebuild the RewardChainSubSlot at the start of a challenge segment.

    Locates the first challenge block of the sub-epoch, walks back over the
    end-of-slot entries it depends on, and reconstructs the challenge-chain
    sub slot (with the previous sub-epoch summary folded in) that the reward
    chain sub slot commits to.
    """
    # Summary of the previous sub-epoch; carries new ssi/difficulty if they changed.
    ses = summaries[uint32(segment.sub_epoch_n - 1)]
    # find first challenge in sub epoch
    first_idx = None
    first = None
    for idx, curr in enumerate(segment.sub_slots):
        if curr.cc_slot_end is None:
            first_idx = idx
            first = curr
            break
    # NOTE(review): `assert first_idx` also fails when first_idx == 0 —
    # presumably index 0 is never the first challenge slot here; confirm.
    assert first_idx
    idx = first_idx
    slots = segment.sub_slots
    # number of slots to look for
    slots_n = 1
    assert first
    assert first.signage_point_index is not None
    if is_overflow_block(constants, first.signage_point_index):
        # An overflow block's signage point lives one slot further back.
        if idx >= 2 and slots[idx - 2].cc_slot_end is None:
            slots_n = 2
    new_diff = None if ses is None else ses.new_difficulty
    new_ssi = None if ses is None else ses.new_sub_slot_iters
    ses_hash = None if ses is None else ses.get_hash()
    overflow = is_overflow_block(constants, first.signage_point_index)
    if overflow:
        # Two fully-finished slots precede the overflow block: the summary was
        # already consumed by an earlier slot, so this one carries none of it.
        if idx >= 2 and slots[idx - 2].cc_slot_end is not None and slots[idx - 1].cc_slot_end is not None:
            ses_hash = None
            new_ssi = None
            new_diff = None
    sub_slot = slots[idx]
    # Walk backwards until the required number of end-of-slot entries is found.
    while True:
        if sub_slot.cc_slot_end:
            slots_n -= 1
            if slots_n == 0:
                break
        idx -= 1
        sub_slot = slots[idx]
    icc_sub_slot_hash: Optional[bytes32] = None
    assert sub_slot is not None
    assert sub_slot.cc_slot_end_info is not None
    assert segment.rc_slot_end_info is not None
    if idx != 0:
        # Not the segment's first slot: rebuild the VDF infos with the full
        # sub-slot iteration count.
        cc_vdf_info = VDFInfo(sub_slot.cc_slot_end_info.challenge, curr_ssi, sub_slot.cc_slot_end_info.output)
        if sub_slot.icc_slot_end_info is not None:
            icc_slot_end_info = VDFInfo(
                sub_slot.icc_slot_end_info.challenge, curr_ssi, sub_slot.icc_slot_end_info.output
            )
            icc_sub_slot_hash = icc_slot_end_info.get_hash()
    else:
        # First slot of the segment: use the stored end-of-slot info verbatim.
        cc_vdf_info = sub_slot.cc_slot_end_info
        if sub_slot.icc_slot_end_info is not None:
            icc_sub_slot_hash = sub_slot.icc_slot_end_info.get_hash()
    cc_sub_slot = ChallengeChainSubSlot(
        cc_vdf_info,
        icc_sub_slot_hash,
        ses_hash,
        new_ssi,
        new_diff,
    )
    rc_sub_slot = RewardChainSubSlot(
        segment.rc_slot_end_info,
        cc_sub_slot.get_hash(),
        icc_sub_slot_hash,
        constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK,
    )
    return rc_sub_slot
def __get_cc_sub_slot(sub_slots: List[SubSlotData], idx, ses: Optional[SubEpochSummary]) -> ChallengeChainSubSlot:
    """Rebuild the ChallengeChainSubSlot from the last slot end before `idx`.

    Scans backwards from idx for the nearest entry with end-of-slot info and
    attaches the sub-epoch summary fields (hash / new ssi / new difficulty)
    when a summary is supplied.
    """
    slot_end = next(
        (sub_slots[i] for i in range(idx - 1, -1, -1) if sub_slots[i].cc_slot_end_info is not None),
        None,
    )
    assert slot_end is not None
    assert slot_end.cc_slot_end_info is not None

    icc_vdf_hash: Optional[bytes32] = None
    if slot_end.icc_slot_end_info is not None:
        icc_vdf_hash = slot_end.icc_slot_end_info.get_hash()

    if ses is not None:
        ses_hash, new_ssi, new_diff = ses.get_hash(), ses.new_sub_slot_iters, ses.new_difficulty
    else:
        ses_hash = new_ssi = new_diff = None

    return ChallengeChainSubSlot(
        slot_end.cc_slot_end_info,
        icc_vdf_hash,
        ses_hash,
        new_ssi,
        new_diff,
    )
def _get_curr_diff_ssi(constants: ConsensusConstants, idx, summaries):
curr_difficulty = constants.DIFFICULTY_STARTING
curr_ssi = constants.SUB_SLOT_ITERS_STARTING
for ses in reversed(summaries[0:idx]):
if ses.new_sub_slot_iters is not None:
curr_ssi = ses.new_sub_slot_iters
curr_difficulty = ses.new_difficulty
break
return curr_difficulty, curr_ssi
def vars_to_bytes(constants, summaries, weight_proof):
    """Serialize constants, summaries and a weight proof for worker processes.

    Returns (constants_dict, summary_bytes, segment_bytes, recent_chain_bytes).
    """
    constants_dict = recurse_jsonify(dataclasses.asdict(constants))
    recent_chain_bytes = bytes(RecentChainData(weight_proof.recent_chain_data))
    segment_bytes = bytes(SubEpochSegments(weight_proof.sub_epoch_segments))
    summary_bytes = [bytes(summary) for summary in summaries]
    return constants_dict, summary_bytes, segment_bytes, recent_chain_bytes
def bytes_to_vars(constants_dict, summaries_bytes):
    """Deserialize what vars_to_bytes produced: returns (constants, summaries)."""
    constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
    summaries = [SubEpochSummary.from_bytes(raw) for raw in summaries_bytes]
    return constants, summaries
def _get_last_ses_hash(
    constants: ConsensusConstants, recent_reward_chain: List[HeaderBlock]
) -> Tuple[Optional[bytes32], uint32]:
    """Find the most recent sub-epoch summary hash within the recent chain.

    Scans backwards for the latest block sitting on a sub-epoch boundary, then
    scans forwards from it for the first finished sub slot carrying a summary
    hash. Returns (hash, height of the carrying block), or (None, 0) if none.
    """
    for idx, block in enumerate(reversed(recent_reward_chain)):
        if (block.reward_chain_block.height % constants.SUB_EPOCH_BLOCKS) == 0:
            # Rebind idx from reversed position to forward position (shadows
            # the loop variable deliberately).
            idx = len(recent_reward_chain) - 1 - idx  # reverse
            # find first block after sub slot end
            while idx < len(recent_reward_chain):
                curr = recent_reward_chain[idx]
                if len(curr.finished_sub_slots) > 0:
                    for slot in curr.finished_sub_slots:
                        if slot.challenge_chain.subepoch_summary_hash is not None:
                            return (
                                slot.challenge_chain.subepoch_summary_hash,
                                curr.reward_chain_block.height,
                            )
                idx += 1
    return None, uint32(0)
def _get_ses_idx(recent_reward_chain: List[HeaderBlock]) -> List[int]:
idxs: List[int] = []
for idx, curr in enumerate(recent_reward_chain):
if len(curr.finished_sub_slots) > 0:
for slot in curr.finished_sub_slots:
if slot.challenge_chain.subepoch_summary_hash is not None:
idxs.append(idx)
return idxs
def get_deficit(
    constants: ConsensusConstants,
    curr_deficit: uint8,
    prev_block: BlockRecord,
    overflow: bool,
    num_finished_sub_slots: int,
) -> uint8:
    """Compute the challenge-block deficit for the next block.

    With a previous block available, defer to calculate_deficit; otherwise
    decrement the current deficit unless it is exhausted, or unless an overflow
    block sits at the full MIN_BLOCKS_PER_CHALLENGE_BLOCK deficit.
    """
    if prev_block is not None:
        return calculate_deficit(constants, uint32(prev_block.height + 1), prev_block, overflow, num_finished_sub_slots)
    # No previous block record (start of the segment/chain).
    if curr_deficit >= 1 and not (overflow and curr_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK):
        curr_deficit -= 1
    return curr_deficit
def get_sp_total_iters(constants: ConsensusConstants, is_overflow: bool, ssi: uint64, sub_slot_data: SubSlotData):
    """Return the total iterations at which this block's signage point occurs.

    Derives the start of the signage-point sub slot from the block's total
    iters minus its infusion-point iters, shifting back one full sub slot for
    overflow blocks, then adds the signage-point iterations.
    """
    assert sub_slot_data.cc_ip_vdf_info is not None
    assert sub_slot_data.total_iters is not None
    assert sub_slot_data.signage_point_index is not None
    sp_iters: uint64 = calculate_sp_iters(constants, ssi, sub_slot_data.signage_point_index)
    ip_iters: uint64 = sub_slot_data.cc_ip_vdf_info.number_of_iterations
    slot_start = uint128(sub_slot_data.total_iters - ip_iters)
    if is_overflow:
        # An overflow block's signage point lives in the previous sub slot.
        slot_start = uint128(slot_start - ssi)
    return slot_start + sp_iters
def blue_boxed_end_of_slot(sub_slot: EndOfSubSlotBundle):
    """True when the end-of-slot proofs were compacted ("blue boxed"):
    the challenge-chain proof — and the infused challenge-chain proof, when
    present — are normalized to identity."""
    proofs = sub_slot.proofs
    if not proofs.challenge_chain_slot_proof.normalized_to_identity:
        return False
    icc_proof = proofs.infused_challenge_chain_slot_proof
    return icc_proof is None or bool(icc_proof.normalized_to_identity)
def validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
    """Verify the proof contains a segment for every sampled sub-epoch.

    Re-derives the random weight samples from `rng` and the tip weight, maps
    them onto sub-epochs via the cumulative weight list, then crosses each
    sampled sub-epoch off as its segments appear in the proof (segments are
    assumed ordered by sub_epoch_n). Returns False if any sample is missing.
    """
    tip = weight_proof.recent_chain_data[-1]
    weight_to_check = _get_weights_for_sampling(rng, tip.weight, weight_proof.recent_chain_data)
    sampled_sub_epochs: dict[int, bool] = {}
    for idx in range(1, len(sub_epoch_weight_list)):
        if _sample_sub_epoch(sub_epoch_weight_list[idx - 1], sub_epoch_weight_list[idx], weight_to_check):
            sampled_sub_epochs[idx - 1] = True
            if len(sampled_sub_epochs) == WeightProofHandler.MAX_SAMPLES:
                break
    curr_sub_epoch_n = -1
    for sub_epoch_segment in weight_proof.sub_epoch_segments:
        # Only the first segment of each sub-epoch needs to cross it off.
        if curr_sub_epoch_n < sub_epoch_segment.sub_epoch_n:
            if sub_epoch_segment.sub_epoch_n in sampled_sub_epochs:
                del sampled_sub_epochs[sub_epoch_segment.sub_epoch_n]
        curr_sub_epoch_n = sub_epoch_segment.sub_epoch_n
    if len(sampled_sub_epochs) > 0:
        return False
    return True
def map_segments_by_sub_epoch(sub_epoch_segments) -> Dict[int, List[SubEpochChallengeSegment]]:
    """Group segments by their sub_epoch_n.

    Assumes the input is ordered by sub-epoch: a new bucket is opened only when
    sub_epoch_n increases past the last one seen.
    """
    by_sub_epoch: Dict[int, List[SubEpochChallengeSegment]] = {}
    last_n = -1
    for segment in sub_epoch_segments:
        if segment.sub_epoch_n > last_n:
            last_n = segment.sub_epoch_n
            by_sub_epoch[last_n] = []
        by_sub_epoch[last_n].append(segment)
    return by_sub_epoch
def validate_total_iters(
    segment: SubEpochChallengeSegment,
    sub_slot_data_idx,
    expected_sub_slot_iters: uint64,
    finished_sub_slots_since_prev: int,
    prev_b: SubSlotData,
    prev_sub_slot_data_iters,
    genesis,
) -> bool:
    """Check that a block's claimed total_iters matches the value derived from
    its predecessor and the number of finished sub slots in between."""
    sub_slot_data = segment.sub_slots[sub_slot_data_idx]
    if genesis:
        # No predecessor: only the empty slots before this block count.
        total_iters: uint128 = uint128(expected_sub_slot_iters * finished_sub_slots_since_prev)
    elif segment.sub_slots[sub_slot_data_idx - 1].is_end_of_slot():
        assert prev_b.total_iters
        assert prev_b.cc_ip_vdf_info
        total_iters = prev_b.total_iters
        # Add the rest of the slot of prev_b
        total_iters = uint128(total_iters + prev_sub_slot_data_iters - prev_b.cc_ip_vdf_info.number_of_iterations)
        # Add other empty slots
        total_iters = uint128(total_iters + (expected_sub_slot_iters * (finished_sub_slots_since_prev - 1)))
    else:
        # Slot iters is guaranteed to be the same for header_block and prev_b
        # This takes the beginning of the slot, and adds ip_iters
        assert prev_b.cc_ip_vdf_info
        assert prev_b.total_iters
        total_iters = uint128(prev_b.total_iters - prev_b.cc_ip_vdf_info.number_of_iterations)
        # NOTE(review): sub_slot_data.cc_ip_vdf_info is used unasserted here,
        # unlike the prev_b fields above — presumably guaranteed by the caller.
        total_iters = uint128(total_iters + sub_slot_data.cc_ip_vdf_info.number_of_iterations)
    return total_iters == sub_slot_data.total_iters
| 42.243665 | 120 | 0.673096 |
7a06c8456fda45552251d6cd71403fc5fe62f9c3 | 4,094 | py | Python | aws_util.py | jlarrieux/CryptoPriceLambdaCommons | 8b0cfb00c596125be49788f2d3567b78c4153dc7 | [
"Apache-2.0"
] | null | null | null | aws_util.py | jlarrieux/CryptoPriceLambdaCommons | 8b0cfb00c596125be49788f2d3567b78c4153dc7 | [
"Apache-2.0"
] | null | null | null | aws_util.py | jlarrieux/CryptoPriceLambdaCommons | 8b0cfb00c596125be49788f2d3567b78c4153dc7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from decimal import Decimal
import boto3
import datetime
import indicator_util
import pickle
from crypto_price_lambda_commons_util import MovingAverageType
from my_rolling_list import MyRollingList
import crypto_price_lambda_commons_util
# AWS clients/resources are created once at import time, all in us-east-1.
region = "us-east-1"
dynamodb = boto3.client('dynamodb', region_name=region)
ssm = boto3.client('ssm', region_name=region)
# Single-row DynamoDB table holding the latest price and moving averages;
# parameter_key is the fixed primary key of that one row.
table_name = 'eth-price-hourly-nosql-db'
parameter_key = '0'
s3_resource = boto3.resource('s3')
# Default S3 bucket for pickled rolling-average state.
default_bucket = 'com.jlarrieux.lambda'
def get_last_price() -> [None, float]:
    """Return the most recently stored price as a float, or None when absent."""
    response = _get_from_dynamo()
    if response is None:
        return None
    return float(response['Item']['last_price']['N'])
def get_last_moving_average(ma_type: MovingAverageType):
    """Return the stored moving average for `ma_type`, or None when missing.

    Note: the value comes back as the raw DynamoDB number string, not a float.
    """
    response = _get_from_dynamo()
    if response is None:
        return None
    attribute = f"{str(ma_type.value)}_day_ma"
    entry = response.get('Item', {}).get(attribute)
    return None if entry is None else entry['N']
def _get_from_dynamo() -> [None, str]:
    # Fetch the singleton row (fixed id) holding price + moving averages.
    # NOTE(review): despite the annotation, boto3's get_item returns a response
    # dict (and never None) — callers still check for None defensively.
    return dynamodb.get_item(
        TableName=table_name, Key={'id': {'N': parameter_key}})
def save_price(val: float, is_time_to_save: bool, key: str, bucket: str, initial_size: int = 500) -> MyRollingList:
    """Persist the latest price; when due, update the rolling window and MAs.

    Always writes `val` to DynamoDB as last_price. When `is_time_to_save` is
    True, the cent-rounded price is appended to the S3-backed rolling window
    and the 10/12/50/200-sample simple moving averages are recomputed and
    stored back to DynamoDB. Returns the rolling window loaded from S3 (which
    may be None when not saving and no window exists yet).
    """
    update_dynamo_table(val, "last_price")
    # Round to cents before adding to the history window.
    round_val = float(Decimal(val).quantize(Decimal("0.01")))
    rolling_average = _load_from_s3(bucket, key)
    if is_time_to_save:
        if rolling_average is None:
            rolling_average = MyRollingList(initial_size)
        rolling_average.add(round_val)
        save_to_s3(bucket, key, rolling_average)
        ma_10 = indicator_util.calculate_simple_moving_average(rolling_average.get_most_recents(10))
        ma_12 = indicator_util.calculate_simple_moving_average(rolling_average.get_most_recents(12))
        ma_50 = indicator_util.calculate_simple_moving_average(rolling_average.get_most_recents(50))
        ma_200 = indicator_util.calculate_simple_moving_average(rolling_average.get_most_recents(200))
        update_dynamo_table(ma_10, "10_day_ma")
        update_dynamo_table(ma_12, "12_day_ma")
        update_dynamo_table(ma_50, "50_day_ma")
        update_dynamo_table(ma_200, "200_day_ma")
    return rolling_average
def update_dynamo_table(val: float, item: str) -> None:
    """Set attribute `item` to `val` (as a DynamoDB number) on the singleton row.

    Uses an expression attribute name (#name) so reserved words can serve as
    attribute names. (The f-prefix on the update expression is redundant — the
    string contains no placeholders.)
    """
    dynamodb.update_item(TableName=table_name, Key={'id': {
        'N': parameter_key}}, ExpressionAttributeNames={"#name": item}, UpdateExpression=f"set #name = :v",
        ExpressionAttributeValues={':v': {'N': str(val)}})
def get_parameter(parameter_name):
    """Return the decrypted value of an AWS SSM Parameter Store parameter."""
    return ssm.get_parameter(Name=parameter_name, WithDecryption=True)['Parameter']['Value']
def _load_from_s3(bucket: str, s3_key: str) -> [MyRollingList, None]:
    # Thin private alias over load_from_s3, kept for internal callers.
    return load_from_s3(bucket, s3_key)
def save_to_s3_default_bucket(key: str, obj: object) -> None:
    """Pickle `obj` and store it under `key` in the module's default bucket."""
    save_to_s3(default_bucket, key, obj)
def save_to_s3(bucket: str, key: str, obj: object) -> None:
    """Pickle `obj` and upload it to s3://bucket/key, overwriting any existing object."""
    pickle_byte_obj = pickle.dumps(obj)
    s3_resource.Object(bucket, key).put(Body=pickle_byte_obj)
def load_from_s3_default_bucket(key: str):
    """Load and unpickle the object stored under `key` in the default bucket."""
    return load_from_s3(default_bucket, key)
def load_from_s3(bucket: str, key: str):
    """Load and unpickle the object at s3://bucket/key.

    Returns None when the key does not exist. Any other failure (credentials,
    network, permissions, corrupt pickle) is re-raised: the previous version
    caught every Exception and fell through the except block, implicitly
    returning None for all of them and hiding real errors.

    NOTE: unpickling is only safe because this reads data our own code wrote
    to a bucket we control — never point this at untrusted objects.
    """
    try:
        return pickle.loads(s3_resource.Object(bucket, key).get()['Body'].read())
    except s3_resource.meta.client.exceptions.NoSuchKey:
        # Missing object is an expected case: no data stored yet.
        return None
def get_rolling_average(key: str) -> [MyRollingList, None]:
    """Return the rolling price window stored under `key`, or None when absent."""
    return load_from_s3_default_bucket(key)
| 36.230088 | 115 | 0.73107 |
89dee17c4425d8328fdf12e5bf406a08dc3523df | 1,402 | py | Python | run.py | dddaga/word-tree | ed6c59c16feee04d5c6003b3f5f4df68e6808e04 | [
"MIT"
] | null | null | null | run.py | dddaga/word-tree | ed6c59c16feee04d5c6003b3f5f4df68e6808e04 | [
"MIT"
] | null | null | null | run.py | dddaga/word-tree | ed6c59c16feee04d5c6003b3f5f4df68e6808e04 | [
"MIT"
] | 1 | 2020-12-02T09:07:06.000Z | 2020-12-02T09:07:06.000Z | import threading
import queue
from src.services.train import train_context
from src.services.get_corpus import corpus_generator, get_chunk
from config import THREADS, CHUNK_SIZE, CORPUS_PATH
import time
# Work queue shared between the producer (main block) and the trainer threads.
train_queue = queue.Queue()
# NOTE(review): presumably a typo for "chunk_count"; never used in this file.
chuck_count = 0
def train_chunk():
    """Worker loop: block until a chunk is queued, train on it, repeat forever.

    Runs in a daemonless thread and never returns; the process relies on the
    producer side to decide when training is complete.
    """
    while True:
        train_context(train_queue.get(block=True))
def start_threads(thread_count):
    """Spawn `thread_count` worker threads, each running the train_chunk loop.

    The threads are non-daemon and run forever, so the process will not exit
    on its own once they are started.
    """
    workers = [threading.Thread(target=train_chunk) for _ in range(thread_count)]
    for worker in workers:
        worker.start()
if __name__ == '__main__':
    # Producer: stream the corpus in chunks, keeping one queued chunk per
    # trainer thread until the corpus generator is exhausted.
    corpus = corpus_generator(CORPUS_PATH)
    start_threads(THREADS)
    while True :
        while train_queue.qsize() < THREADS :
            chunk , corpus_null = get_chunk(corpus,CHUNK_SIZE)
            train_queue.put(chunk)
            time.sleep(1)
            print('chunks in queue: {}'.format(train_queue.qsize()))
        time.sleep(10)
        if corpus_null:
            break
    print('Training finished for {}'.format(CORPUS_PATH))
    # NOTE(review): the worker threads are non-daemon infinite loops, so the
    # process keeps running after this message — confirm that is intended.
    # Old list-based chunking kept for reference:
    #start_threads(THREADS)
    #print('length of corpus is {}'.format(len(corpus)))
    #corpus = corpus[1000000:]
    #while corpus != []:
    #chunk = corpus[:CHUNK_SIZE]
    #train_queue.put(chunk)
    #del corpus[:CHUNK_SIZE]
    #del chunk
    #print('chunks in queue: {}'.format(train_queue.qsize()) )
    #time.sleep(1)
0579f892c43a85c0d462b817e262c875605b6466 | 766 | py | Python | playground/posts/migrations/0001_initial.py | AsheKR/django-quill-editor | 3a629d83629c30bccf18065cb207213e14f6d138 | [
"MIT"
] | 125 | 2020-03-17T11:41:18.000Z | 2022-03-29T15:28:16.000Z | playground/posts/migrations/0001_initial.py | AsheKR/django-quill-editor | 3a629d83629c30bccf18065cb207213e14f6d138 | [
"MIT"
] | 70 | 2020-03-17T09:39:36.000Z | 2022-03-17T21:34:12.000Z | playground/posts/migrations/0001_initial.py | AsheKR/django-quill-editor | 3a629d83629c30bccf18065cb207213e14f6d138 | [
"MIT"
] | 33 | 2020-04-04T20:49:58.000Z | 2022-03-07T23:09:24.000Z | # Generated by Django 3.1.7 on 2021-03-22 03:15
from django.db import migrations, models
import django_quill.fields
class Migration(migrations.Migration):
    """Initial migration (auto-generated by Django 3.1.7): creates QuillPost.

    Do not hand-edit generated migrations beyond comments; create a new
    migration for schema changes instead.
    """
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="QuillPost",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # Rich-text body stored via django-quill's custom field.
                ("content", django_quill.fields.QuillField()),
            ],
            options={
                # Newest posts first by default.
                "ordering": ["-pk"],
            },
        ),
    ]
42e314ab18805175216f81cf9616fe05519be2a9 | 8,268 | py | Python | tests/test_user_model.py | biliGo/flasky | d0de3ed04cd6fc39d5a18db179c16b52c599abdb | [
"MIT"
] | null | null | null | tests/test_user_model.py | biliGo/flasky | d0de3ed04cd6fc39d5a18db179c16b52c599abdb | [
"MIT"
] | 3 | 2020-03-24T15:59:24.000Z | 2021-02-02T21:47:42.000Z | tests/test_user_model.py | biliGo/flasky | d0de3ed04cd6fc39d5a18db179c16b52c599abdb | [
"MIT"
] | null | null | null | import unittest
import time
from datetime import datetime
from app import create_app, db
from app.models import User, AnonymousUser, Role, Permission, Follow
class UserModelTestCase(unittest.TestCase):
    """Unit tests for the User model: password hashing, confirmation/reset/
    email-change tokens, role permissions, timestamps, gravatars, follower
    relationships and JSON serialization."""

    def setUp(self):
        # Fresh app context and empty database (with roles seeded) per test.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        Role.insert_roles()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_password_setter(self):
        u = User(password='cat')
        self.assertTrue(u.password_hash is not None)

    def test_no_password_getter(self):
        # Reading the plaintext password must be impossible.
        u = User(password='cat')
        with self.assertRaises(AttributeError):
            u.password

    def test_password_verification(self):
        u = User(password='cat')
        self.assertTrue(u.verify_password('cat'))
        self.assertFalse(u.verify_password('dog'))

    def test_password_salts_are_random(self):
        # Same password, different salt => different hashes.
        u = User(password='cat')
        u2 = User(password='cat')
        self.assertTrue(u.password_hash != u2.password_hash)

    def test_valid_confirmation_token(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        token = u.generate_confirmation_token()
        self.assertTrue(u.confirm(token))

    def test_invalid_confirmation_token(self):
        # A token generated for one user must not confirm another.
        u1 = User(password='cat')
        u2 = User(password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        token = u1.generate_confirmation_token()
        self.assertFalse(u2.confirm(token))

    def test_expired_confirmation_token(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        # 1-second expiry; sleeping past it must invalidate the token.
        token = u.generate_confirmation_token(1)
        time.sleep(2)
        self.assertFalse(u.confirm(token))

    def test_valid_reset_token(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        token = u.generate_reset_token()
        self.assertTrue(User.reset_password(token, 'dog'))
        self.assertTrue(u.verify_password('dog'))

    def test_invalid_reset_token(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        token = u.generate_reset_token()
        # A tampered token must leave the password unchanged.
        self.assertFalse(User.reset_password(token + 'a', 'horse'))
        self.assertTrue(u.verify_password('cat'))

    def test_valid_email_change_token(self):
        u = User(email='john@example.com', password='cat')
        db.session.add(u)
        db.session.commit()
        token = u.generate_email_change_token('susan@example.org')
        self.assertTrue(u.change_email(token))
        self.assertTrue(u.email == 'susan@example.org')

    def test_invalid_email_change_token(self):
        # A token issued to one user must not change another user's email.
        u1 = User(email='john@example.com', password='cat')
        u2 = User(email='susan@example.org', password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        token = u1.generate_email_change_token('david@example.net')
        self.assertFalse(u2.change_email(token))
        self.assertTrue(u2.email == 'susan@example.org')

    def test_duplicate_email_change_token(self):
        # Changing to an email already registered must be rejected.
        u1 = User(email='john@example.com', password='cat')
        u2 = User(email='susan@example.org', password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        token = u2.generate_email_change_token('john@example.com')
        self.assertFalse(u2.change_email(token))
        self.assertTrue(u2.email == 'susan@example.org')

    def test_user_role(self):
        # Default role: follow/comment/write only.
        u = User(email='john@example.com', password='cat')
        self.assertTrue(u.can(Permission.FOLLOW))
        self.assertTrue(u.can(Permission.COMMENT))
        self.assertTrue(u.can(Permission.WRITE))
        self.assertFalse(u.can(Permission.MODERATE))
        self.assertFalse(u.can(Permission.ADMIN))

    def test_moderator_role(self):
        r = Role.query.filter_by(name='Moderator').first()
        u = User(email='john@example.com', password='cat', role=r)
        self.assertTrue(u.can(Permission.FOLLOW))
        self.assertTrue(u.can(Permission.COMMENT))
        self.assertTrue(u.can(Permission.WRITE))
        self.assertTrue(u.can(Permission.MODERATE))
        self.assertFalse(u.can(Permission.ADMIN))

    def test_administrator_role(self):
        r = Role.query.filter_by(name='Administrator').first()
        u = User(email='john@example.com', password='cat', role=r)
        self.assertTrue(u.can(Permission.FOLLOW))
        self.assertTrue(u.can(Permission.COMMENT))
        self.assertTrue(u.can(Permission.WRITE))
        self.assertTrue(u.can(Permission.MODERATE))
        self.assertTrue(u.can(Permission.ADMIN))

    def test_anonymous_user(self):
        # Anonymous users hold no permissions at all.
        u = AnonymousUser()
        self.assertFalse(u.can(Permission.FOLLOW))
        self.assertFalse(u.can(Permission.COMMENT))
        self.assertFalse(u.can(Permission.WRITE))
        self.assertFalse(u.can(Permission.MODERATE))
        self.assertFalse(u.can(Permission.ADMIN))

    def test_timestamps(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        # Both timestamps default to "now" (within a 3s tolerance).
        self.assertTrue(
            (datetime.utcnow() - u.member_since).total_seconds() < 3)
        self.assertTrue(
            (datetime.utcnow() - u.last_seen).total_seconds() < 3)

    def test_ping(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        time.sleep(2)
        last_seen_before = u.last_seen
        u.ping()
        self.assertTrue(u.last_seen > last_seen_before)

    def test_gravatar(self):
        u = User(email='john@example.com', password='cat')
        with self.app.test_request_context('/'):
            gravatar = u.gravatar()
            gravatar_256 = u.gravatar(size=256)
            gravatar_pg = u.gravatar(rating='pg')
            gravatar_retro = u.gravatar(default='retro')
        # The hash part is the MD5 of the email address.
        self.assertTrue('https://secure.gravatar.com/avatar/' +
                        'd4c74594d841139328695756648b6bd6'in gravatar)
        self.assertTrue('s=256' in gravatar_256)
        self.assertTrue('r=pg' in gravatar_pg)
        self.assertTrue('d=retro' in gravatar_retro)

    def test_follows(self):
        u1 = User(email='john@example.com', password='cat')
        u2 = User(email='susan@example.org', password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        self.assertFalse(u1.is_following(u2))
        self.assertFalse(u1.is_followed_by(u2))
        timestamp_before = datetime.utcnow()
        u1.follow(u2)
        db.session.add(u1)
        db.session.commit()
        timestamp_after = datetime.utcnow()
        self.assertTrue(u1.is_following(u2))
        self.assertFalse(u1.is_followed_by(u2))
        self.assertTrue(u2.is_followed_by(u1))
        # Counts include the implicit self-follow each user gets.
        self.assertTrue(u1.followed.count() == 2)
        self.assertTrue(u2.followers.count() == 2)
        f = u1.followed.all()[-1]
        self.assertTrue(f.followed == u2)
        self.assertTrue(timestamp_before <= f.timestamp <= timestamp_after)
        f = u2.followers.all()[-1]
        self.assertTrue(f.follower == u1)
        u1.unfollow(u2)
        db.session.add(u1)
        db.session.commit()
        self.assertTrue(u1.followed.count() == 1)
        self.assertTrue(u2.followers.count() == 1)
        self.assertTrue(Follow.query.count() == 2)
        u2.follow(u1)
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        # Deleting a user must cascade-delete its follow rows.
        db.session.delete(u2)
        db.session.commit()
        self.assertTrue(Follow.query.count() == 1)

    def test_to_json(self):
        u = User(email='john@example.com', password='cat')
        db.session.add(u)
        db.session.commit()
        with self.app.test_request_context('/'):
            json_user = u.to_json()
        expected_keys = ['url', 'username', 'member_since', 'last_seen',
                         'posts_url', 'followed_posts_url', 'post_count']
        self.assertEqual(sorted(json_user.keys()), sorted(expected_keys))
        self.assertEqual('/api/v1/users/' + str(u.id), json_user['url'])
if __name__ == '__main__':
    # Bug fix: `unittest.main` was referenced without being called, so running
    # this file directly executed no tests at all.
    unittest.main()
| 37.076233 | 75 | 0.627963 |
f0438dca3a8b38b1f82e2154b1e3ff3381572211 | 6,292 | py | Python | asset/onboard_VS_optitrack.py | shushuai3/multi-robot-localization | 9d7b45979cc21ea11def44e7bc51613e7599a768 | [
"MIT"
] | 8 | 2021-08-18T15:03:26.000Z | 2022-03-18T20:43:42.000Z | asset/onboard_VS_optitrack.py | shushuai3/multi-robot-localization | 9d7b45979cc21ea11def44e7bc51613e7599a768 | [
"MIT"
] | null | null | null | asset/onboard_VS_optitrack.py | shushuai3/multi-robot-localization | 9d7b45979cc21ea11def44e7bc51613e7599a768 | [
"MIT"
] | 6 | 2020-03-26T14:21:39.000Z | 2022-01-13T22:14:14.000Z | import logging
import time
from threading import Timer
import cflib.crtp # noqa
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.log import LogConfig
# Only output errors from the logging framework
logging.basicConfig(level=logging.ERROR)
from NatNetClient import NatNetClient
import numpy as np
# Latest pose of each tracked rigid body, written by the NatNet callback thread.
pos2 = np.zeros(3) # 3D position from optiTrack
att2 = np.zeros(4) # 3D attitude from optiTrack
pos3 = np.zeros(3) # 3D position from optiTrack
att3 = np.zeros(4) # 3D attitude from optiTrack
# Relative x/y/yaw estimated onboard the Crazyflie, updated by the log callback.
rlxCF = 0
rlyCF = 0
rlyawCF = 0
# This is a callback function that gets connected to the NatNet client and called once per mocap frame.
def receiveNewFrame( frameNumber, markerSetCount, unlabeledMarkersCount, rigidBodyCount, skeletonCount,
                     labeledMarkerCount, latency, timecode, timecodeSub, timestamp, isRecording, trackedModelsChanged ):
    # Per-frame mocap callback; frame-level data is not used in this script.
    pass
# This is a callback function that gets connected to the NatNet client. It is called once per rigid body per frame
def receiveRigidBodyFrame( id, position, rotation ):
    """Store the latest OptiTrack pose of each rigid body into module globals.

    NOTE(review): rigid body id 1 maps to pos3/att3 while id 2 maps to
    pos2/att2 — the cross-mapping looks inverted relative to the names;
    confirm against the OptiTrack asset ids. (`id` also shadows the builtin.)
    """
    global pos2, att2, pos3, att3
    if id==1:
        pos3[:] = position
        att3[:] = rotation
    if id==2:
        pos2[:] = position
        att2[:] = rotation
streamingClient = NatNetClient() # Create a new NatNet client
# Register the mocap callbacks, then start receiving frames in the background.
streamingClient.newFrameListener = receiveNewFrame
streamingClient.rigidBodyListener = receiveRigidBodyFrame
streamingClient.run() # Run perpetually on a separate thread.
class LoggingExample:
"""
Simple logging example class that logs the Stabilizer from a supplied
link uri and disconnects after 5s.
"""
    def __init__(self, link_uri):
        """Initialize and run the example with the specified link_uri."""
        self._cf = Crazyflie(rw_cache='./cache')
        # Connect some callbacks from the Crazyflie API
        self._cf.connected.add_callback(self._connected)
        self._cf.disconnected.add_callback(self._disconnected)
        self._cf.connection_failed.add_callback(self._connection_failed)
        self._cf.connection_lost.add_callback(self._connection_lost)
        print('Connecting to %s' % link_uri)
        # Try to connect to the Crazyflie
        self._cf.open_link(link_uri)
        # Variable used to keep main loop occupied until disconnect
        self.is_connected = True
def _connected(self, link_uri):
""" This callback is called form the Crazyflie API when a Crazyflie
has been connected and the TOCs have been downloaded."""
print('Connected to %s' % link_uri)
# The definition of the logconfig can be made before connecting
self._lg_stab = LogConfig(name='relative_pos', period_in_ms=200)
self._lg_stab.add_variable('relative_pos.rlX0', 'float')
self._lg_stab.add_variable('relative_pos.rlY0', 'float')
self._lg_stab.add_variable('relative_pos.rlYaw0', 'float')
# Adding the configuration cannot be done until a Crazyflie is
# connected, since we need to check that the variables we
# would like to log are in the TOC.
try:
self._cf.log.add_config(self._lg_stab)
# This callback will receive the data
self._lg_stab.data_received_cb.add_callback(self._stab_log_data)
# This callback will be called on errors
self._lg_stab.error_cb.add_callback(self._stab_log_error)
# Start the logging
self._lg_stab.start()
except KeyError as e:
print('Could not start log configuration,'
'{} not found in TOC'.format(str(e)))
except AttributeError:
print('Could not add Stabilizer log config, bad configuration.')
# Start a timer to disconnect in 10s
t = Timer(1000, self._cf.close_link)
t.start()
def _stab_log_error(self, logconf, msg):
"""Callback from the log API when an error occurs"""
print('Error when logging %s: %s' % (logconf.name, msg))
def _stab_log_data(self, timestamp, data, logconf):
"""Callback froma the log API when data arrives"""
global rlxCF, rlyCF, rlyawCF
rlxCF = data['relative_pos.rlX0']
rlyCF = data['relative_pos.rlY0']
rlyawCF = data['relative_pos.rlYaw0']
def _connection_failed(self, link_uri, msg):
"""Callback when connection initial connection fails (i.e no Crazyflie
at the speficied address)"""
print('Connection to %s failed: %s' % (link_uri, msg))
self.is_connected = False
def _connection_lost(self, link_uri, msg):
"""Callback when disconnected after a connection has been made (i.e
Crazyflie moves out of range)"""
print('Connection to %s lost: %s' % (link_uri, msg))
def _disconnected(self, link_uri):
"""Callback when the Crazyflie is disconnected (called in all cases)"""
print('Disconnected from %s' % link_uri)
self.is_connected = False
if __name__ == '__main__':
    # Initialize the low-level drivers (don't list the debug drivers)
    cflib.crtp.init_drivers(enable_debug_driver=False)

    def _yaw_from_quaternion(att):
        """Yaw (rad) from an optiTrack quaternion, matching the on-board
        convention used by the Crazyflie estimator."""
        q = att / np.linalg.norm(att)
        return -np.arctan2(-2 * (q[1] * q[3] - q[0] * q[2]),
                           q[0] ** 2 - q[1] ** 2 - q[2] ** 2 + q[3] ** 2)

    le = LoggingExample('radio://0/60/2M/E7E7E7E7E6')
    # Compare the Crazyflie's own relative-position estimate against the
    # optiTrack ground truth until the link drops.
    while le.is_connected:
        time.sleep(0.3)
        yaw2 = _yaw_from_quaternion(att2)
        yaw3 = _yaw_from_quaternion(att3)
        # Convert optiTrack axes (x, -z, y) into the world frame used here.
        p2 = np.array([pos2[0], -pos2[2], pos2[1]])
        p3 = np.array([pos3[0], -pos3[2], pos3[1]])
        # Ground-truth relative position, rotated into body-2's frame.
        rlxE = p3[0] - p2[0]
        rlyE = p3[1] - p2[1]
        rlxB = rlxE * np.cos(-yaw2) - rlyE * np.sin(-yaw2)
        rlyB = rlxE * np.sin(-yaw2) + rlyE * np.cos(-yaw2)
        print("relaX:%1.2f, relaY:%1.2f, relaYaw:%2.2f" % (rlxCF, rlyCF, rlyawCF))
        # Alternative printouts for debugging the estimation error:
        # print("relaX:%1.2f, relaY:%1.2f, relaYaw:%2.2f" % (rlxB, rlyB, yaw3-yaw2))
        # yawErr = np.arctan2(np.sin(rlyawCF-(yaw3-yaw2)), np.cos(rlyawCF-(yaw3-yaw2)))
        # print("ErrX:%1.2f, ErrY:%1.2f, ErrYaw:%2.2f" % (rlxCF-rlxB, rlyCF-rlyB, yawErr))
| 42.513514 | 173 | 0.656866 |
9df2af6a5f79fa04f0a2edbd5b2d65a3b0277bc3 | 511 | py | Python | Source/Utility/python-twitter/twitter/error.py | guissy/StockRecommendSystem | 2e8694d0bb2ceaa42585ee7414564d921cc5a854 | [
"MIT"
] | 137 | 2017-06-13T06:54:40.000Z | 2022-03-30T22:19:38.000Z | Source/Utility/python-twitter/twitter/error.py | guissy/StockRecommendSystem | 2e8694d0bb2ceaa42585ee7414564d921cc5a854 | [
"MIT"
] | 27 | 2017-04-01T15:06:36.000Z | 2021-02-08T20:19:58.000Z | Source/Utility/python-twitter/twitter/error.py | guissy/StockRecommendSystem | 2e8694d0bb2ceaa42585ee7414564d921cc5a854 | [
"MIT"
] | 61 | 2017-07-03T01:30:36.000Z | 2022-01-11T08:50:44.000Z | #!/usr/bin/env python
class TwitterError(Exception):
    """Base exception raised for Twitter API failures."""

    @property
    def message(self):
        """The first constructor argument, i.e. the human-readable error text."""
        error_args = self.args
        return error_args[0]
class PythonTwitterDeprecationWarning(DeprecationWarning):
    """Root of the python-twitter deprecation-warning hierarchy."""
"""Warning for features to be removed in version 3.3.0"""
pass
| 24.333333 | 74 | 0.7182 |
0b0ec302d72b1323ebd0ff65e0e828d957753366 | 307 | py | Python | datatableview/__init__.py | milu-buet/django-datatable-view | adb3e9e437058e51fdb71ce5a9cedf792ca82c53 | [
"Apache-2.0"
] | null | null | null | datatableview/__init__.py | milu-buet/django-datatable-view | adb3e9e437058e51fdb71ce5a9cedf792ca82c53 | [
"Apache-2.0"
] | null | null | null | datatableview/__init__.py | milu-buet/django-datatable-view | adb3e9e437058e51fdb71ce5a9cedf792ca82c53 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
__name__ = 'datatableview'
__author__ = 'Tim Valenta'
__version_info__ = (0, 8, 0)
__version__ = '.'.join(map(str, __version_info__))
__date__ = '2013/11/14 2:00:00 PM'
__credits__ = ['Tim Valenta', 'Steven Klass']
__license__ = 'See the file LICENSE.txt for licensing information.'
| 34.111111 | 67 | 0.703583 |
2210f27f7b817955a0b4de2d8698fbb20f566838 | 174 | py | Python | accounts/models.py | LokeshBolisetty/Webapp-django | 51fd6d3224dfd4295e7688b8fa6c88f1c11dfe9a | [
"MIT"
] | null | null | null | accounts/models.py | LokeshBolisetty/Webapp-django | 51fd6d3224dfd4295e7688b8fa6c88f1c11dfe9a | [
"MIT"
] | null | null | null | accounts/models.py | LokeshBolisetty/Webapp-django | 51fd6d3224dfd4295e7688b8fa6c88f1c11dfe9a | [
"MIT"
] | null | null | null | from django.db import models
# This app relies on Django's built-in User model. If a custom account model is needed instead, define it here with the required fields.
0711f66b783dd09d729edb92b24abf78ea9a2df2 | 6,560 | py | Python | dashboard/utils.py | siqueiralex/SistemaComissaoVoluntario | 1a4ba8fd37d5182156e7bb4c4204845df2707fd1 | [
"MIT"
] | null | null | null | dashboard/utils.py | siqueiralex/SistemaComissaoVoluntario | 1a4ba8fd37d5182156e7bb4c4204845df2707fd1 | [
"MIT"
] | null | null | null | dashboard/utils.py | siqueiralex/SistemaComissaoVoluntario | 1a4ba8fd37d5182156e7bb4c4204845df2707fd1 | [
"MIT"
] | null | null | null | import xlwt
from openpyxl import Workbook
from django.contrib.auth.decorators import login_required
from django.utils.formats import date_format
import datetime
from .sheet_builder import SheetBuilder, NewSheetBuilder
from projetos.models import Atividade, Cronograma, Unidade, Vaga
from projetos.decorators import *
CUSTO_COTA = 600
def style_random_color(number):
    """Return a bordered, bold, centered xlwt cell style whose fill colour
    is chosen from a fixed palette by *number* (cycled with modulo)."""
    palette = ("ocean_blue", "light_green", "light_turquoise", "lavender",
               "ice_blue", "gray25", "sea_green", "aqua")
    colour = palette[number % len(palette)]
    spec = ("borders: top thin, bottom thin, left thin,right thin;"
            "font: bold 1, color black;"
            "align: horiz center, vert center;"
            f"pattern: pattern solid, pattern_fore_colour {colour};")
    return xlwt.easyxf(spec)
def controlePlanosSheetFiller(sheet, cronograma, unidades):
    """Fill *sheet* with the per-unidade activity summary for *cronograma*:
    one merged block per unidade (its activities plus a TOTAL row), then a
    grand-total row summing all unidade totals.

    NOTE: SheetBuilder is stateful (row/col cursors); the sb.enter()/sb.col
    mutations below are order-sensitive.
    """
    total_rows = []
    sb = SheetBuilder(sheet)
    sb.row = 1
    sb.col_initial = 0
    # Taller header row for the wrapped column titles.
    sb.sheet.row(1).height_mismatch = True
    sb.sheet.row(1).height = 600
    sb.set_col_widths([5000,4000,7500,3000,2500,2000,2500,2500,6000])
    sb.style="header"
    sb.write_header([
        {'text':'UNIDADE'},
        {'text':'TIPO DE ATIVIDADE'},
        {'text':'ATIVIDADE'},
        {'text':'HORÁRIO'},
        {'text':'CH'},
        {'text':'QTDE DIAS'},
        {'text':'QTDE DE SV'},
        {'text':'QTDE COTAS'},
        {'text':'CUSTO','style':"center_currency"}
    ])
    for unidade in unidades:
        sb.style="center"
        atividades = Atividade.objects.filter(cronograma_id=cronograma.id, unidade_id=unidade.id).order_by('id')
        sb.enter()
        qtde = atividades.count()
        # Merge the unidade label vertically over its activities + total row.
        sb.sheet.write_merge(sb.row, sb.row+qtde, sb.col, sb.col, unidade.sigla, style_random_color(unidade.id) )
        sb.col+=1
        sb.col_initial+=1
        for atv in atividades:
            sb.write_line([atv.tipo_atividade, atv.nome, atv.horario, atv.carga_horaria, atv.quantidade_de_dias, atv.total_voluntarios, atv.total_cotas, atv.total_cotas*CUSTO_COTA])
            sb.enter()
        sb.style = "total"
        # Remember where this unidade's TOTAL row lands for the grand total.
        total_rows += [sb.row]
        sb.sheet.write_merge(sb.row,sb.row,sb.col,sb.col+3, "TOTAL",sb.get_style())
        sb.write_sum(elig_col=[4,5,6,7], num_rows=qtde-1)
        sb.col_initial-=1
    sb.enter()
    sb.enter()
    sb.style = "total_darker"
    sb.sheet.write_merge(sb.row,sb.row,sb.col,sb.col+4, "TOTAL GERAL", sb.get_style())
    sb.write_sum(elig_col=[5,6,7,8], rows = total_rows)
def cotachSheetFiller(sheet, cronograma, unidades):
    """Fill *sheet* with the per-day headcount grid for *cronograma*: one
    column per date in the schedule, one row per activity, with per-unidade
    TOTAL rows and a final grand-total row.

    NOTE: SheetBuilder is stateful; the cursor mutations are order-sensitive.
    """
    sb = SheetBuilder(sheet)
    sb.row = 1
    sb.col_initial = 0
    sb.sheet.row(1).height_mismatch = True
    sb.sheet.row(1).height = 600
    total_rows = []
    # Fixed widths for the label columns, narrow ones for each date column.
    sb.set_col_widths([3000,8000]+[1200]*len(cronograma.lista_de_datas)+[3000,5000])
    sb.style="header"
    sb.write_header([
        {'text':'UNIDADE'},
        {'text':'ATIVIDADE','style':"center160"},
    ] + [{'text':f"{d.day}"} for d in cronograma.lista_de_datas] + [
        {'text':'CH (Horas)'},
        {'text':'HORÁRIOS'},
    ])
    for unidade in unidades:
        sb.style="center"
        atividades = Atividade.objects.filter(cronograma_id=cronograma.id, unidade_id=unidade.id).order_by('id')
        sb.enter()
        qtde = atividades.count()
        # Merge the unidade label over its activity rows + TOTAL row.
        sb.sheet.write_merge(sb.row, sb.row+qtde, sb.col, sb.col, unidade.sigla, style_random_color(unidade.id))
        sb.col+=1
        sb.col_initial+=1
        for atv in atividades:
            sb.write_line([atv.nome] + atv.lista_efetivos + [atv.carga_horaria, atv.horario])
            sb.enter()
        sb.style = "total"
        total_rows += [sb.row]
        sb.sheet.write_merge(sb.row,sb.row,sb.col,sb.col, "TOTAL",sb.get_style())
        date_cols = list(range(1,len(cronograma.lista_de_datas)+1))
        sb.write_sum(elig_col=date_cols, num_rows=qtde-1)
        # Blank out the trailing CH/HORÁRIOS cells on the total row.
        sb.write_line(["",""])
        sb.col_initial-=1
    sb.enter()
    sb.enter()
    sb.style = "total_darker"
    sb.sheet.write_merge(sb.row,sb.row,sb.col,sb.col+1, "TOTAL GERAL", sb.get_style())
    date_cols = list(range(2,len(cronograma.lista_de_datas)+2))
    sb.write_sum(elig_col=date_cols, rows = total_rows)
    sb.write_line(["",""])
def projetoSheetFiller(sheet, cronograma, unidade):
    """Fill *sheet* with the project overview for a single *unidade* in
    *cronograma*: first a wide summary table (one row per activity), then a
    second two-column table (activity name / objective) three rows below.

    NOTE: SheetBuilder is stateful; the cursor mutations are order-sensitive.
    """
    atividades = Atividade.objects.filter(unidade_id= unidade.id, cronograma_id=cronograma.id).order_by('id')
    sb = SheetBuilder(sheet)
    # Extra-tall header row for the wrapped column titles.
    sb.sheet.row(1).height_mismatch = True
    sb.sheet.row(1).height = 900
    sb.col_initial = 1
    sb.row = 1
    sb.set_col_widths([3000,5000,10000,3000,4000,3000,3000,3000,3000,3000,4000])
    sb.style = "header"
    sb.write_header([{'text':'UNIDADE'},
                     {'text':'TIPO DE PROJETO'},
                     {'text':'NOME DO PROJETO','style':"center160"},
                     {'text':'MÊS'},
                     {'text':'DATA','style':"center_date"},
                     {'text':'QTDE DE DIAS'},
                     {'text':'Nº SV/DIA'},
                     {'text':'Nº DE JOVENS ATENDIDOS'},
                     {'text':'Nº MÁX JOVENS SIMULT'},
                     {'text':'CH'},
                     {'text':'HORARIO DA ATIVIDADE'}
                     ])
    sb.style = "center"
    for atv in atividades:
        sb.enter()
        sb.write_line([atv.unidade.sigla, atv.tipo_atividade, atv.nome, f"{date_format(atv.cronograma.inicio_servicos, 'F')}",atv.cronograma.nome_curto, atv.quantidade_de_dias, atv.voluntarios_dia, atv.numero_jovens, atv.numero_jovens_sim ,atv.carga_horaria, atv.horario])
    # Second table: activity objectives, shifted one column right.
    sb.col_initial = 2
    sb.style = "header"
    sb.enter()
    sb.enter()
    sb.enter()
    sb.write_header([{'text':'ATIVIDADE','style':"center_wrap"},
                     {'text':'OBJETIVO DA UNIDADE','style':"center_wrap"}])
    for atv in atividades:
        sb.enter()
        sb.write_line([atv.nome, atv.objetivo])
def extract_list_from_sheet(sheet, header_row=1):
    """Read an openpyxl-style worksheet into (header, rows).

    *header* is the list of cell values on row *header_row*; *rows* is a
    list of dicts mapping header name -> cell value, one per data row.
    Reading stops at the first row whose 'Matrícula' cell is empty
    (raises KeyError if the sheet has no 'Matrícula' column).
    """
    header = [cell.value for cell in sheet[header_row]]
    values_list = []
    for row in sheet.iter_rows(min_row=header_row+1):
        values = {}
        for key, cell in zip(header, row):
            values[key] = cell.value
        # An empty Matrícula marks the end of the data region.
        if values['Matrícula'] is None:
            break
        values_list.append(values)
    return header, values_list
40e3ebad0df9824ad905e3752dbe6bd465662f4a | 1,746 | py | Python | scripts/instance_google_sheet.py | rniksch/openshift-on-aws | 33663a816c07a9f362a586bc960c7a054df3f1a5 | [
"Apache-2.0"
] | null | null | null | scripts/instance_google_sheet.py | rniksch/openshift-on-aws | 33663a816c07a9f362a586bc960c7a054df3f1a5 | [
"Apache-2.0"
] | null | null | null | scripts/instance_google_sheet.py | rniksch/openshift-on-aws | 33663a816c07a9f362a586bc960c7a054df3f1a5 | [
"Apache-2.0"
] | 5 | 2018-11-08T00:51:22.000Z | 2021-06-08T01:47:02.000Z | #!/usr/bin/python
# Used this resources to build this simple script
# https://boto3.readthedocs.io/en/latest/guide/ec2-example-managing-instances.html
# https://pygsheets.readthedocs.io/en/latest/
# This library below was extremely slow removed for pygsheets
# http://gspread.readthedocs.io/en/latest/
# https://www.twilio.com/blog/2017/02/an-easy-way-to-read-and-write-to-a-google-spreadsheet-in-python.html
from __future__ import print_function
import pygsheets
import boto3
import os
import time
def main():
    """Write the public DNS/IP of every running loft-lab EC2 instance into
    the 'NYC AWS Loft Instances' Google sheet, one row per instance."""
    ec2 = boto3.client('ec2')
    # BUG FIX: the original built a single dict literal with duplicate
    # 'Name'/'Values' keys, so the tag:lab_type filter was silently
    # discarded (later keys win in a dict literal).  Each filter must be
    # its own dict in the Filters list.
    filters = [
        {'Name': 'tag:lab_type', 'Values': ['loft-lab']},
        {'Name': 'instance-state-name', 'Values': ['running']},
    ]
    instances = ec2.describe_instances(Filters=filters)
    gc = pygsheets.authorize(service_file='%s/nycawsloft-af8212519288.json' % os.environ['HOME'])
    row = ["Student ID", "Public URL", "Public IP Address", "Claimed By"]
    sht = gc.open("NYC AWS Loft Instances")
    wks = sht.worksheet('index', 0)
    wks.update_row(1, values=row)
    row_count = 2
    for r in instances['Reservations']:
        for i in r['Instances']:
            # Derive the student id from the instance's Name tag; spares
            # keep the full tag value.
            for t in i['Tags']:
                if t['Key'] == 'Name':
                    if 'spare' in t['Value']:
                        student_id = t['Value']
                    else:
                        student_id = t['Value'].split('-')[-1]
            print(i['PublicDnsName'])
            print(i['PublicIpAddress'])
            row = [student_id, i['PublicDnsName'], i['PublicIpAddress']]
            # Sleep is required otherwise the script will hit the API limit
            time.sleep(0.5)
            wks.update_row(row_count, values=row)
            row_count = row_count + 1
# Script entry point: no arguments; credentials come from the environment.
if __name__ == '__main__':
    main()
| 28.622951 | 115 | 0.613402 |
75bd8d1632bbf85554c0e11be2664d83adb2fbca | 3,315 | py | Python | src/models/export_model_to_js_h5.py | morpheus-project/morpheus-deblend | a63b9e27de3be22bb732509bbdf6dc84ba066e92 | [
"MIT"
] | 1 | 2022-01-14T13:44:27.000Z | 2022-01-14T13:44:27.000Z | src/models/export_model_to_js_h5.py | morpheus-project/morpheus-deblend | a63b9e27de3be22bb732509bbdf6dc84ba066e92 | [
"MIT"
] | null | null | null | src/models/export_model_to_js_h5.py | morpheus-project/morpheus-deblend | a63b9e27de3be22bb732509bbdf6dc84ba066e92 | [
"MIT"
] | null | null | null | # MIT License
# Copyright 2020 Ryan Hausen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import tensorflow as tf
import tensorflowjs as tfjs
from tensorflow.keras import layers
from tensorflow.keras.models import Model
import src.models.PanopticFastAttention as pfa
MODELS_DIR = os.path.join(os.path.dirname(__file__), "../../models")
def main(model_id: str):
    """Restore the latest checkpoint for *model_id* and export the model as
    TensorFlow.js, Keras H5 and TensorFlow SavedModel artifacts under
    models/<model_id>/."""
    model_dir = os.path.join(MODELS_DIR, model_id)

    # Architecture hyper-parameters; these must match the values the
    # checkpoint was trained with.
    model_input_shape = [256, 256, 1]

    encoder_filters = [16, 32, 64, 64]
    encoder_input_shape = [256, 256, 1]
    encoder_dropout_rate = 0.1

    instance_decoder_output_shape = [256, 256, 1]
    instance_decoder_filters = [16, 32, 64, 64]
    instance_decoder_dropout_rate = 0.1
    instance_decoder_n_instaces = 3

    inputs = layers.Input(shape=model_input_shape)
    enc = pfa.encoder(
        encoder_input_shape,
        encoder_filters,
        dropout_rate=encoder_dropout_rate
    )
    dec_intance = pfa.instance_decoder_v8(
        instance_decoder_output_shape,
        instance_decoder_filters,
        dropout_rate=instance_decoder_dropout_rate,
        n_instances=instance_decoder_n_instaces
    )

    enc_outputs = enc(inputs)
    # The decoder consumes the encoder feature maps from deepest to shallowest.
    reversed_outputs = list(reversed(enc_outputs))
    cv, cm, com = dec_intance(reversed_outputs)
    model = Model([inputs], [cv, cm, com])

    checkpoint = tf.train.Checkpoint(model=model)
    checkpoint_manager = tf.train.CheckpointManager(
        checkpoint=checkpoint,
        directory=os.path.join(model_dir, "raw"),
        max_to_keep=3,
    )
    checkpoint.restore(checkpoint_manager.latest_checkpoint).expect_partial()

    # os.makedirs(..., exist_ok=True) replaces the exists()/mkdir() pairs:
    # it is race-free and also creates any missing parent directories.
    js_dir = os.path.join(model_dir, f"js-{model_id}")
    os.makedirs(js_dir, exist_ok=True)

    h5_dir = os.path.join(model_dir, "h5")
    os.makedirs(h5_dir, exist_ok=True)

    tf_path = os.path.join(model_dir, f"savedmodel-{model_id}")
    os.makedirs(tf_path, exist_ok=True)

    tfjs.converters.save_keras_model(model, js_dir)
    model.save(
        os.path.join(h5_dir, f"morpheus-deblend-{model_id}.h5"),
        save_format="h5",
        include_optimizer=False,
    )
    model.save(tf_path, include_optimizer=False)
# Script entry point: expects the model id as the single CLI argument.
if __name__=="__main__":
    model_id = sys.argv[1]
    main(model_id)
| 30.412844 | 82 | 0.720362 |
b975140a6a8d7165dc725bf77f81ea174cdaa8d0 | 1,204 | py | Python | scanmatcher/launch/mapping_robot.launch.py | jediofgever/lidarslam_ros2 | 91d4f1049193d98876fbca8bd74c40d20df2d229 | [
"BSD-2-Clause"
] | null | null | null | scanmatcher/launch/mapping_robot.launch.py | jediofgever/lidarslam_ros2 | 91d4f1049193d98876fbca8bd74c40d20df2d229 | [
"BSD-2-Clause"
] | null | null | null | scanmatcher/launch/mapping_robot.launch.py | jediofgever/lidarslam_ros2 | 91d4f1049193d98876fbca8bd74c40d20df2d229 | [
"BSD-2-Clause"
] | null | null | null | import os
import launch
import launch_ros.actions
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
    """Assemble the mapping launch description: the scan-matcher node (with
    its parameter file and topic remappings) plus a static TF publisher for
    base_link -> velodyne_link."""
    default_params = os.path.join(
        get_package_share_directory('scanmatcher'),
        'param',
        'mapping_robot.yaml')
    mapping_param_dir = launch.substitutions.LaunchConfiguration(
        'mapping_param_dir', default=default_params)

    mapping_node = launch_ros.actions.Node(
        package='scanmatcher',
        executable='scanmatcher_node',
        parameters=[mapping_param_dir],
        remappings=[('/input_cloud', '/velodyne_points'), ('/imu', '/imu/data')],
        #remappings=[('/imu','/gpsimu_driver/imu_data')],# for imu debug
        output='screen')

    static_tf_node = launch_ros.actions.Node(
        package='tf2_ros',
        executable='static_transform_publisher',
        arguments=['0', '0', '0', '0', '0', '0', '1', 'base_link', 'velodyne_link'])

    param_arg = launch.actions.DeclareLaunchArgument(
        'mapping_param_dir',
        default_value=mapping_param_dir,
        description='Full path to mapping parameter file to load')

    return launch.LaunchDescription([param_arg, mapping_node, static_tf_node])
378642773d087d118d3fb9641e2c12e0c6fd0c00 | 13,722 | py | Python | GANs/GAN_v4.3.py | jessvb/3d_world_procedural_generation | 44468f4267ccb378de90efb53d6c52a204cd6e25 | [
"MIT"
] | 7 | 2019-01-29T21:20:01.000Z | 2020-11-23T01:03:04.000Z | GANs/GAN_v4.3.py | jessvb/3d_world_procedural_generation | 44468f4267ccb378de90efb53d6c52a204cd6e25 | [
"MIT"
] | null | null | null | GANs/GAN_v4.3.py | jessvb/3d_world_procedural_generation | 44468f4267ccb378de90efb53d6c52a204cd6e25 | [
"MIT"
] | 1 | 2021-07-12T10:43:29.000Z | 2021-07-12T10:43:29.000Z | # v4.3 changes all tf.nn.relu activations to tf.nn.leaky_relu
# and uses values between 1 and 2 (instead of -1 and 1), two different alphas, image resize upscaling and 256x256px images
# and trains forever instead of just for #iterations
# TODO: things to try:
# - change the "kernel size" of the layers to be the same size as a river (e.g., 8px)
# - changing the number of conv layers in the generator (fewer layers, but still output 256x256)
# - Try inputting the original images to the GAN to see what happens!
import tensorflow as tf
import random
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import pickle
from PIL import Image
BATCH_SIZE = 150 # training images per step # todo: tune
# ITERATIONS = 10000 # NOTE: does not stop for x iterations -- runs forever now!
D_ALPHA = 3e-6 # the discriminator learning rate todo
G_ALPHA = 3e-4 # the generator learning rate todo
# get the training data (pickled array of grayscale heightmap images)
x_train = pickle.load(open('pickled/_0.pickle', "rb"))
# (numImgs, IMAGE_SIZE, IMAGE_SIZE, 1): (###, 512, 512, 1)
IMAGE_SIZE = np.shape(x_train)[1] # 512
numImgs = np.shape(x_train)[0]
# arrange the images into a 4-D (N, H, W, 1) tensor
x_train = np.array([x_train])
x_train = x_train.reshape([numImgs, IMAGE_SIZE, IMAGE_SIZE, 1])
print('~~~~~~~~~~~~~~~~~~~~x_train before scale:', x_train)
# x_train = x_train / 255 * 2 - 1 # scale between -1 and 1
# NOTE(review): inputs are scaled to [1, 2], but the generator ends in tanh
# ([-1, 1]) and samples are later rescaled assuming [-1, 1] -- confirm the
# intended input range.
x_train = x_train / 255 + 1 # scale between 1 and 2 (no zero.)
# if you want to view the original images
# for i in range(10, 14):
#     plt.imshow(x_train[i].reshape([IMAGE_SIZE, IMAGE_SIZE]),
#                cmap=plt.get_cmap('gray'))
#     plt.show()
def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding (shape-preserving)."""
    return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME')
def avg_pool_2x2(x):
    """2x2 average pooling with stride 2: halves the spatial dimensions."""
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# single x_image is 512*512px
# single x_image is 512*512px
def discriminator(x_image, reuse=False):
    """Two conv+pool layers followed by two fully-connected layers; returns
    a single unnormalized logit per image (sigmoid applied by the loss).
    Variable names are fixed by the 'discriminator' scope for checkpointing;
    pass reuse=True to share weights across calls."""
    with tf.variable_scope('discriminator') as scope:
        if (reuse):
            tf.get_variable_scope().reuse_variables()
        # First Conv and Pool Layers
        W_conv1 = tf.get_variable(
            'd_wconv1', [5, 5, 1, 8], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_conv1 = tf.get_variable(
            'd_bconv1', [8], initializer=tf.constant_initializer(0))
        h_conv1 = tf.nn.leaky_relu(conv2d(x_image, W_conv1) + b_conv1)
        # h_conv1 dimensions: batch x 512 x 512 x 8
        h_pool1 = avg_pool_2x2(h_conv1)
        # Second Conv and Pool Layers
        W_conv2 = tf.get_variable('d_wconv2', [
                                  5, 5, 8, 16], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_conv2 = tf.get_variable(
            'd_bconv2', [16], initializer=tf.constant_initializer(0))
        h_conv2 = tf.nn.leaky_relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = avg_pool_2x2(h_conv2)
        # Flattened feature size, computed from the actual pooled shape so it
        # adapts to the input resolution.
        dimVal = np.shape(h_pool2)[1]*np.shape(h_pool2)[2] * \
            np.shape(h_pool2)[3]  # before: 7 * 7 * 16
        # First Fully Connected Layer
        W_fc1 = tf.get_variable('d_wfc1', [
                                dimVal, 32], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_fc1 = tf.get_variable(
            'd_bfc1', [32], initializer=tf.constant_initializer(0))
        h_pool2_flat = tf.reshape(h_pool2, [-1, dimVal])
        h_fc1 = tf.nn.leaky_relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        # Second Fully Connected Layer
        W_fc2 = tf.get_variable(
            'd_wfc2', [32, 1], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_fc2 = tf.get_variable(
            'd_bfc2', [1], initializer=tf.constant_initializer(0))
        # Final Layer: raw logit (no activation)
        y_conv = (tf.matmul(h_fc1, W_fc2) + b_fc2)
    return y_conv
def generator(z, batch_size, z_dim, reuse=False):
    """Map a noise batch *z* (batch_size x z_dim) to images of shape
    (batch_size, IMAGE_SIZE, IMAGE_SIZE, 1) in [-1, 1] (final tanh).
    Upsampling uses image resize + conv (instead of conv2d_transpose) to
    reduce checkerboard artifacts; pass reuse=True to share weights."""
    with tf.variable_scope('generator') as scope:
        if (reuse):
            tf.get_variable_scope().reuse_variables()
        g_dim = 64  # Number of filters of first layer of generator
        # Color dimension of output (MNIST is grayscale, so c_dim = 1 for us)
        c_dim = 1
        # s = 28 #Output size of the image
        # Output size of the image --> changed to the number of pixels of our input image (512)
        s = IMAGE_SIZE
        # We want to slowly upscale the image, so these values will help
        s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)
        # make that change gradual. --> s=512, s2=256, s4=128, s8=64, s16=32
        # h0 = tf.reshape(z, [batch_size, s16+1, s16+1, 25]) # s16 = 128
        # --> s*s/((s16)*(s16)) ---> changed such that s16*s16*256=s*s
        h0 = tf.reshape(z, [batch_size, s16, s16, 256])
        h0 = tf.nn.leaky_relu(h0)
        # Dimensions of h0 = batch_size x s16 x s16 x 256
        # First upsample-conv block
        output1_shape = [batch_size, s8, s8, g_dim*4]
        # b_conv and W_conv's are unused --> deleted these
        # instead of tf.nn.conv2d_transpose, let's use resize_images to upsample to reduce artifacts
        H_conv1 = tf.image.resize_images(images=h0,
                                         size=tf.constant(
                                             [output1_shape[1], output1_shape[2]]),
                                         method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        H_conv1 = tf.layers.conv2d(inputs=H_conv1, filters=s2, kernel_size=(
            5, 5), padding='same', activation=tf.nn.leaky_relu)
        H_conv1 = tf.contrib.layers.batch_norm(
            inputs=H_conv1, center=True, scale=True, is_training=True, scope="g_bn1")
        H_conv1 = tf.nn.leaky_relu(H_conv1)
        # Dimensions of H_conv1 = batch_size x s8 x s8 x s2
        # Second upsample-conv block
        output2_shape = [batch_size, s4 - 1, s4 - 1, g_dim*2]
        # b_conv and W_conv's are unused --> deleted these
        H_conv2 = tf.image.resize_images(images=H_conv1,
                                         size=tf.constant(
                                             [output2_shape[1], output2_shape[2]]),
                                         method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        # H_conv2 = tf.nn.conv2d_transpose(H_conv1, W_conv2, output_shape=output2_shape,
        #                                  strides=[1, 2, 2, 1], padding='SAME') + b_conv2
        H_conv2 = tf.layers.conv2d(inputs=H_conv2, filters=s4, kernel_size=(
            5, 5), padding='same', activation=tf.nn.leaky_relu)
        H_conv2 = tf.contrib.layers.batch_norm(
            inputs=H_conv2, center=True, scale=True, is_training=True, scope="g_bn2")
        H_conv2 = tf.nn.leaky_relu(H_conv2)
        # Dimensions of H_conv2 = batch_size x (s4-1) x (s4-1) x s4
        # Third upsample-conv block
        output3_shape = [batch_size, s2 - 2, s2 - 2, g_dim*1]
        # b_conv and W_conv's are unused --> deleted these
        H_conv3 = tf.image.resize_images(images=H_conv2,
                                         size=tf.constant(
                                             [output3_shape[1], output3_shape[2]]),
                                         method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        # H_conv3 = tf.nn.conv2d_transpose(H_conv2, W_conv3, output_shape=output3_shape,
        #                                  strides=[1, 2, 2, 1], padding='SAME') + b_conv3
        H_conv3 = tf.layers.conv2d(inputs=H_conv3, filters=s8, kernel_size=(
            5, 5), padding='same', activation=tf.nn.leaky_relu)
        H_conv3 = tf.contrib.layers.batch_norm(
            inputs=H_conv3, center=True, scale=True, is_training=True, scope="g_bn3")
        H_conv3 = tf.nn.leaky_relu(H_conv3)
        # Dimensions of H_conv3 = batch_size x (s2-2) x (s2-2) x s8
        # Fourth (output) block: upsample to full size, single channel, tanh
        output4_shape = [batch_size, s, s, c_dim]
        # b_conv and W_conv's are unused --> deleted these
        H_conv4 = tf.image.resize_images(images=H_conv3,
                                         size=tf.constant(
                                             [output4_shape[1], output4_shape[2]]),
                                         method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        # H_conv4 = tf.nn.conv2d_transpose(H_conv3, W_conv4, output_shape=output4_shape,
        #                                  strides=[1, 2, 2, 1], padding='VALID') + b_conv4
        H_conv4 = tf.layers.conv2d(inputs=H_conv4, filters=1, kernel_size=(
            5, 5), padding='same', activation=tf.nn.leaky_relu)  # this should have 'VALID' padding??
        H_conv4 = tf.nn.tanh(H_conv4)
        # Dimensions of H_conv4 = batch_size x s x s x 1
        print('h0: ', np.shape(h0))
        print('H_conv1: ', np.shape(H_conv1))
        print('H_conv2: ', np.shape(H_conv2))
        print('H_conv3: ', np.shape(H_conv3))
        print('H_conv4: ', np.shape(H_conv4))
    return H_conv4
# # create and view a single (essentially randomly) generated image:
# sess = tf.Session()
# # changed from 100 --> want a 512*512 image
# z_dimensions = IMAGE_SIZE*IMAGE_SIZE
# z_test_placeholder = tf.placeholder(tf.float32, [None, z_dimensions])
# sample_image = generator(z_test_placeholder, 1, z_dimensions)
# test_z = np.random.normal(-1, 1, [1, z_dimensions])
# sess.run(tf.global_variables_initializer())
# temp = (sess.run(sample_image, feed_dict={z_test_placeholder: test_z}))
# my_i = temp.squeeze()
# plt.imshow(my_i, cmap='gray_r')
# plt.show()
### Training a GAN ###
# Noise-vector length: one value per output pixel.
# changed from 100 --> want a 512*512 image
z_dimensions = IMAGE_SIZE*IMAGE_SIZE
batch_size = BATCH_SIZE
# Reset the default graph so re-running the script starts clean.
tf.reset_default_graph()
sess = tf.Session()
# Placeholder for input images to the discriminator
x_placeholder = tf.placeholder(
    "float", shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1])  # 28,28,1]) # <-- original shape (now 512x512x1)
# Placeholder for input noise vectors to the generator
z_placeholder = tf.placeholder(tf.float32, [None, z_dimensions])
# Dx will hold discriminator logits for the real images
Dx = discriminator(x_placeholder)
# Gz holds the generated images
Gz = generator(z_placeholder, batch_size, z_dimensions)
# Dg will hold discriminator logits for generated images (weights shared)
Dg = discriminator(Gz, reuse=True)
# ensure forward compatibility: function needs to have logits and labels args explicitly used
# Generator wants D to label fakes as real; discriminator wants real->1, fake->0.
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=Dg, labels=tf.ones_like(Dg)))
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=Dx, labels=tf.ones_like(Dx)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=Dg, labels=tf.zeros_like(Dg)))
d_loss = d_loss_real + d_loss_fake
# Split trainable variables by the d_/g_ name prefixes so each optimizer
# only updates its own network.
tvars = tf.trainable_variables()
d_vars = [var for var in tvars if 'd_' in var.name]
g_vars = [var for var in tvars if 'g_' in var.name]
# print(tf.get_variable_scope().reuse)
d_adam = tf.train.AdamOptimizer(D_ALPHA)
g_adam = tf.train.AdamOptimizer(G_ALPHA)
trainerD = d_adam.minimize(d_loss, var_list=d_vars)
trainerG = g_adam.minimize(g_loss, var_list=g_vars)
sess.run(tf.global_variables_initializer())
# Create a saver object which will save all the variables
saver = tf.train.Saver()
# Training loop: runs until the process is killed; checkpoints and sample
# images are written every 20 batches.
import os
import time

i = 0
while True:
    print('starting iteration: ', i)
    # Fresh noise vectors for the generator on every step.
    z_batch = np.random.normal(-1, 1, size=[batch_size, z_dimensions])
    # BUG FIX: the original sliced x_train[i:i+batch_size] with an
    # ever-growing i, so after one pass over the data the slice became
    # short and then empty.  Wrap indices modulo numImgs so every batch is
    # full and training cycles through the dataset indefinitely.
    idx = np.arange(i, i + batch_size) % numImgs
    real_image_batch = x_train[idx, :, :, :]
    _, dLoss = sess.run([trainerD, d_loss], feed_dict={
                        z_placeholder: z_batch, x_placeholder: real_image_batch})  # Update the discriminator
    _, gLoss = sess.run([trainerG, g_loss], feed_dict={
                        z_placeholder: z_batch})  # Update the generator
    i = i+batch_size
    if i % (batch_size*10) == 0:
        print('done batch ', i/batch_size)
    if i % (batch_size*20) == 0:
        print('saving checkpoint at batch ', i/batch_size)
        saver.save(sess, './checkpoints/GAN'+str(i/batch_size))
        print('saving image at batch ', i/batch_size)
        # Sample a single image from the (weight-shared) generator.
        sample_image = generator(z_placeholder, 1, z_dimensions, True)
        z_batch = np.random.normal(-1, 1, size=[1, z_dimensions])
        temp = (sess.run(sample_image, feed_dict={z_placeholder: z_batch}))
        my_i = temp.squeeze()
        print('##################image array before scale:', my_i)
        # Generator ends in tanh, so pixels are in [-1, 1]; map to [0, 255].
        my_i = (my_i+1)*255/2
        print('!!!!!!!!!image array:', my_i)
        im = Image.fromarray(my_i)
        im = im.convert('RGB')
        # Never overwrite an existing sample: fall back to a timestamped name.
        if os.path.exists('./generated/generated_' + str(i/batch_size) + '.png'):
            im.save("./generated/generated_" + str(i/batch_size) +
                    "{}.png".format(int(time.time())), "PNG")
        else:
            im.save('./generated/generated_' +
                    str(i/batch_size) + '.png', "PNG")

# Unreachable: the loop above only ends when the process is killed.
print('done training and generation!')
# see restoreAndView.py if you want to restore the model
92800cf0c586918525654efae8c23f5f0308826c | 155 | py | Python | pylibup/cli/__init__.py | trisongz/pylibup | 456c082032cb14e7b2f12f115b4033237a0b1d1f | [
"MIT"
] | null | null | null | pylibup/cli/__init__.py | trisongz/pylibup | 456c082032cb14e7b2f12f115b4033237a0b1d1f | [
"MIT"
] | null | null | null | pylibup/cli/__init__.py | trisongz/pylibup | 456c082032cb14e7b2f12f115b4033237a0b1d1f | [
"MIT"
] | null | null | null | from . import base
from . import app
from .base import baseCli
from .app import repoCli, stateCli
baseCli.add_typer(repoCli)
baseCli.add_typer(stateCli)
| 17.222222 | 34 | 0.793548 |
b0cc334da82a8d8d2afbe100ce002f7948f2cc37 | 4,938 | py | Python | tools/globus/demuxer.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | 1 | 2021-02-05T13:19:58.000Z | 2021-02-05T13:19:58.000Z | tools/globus/demuxer.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | tools/globus/demuxer.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
"""Creates a simple single file dataset from a composite CGA dataset
"""
import json
from optparse import OptionParser
import os
import re
import sys
def parse_lane_and_part_numbers(path):
    """Extract the (lane, part) numbers from a reads file name.

    Example: ``reads_GS21910-FS3-L04_004.tsv.bz2`` -> ``(4, 4)``.
    """
    stem = os.path.basename(path).split(".")[0]
    # The trailing dash-separated segment encodes "L<lane>_<part>".
    lane_token, _, part_token = stem.split("-")[-1].partition("_")
    return int(lane_token.replace("L", "")), int(part_token)
def link(datapath, outpath):
    """Point *outpath* at *datapath* via a symbolic link.

    Any pre-existing file or (possibly dangling) symlink at *outpath* is
    removed first.  Unlike the original unconditional ``os.unlink``, a
    missing *outpath* is no longer an error.
    """
    # lexists() also catches dangling symlinks, which exists() misses.
    if os.path.lexists(outpath):
        os.unlink(outpath)
    os.symlink(datapath, outpath)
def parse_chromosome_number(path):
    """Return the chromosome number embedded in *path*'s basename.

    The name is expected to contain ``chrm<digits>_`` (e.g.
    ``foo_chrm12_bar.tsv`` -> 12).  Returns None when no such marker
    is present.
    """
    basename = os.path.basename(path)
    m = re.search(".*chrm([0-9]+)_.*", basename)
    number = None
    if m:
        try:
            number = int(m.groups()[0])
        except ValueError:
            # Defensive only: a [0-9]+ capture always converts cleanly.
            # The parenthesized print keeps this line valid on both
            # Python 2 (the rest of this script) and Python 3.
            print("%s does not appear to have a chromosome number!" % (path,))
    return number
def extract_file(data, extension, lane_number, part_number, outpath, chromosome_number=1):
    # Scan the composite dataset's "files" list for the first entry that
    # matches either the requested lane/part (with the right extension)
    # or the requested chromosome number, and symlink it to outpath.
    # NOTE: Python 2 source (print statements).
    datasets = data["files"]
    print "lane_number:", lane_number, "part_number:", part_number
    for dataset in datasets:
        path = dataset["path"]
        print "path:", path
        try:
            lane_no, part_no = parse_lane_and_part_numbers(path)
        except:
            # File name does not follow the reads_*-L<lane>_<part> scheme.
            lane_no, part_no = None, None
        print "lane_no, part_no:", lane_no, part_no
        chrom_no = parse_chromosome_number(path)
        if ((lane_no == lane_number and part_no == part_number and path.endswith(extension)) or
            chromosome_number == chrom_no):
            datapath = path
            print "Creating a symbolic link to the data %s at outpath %s"%(
                datapath, outpath)
            if not os.path.exists(datapath):
                # Some tools append the extension to the recorded path;
                # retry with it appended before linking.
                print "Some of these tools add another extension (e.g.: samtools sam2bam) .bam extensions..."
                datapath += ".%s"%(extension,)
            print "os.path.exists(outpath):", os.path.exists(outpath)
            link(datapath, outpath)
            return
    raise Exception("File not found!")
def convert_to_dataset(inpath, indir, outpath,
                       output_type, lane_number, part_number):
    """Extract one lane/part dataset out of a composite CGA JSON index.

    *inpath* is the JSON primary file; *outpath* receives a symlink to
    the matching data file.  *output_type* selects what to extract:
    "dat" (raw reads), "sam", "bam" or "boxplot.png".  *indir* is kept
    for interface compatibility with the command line front end.
    """
    try:
        lane_number = int(lane_number)
    except ValueError:
        print("Invalid lane number!")
    try:
        # BUG FIX: this used to re-convert lane_number, so the part
        # number passed on the command line was silently ignored.
        part_number = int(part_number)
    except ValueError:
        print("Invalid part number!")
    with open(inpath, "r") as json_file:
        data = json.load(json_file)
    if output_type == "dat":
        parts = data["parts"]
        found_file = False
        for name in parts:
            part = parts[name]
            print("part[%s] = %s" % (name, part))
            lane_no, part_no = parse_lane_and_part_numbers(part["reads"])
            if lane_no == lane_number and part_no == part_number:
                datapath = part["reads"]
                print("Creating a symbolic link to the data %s at outpath %s" % (
                    datapath, outpath))
                link(datapath, outpath)
                found_file = True
        if not found_file:
            raise Exception("Unable to find read or mapping file for this lane or part!")
    elif output_type == "sam":
        extract_file(data, "sam", lane_number, part_number, outpath)
    elif output_type == "bam":
        extract_file(data, "bam", lane_number, part_number, outpath)
    elif output_type == "boxplot.png":
        extract_file(data, "boxplot.png", lane_number, part_number, outpath)
    else:
        raise Exception("Unknown output_type '%s' specified!" % (output_type,))
if __name__ == "__main__":
    # Command line front end: pick one file out of a composite dataset.
    parser = OptionParser(usage=__doc__, version="%prog 0.01")
    parser.add_option("-o","--outpath",dest="outpath",
                      help="output file path", default = 'output.dat')
    parser.add_option("-t", "--output-type", dest="output_type",
                      help="output dataset type", default="dat")
    parser.add_option("-p","--indir",dest="indir",
                      help="path for input files", default = './')
    parser.add_option("-i", "--inpath", dest="inpath",
                      help="path for input primary file", default="./")
    parser.add_option("-l", "--lane", dest="lane_number",
                      help="lane number to extract", default="01")
    parser.add_option("-r", "--part", dest="part_number",
                      help="part number to extract", default="01")
    (options,args) = parser.parse_args()
    convert_to_dataset(options.inpath, options.indir, options.outpath,
                       options.output_type, options.lane_number,
                       options.part_number)
| 37.694656 | 110 | 0.59923 |
cb3e16ea5d83574e2aefa4131e42b9c2ea8b5d9e | 314 | py | Python | tests/conftest.py | stevenkbennett/stko | b416fccb3ca849151e27846b6b2d5516f5464190 | [
"MIT"
] | 8 | 2020-06-09T16:59:20.000Z | 2022-03-18T23:05:38.000Z | tests/conftest.py | stevenkbennett/stko | b416fccb3ca849151e27846b6b2d5516f5464190 | [
"MIT"
] | 60 | 2020-05-22T13:38:54.000Z | 2022-03-25T09:34:22.000Z | tests/conftest.py | stevenkbennett/stko | b416fccb3ca849151e27846b6b2d5516f5464190 | [
"MIT"
] | 4 | 2020-12-02T10:39:54.000Z | 2021-03-01T18:34:07.000Z |
def pytest_addoption(parser):
    """Register the ``--macromodel_path`` command line option (default '')."""
    parser.addoption('--macromodel_path', default='')
def pytest_generate_tests(metafunc):
    """Feed the --macromodel_path option value to tests that request it."""
    if 'macromodel_path' not in metafunc.fixturenames:
        return
    value = metafunc.config.getoption('macromodel_path')
    metafunc.parametrize('macromodel_path', [value])
| 28.545455 | 70 | 0.748408 |
a479ce7f4028005a16cb8fc8e8dd376c1ce61fc4 | 515 | py | Python | api/management/commands/drown_negativity.py | eiling/SchoolIdolAPI | a05980fdb33b143dbe2febfc1ad6cf723f025c8d | [
"Apache-2.0"
] | 65 | 2017-12-29T12:28:11.000Z | 2022-03-15T06:42:26.000Z | api/management/commands/drown_negativity.py | eiling/SchoolIdolAPI | a05980fdb33b143dbe2febfc1ad6cf723f025c8d | [
"Apache-2.0"
] | 31 | 2017-12-18T02:03:09.000Z | 2022-01-13T00:43:35.000Z | api/management/commands/drown_negativity.py | eiling/SchoolIdolAPI | a05980fdb33b143dbe2febfc1ad6cf723f025c8d | [
"Apache-2.0"
] | 7 | 2018-08-27T15:11:01.000Z | 2021-08-16T05:15:13.000Z | from django.core.management.base import BaseCommand, CommandError
from api import models
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from django.db.models import Q, F
class Command(BaseCommand):
    """Management command: back-date activities containing flagged words.

    Matching activities get their creation timestamp pushed two days
    into the past.
    """
    can_import_settings = True
    def handle(self, *args, **options):
        # Case-insensitive substring match on the stored message data.
        models.Activity.objects.filter(Q(message_data__icontains='eunice') | Q(message_data__icontains='astin') | Q(message_data__icontains='suici')).update(creation=(timezone.now() - relativedelta(days=2)))
| 42.916667 | 207 | 0.782524 |
7d0a8a8084b1025ff55186c2725f2130e936659c | 3,065 | py | Python | files/Snake_game.py | shubham-0927/python-games-with-tutle-library | ca1604eda7a361d0072b8aea4b0f4ad1e350f10c | [
"MIT"
] | 2 | 2022-01-23T13:30:36.000Z | 2022-01-26T16:09:53.000Z | files/Snake_game.py | shubham-0927/python-games-with-tutle-library | ca1604eda7a361d0072b8aea4b0f4ad1e350f10c | [
"MIT"
] | null | null | null | files/Snake_game.py | shubham-0927/python-games-with-tutle-library | ca1604eda7a361d0072b8aea4b0f4ad1e350f10c | [
"MIT"
] | null | null | null | import turtle
import time
import random
delay=0.1
scr = turtle.Screen()
scr.title("Snake game")
scr.bgcolor("black")
scr.setup(width=600, height=600)
scr.tracer(0)
brd=turtle.Turtle()
brd.penup()
brd.setposition(-300,-300)
brd.color("white")
brd.pendown()
brd.pensize(4)
for j in range(4):
brd.forward(600)
brd.left(90)
brd.hideturtle()
#for head
head = turtle.Turtle()
head.speed(0)
head.shape("square")
head.color("white")
head.penup()
head.goto(0,0)
head.direction = "right"
#for food
food = turtle.Turtle()
food.speed(0)
food.shape("circle")
food.color("red")
food.penup()
food.goto(0,0)
segments=[]
#for score
pen=turtle.Turtle()
pen.speed()
pen.color("yellow")
pen.penup()
pen.hideturtle()
pen.goto(0,260)
pen.write("score", align="center",font=('Arial',24,'normal'))
def up():
    # Refuse 180-degree turns: the snake cannot reverse onto itself.
    if head.direction !="down":
        head.direction="up"
def down():
    if head.direction !="up":
        head.direction="down"
def left():
    if head.direction != "right":
        head.direction="left"
def right():
    if head.direction != "left":
        head.direction="right"
def forexit():
    # Bound to 'x': close the turtle window and end the game.
    scr.bye()
def move():
    """Advance the snake's head one cell (20 px) along its current heading."""
    heading = head.direction
    if heading == "up":
        head.sety(head.ycor() + 20)
    elif heading == "down":
        head.sety(head.ycor() - 20)
    elif heading == "left":
        head.setx(head.xcor() - 20)
    elif heading == "right":
        head.setx(head.xcor() + 20)
scr.listen()
# WASD steering; 'x' quits the game.
scr.onkeypress(up,"w")
scr.onkeypress(down,"s")
scr.onkeypress(left,"a")
scr.onkeypress(right,"d")
scr.onkeypress(forexit,"x")
# Main game loop: redraw, handle collisions, food and movement each tick.
while True:
    scr.update()
    score = len(segments)
    pen.clear()
    pen.write(" SCORE: {} ".format(score), align='center', font=('Arial', 20))
    pen.write(" x = exit key", font=('Arial', 12))
    # Wall collision: reset the snake to the center and drop the tail.
    if head.xcor() > 290 or -290 > head.xcor() or head.ycor()> 290 or (-290)> head.ycor():
        time.sleep(1)
        head.goto(0,0)
        head.direction="stop"
        for s in segments:
            s.goto(1000,1000)
        segments.clear()
    # Food eaten: respawn food at a random spot and grow by one segment.
    if head.distance(food)<20:
        x = random.randint(-290, 290)
        y = random.randint(-290, 290)
        food.goto(x,y)
        new_seg=turtle.Turtle()
        new_seg.speed(0)
        new_seg.shape("square")
        new_seg.color("grey")
        new_seg.penup()
        segments.append(new_seg)
    # Shift body segments forward, tail first, then tuck the first
    # segment under the head's previous position.
    for i in range(len(segments)-1,0,-1):
        x=segments[i-1].xcor()
        y= segments[i-1].ycor()
        segments[i].goto(x,y)
    if len(segments)>0:
        x= head.xcor()
        y=head.ycor()
        segments[0].goto(x,y)
    move()
    # Self collision: same reset as hitting a wall.
    for s in segments:
        if s.distance(head)<20:
            time.sleep(1)
            head.goto(0,0)
            # NOTE(review): " stop" carries a stray leading space (the wall
            # branch uses "stop"); harmless since move() only matches the
            # four arrow directions, but inconsistent.
            head.direction =" stop"
            for s in segments:
                s.goto(1000, 1000)
            segments.clear()
    time.sleep(delay)
scr.mainloop() | 22.372263 | 102 | 0.543556 |
e0aefbce6cd56f73683817e10a8bf3f3c7b9eb74 | 190 | py | Python | telegram_bot_calendar/wmonth.py | whitebaronnb/python-telegram-bot-calendar | d6ea017539fa4e4a2710e408d55c48b4b46d0037 | [
"MIT"
] | 44 | 2020-08-05T20:19:45.000Z | 2022-03-10T22:29:19.000Z | telegram_bot_calendar/wmonth.py | whitebaronnb/python-telegram-bot-calendar | d6ea017539fa4e4a2710e408d55c48b4b46d0037 | [
"MIT"
] | 6 | 2021-01-08T16:07:24.000Z | 2022-02-15T18:39:51.000Z | telegram_bot_calendar/wmonth.py | whitebaronnb/python-telegram-bot-calendar | d6ea017539fa4e4a2710e408d55c48b4b46d0037 | [
"MIT"
] | 20 | 2020-09-08T16:19:22.000Z | 2022-03-14T15:39:56.000Z | from telegram_bot_calendar.base import DAY
from telegram_bot_calendar.detailed import DetailedTelegramCalendar
class WMonthTelegramCalendar(DetailedTelegramCalendar):
    """Calendar variant that opens directly on the day-of-month view."""
    # Skip the year/month steps; start the picker at day selection.
    first_step = DAY
| 27.142857 | 67 | 0.868421 |
2ba1b48561d3fc6e31ce5c672fae70166670480f | 762 | py | Python | sig-backend/server.py | antonioalfa22/sig-playas-asturias | 3cc087d44a0dc7cdc932adbc9e877a4b53fcff93 | [
"MIT"
] | 1 | 2020-12-01T19:53:14.000Z | 2020-12-01T19:53:14.000Z | sig-backend/server.py | antonioalfa22/sig-playas-asturias | 3cc087d44a0dc7cdc932adbc9e877a4b53fcff93 | [
"MIT"
] | null | null | null | sig-backend/server.py | antonioalfa22/sig-playas-asturias | 3cc087d44a0dc7cdc932adbc9e877a4b53fcff93 | [
"MIT"
] | null | null | null | """App entry point."""
import unittest
import argparse
from api import create_app
def run(app):
    """Serve *app* on all interfaces, port 5000."""
    app.run(port=5000, host='0.0.0.0')
def test():
    """Runs the unit tests."""
    # Discover every test module under ./api/test and run it verbosely.
    tests = unittest.TestLoader().discover('./api/test', pattern='test*.py')
    result = unittest.TextTestRunner(verbosity=2).run(tests)
    if result.wasSuccessful():
        return 0
    # Non-zero return value signals failure to the caller.
    return 1
def parse_args():
    """Parse the command line; expects one positional run mode argument."""
    cli = argparse.ArgumentParser(description='API Rest')
    cli.add_argument("mode", help="Run Mode [dev | prod | test]")
    return cli.parse_args()
def main(args):
    """Build the app for the requested mode and run it (or the test suite)."""
    mode = args.mode
    app = create_app(mode)
    if mode == "test":
        test()
    else:
        run(app)
if __name__ == '__main__':
    main(parse_args())
| 19.538462 | 76 | 0.628609 |
0ff2e5a055b2e96b4c7f1ecdd2283cf14b15ba1a | 7,756 | py | Python | Tests/Methods/Slot/test_HoleM53_meth.py | carbon-drive/pyleecan | e89d4fe97f23f6182c19127d2c6a2133614e169d | [
"Apache-2.0"
] | 1 | 2021-07-08T01:27:24.000Z | 2021-07-08T01:27:24.000Z | Tests/Methods/Slot/test_HoleM53_meth.py | ecs-kev/pyleecan | 1faedde4b24acc6361fa1fdd4e980eaec4ca3a62 | [
"Apache-2.0"
] | null | null | null | Tests/Methods/Slot/test_HoleM53_meth.py | ecs-kev/pyleecan | 1faedde4b24acc6361fa1fdd4e980eaec4ca3a62 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.SurfLine import SurfLine
from pyleecan.Classes.LamHole import LamHole
from pyleecan.Classes.HoleM53 import HoleM53
from pyleecan.Classes.Magnet import Magnet
from numpy import exp, arcsin, ndarray, pi
from pyleecan.Methods.Slot.HoleM53 import Slot53InterError
# For AlmostEqual
DELTA = 1e-6
HoleM53_test = list()
HoleM53_test_error = list()
# Two hole
test_obj = LamHole(is_internal=True, Rext=80.2e-3, Rint=0)
test_obj.hole = list()
test_obj.hole.append(
HoleM53(
Zh=8, H0=0.02, H1=0.001, H2=0.01, H3=0.003, W1=0.005, W2=0, W3=0.01, W4=0.78
)
)
HoleM53_test.append(
{
"test_obj": test_obj,
"S_exp": 3.63836e-4,
"SM_exp": 0.0002,
"Rmin": 5.8879558e-2,
"Rmax": 7.92e-2,
"W5": 7.78324e-3,
}
)
# One hole
test_obj = LamHole(is_internal=True, Rext=80.2e-3, Rint=0)
test_obj.hole = list()
test_obj.hole.append(
HoleM53(Zh=8, H0=0.02, H1=0.001, H2=0.01, H3=0.003, W1=0, W2=0, W3=0.01, W4=0.78)
)
HoleM53_test.append(
{
"test_obj": test_obj,
"S_exp": 3.73158e-4,
"SM_exp": 0.0002,
"Rmin": 5.8523556e-2,
"Rmax": 7.92e-2,
"W5": 8.317707e-3,
}
)
# Error test
test_obj = LamHole(is_internal=True, Rext=80.2e-3, Rint=0)
test_obj.hole = list()
test_obj.hole.append(
HoleM53(Zh=8, H0=0.02, H1=0.001, H2=0.01, H3=0.003, W1=0, W2=0, W3=0.01, W4=0.78)
)
HoleM53_test_error.append(
{
"test_obj": test_obj,
"S_exp": 3.73158e-4,
"SM_exp": 0.0002,
"Rmin": 5.8523556e-2,
"Rmax": 7.92e-2,
"W5": 8.317707e-3,
}
)
class Test_HoleM53_meth(object):
"""pytest for holeB53 methods"""
@pytest.mark.parametrize("test_dict", HoleM53_test)
def test_comp_surface(self, test_dict):
"""Check that the computation of the surface is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.hole[0].comp_surface()
a = result
b = test_dict["S_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
@pytest.mark.parametrize("test_dict", HoleM53_test)
def test_comp_surface_mag(self, test_dict):
"""Check that the computation of the magnet surface is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.hole[0].comp_surface_magnets()
a = result
b = test_dict["SM_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
@pytest.mark.parametrize("test_dict", HoleM53_test)
def test_comp_radius(self, test_dict):
"""Check that the computation of the radius is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.hole[0].comp_radius()
a = result[0]
b = test_dict["Rmin"]
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
a = result[1]
b = test_dict["Rmax"]
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
@pytest.mark.parametrize("test_dict", HoleM53_test)
def test_comp_W5(self, test_dict):
"""Check that the computation of W5 iscorrect"""
test_obj = test_dict["test_obj"]
a = test_obj.hole[0].comp_W5()
b = test_dict["W5"]
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
# Test that Z11 = Zlist[0]
test_obj2 = LamHole(is_internal=True, Rext=80.2e-3, Rint=0)
test_obj2.hole = list()
test_obj2.hole.append(
HoleM53(
Zh=8,
H0=0.00000000000000000000002,
H1=0.00000001,
H2=0.01,
H3=0.003,
W1=0,
W2=0,
W3=0.01,
W4=2.28,
)
)
a = test_obj2.hole[0].comp_W5()
assert -0.0014380265690122837 == a
@pytest.mark.parametrize("test_dict", HoleM53_test)
def test_build_geometry(self, test_dict):
"""Check that the build geometry method works"""
# is_simplified to True and magnetization Parallel
test_obj = test_dict["test_obj"]
test_obj.hole[0].magnet_0 = Magnet(type_magnetization=1)
test_obj.hole[0].magnet_1 = Magnet(type_magnetization=1)
a = test_obj.hole[0].build_geometry(is_simplified=True)
assert a[1].label == "HoleMagnet_Stator_Parallel_N_R0_T0_S0"
assert a[1].line_list[0] is not None
assert a[1].line_list[1] is not None
with pytest.raises(IndexError) as context:
a[1].line_list[2]
if test_obj.hole[0].W1 > 0:
assert a[4].label == "HoleMagnet_Stator_Parallel_N_R0_T1_S0"
assert a[4].line_list[0] is not None
assert a[4].line_list[1] is not None
with pytest.raises(IndexError) as context:
a[4].line_list[2]
else:
assert a[3].label == "HoleMagnet_Stator_Parallel_N_R0_T1_S0"
assert a[3].line_list[0] is not None
assert a[3].line_list[1] is not None
with pytest.raises(IndexError) as context:
a[3].line_list[2]
@pytest.mark.parametrize("test_dict", HoleM53_test_error)
def test_build_geometry_Z11_Z1_not_foundable(self, test_dict):
"""Check that the build geometry error works"""
test_obj = test_dict["test_obj"]
test_obj.hole[0] = HoleM53(
Zh=8,
H0=0.02,
H1=0.001,
H2=0.01,
H3=0.003,
W1=0.765149757,
W2=0.32542,
W3=0.0564,
W4=0.324,
)
# Z11
with pytest.raises(Slot53InterError) as context:
test_obj.hole[0].build_geometry()
test_obj.hole[0] = HoleM53(
Zh=8,
H0=50.02,
H1=10.0054456451,
H2=40.56456456401,
H3=0.968464003,
W1=10.0,
W2=0.14540,
W3=1.01546654654,
W4=0.05144,
)
# Z1
with pytest.raises(Slot53InterError) as context:
test_obj.hole[0].build_geometry()
@pytest.mark.parametrize("test_dict", HoleM53_test_error)
def test_build_geometry_Z11_Z1(self, test_dict):
"""Check nothing it's just for the coverage"""
test_obj = test_dict["test_obj"]
test_obj.hole[0] = HoleM53(
Zh=8, H0=0.02, H1=0.001, H2=0.01, H3=0.003, W1=0.005, W2=0, W3=0.01, W4=0.78
)
lst_pattern = test_obj.hole[0].build_geometry()
# Z11 = Zlist[0]
test_obj.hole[0] = HoleM53(
Zh=8,
H0=0.00000000000000000000002,
H1=0.00000001,
H2=0.01,
H3=0.003,
W1=0,
W2=0,
W3=0.01,
W4=2.28,
)
lst1 = test_obj.hole[0].build_geometry()
# Z1 = Zlist[0]
test_obj.hole[0] = HoleM53(
Zh=8,
H0=0.00000000000000000000002,
H1=0.00000001,
H2=0.01,
H3=0.003,
W1=0,
W2=0,
W3=0.01,
W4=4.78,
)
lst2 = test_obj.hole[0].build_geometry()
assert len(lst1) != len(lst_pattern)
assert len(lst2) != len(lst_pattern)
def test_comp_surface_magnet_id(self):
"""check that id is 0"""
hole = HoleM53(
Zh=8, H0=0.02, H1=0.001, H2=0.01, H3=0.003, W1=0.005, W2=0, W3=0.01, W4=0.78
)
assert hole.comp_surface_magnet_id(2) == 0
| 29.716475 | 88 | 0.556601 |
c58bf5d48292c467cb4d7a62696670d744994bde | 11,310 | py | Python | test/test_languages/testPython.py | Wonshtrum/lizard | c900ecc9b09c0de27e1f15f50ad77115fc0ef0cb | [
"MIT"
] | 1,255 | 2015-01-07T20:24:45.000Z | 2022-03-31T02:39:50.000Z | test/test_languages/testPython.py | Wonshtrum/lizard | c900ecc9b09c0de27e1f15f50ad77115fc0ef0cb | [
"MIT"
] | 293 | 2015-01-05T14:31:16.000Z | 2022-03-24T18:12:16.000Z | test/test_languages/testPython.py | Wonshtrum/lizard | c900ecc9b09c0de27e1f15f50ad77115fc0ef0cb | [
"MIT"
] | 217 | 2015-01-07T20:24:49.000Z | 2022-03-30T19:20:21.000Z | import unittest
import inspect
from ..testHelpers import get_python_function_list_with_extension
from lizard_ext.lizardnd import LizardExtension as NestDepth
from lizard_languages.python import PythonReader
def get_python_function_list(source_code):
    # Convenience wrapper: analyze source with the nesting-depth extension on.
    return get_python_function_list_with_extension(source_code, NestDepth())
class Test_tokenizer_for_Python(unittest.TestCase):
    """Tokenizer-level checks for the Python reader."""
    def test_comment_with_quote(self):
        # A quote inside a comment must not open a string literal token.
        tokens = PythonReader.generate_tokens("#'\n''")
        self.assertEqual(["#'", "\n", "''"], list(tokens))
class Test_Python_nesting_level(unittest.TestCase):
def test_top_level_function(self):
functions = get_python_function_list(
"def a():\n" +
" pass")
self.assertEqual(0, functions[0].top_nesting_level)
def test_second_top_level_functions(self):
functions = get_python_function_list(
"def a():\n" +
" pass\n" +
"def b():\n" +
" pass"
)
self.assertEqual(0, functions[1].top_nesting_level)
def test_top_level_function_with_leading_space(self):
functions = get_python_function_list(
" def a():\n" +
" pass\n"
)
self.assertEqual(1, functions[0].top_nesting_level)
def test_2nd_level_function_with_leading_space(self):
functions = get_python_function_list(
"class C:\n" +
" def f():\n" +
" pass\n"
)
self.assertEqual(1, functions[0].top_nesting_level)
def test_miss_indented_comment(self):
functions = get_python_function_list(
"class C:\n" +
" class D:\n" +
" def a():\n" +
" pass\n" +
" #\n" +
" def b():\n" +
" pass")
self.assertEqual(7, functions[0].end_line)
class Test_parser_for_Python(unittest.TestCase):
def test_empty_source_should_return_no_function(self):
functions = get_python_function_list("")
self.assertEqual(0, len(functions))
def test_simple_python_function(self):
class namespace1:
def simple_function():
if IamOnEarth:
return toMars()
functions = get_python_function_list(inspect.getsource(namespace1))
self.assertEqual(1, len(functions))
self.assertEqual("simple_function", functions[0].name)
self.assertEqual(2, functions[0].cyclomatic_complexity)
self.assertEqual(1, functions[0].max_nesting_depth)
self.assertEqual(4, functions[0].end_line)
self.assertEqual("simple_function( )", functions[0].long_name)
def test_two_simple_python_function(self):
source = """
def foo():
#'
return False
def bar():
if foo == 'bar':
return True
"""
functions = get_python_function_list(source)
self.assertEqual(2, len(functions))
def test_multi_line_function_def_function_end(self):
source = """
def foo(arg1,
arg2,
):
# comment
return True
def foo2(arg1,
arg2,
arg3
):
if True:
return False
"""
functions = get_python_function_list(source)
self.assertEqual(6, functions[0].end_line)
self.assertEqual(13, functions[1].end_line)
def test_parameter_count(self):
class namespace2:
def function_with_2_parameters(a, b):
pass
functions = get_python_function_list(inspect.getsource(namespace2))
self.assertEqual(2, functions[0].parameter_count)
def test_parameter_count_with_default_value(self):
class namespace_df:
def function_with_2_parameters_and_default_value(a, b=None):
pass
functions = get_python_function_list(inspect.getsource(namespace_df))
self.assertEqual(2, functions[0].parameter_count)
self.assertEqual(['a', 'b'], functions[0].parameters)
def test_function_end(self):
class namespace3:
def simple_function(self):
pass
blah = 42
functions = get_python_function_list(inspect.getsource(namespace3))
self.assertEqual(1, len(functions))
self.assertEqual("simple_function", functions[0].name)
self.assertEqual(3, functions[0].end_line)
def test_top_level_functions(self):
functions = get_python_function_list(inspect.getsource(top_level_function_for_test))
self.assertEqual(1, len(functions))
def test_2_top_level_functions(self):
functions = get_python_function_list('''
def a():
pass
def b():
pass
''')
self.assertEqual(2, len(functions))
self.assertEqual("a", functions[0].name)
def test_2_functions(self):
class namespace4:
def function1(a, b):
pass
def function2(a, b):
pass
functions = get_python_function_list(inspect.getsource(namespace4))
self.assertEqual(2, len(functions))
def test_nested_functions(self):
class namespace5:
def function1(a, b):
def function2(a, b):
pass
a = 1 if b == 2 else 3
functions = get_python_function_list(inspect.getsource(namespace5))
self.assertEqual(2, len(functions))
self.assertEqual("function1.function2", functions[0].name)
self.assertEqual(4, functions[0].end_line)
self.assertEqual("function1", functions[1].name)
self.assertEqual(5, functions[1].end_line)
self.assertEqual(2, functions[1].cyclomatic_complexity)
self.assertEqual(2, functions[1].max_nesting_depth)
# will be fixed, should be equal to 1
def test_nested_functions_ended_at_eof(self):
class namespace6:
def function1(a, b):
def function2(a, b):
pass
functions = get_python_function_list(inspect.getsource(namespace6))
self.assertEqual(2, len(functions))
self.assertEqual("function1.function2", functions[0].name)
self.assertEqual(4, functions[0].end_line)
self.assertEqual("function1", functions[1].name)
self.assertEqual(4, functions[1].end_line)
def test_nested_functions_ended_at_same_line(self):
class namespace7:
def function1(a, b):
def function2(a, b):
pass
def function3():
pass
functions = get_python_function_list(inspect.getsource(namespace7))
self.assertEqual(3, len(functions))
self.assertEqual("function1.function2", functions[0].name)
self.assertEqual(4, functions[0].end_line)
self.assertEqual("function1", functions[1].name)
self.assertEqual(4, functions[1].end_line)
def xtest_one_line_functions(self):
class namespace8:
def a( ):pass
def b( ):pass
functions = get_python_function_list(inspect.getsource(namespace8))
self.assertEqual("a", functions[0].name)
self.assertEqual("b", functions[1].name)
def test_nested_depth_metric_multiple_continuous_loop_statements(self):
class namespace9:
def function1():
if IamOnEarth:
if IamOnShip:
return toMars()
functions = get_python_function_list(inspect.getsource(namespace9))
self.assertEqual(1, len(functions))
self.assertEqual("function1", functions[0].name)
self.assertEqual(3, functions[0].cyclomatic_complexity)
self.assertEqual(2, functions[0].max_nesting_depth)
self.assertEqual(5, functions[0].end_line)
def xtest_nested_depth_metric_multiple_discrete_loop_statement(self):
class namespace10:
def function1():
if IamOnEarth:
if not IamOnShip:
return toMars()
elif IamOnMoon:
return backEarth()
functions = get_python_function_list(inspect.getsource(namespace10))
self.assertEqual(1, len(functions))
self.assertEqual("function1", functions[0].name)
self.assertEqual(4, functions[0].cyclomatic_complexity)
self.assertEqual(2, functions[0].max_nesting_depth)
self.assertEqual(7, functions[0].end_line)
def test_comment_is_not_counted_in_nloc(self):
def function_with_comments():
# comment
pass
functions = get_python_function_list(inspect.getsource(function_with_comments))
self.assertEqual(2, functions[0].nloc)
def test_odd_blank_line(self):
code = "class c:\n" + \
" def f():\n" +\
" \n" +\
" pass\n"
functions = get_python_function_list(code)
self.assertEqual(4, functions[0].end_line)
def test_odd_line_with_comment(self):
code = "class c:\n" + \
" def f():\n" +\
" #\n" +\
" pass\n"
functions = get_python_function_list(code)
self.assertEqual(4, functions[0].end_line)
def test_tab_is_same_as_8_spaces(self):
code = ' ' * 7 + "def a():\n" + \
'\t' + "pass\n"
functions = get_python_function_list(code)
self.assertEqual(2, functions[0].end_line)
def xtest_if_elif_and_or_for_while_except_finally(self):
code = 'def a():\n' + \
' if elif and or for while except finally\n'
functions = get_python_function_list(code)
self.assertEqual(9, functions[0].cyclomatic_complexity)
self.assertEqual(8, functions[0].max_nesting_depth)
def test_block_string_is_one_token(self):
code = 'def a():\n' + \
" a = '''\n" +\
"a b c d e f g h i'''\n"+\
" return a\n"
functions = get_python_function_list(code)
self.assertEqual(9, functions[0].token_count)
self.assertEqual(4, functions[0].end_line)
def check_function_info(self, source, expect_token_count, expect_nloc, expect_endline):
functions = get_python_function_list(source)
self.assertEqual(expect_token_count, functions[0].token_count)
self.assertEqual(expect_nloc, functions[0].nloc)
self.assertEqual(expect_endline, functions[0].end_line)
def test_block_string(self):
self.check_function_info('def f():\n a="""block string"""', 7, 2, 2)
self.check_function_info("def f():\n a='''block string'''", 7, 2, 2)
self.check_function_info("def f():\n a='''block string'''", 7, 2, 2)
self.check_function_info("def f():\n a='''block\n string'''", 7, 3, 3)
self.check_function_info("def f():\n a='''block\n '''", 7, 3, 3)
def test_docstring_is_not_counted_in_nloc(self):
self.check_function_info("def f():\n '''block\n '''\n pass", 6, 2, 4)
#global complexity
def top_level_function_for_test():
    # Fixture: minimal module-level function used by tests in this module.
    pass
| 36.720779 | 92 | 0.603271 |
32f0039d407b46ae4505a7df30595ebca1d257c4 | 1,896 | py | Python | docs/samples/explanation/art/mnist/train_model.py | pydemia/kfserving | a0a52f8e7b97276b89393447524c78f2b702c257 | [
"Apache-2.0"
] | null | null | null | docs/samples/explanation/art/mnist/train_model.py | pydemia/kfserving | a0a52f8e7b97276b89393447524c78f2b702c257 | [
"Apache-2.0"
] | 635 | 2021-01-29T07:06:06.000Z | 2022-03-31T09:09:20.000Z | docs/samples/explanation/art/mnist/train_model.py | pydemia/kfserving | a0a52f8e7b97276b89393447524c78f2b702c257 | [
"Apache-2.0"
] | 1 | 2019-05-08T18:03:26.000Z | 2019-05-08T18:03:26.000Z |
import warnings
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.exceptions import ConvergenceWarning
from sklearn.neural_network import MLPClassifier
import joblib
import numpy as np
from aix360.datasets import MNISTDataset
data = MNISTDataset()

# AIX360 ships the MNIST *test* split; carve extra train/test samples from it.
X_train_h, X_test_h = data.test_data[:8000], data.test_data[8000:]
y_train_h, y_test_h = data.test_labels[:8000], data.test_labels[8000:]

# Flatten every 2-D image into a 784-element feature vector.
X_train_2 = X_train_h.reshape((len(X_train_h), -1))
X_test_2 = X_test_h.reshape((len(X_test_h), -1))

# Labels are one-hot encoded; convert to "0".."9" strings so they are
# comparable with the labels returned by fetch_openml.
y_train_2 = [str(label.argmax()) for label in y_train_h]
y_test_2 = [str(label.argmax()) for label in y_test_h]

print(data.test_data.shape)

# Load data from https://www.openml.org/d/554
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
X = np.asarray(X) / 255.
y = np.asarray(y)

# rescale the data, use the traditional train/test split, augmented with the
# AIX360 samples.
# BUG FIX: the original called list.extend() on these slices; extend()
# mutates in place and returns None, so X_train/X_test/y_train/y_test all
# ended up as None (or raised AttributeError on ndarrays).  Concatenate
# instead.
X_train = np.concatenate([X[:60000], X_train_2])
X_test = np.concatenate([X[60000:], X_test_2])
y_train = np.concatenate([y[:60000], np.asarray(y_train_2)])
y_test = np.concatenate([y[60000:], np.asarray(y_test_2)])

mlp = MLPClassifier(hidden_layer_sizes=(500, 500, 500), max_iter=10, alpha=1e-4,
                    solver='sgd', verbose=10, random_state=1,
                    learning_rate_init=.1)

# this example won't converge because of CI's time constraints, so we catch
# the warning and ignore it here
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=ConvergenceWarning,
                            module="sklearn")
    mlp.fit(X_train, y_train)

print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
joblib.dump(mlp, 'sklearnserver/sklearnserver/example_model/model.pkl') | 32.689655 | 79 | 0.727321 |
3e6b07c8b9a7c28064954490ffd94a7f9f5cd503 | 2,905 | py | Python | src/accounts/models.py | NestorMonroy/GreatKart | c417faed7e1ec430fd676b58f618cb66e7c07785 | [
"MIT"
] | null | null | null | src/accounts/models.py | NestorMonroy/GreatKart | c417faed7e1ec430fd676b58f618cb66e7c07785 | [
"MIT"
] | null | null | null | src/accounts/models.py | NestorMonroy/GreatKart | c417faed7e1ec430fd676b58f618cb66e7c07785 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
# Normal user
class MyAccountManager(BaseUserManager):
    """Manager that creates users keyed on a normalized email address."""
    def create_user(self, first_name, last_name, username, email, password=None):
        # Email and username are both mandatory identifiers.
        if not email:
            raise ValueError("User must have an email address")
        if not username:
            raise ValueError("User must have username")
        user = self.model(
            email=self.normalize_email(email),
            username=username,
            first_name=first_name,
            last_name=last_name,
        )
        # Store the password hashed, never in plain text.
        user.set_password(password)
        user.save(using=self._db)
        return user
    # create super user
    def create_superuser(self, first_name, last_name, email, username, password):
        # Reuse create_user, then grant every privilege flag.
        user = self.create_user(
            email=self.normalize_email(email),
            username=username,
            password=password,
            first_name=first_name,
            last_name=last_name,
        )
        user.is_admin = True
        user.is_active = True
        user.is_staff = True
        user.is_superadmin = True
        user.save(using=self._db)
        return user
class Account(AbstractBaseUser):
    """Custom user model that logs in with email rather than username."""
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    username = models.CharField(max_length=50, unique=True)
    email = models.EmailField(max_length=100, unique=True)
    phone_number = models.CharField(max_length=50)
    # Required
    date_joined = models.DateTimeField(auto_now_add=True)
    last_login = models.DateTimeField(auto_now_add=True)
    is_admin = models.BooleanField(default=False)
    is_staff = models.BooleanField(default=False)
    is_active = models.BooleanField(default=False)
    is_superadmin = models.BooleanField(default=False)
    # for me to login with email address in admin platform
    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = ["username", "first_name", "last_name"]
    objects = MyAccountManager()
    def full_name(self):
        return f"{self.first_name} {self.last_name}"
    def __str__(self):
        return self.email
    def has_perm(self, perm, obj=None):
        # Admins implicitly hold every permission.
        return self.is_admin
    def has_module_perms(self, add_label):
        # Every user is granted module-level access.
        return True
class UserProfile(models.Model):
    """Optional profile data attached one-to-one to an Account."""
    # Deleting the account cascades to its profile.
    user = models.OneToOneField(Account, on_delete=models.CASCADE)
    # All fields are optional (blank=True) so a bare profile can be created.
    address_line_1 = models.CharField(blank=True, max_length=100)
    address_line_2 = models.CharField(blank=True, max_length=100)
    profile_picture = models.ImageField(blank=True, upload_to='userprofile')
    city = models.CharField(blank=True, max_length=20)
    state = models.CharField(blank=True, max_length=20)
    country = models.CharField(blank=True, max_length=20)
    def __str__(self):
        """Display the owning user's first name."""
        return self.user.first_name
    # Joins both address lines into one display string.
    def full_address(self):
return f'{self.address_line_1} {self.address_line_2}' | 32.277778 | 81 | 0.68296 |
16d2fa3474befddfa2ff332c11cd3b8df5835740 | 752 | py | Python | Electron_configuration.py | AndreasFlensmark/Learning-python | 7023b787a16869ae6c2a6150987e0433b67b1057 | [
"Unlicense"
] | null | null | null | Electron_configuration.py | AndreasFlensmark/Learning-python | 7023b787a16869ae6c2a6150987e0433b67b1057 | [
"Unlicense"
] | null | null | null | Electron_configuration.py | AndreasFlensmark/Learning-python | 7023b787a16869ae6c2a6150987e0433b67b1057 | [
"Unlicense"
] | null | null | null | import math
# Calculates the electron configuration of atoms
# NOTE: `____` below is a fill-in-the-blank placeholder; the script raises
# NameError until it is replaced with an actual electron count.
Atomic_number =____ # number of electrons
o_name = ['s', 'p', 'd', 'f', 'g']  # subshell letters, indexed by angular momentum l
o_value = [2, 6, 10, 14, 18]  # electron capacity 2(2l+1) of each subshell type
output_string = ""
end_period = 1
# Each outer pass walks one diagonal of the filling order from higher l down
# to s; `end_period - i` is the principal quantum number of the subshell.
while Atomic_number > 0:
    for i in range(math.floor((end_period-1)/2), -1, -1):
        if(Atomic_number > o_value[i]):
            # Enough electrons remain: this subshell fills to capacity.
            output_string += "{0}{1}({2})".format(end_period -
                                                  i, o_name[i], o_value[i])
        else:
            # Last subshell takes whatever electrons are left; we are done.
            output_string += "{0}{1}({2})".format(end_period -
                                                  i, o_name[i], Atomic_number)
            Atomic_number = 0
            break
        Atomic_number -= o_value[i]
    end_period += 1
print(output_string)
| 30.08 | 78 | 0.511968 |
6ad78620795fd010a1c78cb05caa02b5323d97a0 | 888 | py | Python | Components/student/migrations/0013_welcomepage.py | iamTanTan/E-Learning_Lab_Spring_2021 | e426ba982cc5044510eb1d8b80b377cb0bd5407a | [
"MIT"
] | 2 | 2021-01-29T22:35:28.000Z | 2021-05-13T23:35:54.000Z | Components/student/migrations/0013_welcomepage.py | iamTanTan/E-Learning_Lab_Spring_2021 | e426ba982cc5044510eb1d8b80b377cb0bd5407a | [
"MIT"
] | 8 | 2021-03-19T11:24:23.000Z | 2022-03-12T00:57:13.000Z | Components/student/migrations/0013_welcomepage.py | iamTanTan/E-Learning_Lab_Spring_2021 | e426ba982cc5044510eb1d8b80b377cb0bd5407a | [
"MIT"
] | 1 | 2021-09-11T15:00:09.000Z | 2021-09-11T15:00:09.000Z | # Generated by Django 3.0.5 on 2020-06-12 22:04
import ckeditor_uploader.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the WelcomePage table."""
    # Must run after these courses/student migrations so referenced tables exist.
    dependencies = [
        ('courses', '0005_auto_20200612_1421'),
        ('student', '0012_auto_20200612_1358'),
    ]
    operations = [
        migrations.CreateModel(
            name='WelcomePage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default='', max_length=255)),
                # Rich-text body provided by django-ckeditor's uploading field.
                ('content', ckeditor_uploader.fields.RichTextUploadingField()),
                # NOTE(review): the default is a hard-coded Courses primary key;
                # rows created without an explicit course silently point at that
                # record — confirm this is intended.
                ('courses', models.ForeignKey(default='dd390af4-07f1-4597-b48a-f585fd79289d', on_delete=django.db.models.deletion.CASCADE, to='courses.Courses')),
            ],
        ),
    ]
| 34.153846 | 162 | 0.637387 |
22cce23d3e5a86b5627faec0d82ff7b7d5dfeb0e | 1,100 | py | Python | portal/trash/migrations/versions/84d75343fc9d_.py | jeremybusk/demoflaskpgsqlnginxdocker | e76a5b4eda7034f60f51277da5bdd18decce740b | [
"MIT"
] | null | null | null | portal/trash/migrations/versions/84d75343fc9d_.py | jeremybusk/demoflaskpgsqlnginxdocker | e76a5b4eda7034f60f51277da5bdd18decce740b | [
"MIT"
] | null | null | null | portal/trash/migrations/versions/84d75343fc9d_.py | jeremybusk/demoflaskpgsqlnginxdocker | e76a5b4eda7034f60f51277da5bdd18decce740b | [
"MIT"
] | null | null | null | """empty message
Revision ID: 84d75343fc9d
Revises: a4fe05917039
Create Date: 2019-05-06 20:44:19.545793
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '84d75343fc9d'  # this migration's identifier
down_revision = 'a4fe05917039'  # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Replace the singular key columns with wider, plural variants."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('key', sa.Column('private_keys', sa.String(length=1024), nullable=True))
    op.add_column('key', sa.Column('public_keys', sa.String(length=512), nullable=True))
    # NOTE(review): dropping the old columns discards any stored key material;
    # add a data-copy step first if existing rows must be preserved.
    op.drop_column('key', 'public_key')
    op.drop_column('key', 'private_key')
    # ### end Alembic commands ###
def downgrade():
    """Restore the singular key columns and drop the plural ones (lossy)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('key', sa.Column('private_key', sa.VARCHAR(length=256), autoincrement=False, nullable=True))
    op.add_column('key', sa.Column('public_key', sa.VARCHAR(length=256), autoincrement=False, nullable=True))
    # Data written to the plural columns is lost on downgrade.
    op.drop_column('key', 'public_keys')
    op.drop_column('key', 'private_keys')
    # ### end Alembic commands ###
| 31.428571 | 110 | 0.700909 |
c61200f2a604dbd945462a54f61f83cec5654874 | 3,908 | py | Python | compressai/utils/update_model/__main__.py | Chrisa142857/CompressAI | 75760096b9700a58d346351251d544050f3418fb | [
"Apache-2.0"
] | 1 | 2021-06-17T12:16:59.000Z | 2021-06-17T12:16:59.000Z | compressai/utils/update_model/__main__.py | Chrisa142857/CompressAI | 75760096b9700a58d346351251d544050f3418fb | [
"Apache-2.0"
] | null | null | null | compressai/utils/update_model/__main__.py | Chrisa142857/CompressAI | 75760096b9700a58d346351251d544050f3418fb | [
"Apache-2.0"
] | 1 | 2020-11-30T12:14:49.000Z | 2020-11-30T12:14:49.000Z | # Copyright 2020 InterDigital Communications, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Update the CDFs parameters of a trained model.
To be called on a model checkpoint after training. This will update the internal
CDFs related buffers required for entropy coding.
"""
import argparse
import hashlib
import sys
from pathlib import Path
from typing import Dict
import torch
from compressai.models.priors import (
FactorizedPrior,
JointAutoregressiveHierarchicalPriors,
MeanScaleHyperprior,
ScaleHyperprior,
)
def sha256_file(filepath: Path, len_hash_prefix: int = 8) -> str:
    """Hash a file with SHA-256 and return the first ``len_hash_prefix`` hex digits.

    (Chunked-read pattern adapted from the pytorch github repo.)
    """
    digest = hashlib.sha256()
    with filepath.open("rb") as stream:
        # Read fixed-size chunks so large files never need to fit in memory.
        for chunk in iter(lambda: stream.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()[:len_hash_prefix]
def load_checkpoint(filepath: Path) -> Dict[str, torch.Tensor]:
    """Load a checkpoint on CPU and return the model state dict it contains.

    Accepts either a raw state dict or a checkpoint wrapping it under a
    "network" or "state_dict" key (checked in that order).
    """
    checkpoint = torch.load(filepath, map_location="cpu")
    for wrapper_key in ("network", "state_dict"):
        if wrapper_key in checkpoint:
            return checkpoint[wrapper_key]
    return checkpoint
# CLI help text shown by argparse.
description = """
Export a trained model to a new checkpoint with an updated CDFs parameters and a
hash prefix, so that it can be loaded later via `load_state_dict_from_url`.
""".strip()
# Maps the --architecture CLI choice to the corresponding model class.
models = {
    "factorized-prior": FactorizedPrior,
    "jarhp": JointAutoregressiveHierarchicalPriors,
    "mean-scale-hyperprior": MeanScaleHyperprior,
    "scale-hyperprior": ScaleHyperprior,
}
def setup_args():
    """Build the command-line argument parser for this export tool."""
    arg_parser = argparse.ArgumentParser(description=description)
    # Positional: checkpoint to export.
    arg_parser.add_argument(
        "filepath", type=str, help="Path to the checkpoint model to be exported."
    )
    arg_parser.add_argument("-n", "--name", type=str, help="Exported model name.")
    arg_parser.add_argument("-d", "--dir", type=str, help="Exported model directory.")
    arg_parser.add_argument(
        "--no-update",
        action="store_true",
        default=False,
        help="Do not update the model CDFs parameters.",
    )
    arg_parser.add_argument(
        "--architecture",
        default="scale-hyperprior",
        choices=models.keys(),
        help="Set model architecture (default: %(default)s).",
    )
    return arg_parser
def main(argv):
    """Entry point: refresh a checkpoint's CDF buffers and re-save it with a hash suffix."""
    args = setup_args().parse_args(argv)
    filepath = Path(args.filepath).resolve()
    if not filepath.is_file():
        raise RuntimeError(f'"{filepath}" is not a valid file.')
    state_dict = load_checkpoint(filepath)
    model_cls = models[args.architecture]
    net = model_cls.from_state_dict(state_dict)
    # Recompute the entropy-coding CDF buffers unless explicitly disabled.
    if not args.no_update:
        net.update(force=True)
    state_dict = net.state_dict()
    if not args.name:
        # Default output name: the input file name with every extension stripped.
        filename = filepath
        while filename.suffixes:
            filename = Path(filename.stem)
    else:
        filename = args.name
    ext = "".join(filepath.suffixes)
    if args.dir is not None:
        output_dir = args.dir
        Path(output_dir).mkdir(exist_ok=True)
    else:
        # NOTE(review): `.name` yields only the CWD's base name, producing a
        # relative path into a same-named subfolder; `Path.cwd()` was probably
        # intended — confirm.
        output_dir = Path.cwd().name
    # NOTE(review): "(unknown)" looks like a corrupted placeholder — the computed
    # `filename` above is never used; the template was likely
    # f"{output_dir}/{filename}{ext}". Confirm against upstream.
    filepath = Path(f"{output_dir}/(unknown){ext}")
    torch.save(state_dict, filepath)
    # Append a short content hash so load_state_dict_from_url can verify the file.
    hash_prefix = sha256_file(filepath)
    filepath.rename(f"{output_dir}/(unknown)-{hash_prefix}{ext}")
# Run as a script: forward CLI args (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| 27.914286 | 82 | 0.680655 |
3b24e1eb2046e49ecb1d8de5b28b92e23158d36f | 7,893 | py | Python | mantra/util/ranking.py | durandtibo/mantra-python | a35dfd93f92f7f510a212ee5356ae4d776a27849 | [
"MIT"
] | 1 | 2019-02-22T09:48:04.000Z | 2019-02-22T09:48:04.000Z | mantra/util/ranking.py | durandtibo/mantra-python | a35dfd93f92f7f510a212ee5356ae4d776a27849 | [
"MIT"
] | null | null | null | mantra/util/ranking.py | durandtibo/mantra-python | a35dfd93f92f7f510a212ee5356ae4d776a27849 | [
"MIT"
] | null | null | null |
import numpy as np
from mantra.util.data.labeled_object import LabeledObject
from mantra.util.ranking_cython import (average_precision_cython,
find_optimum_neg_locations_cython,
generate_ranking_from_labels_cython)
###############################################################################
# RankingPattern
###############################################################################
class RankingPattern:
    """Bundle of example patterns with binary relevance labels.

    label=1 -> relevant, label=0 -> irrelevant. The positive/negative counts
    are taken from the labels unless both are supplied explicitly.
    """

    def __init__(self, patterns, labels, num_pos=None, num_neg=None):
        self.patterns = patterns
        self.labels = labels
        if num_pos is not None and num_neg is not None:
            # Caller already knows the class counts.
            self.num_pos = num_pos
            self.num_neg = num_neg
            return
        # Derive the counts, rejecting any label other than 1 or 0.
        self.num_pos = 0
        self.num_neg = 0
        for current in labels:
            if current == 1:
                self.num_pos += 1
            elif current == 0:
                self.num_neg += 1
            else:
                raise ValueError('incorrect label: %s (expected 1 or 0)' % current)

    def __str__(self):
        return 'RankingPattern [patterns={}, labels={}, num_pos={}, num_neg={}]'.format(
            len(self.patterns), len(self.labels), self.num_pos, self.num_neg)
###############################################################################
# RankingLabel
###############################################################################
class RankingLabel:
    """A ranking vector together with its binary labels and class counts."""

    def __init__(self, ranking=None, labels=None, num_pos=None, num_neg=None):
        self.ranking, self.labels = ranking, labels
        self.num_pos, self.num_neg = num_pos, num_neg

    def generate_ranking_label(self, labels):
        """Populate ranking/labels/counters from a 1-0 label vector.

        Raises ValueError on any label other than 1 or 0.
        """
        num_examples = labels.shape[0]
        self.ranking = np.zeros(num_examples, np.int32)
        self.labels = np.copy(labels)
        # Count relevant (1) and irrelevant (0) entries.
        self.num_pos = 0
        self.num_neg = 0
        for current in labels:
            if current == 1:
                self.num_pos += 1
            elif current == 0:
                self.num_neg += 1
            else:
                raise ValueError('incorrect label: %s (expected 1 or 0)' % current)
        # Delegate the actual ranking construction to the cython helper.
        self.ranking = generate_ranking_from_labels_cython(labels)

    def __str__(self):
        return 'RankingLabel [ranking={}, labels={}, num_pos={}, num_neg={}]'.format(
            len(self.ranking), len(self.labels), self.num_pos, self.num_neg)
###############################################################################
# RankingUtils
###############################################################################
class RankingUtils:
    """Helpers to build ranking examples and evaluate/infer rankings.
    NOTE(review): methods are declared without ``self``/``@staticmethod`` and so
    must be called on the class itself, e.g. ``RankingUtils.average_precision(...)``.
    """
    def generate_ranking_example(data, target_label=None):
        """Pack a list of LabeledObject examples into a one-element ranking dataset.
        When ``target_label`` is given, labels are binarised to 1 (matches the
        target) or 0; otherwise labels must already be 1/0.
        """
        patterns = list()
        labels = list()
        for example in data:
            patterns.append(example.pattern)
            label = example.label
            if target_label is not None:
                # NOTE(review): identity (`is`) comparison — works for small ints
                # and interned strings, fragile for other label types; confirm.
                if label is target_label:
                    label = 1
                else:
                    label = 0
            labels.append(label)
        # Converts the list in np.array
        try:
            patterns = np.asarray(patterns, np.float64)
        except TypeError:
            # Non-numeric patterns stay as a plain Python list.
            print('patterns can not be converted to np.array')
            pass
        labels = np.asarray(labels, np.int32)
        # generates the ranking pattern
        ranking_pattern = RankingPattern(patterns, labels)
        # initalizes the ranking label
        ranking_label = RankingLabel()
        # generates a ranking with the labels
        ranking_label.generate_ranking_label(labels)
        # generates a list of LabeledObject with 1 example
        ranking_data = list()
        ranking_data.append(LabeledObject(ranking_pattern, ranking_label))
        return ranking_data
    def average_precision(y_truth, y_predict):
        """ Computes the average precision of 2 RankingLabel
        - y_truth: RankingLabel
        - y_predict: RankingLabel
        """
        # converts the ranking in "score"
        scores = np.asarray(y_predict.ranking, dtype=np.float64)
        return average_precision_cython(y_truth.labels, scores)
    def average_precision_python(y_truth, y_predict):
        """Pure-python reference implementation of average precision (O(n^2))."""
        number_of_examples = y_truth.num_pos + y_truth.num_neg
        # Stores rank of all examples
        ranking = np.zeros(number_of_examples, dtype=np.int32)
        # Stores list of images sorted by rank. Higher rank to lower rank
        sorted_examples = np.zeros(number_of_examples, dtype=np.int32)
        # Converts rank matrix to rank list
        indexes = np.arange(number_of_examples)
        for i in indexes:
            # rank of example i = 1 + number of examples strictly above it.
            ranking[i] = 1
            for j in indexes:
                if y_predict.ranking[i] > y_predict.ranking[j]:
                    ranking[i] += 1
            sorted_examples[number_of_examples - ranking[i]] = i
        # Computes prec@i
        pos_count = 0.
        total_count = 0.
        precision_at_i = 0.
        for i in indexes:
            label = y_truth.labels[sorted_examples[i]]
            if label == 1:
                pos_count += 1
            total_count += 1
            if label == 1:
                precision_at_i += pos_count / total_count
        # Average of precision@i over the positive examples.
        precision_at_i /= pos_count
        return precision_at_i
    def find_optimum_neg_locations(x, positive_example_score, negative_example_score, example_index_map):
        """Cython-backed loss-augmented inference: most violating ranking for x."""
        ranking = find_optimum_neg_locations_cython(x.num_pos, x.num_neg, x.labels, positive_example_score, negative_example_score, example_index_map)
        y_predict = RankingLabel(ranking=ranking, labels=list(), num_pos=x.num_pos, num_neg=x.num_neg)
        return y_predict
    def find_optimum_neg_locations_python(x, positive_example_score, negative_example_score, example_index_map):
        """Pure-python reference of the loss-augmented inference.
        For each negative example j, finds the interleaving position (among the
        score-sorted positives) that maximises the loss-augmented objective,
        then encodes the resulting ranking.
        """
        max_value = 0.0
        current_value = 0.0
        max_index = -1
        num_pos = x.num_pos
        num_neg = x.num_neg
        # optimum_loc_neg_example[j-1] = best position (1-based) of the j-th negative.
        optimum_loc_neg_example = np.zeros(num_neg, dtype=np.uint32)
        # for every jth negative image
        for j in np.arange(1, num_neg+1):
            max_value = 0
            max_index = num_pos + 1
            # k is what we are maximising over. There would be one k_max for each negative image j
            current_value = 0
            for k in reversed(np.arange(1, num_pos+1)):
                current_value += (1.0 / num_pos) * ((j / (j + k)) - ((j - 1.0) / (j + k - 1.0))) - (2.0 / (num_pos * num_neg)) * (positive_example_score[k-1] - negative_example_score[j-1])
                if current_value > max_value:
                    max_value = current_value
                    max_index = k
            optimum_loc_neg_example[j-1] = max_index
        return RankingUtils.encode_ranking_python(x, positive_example_score, negative_example_score, example_index_map, optimum_loc_neg_example)
    def encode_ranking_python(x, positive_example_score, negative_example_score, example_index_map, optimum_loc_neg_example):
        """Turn pairwise ordering decisions into a rank-difference vector.
        ranking[i] ends with (#examples ranked below i) - (#examples ranked above i).
        Same-class pairs are ordered by score (ties broken by original index);
        mixed pairs use the optimum negative locations computed above.
        """
        labels = x.labels
        number_of_examples = len(x.patterns)
        ranking = np.zeros(number_of_examples, dtype=np.int32)
        for i in range(number_of_examples):
            for j in range(i+1, number_of_examples):
                if labels[i] == labels[j]:
                    if labels[i] == 1:
                        # Both positive: higher positive score ranks first.
                        if positive_example_score[example_index_map[i]] > positive_example_score[example_index_map[j]]:
                            ranking[i] += 1
                            ranking[j] -= 1
                        elif positive_example_score[example_index_map[j]] > positive_example_score[example_index_map[i]]:
                            ranking[i] -= 1
                            ranking[j] += 1
                        else:
                            # Tie: lower original index ranks first.
                            if i < j:
                                ranking[i] += 1
                                ranking[j] -= 1
                            else:
                                ranking[i] -= 1
                                ranking[j] += 1
                    else:
                        # Both negative: higher negative score ranks first.
                        if negative_example_score[example_index_map[i]] > negative_example_score[example_index_map[j]]:
                            ranking[i] += 1
                            ranking[j] -= 1
                        elif negative_example_score[example_index_map[j]] > negative_example_score[example_index_map[i]]:
                            ranking[i] -= 1
                            ranking[j] += 1
                        else:
                            if i < j:
                                ranking[i] += 1
                                ranking[j] -= 1
                            else:
                                ranking[i] -= 1
                                ranking[j] += 1
                elif labels[i] == 1 and labels[j] == 0:
                    # Positive i vs negative j: compare i's position with j's optimum slot.
                    i_prime = example_index_map[i] + 1
                    j_prime = example_index_map[j] + 1
                    oj_prime = optimum_loc_neg_example[j_prime-1]
                    if (oj_prime - i_prime - 0.5) > 0:
                        ranking[i] += 1
                        ranking[j] -= 1
                    else:
                        ranking[i] -= 1
                        ranking[j] += 1
                elif labels[i] == 0 and labels[j] == 1:
                    # Negative i vs positive j: symmetric case.
                    i_prime = example_index_map[i] + 1
                    j_prime = example_index_map[j] + 1
                    oi_prime = optimum_loc_neg_example[i_prime - 1]
                    if (j_prime - oi_prime + 0.5) > 0:
                        ranking[i] += 1
                        ranking[j] -= 1
                    else:
                        ranking[i] -= 1
                        ranking[j] += 1
        return RankingLabel(ranking=ranking, labels=list(), num_pos=x.num_pos, num_neg=x.num_neg)
| 31.197628 | 176 | 0.642721 |
3396646c7c18b74c1a4cd4c8496fb45088f31214 | 432 | py | Python | Project-6/CICD all files/mail.py | Vedant-S/DevOps-Assembly_Line-Project | c173cd49f9b1ef1757ab9444b4072ca5b9c6f1f2 | [
"MIT"
] | 1 | 2020-08-15T10:01:53.000Z | 2020-08-15T10:01:53.000Z | Project-6/CICD all files/mail.py | Vedant-S/DevOps-Assembly_Line-Project | c173cd49f9b1ef1757ab9444b4072ca5b9c6f1f2 | [
"MIT"
] | null | null | null | Project-6/CICD all files/mail.py | Vedant-S/DevOps-Assembly_Line-Project | c173cd49f9b1ef1757ab9444b4072ca5b9c6f1f2 | [
"MIT"
] | 4 | 2020-07-10T15:12:31.000Z | 2022-01-17T14:15:21.000Z | import smtplib
# Sends a plain-text alert email to the developer via Gmail's SMTP relay.
sender_email = "vedantshrivastava466@gmail.com"
rec_email = "github@gmail.com"
# NOTE(review): never commit real credentials — load this from an environment
# variable or secrets store, and use a Gmail app password.
password = "********"
message = "Hello Developer, your website has some error......plz check the code. and push again"
# The context manager guarantees the SMTP connection is properly closed
# (QUIT) even if login or sending fails — the original never closed it.
with smtplib.SMTP('smtp.gmail.com', 587) as server:
    server.starttls()  # upgrade to TLS before sending credentials
    server.login(sender_email, password)
    print("Login success")
    server.sendmail(sender_email, rec_email, message)
    print("Email has been sent to ", rec_email)
| 30.857143 | 96 | 0.747685 |
50fc92a07a9a7ed87ccd361b24ffb90d851aa3c5 | 3,981 | py | Python | pubsub/synth.py | erikwebb/google-cloud-python | 288a878e9a07239015c78a193eca1cc15e926127 | [
"Apache-2.0"
] | 1 | 2019-01-23T21:54:51.000Z | 2019-01-23T21:54:51.000Z | pubsub/synth.py | erikwebb/google-cloud-python | 288a878e9a07239015c78a193eca1cc15e926127 | [
"Apache-2.0"
] | null | null | null | pubsub/synth.py | erikwebb/google-cloud-python | 288a878e9a07239015c78a193eca1cc15e926127 | [
"Apache-2.0"
] | 1 | 2020-11-15T11:44:36.000Z | 2020-11-15T11:44:36.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import re
import synthtool as s
from synthtool import gcp
# Generators for the GAPIC client code and the shared repo templates.
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
version = "v1"
# ----------------------------------------------------------------------------
# Generate pubsub GAPIC layer
# ----------------------------------------------------------------------------
library = gapic.py_library(
    "pubsub", version, config_path="/google/pubsub/artman_pubsub.yaml"
)
# Copy generated files into the repo, excluding handwritten/customised ones.
s.move(
    library,
    excludes=[
        "docs/**/*",
        "nox.py",
        "README.rst",
        "setup.py",
        "google/cloud/pubsub_v1/__init__.py",
        "google/cloud/pubsub_v1/types.py",
    ],
)
# Adjust tests to import the clients directly.
s.replace(
    "tests/unit/gapic/v1/test_publisher_client_v1.py",
    "from google.cloud import pubsub_v1",
    "from google.cloud.pubsub_v1.gapic import publisher_client",
)
# Rewrite remaining `pubsub_v1.` references to the direct client module.
s.replace(
    "tests/unit/gapic/v1/test_publisher_client_v1.py", " pubsub_v1", " publisher_client"
)
s.replace(
    "tests/unit/gapic/v1/test_subscriber_client_v1.py",
    "from google.cloud import pubsub_v1",
    "from google.cloud.pubsub_v1.gapic import subscriber_client",
)
s.replace(
    "tests/unit/gapic/v1/test_subscriber_client_v1.py",
    " pubsub_v1",
    " subscriber_client",
)
# DEFAULT SCOPES are being used. so let's force them in.
s.replace(
"google/cloud/pubsub_v1/gapic/*er_client.py",
"# The name of the interface for this client. This is the key used to",
"""# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_DEFAULT_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/pubsub', )
\g<0>""",
)
s.replace(
"google/cloud/pubsub_v1/gapic/publisher_client.py",
"import google.api_core.gapic_v1.method\n",
"\g<0>import google.api_core.path_template\n",
)
# Doc strings are formatted poorly
s.replace(
"google/cloud/pubsub_v1/proto/pubsub_pb2.py",
'DESCRIPTOR = _MESSAGESTORAGEPOLICY,\n\s+__module__.*\n\s+,\n\s+__doc__ = """',
"\g<0>A message storage policy.\n\n\n ",
)
s.replace(
"google/cloud/pubsub_v1/gapic/subscriber_client.py",
"subscription \(str\): The subscription whose backlog .*\n(.*\n)+?"
"\s+Format is .*",
"""subscription (str): The subscription whose backlog the snapshot retains.
Specifically, the created snapshot is guaranteed to retain: \\
(a) The existing backlog on the subscription. More precisely, this is \\
defined as the messages in the subscription's backlog that are \\
unacknowledged upon the successful completion of the \\
`CreateSnapshot` request; as well as: \\
(b) Any messages published to the subscription's topic following the \\
successful completion of the CreateSnapshot request. \\
Format is ``projects/{project}/subscriptions/{sub}``.""",
)
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = gcp.CommonTemplates().py_library(unit_cov_level=97, cov_level=100)
s.move(templated_files)
# Re-format everything with black (via the nox session) after all rewrites.
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
| 34.318966 | 89 | 0.627481 |
75112067283ed7d80aa72d67da4bd80db5627dcf | 281 | py | Python | intmcp/tree/__init__.py | RDLLab/i-ntmcp | 63deec3d956d41a0ad4b66a707536893859e4e9f | [
"MIT"
] | null | null | null | intmcp/tree/__init__.py | RDLLab/i-ntmcp | 63deec3d956d41a0ad4b66a707536893859e4e9f | [
"MIT"
] | null | null | null | intmcp/tree/__init__.py | RDLLab/i-ntmcp | 63deec3d956d41a0ad4b66a707536893859e4e9f | [
"MIT"
] | null | null | null | from typing import Dict, Any
from .node import Node
from .nst import NestedSearchTree, HistoryDist, NestingLevel, NestedBelief
# Registry of available search-tree implementations, keyed by name.
# NestedSearchTree is reachable both via the short alias 'NST' and via its
# class name (added by the unpacked comprehension below).
SEARCH_TREES: Dict[str, Any] = {
    'NST': NestedSearchTree,
    **{
        c.__name__: c for c in [
            NestedSearchTree,
        ]
    }
}
| 18.733333 | 74 | 0.633452 |
3b6dd2d44d9f5ce06ce94cf3f29819685be3b073 | 6,060 | py | Python | platformio/package/manager/_update.py | World-Enterprise-Collision/platformio-core | c6e0c4d89d8aeaf6e733e3a668cd500fc7078e15 | [
"Apache-2.0"
] | null | null | null | platformio/package/manager/_update.py | World-Enterprise-Collision/platformio-core | c6e0c4d89d8aeaf6e733e3a668cd500fc7078e15 | [
"Apache-2.0"
] | null | null | null | platformio/package/manager/_update.py | World-Enterprise-Collision/platformio-core | c6e0c4d89d8aeaf6e733e3a668cd500fc7078e15 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import click
from platformio.clients.http import ensure_internet_on
from platformio.package.exception import UnknownPackageError
from platformio.package.meta import PackageItem, PackageOutdatedResult, PackageSpec
from platformio.package.vcsclient import VCSBaseException, VCSClientFactory
class PackageManagerUpdateMixin(object):
    """Mixin adding outdated-detection and update logic to a package manager."""
    def outdated(self, pkg, spec=None):
        """Return a PackageOutdatedResult with current/latest/wanted versions.
        ``latest`` comes from the VCS (external packages) or the registry;
        ``wanted`` is the best registry version satisfying ``spec``.
        """
        assert isinstance(pkg, PackageItem)
        assert not spec or isinstance(spec, PackageSpec)
        assert os.path.isdir(pkg.path) and pkg.metadata
        # skip detached package to a specific version
        detached_conditions = [
            "@" in pkg.path,
            pkg.metadata.spec and not pkg.metadata.spec.external,
            not spec,
        ]
        if all(detached_conditions):
            return PackageOutdatedResult(current=pkg.metadata.version, detached=True)
        latest = None
        wanted = None
        if pkg.metadata.spec.external:
            latest = self._fetch_vcs_latest_version(pkg)
        else:
            try:
                reg_pkg = self.fetch_registry_package(pkg.metadata.spec)
                latest = (
                    self.pick_best_registry_version(reg_pkg["versions"]) or {}
                ).get("name")
                if spec:
                    wanted = (
                        self.pick_best_registry_version(reg_pkg["versions"], spec) or {}
                    ).get("name")
                    if not wanted:  # wrong library
                        latest = None
            # Unknown in the registry: report no latest/wanted rather than fail.
            except UnknownPackageError:
                pass
        return PackageOutdatedResult(
            current=pkg.metadata.version, latest=latest, wanted=wanted
        )
    def _fetch_vcs_latest_version(self, pkg):
        """Return the version string at the VCS's latest revision, or None."""
        vcs = None
        try:
            vcs = VCSClientFactory.new(pkg.path, pkg.metadata.spec.url, silent=True)
        except VCSBaseException:
            return None
        if not vcs.can_be_updated:
            return None
        return str(
            self.build_metadata(
                pkg.path, pkg.metadata.spec, vcs_revision=vcs.get_latest_revision()
            ).version
        )
    def update(  # pylint: disable=too-many-arguments
        self,
        from_spec,
        to_spec=None,
        only_check=False,
        silent=False,
        show_incompatible=True,
    ):
        """Check (and unless ``only_check``, perform) an update of ``from_spec``.
        Returns the resulting package; raises UnknownPackageError when the
        package is not installed.
        """
        pkg = self.get_package(from_spec)
        if not pkg or not pkg.metadata:
            raise UnknownPackageError(from_spec)
        if not silent:
            click.echo(
                "{} {:<45} {:<35}".format(
                    "Checking" if only_check else "Updating",
                    click.style(pkg.metadata.spec.humanize(), fg="cyan"),
                    "%s @ %s" % (pkg.metadata.version, to_spec.requirements)
                    if to_spec and to_spec.requirements
                    else str(pkg.metadata.version),
                ),
                nl=False,
            )
        # Without connectivity we cannot check versions; report and keep as-is.
        if not ensure_internet_on():
            if not silent:
                click.echo("[%s]" % (click.style("Off-line", fg="yellow")))
            return pkg
        outdated = self.outdated(pkg, to_spec)
        if not silent:
            self.print_outdated_state(outdated, show_incompatible)
        if only_check or not outdated.is_outdated(allow_incompatible=False):
            return pkg
        # Serialise concurrent updates via the manager's lock.
        try:
            self.lock()
            return self._update(pkg, outdated, silent=silent)
        finally:
            self.unlock()
    @staticmethod
    def print_outdated_state(outdated, show_incompatible=True):
        """Echo a colored status tag (Detached/Up-to-date/Incompatible/Outdated)."""
        if outdated.detached:
            return click.echo("[%s]" % (click.style("Detached", fg="yellow")))
        if (
            not outdated.latest
            or outdated.current == outdated.latest
            or (not show_incompatible and outdated.current == outdated.wanted)
        ):
            return click.echo("[%s]" % (click.style("Up-to-date", fg="green")))
        # Current equals the best compatible version, but a newer incompatible
        # release exists.
        if outdated.wanted and outdated.current == outdated.wanted:
            return click.echo(
                "[%s]" % (click.style("Incompatible %s" % outdated.latest, fg="yellow"))
            )
        return click.echo(
            "[%s]"
            % (
                click.style(
                    "Outdated %s" % str(outdated.wanted or outdated.latest), fg="red"
                )
            )
        )
    def _update(self, pkg, outdated, silent=False):
        """Perform the update: pull for VCS packages, install+uninstall otherwise."""
        if pkg.metadata.spec.external:
            vcs = VCSClientFactory.new(pkg.path, pkg.metadata.spec.url)
            assert vcs.update()
            pkg.metadata.version = self._fetch_vcs_latest_version(pkg)
            pkg.dump_meta()
            return pkg
        # Registry package: install the wanted/latest version side by side ...
        new_pkg = self.install(
            PackageSpec(
                id=pkg.metadata.spec.id,
                owner=pkg.metadata.spec.owner,
                name=pkg.metadata.spec.name,
                requirements=outdated.wanted or outdated.latest,
            ),
            silent=silent,
        )
        # ... then remove the previously installed version.
        if new_pkg:
            old_pkg = self.get_package(
                PackageSpec(
                    id=pkg.metadata.spec.id,
                    owner=pkg.metadata.spec.owner,
                    # NOTE(review): uses pkg.metadata.name here but
                    # pkg.metadata.spec.name above — confirm both resolve to the
                    # same package name.
                    name=pkg.metadata.name,
                    requirements=pkg.metadata.version,
                )
            )
            if old_pkg:
                self.uninstall(old_pkg, silent=silent, skip_dependencies=True)
        return new_pkg
| 35.647059 | 88 | 0.573102 |
9d8921bf50e98ca48ad56df39de136113301ef84 | 967 | py | Python | Python/seven_kyu/descending_order.py | Brokenshire/codewars-projects | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | [
"Apache-2.0"
] | 1 | 2019-12-20T04:09:56.000Z | 2019-12-20T04:09:56.000Z | Python/seven_kyu/descending_order.py | Brokenshire/codewars-projects | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | [
"Apache-2.0"
] | null | null | null | Python/seven_kyu/descending_order.py | Brokenshire/codewars-projects | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | [
"Apache-2.0"
] | null | null | null | # Python solution for 'Descending Order' codewars question.
# Level: 7 kyu
# Tags: Fundamentals, Functions, Control Flow, and Basic Language Features.
# Author: Jack Brokenshire
# Date: 03/03/2020
import unittest
def descending_order(num):
    """
    Rearrange the digits of a non-negative integer to form the largest
    possible number.

    :param num: a non-negative integer.
    :return: an integer whose digits are those of ``num`` sorted high to low.
    """
    digits = sorted(str(num), reverse=True)
    return int("".join(digits))
class TestDescendingOrder(unittest.TestCase):
    """Unit tests covering the 'descending_order' function."""
    def test_descending_order(self):
        """Each input integer maps to its digits arranged high-to-low."""
        cases = [(0, 0), (15, 51), (123456789, 987654321)]
        for value, expected in cases:
            self.assertEqual(descending_order(value), expected)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 31.193548 | 118 | 0.720786 |
70345d616c153e04543665337cefdb1d1d639b83 | 3,998 | py | Python | utils/data/camera_data.py | anviv-lab/robotic-grasping | a186b3f15f2fb98e5862448eda6115f6fe16fb89 | [
"BSD-3-Clause"
] | null | null | null | utils/data/camera_data.py | anviv-lab/robotic-grasping | a186b3f15f2fb98e5862448eda6115f6fe16fb89 | [
"BSD-3-Clause"
] | null | null | null | utils/data/camera_data.py | anviv-lab/robotic-grasping | a186b3f15f2fb98e5862448eda6115f6fe16fb89 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import torch
import os
import glob
from utils.dataset_processing import image, grasp
from .grasp_data import GraspDatasetBase
class CameraData(GraspDatasetBase):
    """
    Dataset wrapper for the camera data.
    Loads pre-captured depth (.npy) / RGB (.png) frames from ``file_path`` and
    center-crops them from (width, height) down to a square ``output_size``.
    """
    def __init__(self, file_path, ds_rotate=0,
                 width=640,
                 height=480,
                 output_size=224,
                 include_depth=True,
                 include_rgb=True,
                 **kwargs):
        """
        :param output_size: Image output size in pixels (square)
        :param include_depth: Whether depth image is included
        :param include_rgb: Whether RGB image is included
        """
        super(CameraData, self).__init__(**kwargs)
        self.output_size = output_size
        self.include_depth = include_depth
        self.include_rgb = include_rgb
        # Sorted so depth/color files at the same index belong together.
        self.depth_files = glob.glob(os.path.join(file_path, 'depth_*.npy'))
        self.depth_files.sort()
        self.rgb_files = glob.glob(os.path.join(file_path, 'color_*.png'))
        self.rgb_files.sort()
        # NOTE(review): length is taken from depth files only — confirm the
        # depth/color counts always match.
        self.length = len(self.depth_files)
        if include_depth is False and include_rgb is False:
            raise ValueError('At least one of Depth or RGB must be specified.')
        # Corners of the centered output_size x output_size crop window.
        left = (width - output_size) // 2
        top = (height - output_size) // 2
        right = (width + output_size) // 2
        bottom = (height + output_size) // 2
        self.bottom_right = (bottom, right)
        self.top_left = (top, left)
    @staticmethod
    def numpy_to_torch(s):
        """Convert an ndarray to a float32 tensor, adding a leading channel dim for 2-D input."""
        if len(s.shape) == 2:
            return torch.from_numpy(np.expand_dims(s, 0).astype(np.float32))
        else:
            return torch.from_numpy(s.astype(np.float32))
    def get_gtbb(self, idx, rot=0, zoom=1.0):
        """Return a fixed dummy 10x10 grasp rectangle (no real annotations here).
        ``idx``/``rot``/``zoom`` are accepted for interface compatibility but
        unused — the rotate/zoom calls are commented out below.
        """
        rect = np.array([[
            [0.0, 10.0],
            [10.0, 10.0],
            [10.0, 0.0],
            [0.0, 0.0]
        ]])
        gtbbs = grasp.GraspRectangles.load_from_array(rect)
        c = self.output_size // 2
        # gtbbs.rotate(rot, (c, c))
        # gtbbs.zoom(zoom, (c, c))
        return gtbbs
    def get_depth(self, idx, rot=0, zoom=1.0, normalise=True):
        """Load depth frame ``idx`` from disk, crop/rotate/resize, optionally normalise.
        Note zoom is accepted but currently disabled (commented out).
        """
        arr = np.load(self.depth_files[idx])
        depth_img = image.Image(arr)
        depth_img.crop(bottom_right=self.bottom_right, top_left=self.top_left)
        depth_img.rotate(rot)
        # depth_img.zoom(zoom)
        depth_img.resize((self.output_size, self.output_size))
        # depth_img.resize((self.output_size, self.output_size))
        # depth_img.img = depth_img.img.transpose((2, 0, 1))
        if normalise:
            depth_img.normalise()
        return np.squeeze(depth_img.img)
    def get_rgb(self, idx, rot=0, zoom=1.0, normalise=True):
        """Load RGB frame ``idx``, crop/rotate/zoom/resize, optionally normalise.
        The result is transposed to channels-first (C, H, W) layout.
        """
        rgb_img = image.Image.from_file(self.rgb_files[idx])
        rgb_img.crop(bottom_right=self.bottom_right, top_left=self.top_left)
        rgb_img.rotate(rot)
        rgb_img.zoom(zoom)
        rgb_img.resize((self.output_size, self.output_size))
        if normalise:
            rgb_img.normalise()
        rgb_img.img = rgb_img.img.transpose((2, 0, 1))
        return rgb_img.img
    def get_data(self, rgb=None, depth=None):
        """Build the network input tensor from in-memory rgb/depth frames.
        NOTE(review): this calls ``self.get_depth(img=depth)`` and
        ``self.get_rgb(img=rgb)``, but both methods are defined to take ``idx``
        and load from disk — as written these calls raise TypeError. This looks
        like a leftover from an upstream API where get_depth/get_rgb processed a
        passed-in image; confirm and reconcile the signatures.
        """
        depth_img = None
        rgb_img = None
        # Load the depth image
        if self.include_depth:
            depth_img = self.get_depth(img=depth)
        # Load the RGB image
        if self.include_rgb:
            rgb_img = self.get_rgb(img=rgb)
        if self.include_depth and self.include_rgb:
            # Stack depth and RGB along the channel axis into one tensor.
            x = self.numpy_to_torch(
                np.concatenate(
                    (np.expand_dims(depth_img, 0),
                     np.expand_dims(rgb_img, 0)),
                    1
                )
            )
        elif self.include_depth:
            x = self.numpy_to_torch(depth_img)
        elif self.include_rgb:
            x = self.numpy_to_torch(np.expand_dims(rgb_img, 0))
        return x, depth_img, rgb_img
| 33.041322 | 79 | 0.576038 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.