Dataset schema (nullable columns are marked ⌀):

| column | type | stats |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 .. 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 .. 245 |
| max_stars_repo_name | string | length 6 .. 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 .. 10 |
| max_stars_count | int64 | 1 .. 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 .. 245 |
| max_issues_repo_name | string | length 6 .. 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 .. 10 |
| max_issues_count | int64 | 1 .. 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 .. 245 |
| max_forks_repo_name | string | length 6 .. 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 .. 10 |
| max_forks_count | int64 | 1 .. 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 2 .. 1.02M |
| avg_line_length | float64 | 1 .. 417k |
| max_line_length | int64 | 1 .. 987k |
| alphanum_fraction | float64 | 0 .. 1 |
| content_no_comment | string | length 0 .. 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
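The records below follow this schema. As a minimal sketch of how such rows could be loaded and inspected, assuming the dump comes from a Hugging Face `datasets` table (the dataset path below is a hypothetical placeholder):

from datasets import load_dataset

# Sketch only: "example-org/python-files-no-comments" is a hypothetical placeholder.
ds = load_dataset("example-org/python-files-no-comments", split="train", streaming=True)
row = next(iter(ds))

print(row["hexsha"], row["size"], row["max_stars_repo_name"])
print(row["content"][:120])             # original source text
print(row["content_no_comment"][:120])  # same file with comments stripped
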
hexsha: f70a5536b187b95499372854a9278b60efced3b0 | size: 788 | ext: py | lang: Python
max_stars: repo=doaa-1996/Data-structures-and-algorithms1 @ 5b2b4e1ece2f6671770dac80a95b662345106f49, path=merge-sort/merge_sort/merge_sort.py, licenses=["MIT"], count=1, events 2021-06-22T12:26:13.000Z .. 2021-06-22T12:26:13.000Z
max_issues: repo=doaa-1996/data-structures-and-algorithms @ 5b2b4e1ece2f6671770dac80a95b662345106f49, path=merge-sort/merge_sort/merge_sort.py, licenses=["MIT"], count=null
max_forks: repo=doaa-1996/data-structures-and-algorithms @ 5b2b4e1ece2f6671770dac80a95b662345106f49, path=merge-sort/merge_sort/merge_sort.py, licenses=["MIT"], count=null
content:
def mergeSort(arr):
    # Split the list in half, sort each half recursively, then merge.
    n = len(arr)
    if n > 1:
        mid = n // 2
        left = arr[0:mid]
        right = arr[mid:n]
        mergeSort(left)
        mergeSort(right)
        Merge(left, right, arr)

def Merge(left, right, arr):
    # Merge the two sorted halves back into arr in ascending order.
    i = 0
    j = 0
    k = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1
        k += 1
    # Copy whatever remains in either half.
    while i < len(left):
        arr[k] = left[i]
        i += 1
        k += 1
    while j < len(right):
        arr[k] = right[j]
        j += 1
        k += 1

if __name__ == "__main__":
    arr = [8, 4, 23, 42, 16, 15]
    print(f'Array => {arr}')
    mergeSort(arr)
    print(f'Sorted array => {arr}')
avg_line_length: 20.736842 | max_line_length: 43 | alphanum_fraction: 0.413706
content_no_comment:
def mergeSort(arr):
    n = len(arr)
    if n > 1:
        mid = n // 2
        left = arr[0:mid]
        right = arr[mid:n]
        mergeSort(left)
        mergeSort(right)
        Merge(left, right, arr)

def Merge(left, right, arr):
    i = 0
    j = 0
    k = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1
        k += 1
    while i < len(left):
        arr[k] = left[i]
        i += 1
        k += 1
    while j < len(right):
        arr[k] = right[j]
        j += 1
        k += 1

if __name__ == "__main__":
    arr = [8, 4, 23, 42, 16, 15]
    print(f'Array => {arr}')
    mergeSort(arr)
    print(f'Sorted array => {arr}')
is_comment_constant_removed: true | is_sharp_comment_removed: true

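mergeSort above sorts its argument in place and returns None, so a quick sanity check is to compare the mutated list against Python's built-in sorted(); a small sketch, assuming mergeSort is in scope:

import random

def check_merge_sort(trials=100):
    # Compare the in-place mergeSort result with sorted() on random inputs,
    # including empty and single-element lists.
    for _ in range(trials):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        expected = sorted(data)
        mergeSort(data)
        assert data == expected, data
    print("all trials passed")

check_merge_sort()
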
hexsha: f70a5558a78b0bae5dd9be819598d90e827a8312 | size: 624 | ext: py | lang: Python
max_stars: repo=amarin/sphinxpapyrus-docxbuilder @ 0fd00a0c5467554d0a2b5ad9cd93ab780511f1a3, path=sphinxpapyrus/docxbuilder/nodes/important.py, licenses=["MIT"], count=null
max_issues: repo=amarin/sphinxpapyrus-docxbuilder @ 0fd00a0c5467554d0a2b5ad9cd93ab780511f1a3, path=sphinxpapyrus/docxbuilder/nodes/important.py, licenses=["MIT"], count=null
max_forks: repo=amarin/sphinxpapyrus-docxbuilder @ 0fd00a0c5467554d0a2b5ad9cd93ab780511f1a3, path=sphinxpapyrus/docxbuilder/nodes/important.py, licenses=["MIT"], count=null
content:
# -*- coding: utf-8 -*-
"""
Translate formatting for the docutils "important" node.
Each important node is opened with visit() and closed with depart().
"""
from docutils.nodes import Node
from sphinxpapyrus.docxbuilder.translator import DocxTranslator
node_name = "important"
def visit(visitor: DocxTranslator, node: Node):
"""Start processing important node"""
assert isinstance(visitor, DocxTranslator)
assert isinstance(node, Node)
def depart(visitor: DocxTranslator, node: Node):
"""Finish processing important node"""
assert isinstance(visitor, DocxTranslator)
assert isinstance(node, Node)
avg_line_length: 27.130435 | max_line_length: 75 | alphanum_fraction: 0.75
content_no_comment:
from docutils.nodes import Node
from sphinxpapyrus.docxbuilder.translator import DocxTranslator
node_name = "important"
def visit(visitor: DocxTranslator, node: Node):
assert isinstance(visitor, DocxTranslator)
assert isinstance(node, Node)
def depart(visitor: DocxTranslator, node: Node):
assert isinstance(visitor, DocxTranslator)
assert isinstance(node, Node)
is_comment_constant_removed: true | is_sharp_comment_removed: true

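The visit()/depart() pair above follows the docutils translator protocol: the translator calls visit when it enters an "important" node and depart when it leaves. A sketch of how such handlers are typically registered with Sphinx; the setup() function and the docx format key are assumptions, not code from the repository:

from docutils import nodes

def setup(app):
    # Hypothetical wiring: map the "important" node to the (visit, depart)
    # pair above for the docx output format handled by this builder.
    app.add_node(nodes.important, override=True, docx=(visit, depart))
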
hexsha: f70a55c9bd6446009caf8957e8cedb97d86a3592 | size: 3,679 | ext: py | lang: Python
max_stars: repo=speezepearson/pow @ 7c86a36134cb90bfcf6e2740c4293d629b6021a1, path=setup.py, licenses=["MIT"], count=5, events 2017-10-31T00:17:30.000Z .. 2017-11-11T00:53:08.000Z
max_issues: repo=speezepearson/prpg @ 7c86a36134cb90bfcf6e2740c4293d629b6021a1, path=setup.py, licenses=["MIT"], count=null
max_forks: repo=speezepearson/prpg @ 7c86a36134cb90bfcf6e2740c4293d629b6021a1, path=setup.py, licenses=["MIT"], count=null
content:
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.markdown'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='prpg',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.4.1',
description='A pseudorandom password generator / password manager.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/speezepearson/prpg',
# Author details
author='speezepearson',
author_email='speeze.pearson+prpg@gmail.com',
# Choose your license
# license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Security',
# Pick your license as you wish (should match "license" above)
# 'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# What does your project relate to?
keywords='password password-management password-generation',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['test', 'doc', 'wiki']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': [],
'test': ['pytest', 'pexpect'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'browsergui': ['_server/*.html', '_server/*.js', 'examples/*.png'],
# },
    # Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'prpg=prpg:main',
],
},
)
avg_line_length: 36.425743 | max_line_length: 94 | alphanum_fraction: 0.665398
content_no_comment:
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.markdown'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='prpg',
version='0.4.1',
description='A pseudorandom password generator / password manager.',
long_description=long_description,
url='https://github.com/speezepearson/prpg',
# Author details
author='speezepearson',
author_email='speeze.pearson+prpg@gmail.com',
# Choose your license
# license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Security',
# Pick your license as you wish (should match "license" above)
# 'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# What does your project relate to?
keywords='password password-management password-generation',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['test', 'doc', 'wiki']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
install_requires=[],
extras_require={
'dev': [],
'test': ['pytest', 'pexpect'],
},
    entry_points={
'console_scripts': [
'prpg=prpg:main',
],
},
)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: f70a56767a954b3cb026e141fd9f582d9f92fc11 | size: 847 | ext: py | lang: Python
max_stars: repo=thodoris/djangoPharma @ 76089e67bc9940651a876d078879469127f5ac66, path=djangoPharma/app/templatetags/app_tags.py, licenses=["Apache-2.0"], count=null
max_issues: repo=thodoris/djangoPharma @ 76089e67bc9940651a876d078879469127f5ac66, path=djangoPharma/app/templatetags/app_tags.py, licenses=["Apache-2.0"], count=null
max_forks: repo=thodoris/djangoPharma @ 76089e67bc9940651a876d078879469127f5ac66, path=djangoPharma/app/templatetags/app_tags.py, licenses=["Apache-2.0"], count=null
content:
from django import template
from django.contrib.auth.models import Group
register = template.Library()
@register.filter(name='has_group')
def has_group(user, group_name):
try:
group = Group.objects.get(name=group_name)
    except Group.DoesNotExist:
        return False  # the group doesn't exist, so the user can't be a member of it
# for superuser , always return True
if user.is_superuser:
return True
return user.groups.filter(name=group_name).exists()
# The first argument *must* be called "context" here.
def breadcrumb_tag(context):
request = context['request']
address = request.path
return {
'link':address,
'title': address,
}
# Register the custom tag as an inclusion tag with takes_context=True.
register.inclusion_tag('tags/breadcrumb.html', takes_context=True)(breadcrumb_tag)
avg_line_length: 29.206897 | max_line_length: 89 | alphanum_fraction: 0.706021
content_no_comment:
from django import template
from django.contrib.auth.models import Group
register = template.Library()
@register.filter(name='has_group')
def has_group(user, group_name):
try:
group = Group.objects.get(name=group_name)
    except Group.DoesNotExist:
        return False
if user.is_superuser:
return True
return user.groups.filter(name=group_name).exists()
def breadcrumb_tag(context):
request = context['request']
address = request.path
return {
'link':address,
'title': address,
}
register.inclusion_tag('tags/breadcrumb.html', takes_context=True)(breadcrumb_tag)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: f70a58845e40a18b54f35acaaa0caa0a007ce791 | size: 265 | ext: py | lang: Python
max_stars: repo=ravik0007/erpapp_tasks @ bafd1de9bbf6889e639320b15c6e7c52124ba05b, path=demo_app/config/desktop.py, licenses=["MIT"], count=null
max_issues: repo=ravik0007/erpapp_tasks @ bafd1de9bbf6889e639320b15c6e7c52124ba05b, path=demo_app/config/desktop.py, licenses=["MIT"], count=null
max_forks: repo=ravik0007/erpapp_tasks @ bafd1de9bbf6889e639320b15c6e7c52124ba05b, path=demo_app/config/desktop.py, licenses=["MIT"], count=null
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Demo App",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Demo App")
}
]
avg_line_length: 17.666667 | max_line_length: 44 | alphanum_fraction: 0.607547
content_no_comment:
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Demo App",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Demo App")
}
]
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: f70a5b9b2e7e749c433cad42920bd0cd17d8c944 | size: 4,446 | ext: py | lang: Python
max_stars: repo=sker152/pretix @ 92754136a653453d00f0b95cdefac533fec5e1ba, path=src/pretix/plugins/sendmail/forms.py, licenses=["ECL-2.0", "Apache-2.0"], count=null
max_issues: repo=sker152/pretix @ 92754136a653453d00f0b95cdefac533fec5e1ba, path=src/pretix/plugins/sendmail/forms.py, licenses=["ECL-2.0", "Apache-2.0"], count=null
max_forks: repo=sker152/pretix @ 92754136a653453d00f0b95cdefac533fec5e1ba, path=src/pretix/plugins/sendmail/forms.py, licenses=["ECL-2.0", "Apache-2.0"], count=null
content:
from django import forms
from django.urls import reverse
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
from i18nfield.forms import I18nFormField, I18nTextarea, I18nTextInput
from pretix.base.email import get_available_placeholders
from pretix.base.forms import PlaceholderValidator
from pretix.base.models import Item, Order, SubEvent
from pretix.control.forms.widgets import Select2
class MailForm(forms.Form):
recipients = forms.ChoiceField(
label=_('Send email to'),
widget=forms.RadioSelect,
initial='orders',
choices=[]
)
sendto = forms.MultipleChoiceField() # overridden later
subject = forms.CharField(label=_("Subject"))
message = forms.CharField(label=_("Message"))
items = forms.ModelMultipleChoiceField(
widget=forms.CheckboxSelectMultiple(
attrs={'class': 'scrolling-multiple-choice'}
),
label=_('Only send to people who bought'),
required=True,
queryset=Item.objects.none()
)
subevent = forms.ModelChoiceField(
SubEvent.objects.none(),
label=_('Only send to customers of'),
required=False,
empty_label=pgettext_lazy('subevent', 'All dates')
)
def _set_field_placeholders(self, fn, base_parameters):
phs = [
'{%s}' % p
for p in sorted(get_available_placeholders(self.event, base_parameters).keys())
]
ht = _('Available placeholders: {list}').format(
list=', '.join(phs)
)
if self.fields[fn].help_text:
self.fields[fn].help_text += ' ' + str(ht)
else:
self.fields[fn].help_text = ht
self.fields[fn].validators.append(
PlaceholderValidator(phs)
)
def __init__(self, *args, **kwargs):
event = self.event = kwargs.pop('event')
super().__init__(*args, **kwargs)
recp_choices = [
('orders', _('Everyone who created a ticket order'))
]
if event.settings.attendee_emails_asked:
recp_choices += [
('attendees', _('Every attendee (falling back to the order contact when no attendee email address is '
'given)')),
('both', _('Both (all order contact addresses and all attendee email addresses)'))
]
self.fields['recipients'].choices = recp_choices
self.fields['subject'] = I18nFormField(
label=_('Subject'),
widget=I18nTextInput, required=True,
locales=event.settings.get('locales'),
)
self.fields['message'] = I18nFormField(
label=_('Message'),
widget=I18nTextarea, required=True,
locales=event.settings.get('locales'),
)
self._set_field_placeholders('subject', ['event', 'order', 'position_or_address'])
self._set_field_placeholders('message', ['event', 'order', 'position_or_address'])
choices = list(Order.STATUS_CHOICE)
if not event.settings.get('payment_term_expire_automatically', as_type=bool):
choices.append(
('overdue', _('pending with payment overdue'))
)
self.fields['sendto'] = forms.MultipleChoiceField(
label=_("Send to customers with order status"),
widget=forms.CheckboxSelectMultiple(
attrs={'class': 'scrolling-multiple-choice'}
),
choices=choices
)
if not self.initial.get('sendto'):
self.initial['sendto'] = ['p', 'n']
self.fields['items'].queryset = event.items.all()
if not self.initial.get('items'):
self.initial['items'] = event.items.all()
if event.has_subevents:
self.fields['subevent'].queryset = event.subevents.all()
self.fields['subevent'].widget = Select2(
attrs={
'data-model-select2': 'event',
'data-select2-url': reverse('control:event.subevents.select2', kwargs={
'event': event.slug,
'organizer': event.organizer.slug,
}),
'data-placeholder': pgettext_lazy('subevent', 'Date')
}
)
self.fields['subevent'].widget.choices = self.fields['subevent'].choices
else:
del self.fields['subevent']
avg_line_length: 39 | max_line_length: 118 | alphanum_fraction: 0.587494
content_no_comment:
from django import forms
from django.urls import reverse
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
from i18nfield.forms import I18nFormField, I18nTextarea, I18nTextInput
from pretix.base.email import get_available_placeholders
from pretix.base.forms import PlaceholderValidator
from pretix.base.models import Item, Order, SubEvent
from pretix.control.forms.widgets import Select2
class MailForm(forms.Form):
recipients = forms.ChoiceField(
label=_('Send email to'),
widget=forms.RadioSelect,
initial='orders',
choices=[]
)
sendto = forms.MultipleChoiceField()
subject = forms.CharField(label=_("Subject"))
message = forms.CharField(label=_("Message"))
items = forms.ModelMultipleChoiceField(
widget=forms.CheckboxSelectMultiple(
attrs={'class': 'scrolling-multiple-choice'}
),
label=_('Only send to people who bought'),
required=True,
queryset=Item.objects.none()
)
subevent = forms.ModelChoiceField(
SubEvent.objects.none(),
label=_('Only send to customers of'),
required=False,
empty_label=pgettext_lazy('subevent', 'All dates')
)
def _set_field_placeholders(self, fn, base_parameters):
phs = [
'{%s}' % p
for p in sorted(get_available_placeholders(self.event, base_parameters).keys())
]
ht = _('Available placeholders: {list}').format(
list=', '.join(phs)
)
if self.fields[fn].help_text:
self.fields[fn].help_text += ' ' + str(ht)
else:
self.fields[fn].help_text = ht
self.fields[fn].validators.append(
PlaceholderValidator(phs)
)
def __init__(self, *args, **kwargs):
event = self.event = kwargs.pop('event')
super().__init__(*args, **kwargs)
recp_choices = [
('orders', _('Everyone who created a ticket order'))
]
if event.settings.attendee_emails_asked:
recp_choices += [
('attendees', _('Every attendee (falling back to the order contact when no attendee email address is '
'given)')),
('both', _('Both (all order contact addresses and all attendee email addresses)'))
]
self.fields['recipients'].choices = recp_choices
self.fields['subject'] = I18nFormField(
label=_('Subject'),
widget=I18nTextInput, required=True,
locales=event.settings.get('locales'),
)
self.fields['message'] = I18nFormField(
label=_('Message'),
widget=I18nTextarea, required=True,
locales=event.settings.get('locales'),
)
self._set_field_placeholders('subject', ['event', 'order', 'position_or_address'])
self._set_field_placeholders('message', ['event', 'order', 'position_or_address'])
choices = list(Order.STATUS_CHOICE)
if not event.settings.get('payment_term_expire_automatically', as_type=bool):
choices.append(
('overdue', _('pending with payment overdue'))
)
self.fields['sendto'] = forms.MultipleChoiceField(
label=_("Send to customers with order status"),
widget=forms.CheckboxSelectMultiple(
attrs={'class': 'scrolling-multiple-choice'}
),
choices=choices
)
if not self.initial.get('sendto'):
self.initial['sendto'] = ['p', 'n']
self.fields['items'].queryset = event.items.all()
if not self.initial.get('items'):
self.initial['items'] = event.items.all()
if event.has_subevents:
self.fields['subevent'].queryset = event.subevents.all()
self.fields['subevent'].widget = Select2(
attrs={
'data-model-select2': 'event',
'data-select2-url': reverse('control:event.subevents.select2', kwargs={
'event': event.slug,
'organizer': event.organizer.slug,
}),
'data-placeholder': pgettext_lazy('subevent', 'Date')
}
)
self.fields['subevent'].widget.choices = self.fields['subevent'].choices
else:
del self.fields['subevent']
is_comment_constant_removed: true | is_sharp_comment_removed: true

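MailForm pops the event keyword argument before calling the parent constructor, so callers must always supply it. A hypothetical view-side sketch (the request handling is assumed, not taken from pretix):

# Hypothetical usage: the form requires the event as a keyword argument.
form = MailForm(request.POST or None, event=request.event)
if form.is_valid():
    recipients = form.cleaned_data['recipients']
    subject = form.cleaned_data['subject']
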
hexsha: f70a5c3041d88ac442f82714d37a6a78ffa82afd | size: 14,503 | ext: py | lang: Python
max_stars: repo=dadu0699/tytus @ e1920f6932c840859e3e79eb8756a1d3da88bd77, path=bases_2021_1S/Grupo 03/parserT28/models/instructions/Expression/trigonometric_functions.py, licenses=["MIT"], count=35, events 2020-12-07T03:11:43.000Z .. 2021-04-15T17:38:16.000Z
max_issues: repo=dadu0699/tytus @ e1920f6932c840859e3e79eb8756a1d3da88bd77, path=bases_2021_1S/Grupo 03/parserT28/models/instructions/Expression/trigonometric_functions.py, licenses=["MIT"], count=47, events 2020-12-09T01:29:09.000Z .. 2021-01-13T05:37:50.000Z
max_forks: repo=dadu0699/tytus @ e1920f6932c840859e3e79eb8756a1d3da88bd77, path=bases_2021_1S/Grupo 03/parserT28/models/instructions/Expression/trigonometric_functions.py, licenses=["MIT"], count=556, events 2020-12-07T03:13:31.000Z .. 2021-06-17T17:41:10.000Z
content:
from parserT28.models.instructions.Expression.type_enum import DATA_TYPE
from parserT28.controllers.three_address_code import ThreeAddressCode
from parserT28.controllers.error_controller import ErrorController
from parserT28.models.instructions.Expression.expression import Expression, Identifiers, PrimitiveData
from parserT28.models.instructions.shared import ObjectReference
from math import *
class ExpressionsTrigonometric(Expression):
'''
ExpressionsTrigonometric
'''
def __init__(self, type_trigonometric, expression1, optional_expression2, line, column):
self.type_trigonometric = type_trigonometric
self.expression1 = expression1
self.optional_expression2 = optional_expression2
self.line = line
self.column = column
self.alias = f'{self.type_trigonometric}({self.expression1.alias})'
self._tac = ""
def __repr__(self):
return str(vars(self))
def process(self, expression):
type_trigo = self.type_trigonometric
exp1 = None
exp2 = None
result = 0
lista1 = []
try:
if isinstance(self.expression1, Identifiers):
if isinstance(self.optional_expression2, PrimitiveData):
exp2 = self.optional_expression2.process(expression)
exp1 = self.expression1.process(expression)
if type_trigo.lower() == "acos":
result = [acos(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'acosd':
result = [degrees(acos(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'asin':
result = [asin(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'asind':
result = [degrees(asin(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'atan':
result = [atan(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'atand':
result = [degrees(atan(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'atan2':
result = [atan2(columns, exp2.value)
for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'atan2d':
result = [degrees(atan2(columns, exp2.value))
for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'cos':
result = [cos(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'cosd':
result = [degrees(cos(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'cot':
result = [(1)/(tan(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'cotd':
result = [degrees((1)/(tan(columns)))
for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'sin':
result = [sin(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'sind':
result = [degrees(sin(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'tan':
result = [tan(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'tand':
result = [degrees(tan(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'cosh':
result = [cosh(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'sinh':
result = [sinh(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'tanh':
result = [tanh(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'acosh':
result = [acosh(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'asinh':
result = [asinh(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'atanh':
result = [atanh(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
else:
if isinstance(self.expression1, PrimitiveData):
exp1 = self.expression1.process(expression)
if isinstance(self.optional_expression2, PrimitiveData):
exp2 = self.optional_expression2.process(expression)
if type_trigo.lower() == "acos":
result = round(acos(float(exp1.value)), 4)
elif type_trigo.lower() == 'acosd':
result = round(degrees(acos(float(exp1.value))), 4)
elif type_trigo.lower() == 'asin':
result = round(asin(float(exp1.value)), 4)
elif type_trigo.lower() == 'asind':
result = round(degrees(asin(float(exp1.value))), 4)
elif type_trigo.lower() == 'atan':
result = round(atan(float(exp1.value)), 4)
elif type_trigo.lower() == 'atand':
result = round(degrees(atan(float(exp1.value))), 4)
elif type_trigo.lower() == 'atan2':
result = round(
atan2(float(exp1.value), float(exp2.value)), 4)
elif type_trigo.lower() == 'atan2d':
result = round(
degrees(atan2(float(exp1.value), float(exp2.value))), 4)
elif type_trigo.lower() == 'cos':
result = round(cos(float(exp1.value)), 4)
elif type_trigo.lower() == 'cosd':
result = round(degrees(cos(float(exp1.value))), 4)
elif type_trigo.lower() == 'cot':
result = round(1/(tan(float(exp1.value))), 4)
elif type_trigo.lower() == 'cotd':
result = round(degrees(1/(tan(float(exp1.value)))), 4)
elif type_trigo.lower() == 'sin':
result = round(sin(float(exp1.value)), 4)
elif type_trigo.lower() == 'sind':
result = round(degrees(sin(float(exp1.value))), 4)
elif type_trigo.lower() == 'tan':
result = round(tan(float(exp1.value)), 4)
elif type_trigo.lower() == 'tand':
result = round(degrees(tan(float(exp1.value))), 4)
elif type_trigo.lower() == 'cosh':
result = round(cosh(float(exp1.value)), 4)
elif type_trigo.lower() == 'sinh':
result = round(sinh(float(exp1.value)), 4)
elif type_trigo.lower() == 'tanh':
result = round(tanh(float(exp1.value)), 4)
elif type_trigo.lower() == 'acosh':
result = round(acosh(float(exp1.value)), 4)
elif type_trigo.lower() == 'asinh':
result = round(asinh(float(exp1.value)), 4)
elif type_trigo.lower() == 'atanh':
result = round(atanh(float(exp1.value)), 4)
return PrimitiveData(DATA_TYPE.NUMBER, result, self.line, self.column)
        except Exception:
desc = "FATAL ERROR --- ExpressionsTrigonometric"
ErrorController().add(34, 'Execution', desc, self.line, self.column)
def compile(self, expression):
type_trigo = self.type_trigonometric
temporal = ThreeAddressCode().newTemp()
temp1 = self.expression1.compile(expression)
temp2 = None
if self.optional_expression2:
temp2 = self.optional_expression2.compile(expression)
if type_trigo.lower() == "acos":
ThreeAddressCode().addCode(f"{temporal} = acos({temp1.value})")
elif type_trigo.lower() == 'acosd':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = acos({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'asin':
ThreeAddressCode().addCode(f"{temporal} = asin({temp1.value})")
elif type_trigo.lower() == 'asind':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = asin({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'atan':
ThreeAddressCode().addCode(f"{temporal} = atan({temp1.value})")
elif type_trigo.lower() == 'atand':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = atan({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'atan2':
ThreeAddressCode().addCode(
f"{temporal} = atan2({temp1.value}, {temp2.value})")
elif type_trigo.lower() == 'atan2d':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(
f"{temporal1} = atan2({temp1.value}, {temp2.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'cos':
ThreeAddressCode().addCode(f"{temporal} = cos({temp1.value})")
elif type_trigo.lower() == 'cosd':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = cos({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'cot':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = tan({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = 1 / {temporal1}")
elif type_trigo.lower() == 'cotd':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = tan({temp1.value})")
temporal2 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal2} = 1 / {temporal1}")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal2})")
elif type_trigo.lower() == 'sin':
ThreeAddressCode().addCode(f"{temporal} = sin({temp1.value})")
elif type_trigo.lower() == 'sind':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = sin({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'tan':
ThreeAddressCode().addCode(f"{temporal} = tan({temp1.value})")
elif type_trigo.lower() == 'tand':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = tan({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'cosh':
ThreeAddressCode().addCode(f"{temporal} = cosh({temp1.value})")
elif type_trigo.lower() == 'sinh':
ThreeAddressCode().addCode(f"{temporal} = sinh({temp1.value})")
elif type_trigo.lower() == 'tanh':
ThreeAddressCode().addCode(f"{temporal} = tanh({temp1.value})")
elif type_trigo.lower() == 'acosh':
ThreeAddressCode().addCode(f"{temporal} = acosh({temp1.value})")
elif type_trigo.lower() == 'asinh':
ThreeAddressCode().addCode(f"{temporal} = asinh({temp1.value})")
elif type_trigo.lower() == 'atanh':
ThreeAddressCode().addCode(f"{temporal} = atanh({temp1.value})")
return PrimitiveData(DATA_TYPE.NUMBER, temporal, self.line, self.column)
avg_line_length: 46.187898 | max_line_length: 102 | alphanum_fraction: 0.524581
content_no_comment:
from parserT28.models.instructions.Expression.type_enum import DATA_TYPE
from parserT28.controllers.three_address_code import ThreeAddressCode
from parserT28.controllers.error_controller import ErrorController
from parserT28.models.instructions.Expression.expression import Expression, Identifiers, PrimitiveData
from parserT28.models.instructions.shared import ObjectReference
from math import *
class ExpressionsTrigonometric(Expression):
def __init__(self, type_trigonometric, expression1, optional_expression2, line, column):
self.type_trigonometric = type_trigonometric
self.expression1 = expression1
self.optional_expression2 = optional_expression2
self.line = line
self.column = column
self.alias = f'{self.type_trigonometric}({self.expression1.alias})'
self._tac = ""
def __repr__(self):
return str(vars(self))
def process(self, expression):
type_trigo = self.type_trigonometric
exp1 = None
exp2 = None
result = 0
lista1 = []
try:
if isinstance(self.expression1, Identifiers):
if isinstance(self.optional_expression2, PrimitiveData):
exp2 = self.optional_expression2.process(expression)
exp1 = self.expression1.process(expression)
if type_trigo.lower() == "acos":
result = [acos(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'acosd':
result = [degrees(acos(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'asin':
result = [asin(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'asind':
result = [degrees(asin(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'atan':
result = [atan(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'atand':
result = [degrees(atan(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'atan2':
result = [atan2(columns, exp2.value)
for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'atan2d':
result = [degrees(atan2(columns, exp2.value))
for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'cos':
result = [cos(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'cosd':
result = [degrees(cos(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'cot':
result = [(1)/(tan(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'cotd':
result = [degrees((1)/(tan(columns)))
for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'sin':
result = [sin(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'sind':
result = [degrees(sin(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'tan':
result = [tan(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'tand':
result = [degrees(tan(columns)) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'cosh':
result = [cosh(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'sinh':
result = [sinh(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'tanh':
result = [tanh(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'acosh':
result = [acosh(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'asinh':
result = [asinh(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
elif type_trigo.lower() == 'atanh':
result = [atanh(columns) for columns in exp1[0]]
lista1.append(result)
lista1.append(self.alias)
return lista1
else:
if isinstance(self.expression1, PrimitiveData):
exp1 = self.expression1.process(expression)
if isinstance(self.optional_expression2, PrimitiveData):
exp2 = self.optional_expression2.process(expression)
if type_trigo.lower() == "acos":
result = round(acos(float(exp1.value)), 4)
elif type_trigo.lower() == 'acosd':
result = round(degrees(acos(float(exp1.value))), 4)
elif type_trigo.lower() == 'asin':
result = round(asin(float(exp1.value)), 4)
elif type_trigo.lower() == 'asind':
result = round(degrees(asin(float(exp1.value))), 4)
elif type_trigo.lower() == 'atan':
result = round(atan(float(exp1.value)), 4)
elif type_trigo.lower() == 'atand':
result = round(degrees(atan(float(exp1.value))), 4)
elif type_trigo.lower() == 'atan2':
result = round(
atan2(float(exp1.value), float(exp2.value)), 4)
elif type_trigo.lower() == 'atan2d':
result = round(
degrees(atan2(float(exp1.value), float(exp2.value))), 4)
elif type_trigo.lower() == 'cos':
result = round(cos(float(exp1.value)), 4)
elif type_trigo.lower() == 'cosd':
result = round(degrees(cos(float(exp1.value))), 4)
elif type_trigo.lower() == 'cot':
result = round(1/(tan(float(exp1.value))), 4)
elif type_trigo.lower() == 'cotd':
result = round(degrees(1/(tan(float(exp1.value)))), 4)
elif type_trigo.lower() == 'sin':
result = round(sin(float(exp1.value)), 4)
elif type_trigo.lower() == 'sind':
result = round(degrees(sin(float(exp1.value))), 4)
elif type_trigo.lower() == 'tan':
result = round(tan(float(exp1.value)), 4)
elif type_trigo.lower() == 'tand':
result = round(degrees(tan(float(exp1.value))), 4)
elif type_trigo.lower() == 'cosh':
result = round(cosh(float(exp1.value)), 4)
elif type_trigo.lower() == 'sinh':
result = round(sinh(float(exp1.value)), 4)
elif type_trigo.lower() == 'tanh':
result = round(tanh(float(exp1.value)), 4)
elif type_trigo.lower() == 'acosh':
result = round(acosh(float(exp1.value)), 4)
elif type_trigo.lower() == 'asinh':
result = round(asinh(float(exp1.value)), 4)
elif type_trigo.lower() == 'atanh':
result = round(atanh(float(exp1.value)), 4)
return PrimitiveData(DATA_TYPE.NUMBER, result, self.line, self.column)
        except Exception:
desc = "FATAL ERROR --- ExpressionsTrigonometric"
ErrorController().add(34, 'Execution', desc, self.line, self.column)
def compile(self, expression):
type_trigo = self.type_trigonometric
temporal = ThreeAddressCode().newTemp()
temp1 = self.expression1.compile(expression)
temp2 = None
if self.optional_expression2:
temp2 = self.optional_expression2.compile(expression)
if type_trigo.lower() == "acos":
ThreeAddressCode().addCode(f"{temporal} = acos({temp1.value})")
elif type_trigo.lower() == 'acosd':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = acos({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'asin':
ThreeAddressCode().addCode(f"{temporal} = asin({temp1.value})")
elif type_trigo.lower() == 'asind':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = asin({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'atan':
ThreeAddressCode().addCode(f"{temporal} = atan({temp1.value})")
elif type_trigo.lower() == 'atand':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = atan({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'atan2':
ThreeAddressCode().addCode(
f"{temporal} = atan2({temp1.value}, {temp2.value})")
elif type_trigo.lower() == 'atan2d':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(
f"{temporal1} = atan2({temp1.value}, {temp2.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'cos':
ThreeAddressCode().addCode(f"{temporal} = cos({temp1.value})")
elif type_trigo.lower() == 'cosd':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = cos({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'cot':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = tan({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = 1 / {temporal1}")
elif type_trigo.lower() == 'cotd':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = tan({temp1.value})")
temporal2 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal2} = 1 / {temporal1}")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal2})")
elif type_trigo.lower() == 'sin':
ThreeAddressCode().addCode(f"{temporal} = sin({temp1.value})")
elif type_trigo.lower() == 'sind':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = sin({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'tan':
ThreeAddressCode().addCode(f"{temporal} = tan({temp1.value})")
elif type_trigo.lower() == 'tand':
temporal1 = ThreeAddressCode().newTemp()
ThreeAddressCode().addCode(f"{temporal1} = tan({temp1.value})")
ThreeAddressCode().addCode(f"{temporal} = degrees({temporal1})")
elif type_trigo.lower() == 'cosh':
ThreeAddressCode().addCode(f"{temporal} = cosh({temp1.value})")
elif type_trigo.lower() == 'sinh':
ThreeAddressCode().addCode(f"{temporal} = sinh({temp1.value})")
elif type_trigo.lower() == 'tanh':
ThreeAddressCode().addCode(f"{temporal} = tanh({temp1.value})")
elif type_trigo.lower() == 'acosh':
ThreeAddressCode().addCode(f"{temporal} = acosh({temp1.value})")
elif type_trigo.lower() == 'asinh':
ThreeAddressCode().addCode(f"{temporal} = asinh({temp1.value})")
elif type_trigo.lower() == 'atanh':
ThreeAddressCode().addCode(f"{temporal} = atanh({temp1.value})")
return PrimitiveData(DATA_TYPE.NUMBER, temporal, self.line, self.column)
is_comment_constant_removed: true | is_sharp_comment_removed: true

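The scalar branch of process() above repeats the shape round(f(float(exp1.value)), 4) for more than twenty function names. A table-driven sketch of that branch (single-argument functions only; atan2 and the column-wise path are left out):

from math import acos, asin, atan, cos, sin, tan, degrees

# Map each function name to a callable instead of a long elif chain.
_BASE = {
    'acos': acos, 'asin': asin, 'atan': atan,
    'cos': cos, 'sin': sin, 'tan': tan,
    'cot': lambda x: 1 / tan(x),
}
TRIG = dict(_BASE)
# The *d variants report degrees; f=f pins each callable at definition time.
TRIG.update({name + 'd': lambda x, f=f: degrees(f(x)) for name, f in _BASE.items()})

def apply_trig(name, value):
    # e.g. apply_trig('cosd', 0.5) == round(degrees(cos(0.5)), 4)
    return round(TRIG[name.lower()](float(value)), 4)
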
hexsha: f70a5c8395fa19831b0e3bd7d8affacab5114e5c | size: 5,465 | ext: py | lang: Python
max_stars: repo=kattni/Adafruit_CircuitPython_MatrixKeypad @ cbe0474ca08ce0b9beaf7322ecda487d8db9a5fe, path=docs/conf.py, licenses=["MIT"], count=null
max_issues: repo=kattni/Adafruit_CircuitPython_MatrixKeypad @ cbe0474ca08ce0b9beaf7322ecda487d8db9a5fe, path=docs/conf.py, licenses=["MIT"], count=null
max_forks: repo=kattni/Adafruit_CircuitPython_MatrixKeypad @ cbe0474ca08ce0b9beaf7322ecda487d8db9a5fe, path=docs/conf.py, licenses=["MIT"], count=null
content:
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["digitalio", "busio"]
# autodoc_mock_imports = ["digitalio"]
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),'BusDevice': ('https://circuitpython.readthedocs.io/projects/busdevice/en/latest/', None),'Register': ('https://circuitpython.readthedocs.io/projects/register/en/latest/', None),'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Adafruit MatrixKeypad Library'
copyright = u'2018 ladyada'
author = u'ladyada'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env', 'CODE_OF_CONDUCT.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = '_static/favicon.ico'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AdafruitMatrixkeypadLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AdafruitMatrixKeypadLibrary.tex', u'AdafruitMatrixKeypad Library Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'AdafruitMatrixKeypadlibrary', u'Adafruit MatrixKeypad Library Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AdafruitMatrixKeypadLibrary', u'Adafruit MatrixKeypad Library Documentation',
author, 'AdafruitMatrixKeypadLibrary', 'One line description of project.',
'Miscellaneous'),
]
avg_line_length: 33.734568 | max_line_length: 324 | alphanum_fraction: 0.69021
content_no_comment:
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
]
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),'BusDevice': ('https://circuitpython.readthedocs.io/projects/busdevice/en/latest/', None),'Register': ('https://circuitpython.readthedocs.io/projects/register/en/latest/', None),'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None)}
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Adafruit MatrixKeypad Library'
copyright = u'2018 ladyada'
author = u'ladyada'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env', 'CODE_OF_CONDUCT.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
html_static_path = ['_static']
html_favicon = '_static/favicon.ico'
htmlhelp_basename = 'AdafruitMatrixkeypadLibrarydoc'
latex_elements = {
}
latex_documents = [
(master_doc, 'AdafruitMatrixKeypadLibrary.tex', u'AdafruitMatrixKeypad Library Documentation',
author, 'manual'),
]
man_pages = [
(master_doc, 'AdafruitMatrixKeypadlibrary', u'Adafruit MatrixKeypad Library Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'AdafruitMatrixKeypadLibrary', u'Adafruit MatrixKeypad Library Documentation',
author, 'AdafruitMatrixKeypadLibrary', 'One line description of project.',
'Miscellaneous'),
]
is_comment_constant_removed: true | is_sharp_comment_removed: true

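This conf.py drives an ordinary Sphinx build; invoking it programmatically, as a sketch (paths assume the repository root as the working directory):

# Sketch: run the HTML build that this conf.py configures.
from sphinx.cmd.build import build_main

exit_code = build_main(["-b", "html", "docs", "docs/_build/html"])
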
hexsha: f70a5d79a04061ba9e020a7854cb72fde0f00f28 | size: 8,923 | ext: py | lang: Python
max_stars: repo=SoerenSofke/Anki @ 533b3d340f441582d094b04cfa1eb4d99bd7a8d0, path=DeutschlandStadtLandFluss/generateAnkiDeck.py, licenses=["CC0-1.0"], count=null
max_issues: repo=SoerenSofke/Anki @ 533b3d340f441582d094b04cfa1eb4d99bd7a8d0, path=DeutschlandStadtLandFluss/generateAnkiDeck.py, licenses=["CC0-1.0"], count=null
max_forks: repo=SoerenSofke/Anki @ 533b3d340f441582d094b04cfa1eb4d99bd7a8d0, path=DeutschlandStadtLandFluss/generateAnkiDeck.py, licenses=["CC0-1.0"], count=null
content:
import genanki
import random
def generateAnki():
random.seed(42)
title = 'Deutschland - Stadt, Land, Fluss'
aDeck = genanki.Deck(
2059400110,
title)
# location to name
aModel = genanki.Model(
1607392319,
title,
fields=[
{'name': 'Question'},
{'name': 'MapStyleQ'},
{'name': 'Answer'},
{'name': 'MapStyleA'},
],
templates=[
{
'name': 'Card Number',
'qfmt': '{{Question}}<br><br>{{MapStyleQ}}<div id="inline-svg"></div><script src="https://rawcdn.githack.com/SoerenSofke/Anki/release/v1.0.0/DeutschlandStadtLandFluss/inline-svg.js"></script>',
'afmt': '{{Question}}<br><br>{{MapStyleA}}<div id="inline-svg"></div><script src="https://rawcdn.githack.com/SoerenSofke/Anki/release/v1.0.0/DeutschlandStadtLandFluss/inline-svg.js"></script><hr id=answer><u>{{Answer}}</u>',
},
],
css='''
.card {
font-family: arial;
font-size: 20px;
text-align: center;
color: black;
background-color: white;
}
hr#answer {
visibility: hidden;
}
'''
)
states = [
"Niedersachsen",
"Hamburg",
"Brandenburg",
"Berlin",
"Saarland",
"Hessen",
"Bremen",
"Nordrhein-Westfalen",
"Rheinland-Pfalz",
"Sachsen",
"Schleswig-Holstein",
"Thüringen",
"Mecklenburg-Vorpommern",
"Bayern",
"Baden-Württemberg",
"Sachsen-Anhalt",
]
random.shuffle(states)
for state in states:
question = 'Wie heißt das rot markierte <u>Bundesland</u>?'
mapStyle = '<style>#State_' + state + ' {fill: crimson;}</style>'
answer = state
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyle,
answer,
mapStyle,
]
))
for state in states:
question = 'Wo liegt das Bundesland <u>' + state +'</u>?'
mapStyleQ = '<style></style>'
answer = ''
mapStyleA = '<style>#State_' + state + ' {fill: crimson;}</style>'
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyleQ,
answer,
mapStyleA,
]
))
cities = [
'Bremen',
'Berlin',
'Hamburg',
'Dresden',
'Düsseldorf',
'Erfurt',
'Hannover',
'Kiel',
'Magdeburg',
'Mainz',
'München',
'Saarbrücken',
'Schwerin',
'Stuttgart',
'Wiesbaden',
'Potsdam',
]
random.shuffle(cities)
for city in cities:
question = 'Wie heißt die rot markierte <u>Stadt</u>?'
mapStyle = '<style>#City_' + city + ' {fill: crimson;} #Cities {visibility: visible;}</style>'
answer = city
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyle,
answer,
mapStyle
]
))
for city in cities:
question = 'Wo liegt die Stadt <u>' + city +'</u>?'
mapStyleQ = '<style>#Cities {visibility: visible;}</style>'
answer = ''
mapStyleA = '<style>#City_' + city + ' {fill: crimson;} #Cities {visibility: visible;}</style>'
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyleQ,
answer,
mapStyleA
]
))
mountains = [
'Teutoburger_Wald',
'Rothaargebirge',
'Westerwald',
'Eifel',
'Taunus',
'Odenwald',
'Hunsrück',
'Vogelsberg',
'Rhön',
'Thüringer_Wald',
'Erzgebirge',
'Fichtelgebirge',
'Oberpfälzer_Wald',
'Fränkische_Alb',
'Bayerischer_Wald',
'Schwäbische_Alb',
'Schwarzwald',
'Alpenvorland',
'Spessart',
'Harz',
]
random.shuffle(mountains)
for mountain in mountains:
question = 'Wie heißt das rot markierte <u>Gebirge</u>?'
mapStyle = '<style>#Mountain_' + mountain + ' {fill: crimson;} #Mountains {visibility: visible;}</style>'
answer = mountain.replace('_', ' ')
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyle,
answer,
mapStyle
]
))
for mountain in mountains:
question = 'Wo liegt das Gebirge <u>' + mountain.replace('_', ' ') +'</u>?'
mapStyleQ = '<style>#Mountains {visibility: visible;}</style>'
answer = ''
mapStyleA = '<style>#Mountain_' + mountain + ' {fill: crimson;} #Mountains {visibility: visible;}</style>'
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyleQ,
answer,
mapStyleA
]
))
rivers = [
'Donau',
'Lech',
'Isar',
'Inn',
'Rhein',
'Neckar',
'Main',
'Ems',
'Weser',
'Werra',
'Ruhr',
'Oder',
'Saale',
'Mosel',
'Spree',
'Neisse',
'Lippe',
'Havel',
'Elbe',
'Fulda',
'Aller',
'Mulde',
'Unstrut',
'Peene',
'Naab',
'Lahn',
'Leine',
'Regnitz',
'Salzach',
]
random.shuffle(rivers)
for river in rivers:
question = 'Wie heißt der rot markierte <u>Fluss</u>?'
mapStyle = '<style>#River_' + river + ' {stroke: crimson; stroke-width: 5} #Rivers {visibility: visible;}</style>'
answer = river
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyle,
answer,
mapStyle
]
))
for river in rivers:
question = 'Wo liegt der Fluss <u>' + river +'</u>?'
mapStyleQ = '<style>#Rivers {visibility: visible;}</style>'
answer = ''
mapStyleA = '<style>#River_' + river + ' {stroke: crimson; stroke-width: 5} #Rivers {visibility: visible;}</style>'
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyleQ,
answer,
mapStyleA
]
))
nations = [
'Tschechien',
'Österreich',
'Frankreich',
'Schweiz',
'Polen',
'Belgien',
'Luxemburg',
'Niederlande',
'Dänemark',
]
random.shuffle(nations)
for nation in nations:
question = 'Wie heißt das rot markierte <u>Land</u>?'
mapStyle = '<style>#Nation_' + nation + ' {fill: crimson;} </style>'
answer = nation
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyle,
answer,
mapStyle
]
))
for nation in nations:
        question = 'Wo liegt das Land <u>' + nation + '</u>?'
mapStyleQ = '<style></style>'
answer = ''
mapStyleA = '<style>#Nation_' + nation + ' {fill: crimson;} </style>'
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyleQ,
answer,
mapStyleA
]
))
aPackage = genanki.Package(aDeck)
aPackage.write_to_file(title + '.apkg')
def main():
generateAnki()
if __name__ == "__main__":
main()
avg_line_length: 26.876506 | max_line_length: 240 | alphanum_fraction: 0.419029
content_no_comment:
import genanki
import random
def generateAnki():
random.seed(42)
title = 'Deutschland - Stadt, Land, Fluss'
aDeck = genanki.Deck(
2059400110,
title)
aModel = genanki.Model(
1607392319,
title,
fields=[
{'name': 'Question'},
{'name': 'MapStyleQ'},
{'name': 'Answer'},
{'name': 'MapStyleA'},
],
templates=[
{
'name': 'Card Number',
'qfmt': '{{Question}}<br><br>{{MapStyleQ}}<div id="inline-svg"></div><script src="https://rawcdn.githack.com/SoerenSofke/Anki/release/v1.0.0/DeutschlandStadtLandFluss/inline-svg.js"></script>',
'afmt': '{{Question}}<br><br>{{MapStyleA}}<div id="inline-svg"></div><script src="https://rawcdn.githack.com/SoerenSofke/Anki/release/v1.0.0/DeutschlandStadtLandFluss/inline-svg.js"></script><hr id=answer><u>{{Answer}}</u>',
},
],
css='''
.card {
font-family: arial;
font-size: 20px;
text-align: center;
color: black;
background-color: white;
}
hr#answer {
visibility: hidden;
}
'''
)
states = [
"Niedersachsen",
"Hamburg",
"Brandenburg",
"Berlin",
"Saarland",
"Hessen",
"Bremen",
"Nordrhein-Westfalen",
"Rheinland-Pfalz",
"Sachsen",
"Schleswig-Holstein",
"Thüringen",
"Mecklenburg-Vorpommern",
"Bayern",
"Baden-Württemberg",
"Sachsen-Anhalt",
]
random.shuffle(states)
for state in states:
question = 'Wie heißt das rot markierte <u>Bundesland</u>?'
mapStyle = '<style>#State_' + state + ' {fill: crimson;}</style>'
answer = state
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyle,
answer,
mapStyle,
]
))
for state in states:
question = 'Wo liegt das Bundesland <u>' + state +'</u>?'
mapStyleQ = '<style></style>'
answer = ''
mapStyleA = '<style>#State_' + state + ' {fill: crimson;}</style>'
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyleQ,
answer,
mapStyleA,
]
))
cities = [
'Bremen',
'Berlin',
'Hamburg',
'Dresden',
'Düsseldorf',
'Erfurt',
'Hannover',
'Kiel',
'Magdeburg',
'Mainz',
'München',
'Saarbrücken',
'Schwerin',
'Stuttgart',
'Wiesbaden',
'Potsdam',
]
random.shuffle(cities)
for city in cities:
question = 'Wie heißt die rot markierte <u>Stadt</u>?'
mapStyle = '<style>#City_' + city + ' {fill: crimson;} #Cities {visibility: visible;}</style>'
answer = city
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyle,
answer,
mapStyle
]
))
for city in cities:
question = 'Wo liegt die Stadt <u>' + city +'</u>?'
mapStyleQ = '<style>#Cities {visibility: visible;}</style>'
answer = ''
mapStyleA = '<style>#City_' + city + ' {fill: crimson;} #Cities {visibility: visible;}</style>'
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyleQ,
answer,
mapStyleA
]
))
mountains = [
'Teutoburger_Wald',
'Rothaargebirge',
'Westerwald',
'Eifel',
'Taunus',
'Odenwald',
'Hunsrück',
'Vogelsberg',
'Rhön',
'Thüringer_Wald',
'Erzgebirge',
'Fichtelgebirge',
'Oberpfälzer_Wald',
'Fränkische_Alb',
'Bayerischer_Wald',
'Schwäbische_Alb',
'Schwarzwald',
'Alpenvorland',
'Spessart',
'Harz',
]
random.shuffle(mountains)
for mountain in mountains:
question = 'Wie heißt das rot markierte <u>Gebirge</u>?'
mapStyle = '<style>#Mountain_' + mountain + ' {fill: crimson;} #Mountains {visibility: visible;}</style>'
answer = mountain.replace('_', ' ')
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyle,
answer,
mapStyle
]
))
for mountain in mountains:
question = 'Wo liegt das Gebirge <u>' + mountain.replace('_', ' ') +'</u>?'
mapStyleQ = '<style>#Mountains {visibility: visible;}</style>'
answer = ''
mapStyleA = '<style>#Mountain_' + mountain + ' {fill: crimson;} #Mountains {visibility: visible;}</style>'
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyleQ,
answer,
mapStyleA
]
))
rivers = [
'Donau',
'Lech',
'Isar',
'Inn',
'Rhein',
'Neckar',
'Main',
'Ems',
'Weser',
'Werra',
'Ruhr',
'Oder',
'Saale',
'Mosel',
'Spree',
'Neisse',
'Lippe',
'Havel',
'Elbe',
'Fulda',
'Aller',
'Mulde',
'Unstrut',
'Peene',
'Naab',
'Lahn',
'Leine',
'Regnitz',
'Salzach',
]
random.shuffle(rivers)
for river in rivers:
question = 'Wie heißt der rot markierte <u>Fluss</u>?'
mapStyle = '<style>#River_' + river + ' {stroke: crimson; stroke-width: 5} #Rivers {visibility: visible;}</style>'
answer = river
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyle,
answer,
mapStyle
]
))
for river in rivers:
question = 'Wo liegt der Fluss <u>' + river +'</u>?'
mapStyleQ = '<style>#Rivers {visibility: visible;}</style>'
answer = ''
mapStyleA = '<style>#River_' + river + ' {stroke: crimson; stroke-width: 5} #Rivers {visibility: visible;}</style>'
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyleQ,
answer,
mapStyleA
]
))
nations = [
'Tschechien',
'Österreich',
'Frankreich',
'Schweiz',
'Polen',
'Belgien',
'Luxemburg',
'Niederlande',
'Dänemark',
]
random.shuffle(nations)
for nation in nations:
question = 'Wie heißt das rot markierte <u>Land</u>?'
mapStyle = '<style>#Nation_' + nation + ' {fill: crimson;} </style>'
answer = nation
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyle,
answer,
mapStyle
]
))
for nation in nations:
question = 'Wo liegt das Land <u>' + nation +'</u>?'
mapStyleQ = '<style></style>'
answer = ''
mapStyleA = '<style>#Nation_' + nation + ' {fill: crimson;} </style>'
aDeck.add_note(
genanki.Note(
model=aModel,
fields=[
question,
mapStyleQ,
answer,
mapStyleA
]
))
aPackage = genanki.Package(aDeck)
aPackage.write_to_file(title + '.apkg')
def main():
generateAnki()
if __name__ == "__main__":
main()
| true
| true
|
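The two-pass question pattern in the record above (name the highlighted feature, then locate the named feature) is repeated verbatim for states, cities, mountains, rivers, and nations. A minimal refactor sketch, assuming the genanki deck and model from the record; the helper name add_feature_notes and its parameters are hypothetical:

import genanki

def add_feature_notes(deck, model, names, question_what, question_where,
                      style_for, base_style='<style></style>'):
    # Pass 1: the feature is highlighted on the map; ask for its name.
    for name in names:
        style = style_for(name)
        deck.add_note(genanki.Note(
            model=model,
            fields=[question_what, style, name.replace('_', ' '), style]))
    # Pass 2: the feature is named; ask where it lies on the map.
    for name in names:
        style = style_for(name)
        label = name.replace('_', ' ')
        deck.add_note(genanki.Note(
            model=model,
            fields=[question_where.format(label), base_style, '', style]))

# e.g. for the nations loops:
# add_feature_notes(aDeck, aModel, nations,
#     'Wie heißt das rot markierte <u>Land</u>?',
#     'Wo liegt das Land <u>{}</u>?',
#     lambda n: '<style>#Nation_' + n + ' {fill: crimson;} </style>')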
f70a5d9047d00493189d08971cef9c2e994138ee
| 11,060
|
py
|
Python
|
src/oci/logging/models/create_unified_agent_configuration_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249
|
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/logging/models/create_unified_agent_configuration_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228
|
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/logging/models/create_unified_agent_configuration_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224
|
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateUnifiedAgentConfigurationDetails(object):
"""
Unified Agent configuration creation object.
"""
def __init__(self, **kwargs):
"""
Initializes a new CreateUnifiedAgentConfigurationDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param display_name:
The value to assign to the display_name property of this CreateUnifiedAgentConfigurationDetails.
:type display_name: str
:param is_enabled:
The value to assign to the is_enabled property of this CreateUnifiedAgentConfigurationDetails.
:type is_enabled: bool
:param service_configuration:
The value to assign to the service_configuration property of this CreateUnifiedAgentConfigurationDetails.
:type service_configuration: oci.logging.models.UnifiedAgentServiceConfigurationDetails
:param defined_tags:
The value to assign to the defined_tags property of this CreateUnifiedAgentConfigurationDetails.
:type defined_tags: dict(str, dict(str, object))
:param freeform_tags:
The value to assign to the freeform_tags property of this CreateUnifiedAgentConfigurationDetails.
:type freeform_tags: dict(str, str)
:param compartment_id:
The value to assign to the compartment_id property of this CreateUnifiedAgentConfigurationDetails.
:type compartment_id: str
:param description:
The value to assign to the description property of this CreateUnifiedAgentConfigurationDetails.
:type description: str
:param group_association:
The value to assign to the group_association property of this CreateUnifiedAgentConfigurationDetails.
:type group_association: oci.logging.models.GroupAssociationDetails
"""
self.swagger_types = {
'display_name': 'str',
'is_enabled': 'bool',
'service_configuration': 'UnifiedAgentServiceConfigurationDetails',
'defined_tags': 'dict(str, dict(str, object))',
'freeform_tags': 'dict(str, str)',
'compartment_id': 'str',
'description': 'str',
'group_association': 'GroupAssociationDetails'
}
self.attribute_map = {
'display_name': 'displayName',
'is_enabled': 'isEnabled',
'service_configuration': 'serviceConfiguration',
'defined_tags': 'definedTags',
'freeform_tags': 'freeformTags',
'compartment_id': 'compartmentId',
'description': 'description',
'group_association': 'groupAssociation'
}
self._display_name = None
self._is_enabled = None
self._service_configuration = None
self._defined_tags = None
self._freeform_tags = None
self._compartment_id = None
self._description = None
self._group_association = None
@property
def display_name(self):
"""
Gets the display_name of this CreateUnifiedAgentConfigurationDetails.
The user-friendly display name. This must be unique within the enclosing resource,
and it's changeable. Avoid entering confidential information.
:return: The display_name of this CreateUnifiedAgentConfigurationDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this CreateUnifiedAgentConfigurationDetails.
The user-friendly display name. This must be unique within the enclosing resource,
and it's changeable. Avoid entering confidential information.
:param display_name: The display_name of this CreateUnifiedAgentConfigurationDetails.
:type: str
"""
self._display_name = display_name
@property
def is_enabled(self):
"""
**[Required]** Gets the is_enabled of this CreateUnifiedAgentConfigurationDetails.
Whether or not this resource is currently enabled.
:return: The is_enabled of this CreateUnifiedAgentConfigurationDetails.
:rtype: bool
"""
return self._is_enabled
@is_enabled.setter
def is_enabled(self, is_enabled):
"""
Sets the is_enabled of this CreateUnifiedAgentConfigurationDetails.
Whether or not this resource is currently enabled.
:param is_enabled: The is_enabled of this CreateUnifiedAgentConfigurationDetails.
:type: bool
"""
self._is_enabled = is_enabled
@property
def service_configuration(self):
"""
**[Required]** Gets the service_configuration of this CreateUnifiedAgentConfigurationDetails.
:return: The service_configuration of this CreateUnifiedAgentConfigurationDetails.
:rtype: oci.logging.models.UnifiedAgentServiceConfigurationDetails
"""
return self._service_configuration
@service_configuration.setter
def service_configuration(self, service_configuration):
"""
Sets the service_configuration of this CreateUnifiedAgentConfigurationDetails.
:param service_configuration: The service_configuration of this CreateUnifiedAgentConfigurationDetails.
:type: oci.logging.models.UnifiedAgentServiceConfigurationDetails
"""
self._service_configuration = service_configuration
@property
def defined_tags(self):
"""
Gets the defined_tags of this CreateUnifiedAgentConfigurationDetails.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this CreateUnifiedAgentConfigurationDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this CreateUnifiedAgentConfigurationDetails.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this CreateUnifiedAgentConfigurationDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this CreateUnifiedAgentConfigurationDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this CreateUnifiedAgentConfigurationDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this CreateUnifiedAgentConfigurationDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this CreateUnifiedAgentConfigurationDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this CreateUnifiedAgentConfigurationDetails.
The OCID of the compartment that the resource belongs to.
:return: The compartment_id of this CreateUnifiedAgentConfigurationDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this CreateUnifiedAgentConfigurationDetails.
The OCID of the compartment that the resource belongs to.
:param compartment_id: The compartment_id of this CreateUnifiedAgentConfigurationDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def description(self):
"""
Gets the description of this CreateUnifiedAgentConfigurationDetails.
Description for this resource.
:return: The description of this CreateUnifiedAgentConfigurationDetails.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this CreateUnifiedAgentConfigurationDetails.
Description for this resource.
:param description: The description of this CreateUnifiedAgentConfigurationDetails.
:type: str
"""
self._description = description
@property
def group_association(self):
"""
Gets the group_association of this CreateUnifiedAgentConfigurationDetails.
:return: The group_association of this CreateUnifiedAgentConfigurationDetails.
:rtype: oci.logging.models.GroupAssociationDetails
"""
return self._group_association
@group_association.setter
def group_association(self, group_association):
"""
Sets the group_association of this CreateUnifiedAgentConfigurationDetails.
:param group_association: The group_association of this CreateUnifiedAgentConfigurationDetails.
:type: oci.logging.models.GroupAssociationDetails
"""
self._group_association = group_association
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 36.866667
| 245
| 0.687703
|
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateUnifiedAgentConfigurationDetails(object):
def __init__(self, **kwargs):
self.swagger_types = {
'display_name': 'str',
'is_enabled': 'bool',
'service_configuration': 'UnifiedAgentServiceConfigurationDetails',
'defined_tags': 'dict(str, dict(str, object))',
'freeform_tags': 'dict(str, str)',
'compartment_id': 'str',
'description': 'str',
'group_association': 'GroupAssociationDetails'
}
self.attribute_map = {
'display_name': 'displayName',
'is_enabled': 'isEnabled',
'service_configuration': 'serviceConfiguration',
'defined_tags': 'definedTags',
'freeform_tags': 'freeformTags',
'compartment_id': 'compartmentId',
'description': 'description',
'group_association': 'groupAssociation'
}
self._display_name = None
self._is_enabled = None
self._service_configuration = None
self._defined_tags = None
self._freeform_tags = None
self._compartment_id = None
self._description = None
self._group_association = None
@property
def display_name(self):
return self._display_name
@display_name.setter
def display_name(self, display_name):
self._display_name = display_name
@property
def is_enabled(self):
return self._is_enabled
@is_enabled.setter
def is_enabled(self, is_enabled):
self._is_enabled = is_enabled
@property
def service_configuration(self):
return self._service_configuration
@service_configuration.setter
def service_configuration(self, service_configuration):
self._service_configuration = service_configuration
@property
def defined_tags(self):
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
self._defined_tags = defined_tags
@property
def freeform_tags(self):
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
self._freeform_tags = freeform_tags
@property
def compartment_id(self):
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
self._compartment_id = compartment_id
@property
def description(self):
return self._description
@description.setter
def description(self, description):
self._description = description
@property
def group_association(self):
return self._group_association
@group_association.setter
def group_association(self, group_association):
self._group_association = group_association
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
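Because of the @init_model_state_from_kwargs decorator, the model above can be populated directly from keyword arguments matching its swagger_types. A minimal usage sketch, assuming the oci SDK is installed; the OCID and tag values are placeholders:

from oci.logging.models import CreateUnifiedAgentConfigurationDetails

details = CreateUnifiedAgentConfigurationDetails(
    display_name='my-agent-config',
    is_enabled=True,
    compartment_id='ocid1.compartment.oc1..exampleuniqueID',  # placeholder OCID
    description='Example agent configuration',
    freeform_tags={'Department': 'Finance'},
)
print(details)  # __repr__ renders the flattened field dict

A real create call would also need service_configuration, which the class documents as required.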
f70a5f5db5f4952721a9c7dd0511c372f9948851
| 2,041
|
py
|
Python
|
whatsapp-bot-venv/Lib/site-packages/twilio/base/serialize.py
|
RedaMastouri/ConversationalPythonicChatBot
|
f204276d4b80348d42091b17d1a7d9eea33fb4e0
|
[
"MIT"
] | 1,362
|
2015-01-04T10:25:18.000Z
|
2022-03-24T10:07:08.000Z
|
whatsapp-bot-venv/Lib/site-packages/twilio/base/serialize.py
|
RedaMastouri/ConversationalPythonicChatBot
|
f204276d4b80348d42091b17d1a7d9eea33fb4e0
|
[
"MIT"
] | 299
|
2015-01-30T09:52:39.000Z
|
2022-03-31T23:03:02.000Z
|
bot/lib/python3.7/site-packages/twilio/base/serialize.py
|
carlosrh18/DavinciBot
|
d73a6b7f68d7bab25d134d3f85c6b63a86c206c5
|
[
"MIT"
] | 622
|
2015-01-03T04:43:09.000Z
|
2022-03-29T14:11:00.000Z
|
import datetime
import json
from twilio.base import values
def iso8601_date(d):
"""
Return a string representation of a date that the Twilio API understands
Format is YYYY-MM-DD. Returns None if d is not a string, datetime, or date
"""
if d == values.unset:
return d
elif isinstance(d, datetime.datetime):
return str(d.date())
elif isinstance(d, datetime.date):
return str(d)
elif isinstance(d, str):
return d
def iso8601_datetime(d):
"""
Return a string representation of a datetime that the Twilio API understands
Format is YYYY-MM-DDTHH:MM:SSZ. Returns None if d is not a string, datetime, or date
"""
if d == values.unset:
return d
elif isinstance(d, datetime.datetime) or isinstance(d, datetime.date):
return d.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(d, str):
return d
def prefixed_collapsible_map(m, prefix):
"""
Return a dict of params corresponding to those in m with the added prefix
"""
if m == values.unset:
return {}
def flatten_dict(d, result=None, prv_keys=None):
if result is None:
result = {}
if prv_keys is None:
prv_keys = []
for k, v in d.items():
if isinstance(v, dict):
flatten_dict(v, result, prv_keys + [k])
else:
result['.'.join(prv_keys + [k])] = v
return result
if isinstance(m, dict):
flattened = flatten_dict(m)
return {'{}.{}'.format(prefix, k): v for k, v in flattened.items()}
return {}
def object(obj):
"""
Return a jsonified string representation of obj if obj is jsonifiable else
return obj untouched
"""
if isinstance(obj, dict) or isinstance(obj, list):
return json.dumps(obj)
return obj
def map(lst, serialize_func):
"""
Applies serialize_func to every element in lst
"""
if not isinstance(lst, list):
return lst
return [serialize_func(e) for e in lst]
| 24.590361
| 78
| 0.602646
|
import datetime
import json
from twilio.base import values
def iso8601_date(d):
if d == values.unset:
return d
elif isinstance(d, datetime.datetime):
return str(d.date())
elif isinstance(d, datetime.date):
return str(d)
elif isinstance(d, str):
return d
def iso8601_datetime(d):
if d == values.unset:
return d
elif isinstance(d, datetime.datetime) or isinstance(d, datetime.date):
return d.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(d, str):
return d
def prefixed_collapsible_map(m, prefix):
if m == values.unset:
return {}
def flatten_dict(d, result=None, prv_keys=None):
if result is None:
result = {}
if prv_keys is None:
prv_keys = []
for k, v in d.items():
if isinstance(v, dict):
flatten_dict(v, result, prv_keys + [k])
else:
result['.'.join(prv_keys + [k])] = v
return result
if isinstance(m, dict):
flattened = flatten_dict(m)
return {'{}.{}'.format(prefix, k): v for k, v in flattened.items()}
return {}
def object(obj):
if isinstance(obj, dict) or isinstance(obj, list):
return json.dumps(obj)
return obj
def map(lst, serialize_func):
if not isinstance(lst, list):
return lst
return [serialize_func(e) for e in lst]
| true
| true
|
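A short worked example of the helpers above, assuming the module is importable from its package path twilio.base.serialize:

import datetime
from twilio.base import serialize

params = serialize.prefixed_collapsible_map(
    {'size': {'width': 100, 'height': 200}}, 'Dimensions')
# Nested keys are flattened with '.' and prefixed:
# {'Dimensions.size.width': 100, 'Dimensions.size.height': 200}

serialize.iso8601_date(datetime.datetime(2020, 1, 2, 3, 4, 5))  # '2020-01-02'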
f70a6146604ae77580af15ad77d5692ec2f498f1
| 4,713
|
py
|
Python
|
tests/test_dialects.py
|
blthree/sqlglot
|
c3130584db6d767575854ba0d57da37e026863c9
|
[
"MIT"
] | null | null | null |
tests/test_dialects.py
|
blthree/sqlglot
|
c3130584db6d767575854ba0d57da37e026863c9
|
[
"MIT"
] | null | null | null |
tests/test_dialects.py
|
blthree/sqlglot
|
c3130584db6d767575854ba0d57da37e026863c9
|
[
"MIT"
] | null | null | null |
import unittest
from sqlglot import transpile
from sqlglot.errors import ErrorLevel, UnsupportedError
class TestDialects(unittest.TestCase):
def test_mysql(self):
sql = transpile('SELECT CAST(`a`.`b` AS INT) FROM foo', read='mysql', write='mysql')[0]
self.assertEqual(sql, 'SELECT CAST(`a`.`b` AS INT) FROM foo')
def test_postgres(self):
sql = transpile('SELECT CAST(`a`.`b` AS DOUBLE) FROM foo', read='postgres', write='postgres')[0]
self.assertEqual(sql, 'SELECT CAST(`a`.`b` AS DOUBLE PRECISION) FROM foo')
def test_presto(self):
sql = transpile('SELECT "a"."b" FROM foo', read='presto', write='presto', identify=True)[0]
self.assertEqual(sql, 'SELECT "a"."b" FROM "foo"')
sql = transpile('SELECT a.b FROM foo', read='presto', write='spark')[0]
self.assertEqual(sql, 'SELECT a.b FROM foo')
sql = transpile('SELECT "a"."b" FROM foo', read='presto', write='spark', identify=True)[0]
self.assertEqual(sql, 'SELECT `a`.`b` FROM `foo`')
sql = transpile('SELECT a.b FROM foo', read='presto', write='spark', identify=True)[0]
self.assertEqual(sql, 'SELECT `a`.`b` FROM `foo`')
sql = transpile('SELECT APPROX_DISTINCT(a) FROM foo', read='presto', write='spark')[0]
self.assertEqual(sql, 'SELECT APPROX_COUNT_DISTINCT(a) FROM foo')
sql = transpile(
'SELECT APPROX_DISTINCT(a, 0.1) FROM foo',
read='presto',
write='spark',
unsupported_level=ErrorLevel.IGNORE
)[0]
self.assertEqual(sql, 'SELECT APPROX_COUNT_DISTINCT(a) FROM foo')
ctas = "CREATE TABLE test WITH (FORMAT = 'PARQUET') AS SELECT 1"
self.assertEqual(transpile(ctas, read='presto', write='presto')[0], ctas)
sql = transpile(ctas, read='presto', write='spark')[0]
self.assertEqual(sql, "CREATE TABLE test STORED AS PARQUET AS SELECT 1")
sql = transpile("SELECT JSON_EXTRACT(x, '$.name')", read='presto', write='spark')[0]
self.assertEqual(sql, "SELECT GET_JSON_OBJECT(x, '$.name')")
with self.assertRaises(UnsupportedError):
transpile(
'SELECT APPROX_DISTINCT(a, 0.1) FROM foo',
read='presto',
write='spark',
unsupported_level=ErrorLevel.RAISE,
)
def test_hive(self):
sql = transpile('SELECT "a"."b" FROM "foo"', write='hive')[0]
self.assertEqual(sql, "SELECT `a`.`b` FROM `foo`")
sql = transpile('SELECT CAST(`a`.`b` AS SMALLINT) FROM foo', read='hive', write='hive')[0]
self.assertEqual(sql, 'SELECT CAST(`a`.`b` AS SMALLINT) FROM foo')
sql = transpile('SELECT "a"."b" FROM foo', write='hive', identify=True)[0]
self.assertEqual(sql, 'SELECT `a`.`b` FROM `foo`')
sql = transpile('SELECT APPROX_COUNT_DISTINCT(a) FROM foo', read='hive', write='presto')[0]
self.assertEqual(sql, 'SELECT APPROX_DISTINCT(a) FROM foo')
sql = transpile('CREATE TABLE test STORED AS PARQUET AS SELECT 1', read='hive', write='presto')[0]
self.assertEqual(sql, "CREATE TABLE test WITH (FORMAT = 'PARQUET') AS SELECT 1")
sql = transpile("SELECT GET_JSON_OBJECT(x, '$.name')", read='hive', write='presto')[0]
self.assertEqual(sql, "SELECT JSON_EXTRACT(x, '$.name')")
def test_spark(self):
sql = transpile('SELECT "a"."b" FROM "foo"', write='spark')[0]
self.assertEqual(sql, "SELECT `a`.`b` FROM `foo`")
sql = transpile('SELECT CAST(`a`.`b` AS SMALLINT) FROM foo', read='spark')[0]
self.assertEqual(sql, 'SELECT CAST(`a`.`b` AS SHORT) FROM foo')
sql = transpile('SELECT "a"."b" FROM foo', write='spark', identify=True)[0]
self.assertEqual(sql, 'SELECT `a`.`b` FROM `foo`')
sql = transpile('SELECT APPROX_COUNT_DISTINCT(a) FROM foo', read='spark', write='presto')[0]
self.assertEqual(sql, 'SELECT APPROX_DISTINCT(a) FROM foo')
sql = transpile('CREATE TABLE test STORED AS PARQUET AS SELECT 1', read='spark', write='presto')[0]
self.assertEqual(sql, "CREATE TABLE test WITH (FORMAT = 'PARQUET') AS SELECT 1")
sql = transpile('SELECT /*+ COALESCE(3) */ * FROM x', read='spark')[0]
self.assertEqual(sql, 'SELECT /*+ COALESCE(3) */ * FROM x')
def test_sqlite(self):
sql = transpile('SELECT CAST(`a`.`b` AS SMALLINT) FROM foo', read='sqlite', write='sqlite')[0]
self.assertEqual(sql, 'SELECT CAST(`a`.`b` AS INTEGER) FROM foo')
def test_msaccess(self):
sql = transpile('SELECT [a].[b] FROM [foo]', read='msacess', write='msacess')[0]
self.assertEqual(sql, 'SELECT [a].[b] FROM [foo]')
| 46.663366
| 107
| 0.611288
|
import unittest
from sqlglot import transpile
from sqlglot.errors import ErrorLevel, UnsupportedError
class TestDialects(unittest.TestCase):
def test_mysql(self):
sql = transpile('SELECT CAST(`a`.`b` AS INT) FROM foo', read='mysql', write='mysql')[0]
self.assertEqual(sql, 'SELECT CAST(`a`.`b` AS INT) FROM foo')
def test_postgres(self):
sql = transpile('SELECT CAST(`a`.`b` AS DOUBLE) FROM foo', read='postgres', write='postgres')[0]
self.assertEqual(sql, 'SELECT CAST(`a`.`b` AS DOUBLE PRECISION) FROM foo')
def test_presto(self):
sql = transpile('SELECT "a"."b" FROM foo', read='presto', write='presto', identify=True)[0]
self.assertEqual(sql, 'SELECT "a"."b" FROM "foo"')
sql = transpile('SELECT a.b FROM foo', read='presto', write='spark')[0]
self.assertEqual(sql, 'SELECT a.b FROM foo')
sql = transpile('SELECT "a"."b" FROM foo', read='presto', write='spark', identify=True)[0]
self.assertEqual(sql, 'SELECT `a`.`b` FROM `foo`')
sql = transpile('SELECT a.b FROM foo', read='presto', write='spark', identify=True)[0]
self.assertEqual(sql, 'SELECT `a`.`b` FROM `foo`')
sql = transpile('SELECT APPROX_DISTINCT(a) FROM foo', read='presto', write='spark')[0]
self.assertEqual(sql, 'SELECT APPROX_COUNT_DISTINCT(a) FROM foo')
sql = transpile(
'SELECT APPROX_DISTINCT(a, 0.1) FROM foo',
read='presto',
write='spark',
unsupported_level=ErrorLevel.IGNORE
)[0]
self.assertEqual(sql, 'SELECT APPROX_COUNT_DISTINCT(a) FROM foo')
ctas = "CREATE TABLE test WITH (FORMAT = 'PARQUET') AS SELECT 1"
self.assertEqual(transpile(ctas, read='presto', write='presto')[0], ctas)
sql = transpile(ctas, read='presto', write='spark')[0]
self.assertEqual(sql, "CREATE TABLE test STORED AS PARQUET AS SELECT 1")
sql = transpile("SELECT JSON_EXTRACT(x, '$.name')", read='presto', write='spark')[0]
self.assertEqual(sql, "SELECT GET_JSON_OBJECT(x, '$.name')")
with self.assertRaises(UnsupportedError):
transpile(
'SELECT APPROX_DISTINCT(a, 0.1) FROM foo',
read='presto',
write='spark',
unsupported_level=ErrorLevel.RAISE,
)
def test_hive(self):
sql = transpile('SELECT "a"."b" FROM "foo"', write='hive')[0]
self.assertEqual(sql, "SELECT `a`.`b` FROM `foo`")
sql = transpile('SELECT CAST(`a`.`b` AS SMALLINT) FROM foo', read='hive', write='hive')[0]
self.assertEqual(sql, 'SELECT CAST(`a`.`b` AS SMALLINT) FROM foo')
sql = transpile('SELECT "a"."b" FROM foo', write='hive', identify=True)[0]
self.assertEqual(sql, 'SELECT `a`.`b` FROM `foo`')
sql = transpile('SELECT APPROX_COUNT_DISTINCT(a) FROM foo', read='hive', write='presto')[0]
self.assertEqual(sql, 'SELECT APPROX_DISTINCT(a) FROM foo')
sql = transpile('CREATE TABLE test STORED AS PARQUET AS SELECT 1', read='hive', write='presto')[0]
self.assertEqual(sql, "CREATE TABLE test WITH (FORMAT = 'PARQUET') AS SELECT 1")
sql = transpile("SELECT GET_JSON_OBJECT(x, '$.name')", read='hive', write='presto')[0]
self.assertEqual(sql, "SELECT JSON_EXTRACT(x, '$.name')")
def test_spark(self):
sql = transpile('SELECT "a"."b" FROM "foo"', write='spark')[0]
self.assertEqual(sql, "SELECT `a`.`b` FROM `foo`")
sql = transpile('SELECT CAST(`a`.`b` AS SMALLINT) FROM foo', read='spark')[0]
self.assertEqual(sql, 'SELECT CAST(`a`.`b` AS SHORT) FROM foo')
sql = transpile('SELECT "a"."b" FROM foo', write='spark', identify=True)[0]
self.assertEqual(sql, 'SELECT `a`.`b` FROM `foo`')
sql = transpile('SELECT APPROX_COUNT_DISTINCT(a) FROM foo', read='spark', write='presto')[0]
self.assertEqual(sql, 'SELECT APPROX_DISTINCT(a) FROM foo')
sql = transpile('CREATE TABLE test STORED AS PARQUET AS SELECT 1', read='spark', write='presto')[0]
self.assertEqual(sql, "CREATE TABLE test WITH (FORMAT = 'PARQUET') AS SELECT 1")
sql = transpile('SELECT /*+ COALESCE(3) */ * FROM x', read='spark')[0]
self.assertEqual(sql, 'SELECT /*+ COALESCE(3) */ * FROM x')
def test_sqlite(self):
sql = transpile('SELECT CAST(`a`.`b` AS SMALLINT) FROM foo', read='sqlite', write='sqlite')[0]
self.assertEqual(sql, 'SELECT CAST(`a`.`b` AS INTEGER) FROM foo')
def test_msaccess(self):
sql = transpile('SELECT [a].[b] FROM [foo]', read='msacess', write='msacess')[0]
self.assertEqual(sql, 'SELECT [a].[b] FROM [foo]')
| true
| true
|
f70a6173c8c03d653e854b67508634dd8a582875
| 189
|
py
|
Python
|
tools/pathutils.py
|
Laogeodritt/KazTron
|
42f35e520875b458ffde7c2729865c95de606aca
|
[
"MIT"
] | 6
|
2018-07-04T20:41:01.000Z
|
2021-09-08T08:10:34.000Z
|
tools/pathutils.py
|
Laogeodritt/KazTron
|
42f35e520875b458ffde7c2729865c95de606aca
|
[
"MIT"
] | 259
|
2018-05-01T22:41:32.000Z
|
2022-02-08T23:25:00.000Z
|
tools/pathutils.py
|
Laogeodritt/KazTron
|
42f35e520875b458ffde7c2729865c95de606aca
|
[
"MIT"
] | 6
|
2019-04-16T22:13:15.000Z
|
2021-12-15T08:06:38.000Z
|
from pathlib import Path
import sys
import os
def add_application_path():
app_path = Path(__file__).resolve().parents[1]
sys.path.append(str(app_path))
os.chdir(str(app_path))
| 21
| 50
| 0.730159
|
from pathlib import Path
import sys
import os
def add_application_path():
app_path = Path(__file__).resolve().parents[1]
sys.path.append(str(app_path))
os.chdir(str(app_path))
| true
| true
|
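A brief usage sketch for the helper above: scripts under tools/ call it before importing application modules, so the repository root is both importable and the working directory. The application package name below is hypothetical:

from pathutils import add_application_path

add_application_path()  # repo root added to sys.path and made the cwd
import kaztron           # hypothetical: now resolvable from the repo root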
f70a6277163e65611b36ebc3dd064be9f81de3f7
| 1,159
|
py
|
Python
|
web/addons/hr_holidays/tests/__init__.py
|
diogocs1/comps
|
63df07f6cf21c41e4527c06e2d0499f23f4322e7
|
[
"Apache-2.0"
] | null | null | null |
web/addons/hr_holidays/tests/__init__.py
|
diogocs1/comps
|
63df07f6cf21c41e4527c06e2d0499f23f4322e7
|
[
"Apache-2.0"
] | null | null | null |
web/addons/hr_holidays/tests/__init__.py
|
diogocs1/comps
|
63df07f6cf21c41e4527c06e2d0499f23f4322e7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.hr_holidays.tests import test_holidays_flow
checks = [
test_holidays_flow,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 39.965517
| 78
| 0.630716
| true
| true
|
|
f70a627d9cb818695d9704ab57ce2d2dce8ac924
| 248
|
py
|
Python
|
plex_notifier/__init__.py
|
sudoursa/plex_notifier
|
d0c7123d23b7e5a37ef5ad4ca0ab2c324d9c2332
|
[
"MIT"
] | 1
|
2018-01-22T21:25:40.000Z
|
2018-01-22T21:25:40.000Z
|
plex_notifier/__init__.py
|
sudoursa/plex_notifier
|
d0c7123d23b7e5a37ef5ad4ca0ab2c324d9c2332
|
[
"MIT"
] | 16
|
2018-01-22T15:22:26.000Z
|
2018-01-27T22:18:12.000Z
|
plex_notifier/__init__.py
|
sudoursa/plex_notifier
|
d0c7123d23b7e5a37ef5ad4ca0ab2c324d9c2332
|
[
"MIT"
] | 1
|
2018-01-28T23:49:10.000Z
|
2018-01-28T23:49:10.000Z
|
"""
importing public methods
"""
from .plex_auth import connect_to_plex
from .plex_movies import return_movies
from .plex_tv import return_tv
from .plex_users import get_emails
from .plex_users import unsub_emails
from .plex_email import send_mail
| 24.8
| 38
| 0.834677
|
from .plex_auth import connect_to_plex
from .plex_movies import return_movies
from .plex_tv import return_tv
from .plex_users import get_emails
from .plex_users import unsub_emails
from .plex_email import send_mail
| true
| true
|
f70a628c1ce95ca174fbb067a483d898b989003d
| 1,265
|
py
|
Python
|
_unittests/ut_packaged/test_LONG_script_install.py
|
sdpython/pymyinstall
|
72b3a56a29def0694e34ccae910bf288a95cf4a5
|
[
"MIT"
] | 8
|
2015-08-24T21:01:49.000Z
|
2018-01-04T06:34:51.000Z
|
_unittests/ut_packaged/test_LONG_script_install.py
|
sdpython/pymyinstall
|
72b3a56a29def0694e34ccae910bf288a95cf4a5
|
[
"MIT"
] | 66
|
2015-06-14T22:04:58.000Z
|
2021-11-11T13:46:03.000Z
|
_unittests/ut_packaged/test_LONG_script_install.py
|
sdpython/pymyinstall
|
72b3a56a29def0694e34ccae910bf288a95cf4a5
|
[
"MIT"
] | 5
|
2016-09-13T18:14:46.000Z
|
2021-08-23T12:03:28.000Z
|
"""
@brief test log(time=2s)
"""
import unittest
import warnings
from pyquickhelper.loghelper import fLOG
class TestLONGScriptInstall(unittest.TestCase):
def test_pypi(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
import xmlrpc.client as xmlrpc_client
module_name = "version_information"
url = 'https://pypi.org/pypi/pip/json'
functions = []
with xmlrpc_client.ServerProxy(url) as pypi:
try:
for f in pypi.system.listMethods():
fLOG(f)
sig = pypi.system.methodSignature(f)
fLOG(" ", sig)
h = pypi.system.methodHelp(f)
fLOG(" ", h)
functions.append(f)
if len(functions) > 1:
break
available = pypi.package_releases(module_name, True)
fLOG(available)
except xmlrpc_client.ProtocolError as e:
warnings.warn("PyPI protocal has changed {0}".format(e))
functions = [None, None]
assert len(functions) > 1
if __name__ == "__main__":
unittest.main()
| 30.119048
| 72
| 0.527273
|
import unittest
import warnings
from pyquickhelper.loghelper import fLOG
class TestLONGScriptInstall(unittest.TestCase):
def test_pypi(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
import xmlrpc.client as xmlrpc_client
module_name = "version_information"
url = 'https://pypi.org/pypi/pip/json'
functions = []
with xmlrpc_client.ServerProxy(url) as pypi:
try:
for f in pypi.system.listMethods():
fLOG(f)
sig = pypi.system.methodSignature(f)
fLOG(" ", sig)
h = pypi.system.methodHelp(f)
fLOG(" ", h)
functions.append(f)
if len(functions) > 1:
break
available = pypi.package_releases(module_name, True)
fLOG(available)
except xmlrpc_client.ProtocolError as e:
warnings.warn("PyPI protocal has changed {0}".format(e))
functions = [None, None]
assert len(functions) > 1
if __name__ == "__main__":
unittest.main()
| true
| true
|
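The broad except in the test above exists because PyPI's XML-RPC interface has been largely retired; the JSON API is the supported replacement. A standard-library sketch of the equivalent release lookup, using the documented https://pypi.org/pypi/<name>/json endpoint (note PyPI has marked the releases key deprecated, though it is still served):

import json
from urllib.request import urlopen

def package_releases(module_name):
    # Query PyPI's JSON API and return the known release versions.
    with urlopen('https://pypi.org/pypi/{}/json'.format(module_name)) as resp:
        data = json.load(resp)
    return sorted(data['releases'])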
f70a6377aa4fa71b8438d2f648431ccecf2a659a
| 1,216
|
py
|
Python
|
ambassador/views.py
|
cforcross/django-vue-admin
|
269ba3047b6762c565d9a4c306efc86c3ffd4867
|
[
"MIT"
] | null | null | null |
ambassador/views.py
|
cforcross/django-vue-admin
|
269ba3047b6762c565d9a4c306efc86c3ffd4867
|
[
"MIT"
] | null | null | null |
ambassador/views.py
|
cforcross/django-vue-admin
|
269ba3047b6762c565d9a4c306efc86c3ffd4867
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import exceptions
from common.serializers import UserSerializer
from core.models import User,Product,Link,OrderItem,Order
from common.authentication import JWTAuthentication
from rest_framework.permissions import IsAuthenticated
from .serializers import ProductSerializer
from django.core.cache import cache
import time
# Create your views here.
class ProductFrontendAPIView(APIView):
# authentication_classes =[JWTAuthentication]
# permission_classes=[IsAuthenticated]
def get(self, request):
products = Product.objects.all()
serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
class ProductBackendAPIView(APIView):
def get(self, request):
products = cache.get('products_backend')
if not products:
time.sleep(2)
products = list(Product.objects.all())
cache.set('products_backend', products, timeout=60*30)  # key first, then value; the unconditional re-query below defeated the cache and is dropped
serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
| 39.225806
| 65
| 0.754112
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import exceptions
from common.serializers import UserSerializer
from core.models import User,Product,Link,OrderItem,Order
from common.authentication import JWTAuthentication
from rest_framework.permissions import IsAuthenticated
from .serializers import ProductSerializer
from django.core.cache import cache
import time
class ProductFrontendAPIView(APIView):
def get(self, request):
products = Product.objects.all()
serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
class ProductBackendAPIView(APIView):
def get(self, request):
products = cache.get('products_backend')
if not products:
time.sleep(2)
products = list(Product.objects.all())
cache.set(products, 'products_backend',timeout=60*30)
products = Product.objects.all()
serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
| true
| true
|
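The backend view above implements Django's cache-aside pattern; with the corrected cache.set argument order (key, value, timeout), its core can be factored into a standalone helper. A minimal sketch; the key name and timeout mirror the record:

from django.core.cache import cache

def get_cached(key, fetch, timeout=60 * 30):
    # Try the cache first; on a miss, fetch, repopulate, and return.
    value = cache.get(key)
    if value is None:
        value = list(fetch())  # e.g. lambda: Product.objects.all()
        cache.set(key, value, timeout=timeout)
    return value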
f70a649a6c145f8eae91526b87cd9cfca92cdb65
| 679
|
py
|
Python
|
pyglet/window/cocoa/systemcursor.py
|
seeminglee/pyglet64
|
3dd167b5b0d3ad132a157e404586e53c2bb21736
|
[
"BSD-3-Clause"
] | 1
|
2016-01-09T03:47:39.000Z
|
2016-01-09T03:47:39.000Z
|
pyglet/window/cocoa/systemcursor.py
|
seeminglee/pyglet64
|
3dd167b5b0d3ad132a157e404586e53c2bb21736
|
[
"BSD-3-Clause"
] | null | null | null |
pyglet/window/cocoa/systemcursor.py
|
seeminglee/pyglet64
|
3dd167b5b0d3ad132a157e404586e53c2bb21736
|
[
"BSD-3-Clause"
] | null | null | null |
from pyglet.libs.darwin.objc_runtime import *
# This class is a wrapper around NSCursor which prevents us from
# sending too many hide or unhide messages in a row. Apparently
# NSCursor treats them like retain/release messages, which can be
# problematic when we are e.g. switching between window & fullscreen.
class SystemCursor:
cursor_is_hidden = False
@classmethod
def hide(cls):
if not cls.cursor_is_hidden:
send_message('NSCursor', 'hide')
cls.cursor_is_hidden = True
@classmethod
def unhide(cls):
if cls.cursor_is_hidden:
send_message('NSCursor', 'unhide')
cls.cursor_is_hidden = False
| 35.736842
| 69
| 0.693667
|
from pyglet.libs.darwin.objc_runtime import *
class SystemCursor:
cursor_is_hidden = False
@classmethod
def hide(cls):
if not cls.cursor_is_hidden:
send_message('NSCursor', 'hide')
cls.cursor_is_hidden = True
@classmethod
def unhide(cls):
if cls.cursor_is_hidden:
send_message('NSCursor', 'unhide')
cls.cursor_is_hidden = False
| true
| true
|
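The class-attribute guard above makes repeated calls idempotent even though NSCursor counts hide/unhide like retain/release. A small illustration, assuming the module path from the record:

from pyglet.window.cocoa.systemcursor import SystemCursor

SystemCursor.hide()
SystemCursor.hide()    # guarded: no second 'hide' message reaches NSCursor
SystemCursor.unhide()  # a single 'unhide' restores the cursor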
f70a67ae0050e3dd5dbc7ea33789132d8704dd2b
| 269
|
py
|
Python
|
src/pycounts_polluxtroy3758/__init__.py
|
polluxtroy3758/pycounts
|
92bcbdb2609eb543c631293c7cf3babb0472565c
|
[
"MIT"
] | null | null | null |
src/pycounts_polluxtroy3758/__init__.py
|
polluxtroy3758/pycounts
|
92bcbdb2609eb543c631293c7cf3babb0472565c
|
[
"MIT"
] | null | null | null |
src/pycounts_polluxtroy3758/__init__.py
|
polluxtroy3758/pycounts
|
92bcbdb2609eb543c631293c7cf3babb0472565c
|
[
"MIT"
] | null | null | null |
# read version from installed package
from importlib.metadata import version
__version__ = version("pycounts_polluxtroy3758")
from pycounts_polluxtroy3758.plotting import plot_words # noqa: F401
from pycounts_polluxtroy3758.pycounts import count_words # noqa: F401
| 33.625
| 70
| 0.836431
|
from importlib.metadata import version
__version__ = version("pycounts_polluxtroy3758")
from pycounts_polluxtroy3758.plotting import plot_words
from pycounts_polluxtroy3758.pycounts import count_words
| true
| true
|
f70a68ac62bf6c61cd21de3ffd41d24a77bdf900
| 11,410
|
py
|
Python
|
examples/adminapi.py
|
fkaufer/confluent-kafka-python
|
c4ff376cdbfba41b08806df8e4a68d68f953b593
|
[
"Apache-2.0"
] | 1
|
2018-07-23T15:01:15.000Z
|
2018-07-23T15:01:15.000Z
|
examples/adminapi.py
|
AkuDTA/confluent-kafka-python
|
e4f7bb6d2feeae33ec1aa69f49bc3277265dba48
|
[
"Apache-2.0"
] | 1
|
2018-06-14T19:53:56.000Z
|
2018-06-14T19:53:56.000Z
|
examples/adminapi.py
|
AkuDTA/confluent-kafka-python
|
e4f7bb6d2feeae33ec1aa69f49bc3277265dba48
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2018 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Example Admin clients.
#
from confluent_kafka.admin import AdminClient, NewTopic, NewPartitions, ConfigResource, ConfigEntry
from confluent_kafka import KafkaException
import sys
import threading
import logging
logging.basicConfig()
def example_create_topics(a, topics):
""" Create topics """
new_topics = [NewTopic(topic, num_partitions=3, replication_factor=1) for topic in topics]
# Call create_topics to asynchronously create topics, a dict
# of <topic,future> is returned.
fs = a.create_topics(new_topics)
# Wait for operation to finish.
# Timeouts are preferably controlled by passing request_timeout=15.0
# to the create_topics() call.
# All futures will finish at the same time.
for topic, f in fs.items():
try:
f.result() # The result itself is None
print("Topic {} created".format(topic))
except Exception as e:
print("Failed to create topic {}: {}".format(topic, e))
def example_delete_topics(a, topics):
""" delete topics """
# Call delete_topics to asynchronously delete topics, a future is returned.
# By default this operation on the broker returns immediately while
# topics are deleted in the background. But here we give it some time (30s)
# to propagate in the cluster before returning.
#
# Returns a dict of <topic,future>.
fs = a.delete_topics(topics, operation_timeout=30)
# Wait for operation to finish.
for topic, f in fs.items():
try:
f.result() # The result itself is None
print("Topic {} deleted".format(topic))
except Exception as e:
print("Failed to delete topic {}: {}".format(topic, e))
def example_create_partitions(a, topics):
""" create partitions """
new_parts = [NewPartitions(topic, int(new_total_count)) for
topic, new_total_count in zip(topics[0::2], topics[1::2])]
# Try switching validate_only to True to only validate the operation
# on the broker but not actually perform it.
fs = a.create_partitions(new_parts, validate_only=False)
# Wait for operation to finish.
for topic, f in fs.items():
try:
f.result() # The result itself is None
print("Additional partitions created for topic {}".format(topic))
except Exception as e:
print("Failed to add partitions to topic {}: {}".format(topic, e))
def print_config(config, depth):
print('%40s = %-50s [%s,is:read-only=%r,default=%r,sensitive=%r,synonym=%r,synonyms=%s]' %
((' ' * depth) + config.name, config.value,
ConfigEntry.config_source_to_str(config.source),
config.is_read_only, config.is_default,
config.is_sensitive, config.is_synonym,
["%s:%s" % (x.name, ConfigEntry.config_source_to_str(x.source))
for x in iter(config.synonyms.values())]))
def example_describe_configs(a, args):
""" describe configs """
resources = [ConfigResource(restype, resname) for
restype, resname in zip(args[0::2], args[1::2])]
fs = a.describe_configs(resources)
# Wait for operation to finish.
for res, f in fs.items():
try:
configs = f.result()
for config in iter(configs.values()):
print_config(config, 1)
except KafkaException as e:
print("Failed to describe {}: {}".format(res, e))
except Exception as e:
raise
def example_alter_configs(a, args):
""" Alter configs atomically, replacing non-specified
configuration properties with their default values.
"""
resources = []
for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
resource = ConfigResource(restype, resname)
resources.append(resource)
for k, v in [conf.split('=') for conf in configs.split(',')]:
resource.set_config(k, v)
fs = a.alter_configs(resources)
# Wait for operation to finish.
for res, f in fs.items():
try:
f.result() # empty, but raises exception on failure
print("{} configuration successfully altered".format(res))
except Exception:
raise
def example_delta_alter_configs(a, args):
"""
The AlterConfigs Kafka API requires all configuration to be passed,
any left out configuration properties will revert to their default settings.
This example shows how to just modify the supplied configuration entries
by first reading the configuration from the broker, updating the supplied
configuration with the broker configuration (without overwriting), and
then writing it all back.
The async nature of futures is also show-cased, which makes this example
a bit more complex than it needs to be in the synchronous case.
"""
# Convert supplied config to resources.
# We can reuse the same resources both for describe_configs and
# alter_configs.
resources = []
for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
resource = ConfigResource(restype, resname)
resources.append(resource)
for k, v in [conf.split('=') for conf in configs.split(',')]:
resource.set_config(k, v)
# Set up a locked counter and an Event (for signaling) to track when the
# second level of futures are done. This is a bit of a contrived example
# due to no other asynchronous mechanism being used, so we'll need
# to wait on something to signal completion.
class WaitZero(object):
def __init__(self, waitcnt):
self.cnt = waitcnt
self.lock = threading.Lock()
self.event = threading.Event()
def decr(self):
""" Decrement cnt by 1"""
with self.lock:
assert self.cnt > 0
self.cnt -= 1
self.event.set()
def wait(self):
""" Wait until cnt reaches 0 """
self.lock.acquire()
while self.cnt > 0:
self.lock.release()
self.event.wait()
self.event.clear()
self.lock.acquire()
self.lock.release()
def __len__(self):
with self.lock:
return self.cnt
wait_zero = WaitZero(len(resources))
# Read existing configuration from cluster
fs = a.describe_configs(resources)
def delta_alter_configs_done(fut, resource):
e = fut.exception()
if e is not None:
print("Config update for {} failed: {}".format(resource, e))
else:
print("Config for {} updated".format(resource))
wait_zero.decr()
def delta_alter_configs(resource, remote_config):
print("Updating {} supplied config entries {} with {} config entries read from cluster".format(
len(resource), resource, len(remote_config)))
# Only set configuration that is not default
for k, entry in [(k, v) for k, v in remote_config.items() if not v.is_default]:
resource.set_config(k, entry.value, overwrite=False)
fs = a.alter_configs([resource])
fs[resource].add_done_callback(lambda fut: delta_alter_configs_done(fut, resource))
# For each resource's future set up a completion callback
# that in turn calls alter_configs() on that single resource.
# This is inefficient since the resources can usually go in
# one single alter_configs() call, but we're also show-casing
# the futures here.
for res, f in fs.items():
f.add_done_callback(lambda fut, resource=res: delta_alter_configs(resource, fut.result()))
# Wait for done callbacks to be triggered and operations to complete.
print("Waiting for {} resource updates to finish".format(len(wait_zero)))
wait_zero.wait()
def example_list(a, args):
""" list topics and cluster metadata """
if len(args) == 0:
what = "all"
else:
what = args[0]
md = a.list_topics(timeout=10)
print("Cluster {} metadata (response from broker {}):".format(md.cluster_id, md.orig_broker_name))
if what in ("all", "brokers"):
print(" {} brokers:".format(len(md.brokers)))
for b in iter(md.brokers.values()):
if b.id == md.controller_id:
print(" {} (controller)".format(b))
else:
print(" {}".format(b))
if what not in ("all", "topics"):
return
print(" {} topics:".format(len(md.topics)))
for t in iter(md.topics.values()):
if t.error is not None:
errstr = ": {}".format(t.error)
else:
errstr = ""
print(" \"{}\" with {} partition(s){}".format(t, len(t.partitions), errstr))
for p in iter(t.partitions.values()):
if p.error is not None:
errstr = ": {}".format(p.error)
else:
errstr = ""
print(" partition {} leader: {}, replicas: {}, isrs: {}".format(
p.id, p.leader, p.replicas, p.isrs, errstr))
if __name__ == '__main__':
if len(sys.argv) < 3:
sys.stderr.write('Usage: %s <bootstrap-brokers> <operation> <args..>\n\n' % sys.argv[0])
sys.stderr.write('operations:\n')
sys.stderr.write(' create_topics <topic1> <topic2> ..\n')
sys.stderr.write(' delete_topics <topic1> <topic2> ..\n')
sys.stderr.write(' create_partitions <topic1> <new_total_count1> <topic2> <new_total_count2> ..\n')
sys.stderr.write(' describe_configs <resource_type1> <resource_name1> <resource2> <resource_name2> ..\n')
sys.stderr.write(' alter_configs <resource_type1> <resource_name1> ' +
'<config=val,config2=val2> <resource_type2> <resource_name2> <config..> ..\n')
sys.stderr.write(' delta_alter_configs <resource_type1> <resource_name1> ' +
'<config=val,config2=val2> <resource_type2> <resource_name2> <config..> ..\n')
sys.stderr.write(' list [<all|topics|brokers>]\n')
sys.exit(1)
broker = sys.argv[1]
operation = sys.argv[2]
args = sys.argv[3:]
# Create Admin client
a = AdminClient({'bootstrap.servers': broker})
opsmap = {'create_topics': example_create_topics,
'delete_topics': example_delete_topics,
'create_partitions': example_create_partitions,
'describe_configs': example_describe_configs,
'alter_configs': example_alter_configs,
'delta_alter_configs': example_delta_alter_configs,
'list': example_list}
if operation not in opsmap:
sys.stderr.write('Unknown operation: %s\n' % operation)
sys.exit(1)
opsmap[operation](a, args)
| 36.453674
| 113
| 0.627695
|
from confluent_kafka.admin import AdminClient, NewTopic, NewPartitions, ConfigResource, ConfigEntry
from confluent_kafka import KafkaException
import sys
import threading
import logging
logging.basicConfig()
def example_create_topics(a, topics):
new_topics = [NewTopic(topic, num_partitions=3, replication_factor=1) for topic in topics]
fs = a.create_topics(new_topics)
for topic, f in fs.items():
try:
f.result()
print("Topic {} created".format(topic))
except Exception as e:
print("Failed to create topic {}: {}".format(topic, e))
def example_delete_topics(a, topics):
fs = a.delete_topics(topics, operation_timeout=30)
for topic, f in fs.items():
try:
f.result()
print("Topic {} deleted".format(topic))
except Exception as e:
print("Failed to delete topic {}: {}".format(topic, e))
def example_create_partitions(a, topics):
new_parts = [NewPartitions(topic, int(new_total_count)) for
topic, new_total_count in zip(topics[0::2], topics[1::2])]
fs = a.create_partitions(new_parts, validate_only=False)
for topic, f in fs.items():
try:
f.result()
print("Additional partitions created for topic {}".format(topic))
except Exception as e:
print("Failed to add partitions to topic {}: {}".format(topic, e))
def print_config(config, depth):
print('%40s = %-50s [%s,is:read-only=%r,default=%r,sensitive=%r,synonym=%r,synonyms=%s]' %
((' ' * depth) + config.name, config.value,
ConfigEntry.config_source_to_str(config.source),
config.is_read_only, config.is_default,
config.is_sensitive, config.is_synonym,
["%s:%s" % (x.name, ConfigEntry.config_source_to_str(x.source))
for x in iter(config.synonyms.values())]))
def example_describe_configs(a, args):
resources = [ConfigResource(restype, resname) for
restype, resname in zip(args[0::2], args[1::2])]
fs = a.describe_configs(resources)
for res, f in fs.items():
try:
configs = f.result()
for config in iter(configs.values()):
print_config(config, 1)
except KafkaException as e:
print("Failed to describe {}: {}".format(res, e))
except Exception as e:
raise
def example_alter_configs(a, args):
resources = []
for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
resource = ConfigResource(restype, resname)
resources.append(resource)
for k, v in [conf.split('=') for conf in configs.split(',')]:
resource.set_config(k, v)
fs = a.alter_configs(resources)
for res, f in fs.items():
try:
f.result()
print("{} configuration successfully altered".format(res))
except Exception:
raise
def example_delta_alter_configs(a, args):
resources = []
for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
resource = ConfigResource(restype, resname)
resources.append(resource)
for k, v in [conf.split('=') for conf in configs.split(',')]:
resource.set_config(k, v)
# to wait on something to signal completion.
class WaitZero(object):
def __init__(self, waitcnt):
self.cnt = waitcnt
self.lock = threading.Lock()
self.event = threading.Event()
def decr(self):
with self.lock:
assert self.cnt > 0
self.cnt -= 1
self.event.set()
def wait(self):
self.lock.acquire()
while self.cnt > 0:
self.lock.release()
self.event.wait()
self.event.clear()
self.lock.acquire()
self.lock.release()
def __len__(self):
with self.lock:
return self.cnt
wait_zero = WaitZero(len(resources))
# Read existing configuration from cluster
fs = a.describe_configs(resources)
def delta_alter_configs_done(fut, resource):
e = fut.exception()
if e is not None:
print("Config update for {} failed: {}".format(resource, e))
else:
print("Config for {} updated".format(resource))
wait_zero.decr()
def delta_alter_configs(resource, remote_config):
print("Updating {} supplied config entries {} with {} config entries read from cluster".format(
len(resource), resource, len(remote_config)))
# Only set configuration that is not default
for k, entry in [(k, v) for k, v in remote_config.items() if not v.is_default]:
resource.set_config(k, entry.value, overwrite=False)
fs = a.alter_configs([resource])
fs[resource].add_done_callback(lambda fut: delta_alter_configs_done(fut, resource))
# For each resource's future set up a completion callback
# the futures here.
for res, f in fs.items():
f.add_done_callback(lambda fut, resource=res: delta_alter_configs(resource, fut.result()))
# Wait for done callbacks to be triggered and operations to complete.
print("Waiting for {} resource updates to finish".format(len(wait_zero)))
wait_zero.wait()
def example_list(a, args):
if len(args) == 0:
what = "all"
else:
what = args[0]
md = a.list_topics(timeout=10)
print("Cluster {} metadata (response from broker {}):".format(md.cluster_id, md.orig_broker_name))
if what in ("all", "brokers"):
print(" {} brokers:".format(len(md.brokers)))
for b in iter(md.brokers.values()):
if b.id == md.controller_id:
print(" {} (controller)".format(b))
else:
print(" {}".format(b))
if what not in ("all", "topics"):
return
print(" {} topics:".format(len(md.topics)))
for t in iter(md.topics.values()):
if t.error is not None:
errstr = ": {}".format(t.error)
else:
errstr = ""
print(" \"{}\" with {} partition(s){}".format(t, len(t.partitions), errstr))
for p in iter(t.partitions.values()):
if p.error is not None:
errstr = ": {}".format(p.error)
else:
errstr = ""
print(" partition {} leader: {}, replicas: {}, isrs: {}".format(
p.id, p.leader, p.replicas, p.isrs, errstr))
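# Example invocation (broker address is hypothetical):
#   python <this script> mybroker:9092 list topics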
if __name__ == '__main__':
if len(sys.argv) < 3:
sys.stderr.write('Usage: %s <bootstrap-brokers> <operation> <args..>\n\n' % sys.argv[0])
sys.stderr.write('operations:\n')
sys.stderr.write(' create_topics <topic1> <topic2> ..\n')
sys.stderr.write(' delete_topics <topic1> <topic2> ..\n')
sys.stderr.write(' create_partitions <topic1> <new_total_count1> <topic2> <new_total_count2> ..\n')
sys.stderr.write(' describe_configs <resource_type1> <resource_name1> <resource2> <resource_name2> ..\n')
sys.stderr.write(' alter_configs <resource_type1> <resource_name1> ' +
'<config=val,config2=val2> <resource_type2> <resource_name2> <config..> ..\n')
sys.stderr.write(' delta_alter_configs <resource_type1> <resource_name1> ' +
'<config=val,config2=val2> <resource_type2> <resource_name2> <config..> ..\n')
sys.stderr.write(' list [<all|topics|brokers>]\n')
sys.exit(1)
broker = sys.argv[1]
operation = sys.argv[2]
args = sys.argv[3:]
# Create Admin client
a = AdminClient({'bootstrap.servers': broker})
opsmap = {'create_topics': example_create_topics,
'delete_topics': example_delete_topics,
'create_partitions': example_create_partitions,
'describe_configs': example_describe_configs,
'alter_configs': example_alter_configs,
'delta_alter_configs': example_delta_alter_configs,
'list': example_list}
if operation not in opsmap:
sys.stderr.write('Unknown operation: %s\n' % operation)
sys.exit(1)
opsmap[operation](a, args)
| true
| true
|
f70a6906b34d328a586c5a69de02ca915b6ad0ee
| 5,457
|
py
|
Python
|
fixit/cli/run_rules.py
|
isidentical/Fixit
|
e9bd1bcce14922d44086ee31798959b302377338
|
[
"Apache-2.0"
] | null | null | null |
fixit/cli/run_rules.py
|
isidentical/Fixit
|
e9bd1bcce14922d44086ee31798959b302377338
|
[
"Apache-2.0"
] | null | null | null |
fixit/cli/run_rules.py
|
isidentical/Fixit
|
e9bd1bcce14922d44086ee31798959b302377338
|
[
"Apache-2.0"
] | 1
|
2020-09-09T09:57:35.000Z
|
2020-09-09T09:57:35.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Usage:
#
# $ python -m fixit.cli.run_rules --help
# $ python -m fixit.cli.run_rules
# $ python -m fixit.cli.run_rules --rules AvoidOrInExceptRule
# $ python -m fixit.cli.run_rules . --rules AvoidOrInExceptRule NoUnnecessaryListComprehensionRule
# $ python -m fixit.cli.run_rules . --rules AvoidOrInExceptRule my.custom.rules.package
# $ python -m fixit.cli.run_rules . --rules fixit.rules
import argparse
import itertools
import shutil
import sys
import time
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Iterable, Mapping, Optional, Sequence
from libcst import ParserSyntaxError, parse_module
from libcst.metadata import MetadataWrapper
from fixit.cli import find_files, map_paths
from fixit.cli.args import (
get_compact_parser,
get_multiprocessing_parser,
get_paths_parser,
get_rules_parser,
get_skip_ignore_byte_marker_parser,
get_use_ignore_comments_parser,
)
from fixit.cli.formatter import LintRuleReportFormatter
from fixit.cli.full_repo_metadata import (
get_metadata_caches,
rules_require_metadata_cache,
)
from fixit.cli.utils import print_red
from fixit.common.utils import LintRuleCollectionT
from fixit.rule_lint_engine import lint_file
if TYPE_CHECKING:
from libcst.metadata.base_provider import ProviderT
@dataclass(frozen=True)
class LintOpts:
rules: LintRuleCollectionT
use_ignore_byte_markers: bool
use_ignore_comments: bool
formatter: LintRuleReportFormatter
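# Lint a single file and return its reports pre-formatted for display; parse
# errors are printed and swallowed so one bad file does not abort the run.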
def get_formatted_reports_for_path(
path: Path,
opts: LintOpts,
metadata_cache: Optional[Mapping["ProviderT", object]] = None,
) -> Iterable[str]:
with open(path, "rb") as f:
source = f.read()
try:
cst_wrapper = None
if metadata_cache is not None:
cst_wrapper = MetadataWrapper(parse_module(source), True, metadata_cache)
raw_reports = lint_file(
path,
source,
rules=opts.rules,
use_ignore_byte_markers=opts.use_ignore_byte_markers,
use_ignore_comments=opts.use_ignore_comments,
cst_wrapper=cst_wrapper,
)
except (SyntaxError, ParserSyntaxError) as e:
print_red(
f"Encountered the following error while parsing source code in file {path}:"
)
print(e)
return []
# linter completed successfully
return [opts.formatter.format(rr) for rr in raw_reports]
def main(raw_args: Sequence[str]) -> int:
parser = argparse.ArgumentParser(
description=(
"Validates your lint rules by running them against the specified, "
+ "directory or file(s). This is not a substitute for unit tests, "
+ "but it can provide additional confidence in your lint rules.\n"
+ "If no lint rules or packages are specified, runs all lint rules "
+ "found in the packages specified in `fixit.config.yaml`."
),
parents=[
get_paths_parser(),
get_rules_parser(),
get_use_ignore_comments_parser(),
get_skip_ignore_byte_marker_parser(),
get_compact_parser(),
get_multiprocessing_parser(),
],
)
parser.add_argument(
"--cache-timeout",
type=int,
help="Timeout (seconds) for metadata cache fetching. Default is 2 seconds.",
default=2,
)
args = parser.parse_args(raw_args)
width = shutil.get_terminal_size(fallback=(80, 24)).columns
# expand path if it's a directory
file_paths = tuple(find_files(args.paths))
all_rules = args.rules
if not args.compact:
print(f"Scanning {len(file_paths)} files")
print(f"Testing {len(all_rules)} rules")
print()
start_time = time.time()
metadata_caches: Optional[Mapping[str, Mapping["ProviderT", object]]] = None
if rules_require_metadata_cache(all_rules):
metadata_caches = get_metadata_caches(args.cache_timeout, file_paths)
# opts is a more type-safe version of args that we pass around
opts = LintOpts(
rules=all_rules,
use_ignore_byte_markers=args.use_ignore_byte_markers,
use_ignore_comments=args.use_ignore_comments,
formatter=LintRuleReportFormatter(width, args.compact),
)
formatted_reports_iter = itertools.chain.from_iterable(
map_paths(
get_formatted_reports_for_path,
file_paths,
opts,
workers=args.workers,
metadata_caches=metadata_caches,
)
)
formatted_reports = []
for formatted_report in formatted_reports_iter:
# Reports are yielded as soon as they're available. Stream the output to the
# terminal.
print(formatted_report)
# save the report from the iterator for later use
formatted_reports.append(formatted_report)
if not args.compact:
print()
print(
f"Found {len(formatted_reports)} reports in {len(file_paths)} files in "
+ f"{time.time() - start_time :.2f} seconds."
)
# Return with an exit code of 1 if there are any violations found.
return int(bool(formatted_reports))
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 31.912281
| 100
| 0.68206
|
import argparse
import itertools
import shutil
import sys
import time
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Iterable, Mapping, Optional, Sequence
from libcst import ParserSyntaxError, parse_module
from libcst.metadata import MetadataWrapper
from fixit.cli import find_files, map_paths
from fixit.cli.args import (
get_compact_parser,
get_multiprocessing_parser,
get_paths_parser,
get_rules_parser,
get_skip_ignore_byte_marker_parser,
get_use_ignore_comments_parser,
)
from fixit.cli.formatter import LintRuleReportFormatter
from fixit.cli.full_repo_metadata import (
get_metadata_caches,
rules_require_metadata_cache,
)
from fixit.cli.utils import print_red
from fixit.common.utils import LintRuleCollectionT
from fixit.rule_lint_engine import lint_file
if TYPE_CHECKING:
from libcst.metadata.base_provider import ProviderT
@dataclass(frozen=True)
class LintOpts:
rules: LintRuleCollectionT
use_ignore_byte_markers: bool
use_ignore_comments: bool
formatter: LintRuleReportFormatter
def get_formatted_reports_for_path(
path: Path,
opts: LintOpts,
metadata_cache: Optional[Mapping["ProviderT", object]] = None,
) -> Iterable[str]:
with open(path, "rb") as f:
source = f.read()
try:
cst_wrapper = None
if metadata_cache is not None:
cst_wrapper = MetadataWrapper(parse_module(source), True, metadata_cache)
raw_reports = lint_file(
path,
source,
rules=opts.rules,
use_ignore_byte_markers=opts.use_ignore_byte_markers,
use_ignore_comments=opts.use_ignore_comments,
cst_wrapper=cst_wrapper,
)
except (SyntaxError, ParserSyntaxError) as e:
print_red(
f"Encountered the following error while parsing source code in file {path}:"
)
print(e)
return []
return [opts.formatter.format(rr) for rr in raw_reports]
def main(raw_args: Sequence[str]) -> int:
parser = argparse.ArgumentParser(
description=(
"Validates your lint rules by running them against the specified, "
+ "directory or file(s). This is not a substitute for unit tests, "
+ "but it can provide additional confidence in your lint rules.\n"
+ "If no lint rules or packages are specified, runs all lint rules "
+ "found in the packages specified in `fixit.config.yaml`."
),
parents=[
get_paths_parser(),
get_rules_parser(),
get_use_ignore_comments_parser(),
get_skip_ignore_byte_marker_parser(),
get_compact_parser(),
get_multiprocessing_parser(),
],
)
parser.add_argument(
"--cache-timeout",
type=int,
help="Timeout (seconds) for metadata cache fetching. Default is 2 seconds.",
default=2,
)
args = parser.parse_args(raw_args)
width = shutil.get_terminal_size(fallback=(80, 24)).columns
file_paths = tuple(find_files(args.paths))
all_rules = args.rules
if not args.compact:
print(f"Scanning {len(file_paths)} files")
print(f"Testing {len(all_rules)} rules")
print()
start_time = time.time()
metadata_caches: Optional[Mapping[str, Mapping["ProviderT", object]]] = None
if rules_require_metadata_cache(all_rules):
metadata_caches = get_metadata_caches(args.cache_timeout, file_paths)
# opts is a more type-safe version of args that we pass around
opts = LintOpts(
rules=all_rules,
use_ignore_byte_markers=args.use_ignore_byte_markers,
use_ignore_comments=args.use_ignore_comments,
formatter=LintRuleReportFormatter(width, args.compact),
)
formatted_reports_iter = itertools.chain.from_iterable(
map_paths(
get_formatted_reports_for_path,
file_paths,
opts,
workers=args.workers,
metadata_caches=metadata_caches,
)
)
formatted_reports = []
for formatted_report in formatted_reports_iter:
# Reports are yielded as soon as they're available. Stream the output to the
print(formatted_report)
formatted_reports.append(formatted_report)
if not args.compact:
print()
print(
f"Found {len(formatted_reports)} reports in {len(file_paths)} files in "
+ f"{time.time() - start_time :.2f} seconds."
)
return int(bool(formatted_reports))
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| true
| true
|
f70a695a360de2a4638bc8fac6801bae01f235ff
| 19,184
|
py
|
Python
|
gym/envs/__init__.py
|
Jekyll1021/gym
|
1701741df2e6ae9a762fe647122ee8344f586bc9
|
[
"Python-2.0",
"OLDAP-2.7"
] | null | null | null |
gym/envs/__init__.py
|
Jekyll1021/gym
|
1701741df2e6ae9a762fe647122ee8344f586bc9
|
[
"Python-2.0",
"OLDAP-2.7"
] | null | null | null |
gym/envs/__init__.py
|
Jekyll1021/gym
|
1701741df2e6ae9a762fe647122ee8344f586bc9
|
[
"Python-2.0",
"OLDAP-2.7"
] | null | null | null |
from gym.envs.registration import registry, register, make, spec
# Algorithmic
# ----------------------------------------
register(
id='Copy-v0',
entry_point='gym.envs.algorithmic:CopyEnv',
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='RepeatCopy-v0',
entry_point='gym.envs.algorithmic:RepeatCopyEnv',
max_episode_steps=200,
reward_threshold=75.0,
)
register(
id='ReversedAddition-v0',
entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
kwargs={'rows' : 2},
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='ReversedAddition3-v0',
entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
kwargs={'rows' : 3},
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='DuplicatedInput-v0',
entry_point='gym.envs.algorithmic:DuplicatedInputEnv',
max_episode_steps=200,
reward_threshold=9.0,
)
register(
id='Reverse-v0',
entry_point='gym.envs.algorithmic:ReverseEnv',
max_episode_steps=200,
reward_threshold=25.0,
)
# Classic
# ----------------------------------------
register(
id='CartPole-v0',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=200,
reward_threshold=195.0,
)
register(
id='CartPole-v1',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=500,
reward_threshold=475.0,
)
register(
id='MountainCar-v0',
entry_point='gym.envs.classic_control:MountainCarEnv',
max_episode_steps=200,
reward_threshold=-110.0,
)
register(
id='MountainCarContinuous-v0',
entry_point='gym.envs.classic_control:Continuous_MountainCarEnv',
max_episode_steps=999,
reward_threshold=90.0,
)
register(
id='Pendulum-v0',
entry_point='gym.envs.classic_control:PendulumEnv',
max_episode_steps=200,
)
register(
id='Acrobot-v1',
entry_point='gym.envs.classic_control:AcrobotEnv',
max_episode_steps=500,
)
# Box2d
# ----------------------------------------
register(
id='LunarLander-v2',
entry_point='gym.envs.box2d:LunarLander',
max_episode_steps=1000,
reward_threshold=200,
)
register(
id='LunarLanderContinuous-v2',
entry_point='gym.envs.box2d:LunarLanderContinuous',
max_episode_steps=1000,
reward_threshold=200,
)
register(
id='BipedalWalker-v2',
entry_point='gym.envs.box2d:BipedalWalker',
max_episode_steps=1600,
reward_threshold=300,
)
register(
id='BipedalWalkerHardcore-v2',
entry_point='gym.envs.box2d:BipedalWalkerHardcore',
max_episode_steps=2000,
reward_threshold=300,
)
register(
id='CarRacing-v0',
entry_point='gym.envs.box2d:CarRacing',
max_episode_steps=1000,
reward_threshold=900,
)
# Toy Text
# ----------------------------------------
register(
id='Blackjack-v0',
entry_point='gym.envs.toy_text:BlackjackEnv',
)
register(
id='KellyCoinflip-v0',
entry_point='gym.envs.toy_text:KellyCoinflipEnv',
reward_threshold=246.61,
)
register(
id='KellyCoinflipGeneralized-v0',
entry_point='gym.envs.toy_text:KellyCoinflipGeneralizedEnv',
)
register(
id='FrozenLake-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '4x4'},
max_episode_steps=100,
reward_threshold=0.78, # optimum = .8196
)
register(
id='FrozenLake8x8-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '8x8'},
max_episode_steps=200,
reward_threshold=0.99, # optimum = 1
)
register(
id='CliffWalking-v0',
entry_point='gym.envs.toy_text:CliffWalkingEnv',
)
register(
id='NChain-v0',
entry_point='gym.envs.toy_text:NChainEnv',
max_episode_steps=1000,
)
register(
id='Roulette-v0',
entry_point='gym.envs.toy_text:RouletteEnv',
max_episode_steps=100,
)
register(
id='Taxi-v2',
entry_point='gym.envs.toy_text:TaxiEnv',
reward_threshold=8, # optimum = 8.46
max_episode_steps=200,
)
register(
id='GuessingGame-v0',
entry_point='gym.envs.toy_text:GuessingGame',
max_episode_steps=200,
)
register(
id='HotterColder-v0',
entry_point='gym.envs.toy_text:HotterColder',
max_episode_steps=200,
)
# Mujoco
# ----------------------------------------
# 2D
register(
id='Reacher-v2',
entry_point='gym.envs.mujoco:ReacherEnv',
max_episode_steps=50,
reward_threshold=-3.75,
)
register(
id='Pusher-v2',
entry_point='gym.envs.mujoco:PusherEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='Thrower-v2',
entry_point='gym.envs.mujoco:ThrowerEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='Striker-v2',
entry_point='gym.envs.mujoco:StrikerEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='InvertedPendulum-v2',
entry_point='gym.envs.mujoco:InvertedPendulumEnv',
max_episode_steps=1000,
reward_threshold=950.0,
)
register(
id='InvertedDoublePendulum-v2',
entry_point='gym.envs.mujoco:InvertedDoublePendulumEnv',
max_episode_steps=1000,
reward_threshold=9100.0,
)
register(
id='HalfCheetah-v2',
entry_point='gym.envs.mujoco:HalfCheetahEnv',
max_episode_steps=1000,
reward_threshold=4800.0,
)
register(
id='Hopper-v2',
entry_point='gym.envs.mujoco:HopperEnv',
max_episode_steps=1000,
reward_threshold=3800.0,
)
register(
id='Swimmer-v2',
entry_point='gym.envs.mujoco:SwimmerEnv',
max_episode_steps=1000,
reward_threshold=360.0,
)
register(
id='Walker2d-v2',
max_episode_steps=1000,
entry_point='gym.envs.mujoco:Walker2dEnv',
)
register(
id='Ant-v2',
entry_point='gym.envs.mujoco:AntEnv',
max_episode_steps=1000,
reward_threshold=6000.0,
)
register(
id='Humanoid-v2',
entry_point='gym.envs.mujoco:HumanoidEnv',
max_episode_steps=1000,
)
register(
id='HumanoidStandup-v2',
entry_point='gym.envs.mujoco:HumanoidStandupEnv',
max_episode_steps=1000,
)
# Robotics
# ----------------------------------------
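# Merge dict b into dict a in place and return a; used below to extend the
# per-reward-type kwargs with environment-specific settings.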
def _merge(a, b):
a.update(b)
return a
for reward_type in ['sparse', 'dense']:
suffix = 'Dense' if reward_type == 'dense' else ''
kwargs = {
'reward_type': reward_type,
}
# Fetch
register(
id='FetchSlide{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchSlideEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamSlide{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamSlideEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamSlideJoint{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamSlideJointEnv',
kwargs=kwargs,
max_episode_steps=500,
)
register(
id='FetchPickAndPlace{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchPickAndPlaceEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamPickAndPlace{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPickAndPlaceEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamPickAndPlaceJoint{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPickAndPlaceJointEnv',
kwargs=kwargs,
max_episode_steps=500,
)
register(
id='FetchReach{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchReachEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamReach{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamReachEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamReachJoint{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamReachJointEnv',
kwargs=kwargs,
max_episode_steps=500,
)
register(
id='FetchPush{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchPushEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamPush{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPushEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamPushJoint{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPushJointEnv',
kwargs=kwargs,
max_episode_steps=500,
)
# grasp
register(
id='Grasp{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamGraspEnv',
kwargs=kwargs,
max_episode_steps=2,
)
# grasp open to close
register(
id='GraspOpenToClose{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamGraspOpenToCloseEnv',
kwargs=kwargs,
max_episode_steps=3,
)
# grasp rotation
register(
id='GraspRot{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamGraspRotationEnv',
kwargs=kwargs,
max_episode_steps=2,
)
# push
register(
id='Push{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPushEnv',
kwargs=kwargs,
max_episode_steps=3,
)
# Peg Insertion
register(
id='PegInsert{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPegInsertEnv',
kwargs=kwargs,
max_episode_steps=2,
)
# peg rotation
register(
id='PegInsertRot{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPegInsertRotationEnv',
kwargs=kwargs,
max_episode_steps=2,
)
# peg open to close
register(
id='PegInsertOpenToClose{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPegInsertOpenToCloseEnv',
kwargs=kwargs,
max_episode_steps=3,
)
# slide
register(
id='Slide{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamSlideEnv',
kwargs=kwargs,
max_episode_steps=2,
)
# slide rotation
register(
id='SlideRot{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamSlideRotationEnv',
kwargs=kwargs,
max_episode_steps=2,
)
# slide open to close
register(
id='SlideOpenToClose{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamSlideOpenToCloseEnv',
kwargs=kwargs,
max_episode_steps=3,
)
# Drawer open
register(
id='Drawer{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamDrawerOpenEnv',
kwargs=kwargs,
max_episode_steps=2,
)
# Drawer open to close
register(
id='DrawerOpenToClose{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamDrawerOpenToCloseEnv',
kwargs=kwargs,
max_episode_steps=3,
)
# inverse peg insertion
register(
id='InversePegInsert{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamInversePegInsertEnv',
kwargs=kwargs,
max_episode_steps=2,
)
# Hand
register(
id='HandReach{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandReachEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='HandManipulateBlockRotateZ{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'z'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateParallel{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'parallel'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateXYZ{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id='HandManipulateBlock{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockTouchSensorsEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggRotate{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id='HandManipulateEgg{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggTouchSensorsEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenRotate{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id='HandManipulatePen{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenTouchSensorsEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Atari
# ----------------------------------------
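# Each game is registered in several variants: v0 (sticky actions via
# repeat_action_probability=0.25), v4 (no sticky actions), Deterministic
# (fixed frameskip) and NoFrameskip (frameskip of 1).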
# print ', '.join(["'{}'".format(name.split('.')[0]) for name in atari_py.list_games()])
for game in ['air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids', 'atlantis',
'bank_heist', 'battle_zone', 'beam_rider', 'berzerk', 'bowling', 'boxing', 'breakout', 'carnival',
'centipede', 'chopper_command', 'crazy_climber', 'defender', 'demon_attack', 'double_dunk',
'elevator_action', 'enduro', 'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
'hero', 'ice_hockey', 'jamesbond', 'journey_escape', 'kangaroo', 'krull', 'kung_fu_master',
'montezuma_revenge', 'ms_pacman', 'name_this_game', 'phoenix', 'pitfall', 'pong', 'pooyan',
'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank', 'seaquest', 'skiing',
'solaris', 'space_invaders', 'star_gunner', 'tennis', 'time_pilot', 'tutankham', 'up_n_down',
'venture', 'video_pinball', 'wizard_of_wor', 'yars_revenge', 'zaxxon']:
for obs_type in ['image', 'ram']:
# space_invaders should yield SpaceInvaders-v0 and SpaceInvaders-ram-v0
name = ''.join([g.capitalize() for g in game.split('_')])
if obs_type == 'ram':
name = '{}-ram'.format(name)
nondeterministic = False
if game == 'elevator_action' and obs_type == 'ram':
# ElevatorAction-ram-v0 seems to yield slightly
# non-deterministic observations about 10% of the time. We
# should track this down eventually, but for now we just
# mark it as nondeterministic.
nondeterministic = True
register(
id='{}-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'repeat_action_probability': 0.25},
max_episode_steps=10000,
nondeterministic=nondeterministic,
)
register(
id='{}-v4'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
# Standard Deterministic (as in the original DeepMind paper)
if game == 'space_invaders':
frameskip = 3
else:
frameskip = 4
# Use a deterministic frame skip.
register(
id='{}Deterministic-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip, 'repeat_action_probability': 0.25},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
register(
id='{}Deterministic-v4'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
register(
id='{}NoFrameskip-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1, 'repeat_action_probability': 0.25}, # A frameskip of 1 means we get every frame
max_episode_steps=frameskip * 100000,
nondeterministic=nondeterministic,
)
# No frameskip. (Atari has no entropy source, so these are
# deterministic environments.)
register(
id='{}NoFrameskip-v4'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1}, # A frameskip of 1 means we get every frame
max_episode_steps=frameskip * 100000,
nondeterministic=nondeterministic,
)
# Unit test
# ---------
register(
id='CubeCrash-v0',
entry_point='gym.envs.unittest:CubeCrash',
reward_threshold=0.9,
)
register(
id='CubeCrashSparse-v0',
entry_point='gym.envs.unittest:CubeCrashSparse',
reward_threshold=0.9,
)
register(
id='CubeCrashScreenBecomesBlack-v0',
entry_point='gym.envs.unittest:CubeCrashScreenBecomesBlack',
reward_threshold=0.9,
)
register(
id='MemorizeDigits-v0',
entry_point='gym.envs.unittest:MemorizeDigits',
reward_threshold=20,
)
| 26.868347
| 151
| 0.634122
|
from gym.envs.registration import registry, register, make, spec
register(
id='Copy-v0',
entry_point='gym.envs.algorithmic:CopyEnv',
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='RepeatCopy-v0',
entry_point='gym.envs.algorithmic:RepeatCopyEnv',
max_episode_steps=200,
reward_threshold=75.0,
)
register(
id='ReversedAddition-v0',
entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
kwargs={'rows' : 2},
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='ReversedAddition3-v0',
entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
kwargs={'rows' : 3},
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='DuplicatedInput-v0',
entry_point='gym.envs.algorithmic:DuplicatedInputEnv',
max_episode_steps=200,
reward_threshold=9.0,
)
register(
id='Reverse-v0',
entry_point='gym.envs.algorithmic:ReverseEnv',
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='CartPole-v0',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=200,
reward_threshold=195.0,
)
register(
id='CartPole-v1',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=500,
reward_threshold=475.0,
)
register(
id='MountainCar-v0',
entry_point='gym.envs.classic_control:MountainCarEnv',
max_episode_steps=200,
reward_threshold=-110.0,
)
register(
id='MountainCarContinuous-v0',
entry_point='gym.envs.classic_control:Continuous_MountainCarEnv',
max_episode_steps=999,
reward_threshold=90.0,
)
register(
id='Pendulum-v0',
entry_point='gym.envs.classic_control:PendulumEnv',
max_episode_steps=200,
)
register(
id='Acrobot-v1',
entry_point='gym.envs.classic_control:AcrobotEnv',
max_episode_steps=500,
)
register(
id='LunarLander-v2',
entry_point='gym.envs.box2d:LunarLander',
max_episode_steps=1000,
reward_threshold=200,
)
register(
id='LunarLanderContinuous-v2',
entry_point='gym.envs.box2d:LunarLanderContinuous',
max_episode_steps=1000,
reward_threshold=200,
)
register(
id='BipedalWalker-v2',
entry_point='gym.envs.box2d:BipedalWalker',
max_episode_steps=1600,
reward_threshold=300,
)
register(
id='BipedalWalkerHardcore-v2',
entry_point='gym.envs.box2d:BipedalWalkerHardcore',
max_episode_steps=2000,
reward_threshold=300,
)
register(
id='CarRacing-v0',
entry_point='gym.envs.box2d:CarRacing',
max_episode_steps=1000,
reward_threshold=900,
)
register(
id='Blackjack-v0',
entry_point='gym.envs.toy_text:BlackjackEnv',
)
register(
id='KellyCoinflip-v0',
entry_point='gym.envs.toy_text:KellyCoinflipEnv',
reward_threshold=246.61,
)
register(
id='KellyCoinflipGeneralized-v0',
entry_point='gym.envs.toy_text:KellyCoinflipGeneralizedEnv',
)
register(
id='FrozenLake-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '4x4'},
max_episode_steps=100,
reward_threshold=0.78,
)
register(
id='FrozenLake8x8-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '8x8'},
max_episode_steps=200,
reward_threshold=0.99,
)
register(
id='CliffWalking-v0',
entry_point='gym.envs.toy_text:CliffWalkingEnv',
)
register(
id='NChain-v0',
entry_point='gym.envs.toy_text:NChainEnv',
max_episode_steps=1000,
)
register(
id='Roulette-v0',
entry_point='gym.envs.toy_text:RouletteEnv',
max_episode_steps=100,
)
register(
id='Taxi-v2',
entry_point='gym.envs.toy_text:TaxiEnv',
reward_threshold=8,
max_episode_steps=200,
)
register(
id='GuessingGame-v0',
entry_point='gym.envs.toy_text:GuessingGame',
max_episode_steps=200,
)
register(
id='HotterColder-v0',
entry_point='gym.envs.toy_text:HotterColder',
max_episode_steps=200,
)
register(
id='Reacher-v2',
entry_point='gym.envs.mujoco:ReacherEnv',
max_episode_steps=50,
reward_threshold=-3.75,
)
register(
id='Pusher-v2',
entry_point='gym.envs.mujoco:PusherEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='Thrower-v2',
entry_point='gym.envs.mujoco:ThrowerEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='Striker-v2',
entry_point='gym.envs.mujoco:StrikerEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='InvertedPendulum-v2',
entry_point='gym.envs.mujoco:InvertedPendulumEnv',
max_episode_steps=1000,
reward_threshold=950.0,
)
register(
id='InvertedDoublePendulum-v2',
entry_point='gym.envs.mujoco:InvertedDoublePendulumEnv',
max_episode_steps=1000,
reward_threshold=9100.0,
)
register(
id='HalfCheetah-v2',
entry_point='gym.envs.mujoco:HalfCheetahEnv',
max_episode_steps=1000,
reward_threshold=4800.0,
)
register(
id='Hopper-v2',
entry_point='gym.envs.mujoco:HopperEnv',
max_episode_steps=1000,
reward_threshold=3800.0,
)
register(
id='Swimmer-v2',
entry_point='gym.envs.mujoco:SwimmerEnv',
max_episode_steps=1000,
reward_threshold=360.0,
)
register(
id='Walker2d-v2',
max_episode_steps=1000,
entry_point='gym.envs.mujoco:Walker2dEnv',
)
register(
id='Ant-v2',
entry_point='gym.envs.mujoco:AntEnv',
max_episode_steps=1000,
reward_threshold=6000.0,
)
register(
id='Humanoid-v2',
entry_point='gym.envs.mujoco:HumanoidEnv',
max_episode_steps=1000,
)
register(
id='HumanoidStandup-v2',
entry_point='gym.envs.mujoco:HumanoidStandupEnv',
max_episode_steps=1000,
)
def _merge(a, b):
a.update(b)
return a
for reward_type in ['sparse', 'dense']:
suffix = 'Dense' if reward_type == 'dense' else ''
kwargs = {
'reward_type': reward_type,
}
register(
id='FetchSlide{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchSlideEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamSlide{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamSlideEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamSlideJoint{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamSlideJointEnv',
kwargs=kwargs,
max_episode_steps=500,
)
register(
id='FetchPickAndPlace{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchPickAndPlaceEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamPickAndPlace{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPickAndPlaceEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamPickAndPlaceJoint{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPickAndPlaceJointEnv',
kwargs=kwargs,
max_episode_steps=500,
)
register(
id='FetchReach{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchReachEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamReach{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamReachEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamReachJoint{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamReachJointEnv',
kwargs=kwargs,
max_episode_steps=500,
)
register(
id='FetchPush{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchPushEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamPush{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPushEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='CamPushJoint{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPushJointEnv',
kwargs=kwargs,
max_episode_steps=500,
)
register(
id='Grasp{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamGraspEnv',
kwargs=kwargs,
max_episode_steps=2,
)
register(
id='GraspOpenToClose{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamGraspOpenToCloseEnv',
kwargs=kwargs,
max_episode_steps=3,
)
register(
id='GraspRot{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamGraspRotationEnv',
kwargs=kwargs,
max_episode_steps=2,
)
register(
id='Push{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPushEnv',
kwargs=kwargs,
max_episode_steps=3,
)
register(
id='PegInsert{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPegInsertEnv',
kwargs=kwargs,
max_episode_steps=2,
)
register(
id='PegInsertRot{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPegInsertRotationEnv',
kwargs=kwargs,
max_episode_steps=2,
)
register(
id='PegInsertOpenToClose{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamPegInsertOpenToCloseEnv',
kwargs=kwargs,
max_episode_steps=3,
)
register(
id='Slide{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamSlideEnv',
kwargs=kwargs,
max_episode_steps=2,
)
register(
id='SlideRot{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamSlideRotationEnv',
kwargs=kwargs,
max_episode_steps=2,
)
register(
id='SlideOpenToClose{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamSlideOpenToCloseEnv',
kwargs=kwargs,
max_episode_steps=3,
)
register(
id='Drawer{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamDrawerOpenEnv',
kwargs=kwargs,
max_episode_steps=2,
)
register(
id='DrawerOpenToClose{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamDrawerOpenToCloseEnv',
kwargs=kwargs,
max_episode_steps=3,
)
register(
id='InversePegInsert{}-v0'.format(suffix),
entry_point='gym.envs.robotics:CamInversePegInsertEnv',
kwargs=kwargs,
max_episode_steps=2,
)
register(
id='HandReach{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandReachEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='HandManipulateBlockRotateZ{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'z'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateParallel{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'parallel'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateXYZ{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlock{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockTouchSensorsEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggRotate{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEgg{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggTouchSensorsEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenRotate{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePen{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenTouchSensorsEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
for game in ['air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids', 'atlantis',
'bank_heist', 'battle_zone', 'beam_rider', 'berzerk', 'bowling', 'boxing', 'breakout', 'carnival',
'centipede', 'chopper_command', 'crazy_climber', 'defender', 'demon_attack', 'double_dunk',
'elevator_action', 'enduro', 'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
'hero', 'ice_hockey', 'jamesbond', 'journey_escape', 'kangaroo', 'krull', 'kung_fu_master',
'montezuma_revenge', 'ms_pacman', 'name_this_game', 'phoenix', 'pitfall', 'pong', 'pooyan',
'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank', 'seaquest', 'skiing',
'solaris', 'space_invaders', 'star_gunner', 'tennis', 'time_pilot', 'tutankham', 'up_n_down',
'venture', 'video_pinball', 'wizard_of_wor', 'yars_revenge', 'zaxxon']:
for obs_type in ['image', 'ram']:
name = ''.join([g.capitalize() for g in game.split('_')])
if obs_type == 'ram':
name = '{}-ram'.format(name)
nondeterministic = False
if game == 'elevator_action' and obs_type == 'ram':
nondeterministic = True
register(
id='{}-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'repeat_action_probability': 0.25},
max_episode_steps=10000,
nondeterministic=nondeterministic,
)
register(
id='{}-v4'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
if game == 'space_invaders':
frameskip = 3
else:
frameskip = 4
register(
id='{}Deterministic-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip, 'repeat_action_probability': 0.25},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
register(
id='{}Deterministic-v4'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
register(
id='{}NoFrameskip-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1, 'repeat_action_probability': 0.25},
max_episode_steps=frameskip * 100000,
nondeterministic=nondeterministic,
)
register(
id='{}NoFrameskip-v4'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1},
max_episode_steps=frameskip * 100000,
nondeterministic=nondeterministic,
)
register(
id='CubeCrash-v0',
entry_point='gym.envs.unittest:CubeCrash',
reward_threshold=0.9,
)
register(
id='CubeCrashSparse-v0',
entry_point='gym.envs.unittest:CubeCrashSparse',
reward_threshold=0.9,
)
register(
id='CubeCrashScreenBecomesBlack-v0',
entry_point='gym.envs.unittest:CubeCrashScreenBecomesBlack',
reward_threshold=0.9,
)
register(
id='MemorizeDigits-v0',
entry_point='gym.envs.unittest:MemorizeDigits',
reward_threshold=20,
)
| true
| true
|
f70a6966c8f433d99412dfc5c6bb2b7a62863608
| 347
|
py
|
Python
|
topicnet/cooking_machine/recipes/__init__.py
|
DmitriyValetov/TopicNet
|
b450606ce6cdf2b1f75280112627666f325b1b2c
|
[
"MIT"
] | null | null | null |
topicnet/cooking_machine/recipes/__init__.py
|
DmitriyValetov/TopicNet
|
b450606ce6cdf2b1f75280112627666f325b1b2c
|
[
"MIT"
] | null | null | null |
topicnet/cooking_machine/recipes/__init__.py
|
DmitriyValetov/TopicNet
|
b450606ce6cdf2b1f75280112627666f325b1b2c
|
[
"MIT"
] | null | null | null |
from .multimodal_exploratory_search_pipeline import MultimodalSearchRecipe
from .artm_baseline_pipeline import BaselineRecipe
from .exploratory_search_pipeline import SearchRecipe
from .artm_baseline_pipeline import ARTM_baseline_template as ARTM_baseline
from .exploratory_search_pipeline import exploratory_search_template as exploratory_search
| 57.833333
| 90
| 0.916427
|
from .multimodal_exploratory_search_pipeline import MultimodalSearchRecipe
from .artm_baseline_pipeline import BaselineRecipe
from .exploratory_search_pipeline import SearchRecipe
from .artm_baseline_pipeline import ARTM_baseline_template as ARTM_baseline
from .exploratory_search_pipeline import exploratory_search_template as exploratory_search
| true
| true
|
f70a69b8a7b993da0e40d67273f8006f8f15f747
| 2,299
|
py
|
Python
|
grr/parsers/windows_persistence_test.py
|
StanislavParovoy/GRR
|
7cdf490f9be2ccc0a8160c9b8ae23b73922049d5
|
[
"Apache-2.0"
] | 5
|
2017-03-17T08:25:09.000Z
|
2022-02-22T05:28:14.000Z
|
grr/parsers/windows_persistence_test.py
|
StanislavParovoy/GRR
|
7cdf490f9be2ccc0a8160c9b8ae23b73922049d5
|
[
"Apache-2.0"
] | null | null | null |
grr/parsers/windows_persistence_test.py
|
StanislavParovoy/GRR
|
7cdf490f9be2ccc0a8160c9b8ae23b73922049d5
|
[
"Apache-2.0"
] | 3
|
2018-12-07T07:04:37.000Z
|
2022-02-22T05:28:16.000Z
|
#!/usr/bin/env python
"""Tests for grr.parsers.windows_persistence."""
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.parsers import windows_persistence
class WindowsPersistenceMechanismsParserTest(test_lib.FlowTestsBaseclass):
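  # Verifies that registry run-key values and service image paths are
  # normalized to absolute filesystem paths using the client knowledge base.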
def testParse(self):
parser = windows_persistence.WindowsPersistenceMechanismsParser()
path = (r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion"
r"\Run\test")
pathspec = rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.REGISTRY)
reg_data = "C:\\blah\\some.exe /v"
reg_type = rdf_client.StatEntry.RegistryType.REG_SZ
stat = rdf_client.StatEntry(
aff4path="aff4:/asdfasdf/",
pathspec=pathspec,
registry_type=reg_type,
registry_data=rdf_protodict.DataBlob(string=reg_data))
persistence = [stat]
image_paths = [
"system32\\drivers\\ACPI.sys",
"%systemroot%\\system32\\svchost.exe -k netsvcs",
"\\SystemRoot\\system32\\drivers\\acpipmi.sys"
]
reg_key = rdfvalue.RDFURN("aff4:/C.1000000000000000/registry"
"/HKEY_LOCAL_MACHINE/SYSTEM/ControlSet001"
"/services/AcpiPmi")
for path in image_paths:
serv_info = rdf_client.WindowsServiceInformation(
name="blah",
display_name="GRRservice",
image_path=path,
registry_key=reg_key)
persistence.append(serv_info)
knowledge_base = rdf_client.KnowledgeBase()
knowledge_base.environ_systemroot = "C:\\Windows"
expected = [
"C:\\blah\\some.exe", "C:\\Windows\\system32\\drivers\\ACPI.sys",
"C:\\Windows\\system32\\svchost.exe",
"C:\\Windows\\system32\\drivers\\acpipmi.sys"
]
for index, item in enumerate(persistence):
results = list(
parser.Parse(item, knowledge_base, rdf_paths.PathSpec.PathType.OS))
self.assertEqual(results[0].pathspec.path, expected[index])
self.assertEqual(len(results), 1)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| 33.808824
| 77
| 0.683776
|
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.parsers import windows_persistence
class WindowsPersistenceMechanismsParserTest(test_lib.FlowTestsBaseclass):
def testParse(self):
parser = windows_persistence.WindowsPersistenceMechanismsParser()
path = (r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion"
r"\Run\test")
pathspec = rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.REGISTRY)
reg_data = "C:\\blah\\some.exe /v"
reg_type = rdf_client.StatEntry.RegistryType.REG_SZ
stat = rdf_client.StatEntry(
aff4path="aff4:/asdfasdf/",
pathspec=pathspec,
registry_type=reg_type,
registry_data=rdf_protodict.DataBlob(string=reg_data))
persistence = [stat]
image_paths = [
"system32\\drivers\\ACPI.sys",
"%systemroot%\\system32\\svchost.exe -k netsvcs",
"\\SystemRoot\\system32\\drivers\\acpipmi.sys"
]
reg_key = rdfvalue.RDFURN("aff4:/C.1000000000000000/registry"
"/HKEY_LOCAL_MACHINE/SYSTEM/ControlSet001"
"/services/AcpiPmi")
for path in image_paths:
serv_info = rdf_client.WindowsServiceInformation(
name="blah",
display_name="GRRservice",
image_path=path,
registry_key=reg_key)
persistence.append(serv_info)
knowledge_base = rdf_client.KnowledgeBase()
knowledge_base.environ_systemroot = "C:\\Windows"
expected = [
"C:\\blah\\some.exe", "C:\\Windows\\system32\\drivers\\ACPI.sys",
"C:\\Windows\\system32\\svchost.exe",
"C:\\Windows\\system32\\drivers\\acpipmi.sys"
]
for index, item in enumerate(persistence):
results = list(
parser.Parse(item, knowledge_base, rdf_paths.PathSpec.PathType.OS))
self.assertEqual(results[0].pathspec.path, expected[index])
self.assertEqual(len(results), 1)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| true
| true
|
f70a6a6b45048c2b9550d17284e2cbac8687e10a
| 1,656
|
py
|
Python
|
kafka-python-console-sample/consumertask.py
|
IBM-CSM/event-streams-samples
|
ce90b1f7f57f3d2afff0596b3f4610392c025ece
|
[
"Apache-2.0"
] | 39
|
2015-10-13T21:41:25.000Z
|
2018-08-14T12:29:48.000Z
|
kafka-python-console-sample/consumertask.py
|
IBM-CSM/event-streams-samples
|
ce90b1f7f57f3d2afff0596b3f4610392c025ece
|
[
"Apache-2.0"
] | 22
|
2016-05-06T15:30:43.000Z
|
2018-09-12T06:59:49.000Z
|
kafka-python-console-sample/consumertask.py
|
IBM-CSM/event-streams-samples
|
ce90b1f7f57f3d2afff0596b3f4610392c025ece
|
[
"Apache-2.0"
] | 92
|
2015-10-13T21:41:25.000Z
|
2018-09-19T09:08:10.000Z
|
"""
Copyright 2015-2018 IBM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Licensed Materials - Property of IBM
© Copyright IBM Corp. 2015-2018
"""
import asyncio
from confluent_kafka import Consumer
class ConsumerTask(object):
def __init__(self, conf, topic_name):
self.consumer = Consumer(conf)
self.topic_name = topic_name
self.running = True
def stop(self):
self.running = False
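    # Poll the topic roughly once per second; asyncio.sleep yields control so
    # other coroutines (e.g. a producer task) can run between polls.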
@asyncio.coroutine
def run(self):
print('The consumer has started')
self.consumer.subscribe([self.topic_name])
while self.running:
msg = self.consumer.poll(1)
if msg is not None and msg.error() is None:
print('Message consumed: topic={0}, partition={1}, offset={2}, key={3}, value={4}'.format(
msg.topic(),
msg.partition(),
msg.offset(),
msg.key().decode('utf-8'),
msg.value().decode('utf-8')))
else:
print('No messages consumed')
yield from asyncio.sleep(2)
self.consumer.unsubscribe()
self.consumer.close()
| 32.470588
| 106
| 0.629831
|
import asyncio
from confluent_kafka import Consumer
class ConsumerTask(object):
def __init__(self, conf, topic_name):
self.consumer = Consumer(conf)
self.topic_name = topic_name
self.running = True
def stop(self):
self.running = False
@asyncio.coroutine
def run(self):
print('The consumer has started')
self.consumer.subscribe([self.topic_name])
while self.running:
msg = self.consumer.poll(1)
if msg is not None and msg.error() is None:
print('Message consumed: topic={0}, partition={1}, offset={2}, key={3}, value={4}'.format(
msg.topic(),
msg.partition(),
msg.offset(),
msg.key().decode('utf-8'),
msg.value().decode('utf-8')))
else:
print('No messages consumed')
yield from asyncio.sleep(2)
self.consumer.unsubscribe()
self.consumer.close()
| true
| true
|
f70a6a7d524410e5221820b26a7da36dcf0ac821
| 1,296
|
py
|
Python
|
var/spack/repos/builtin/packages/seqan/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/seqan/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/seqan/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Seqan(CMakePackage):
"""SeqAn is an open source C++ library of efficient algorithms and data
structures for the analysis of sequences with the focus on biological data.
Our library applies a unique generic design that guarantees high
performance, generality, extensibility, and integration with other
libraries. SeqAn is easy to use and simplifies the development of new
    software tools with a minimal loss of performance."""
homepage = "https://www.seqan.de"
url = "https://github.com/seqan/seqan/archive/seqan-v2.4.0.tar.gz"
version('2.4.0', sha256='d7084d17729214003e84818e0280a16f223c8f1c6a30eeef040c27e0c0047bd7')
depends_on('cmake@3.4.0:', type='build')
depends_on('python@2.7.0:', type='build')
depends_on('py-nose', type='build')
depends_on('py-sphinx', type='build')
depends_on('boost', type=('build', 'link'))
depends_on('zlib', type=('build', 'link'))
depends_on('bzip2', type=('build', 'link'))
conflicts('%intel@:16.0.4')
conflicts('%gcc@:4.9.4')
conflicts('%llvm@:3.5.1')
| 38.117647
| 95
| 0.70216
|
from spack import *
class Seqan(CMakePackage):
homepage = "https://www.seqan.de"
url = "https://github.com/seqan/seqan/archive/seqan-v2.4.0.tar.gz"
version('2.4.0', sha256='d7084d17729214003e84818e0280a16f223c8f1c6a30eeef040c27e0c0047bd7')
depends_on('cmake@3.4.0:', type='build')
depends_on('python@2.7.0:', type='build')
depends_on('py-nose', type='build')
depends_on('py-sphinx', type='build')
depends_on('boost', type=('build', 'link'))
depends_on('zlib', type=('build', 'link'))
depends_on('bzip2', type=('build', 'link'))
conflicts('%intel@:16.0.4')
conflicts('%gcc@:4.9.4')
conflicts('%llvm@:3.5.1')
| true
| true
|
f70a6a9e38230c1855566a8a044921b83b845f35
| 6,386
|
py
|
Python
|
tests/lite/test_wrappers.py
|
FeryET/pytorch-lightning
|
b1f8b111b5085373599758a4e155a482259cdbf0
|
[
"Apache-2.0"
] | null | null | null |
tests/lite/test_wrappers.py
|
FeryET/pytorch-lightning
|
b1f8b111b5085373599758a4e155a482259cdbf0
|
[
"Apache-2.0"
] | 1
|
2022-03-18T21:56:53.000Z
|
2022-03-18T21:56:53.000Z
|
tests/lite/test_wrappers.py
|
FeryET/pytorch-lightning
|
b1f8b111b5085373599758a4e155a482259cdbf0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import ANY, Mock
import pytest
import torch
from torch.utils.data.dataloader import DataLoader
from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin
from pytorch_lightning.lite import LightningLite
from pytorch_lightning.lite.wrappers import _LiteDataLoader, _LiteModule, _LiteOptimizer
from tests.helpers.runif import RunIf
class EmptyLite(LightningLite):
def run(self):
pass
def test_lite_module_wraps():
"""Test that the wrapped module is accessible via the property."""
module = Mock()
assert _LiteModule(module, Mock()).module is module
@RunIf(min_gpus=1)
@pytest.mark.parametrize(
"precision, input_type, expected_type",
[
(32, torch.float16, torch.float32),
(32, torch.float32, torch.float32),
(32, torch.float64, torch.float32),
(32, torch.int, torch.int),
(16, torch.float32, torch.float16),
(16, torch.float64, torch.float16),
(16, torch.long, torch.long),
pytest.param("bf16", torch.float32, torch.bfloat16, marks=RunIf(min_torch="1.10")),
pytest.param("bf16", torch.float64, torch.bfloat16, marks=RunIf(min_torch="1.10")),
pytest.param("bf16", torch.bool, torch.bool, marks=RunIf(min_torch="1.10")),
],
)
def test_lite_module_forward_conversion(precision, input_type, expected_type):
"""Test that the LiteModule performs autocasting on the input tensors and during forward()."""
lite = EmptyLite(precision=precision, accelerator="gpu", devices=1)
device = torch.device("cuda", 0)
def check_autocast(forward_input):
assert precision != 16 or torch.is_autocast_enabled()
return forward_input
module = Mock(wraps=torch.nn.Identity(), side_effect=check_autocast)
lite_module = _LiteModule(module, lite._precision_plugin).to(device)
out = lite_module(torch.tensor([1, 2, 3], dtype=input_type, device=device))
assert module.call_args[0][0].dtype == expected_type
assert out.dtype == input_type or out.dtype == torch.get_default_dtype()
@pytest.mark.parametrize(
"device", [torch.device("cpu"), pytest.param(torch.device("cuda", 0), marks=RunIf(min_gpus=1))]
)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16])
def test_lite_module_device_dtype_propagation(device, dtype):
"""Test that the LiteModule propagates device and dtype properties to its submodules (e.g. torchmetrics)."""
class DeviceModule(DeviceDtypeModuleMixin):
pass
device_module = DeviceModule()
lite_module = _LiteModule(device_module, Mock())
lite_module.to(device)
assert device_module.device == device
assert lite_module.device == device
lite_module.to(dtype)
assert device_module.dtype == dtype
assert lite_module.dtype == dtype
def test_lite_dataloader_iterator():
"""Test that the iteration over a LiteDataLoader wraps the iterator of the underlying dataloader (no automatic
device placement)."""
dataloader = DataLoader(range(5), batch_size=2)
lite_dataloader = _LiteDataLoader(dataloader)
assert len(lite_dataloader) == len(dataloader) == 3
iterator = iter(dataloader)
lite_iterator = iter(lite_dataloader)
assert torch.equal(next(iterator), next(lite_iterator))
assert torch.equal(next(iterator), next(lite_iterator))
assert torch.equal(next(iterator), next(lite_iterator))
with pytest.raises(StopIteration):
next(iterator)
with pytest.raises(StopIteration):
next(lite_iterator)
@pytest.mark.parametrize(
"src_device, dest_device",
[
(torch.device("cpu"), torch.device("cpu")),
pytest.param(torch.device("cpu"), torch.device("cuda", 0), marks=RunIf(min_gpus=1)),
pytest.param(torch.device("cuda", 0), torch.device("cpu"), marks=RunIf(min_gpus=1)),
],
)
def test_lite_dataloader_device_placement(src_device, dest_device):
"""Test that the LiteDataLoader moves data to the device in its iterator."""
sample0 = torch.tensor(0, device=src_device)
sample1 = torch.tensor(1, device=src_device)
sample2 = {"data": torch.tensor(2, device=src_device)}
sample3 = {"data": torch.tensor(3, device=src_device)}
dataloader = DataLoader([sample0, sample1, sample2, sample3], batch_size=2)
lite_dataloader = _LiteDataLoader(dataloader=dataloader, device=dest_device)
iterator = iter(lite_dataloader)
batch0 = next(iterator)
assert torch.equal(batch0, torch.tensor([0, 1], device=dest_device))
batch1 = next(iterator)
assert torch.equal(batch1["data"], torch.tensor([2, 3], device=dest_device))
def test_lite_optimizer_wraps():
"""Test that the LiteOptimizer fully wraps the optimizer."""
optimizer_cls = torch.optim.SGD
optimizer = Mock(spec=optimizer_cls)
lite_optimizer = _LiteOptimizer(optimizer, Mock())
assert lite_optimizer.optimizer is optimizer
assert isinstance(lite_optimizer, optimizer_cls)
def test_lite_optimizer_state_dict():
"""Test that the LiteOptimizer calls into the strategy to collect the state."""
optimizer = Mock()
strategy = Mock()
lite_optimizer = _LiteOptimizer(optimizer=optimizer, strategy=strategy)
lite_optimizer.state_dict()
strategy.optimizer_state.assert_called_with(optimizer)
def test_lite_optimizer_steps():
"""Test that the LiteOptimizer forwards the step() and zero_grad() calls to the wrapped optimizer."""
optimizer = Mock()
strategy = Mock()
strategy.optimizer_step.return_value = 123
lite_optimizer = _LiteOptimizer(optimizer=optimizer, strategy=strategy)
step_output = lite_optimizer.step()
assert step_output == 123
strategy.optimizer_step.assert_called_once()
strategy.optimizer_step.assert_called_with(optimizer, opt_idx=0, closure=ANY, model=strategy.model)
| 38.939024
| 114
| 0.727216
|
from unittest.mock import ANY, Mock
import pytest
import torch
from torch.utils.data.dataloader import DataLoader
from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin
from pytorch_lightning.lite import LightningLite
from pytorch_lightning.lite.wrappers import _LiteDataLoader, _LiteModule, _LiteOptimizer
from tests.helpers.runif import RunIf
class EmptyLite(LightningLite):
def run(self):
pass
def test_lite_module_wraps():
module = Mock()
assert _LiteModule(module, Mock()).module is module
@RunIf(min_gpus=1)
@pytest.mark.parametrize(
"precision, input_type, expected_type",
[
(32, torch.float16, torch.float32),
(32, torch.float32, torch.float32),
(32, torch.float64, torch.float32),
(32, torch.int, torch.int),
(16, torch.float32, torch.float16),
(16, torch.float64, torch.float16),
(16, torch.long, torch.long),
pytest.param("bf16", torch.float32, torch.bfloat16, marks=RunIf(min_torch="1.10")),
pytest.param("bf16", torch.float64, torch.bfloat16, marks=RunIf(min_torch="1.10")),
pytest.param("bf16", torch.bool, torch.bool, marks=RunIf(min_torch="1.10")),
],
)
def test_lite_module_forward_conversion(precision, input_type, expected_type):
lite = EmptyLite(precision=precision, accelerator="gpu", devices=1)
device = torch.device("cuda", 0)
def check_autocast(forward_input):
assert precision != 16 or torch.is_autocast_enabled()
return forward_input
module = Mock(wraps=torch.nn.Identity(), side_effect=check_autocast)
lite_module = _LiteModule(module, lite._precision_plugin).to(device)
out = lite_module(torch.tensor([1, 2, 3], dtype=input_type, device=device))
assert module.call_args[0][0].dtype == expected_type
assert out.dtype == input_type or out.dtype == torch.get_default_dtype()
@pytest.mark.parametrize(
"device", [torch.device("cpu"), pytest.param(torch.device("cuda", 0), marks=RunIf(min_gpus=1))]
)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16])
def test_lite_module_device_dtype_propagation(device, dtype):
class DeviceModule(DeviceDtypeModuleMixin):
pass
device_module = DeviceModule()
lite_module = _LiteModule(device_module, Mock())
lite_module.to(device)
assert device_module.device == device
assert lite_module.device == device
lite_module.to(dtype)
assert device_module.dtype == dtype
assert lite_module.dtype == dtype
def test_lite_dataloader_iterator():
dataloader = DataLoader(range(5), batch_size=2)
lite_dataloader = _LiteDataLoader(dataloader)
assert len(lite_dataloader) == len(dataloader) == 3
iterator = iter(dataloader)
lite_iterator = iter(lite_dataloader)
assert torch.equal(next(iterator), next(lite_iterator))
assert torch.equal(next(iterator), next(lite_iterator))
assert torch.equal(next(iterator), next(lite_iterator))
with pytest.raises(StopIteration):
next(iterator)
with pytest.raises(StopIteration):
next(lite_iterator)
@pytest.mark.parametrize(
"src_device, dest_device",
[
(torch.device("cpu"), torch.device("cpu")),
pytest.param(torch.device("cpu"), torch.device("cuda", 0), marks=RunIf(min_gpus=1)),
pytest.param(torch.device("cuda", 0), torch.device("cpu"), marks=RunIf(min_gpus=1)),
],
)
def test_lite_dataloader_device_placement(src_device, dest_device):
sample0 = torch.tensor(0, device=src_device)
sample1 = torch.tensor(1, device=src_device)
sample2 = {"data": torch.tensor(2, device=src_device)}
sample3 = {"data": torch.tensor(3, device=src_device)}
dataloader = DataLoader([sample0, sample1, sample2, sample3], batch_size=2)
lite_dataloader = _LiteDataLoader(dataloader=dataloader, device=dest_device)
iterator = iter(lite_dataloader)
batch0 = next(iterator)
assert torch.equal(batch0, torch.tensor([0, 1], device=dest_device))
batch1 = next(iterator)
assert torch.equal(batch1["data"], torch.tensor([2, 3], device=dest_device))
def test_lite_optimizer_wraps():
optimizer_cls = torch.optim.SGD
optimizer = Mock(spec=optimizer_cls)
lite_optimizer = _LiteOptimizer(optimizer, Mock())
assert lite_optimizer.optimizer is optimizer
assert isinstance(lite_optimizer, optimizer_cls)
def test_lite_optimizer_state_dict():
optimizer = Mock()
strategy = Mock()
lite_optimizer = _LiteOptimizer(optimizer=optimizer, strategy=strategy)
lite_optimizer.state_dict()
strategy.optimizer_state.assert_called_with(optimizer)
def test_lite_optimizer_steps():
optimizer = Mock()
strategy = Mock()
strategy.optimizer_step.return_value = 123
lite_optimizer = _LiteOptimizer(optimizer=optimizer, strategy=strategy)
step_output = lite_optimizer.step()
assert step_output == 123
strategy.optimizer_step.assert_called_once()
strategy.optimizer_step.assert_called_with(optimizer, opt_idx=0, closure=ANY, model=strategy.model)
| true
| true
|
f70a6b28b67cb2ac17dd95251e5df602c3b4223d
| 56,239
|
py
|
Python
|
.install/.backup/platform/gsutil/third_party/boto/boto/beanstalk/layer1.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | 1
|
2017-11-18T18:23:22.000Z
|
2017-11-18T18:23:22.000Z
|
taskqueue/venv_tq/lib/python2.7/site-packages/boto/beanstalk/layer1.py
|
matthappens/taskqueue
|
548979587326b95bf41851eb135052de782e74fc
|
[
"MIT"
] | null | null | null |
taskqueue/venv_tq/lib/python2.7/site-packages/boto/beanstalk/layer1.py
|
matthappens/taskqueue
|
548979587326b95bf41851eb135052de782e74fc
|
[
"MIT"
] | 1
|
2020-07-24T20:04:47.000Z
|
2020-07-24T20:04:47.000Z
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
import boto.jsonresponse
from boto.compat import json
from boto.regioninfo import RegionInfo
from boto.connection import AWSQueryConnection
class Layer1(AWSQueryConnection):
APIVersion = '2010-12-01'
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'elasticbeanstalk.us-east-1.amazonaws.com'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None,
proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
api_version=None, security_token=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(Layer1, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token)
def _required_auth_capability(self):
return ['hmac-v4']
def _encode_bool(self, v):
v = bool(v)
return {True: "true", False: "false"}[v]
def _get_response(self, action, params, path='/', verb='GET'):
params['ContentType'] = 'JSON'
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
raise self.ResponseError(response.status, response.reason, body)
def check_dns_availability(self, cname_prefix):
"""Checks if the specified CNAME is available.
:type cname_prefix: string
:param cname_prefix: The prefix used when this CNAME is
reserved.
"""
params = {'CNAMEPrefix': cname_prefix}
return self._get_response('CheckDNSAvailability', params)
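    # A minimal usage sketch (the 'myapp' prefix is hypothetical; assumes
    # boto is configured with valid AWS credentials):
    #
    #   conn = Layer1()
    #   result = conn.check_dns_availability('myapp')
    #   # result is the parsed JSON indicating whether the CNAME is free.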
def create_application(self, application_name, description=None):
"""
Creates an application that has one configuration template
named default and no application versions.
:type application_name: string
:param application_name: The name of the application.
Constraint: This name must be unique within your account. If the
specified name already exists, the action returns an
InvalidParameterValue error.
:type description: string
:param description: Describes the application.
:raises: TooManyApplicationsException
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('CreateApplication', params)
def create_application_version(self, application_name, version_label,
description=None, s3_bucket=None,
s3_key=None, auto_create_application=None):
"""Creates an application version for the specified application.
:type application_name: string
:param application_name: The name of the application. If no
application is found with this name, and AutoCreateApplication is
false, returns an InvalidParameterValue error.
:type version_label: string
:param version_label: A label identifying this version. Constraint:
Must be unique per application. If an application version already
exists with this label for the specified application, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type description: string
:param description: Describes this version.
:type s3_bucket: string
:param s3_bucket: The Amazon S3 bucket where the data is located.
:type s3_key: string
:param s3_key: The Amazon S3 key where the data is located. Both
s3_bucket and s3_key must be specified in order to use a specific
source bundle. If both of these values are not specified the
sample application will be used.
:type auto_create_application: boolean
:param auto_create_application: Determines how the system behaves if
the specified application for this version does not already exist:
true: Automatically creates the specified application for this
version if it does not already exist. false: Returns an
InvalidParameterValue if the specified application for this version
does not already exist. Default: false Valid Values: true | false
:raises: TooManyApplicationsException,
TooManyApplicationVersionsException,
InsufficientPrivilegesException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
if s3_bucket and s3_key:
params['SourceBundle.S3Bucket'] = s3_bucket
params['SourceBundle.S3Key'] = s3_key
if auto_create_application:
params['AutoCreateApplication'] = self._encode_bool(
auto_create_application)
return self._get_response('CreateApplicationVersion', params)
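    # A hedged sketch of registering a version from a source bundle
    # (bucket and key names are hypothetical; s3_bucket and s3_key must
    # be supplied together, otherwise the sample application is used):
    #
    #   conn.create_application_version(
    #       'myapp', 'v1',
    #       s3_bucket='my-bundles', s3_key='myapp-v1.zip',
    #       auto_create_application=True)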
def create_configuration_template(self, application_name, template_name,
solution_stack_name=None,
source_configuration_application_name=None,
source_configuration_template_name=None,
environment_id=None, description=None,
option_settings=None):
"""Creates a configuration template.
Templates are associated with a specific application and are used to
deploy different versions of the application with the same
configuration settings.
:type application_name: string
:param application_name: The name of the application to associate with
this configuration template. If no application is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template.
Constraint: This name must be unique per application. Default: If
a configuration template already exists with this name, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack used by this
configuration. The solution stack specifies the operating system,
architecture, and application server for a configuration template.
It determines the set of configuration options as well as the
possible and default values. Use ListAvailableSolutionStacks to
obtain a list of available solution stacks. Default: If the
SolutionStackName is not specified and the source configuration
parameter is blank, AWS Elastic Beanstalk uses the default solution
stack. If not specified and the source configuration parameter is
specified, AWS Elastic Beanstalk uses the same solution stack as
the source configuration template.
:type source_configuration_application_name: string
:param source_configuration_application_name: The name of the
application associated with the configuration.
:type source_configuration_template_name: string
:param source_configuration_template_name: The name of the
configuration template.
:type environment_id: string
:param environment_id: The ID of the environment used with this
configuration template.
:type description: string
:param description: Describes this configuration.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration option to the requested value. The new
value overrides the value obtained from the solution stack or the
source configuration template.
:raises: InsufficientPrivilegesException,
TooManyConfigurationTemplatesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if source_configuration_application_name:
params['SourceConfiguration.ApplicationName'] = source_configuration_application_name
if source_configuration_template_name:
params['SourceConfiguration.TemplateName'] = source_configuration_template_name
if environment_id:
params['EnvironmentId'] = environment_id
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
return self._get_response('CreateConfigurationTemplate', params)
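    # A sketch of cloning a template from an existing configuration
    # (names are hypothetical; the source application and template names
    # are supplied as a pair):
    #
    #   conn.create_configuration_template(
    #       'myapp', 'my-template',
    #       source_configuration_application_name='myapp',
    #       source_configuration_template_name='default')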
def create_environment(self, application_name, environment_name,
version_label=None, template_name=None,
solution_stack_name=None, cname_prefix=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""Launches an environment for the application using a configuration.
:type application_name: string
:param application_name: The name of the application that contains the
version to be deployed. If no application is found with this name,
CreateEnvironment returns an InvalidParameterValue error.
:type environment_name: string
:param environment_name: A unique name for the deployment environment.
Used in the application URL. Constraint: Must be from 4 to 23
characters in length. The name can contain only letters, numbers,
and hyphens. It cannot start or end with a hyphen. This name must
be unique in your account. If the specified name already exists,
AWS Elastic Beanstalk returns an InvalidParameterValue error.
Default: If the CNAME parameter is not specified, the environment
name becomes part of the CNAME, and therefore part of the visible
URL for your application.
:type version_label: string
:param version_label: The name of the application version to deploy. If
the specified application has no associated application versions,
AWS Elastic Beanstalk UpdateEnvironment returns an
InvalidParameterValue error. Default: If not specified, AWS
Elastic Beanstalk attempts to launch the most recently created
application version.
:type template_name: string
:param template_name: The name of the configuration template to
use in deployment. If no configuration template is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
Condition: You must specify either this parameter or a
SolutionStackName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type solution_stack_name: string
:param solution_stack_name: This is an alternative to specifying a
configuration name. If specified, AWS Elastic Beanstalk sets the
configuration values to the default values associated with the
specified solution stack. Condition: You must specify either this
or a TemplateName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type cname_prefix: string
:param cname_prefix: If specified, the environment attempts to use this
value as the prefix for the CNAME. If not specified, the
environment uses the environment name.
:type description: string
:param description: Describes this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration options to the requested value in the
configuration set for the new environment. These override the
values obtained from the solution stack or the configuration
template. Each element in the list is a tuple of (Namespace,
OptionName, Value), for example::
[('aws:autoscaling:launchconfiguration',
'Ec2KeyName', 'mykeypair')]
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this new
environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
            The ``tier_name`` and ``tier_type`` parameters are
related and the values provided must be valid.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
        :param tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: TooManyEnvironmentsException, InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if cname_prefix:
params['CNAMEPrefix'] = cname_prefix
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.member.Name'] = tier_name
params['Tier.member.Type'] = tier_type
params['Tier.member.Version'] = tier_version
return self._get_response('CreateEnvironment', params)
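    # A sketch of launching a worker-tier environment with one option
    # override (application, environment, and solution stack names are
    # hypothetical):
    #
    #   conn.create_environment(
    #       'myapp', 'myapp-workers',
    #       solution_stack_name='64bit Amazon Linux running Python',
    #       option_settings=[('aws:autoscaling:launchconfiguration',
    #                         'Ec2KeyName', 'mykeypair')],
    #       tier_name='Worker', tier_type='SQS/HTTP', tier_version='1.0')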
def create_storage_location(self):
"""
Creates the Amazon S3 storage location for the account. This
location is used to store user log files.
:raises: TooManyBucketsException,
S3SubscriptionRequiredException,
InsufficientPrivilegesException
"""
return self._get_response('CreateStorageLocation', params={})
def delete_application(self, application_name,
terminate_env_by_force=None):
"""
Deletes the specified application along with all associated
versions and configurations. The application versions will not
be deleted from your Amazon S3 bucket.
:type application_name: string
:param application_name: The name of the application to delete.
:type terminate_env_by_force: boolean
:param terminate_env_by_force: When set to true, running
environments will be terminated before deleting the application.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name}
if terminate_env_by_force:
params['TerminateEnvByForce'] = self._encode_bool(
terminate_env_by_force)
return self._get_response('DeleteApplication', params)
def delete_application_version(self, application_name, version_label,
delete_source_bundle=None):
"""Deletes the specified version from the specified application.
:type application_name: string
:param application_name: The name of the application to delete
releases from.
:type version_label: string
:param version_label: The label of the version to delete.
:type delete_source_bundle: boolean
:param delete_source_bundle: Indicates whether to delete the
associated source bundle from Amazon S3. Valid Values: true |
false
:raises: SourceBundleDeletionException,
InsufficientPrivilegesException,
OperationInProgressException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if delete_source_bundle:
params['DeleteSourceBundle'] = self._encode_bool(
delete_source_bundle)
return self._get_response('DeleteApplicationVersion', params)
def delete_configuration_template(self, application_name, template_name):
"""Deletes the specified configuration template.
:type application_name: string
:param application_name: The name of the application to delete
the configuration template from.
:type template_name: string
:param template_name: The name of the configuration template to
delete.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
return self._get_response('DeleteConfigurationTemplate', params)
def delete_environment_configuration(self, application_name,
environment_name):
"""
Deletes the draft configuration associated with the running
environment. Updating a running environment with any
configuration changes creates a draft configuration set. You can
get the draft configuration using DescribeConfigurationSettings
while the update is in progress or if the update fails. The
DeploymentStatus for the draft configuration indicates whether
the deployment is in process or has failed. The draft
configuration remains in existence until it is deleted with this
action.
:type application_name: string
:param application_name: The name of the application the
environment is associated with.
:type environment_name: string
:param environment_name: The name of the environment to delete
the draft configuration from.
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
return self._get_response('DeleteEnvironmentConfiguration', params)
def describe_application_versions(self, application_name=None,
version_labels=None):
"""Returns descriptions for existing application versions.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include ones that are associated
with the specified application.
:type version_labels: list
:param version_labels: If specified, restricts the returned
descriptions to only include ones that have the specified version
labels.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_labels:
self.build_list_params(params, version_labels,
'VersionLabels.member')
return self._get_response('DescribeApplicationVersions', params)
def describe_applications(self, application_names=None):
"""Returns the descriptions of existing applications.
:type application_names: list
:param application_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include those with the specified
names.
"""
params = {}
if application_names:
self.build_list_params(params, application_names,
'ApplicationNames.member')
return self._get_response('DescribeApplications', params)
def describe_configuration_options(self, application_name=None,
template_name=None,
environment_name=None,
solution_stack_name=None, options=None):
"""Describes configuration options used in a template or environment.
Describes the configuration options that are used in a
particular configuration template or environment, or that a
specified solution stack defines. The description includes the
        values of the options, their default values, and an indication of
the required action on a running environment if an option value
is changed.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template or environment. Only needed if you want
to describe the configuration options associated with either the
configuration template or environment.
:type template_name: string
:param template_name: The name of the configuration template whose
configuration options you want to describe.
:type environment_name: string
:param environment_name: The name of the environment whose
configuration options you want to describe.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack whose
configuration options you want to describe.
:type options: list
:param options: If specified, restricts the descriptions to only
the specified options.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if options:
self.build_list_params(params, options, 'Options.member')
return self._get_response('DescribeConfigurationOptions', params)
def describe_configuration_settings(self, application_name,
template_name=None,
environment_name=None):
"""
Returns a description of the settings for the specified
configuration set, that is, either a configuration template or
the configuration set associated with a running environment.
When describing the settings for the configuration set
associated with a running environment, it is possible to receive
two sets of setting descriptions. One is the deployed
configuration set, and the other is a draft configuration of an
environment that is either in the process of deployment or that
failed to deploy.
:type application_name: string
:param application_name: The application for the environment or
configuration template.
:type template_name: string
:param template_name: The name of the configuration template to
describe. Conditional: You must specify either this parameter or
an EnvironmentName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to describe.
Condition: You must specify either this or a TemplateName, but not
both. If you specify both, AWS Elastic Beanstalk returns an
InvalidParameterCombination error. If you do not specify either,
            AWS Elastic Beanstalk returns a MissingRequiredParameter error.
"""
params = {'ApplicationName': application_name}
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeConfigurationSettings', params)
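    # Because TemplateName and EnvironmentName are mutually exclusive
    # here, pass exactly one of them (names hypothetical):
    #
    #   conn.describe_configuration_settings(
    #       'myapp', environment_name='myapp-env')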
def describe_environment_resources(self, environment_id=None,
environment_name=None):
"""Returns AWS resources for this environment.
:type environment_id: string
:param environment_id: The ID of the environment to retrieve AWS
resource usage data. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
            Beanstalk returns a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to retrieve
AWS resource usage data. Condition: You must specify either this
or an EnvironmentId, or both. If you do not specify either, AWS
            Elastic Beanstalk returns a MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeEnvironmentResources', params)
def describe_environments(self, application_name=None, version_label=None,
environment_ids=None, environment_names=None,
include_deleted=None,
included_deleted_back_to=None):
"""Returns descriptions for existing environments.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that are associated
with this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to include only those that are associated
with this application version.
:type environment_ids: list
:param environment_ids: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified IDs.
:type environment_names: list
:param environment_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified names.
:type include_deleted: boolean
:param include_deleted: Indicates whether to include deleted
environments: true: Environments that have been deleted after
IncludedDeletedBackTo are displayed. false: Do not include deleted
environments.
:type included_deleted_back_to: timestamp
:param included_deleted_back_to: If specified when IncludeDeleted is
set to true, then environments deleted after this date are
displayed.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if environment_ids:
self.build_list_params(params, environment_ids,
'EnvironmentIds.member')
if environment_names:
self.build_list_params(params, environment_names,
'EnvironmentNames.member')
if include_deleted:
params['IncludeDeleted'] = self._encode_bool(include_deleted)
if included_deleted_back_to:
params['IncludedDeletedBackTo'] = included_deleted_back_to
return self._get_response('DescribeEnvironments', params)
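    # A sketch of including environments deleted after a cutoff date
    # (the ISO 8601 timestamp format is an assumption about what the
    # query API accepts):
    #
    #   conn.describe_environments(
    #       application_name='myapp',
    #       include_deleted=True,
    #       included_deleted_back_to='2012-01-01T00:00:00Z')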
def describe_events(self, application_name=None, version_label=None,
template_name=None, environment_id=None,
environment_name=None, request_id=None, severity=None,
start_time=None, end_time=None, max_records=None,
next_token=None):
"""Returns event descriptions matching criteria up to the last 6 weeks.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those associated with
this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those associated with this application
version.
:type template_name: string
:param template_name: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that are associated with this
environment configuration.
:type environment_id: string
:param environment_id: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type environment_name: string
:param environment_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type request_id: string
:param request_id: If specified, AWS Elastic Beanstalk restricts the
described events to include only those associated with this request
ID.
:type severity: string
:param severity: If specified, limits the events returned from this
call to include only those with the specified severity or higher.
:type start_time: timestamp
:param start_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur on or after this time.
:type end_time: timestamp
:param end_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur up to, but not including,
the EndTime.
:type max_records: integer
:param max_records: Specifies the maximum number of events that can be
returned, beginning with the most recent event.
:type next_token: string
:param next_token: Pagination token. If specified, the events return
the next batch of results.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if request_id:
params['RequestId'] = request_id
if severity:
params['Severity'] = severity
if start_time:
params['StartTime'] = start_time
if end_time:
params['EndTime'] = end_time
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self._get_response('DescribeEvents', params)
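    # A hedged pagination sketch: keep following NextToken until the
    # service stops returning one (the response key path is an
    # assumption about the JSON shape returned by _get_response):
    #
    #   token = None
    #   while True:
    #       page = conn.describe_events(application_name='myapp',
    #                                   next_token=token)
    #       result = page['DescribeEventsResponse']['DescribeEventsResult']
    #       token = result.get('NextToken')
    #       if not token:
    #           break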
def list_available_solution_stacks(self):
"""Returns a list of the available solution stack names."""
return self._get_response('ListAvailableSolutionStacks', params={})
def rebuild_environment(self, environment_id=None, environment_name=None):
"""
Deletes and recreates all of the AWS resources (for example:
the Auto Scaling group, load balancer, etc.) for a specified
environment and forces a restart.
:type environment_id: string
:param environment_id: The ID of the environment to rebuild.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to rebuild.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RebuildEnvironment', params)
def request_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Initiates a request to compile the specified type of
information of the deployed environment. Setting the InfoType
to tail compiles the last lines from the application server log
files of every Amazon EC2 instance in your environment. Use
RetrieveEnvironmentInfo to access the compiled information.
:type info_type: string
:param info_type: The type of information to request.
:type environment_id: string
:param environment_id: The ID of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RequestEnvironmentInfo', params)
def restart_app_server(self, environment_id=None, environment_name=None):
"""
Causes the environment to restart the application container
server running on each Amazon EC2 instance.
:type environment_id: string
:param environment_id: The ID of the environment to restart the server
for. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
            Beanstalk returns a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to restart the
server for. Condition: You must specify either this or an
EnvironmentId, or both. If you do not specify either, AWS Elastic
            Beanstalk returns a MissingRequiredParameter error.
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RestartAppServer', params)
def retrieve_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Retrieves the compiled information from a RequestEnvironmentInfo
request.
:type info_type: string
:param info_type: The type of information to retrieve.
:type environment_id: string
:param environment_id: The ID of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RetrieveEnvironmentInfo', params)
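    # request_environment_info and retrieve_environment_info work as a
    # pair: the first compiles the logs, this one fetches them. A sketch
    # (environment name hypothetical; a polling delay between the two
    # calls is usually needed in practice):
    #
    #   conn.request_environment_info(environment_name='myapp-env')
    #   info = conn.retrieve_environment_info(environment_name='myapp-env')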
def swap_environment_cnames(self, source_environment_id=None,
source_environment_name=None,
destination_environment_id=None,
destination_environment_name=None):
"""Swaps the CNAMEs of two environments.
:type source_environment_id: string
:param source_environment_id: The ID of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentId, you must specify the
DestinationEnvironmentId.
:type source_environment_name: string
:param source_environment_name: The name of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentName, you must specify the
DestinationEnvironmentName.
:type destination_environment_id: string
:param destination_environment_id: The ID of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentId with
the DestinationEnvironmentId.
:type destination_environment_name: string
:param destination_environment_name: The name of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentName with
the DestinationEnvironmentName.
"""
params = {}
if source_environment_id:
params['SourceEnvironmentId'] = source_environment_id
if source_environment_name:
params['SourceEnvironmentName'] = source_environment_name
if destination_environment_id:
params['DestinationEnvironmentId'] = destination_environment_id
if destination_environment_name:
params['DestinationEnvironmentName'] = destination_environment_name
return self._get_response('SwapEnvironmentCNAMEs', params)
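    # A blue/green swap sketch: identify both environments by name, or
    # both by ID, but do not mix the two (names hypothetical):
    #
    #   conn.swap_environment_cnames(
    #       source_environment_name='myapp-blue',
    #       destination_environment_name='myapp-green')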
def terminate_environment(self, environment_id=None, environment_name=None,
terminate_resources=None):
"""Terminates the specified environment.
:type environment_id: string
:param environment_id: The ID of the environment to terminate.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to terminate.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
:type terminate_resources: boolean
:param terminate_resources: Indicates whether the associated AWS
resources should shut down when the environment is terminated:
true: (default) The user AWS resources (for example, the Auto
Scaling group, LoadBalancer, etc.) are terminated along with the
            environment. false: The environment is removed from AWS
Elastic Beanstalk but the AWS resources continue to operate. For
more information, see the AWS Elastic Beanstalk User Guide.
Default: true Valid Values: true | false
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if terminate_resources:
params['TerminateResources'] = self._encode_bool(
terminate_resources)
return self._get_response('TerminateEnvironment', params)
def update_application(self, application_name, description=None):
"""
Updates the specified application to have the specified
properties.
:type application_name: string
:param application_name: The name of the application to update.
If no such application is found, UpdateApplication returns an
InvalidParameterValue error.
:type description: string
:param description: A new description for the application. Default: If
not specified, AWS Elastic Beanstalk does not update the
description.
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('UpdateApplication', params)
def update_application_version(self, application_name, version_label,
description=None):
"""Updates the application version to have the properties.
:type application_name: string
:param application_name: The name of the application associated with
this version. If no application is found with this name,
UpdateApplication returns an InvalidParameterValue error.
:type version_label: string
:param version_label: The name of the version to update. If no
application version is found with this label, UpdateApplication
returns an InvalidParameterValue error.
:type description: string
:param description: A new description for this release.
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
return self._get_response('UpdateApplicationVersion', params)
def update_configuration_template(self, application_name, template_name,
description=None, option_settings=None,
options_to_remove=None):
"""
Updates the specified configuration template to have the
specified properties or configuration option values.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template to update. If no application is found
with this name, UpdateConfigurationTemplate returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template to update.
If no configuration template is found with this name,
UpdateConfigurationTemplate returns an InvalidParameterValue error.
:type description: string
:param description: A new description for the configuration.
:type option_settings: list
:param option_settings: A list of configuration option settings to
update with the new specified option value.
:type options_to_remove: list
:param options_to_remove: A list of configuration options to remove
from the configuration set. Constraint: You can remove only
UserDefined configuration options.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
return self._get_response('UpdateConfigurationTemplate', params)
def update_environment(self, environment_id=None, environment_name=None,
version_label=None, template_name=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""
Updates the environment description, deploys a new application
version, updates the configuration settings to an entirely new
configuration template, or updates select configuration option
values in the running environment. Attempting to update both
the release and configuration is not allowed and AWS Elastic
Beanstalk returns an InvalidParameterCombination error. When
updating the configuration settings to a new template or
individual settings, a draft configuration is created and
DescribeConfigurationSettings for this environment returns two
setting descriptions with different DeploymentStatus values.
:type environment_id: string
:param environment_id: The ID of the environment to update. If no
environment with this ID exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentName, or both. If you do not specify either,
            AWS Elastic Beanstalk returns a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to update. If no
environment with this name exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentId, or both. If you do not specify either,
            AWS Elastic Beanstalk returns a MissingRequiredParameter error.
:type version_label: string
:param version_label: If this parameter is specified, AWS Elastic
Beanstalk deploys the named application version to the environment.
If no such application version is found, returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: If this parameter is specified, AWS Elastic
Beanstalk deploys this configuration template to the environment.
If no such configuration template is found, AWS Elastic Beanstalk
returns an InvalidParameterValue error.
:type description: string
:param description: If this parameter is specified, AWS Elastic
Beanstalk updates the description of this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk updates the
configuration set associated with the running environment and sets
the specified configuration options to the requested value.
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
            The ``tier_name`` and ``tier_type`` parameters are
related and the values provided must be valid.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
        :param tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.member.Name'] = tier_name
params['Tier.member.Type'] = tier_type
params['Tier.member.Version'] = tier_version
return self._get_response('UpdateEnvironment', params)
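    # A minimal deployment sketch: point a running environment at a new
    # application version (names and labels hypothetical):
    #
    #   conn.update_environment(environment_name='myapp-env',
    #                           version_label='v2')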
def validate_configuration_settings(self, application_name,
option_settings, template_name=None,
environment_name=None):
"""
Takes a set of configuration settings and either a
configuration template or environment, and determines whether
those values are valid. This action returns a list of messages
indicating any errors or warnings associated with the selection
of option values.
:type application_name: string
:param application_name: The name of the application that the
configuration template or environment belongs to.
:type template_name: string
:param template_name: The name of the configuration template to
validate the settings against. Condition: You cannot specify both
this and an environment name.
:type environment_name: string
:param environment_name: The name of the environment to validate the
settings against. Condition: You cannot specify both this and a
configuration template name.
:type option_settings: list
:param option_settings: A list of the options and desired values to
evaluate.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name}
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('ValidateConfigurationSettings', params)
def _build_list_params(self, params, user_values, prefix, tuple_names):
        # For params such as the ConfigurationOptionSettings, callers
        # can specify a list of tuples where each tuple maps to a specific
        # arg. For example:
        # user_values = [('foo', 'bar', 'baz')]
# prefix=MyOption.member
# tuple_names=('One', 'Two', 'Three')
# would result in:
# MyOption.member.1.One = foo
# MyOption.member.1.Two = bar
# MyOption.member.1.Three = baz
for i, user_value in enumerate(user_values, 1):
current_prefix = '%s.%s' % (prefix, i)
for key, value in zip(tuple_names, user_value):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
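        # For illustration, the expansion above turns
        #   option_settings = [('aws:elasticbeanstalk:application:environment',
        #                       'PARAM1', 'somevalue')]
        # with prefix 'OptionSettings.member' into the flat query params:
        #   OptionSettings.member.1.Namespace = aws:elasticbeanstalk:application:environment
        #   OptionSettings.member.1.OptionName = PARAM1
        #   OptionSettings.member.1.Value = somevalue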
| 46.787854
| 97
| 0.655577
|
import boto
import boto.jsonresponse
from boto.compat import json
from boto.regioninfo import RegionInfo
from boto.connection import AWSQueryConnection
class Layer1(AWSQueryConnection):
APIVersion = '2010-12-01'
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'elasticbeanstalk.us-east-1.amazonaws.com'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None,
proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
api_version=None, security_token=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(Layer1, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token)
def _required_auth_capability(self):
return ['hmac-v4']
def _encode_bool(self, v):
v = bool(v)
return {True: "true", False: "false"}[v]
def _get_response(self, action, params, path='/', verb='GET'):
params['ContentType'] = 'JSON'
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
raise self.ResponseError(response.status, response.reason, body)
def check_dns_availability(self, cname_prefix):
params = {'CNAMEPrefix': cname_prefix}
return self._get_response('CheckDNSAvailability', params)
def create_application(self, application_name, description=None):
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('CreateApplication', params)
def create_application_version(self, application_name, version_label,
description=None, s3_bucket=None,
s3_key=None, auto_create_application=None):
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
if s3_bucket and s3_key:
params['SourceBundle.S3Bucket'] = s3_bucket
params['SourceBundle.S3Key'] = s3_key
if auto_create_application:
params['AutoCreateApplication'] = self._encode_bool(
auto_create_application)
return self._get_response('CreateApplicationVersion', params)
def create_configuration_template(self, application_name, template_name,
solution_stack_name=None,
source_configuration_application_name=None,
source_configuration_template_name=None,
environment_id=None, description=None,
option_settings=None):
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if source_configuration_application_name:
params['SourceConfiguration.ApplicationName'] = source_configuration_application_name
if source_configuration_template_name:
params['SourceConfiguration.TemplateName'] = source_configuration_template_name
if environment_id:
params['EnvironmentId'] = environment_id
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
return self._get_response('CreateConfigurationTemplate', params)
def create_environment(self, application_name, environment_name,
version_label=None, template_name=None,
solution_stack_name=None, cname_prefix=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if cname_prefix:
params['CNAMEPrefix'] = cname_prefix
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.member.Name'] = tier_name
params['Tier.member.Type'] = tier_type
params['Tier.member.Version'] = tier_version
return self._get_response('CreateEnvironment', params)
def create_storage_location(self):
return self._get_response('CreateStorageLocation', params={})
def delete_application(self, application_name,
terminate_env_by_force=None):
params = {'ApplicationName': application_name}
if terminate_env_by_force:
params['TerminateEnvByForce'] = self._encode_bool(
terminate_env_by_force)
return self._get_response('DeleteApplication', params)
def delete_application_version(self, application_name, version_label,
delete_source_bundle=None):
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if delete_source_bundle:
params['DeleteSourceBundle'] = self._encode_bool(
delete_source_bundle)
return self._get_response('DeleteApplicationVersion', params)
def delete_configuration_template(self, application_name, template_name):
params = {'ApplicationName': application_name,
'TemplateName': template_name}
return self._get_response('DeleteConfigurationTemplate', params)
def delete_environment_configuration(self, application_name,
environment_name):
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
return self._get_response('DeleteEnvironmentConfiguration', params)
def describe_application_versions(self, application_name=None,
version_labels=None):
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_labels:
self.build_list_params(params, version_labels,
'VersionLabels.member')
return self._get_response('DescribeApplicationVersions', params)
def describe_applications(self, application_names=None):
params = {}
if application_names:
self.build_list_params(params, application_names,
'ApplicationNames.member')
return self._get_response('DescribeApplications', params)
def describe_configuration_options(self, application_name=None,
template_name=None,
environment_name=None,
solution_stack_name=None, options=None):
params = {}
if application_name:
params['ApplicationName'] = application_name
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if options:
self.build_list_params(params, options, 'Options.member')
return self._get_response('DescribeConfigurationOptions', params)
def describe_configuration_settings(self, application_name,
template_name=None,
environment_name=None):
params = {'ApplicationName': application_name}
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeConfigurationSettings', params)
def describe_environment_resources(self, environment_id=None,
environment_name=None):
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeEnvironmentResources', params)
def describe_environments(self, application_name=None, version_label=None,
environment_ids=None, environment_names=None,
include_deleted=None,
included_deleted_back_to=None):
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if environment_ids:
self.build_list_params(params, environment_ids,
'EnvironmentIds.member')
if environment_names:
self.build_list_params(params, environment_names,
'EnvironmentNames.member')
if include_deleted:
params['IncludeDeleted'] = self._encode_bool(include_deleted)
if included_deleted_back_to:
params['IncludedDeletedBackTo'] = included_deleted_back_to
return self._get_response('DescribeEnvironments', params)
def describe_events(self, application_name=None, version_label=None,
template_name=None, environment_id=None,
environment_name=None, request_id=None, severity=None,
start_time=None, end_time=None, max_records=None,
next_token=None):
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if request_id:
params['RequestId'] = request_id
if severity:
params['Severity'] = severity
if start_time:
params['StartTime'] = start_time
if end_time:
params['EndTime'] = end_time
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self._get_response('DescribeEvents', params)
def list_available_solution_stacks(self):
return self._get_response('ListAvailableSolutionStacks', params={})
def rebuild_environment(self, environment_id=None, environment_name=None):
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RebuildEnvironment', params)
def request_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RequestEnvironmentInfo', params)
def restart_app_server(self, environment_id=None, environment_name=None):
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RestartAppServer', params)
def retrieve_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RetrieveEnvironmentInfo', params)
def swap_environment_cnames(self, source_environment_id=None,
source_environment_name=None,
destination_environment_id=None,
destination_environment_name=None):
params = {}
if source_environment_id:
params['SourceEnvironmentId'] = source_environment_id
if source_environment_name:
params['SourceEnvironmentName'] = source_environment_name
if destination_environment_id:
params['DestinationEnvironmentId'] = destination_environment_id
if destination_environment_name:
params['DestinationEnvironmentName'] = destination_environment_name
return self._get_response('SwapEnvironmentCNAMEs', params)
def terminate_environment(self, environment_id=None, environment_name=None,
terminate_resources=None):
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if terminate_resources:
params['TerminateResources'] = self._encode_bool(
terminate_resources)
return self._get_response('TerminateEnvironment', params)
def update_application(self, application_name, description=None):
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('UpdateApplication', params)
def update_application_version(self, application_name, version_label,
description=None):
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
return self._get_response('UpdateApplicationVersion', params)
def update_configuration_template(self, application_name, template_name,
description=None, option_settings=None,
options_to_remove=None):
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
return self._get_response('UpdateConfigurationTemplate', params)
def update_environment(self, environment_id=None, environment_name=None,
version_label=None, template_name=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.member.Name'] = tier_name
params['Tier.member.Type'] = tier_type
params['Tier.member.Version'] = tier_version
return self._get_response('UpdateEnvironment', params)
def validate_configuration_settings(self, application_name,
option_settings, template_name=None,
environment_name=None):
params = {'ApplicationName': application_name}
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('ValidateConfigurationSettings', params)
def _build_list_params(self, params, user_values, prefix, tuple_names):
for i, user_value in enumerate(user_values, 1):
current_prefix = '%s.%s' % (prefix, i)
for key, value in zip(tuple_names, user_value):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
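# Hedged usage sketch (illustrative only, not part of the original module;
# assumes boto's usual elasticbeanstalk connection plumbing and credentials):
#
#   import boto.beanstalk
#   conn = boto.beanstalk.connect_to_region('us-east-1')
#   conn.create_application('my-app', description='demo')
#   conn.create_environment('my-app', 'my-env',
#                           solution_stack_name='64bit Amazon Linux running Python')
#
# Every method above reduces to _get_response(action, params), which forces
# ContentType=JSON and json-decodes the body of a 200 response.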
| true
| true
|
f70a6be4011c55e74593391945721361e11f0255
| 814
|
py
|
Python
|
concurrency-overview/io_mp.py
|
syberflea/materials
|
54f44725b40edf00c1b523d7a85b34a85014d7eb
|
[
"MIT"
] | 3,682
|
2018-05-07T19:45:24.000Z
|
2022-03-31T15:19:10.000Z
|
concurrency-overview/io_mp.py
|
sribarrow/materials
|
c17c4a4d6f8487e59eac1df8c88ca92b73d6d2a5
|
[
"MIT"
] | 148
|
2018-05-15T21:18:49.000Z
|
2022-03-21T11:25:39.000Z
|
concurrency-overview/io_mp.py
|
sribarrow/materials
|
c17c4a4d6f8487e59eac1df8c88ca92b73d6d2a5
|
[
"MIT"
] | 5,535
|
2018-05-25T23:36:08.000Z
|
2022-03-31T16:55:52.000Z
|
#!/usr/bin/env python3
import requests
import multiprocessing
import time
session = None
def set_global_session():
global session
if not session:
session = requests.Session()
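# The Pool initializer below runs set_global_session once per worker process,
# so each process reuses a single requests.Session (and its connection pool)
# instead of opening a fresh connection for every URL.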
def download_site(url):
with session.get(url) as response:
name = multiprocessing.current_process().name
print(f"{name}:Read {len(response.content)} from {url}")
def download_all_sites(sites):
with multiprocessing.Pool(initializer=set_global_session) as pool:
pool.map(download_site, sites)
if __name__ == "__main__":
sites = [
"https://www.jython.org",
"http://olympus.realpython.org/dice",
] * 80
start_time = time.time()
download_all_sites(sites)
duration = time.time() - start_time
print(f"Downloaded {len(sites)} in {duration} seconds")
| 23.257143
| 70
| 0.675676
|
import requests
import multiprocessing
import time
session = None
def set_global_session():
global session
if not session:
session = requests.Session()
def download_site(url):
with session.get(url) as response:
name = multiprocessing.current_process().name
print(f"{name}:Read {len(response.content)} from {url}")
def download_all_sites(sites):
with multiprocessing.Pool(initializer=set_global_session) as pool:
pool.map(download_site, sites)
if __name__ == "__main__":
sites = [
"https://www.jython.org",
"http://olympus.realpython.org/dice",
] * 80
start_time = time.time()
download_all_sites(sites)
duration = time.time() - start_time
print(f"Downloaded {len(sites)} in {duration} seconds")
| true
| true
|
f70a6c957ea73f9a7a4e7f4df245a126a32c588e
| 22,064
|
py
|
Python
|
tests/test_filters.py
|
ticketmaster/cloud-custodian
|
0da3866f70f858895af228cc08706d0909a2a324
|
[
"Apache-2.0"
] | null | null | null |
tests/test_filters.py
|
ticketmaster/cloud-custodian
|
0da3866f70f858895af228cc08706d0909a2a324
|
[
"Apache-2.0"
] | 4
|
2017-02-02T17:08:23.000Z
|
2017-05-25T19:33:19.000Z
|
tests/test_filters.py
|
ticketmaster/cloud-custodian
|
0da3866f70f858895af228cc08706d0909a2a324
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from dateutil import tz
from datetime import datetime, timedelta
import unittest
from c7n import filters as base_filters
from c7n.resources.ec2 import filters
from c7n.utils import annotation
from .common import instance, event_data, Bag
class BaseFilterTest(unittest.TestCase):
def assertFilter(self, f, i, v):
"""
f: filter data/spec
i: instance
v: expected value (true/false)
"""
try:
self.assertEqual(filters.factory(f)(i), v)
except AssertionError:
print(f, i['LaunchTime'], i['Tags'], v)
raise
class TestFilter(unittest.TestCase):
def test_filter_construction(self):
self.assertTrue(
isinstance(
filters.factory({'tag:ASV': 'absent'}),
base_filters.ValueFilter))
def test_filter_validation(self):
self.assertRaises(
base_filters.FilterValidationError,
filters.factory, {'type': 'ax', 'xyz': 1})
def test_filter_call(self):
filter_instance = base_filters.Filter({})
self.assertIsInstance(filter_instance, base_filters.Filter)
class TestOrFilter(unittest.TestCase):
def test_or(self):
f = filters.factory({
'or': [
{'Architecture': 'x86_64'},
{'Architecture': 'armv8'}]})
results = [instance(Architecture='x86_64')]
self.assertEqual(
f.process(results),
results)
self.assertEqual(
f.process([instance(Architecture='amd64')]),
[])
class TestAndFilter(unittest.TestCase):
def test_and(self):
f = filters.factory({
'and': [
{'Architecture': 'x86_64'},
{'Color': 'green'}]})
results = [instance(Architecture='x86_64', Color='green')]
self.assertEqual(
f.process(results),
results)
self.assertEqual(
f.process([
instance(
Architecture='x86_64',
Color='blue')]),
[])
self.assertEqual(
f.process([
instance(
Architecture='x86_64')]),
[])
class TestNotFilter(unittest.TestCase):
def test_not(self):
results = [
instance(Architecture='x86_64', Color='green'),
instance(Architecture='x86_64', Color='blue'),
instance(Architecture='x86_64', Color='yellow'),
]
f = filters.factory({
'not': [
{'Architecture': 'x86_64'},
{'Color': 'green'}]})
self.assertEqual(len(f.process(results)), 2)
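# 'not' negates the conjunction of its child filters, so only the green
# x86_64 instance is excluded and two of the three instances remain.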
"""
f = filters.factory({
'not': [
{'Architecture': 'x86'}]})
self.assertEqual(len(f.process(results)), 3)
f = filters.factory({
'not': [
{'Architecture': 'x86_64'},
{'or': [
{'Color': 'green'},
{'Color': 'blue'},
{'Color': 'yellow'},
]}]})
self.assertEqual(len(f.process(results)), 0)
"""
class TestValueFilter(unittest.TestCase):
# TODO test_manager needs a valid session_factory object
# def test_value_match(self):
# test_manager = ???
# f_data = {
# 'type': 'value',
# 'key': 'day',
# 'value': 5,
# 'value_from': {
# 'url': 's3://custodian-byebye/resource.json',
# },
# }
# vf = filters.factory(f_data, test_manager)
# vf.match({'tag:ASV': 'present'})
def test_value_type(self):
sentinel = datetime.now()
value = 5
resource = {'a': 1, 'Tags': [{'Key': 'xtra', 'Value': 'hello'}]}
vf = filters.factory({'tag:ASV': 'absent'})
vf.vtype = 'size'
res = vf.process_value_type(sentinel, value, resource)
self.assertEqual(res, (sentinel, 0))
vf.vtype = 'age'
res = vf.process_value_type(sentinel, value, resource)
self.assertEqual(res, (0, sentinel))
vf.vtype = 'cidr'
sentinel = '10.0.0.0/16'
value = '10.10.10.10'
res = vf.process_value_type(sentinel, value, resource)
self.assertEqual(
(str(res[0]), str(res[1])),
(sentinel, value),
)
vf.vtype = 'cidr_size'
value = '10.10.10.300'
res = vf.process_value_type(sentinel, value, resource)
self.assertEqual(res, (sentinel, 0))
vf.vtype = 'expr'
value = 'tag:xtra'
sentinel = None
res = vf.process_value_type(sentinel, value, resource)
self.assertEqual(res, (None, 'hello'))
vf.vtype = 'expr'
value = 'a'
sentinel = None
res = vf.process_value_type(sentinel, value, resource)
self.assertEqual(res, (None, 1))
class TestAgeFilter(unittest.TestCase):
def test_age_filter(self):
af = base_filters.AgeFilter({})
self.assertRaises(NotImplementedError, af.validate)
class TestGlobValue(unittest.TestCase):
def test_regex_match(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': '*green*',
'op': 'glob'})
self.assertEqual(
f(instance(
Architecture='x86_64',
Color='mighty green papaya')),
True)
self.assertEqual(
f(instance(
Architecture='x86_64',
Color='blue')),
False)
def test_glob_match(self):
glob_match = base_filters.core.glob_match
self.assertFalse(glob_match(0, ''))
class TestRegexValue(unittest.TestCase):
def test_regex_validate(self):
self.assertRaises(
base_filters.FilterValidationError,
filters.factory({
'type': 'value',
'key': 'Color',
'value': '*green',
'op': 'regex'}).validate)
def test_regex_match(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': '.*green.*',
'op': 'regex'})
self.assertEqual(
f(instance(
Architecture='x86_64',
Color='green papaya')),
True)
self.assertEqual(
f(instance(
Architecture='x86_64',
Color='blue')),
False)
self.assertEqual(
f(instance(
Architecture='x86_64')),
False)
class TestValueTypes(BaseFilterTest):
def test_normalize(self):
fdata = {
'type': 'value',
'key': 'tag:Name',
'value_type': 'normalize',
'value': 'compilelambda'
}
self.assertFilter(fdata, instance(), True)
def test_size(self):
fdata = {
'type': 'value',
'key': 'SecurityGroups[].GroupId',
'value_type': 'size',
'value': 2
}
self.assertFilter(fdata, instance(), True)
def test_integer(self):
fdata = {
'type': 'value',
'key': 'tag:Count',
'op': 'greater-than',
'value_type': 'integer',
'value': 0}
def i(d):
return instance(Tags=[{"Key": "Count", "Value": d}])
self.assertFilter(fdata, i('42'), True)
self.assertFilter(fdata, i('abc'), False)
fdata['op'] = 'equal'
self.assertFilter(fdata, i('abc'), True)
def test_swap(self):
fdata = {
'type': 'value',
'key': 'SecurityGroups[].GroupId',
'value_type': 'swap',
'op': 'in',
'value': 'sg-47b76f22'
}
self.assertFilter(fdata, instance(), True)
def test_age(self):
now = datetime.now(tz=tz.tzutc())
three_months = now - timedelta(90)
two_months = now - timedelta(60)
one_month = now - timedelta(30)
def i(d):
return instance(LaunchTime=d)
fdata = {
'type': 'value',
'key': 'LaunchTime',
'op': 'less-than',
'value_type': 'age',
'value': 32}
self.assertFilter(fdata, i(three_months), False)
self.assertFilter(fdata, i(two_months), False)
self.assertFilter(fdata, i(one_month), True)
self.assertFilter(fdata, i(now), True)
self.assertFilter(fdata, i(now.isoformat()), True)
def test_expiration(self):
now = datetime.now(tz=tz.tzutc())
three_months = now + timedelta(90)
two_months = now + timedelta(60)
def i(d):
return instance(LaunchTime=d)
fdata = {
'type': 'value',
'key': 'LaunchTime',
'op': 'less-than',
'value_type': 'expiration',
'value': 61}
self.assertFilter(fdata, i(three_months), False)
self.assertFilter(fdata, i(two_months), True)
self.assertFilter(fdata, i(now), True)
self.assertFilter(fdata, i(now.isoformat()), True)
def test_resource_count_filter(self):
fdata = {
'type': 'value',
'value_type': 'resource_count',
'op': 'lt',
'value': 2
}
self.assertFilter(fdata, instance(file='ec2-instances.json'), [])
f = filters.factory({
'type': 'value',
'value_type': 'resource_count',
'op': 'eq',
'value': 2
})
i = instance(file='ec2-instances.json')
self.assertEqual(i, f(i))
def test_resource_count_filter_validation(self):
# Bad `op`
f = {
'type': 'value',
'value_type': 'resource_count',
'op': 'regex',
'value': 1,
}
self.assertRaises(
base_filters.FilterValidationError, filters.factory(f, {}).validate)
# Bad `value`
f = {
'type': 'value',
'value_type': 'resource_count',
'op': 'eq',
'value': 'foo',
}
self.assertRaises(
base_filters.FilterValidationError, filters.factory(f, {}).validate)
# Missing `op`
f = {
'type': 'value',
'value_type': 'resource_count',
'value': 1,
}
self.assertRaises(
base_filters.FilterValidationError, filters.factory(f, {}).validate)
class TestInstanceAge(BaseFilterTest):
def test_filter_instance_age(self):
now = datetime.now(tz=tz.tzutc())
three_months = now - timedelta(90)
two_months = now - timedelta(60)
one_month = now - timedelta(30)
def i(d):
return instance(LaunchTime=d)
for ii, v in [
(i(now), False),
(i(three_months), True),
(i(two_months), True),
(i(one_month), False)
]:
self.assertFilter({'type': 'instance-uptime', 'op': 'gte', 'days': 60}, ii, v)
class TestInstanceAgeMinute(BaseFilterTest):
def test_filter_instance_age(self):
now = datetime.now(tz=tz.tzutc())
five_minute = now - timedelta(minutes=5)
def i(d):
return instance(LaunchTime=d)
for ii, v in [
(i(now), False),
(i(five_minute), True)
]:
self.assertFilter({'type': 'instance-uptime', 'op': 'gte', 'minutes': 5}, ii, v)
class TestMarkedForAction(BaseFilterTest):
def test_marked_for_op_with_skew(self):
now = datetime.now()
yesterday = datetime.now() - timedelta(7)
next_week = now + timedelta(7)
def i(d, action='stop'):
return instance(Tags=[
{"Key": "maid_status",
"Value": "not compliant: %s@%s" % (
action, d.strftime("%Y/%m/%d"))}])
for inst, skew, expected in [
(i(next_week), 7, True),
(i(next_week), 3, False),
(i(now), 0, True),
(i(now), 5, True),
(i(yesterday), 5, True),
(i(now+timedelta(1)), 1, True),
(i(now+timedelta(2)), 1, False),
(i(now+timedelta(3)), 1, False)
]:
self.assertFilter(
{'type': 'marked-for-op', 'skew': skew}, inst, expected)
def test_filter_action_date(self):
now = datetime.now()
yesterday = now - timedelta(1)
tomorrow = now + timedelta(1)
def i(d, action='stop'):
return instance(Tags=[
{"Key": "maid_status",
"Value": "not compliant: %s@%s" % (
action, d.strftime("%Y/%m/%d"))}])
for ii, v in [
(i(yesterday), True),
(i(now), True),
(i(tomorrow), False),
(i(yesterday, 'terminate'), False)
]:
self.assertFilter({'type': 'marked-for-op'}, ii, v)
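# marked-for-op parses "action@YYYY/MM/DD" out of the maid_status tag and
# matches once today plus the skew (in days) reaches the tagged date, with
# 'stop' as the default action.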
class EventFilterTest(BaseFilterTest):
def test_event_filter(self):
b = Bag(data={'mode': []})
event = event_data('event-instance-state.json')
f = {'type': 'event',
'key': 'detail.state',
'value': 'pending'}
ef = filters.factory(f, b)
self.assertTrue(ef.process(
[instance()], event))
# event is None
self.assertEqual(ef.process('resources'), 'resources')
# event is not None, but is not "true" either
self.assertEqual(ef.process('resources', []), [])
def test_event_no_mode(self):
b = Bag(data={'resource': 'something'})
f = {'type': 'event',
'key': 'detail.state',
'value': 'pending'}
f = filters.factory(f, b)
self.assertRaises(
base_filters.FilterValidationError, f.validate)
class TestInstanceValue(BaseFilterTest):
def test_filter_tag_count(self):
tags = []
for i in range(10):
tags.append({'Key': str(i), 'Value': str(i)})
i = instance(Tags=tags)
self.assertFilter(
{'type': 'tag-count', 'op': 'lt'}, i, False)
tags.pop(0)
i = instance(Tags=tags)
self.assertFilter(
{'type': 'tag-count', 'op': 'gte', 'count': 9}, i, True)
def test_filter_tag(self):
i = instance(Tags=[
{'Key': 'ASV', 'Value': 'abcd'}])
self.assertFilter(
{'tag:ASV': 'def'}, i, False)
self.assertEqual(
annotation(i, base_filters.ANNOTATION_KEY), ())
i = instance(Tags=[
{'Key': 'CMDB', 'Value': 'abcd'}])
self.assertFilter(
{'tag:ASV': 'absent'}, i, True)
self.assertEqual(
annotation(i, base_filters.ANNOTATION_KEY), ['tag:ASV'])
def test_present(self):
i = instance(Tags=[
{'Key': 'ASV', 'Value': ''}])
self.assertFilter(
{'type': 'value',
'key': 'tag:ASV',
'value': 'present'},
i, True)
def test_jmespath(self):
self.assertFilter(
{'Placement.AvailabilityZone': 'us-west-2c'},
instance(),
True)
self.assertFilter(
{'Placement.AvailabilityZone': 'us-east-1c'},
instance(),
False)
def test_complex_validator(self):
self.assertRaises(
base_filters.FilterValidationError,
filters.factory({
"key": "xyz", "type": "value"}).validate)
self.assertRaises(
base_filters.FilterValidationError,
filters.factory({
"value": "xyz", "type": "value"}).validate)
self.assertRaises(
base_filters.FilterValidationError,
filters.factory({
"key": "xyz",
"value": "xyz",
"op": "oo",
"type": "value"}).validate)
def test_complex_value_filter(self):
self.assertFilter(
{"key": (
"length(BlockDeviceMappings"
"[?Ebs.DeleteOnTermination == `true`]"
".Ebs.DeleteOnTermination)"),
"value": 0,
"type": "value",
"op": "gt"},
instance(),
True)
def test_not_null_filter(self):
self.assertFilter(
{"key": "Hypervisor",
"value": "not-null",
"type": "value"},
instance(),
True)
class TestEqualValue(unittest.TestCase):
def test_eq(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': 'green',
'op': 'eq'})
self.assertEqual(
f(instance(Color='green')),
True)
self.assertEqual(
f(instance(Color='blue')),
False)
def test_equal(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': 'green',
'op': 'equal'})
self.assertEqual(
f(instance(Color='green')),
True)
self.assertEqual(
f(instance(Color='blue')),
False)
class TestNotEqualValue(unittest.TestCase):
def test_ne(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': 'green',
'op': 'ne'})
self.assertEqual(
f(instance(Color='green')),
False)
self.assertEqual(
f(instance(Color='blue')),
True)
def test_not_equal(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': 'green',
'op': 'not-equal'})
self.assertEqual(
f(instance(Color='green')),
False)
self.assertEqual(
f(instance(Color='blue')),
True)
class TestGreaterThanValue(unittest.TestCase):
def test_gt(self):
f = filters.factory(
{'type': 'value',
'key': 'Number',
'value': 10,
'op': 'gt'})
self.assertEqual(
f(instance(Number=11)),
True)
self.assertEqual(
f(instance(Number=9)),
False)
self.assertEqual(
f(instance(Number=10)),
False)
def test_greater_than(self):
f = filters.factory(
{'type': 'value',
'key': 'Number',
'value': 10,
'op': 'greater-than'})
self.assertEqual(
f(instance(Number=11)),
True)
self.assertEqual(
f(instance(Number=9)),
False)
self.assertEqual(
f(instance(Number=10)),
False)
class TestLessThanValue(unittest.TestCase):
def test_lt(self):
f = filters.factory(
{'type': 'value',
'key': 'Number',
'value': 10,
'op': 'lt'})
self.assertEqual(
f(instance(Number=9)),
True)
self.assertEqual(
f(instance(Number=11)),
False)
self.assertEqual(
f(instance(Number=10)),
False)
def test_less_than(self):
f = filters.factory(
{'type': 'value',
'key': 'Number',
'value': 10,
'op': 'less-than'})
self.assertEqual(
f(instance(Number=9)),
True)
self.assertEqual(
f(instance(Number=11)),
False)
self.assertEqual(
f(instance(Number=10)),
False)
class TestInList(unittest.TestCase):
def test_in(self):
f = filters.factory(
{'type': 'value',
'key': 'Thing',
'value': ['Foo', 'Bar', 'Quux'],
'op': 'in'})
self.assertEqual(
f(instance(Thing='Foo')),
True)
self.assertEqual(
f(instance(Thing='Baz')),
False)
class TestNotInList(unittest.TestCase):
def test_ni(self):
f = filters.factory(
{'type': 'value',
'key': 'Thing',
'value': ['Foo', 'Bar', 'Quux'],
'op': 'ni'})
self.assertEqual(
f(instance(Thing='Baz')),
True)
self.assertEqual(
f(instance(Thing='Foo')),
False)
def test_not_in(self):
f = filters.factory(
{'type': 'value',
'key': 'Thing',
'value': ['Foo', 'Bar', 'Quux'],
'op': 'not-in'})
self.assertEqual(
f(instance(Thing='Baz')),
True)
self.assertEqual(
f(instance(Thing='Foo')),
False)
class TestFilterRegistry(unittest.TestCase):
def test_filter_registry(self):
reg = base_filters.FilterRegistry('test.filters')
self.assertRaises(
base_filters.FilterValidationError,
reg.factory,
{'type': ''},
)
if __name__ == '__main__':
unittest.main()
| 28.654545
| 92
| 0.497145
|
from __future__ import absolute_import, division, print_function, unicode_literals
from dateutil import tz
from datetime import datetime, timedelta
import unittest
from c7n import filters as base_filters
from c7n.resources.ec2 import filters
from c7n.utils import annotation
from .common import instance, event_data, Bag
class BaseFilterTest(unittest.TestCase):
def assertFilter(self, f, i, v):
try:
self.assertEqual(filters.factory(f)(i), v)
except AssertionError:
print(f, i['LaunchTime'], i['Tags'], v)
raise
class TestFilter(unittest.TestCase):
def test_filter_construction(self):
self.assertTrue(
isinstance(
filters.factory({'tag:ASV': 'absent'}),
base_filters.ValueFilter))
def test_filter_validation(self):
self.assertRaises(
base_filters.FilterValidationError,
filters.factory, {'type': 'ax', 'xyz': 1})
def test_filter_call(self):
filter_instance = base_filters.Filter({})
self.assertIsInstance(filter_instance, base_filters.Filter)
class TestOrFilter(unittest.TestCase):
def test_or(self):
f = filters.factory({
'or': [
{'Architecture': 'x86_64'},
{'Architecture': 'armv8'}]})
results = [instance(Architecture='x86_64')]
self.assertEqual(
f.process(results),
results)
self.assertEqual(
f.process([instance(Architecture='amd64')]),
[])
class TestAndFilter(unittest.TestCase):
def test_and(self):
f = filters.factory({
'and': [
{'Architecture': 'x86_64'},
{'Color': 'green'}]})
results = [instance(Architecture='x86_64', Color='green')]
self.assertEqual(
f.process(results),
results)
self.assertEqual(
f.process([
instance(
Architecture='x86_64',
Color='blue')]),
[])
self.assertEqual(
f.process([
instance(
Architecture='x86_64')]),
[])
class TestNotFilter(unittest.TestCase):
def test_not(self):
results = [
instance(Architecture='x86_64', Color='green'),
instance(Architecture='x86_64', Color='blue'),
instance(Architecture='x86_64', Color='yellow'),
]
f = filters.factory({
'not': [
{'Architecture': 'x86_64'},
{'Color': 'green'}]})
self.assertEqual(len(f.process(results)), 2)
class TestValueFilter(unittest.TestCase):
def test_value_type(self):
sentinel = datetime.now()
value = 5
resource = {'a': 1, 'Tags': [{'Key': 'xtra', 'Value': 'hello'}]}
vf = filters.factory({'tag:ASV': 'absent'})
vf.vtype = 'size'
res = vf.process_value_type(sentinel, value, resource)
self.assertEqual(res, (sentinel, 0))
vf.vtype = 'age'
res = vf.process_value_type(sentinel, value, resource)
self.assertEqual(res, (0, sentinel))
vf.vtype = 'cidr'
sentinel = '10.0.0.0/16'
value = '10.10.10.10'
res = vf.process_value_type(sentinel, value, resource)
self.assertEqual(
(str(res[0]), str(res[1])),
(sentinel, value),
)
vf.vtype = 'cidr_size'
value = '10.10.10.300'
res = vf.process_value_type(sentinel, value, resource)
self.assertEqual(res, (sentinel, 0))
vf.vtype = 'expr'
value = 'tag:xtra'
sentinel = None
res = vf.process_value_type(sentinel, value, resource)
self.assertEqual(res, (None, 'hello'))
vf.vtype = 'expr'
value = 'a'
sentinel = None
res = vf.process_value_type(sentinel, value, resource)
self.assertEqual(res, (None, 1))
class TestAgeFilter(unittest.TestCase):
def test_age_filter(self):
af = base_filters.AgeFilter({})
self.assertRaises(NotImplementedError, af.validate)
class TestGlobValue(unittest.TestCase):
def test_regex_match(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': '*green*',
'op': 'glob'})
self.assertEqual(
f(instance(
Architecture='x86_64',
Color='mighty green papaya')),
True)
self.assertEqual(
f(instance(
Architecture='x86_64',
Color='blue')),
False)
def test_glob_match(self):
glob_match = base_filters.core.glob_match
self.assertFalse(glob_match(0, ''))
class TestRegexValue(unittest.TestCase):
def test_regex_validate(self):
self.assertRaises(
base_filters.FilterValidationError,
filters.factory({
'type': 'value',
'key': 'Color',
'value': '*green',
'op': 'regex'}).validate)
def test_regex_match(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': '.*green.*',
'op': 'regex'})
self.assertEqual(
f(instance(
Architecture='x86_64',
Color='green papaya')),
True)
self.assertEqual(
f(instance(
Architecture='x86_64',
Color='blue')),
False)
self.assertEqual(
f(instance(
Architecture='x86_64')),
False)
class TestValueTypes(BaseFilterTest):
def test_normalize(self):
fdata = {
'type': 'value',
'key': 'tag:Name',
'value_type': 'normalize',
'value': 'compilelambda'
}
self.assertFilter(fdata, instance(), True)
def test_size(self):
fdata = {
'type': 'value',
'key': 'SecurityGroups[].GroupId',
'value_type': 'size',
'value': 2
}
self.assertFilter(fdata, instance(), True)
def test_integer(self):
fdata = {
'type': 'value',
'key': 'tag:Count',
'op': 'greater-than',
'value_type': 'integer',
'value': 0}
def i(d):
return instance(Tags=[{"Key": "Count", "Value": d}])
self.assertFilter(fdata, i('42'), True)
self.assertFilter(fdata, i('abc'), False)
fdata['op'] = 'equal'
self.assertFilter(fdata, i('abc'), True)
def test_swap(self):
fdata = {
'type': 'value',
'key': 'SecurityGroups[].GroupId',
'value_type': 'swap',
'op': 'in',
'value': 'sg-47b76f22'
}
self.assertFilter(fdata, instance(), True)
def test_age(self):
now = datetime.now(tz=tz.tzutc())
three_months = now - timedelta(90)
two_months = now - timedelta(60)
one_month = now - timedelta(30)
def i(d):
return instance(LaunchTime=d)
fdata = {
'type': 'value',
'key': 'LaunchTime',
'op': 'less-than',
'value_type': 'age',
'value': 32}
self.assertFilter(fdata, i(three_months), False)
self.assertFilter(fdata, i(two_months), False)
self.assertFilter(fdata, i(one_month), True)
self.assertFilter(fdata, i(now), True)
self.assertFilter(fdata, i(now.isoformat()), True)
def test_expiration(self):
now = datetime.now(tz=tz.tzutc())
three_months = now + timedelta(90)
two_months = now + timedelta(60)
def i(d):
return instance(LaunchTime=d)
fdata = {
'type': 'value',
'key': 'LaunchTime',
'op': 'less-than',
'value_type': 'expiration',
'value': 61}
self.assertFilter(fdata, i(three_months), False)
self.assertFilter(fdata, i(two_months), True)
self.assertFilter(fdata, i(now), True)
self.assertFilter(fdata, i(now.isoformat()), True)
def test_resource_count_filter(self):
fdata = {
'type': 'value',
'value_type': 'resource_count',
'op': 'lt',
'value': 2
}
self.assertFilter(fdata, instance(file='ec2-instances.json'), [])
f = filters.factory({
'type': 'value',
'value_type': 'resource_count',
'op': 'eq',
'value': 2
})
i = instance(file='ec2-instances.json')
self.assertEqual(i, f(i))
def test_resource_count_filter_validation(self):
f = {
'type': 'value',
'value_type': 'resource_count',
'op': 'regex',
'value': 1,
}
self.assertRaises(
base_filters.FilterValidationError, filters.factory(f, {}).validate)
f = {
'type': 'value',
'value_type': 'resource_count',
'op': 'eq',
'value': 'foo',
}
self.assertRaises(
base_filters.FilterValidationError, filters.factory(f, {}).validate)
f = {
'type': 'value',
'value_type': 'resource_count',
'value': 1,
}
self.assertRaises(
base_filters.FilterValidationError, filters.factory(f, {}).validate)
class TestInstanceAge(BaseFilterTest):
def test_filter_instance_age(self):
now = datetime.now(tz=tz.tzutc())
three_months = now - timedelta(90)
two_months = now - timedelta(60)
one_month = now - timedelta(30)
def i(d):
return instance(LaunchTime=d)
for ii, v in [
(i(now), False),
(i(three_months), True),
(i(two_months), True),
(i(one_month), False)
]:
self.assertFilter({'type': 'instance-uptime', 'op': 'gte', 'days': 60}, ii, v)
class TestInstanceAgeMinute(BaseFilterTest):
def test_filter_instance_age(self):
now = datetime.now(tz=tz.tzutc())
five_minute = now - timedelta(minutes=5)
def i(d):
return instance(LaunchTime=d)
for ii, v in [
(i(now), False),
(i(five_minute), True)
]:
self.assertFilter({'type': 'instance-uptime', 'op': 'gte', 'minutes': 5}, ii, v)
class TestMarkedForAction(BaseFilterTest):
def test_marked_for_op_with_skew(self):
now = datetime.now()
yesterday = datetime.now() - timedelta(7)
next_week = now + timedelta(7)
def i(d, action='stop'):
return instance(Tags=[
{"Key": "maid_status",
"Value": "not compliant: %s@%s" % (
action, d.strftime("%Y/%m/%d"))}])
for inst, skew, expected in [
(i(next_week), 7, True),
(i(next_week), 3, False),
(i(now), 0, True),
(i(now), 5, True),
(i(yesterday), 5, True),
(i(now+timedelta(1)), 1, True),
(i(now+timedelta(2)), 1, False),
(i(now+timedelta(3)), 1, False)
]:
self.assertFilter(
{'type': 'marked-for-op', 'skew': skew}, inst, expected)
def test_filter_action_date(self):
now = datetime.now()
yesterday = now - timedelta(1)
tomorrow = now + timedelta(1)
def i(d, action='stop'):
return instance(Tags=[
{"Key": "maid_status",
"Value": "not compliant: %s@%s" % (
action, d.strftime("%Y/%m/%d"))}])
for ii, v in [
(i(yesterday), True),
(i(now), True),
(i(tomorrow), False),
(i(yesterday, 'terminate'), False)
]:
self.assertFilter({'type': 'marked-for-op'}, ii, v)
class EventFilterTest(BaseFilterTest):
def test_event_filter(self):
b = Bag(data={'mode': []})
event = event_data('event-instance-state.json')
f = {'type': 'event',
'key': 'detail.state',
'value': 'pending'}
ef = filters.factory(f, b)
self.assertTrue(ef.process(
[instance()], event))
self.assertEqual(ef.process('resources'), 'resources')
self.assertEqual(ef.process('resources', []), [])
def test_event_no_mode(self):
b = Bag(data={'resource': 'something'})
f = {'type': 'event',
'key': 'detail.state',
'value': 'pending'}
f = filters.factory(f, b)
self.assertRaises(
base_filters.FilterValidationError, f.validate)
class TestInstanceValue(BaseFilterTest):
def test_filter_tag_count(self):
tags = []
for i in range(10):
tags.append({'Key': str(i), 'Value': str(i)})
i = instance(Tags=tags)
self.assertFilter(
{'type': 'tag-count', 'op': 'lt'}, i, False)
tags.pop(0)
i = instance(Tags=tags)
self.assertFilter(
{'type': 'tag-count', 'op': 'gte', 'count': 9}, i, True)
def test_filter_tag(self):
i = instance(Tags=[
{'Key': 'ASV', 'Value': 'abcd'}])
self.assertFilter(
{'tag:ASV': 'def'}, i, False)
self.assertEqual(
annotation(i, base_filters.ANNOTATION_KEY), ())
i = instance(Tags=[
{'Key': 'CMDB', 'Value': 'abcd'}])
self.assertFilter(
{'tag:ASV': 'absent'}, i, True)
self.assertEqual(
annotation(i, base_filters.ANNOTATION_KEY), ['tag:ASV'])
def test_present(self):
i = instance(Tags=[
{'Key': 'ASV', 'Value': ''}])
self.assertFilter(
{'type': 'value',
'key': 'tag:ASV',
'value': 'present'},
i, True)
def test_jmespath(self):
self.assertFilter(
{'Placement.AvailabilityZone': 'us-west-2c'},
instance(),
True)
self.assertFilter(
{'Placement.AvailabilityZone': 'us-east-1c'},
instance(),
False)
def test_complex_validator(self):
self.assertRaises(
base_filters.FilterValidationError,
filters.factory({
"key": "xyz", "type": "value"}).validate)
self.assertRaises(
base_filters.FilterValidationError,
filters.factory({
"value": "xyz", "type": "value"}).validate)
self.assertRaises(
base_filters.FilterValidationError,
filters.factory({
"key": "xyz",
"value": "xyz",
"op": "oo",
"type": "value"}).validate)
def test_complex_value_filter(self):
self.assertFilter(
{"key": (
"length(BlockDeviceMappings"
"[?Ebs.DeleteOnTermination == `true`]"
".Ebs.DeleteOnTermination)"),
"value": 0,
"type": "value",
"op": "gt"},
instance(),
True)
def test_not_null_filter(self):
self.assertFilter(
{"key": "Hypervisor",
"value": "not-null",
"type": "value"},
instance(),
True)
class TestEqualValue(unittest.TestCase):
def test_eq(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': 'green',
'op': 'eq'})
self.assertEqual(
f(instance(Color='green')),
True)
self.assertEqual(
f(instance(Color='blue')),
False)
def test_equal(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': 'green',
'op': 'equal'})
self.assertEqual(
f(instance(Color='green')),
True)
self.assertEqual(
f(instance(Color='blue')),
False)
class TestNotEqualValue(unittest.TestCase):
def test_ne(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': 'green',
'op': 'ne'})
self.assertEqual(
f(instance(Color='green')),
False)
self.assertEqual(
f(instance(Color='blue')),
True)
def test_not_equal(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': 'green',
'op': 'not-equal'})
self.assertEqual(
f(instance(Color='green')),
False)
self.assertEqual(
f(instance(Color='blue')),
True)
class TestGreaterThanValue(unittest.TestCase):
def test_gt(self):
f = filters.factory(
{'type': 'value',
'key': 'Number',
'value': 10,
'op': 'gt'})
self.assertEqual(
f(instance(Number=11)),
True)
self.assertEqual(
f(instance(Number=9)),
False)
self.assertEqual(
f(instance(Number=10)),
False)
def test_greater_than(self):
f = filters.factory(
{'type': 'value',
'key': 'Number',
'value': 10,
'op': 'greater-than'})
self.assertEqual(
f(instance(Number=11)),
True)
self.assertEqual(
f(instance(Number=9)),
False)
self.assertEqual(
f(instance(Number=10)),
False)
class TestLessThanValue(unittest.TestCase):
def test_lt(self):
f = filters.factory(
{'type': 'value',
'key': 'Number',
'value': 10,
'op': 'lt'})
self.assertEqual(
f(instance(Number=9)),
True)
self.assertEqual(
f(instance(Number=11)),
False)
self.assertEqual(
f(instance(Number=10)),
False)
def test_less_than(self):
f = filters.factory(
{'type': 'value',
'key': 'Number',
'value': 10,
'op': 'less-than'})
self.assertEqual(
f(instance(Number=9)),
True)
self.assertEqual(
f(instance(Number=11)),
False)
self.assertEqual(
f(instance(Number=10)),
False)
class TestInList(unittest.TestCase):
def test_in(self):
f = filters.factory(
{'type': 'value',
'key': 'Thing',
'value': ['Foo', 'Bar', 'Quux'],
'op': 'in'})
self.assertEqual(
f(instance(Thing='Foo')),
True)
self.assertEqual(
f(instance(Thing='Baz')),
False)
class TestNotInList(unittest.TestCase):
def test_ni(self):
f = filters.factory(
{'type': 'value',
'key': 'Thing',
'value': ['Foo', 'Bar', 'Quux'],
'op': 'ni'})
self.assertEqual(
f(instance(Thing='Baz')),
True)
self.assertEqual(
f(instance(Thing='Foo')),
False)
def test_not_in(self):
f = filters.factory(
{'type': 'value',
'key': 'Thing',
'value': ['Foo', 'Bar', 'Quux'],
'op': 'not-in'})
self.assertEqual(
f(instance(Thing='Baz')),
True)
self.assertEqual(
f(instance(Thing='Foo')),
False)
class TestFilterRegistry(unittest.TestCase):
def test_filter_registry(self):
reg = base_filters.FilterRegistry('test.filters')
self.assertRaises(
base_filters.FilterValidationError,
reg.factory,
{'type': ''},
)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f70a6d4fa3cb899a39d76c539603b4d81d6a554c
| 448
|
py
|
Python
|
14.py
|
CallMeTwitch/LeetCode
|
7d59b299fe76eb93ecc3d6936ab4bfedeb323ef7
|
[
"MIT"
] | null | null | null |
14.py
|
CallMeTwitch/LeetCode
|
7d59b299fe76eb93ecc3d6936ab4bfedeb323ef7
|
[
"MIT"
] | null | null | null |
14.py
|
CallMeTwitch/LeetCode
|
7d59b299fe76eb93ecc3d6936ab4bfedeb323ef7
|
[
"MIT"
] | null | null | null |
class Solution:
def longestCommonPrefix(self, strs):
min_len = len(min(strs, key = len))
for q in range(len(strs)):
strs[q] = list(strs[q])
lst = []
final = ''
for _ in range(min_len):
lst = [q.pop(0) for q in strs]
if all(q == lst[0] for q in lst):
final += lst[0]
else:
return final
return final
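# A non-mutating alternative (a sketch, not part of the original submission):
# zip(*strs) walks the strings column by column and stops at the shortest one,
# so the loop ends at the first column whose characters disagree.
#
#   def longestCommonPrefix(strs):
#       prefix = ''
#       for chars in zip(*strs):
#           if len(set(chars)) != 1:
#               return prefix
#           prefix += chars[0]
#       return prefix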
| 26.352941
| 46
| 0.4375
|
class Solution:
def longestCommonPrefix(self, strs):
min_len = len(min(strs, key = len))
for q in range(len(strs)):
strs[q] = list(strs[q])
lst = []
final = ''
for _ in range(min_len):
lst = [q.pop(0) for q in strs]
if all(q == lst[0] for q in lst):
final += lst[0]
else:
return final
return final
| true
| true
|
f70a6de635ea8ebcd428588097104e0bda6abb8a
| 664
|
py
|
Python
|
tests/test_slider.py
|
Yardanico/pylibui-cffi
|
10d90f08b6b1e43bf567ffcd22dbe976cb10e80e
|
[
"MIT"
] | 6
|
2017-10-16T03:23:05.000Z
|
2020-11-10T06:24:04.000Z
|
tests/test_slider.py
|
TiberiumN/pylibui-cffi
|
10d90f08b6b1e43bf567ffcd22dbe976cb10e80e
|
[
"MIT"
] | null | null | null |
tests/test_slider.py
|
TiberiumN/pylibui-cffi
|
10d90f08b6b1e43bf567ffcd22dbe976cb10e80e
|
[
"MIT"
] | 1
|
2018-09-07T06:14:27.000Z
|
2018-09-07T06:14:27.000Z
|
"""
Pylibui test suite.
"""
from pylibui.controls import Slider
from tests.utils import WindowTestCase
class SliderTest(WindowTestCase):
def setUp(self):
super().setUp()
self.slider = Slider(0, 100)
def test_value_initial_value(self):
"""Tests the sliders's `value` initial value is the first parameter
passed to constructor."""
slider = Slider(10, 110)
self.assertEqual(slider.value, 10)
def test_value_can_be_changed(self):
"""Tests the slider's `value` attribute can be changed."""
value = 30
self.slider.value = value
self.assertEqual(self.slider.value, value)
| 25.538462
| 75
| 0.653614
|
from pylibui.controls import Slider
from tests.utils import WindowTestCase
class SliderTest(WindowTestCase):
def setUp(self):
super().setUp()
self.slider = Slider(0, 100)
def test_value_initial_value(self):
slider = Slider(10, 110)
self.assertEqual(slider.value, 10)
def test_value_can_be_changed(self):
value = 30
self.slider.value = value
self.assertEqual(self.slider.value, value)
| true
| true
|
f70a6e363fb6bb482c5e3c38f34d9a3cc6bb57b1
| 2,239
|
py
|
Python
|
frappe/core/page/background_jobs/background_jobs.py
|
juhiwue/frappe
|
77f88af74e037dcca0bae3f3ef1e8cae7fb0f699
|
[
"MIT"
] | null | null | null |
frappe/core/page/background_jobs/background_jobs.py
|
juhiwue/frappe
|
77f88af74e037dcca0bae3f3ef1e8cae7fb0f699
|
[
"MIT"
] | 17
|
2021-03-22T18:47:14.000Z
|
2022-03-15T12:21:00.000Z
|
frappe/core/page/background_jobs/background_jobs.py
|
juhiwue/frappe
|
77f88af74e037dcca0bae3f3ef1e8cae7fb0f699
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import json
from typing import TYPE_CHECKING, Dict, List
from rq import Queue, Worker
import frappe
from frappe import _
from frappe.utils import convert_utc_to_user_timezone, format_datetime
from frappe.utils.background_jobs import get_redis_conn
from frappe.utils.scheduler import is_scheduler_inactive
if TYPE_CHECKING:
from rq.job import Job
JOB_COLORS = {
'queued': 'orange',
'failed': 'red',
'started': 'blue',
'finished': 'green'
}
@frappe.whitelist()
def get_info(show_failed=False) -> List[Dict]:
if isinstance(show_failed, str):
show_failed = json.loads(show_failed)
conn = get_redis_conn()
queues = Queue.all(conn)
workers = Worker.all(conn)
jobs = []
def add_job(job: 'Job', name: str) -> None:
if job.kwargs.get('site') == frappe.local.site:
job_info = {
'job_name': job.kwargs.get('kwargs', {}).get('playbook_method')
or job.kwargs.get('kwargs', {}).get('job_type')
or str(job.kwargs.get('job_name')),
'status': job.get_status(),
'queue': name,
'creation': format_datetime(convert_utc_to_user_timezone(job.created_at)),
'color': JOB_COLORS[job.get_status()]
}
if job.exc_info:
job_info['exc_info'] = job.exc_info
jobs.append(job_info)
# show worker jobs
for worker in workers:
job = worker.get_current_job()
if job:
add_job(job, worker.name)
for queue in queues:
# show active queued jobs
if queue.name != 'failed':
for job in queue.jobs:
add_job(job, queue.name)
# show failed jobs, if requested
if show_failed:
fail_registry = queue.failed_job_registry
for job_id in fail_registry.get_job_ids():
job = queue.fetch_job(job_id)
if job:
add_job(job, queue.name)
return jobs
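# Each entry returned above looks roughly like this (illustrative values only;
# 'creation' follows the user's configured frappe datetime format):
#   {'job_name': 'backup', 'status': 'queued', 'queue': 'default',
#    'creation': '2022-01-01 10:00:00', 'color': 'orange'}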
@frappe.whitelist()
def remove_failed_jobs():
conn = get_redis_conn()
queues = Queue.all(conn)
for queue in queues:
fail_registry = queue.failed_job_registry
for job_id in fail_registry.get_job_ids():
job = queue.fetch_job(job_id)
fail_registry.remove(job, delete_job=True)
@frappe.whitelist()
def get_scheduler_status():
if is_scheduler_inactive():
return [_("Inactive"), "red"]
return [_("Active"), "green"]
| 24.336957
| 78
| 0.711925
|
import json
from typing import TYPE_CHECKING, Dict, List
from rq import Queue, Worker
import frappe
from frappe import _
from frappe.utils import convert_utc_to_user_timezone, format_datetime
from frappe.utils.background_jobs import get_redis_conn
from frappe.utils.scheduler import is_scheduler_inactive
if TYPE_CHECKING:
from rq.job import Job
JOB_COLORS = {
'queued': 'orange',
'failed': 'red',
'started': 'blue',
'finished': 'green'
}
@frappe.whitelist()
def get_info(show_failed=False) -> List[Dict]:
if isinstance(show_failed, str):
show_failed = json.loads(show_failed)
conn = get_redis_conn()
queues = Queue.all(conn)
workers = Worker.all(conn)
jobs = []
def add_job(job: 'Job', name: str) -> None:
if job.kwargs.get('site') == frappe.local.site:
job_info = {
'job_name': job.kwargs.get('kwargs', {}).get('playbook_method')
or job.kwargs.get('kwargs', {}).get('job_type')
or str(job.kwargs.get('job_name')),
'status': job.get_status(),
'queue': name,
'creation': format_datetime(convert_utc_to_user_timezone(job.created_at)),
'color': JOB_COLORS[job.get_status()]
}
if job.exc_info:
job_info['exc_info'] = job.exc_info
jobs.append(job_info)
for worker in workers:
job = worker.get_current_job()
if job:
add_job(job, worker.name)
for queue in queues:
if queue.name != 'failed':
for job in queue.jobs:
add_job(job, queue.name)
if show_failed:
fail_registry = queue.failed_job_registry
for job_id in fail_registry.get_job_ids():
job = queue.fetch_job(job_id)
if job:
add_job(job, queue.name)
return jobs
@frappe.whitelist()
def remove_failed_jobs():
conn = get_redis_conn()
queues = Queue.all(conn)
for queue in queues:
fail_registry = queue.failed_job_registry
for job_id in fail_registry.get_job_ids():
job = queue.fetch_job(job_id)
fail_registry.remove(job, delete_job=True)
@frappe.whitelist()
def get_scheduler_status():
if is_scheduler_inactive():
return [_("Inactive"), "red"]
return [_("Active"), "green"]
| true
| true
|
f70a6ea2f3bc1234f058f1d39e3b1937ed425d61
| 10,360
|
py
|
Python
|
jax/_src/device_array.py
|
zjzh/jax
|
8372b98c4856b6b2363b7bb28abdb4579440a656
|
[
"Apache-2.0"
] | null | null | null |
jax/_src/device_array.py
|
zjzh/jax
|
8372b98c4856b6b2363b7bb28abdb4579440a656
|
[
"Apache-2.0"
] | 8
|
2022-01-03T10:15:55.000Z
|
2022-02-14T10:19:45.000Z
|
jax/_src/device_array.py
|
zjzh/jax
|
8372b98c4856b6b2363b7bb28abdb4579440a656
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# On-device arrays.
from functools import partial, partialmethod
import operator
from typing import (Any, List, Optional, Union)
import weakref
import numpy as np
from jax import core
from jax._src.config import config
from jax._src import abstract_arrays
from jax._src import dtypes
from jax._src import profiler
from jax._src.lib import xla_client as xc
import jax._src.util as util
### device-persistent data
xe = xc._xla
Device = xc.Device
Buffer = xe.Buffer
def _forward_method(attrname, self, fun, *args):
return fun(getattr(self, attrname), *args)
_forward_to_value = partial(_forward_method, "_value")
# The following is used for the type xc.Buffer or _DeviceArray.
DeviceArrayProtocol = Any
DeviceArray = xc.DeviceArrayBase
def make_device_array(
aval: core.ShapedArray,
device: Optional[Device],
device_buffer: Buffer,
) -> Union[Buffer, "_DeviceArray"]:
"""Returns a DeviceArray implementation based on arguments.
This is to be used only within JAX. It will return either a pure-Python
_DeviceArray or a C++-backed xc.Buffer, depending on the inputs.
"""
if isinstance(device_buffer, xc.Buffer):
if device_buffer.aval == aval and device_buffer._device == device:
return device_buffer
device_buffer = device_buffer.clone()
device_buffer._device = device
device_buffer.aval = aval
device_buffer.weak_type = aval.weak_type
return device_buffer
return _DeviceArray(aval, device, device_buffer)
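# In effect: an xc.Buffer whose aval and sticky device already match is
# returned as-is, a mismatched one is cloned and re-tagged, and anything that
# is not an xc.Buffer falls back to the pure-Python _DeviceArray wrapper.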
def type_is_device_array(x):
"""Returns `True` if `x` is a non-sharded DeviceArray.
Use this function instead of `type(x) is DeviceArray`.
"""
type_x = type(x)
return type_x is _DeviceArray or type_x is xc.Buffer
def device_array_supports_weakrefs():
try:
weakref.ref(DeviceArray())
return True
except TypeError:
return False
class _DeviceArray(DeviceArray): # type: ignore
"""A DeviceArray is an ndarray backed by a single device memory buffer."""
# We don't subclass ndarray because that would open up a host of issues,
# but lax_numpy.py overrides isinstance behavior and attaches ndarray methods.
__slots__ = [
"aval", "device_buffer", "_npy_value", "_device", "__weakref__"
]
__array_priority__ = 100
# DeviceArray has methods that are dynamically populated in lax_numpy.py,
# and this annotation is needed to make pytype happy.
_HAS_DYNAMIC_ATTRIBUTES = True
def __init__(self, aval: core.ShapedArray, device: Optional[Device],
device_buffer: Buffer):
"""Initializer.
Args:
aval: The abstract value associated to this array (shape+dtype+weak_type).
device: The optional sticky device. See
https://jax.readthedocs.io/en/latest/faq.html#controlling-data-and-computation-placement-on-devices
device_buffer: The underlying buffer owning the on-device data.
"""
DeviceArray.__init__(self)
self.aval = aval
self.device_buffer = device_buffer
self._device = device
self._npy_value = None
if config.jax_enable_checks:
assert type(aval) is core.ShapedArray
npy_value = self._value
assert npy_value.dtype == aval.dtype and npy_value.shape == aval.shape, (
aval, npy_value.shape, npy_value.dtype)
assert (device is None) or device is device_buffer.device()
def _check_if_deleted(self):
if self.device_buffer is deleted_buffer:
raise RuntimeError("DeviceArray has been deleted.")
@profiler.annotate_function
def block_until_ready(self):
"""Blocks the caller until the buffer's value has been computed on device.
This method is mostly useful for timing microbenchmarks that wish to
time how long a computation takes, without transferring the result back
to the host.
Returns the buffer object (`self`).
"""
self._check_if_deleted()
self.device_buffer.block_host_until_ready() # pytype: disable=attribute-error
return self
@property
def _value(self):
self._check_if_deleted()
if self._npy_value is None:
self._npy_value = self.device_buffer.to_py() # pytype: disable=attribute-error # bind-properties
self._npy_value.flags.writeable = False
return self._npy_value
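# _value copies device memory to host at most once, caches the result in
# _npy_value, and marks the cached ndarray read-only so the host copy cannot
# drift out of sync with the device buffer.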
@property
def shape(self):
return self.aval.shape
@property
def dtype(self):
return self.aval.dtype
@property
def size(self):
return util.prod(self.aval.shape)
@property
def ndim(self):
return len(self.aval.shape)
def device(self):
self._check_if_deleted()
return self.device_buffer.device() # pytype: disable=attribute-error
def copy_to_host_async(self):
"""Requests a copy of the buffer to the host."""
self._check_if_deleted()
if self._npy_value is None:
self.device_buffer.copy_to_host_async() # pytype: disable=attribute-error
def delete(self):
"""Deletes the device array and any cached copy on the host.
It is an error to access the contents of a `DeviceArray` after it has
been deleted.
Use of this method is optional; device buffers will be reclaimed
automatically by Python when a DeviceArray object is garbage collected.
However, it is sometimes useful to have more explicit control over the
time of deletion.
"""
self.device_buffer.delete() # pytype: disable=attribute-error
self.device_buffer = deleted_buffer
self._npy_value = None
@property
def __cuda_array_interface__(self):
return self.device_buffer.__cuda_array_interface__ # pytype: disable=attribute-error # bind-properties
# Adding methods dynamically to both _DeviceArray and xc.Buffer
# pylint: disable=protected-access
for device_array in [DeviceArray]:
def copy(self):
"""Returns an ndarray (backed by host memory, not device memory)."""
return np.asarray(self)
setattr(device_array, "copy", copy)
def __repr__(self):
line_width = np.get_printoptions()["linewidth"]
prefix = '{}('.format(self.__class__.__name__.lstrip('_'))
s = np.array2string(self._value, prefix=prefix, suffix=',',
separator=', ', max_line_width=line_width)
if self.aval is not None and self.aval.weak_type:
dtype_str = f'dtype={self.dtype.name}, weak_type=True)'
else:
dtype_str = f'dtype={self.dtype.name})'
last_line_len = len(s) - s.rfind('\n') + 1
sep = ' '
if last_line_len + len(dtype_str) + 1 > line_width:
sep = ' ' * len(prefix)
return "{}{},{}{}".format(prefix, s, sep, dtype_str)
setattr(device_array, "__repr__", __repr__)
def item(self):
if dtypes.issubdtype(self.dtype, np.complexfloating):
return complex(self)
elif dtypes.issubdtype(self.dtype, np.floating):
return float(self)
elif dtypes.issubdtype(self.dtype, np.integer):
return int(self)
elif dtypes.issubdtype(self.dtype, np.bool_):
return bool(self)
else:
raise TypeError(self.dtype)
setattr(device_array, "item", item)
def __len__(self):
try:
return self.aval.shape[0]
except IndexError as err:
raise TypeError("len() of unsized object") from err # same as numpy error
setattr(device_array, "__len__", __len__)
def __iter__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array") # same as numpy error
else:
return (sl for chunk in self._chunk_iter(100) for sl in chunk._unstack())
setattr(device_array, "__iter__", __iter__)
def __reversed__(self):
return iter(self[::-1])
setattr(device_array, "__reversed__", __reversed__)
def __format__(self, format_spec):
# Simulates behavior of https://github.com/numpy/numpy/pull/9883
if self.ndim == 0:
return format(self._value[()], format_spec)
else:
return format(self._value, format_spec)
setattr(device_array, "__format__", __format__)
def __array__(self, dtype=None, context=None):
return np.asarray(self._value, dtype=dtype)
setattr(device_array, "__array__", __array__)
setattr(device_array, "__str__", partialmethod(_forward_to_value, str))
setattr(device_array, "__bool__", partialmethod(_forward_to_value, bool))
setattr(device_array, "__nonzero__", partialmethod(_forward_to_value, bool))
setattr(device_array, "__float__", lambda self: self._value.__float__())
setattr(device_array, "__int__", lambda self: self._value.__int__())
setattr(device_array, "__complex__", lambda self: self._value.__complex__())
setattr(device_array, "__hex__", partialmethod(_forward_to_value, hex))
setattr(device_array, "__oct__", partialmethod(_forward_to_value, oct))
setattr(device_array, "__index__", partialmethod(_forward_to_value,
operator.index))
to_bytes = lambda self, order="C": self._value.tobytes(order)
setattr(device_array, "tobytes", to_bytes)
del to_bytes
setattr(device_array, "tolist", lambda self: self._value.tolist())
# pickle saves and loads just like an ndarray
setattr(device_array, "__reduce__",
partialmethod(_forward_to_value, operator.methodcaller("__reduce__")))
# explicitly set to be unhashable.
setattr(device_array, "__hash__", None)
# clobbered when jax.numpy is imported, but useful in tests
setattr(device_array, "__eq__", lambda self, other: self._value == other)
# The following methods are dynamically overridden in lax_numpy.py.
def raise_not_implemented():
raise NotImplementedError
setattr(device_array, "__getitem__", lambda self, i: raise_not_implemented())
# pylint: enable=protected-access
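# --- Editor's illustrative sketch (not part of the original module). The
# partialmethod(_forward_to_value, fn) pattern used above forwards a dunder
# call to the materialized host-side value; a standalone analogue, guarded so
# it never runs on import:
if __name__ == "__main__":
    class _Box:
        def __init__(self, v):
            self._value = v  # the attribute _forward_to_value reads
    _Box.__str__ = partialmethod(_forward_to_value, str)
    print(str(_Box(3)))  # prints "3"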
class DeletedBuffer(object): pass
deleted_buffer = DeletedBuffer()
device_array_types: List[type] = [xc.Buffer, _DeviceArray]
for _device_array in device_array_types:
core.literalable_types.add(_device_array)
  core.pytype_aval_mappings[_device_array] = abstract_arrays.canonical_concrete_aval
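# --- Editor's illustrative sketch (assumption: jax.numpy is importable; this
# demo is not part of the original module). block_until_ready() exists so that
# timing loops measure the computation itself rather than only its
# asynchronous dispatch:
if __name__ == "__main__":
    import time
    import jax.numpy as jnp
    x = jnp.ones((1000, 1000))
    start = time.perf_counter()
    (x @ x).block_until_ready()  # wait for the device result before stopping the clock
    print("matmul took", time.perf_counter() - start, "seconds")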
| 32.888889
| 108
| 0.721911
|
from functools import partial, partialmethod
import operator
from typing import (Any, List, Optional, Union)
import weakref
import numpy as np
from jax import core
from jax._src.config import config
from jax._src import abstract_arrays
from jax._src import dtypes
from jax._src import profiler
from jax._src.lib import xla_client as xc
import jax._src.util as util
Device = xc.Device
Buffer = xc.Buffer
def _forward_method(attrname, self, fun, *args):
return fun(getattr(self, attrname), *args)
_forward_to_value = partial(_forward_method, "_value")
DeviceArrayProtocol = Any
DeviceArray = xc.DeviceArrayBase
def make_device_array(
aval: core.ShapedArray,
device: Optional[Device],
device_buffer: Buffer,
) -> Union[Buffer, "_DeviceArray"]:
if isinstance(device_buffer, xc.Buffer):
if device_buffer.aval == aval and device_buffer._device == device:
return device_buffer
device_buffer = device_buffer.clone()
device_buffer._device = device
device_buffer.aval = aval
device_buffer.weak_type = aval.weak_type
return device_buffer
return _DeviceArray(aval, device, device_buffer)
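# Editor's note (descriptive comment, not in the original source): when the
# input is already an xc.Buffer, make_device_array() above avoids the
# _DeviceArray wrapper entirely -- it returns the buffer unchanged if its aval
# and device already match, and otherwise clones it and re-tags the clone's
# _device, aval and weak_type in place.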
def type_is_device_array(x):
type_x = type(x)
return type_x is _DeviceArray or type_x is xc.Buffer
def device_array_supports_weakrefs():
try:
weakref.ref(DeviceArray())
return True
except TypeError:
return False
class _DeviceArray(DeviceArray):
  # We don't subclass ndarray because that would open up a host of issues,
  # but lax_numpy.py overrides isinstance behavior and attaches ndarray methods.
__slots__ = [
"aval", "device_buffer", "_npy_value", "_device", "__weakref__"
]
__array_priority__ = 100
# DeviceArray has methods that are dynamically populated in lax_numpy.py,
# and this annotation is needed to make pytype happy.
_HAS_DYNAMIC_ATTRIBUTES = True
def __init__(self, aval: core.ShapedArray, device: Optional[Device],
device_buffer: Buffer):
DeviceArray.__init__(self)
self.aval = aval
self.device_buffer = device_buffer
self._device = device
self._npy_value = None
if config.jax_enable_checks:
assert type(aval) is core.ShapedArray
npy_value = self._value
assert npy_value.dtype == aval.dtype and npy_value.shape == aval.shape, (
aval, npy_value.shape, npy_value.dtype)
assert (device is None) or device is device_buffer.device()
def _check_if_deleted(self):
if self.device_buffer is deleted_buffer:
raise RuntimeError("DeviceArray has been deleted.")
@profiler.annotate_function
def block_until_ready(self):
self._check_if_deleted()
self.device_buffer.block_host_until_ready() # pytype: disable=attribute-error
return self
@property
def _value(self):
self._check_if_deleted()
if self._npy_value is None:
self._npy_value = self.device_buffer.to_py() # pytype: disable=attribute-error # bind-properties
self._npy_value.flags.writeable = False
return self._npy_value
@property
def shape(self):
return self.aval.shape
@property
def dtype(self):
return self.aval.dtype
@property
def size(self):
return util.prod(self.aval.shape)
@property
def ndim(self):
return len(self.aval.shape)
def device(self):
self._check_if_deleted()
return self.device_buffer.device() # pytype: disable=attribute-error
def copy_to_host_async(self):
self._check_if_deleted()
if self._npy_value is None:
self.device_buffer.copy_to_host_async() # pytype: disable=attribute-error
def delete(self):
self.device_buffer.delete() # pytype: disable=attribute-error
self.device_buffer = deleted_buffer
self._npy_value = None
@property
def __cuda_array_interface__(self):
return self.device_buffer.__cuda_array_interface__ # pytype: disable=attribute-error # bind-properties
# Adding methods dynamically to both _DeviceArray and xc.Buffer
# pylint: disable=protected-access
for device_array in [DeviceArray]:
def copy(self):
return np.asarray(self)
setattr(device_array, "copy", copy)
def __repr__(self):
line_width = np.get_printoptions()["linewidth"]
prefix = '{}('.format(self.__class__.__name__.lstrip('_'))
s = np.array2string(self._value, prefix=prefix, suffix=',',
separator=', ', max_line_width=line_width)
if self.aval is not None and self.aval.weak_type:
dtype_str = f'dtype={self.dtype.name}, weak_type=True)'
else:
dtype_str = f'dtype={self.dtype.name})'
last_line_len = len(s) - s.rfind('\n') + 1
sep = ' '
if last_line_len + len(dtype_str) + 1 > line_width:
sep = ' ' * len(prefix)
return "{}{},{}{}".format(prefix, s, sep, dtype_str)
setattr(device_array, "__repr__", __repr__)
def item(self):
if dtypes.issubdtype(self.dtype, np.complexfloating):
return complex(self)
elif dtypes.issubdtype(self.dtype, np.floating):
return float(self)
elif dtypes.issubdtype(self.dtype, np.integer):
return int(self)
elif dtypes.issubdtype(self.dtype, np.bool_):
return bool(self)
else:
raise TypeError(self.dtype)
setattr(device_array, "item", item)
def __len__(self):
try:
return self.aval.shape[0]
except IndexError as err:
raise TypeError("len() of unsized object") from err # same as numpy error
setattr(device_array, "__len__", __len__)
def __iter__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array") # same as numpy error
else:
return (sl for chunk in self._chunk_iter(100) for sl in chunk._unstack())
setattr(device_array, "__iter__", __iter__)
def __reversed__(self):
return iter(self[::-1])
setattr(device_array, "__reversed__", __reversed__)
def __format__(self, format_spec):
# Simulates behavior of https://github.com/numpy/numpy/pull/9883
if self.ndim == 0:
return format(self._value[()], format_spec)
else:
return format(self._value, format_spec)
setattr(device_array, "__format__", __format__)
def __array__(self, dtype=None, context=None):
return np.asarray(self._value, dtype=dtype)
setattr(device_array, "__array__", __array__)
setattr(device_array, "__str__", partialmethod(_forward_to_value, str))
setattr(device_array, "__bool__", partialmethod(_forward_to_value, bool))
setattr(device_array, "__nonzero__", partialmethod(_forward_to_value, bool))
setattr(device_array, "__float__", lambda self: self._value.__float__())
setattr(device_array, "__int__", lambda self: self._value.__int__())
setattr(device_array, "__complex__", lambda self: self._value.__complex__())
setattr(device_array, "__hex__", partialmethod(_forward_to_value, hex))
setattr(device_array, "__oct__", partialmethod(_forward_to_value, oct))
setattr(device_array, "__index__", partialmethod(_forward_to_value,
operator.index))
to_bytes = lambda self, order="C": self._value.tobytes(order)
setattr(device_array, "tobytes", to_bytes)
del to_bytes
setattr(device_array, "tolist", lambda self: self._value.tolist())
# pickle saves and loads just like an ndarray
setattr(device_array, "__reduce__",
partialmethod(_forward_to_value, operator.methodcaller("__reduce__")))
# explicitly set to be unhashable.
setattr(device_array, "__hash__", None)
# clobbered when jax.numpy is imported, but useful in tests
setattr(device_array, "__eq__", lambda self, other: self._value == other)
# The following methods are dynamically overridden in lax_numpy.py.
def raise_not_implemented():
raise NotImplementedError
setattr(device_array, "__getitem__", lambda self, i: raise_not_implemented())
# pylint: enable=protected-access
class DeletedBuffer(object): pass
deleted_buffer = DeletedBuffer()
device_array_types: List[type] = [xc.Buffer, _DeviceArray]
for _device_array in device_array_types:
core.literalable_types.add(_device_array)
  core.pytype_aval_mappings[_device_array] = abstract_arrays.canonical_concrete_aval
| true
| true
|
f70a6f4e41b9deabaa98231dc49f102c2da5262c
| 3,144
|
py
|
Python
|
pkgs/ipykernel-4.3.1-py27_0/lib/python2.7/site-packages/ipykernel/gui/gtk3embed.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 652
|
2015-07-26T00:00:17.000Z
|
2022-02-24T18:30:04.000Z
|
pkgs/ipykernel-4.3.1-py27_0/lib/python2.7/site-packages/ipykernel/gui/gtk3embed.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 8
|
2015-09-07T03:38:19.000Z
|
2021-05-23T03:18:51.000Z
|
pkgs/ipykernel-4.3.1-py27_0/lib/python2.7/site-packages/ipykernel/gui/gtk3embed.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 40
|
2015-07-24T19:45:08.000Z
|
2021-11-01T14:54:56.000Z
|
"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import sys
# Third-party
from gi.repository import GObject, Gtk
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class GTKEmbed(object):
"""A class to embed a kernel into the GTK main event loop.
"""
def __init__(self, kernel):
self.kernel = kernel
# These two will later store the real gtk functions when we hijack them
self.gtk_main = None
self.gtk_main_quit = None
def start(self):
"""Starts the GTK main event loop and sets our kernel startup routine.
"""
# Register our function to initiate the kernel and start gtk
GObject.idle_add(self._wire_kernel)
Gtk.main()
def _wire_kernel(self):
"""Initializes the kernel inside GTK.
This is meant to run only once at startup, so it does its job and
returns False to ensure it doesn't get run again by GTK.
"""
self.gtk_main, self.gtk_main_quit = self._hijack_gtk()
GObject.timeout_add(int(1000*self.kernel._poll_interval),
self.iterate_kernel)
return False
def iterate_kernel(self):
"""Run one iteration of the kernel and return True.
GTK timer functions must return True to be called again, so we make the
call to :meth:`do_one_iteration` and then return True for GTK.
"""
self.kernel.do_one_iteration()
return True
def stop(self):
# FIXME: this one isn't getting called because we have no reliable
# kernel shutdown. We need to fix that: once the kernel has a
# shutdown mechanism, it can call this.
self.gtk_main_quit()
sys.exit()
def _hijack_gtk(self):
"""Hijack a few key functions in GTK for IPython integration.
        Modifies pyGTK's main and main_quit with a dummy so user code does not
        block IPython.  This allows us to use %run to run arbitrary pygtk
        scripts from a long-lived IPython session; when such scripts attempt
        to start or stop the GTK event loop, the hijacked calls do nothing.
Returns
-------
The original functions that have been hijacked:
- Gtk.main
- Gtk.main_quit
"""
def dummy(*args, **kw):
pass
# save and trap main and main_quit from gtk
orig_main, Gtk.main = Gtk.main, dummy
orig_main_quit, Gtk.main_quit = Gtk.main_quit, dummy
return orig_main, orig_main_quit
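# --- Editor's illustrative sketch (not part of ipykernel): the embedded
# kernel only needs a `_poll_interval` attribute (in seconds) and a
# `do_one_iteration()` method, so a minimal stand-in looks like this. ---
if __name__ == "__main__":
    class _FakeKernel(object):
        _poll_interval = 0.05
        def do_one_iteration(self):
            print("kernel iteration")
    GTKEmbed(_FakeKernel()).start()  # blocks inside Gtk.main()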
| 36.55814
| 79
| 0.54612
|
import sys
from gi.repository import GObject, Gtk
class GTKEmbed(object):
def __init__(self, kernel):
self.kernel = kernel
self.gtk_main = None
self.gtk_main_quit = None
def start(self):
GObject.idle_add(self._wire_kernel)
Gtk.main()
def _wire_kernel(self):
self.gtk_main, self.gtk_main_quit = self._hijack_gtk()
GObject.timeout_add(int(1000*self.kernel._poll_interval),
self.iterate_kernel)
return False
def iterate_kernel(self):
self.kernel.do_one_iteration()
return True
def stop(self):
        # FIXME: this one isn't getting called because we have no reliable
        # kernel shutdown. We need to fix that: once the kernel has a
        # shutdown mechanism, it can call this.
self.gtk_main_quit()
sys.exit()
def _hijack_gtk(self):
def dummy(*args, **kw):
pass
# save and trap main and main_quit from gtk
orig_main, Gtk.main = Gtk.main, dummy
orig_main_quit, Gtk.main_quit = Gtk.main_quit, dummy
return orig_main, orig_main_quit
| true
| true
|
f70a703ecf8b20f0a4ea6cf7f1cfc565cffc8462
| 19,936
|
py
|
Python
|
caesd-master/main.py
|
korecodes/FYP
|
b4f67d968081f9199d1555a1729856d4af4a895e
|
[
"MIT"
] | 1
|
2022-01-18T15:33:46.000Z
|
2022-01-18T15:33:46.000Z
|
caesd-master/main.py
|
korecodes/FYP
|
b4f67d968081f9199d1555a1729856d4af4a895e
|
[
"MIT"
] | null | null | null |
caesd-master/main.py
|
korecodes/FYP
|
b4f67d968081f9199d1555a1729856d4af4a895e
|
[
"MIT"
] | null | null | null |
#GUI classes for the application
from kivy.app import App
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.spinner import Spinner
from kivy.uix.textinput import TextInput
from kivy.properties import ObjectProperty, BooleanProperty
from kivy.uix.recycleview import RecycleView
from kivy.uix.recycleboxlayout import RecycleBoxLayout
from kivy.uix.recycleview.views import RecycleDataViewBehavior
from kivy.uix.behaviors import FocusBehavior
from kivy.uix.recycleview.layout import LayoutSelectionBehavior
#Window.size = (1200, 800)
#FUNCTION classes for the application
from app_functions import AmpFunctions, RoomDesign
from app_constants import AppConstants
class SelectableLabel(RecycleDataViewBehavior, Label):
index = None
selected = BooleanProperty(False)
selectable = BooleanProperty(True)
    def refresh_view_attrs(self, rv, index, data):
        ''' Catch and handle the view changes '''
        self.index = index
        self.selected = True
        return super(SelectableLabel, self).refresh_view_attrs(
            rv, index, data)
def on_touch_down(self, touch):
''' Add selection on touch down '''
if super(SelectableLabel, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos) and self.selectable:
return self.parent.select_with_touch(self.index, touch)
    def apply_selection(self, rv, index, is_selected):
        ''' Respond to the selection of items in the view. '''
        action = CAESD()
        self.selected = is_selected
if is_selected:
machine_data = """
Machine Section: %s
Machine Name: %s
Machine Load: %s
Machine Current: %sA
Machine Current(fx): %sA
Machine Cable Size: %smm2
Machine Breaker Size: %sA
Machine Cable Type: Armoured PVC Insulated Single Core Cable
Machine Breaker Type: %s
""" % (str(rv.data[index]['machine_section']),
str(rv.data[index]['machine_name']),
str(rv.data[index]['machine_load']),
str(rv.data[index]['machine_amp']),
str(rv.data[index]['machine_amp_gd']),
str(rv.data[index]['cable_size']),
str(rv.data[index]['breaker_size']),
str(rv.data[index]['breaker_type']))
action.popDisplays('Machine Details', machine_data)
class SelectableRecycleBoxLayout(FocusBehavior, LayoutSelectionBehavior,
RecycleBoxLayout):
''' Adds selection and focus behaviour to the view. '''
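# --- Editor's illustrative note (assumption: the real wiring lives in
# main.kv, which is not shown here). A SelectableRecycleBoxLayout is
# conventionally the layout manager of a RecycleView whose viewclass is
# SelectableLabel, e.g. in kv:
#
#   RecycleView:
#       viewclass: 'SelectableLabel'
#       SelectableRecycleBoxLayout:
#           orientation: 'vertical'
#           multiselect: False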
#Screens
class LaunchPage(Screen):
pass
class CctvPage(Screen):
dropManufacturer = ObjectProperty()
dropModel = ObjectProperty()
dropSensor = ObjectProperty()
distFromCamera = ObjectProperty()
sceneWidth = ObjectProperty()
sceneHeight = ObjectProperty()
sceneArea = ObjectProperty()
focalLength = ObjectProperty()
datastore = {
'Manu_Model_pairs': [],
'Manufacturer': '',
'Model': '',
'Sensor': '',
'Distance': '',
'Width': '',
'Height': '',
'Focal': '',
'Area': ''
}
def selectedManufacturer(self):
self.datastore['Manufacturer'] = self.dropManufacturer.text
self.datastore['Manu_Model_pairs'] = AppConstants().manufacturerModels(self.dropManufacturer.text)
self.dropModel.values = [i for i in self.datastore['Manu_Model_pairs'].keys()]
def selectedModel(self):
if self.dropModel.text != 'Model':
self.datastore['Model'] = self.dropModel.text
self.datastore['Sensor'] = self.datastore['Manu_Model_pairs'][self.dropModel.text]
self.dropSensor.text = 'Sensor format: '+ self.datastore['Sensor']+'"'
self.sensor_values = AppConstants().sensorsValues(self.datastore['Sensor'])
def checkManufacturerModelSelected(self):
if self.dropManufacturer.text != "" and self.dropModel.text != 'Model':
return True
def clearValues(self):
if self.sceneWidth.text == '':
self.sceneHeight.text = ''
self.focalLength.text = ''
self.sceneArea.text = ''
elif self.sceneHeight.text == '':
self.sceneWidth.text = ''
self.focalLength.text = ''
self.sceneArea.text = ''
def calculateSceneDimensions(self, dimension, value):
app = CAESD()
if value != '':
if self.checkManufacturerModelSelected():
if self.distFromCamera.focus:
self.datastore['Distance'] = self.distFromCamera.text
if self.sceneWidth.text == '' or self.sceneHeight.text == '':
pass
else:
self.focalLength.text = str(round((float(self.sensor_values[0])*float(self.distFromCamera.text))/float(self.sceneWidth.text), 1))
self.sceneArea.text = str(round(float(self.sceneWidth.text)*float(self.sceneHeight.text), 2))
elif self.sceneWidth.focus:
self.datastore['Height'] = ''
self.datastore['Width'] = self.sceneWidth.text
self.sceneHeight.text = str(round((float(self.sceneWidth.text)*float(self.sensor_values[1]))/float(self.sensor_values[0]), 1))
if self.distFromCamera.text != '':
self.focalLength.text = str(round((float(self.sensor_values[0])*float(self.distFromCamera.text))/float(self.sceneWidth.text), 1))
self.sceneArea.text = str(round(float(self.sceneWidth.text)*float(self.sceneHeight.text), 2))
elif self.sceneHeight.focus:
self.datastore['Width'] = ''
self.datastore['Height'] = self.sceneHeight.text
self.sceneWidth.text = str(round((float(self.sceneHeight.text)*float(self.sensor_values[0]))/float(self.sensor_values[1]), 1))
if self.distFromCamera.text != '':
self.focalLength.text = str(round((float(self.sensor_values[1])*float(self.distFromCamera.text))/float(self.sceneHeight.text), 1))
self.sceneArea.text = str(round(float(self.sceneHeight.text)*float(self.sceneWidth.text), 2))
else:
pass
else:
errorMessage = 'Please select the Model'
app.popDisplays('Application Error', errorMessage)
else:
if self.distFromCamera.text == '':
self.focalLength.text = ''
self.clearValues()
else:
self.clearValues()
class EarthingPage(Screen):
pass
class PowerPage_one(Screen):
    numMachines = ObjectProperty()
    numSections = ObjectProperty()
    normalVoltage = ObjectProperty()
    utilityVoltage = ObjectProperty()
    growthFactor = ObjectProperty()
    deratingFactor = ObjectProperty()
    loadingFactor = ObjectProperty()
    dispPowerOneError = ObjectProperty()
    buttAddMachines = ObjectProperty()
def calculatePowerInputs(self, machines, sections):
if machines:
if sections:
self.buttAddMachines.disabled = False
PowerPage_two().powerdataApp(machines, sections, self.normalVoltage.text, self.utilityVoltage.text, self.growthFactor.text, self.deratingFactor.text)
else:
CAESD().displayInLabelMessage(self.dispPowerOneError, t='Please Indicate Number of Sections', i=True)
else:
CAESD().displayInLabelMessage(self.dispPowerOneError, t='Please Indicate Number of Machines', i=True)
class PowerPage_two(Screen):
machineOutOfNum = ObjectProperty()
machineNameName = ObjectProperty()
machineNameInput = ObjectProperty()
    machineLoad = ObjectProperty()
machineFactor = ObjectProperty()
dropSelectMachineSection = ObjectProperty()
dispPowerTwoScreen = ObjectProperty()
buttAddMachines = ObjectProperty()
buttAllMachines = ObjectProperty()
dropViewMachineSection = ObjectProperty()
dispMachineListHeader = ObjectProperty()
dispMachineScreen = ObjectProperty()
num_of_machines_and_sections = []
storageMachineData = []
def addMachineParameters(self, machine_name, load, section_selected):
if machine_name:
if load:
if section_selected != 'Select Machine Section':
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t='', i=True)
self.buttAllMachines.disabled = False
self.dropViewMachineSection.disabled = False
self.dispMachineListHeader.disabled = False
if int(self.getCurMachineNumber()) == int(self.num_of_machines_and_sections[0]):
self.machineListLabels()
                        self.displayPowerViewboard()
self.buttAddMachines.disabled = True
self.dropSelectMachineSection.disabled = True
out_message = "Complete!!! "+str(int(self.getCurMachineNumber()))+" out of "+str(self.num_of_machines_and_sections[0])+" machines added!"
CAESD().displayInLabelMessage(self.machineOutOfNum, t=out_message)
else:
self.machineListLabels()
                        self.displayPowerViewboard()
self.machineNameName.text = "Name for Machine "+str(int(self.getCurMachineNumber())+1)
self.machineNameInput.text = "Machine "+str(int(self.getCurMachineNumber()))
out_message =str(int(self.getCurMachineNumber())-1)+" out of "+str(self.num_of_machines_and_sections[0])+" machines added!"
CAESD().displayInLabelMessage(self.machineOutOfNum, t=out_message, c=[0,0,0,1])
self.machineLoad.text = ''
self.dropSelectMachineSection.text = 'Select Machine Section'
else:
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t='Please Select A Machine Section', i=True)
else:
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t='Please Indicate Machine Load', i=True)
else:
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t='Please Indicate Machine Name', i=True)
def powerdataApp(self, machines, sections, a, b, c, d):
self.num_of_machines_and_sections.append(machines)
self.num_of_machines_and_sections.append(sections)
self.num_of_machines_and_sections.append([a,b,c,d])
def getCurMachineNumber(self):
return self.machineNameName.text.split(' ')[3]
def selectMachineSection(self):
values = []
section_alt = [chr(i) for i in range(65,91)]
for i in range(1, int(self.num_of_machines_and_sections[1])+1):
values.append('Section '+str(section_alt[i-1]))
self.dropSelectMachineSection.values = values
self.dropViewMachineSection.values = values
#self.buttMachineSection.values = values
def machineListLabels(self):
ampCal = AmpFunctions(float(self.machineLoad.text),
float(self.num_of_machines_and_sections[2][0]),
float(self.num_of_machines_and_sections[2][2]),
float(self.num_of_machines_and_sections[2][3]))
appCons = AppConstants()
self.storageMachineData.insert(0, { 'machine_section': str(self.dropSelectMachineSection.text),
'machine_name': str(self.machineNameInput.text),
'machine_load': str(self.machineLoad.text),
'machine_amp': str(ampCal.ampWithoutFutureExpansion()),
'machine_amp_gd': str(ampCal.ampWithFutureExpansion()),
'breaker_size': str(appCons.breakerSize(ampCal.ampWithFutureExpansion())),
'cable_size': str(appCons.cableSize(ampCal.ampWithoutFutureExpansion())),
'breaker_type': str(appCons.breakerType(appCons.breakerSize(ampCal.ampWithFutureExpansion())))})
self.dispMachineScreen.data = self.storageMachineData
def machineSectionLabels(self, sections, data):
self.dispMachineSection.data = []
values = []
section_alt = [chr(i) for i in range(65,91)]
for i in range(1, int(sections)+1):
values.append('Section '+str(section_alt[i-1]))
values.reverse()
for sect in values:
section_data = []
for row in data:
if row['machine_section'] == sect:
section_data.append(row)
            formatted_data = ['Machine | Load | Amp |\n']+[i['machine_name']+' | '+i['machine_load']+'kVA | '+i['machine_amp']+'A | \n' for i in section_data]
#section_header = 'Machine Name | Machine Load |\n'
#formatted_data(section_header)
self.dispMachineSection.data.insert(0, {'machine_section_name': str(sect), 'machine_section_data': str(''.join(formatted_data))})
def displayPowerViewboard(self):
ampCal = AmpFunctions(float(self.machineLoad.text),
float(self.num_of_machines_and_sections[2][0]),
float(self.num_of_machines_and_sections[2][2]),
float(self.num_of_machines_and_sections[2][3]))
#Determine the total current
all_currents = []
for i in self.dispMachineScreen.data:
all_currents.append(float(i['machine_amp']))
t_current = round(sum(all_currents), 2)
#Determine the transformer capacity
p_current = (float(self.num_of_machines_and_sections[2][0]) * t_current)/float(self.num_of_machines_and_sections[2][1])
t_capacity = round((ampCal.phaseRoot() * float(self.num_of_machines_and_sections[2][1]) * p_current * 1)/1000, 2)
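        # Editor's note (not in the original): the capacity above is the
        # three-phase apparent-power formula S[kVA] = sqrt(3) * V_line *
        # I_line / 1000, assuming ampCal.phaseRoot() returns sqrt(3) and the
        # literal `* 1` stands for a power factor of 1.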
power_viewboard_message = """
POWER VIEWBOARD
Total Current from Machines: %sA
Change Over Switch Capacity: 2500A
Transformer Capacity: %skVA
Generator Capacity: %skVA
""" % (t_current, t_capacity, t_capacity)
self.dispPowerTwoScreen.text = power_viewboard_message
def displayPanelBoard(self, data_key):
if data_key == 'All Machines':
self.dispMachineScreen.data = self.storageMachineData
#self.sectionViewboard.text = ''
else:
section_data = []
self.dispMachineScreen.data = []
for row in self.storageMachineData:
if row['machine_section'] == data_key:
section_data.append(row)
else:
self.dispMachineScreen.data = []
self.dispMachineScreen.data = section_data
if self.dispMachineScreen.data == []:
out_message = 'NO MACHINE ADDED YET FOR '+data_key.upper()
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t=out_message, c=[0,0,0,1])
else:
tot_load = 0
tot_amp = 0
tot_amp_gd = 0
tot_breaker_size = 0
#tot_cable_size = 0
for i in self.dispMachineScreen.data:
tot_load += float(i['machine_load'])
tot_amp += float(i['machine_amp'])
tot_amp_gd += float(i['machine_amp_gd'])
tot_breaker_size += float(i['breaker_size'])
#tot_cable_size += float(i['cable_size'])
data_summary = """
SUMMARY FOR %s
Number of Machines: %s
Total Load: %skVA
Total Current: %sA
Total Current(fx): %sA
Total Breaker Size: %sA
""" % (data_key.upper(), len(self.dispMachineScreen.data), tot_load, round(tot_amp, 2), round(tot_amp_gd, 2), round(tot_breaker_size, 2))
self.dispPowerTwoScreen.text = data_summary
class IlluminationPage(Screen):
lengthOfRoom = ObjectProperty()
breadthOfRoom = ObjectProperty()
workingHeight = ObjectProperty()
wattMSq = ObjectProperty()
lampL = ObjectProperty()
numL = ObjectProperty()
mainFac = ObjectProperty()
dispIllumination = ObjectProperty()
dispLampDistributions = ObjectProperty()
def calculateLampsNeeded(self, length, breadth, w_height, watt_m_sq, lamp_l, no_lumin, main_fac):
app = CAESD()
if length and breadth and watt_m_sq and lamp_l:
if lamp_l != 'Lamp lumen':
if main_fac != 'Maintenance factor':
Ll = AppConstants().lampLumen(str(self.lampL.text))
room = RoomDesign(float(self.lengthOfRoom.text),
float(self.breadthOfRoom.text),
float(self.workingHeight.text),
float(self.wattMSq.text),
float(Ll),
float(self.numL.text),
float(self.mainFac.text))
message_illumination = """
Room Index Calculated at: %s \r
Total Number of lamps needed: %s
""" % (str(room.roomIndex()), str(room.roomLamps()))
lamp_dis = """
POSSIBLE COMBINATIONS OF LAMPS\r
%s
""" % str(room.possibleLampConfigurations())
app.displayInLabelMessage(self.dispIllumination, t=message_illumination, c=[0,0,0,1])
app.displayInLabelMessage(self.dispLampDistributions, t=lamp_dis, c=[0,0,0,1])
else:
app.displayInLabelMessage(self.dispIllumination, t='Please select the maintenance factor', i=True)
else:
app.displayInLabelMessage(self.dispIllumination, t='Please choose the lamp lumen', i=True)
else:
app.displayInLabelMessage(self.dispIllumination, t='Missing Parameter/Input', i=True)
#Main Screen Manager
class CAESDApp(ScreenManager):
pass
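# --- Editor's illustrative sketch (helper not used by the app): the screens
# above build section labels with [chr(i) for i in range(65, 91)]; the same
# idea reads more clearly with the stdlib alphabet. ---
import string

def section_labels(n):
    """Return ['Section A', 'Section B', ...] for the first n sections."""
    return ['Section ' + letter for letter in string.ascii_uppercase[:n]]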
main_kv = Builder.load_file("main.kv")
class CAESD(App):
def build(self):
self.title = 'Computer Aided Electrical Services Design'
self.background_color = 0,0,0,1
return main_kv
def displayInLabelMessage(self, obj, **kwargs):
obj.color = 1, 0, 0, 1
obj.italic = False
if kwargs == {}:
#Default error message
obj.text = 'Attention: Application Message'
else:
for i in kwargs.keys():
if i == 'text' or i == 't':
obj.text = kwargs[i]
elif i == 'color' or i == 'c':
obj.color = kwargs[i]
elif i == 'italic' or i == 'i':
obj.italic = kwargs[i]
def popDisplays(self, title, message, hint=(.7, .45)):
Popup(title=title, title_color=[1,1,1,1],
content=Label(text=message),
size_hint=hint,
separator_color=[1,1,0,.6]).open()
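# --- Editor's illustrative note (not part of the app): popDisplays() is
# fire-and-forget -- Popup.open() returns immediately and the popup is
# dismissed by Kivy's default outside-touch behaviour. A typical call:
#   CAESD().popDisplays('Info', 'Saved successfully', hint=(.5, .3))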
if __name__ == '__main__':
CAESD().run()
| 48.154589
| 169
| 0.589938
|
from kivy.app import App
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.spinner import Spinner
from kivy.uix.textinput import TextInput
from kivy.properties import ObjectProperty, BooleanProperty
from kivy.uix.recycleview import RecycleView
from kivy.uix.recycleboxlayout import RecycleBoxLayout
from kivy.uix.recycleview.views import RecycleDataViewBehavior
from kivy.uix.behaviors import FocusBehavior
from kivy.uix.recycleview.layout import LayoutSelectionBehavior
from app_functions import AmpFunctions, RoomDesign
from app_constants import AppConstants
class SelectableLabel(RecycleDataViewBehavior, Label):
index = None
selected = BooleanProperty(False)
selectable = BooleanProperty(True)
def refresh_view_attrs(self, rv, index, data):
self.index = index
self.selected = True
return super(SelectableLabel, self).refresh_view_attrs(
rv, index, data)
def on_touch_down(self, touch):
if super(SelectableLabel, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos) and self.selectable:
return self.parent.select_with_touch(self.index, touch)
def apply_selection(self, rv, index, is_selected):
action = CAESD()
self.selected = is_selected
if is_selected:
machine_data = """
Machine Section: %s
Machine Name: %s
Machine Load: %s
Machine Current: %sA
Machine Current(fx): %sA
Machine Cable Size: %smm2
Machine Breaker Size: %sA
Machine Cable Type: Armoured PVC Insulated Single Core Cable
Machine Breaker Type: %s
""" % (str(rv.data[index]['machine_section']),
str(rv.data[index]['machine_name']),
str(rv.data[index]['machine_load']),
str(rv.data[index]['machine_amp']),
str(rv.data[index]['machine_amp_gd']),
str(rv.data[index]['cable_size']),
str(rv.data[index]['breaker_size']),
str(rv.data[index]['breaker_type']))
action.popDisplays('Machine Details', machine_data)
class SelectableRecycleBoxLayout(FocusBehavior, LayoutSelectionBehavior,
                                 RecycleBoxLayout):
    pass
class LaunchPage(Screen):
pass
class CctvPage(Screen):
dropManufacturer = ObjectProperty()
dropModel = ObjectProperty()
dropSensor = ObjectProperty()
distFromCamera = ObjectProperty()
sceneWidth = ObjectProperty()
sceneHeight = ObjectProperty()
sceneArea = ObjectProperty()
focalLength = ObjectProperty()
datastore = {
'Manu_Model_pairs': [],
'Manufacturer': '',
'Model': '',
'Sensor': '',
'Distance': '',
'Width': '',
'Height': '',
'Focal': '',
'Area': ''
}
def selectedManufacturer(self):
self.datastore['Manufacturer'] = self.dropManufacturer.text
self.datastore['Manu_Model_pairs'] = AppConstants().manufacturerModels(self.dropManufacturer.text)
self.dropModel.values = [i for i in self.datastore['Manu_Model_pairs'].keys()]
def selectedModel(self):
if self.dropModel.text != 'Model':
self.datastore['Model'] = self.dropModel.text
self.datastore['Sensor'] = self.datastore['Manu_Model_pairs'][self.dropModel.text]
self.dropSensor.text = 'Sensor format: '+ self.datastore['Sensor']+'"'
self.sensor_values = AppConstants().sensorsValues(self.datastore['Sensor'])
def checkManufacturerModelSelected(self):
if self.dropManufacturer.text != "" and self.dropModel.text != 'Model':
return True
def clearValues(self):
if self.sceneWidth.text == '':
self.sceneHeight.text = ''
self.focalLength.text = ''
self.sceneArea.text = ''
elif self.sceneHeight.text == '':
self.sceneWidth.text = ''
self.focalLength.text = ''
self.sceneArea.text = ''
def calculateSceneDimensions(self, dimension, value):
app = CAESD()
if value != '':
if self.checkManufacturerModelSelected():
if self.distFromCamera.focus:
self.datastore['Distance'] = self.distFromCamera.text
if self.sceneWidth.text == '' or self.sceneHeight.text == '':
pass
else:
self.focalLength.text = str(round((float(self.sensor_values[0])*float(self.distFromCamera.text))/float(self.sceneWidth.text), 1))
self.sceneArea.text = str(round(float(self.sceneWidth.text)*float(self.sceneHeight.text), 2))
elif self.sceneWidth.focus:
self.datastore['Height'] = ''
self.datastore['Width'] = self.sceneWidth.text
self.sceneHeight.text = str(round((float(self.sceneWidth.text)*float(self.sensor_values[1]))/float(self.sensor_values[0]), 1))
if self.distFromCamera.text != '':
self.focalLength.text = str(round((float(self.sensor_values[0])*float(self.distFromCamera.text))/float(self.sceneWidth.text), 1))
self.sceneArea.text = str(round(float(self.sceneWidth.text)*float(self.sceneHeight.text), 2))
elif self.sceneHeight.focus:
self.datastore['Width'] = ''
self.datastore['Height'] = self.sceneHeight.text
self.sceneWidth.text = str(round((float(self.sceneHeight.text)*float(self.sensor_values[0]))/float(self.sensor_values[1]), 1))
if self.distFromCamera.text != '':
self.focalLength.text = str(round((float(self.sensor_values[1])*float(self.distFromCamera.text))/float(self.sceneHeight.text), 1))
self.sceneArea.text = str(round(float(self.sceneHeight.text)*float(self.sceneWidth.text), 2))
else:
pass
else:
errorMessage = 'Please select the Model'
app.popDisplays('Application Error', errorMessage)
else:
if self.distFromCamera.text == '':
self.focalLength.text = ''
self.clearValues()
else:
self.clearValues()
class EarthingPage(Screen):
pass
class PowerPage_one(Screen):
    numMachines = ObjectProperty()
    numSections = ObjectProperty()
    normalVoltage = ObjectProperty()
    utilityVoltage = ObjectProperty()
    growthFactor = ObjectProperty()
    deratingFactor = ObjectProperty()
    loadingFactor = ObjectProperty()
    dispPowerOneError = ObjectProperty()
    buttAddMachines = ObjectProperty()
def calculatePowerInputs(self, machines, sections):
if machines:
if sections:
self.buttAddMachines.disabled = False
PowerPage_two().powerdataApp(machines, sections, self.normalVoltage.text, self.utilityVoltage.text, self.growthFactor.text, self.deratingFactor.text)
else:
CAESD().displayInLabelMessage(self.dispPowerOneError, t='Please Indicate Number of Sections', i=True)
else:
CAESD().displayInLabelMessage(self.dispPowerOneError, t='Please Indicate Number of Machines', i=True)
class PowerPage_two(Screen):
machineOutOfNum = ObjectProperty()
machineNameName = ObjectProperty()
machineNameInput = ObjectProperty()
    machineLoad = ObjectProperty()
machineFactor = ObjectProperty()
dropSelectMachineSection = ObjectProperty()
dispPowerTwoScreen = ObjectProperty()
buttAddMachines = ObjectProperty()
buttAllMachines = ObjectProperty()
dropViewMachineSection = ObjectProperty()
dispMachineListHeader = ObjectProperty()
dispMachineScreen = ObjectProperty()
num_of_machines_and_sections = []
storageMachineData = []
def addMachineParameters(self, machine_name, load, section_selected):
if machine_name:
if load:
if section_selected != 'Select Machine Section':
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t='', i=True)
self.buttAllMachines.disabled = False
self.dropViewMachineSection.disabled = False
self.dispMachineListHeader.disabled = False
if int(self.getCurMachineNumber()) == int(self.num_of_machines_and_sections[0]):
self.machineListLabels()
                        self.displayPowerViewboard()
self.buttAddMachines.disabled = True
self.dropSelectMachineSection.disabled = True
out_message = "Complete!!! "+str(int(self.getCurMachineNumber()))+" out of "+str(self.num_of_machines_and_sections[0])+" machines added!"
CAESD().displayInLabelMessage(self.machineOutOfNum, t=out_message)
else:
self.machineListLabels()
                        self.displayPowerViewboard()
self.machineNameName.text = "Name for Machine "+str(int(self.getCurMachineNumber())+1)
self.machineNameInput.text = "Machine "+str(int(self.getCurMachineNumber()))
out_message =str(int(self.getCurMachineNumber())-1)+" out of "+str(self.num_of_machines_and_sections[0])+" machines added!"
CAESD().displayInLabelMessage(self.machineOutOfNum, t=out_message, c=[0,0,0,1])
self.machineLoad.text = ''
self.dropSelectMachineSection.text = 'Select Machine Section'
else:
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t='Please Select A Machine Section', i=True)
else:
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t='Please Indicate Machine Load', i=True)
else:
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t='Please Indicate Machine Name', i=True)
def powerdataApp(self, machines, sections, a, b, c, d):
self.num_of_machines_and_sections.append(machines)
self.num_of_machines_and_sections.append(sections)
self.num_of_machines_and_sections.append([a,b,c,d])
def getCurMachineNumber(self):
return self.machineNameName.text.split(' ')[3]
def selectMachineSection(self):
values = []
section_alt = [chr(i) for i in range(65,91)]
for i in range(1, int(self.num_of_machines_and_sections[1])+1):
values.append('Section '+str(section_alt[i-1]))
self.dropSelectMachineSection.values = values
self.dropViewMachineSection.values = values
#self.buttMachineSection.values = values
def machineListLabels(self):
ampCal = AmpFunctions(float(self.machineLoad.text),
float(self.num_of_machines_and_sections[2][0]),
float(self.num_of_machines_and_sections[2][2]),
float(self.num_of_machines_and_sections[2][3]))
appCons = AppConstants()
self.storageMachineData.insert(0, { 'machine_section': str(self.dropSelectMachineSection.text),
'machine_name': str(self.machineNameInput.text),
'machine_load': str(self.machineLoad.text),
'machine_amp': str(ampCal.ampWithoutFutureExpansion()),
'machine_amp_gd': str(ampCal.ampWithFutureExpansion()),
'breaker_size': str(appCons.breakerSize(ampCal.ampWithFutureExpansion())),
'cable_size': str(appCons.cableSize(ampCal.ampWithoutFutureExpansion())),
'breaker_type': str(appCons.breakerType(appCons.breakerSize(ampCal.ampWithFutureExpansion())))})
self.dispMachineScreen.data = self.storageMachineData
def machineSectionLabels(self, sections, data):
self.dispMachineSection.data = []
values = []
section_alt = [chr(i) for i in range(65,91)]
for i in range(1, int(sections)+1):
values.append('Section '+str(section_alt[i-1]))
values.reverse()
for sect in values:
section_data = []
for row in data:
if row['machine_section'] == sect:
section_data.append(row)
            formatted_data = ['Machine | Load | Amp |\n']+[i['machine_name']+' | '+i['machine_load']+'kVA | '+i['machine_amp']+'A | \n' for i in section_data]
#section_header = 'Machine Name | Machine Load |\n'
#formatted_data(section_header)
self.dispMachineSection.data.insert(0, {'machine_section_name': str(sect), 'machine_section_data': str(''.join(formatted_data))})
def displayPowerViewboard(self):
ampCal = AmpFunctions(float(self.machineLoad.text),
float(self.num_of_machines_and_sections[2][0]),
float(self.num_of_machines_and_sections[2][2]),
float(self.num_of_machines_and_sections[2][3]))
#Determine the total current
all_currents = []
for i in self.dispMachineScreen.data:
all_currents.append(float(i['machine_amp']))
t_current = round(sum(all_currents), 2)
#Determine the transformer capacity
p_current = (float(self.num_of_machines_and_sections[2][0]) * t_current)/float(self.num_of_machines_and_sections[2][1])
t_capacity = round((ampCal.phaseRoot() * float(self.num_of_machines_and_sections[2][1]) * p_current * 1)/1000, 2)
power_viewboard_message = """
POWER VIEWBOARD
Total Current from Machines: %sA
Change Over Switch Capacity: 2500A
Transformer Capacity: %skVA
Generator Capacity: %skVA
""" % (t_current, t_capacity, t_capacity)
self.dispPowerTwoScreen.text = power_viewboard_message
def displayPanelBoard(self, data_key):
if data_key == 'All Machines':
self.dispMachineScreen.data = self.storageMachineData
#self.sectionViewboard.text = ''
else:
section_data = []
self.dispMachineScreen.data = []
for row in self.storageMachineData:
if row['machine_section'] == data_key:
section_data.append(row)
else:
self.dispMachineScreen.data = []
self.dispMachineScreen.data = section_data
if self.dispMachineScreen.data == []:
out_message = 'NO MACHINE ADDED YET FOR '+data_key.upper()
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t=out_message, c=[0,0,0,1])
else:
tot_load = 0
tot_amp = 0
tot_amp_gd = 0
tot_breaker_size = 0
#tot_cable_size = 0
for i in self.dispMachineScreen.data:
tot_load += float(i['machine_load'])
tot_amp += float(i['machine_amp'])
tot_amp_gd += float(i['machine_amp_gd'])
tot_breaker_size += float(i['breaker_size'])
#tot_cable_size += float(i['cable_size'])
data_summary = """
SUMMARY FOR %s
Number of Machines: %s
Total Load: %skVA
Total Current: %sA
Total Current(fx): %sA
Total Breaker Size: %sA
""" % (data_key.upper(), len(self.dispMachineScreen.data), tot_load, round(tot_amp, 2), round(tot_amp_gd, 2), round(tot_breaker_size, 2))
self.dispPowerTwoScreen.text = data_summary
class IlluminationPage(Screen):
lengthOfRoom = ObjectProperty()
breadthOfRoom = ObjectProperty()
workingHeight = ObjectProperty()
wattMSq = ObjectProperty()
lampL = ObjectProperty()
numL = ObjectProperty()
mainFac = ObjectProperty()
dispIllumination = ObjectProperty()
dispLampDistributions = ObjectProperty()
def calculateLampsNeeded(self, length, breadth, w_height, watt_m_sq, lamp_l, no_lumin, main_fac):
app = CAESD()
if length and breadth and watt_m_sq and lamp_l:
if lamp_l != 'Lamp lumen':
if main_fac != 'Maintenance factor':
Ll = AppConstants().lampLumen(str(self.lampL.text))
room = RoomDesign(float(self.lengthOfRoom.text),
float(self.breadthOfRoom.text),
float(self.workingHeight.text),
float(self.wattMSq.text),
float(Ll),
float(self.numL.text),
float(self.mainFac.text))
message_illumination = """
Room Index Calculated at: %s \r
Total Number of lamps needed: %s
""" % (str(room.roomIndex()), str(room.roomLamps()))
lamp_dis = """
POSSIBLE COMBINATIONS OF LAMPS\r
%s
""" % str(room.possibleLampConfigurations())
app.displayInLabelMessage(self.dispIllumination, t=message_illumination, c=[0,0,0,1])
app.displayInLabelMessage(self.dispLampDistributions, t=lamp_dis, c=[0,0,0,1])
else:
app.displayInLabelMessage(self.dispIllumination, t='Please select the maintenance factor', i=True)
else:
app.displayInLabelMessage(self.dispIllumination, t='Please choose the lamp lumen', i=True)
else:
app.displayInLabelMessage(self.dispIllumination, t='Missing Parameter/Input', i=True)
#Main Screen Manager
class CAESDApp(ScreenManager):
pass
main_kv = Builder.load_file("main.kv")
class CAESD(App):
def build(self):
self.title = 'Computer Aided Electrical Services Design'
self.background_color = 0,0,0,1
return main_kv
def displayInLabelMessage(self, obj, **kwargs):
obj.color = 1, 0, 0, 1
obj.italic = False
if kwargs == {}:
#Default error message
obj.text = 'Attention: Application Message'
else:
for i in kwargs.keys():
if i == 'text' or i == 't':
obj.text = kwargs[i]
elif i == 'color' or i == 'c':
obj.color = kwargs[i]
elif i == 'italic' or i == 'i':
obj.italic = kwargs[i]
def popDisplays(self, title, message, hint=(.7, .45)):
Popup(title=title, title_color=[1,1,1,1],
content=Label(text=message),
size_hint=hint,
separator_color=[1,1,0,.6]).open()
if __name__ == '__main__':
CAESD().run()
| true
| true
|
f70a71c00a69752a6818debf925e56044120def3
| 619
|
py
|
Python
|
apps/addpaths.py
|
lorenzcsunikl/Dataset-of-Artefact-Aware-Human-Motion-Capture-using-Inertial-Sensors-Integrated-into-Loose-Clothing
|
e5864e20d60bd7fa38bf6935ba1bacfadcdb3035
|
[
"Apache-2.0"
] | null | null | null |
apps/addpaths.py
|
lorenzcsunikl/Dataset-of-Artefact-Aware-Human-Motion-Capture-using-Inertial-Sensors-Integrated-into-Loose-Clothing
|
e5864e20d60bd7fa38bf6935ba1bacfadcdb3035
|
[
"Apache-2.0"
] | null | null | null |
apps/addpaths.py
|
lorenzcsunikl/Dataset-of-Artefact-Aware-Human-Motion-Capture-using-Inertial-Sensors-Integrated-into-Loose-Clothing
|
e5864e20d60bd7fa38bf6935ba1bacfadcdb3035
|
[
"Apache-2.0"
] | null | null | null |
import os, sys, inspect
# realpath() will make your script run, even if you symlink it :)
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
# Use this if you want to include modules from a subfolder
cmd_subfolder = os.path.realpath(
os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
sys.path.append('../')
sys.path.append('../utils/')
sys.path.append('../vizualization/')
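# --- Editor's illustrative sketch (assumption: an equivalent pathlib form,
# not part of the original file). Path.resolve() follows symlinks just like
# os.path.realpath() above. ---
from pathlib import Path
_here = Path(__file__).resolve().parent
for _p in (str(_here), str(_here.parent)):
    if _p not in sys.path:
        sys.path.insert(0, _p)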
| 41.266667
| 105
| 0.722132
|
import os, sys, inspect
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
cmd_subfolder = os.path.realpath(
    os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
sys.path.append('../')
sys.path.append('../utils/')
sys.path.append('../vizualization/')
| true
| true
|
f70a71c87434c0461dadcc68734d1ada03bc32f7
| 35,695
|
py
|
Python
|
snaps/openstack/tests/create_image_tests.py
|
hashnfv/hashnfv-snaps
|
0dfca494ef7c2778babfac48d9b701953860b54f
|
[
"Apache-2.0"
] | null | null | null |
snaps/openstack/tests/create_image_tests.py
|
hashnfv/hashnfv-snaps
|
0dfca494ef7c2778babfac48d9b701953860b54f
|
[
"Apache-2.0"
] | null | null | null |
snaps/openstack/tests/create_image_tests.py
|
hashnfv/hashnfv-snaps
|
0dfca494ef7c2778babfac48d9b701953860b54f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Cable Television Laboratories, Inc. ("CableLabs")
# and others. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from glanceclient.exc import HTTPBadRequest
try:
from urllib.request import URLError
except ImportError:
from urllib2 import URLError
import logging
import shutil
import unittest
import uuid
import os
from snaps import file_utils
from snaps.openstack import create_image
from snaps.openstack.create_image import (ImageSettings, ImageCreationError,
ImageSettingsError)
from snaps.openstack.tests import openstack_tests
from snaps.openstack.tests.os_source_file_test import OSIntegrationTestCase
from snaps.openstack.utils import glance_utils
__author__ = 'spisarski'
logger = logging.getLogger('create_image_tests')
class ImageSettingsUnitTests(unittest.TestCase):
"""
Tests the construction of the ImageSettings class
"""
def test_no_params(self):
with self.assertRaises(ImageSettingsError):
ImageSettings()
def test_empty_config(self):
with self.assertRaises(ImageSettingsError):
ImageSettings(**dict())
def test_name_only(self):
with self.assertRaises(ImageSettingsError):
ImageSettings(name='foo')
def test_config_with_name_only(self):
with self.assertRaises(ImageSettingsError):
ImageSettings(**{'name': 'foo'})
def test_name_user_only(self):
with self.assertRaises(ImageSettingsError):
ImageSettings(name='foo', image_user='bar')
def test_config_with_name_user_only(self):
with self.assertRaises(ImageSettingsError):
ImageSettings(**{'name': 'foo', 'image_user': 'bar'})
def test_name_user_format_only(self):
with self.assertRaises(ImageSettingsError):
ImageSettings(name='foo', image_user='bar', img_format='qcow2')
def test_config_with_name_user_format_only(self):
with self.assertRaises(ImageSettingsError):
ImageSettings(
**{'name': 'foo', 'image_user': 'bar', 'format': 'qcow2'})
def test_name_user_format_url_only(self):
settings = ImageSettings(name='foo', image_user='bar',
img_format='qcow2', url='http://foo.com')
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.image_user)
self.assertEqual('qcow2', settings.format)
self.assertEqual('http://foo.com', settings.url)
self.assertIsNone(settings.image_file)
self.assertFalse(settings.exists)
self.assertFalse(settings.public)
self.assertIsNone(settings.nic_config_pb_loc)
def test_name_user_format_url_only_properties(self):
properties = {'hw_video_model': 'vga'}
settings = ImageSettings(name='foo', image_user='bar',
img_format='qcow2', url='http://foo.com',
extra_properties=properties)
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.image_user)
self.assertEqual('qcow2', settings.format)
self.assertEqual('http://foo.com', settings.url)
self.assertEqual(properties, settings.extra_properties)
self.assertIsNone(settings.image_file)
self.assertFalse(settings.exists)
self.assertFalse(settings.public)
self.assertIsNone(settings.nic_config_pb_loc)
def test_config_with_name_user_format_url_only(self):
settings = ImageSettings(
**{'name': 'foo', 'image_user': 'bar', 'format': 'qcow2',
'download_url': 'http://foo.com'})
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.image_user)
self.assertEqual('qcow2', settings.format)
self.assertEqual('http://foo.com', settings.url)
self.assertIsNone(settings.image_file)
self.assertFalse(settings.exists)
self.assertFalse(settings.public)
self.assertIsNone(settings.nic_config_pb_loc)
def test_name_user_format_file_only(self):
settings = ImageSettings(name='foo', image_user='bar',
img_format='qcow2',
image_file='/foo/bar.qcow')
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.image_user)
self.assertEqual('qcow2', settings.format)
self.assertIsNone(settings.url)
self.assertEqual('/foo/bar.qcow', settings.image_file)
self.assertFalse(settings.exists)
self.assertFalse(settings.public)
self.assertIsNone(settings.nic_config_pb_loc)
def test_config_with_name_user_format_file_only(self):
settings = ImageSettings(
**{'name': 'foo', 'image_user': 'bar', 'format': 'qcow2',
'image_file': '/foo/bar.qcow'})
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.image_user)
self.assertEqual('qcow2', settings.format)
self.assertIsNone(settings.url)
self.assertEqual('/foo/bar.qcow', settings.image_file)
self.assertFalse(settings.exists)
self.assertFalse(settings.public)
self.assertIsNone(settings.nic_config_pb_loc)
def test_all_url(self):
properties = {'hw_video_model': 'vga'}
kernel_settings = ImageSettings(name='kernel', url='http://kernel.com',
image_user='bar', img_format='qcow2')
ramdisk_settings = ImageSettings(name='ramdisk',
url='http://ramdisk.com',
image_user='bar', img_format='qcow2')
settings = ImageSettings(name='foo', image_user='bar',
img_format='qcow2', url='http://foo.com',
extra_properties=properties,
nic_config_pb_loc='/foo/bar',
kernel_image_settings=kernel_settings,
ramdisk_image_settings=ramdisk_settings,
exists=True, public=True)
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.image_user)
self.assertEqual('qcow2', settings.format)
self.assertEqual('http://foo.com', settings.url)
self.assertEqual(properties, settings.extra_properties)
self.assertIsNone(settings.image_file)
self.assertEqual('/foo/bar', settings.nic_config_pb_loc)
self.assertEqual('kernel', settings.kernel_image_settings.name)
self.assertEqual('http://kernel.com',
settings.kernel_image_settings.url)
self.assertEqual('bar', settings.kernel_image_settings.image_user)
self.assertEqual('qcow2', settings.kernel_image_settings.format)
self.assertEqual('ramdisk', settings.ramdisk_image_settings.name)
self.assertEqual('http://ramdisk.com',
settings.ramdisk_image_settings.url)
self.assertEqual('bar', settings.ramdisk_image_settings.image_user)
self.assertEqual('qcow2', settings.ramdisk_image_settings.format)
self.assertTrue(settings.exists)
self.assertTrue(settings.public)
def test_config_all_url(self):
settings = ImageSettings(
**{'name': 'foo', 'image_user': 'bar', 'format': 'qcow2',
'download_url': 'http://foo.com',
'extra_properties': '{\'hw_video_model\': \'vga\'}',
'nic_config_pb_loc': '/foo/bar',
'kernel_image_settings': {
'name': 'kernel',
'download_url': 'http://kernel.com',
'image_user': 'bar',
'format': 'qcow2'},
'ramdisk_image_settings': {
'name': 'ramdisk',
'download_url': 'http://ramdisk.com',
'image_user': 'bar',
'format': 'qcow2'},
'exists': True, 'public': True})
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.image_user)
self.assertEqual('qcow2', settings.format)
self.assertEqual('http://foo.com', settings.url)
self.assertEqual('{\'hw_video_model\': \'vga\'}',
settings.extra_properties)
self.assertIsNone(settings.image_file)
self.assertEqual('/foo/bar', settings.nic_config_pb_loc)
self.assertEqual('kernel', settings.kernel_image_settings.name)
self.assertEqual('http://kernel.com',
settings.kernel_image_settings.url)
self.assertEqual('ramdisk', settings.ramdisk_image_settings.name)
self.assertEqual('http://ramdisk.com',
settings.ramdisk_image_settings.url)
self.assertTrue(settings.exists)
self.assertTrue(settings.public)
def test_all_file(self):
properties = {'hw_video_model': 'vga'}
settings = ImageSettings(name='foo', image_user='bar',
img_format='qcow2',
image_file='/foo/bar.qcow',
extra_properties=properties,
nic_config_pb_loc='/foo/bar', exists=True,
public=True)
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.image_user)
self.assertEqual('qcow2', settings.format)
self.assertIsNone(settings.url)
self.assertEqual('/foo/bar.qcow', settings.image_file)
self.assertEqual(properties, settings.extra_properties)
self.assertEqual('/foo/bar', settings.nic_config_pb_loc)
self.assertTrue(settings.exists)
self.assertTrue(settings.public)
def test_config_all_file(self):
settings = ImageSettings(
**{'name': 'foo', 'image_user': 'bar', 'format': 'qcow2',
'image_file': '/foo/bar.qcow',
'extra_properties': '{\'hw_video_model\' : \'vga\'}',
'nic_config_pb_loc': '/foo/bar', 'exists': True,
'public': True})
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.image_user)
self.assertEqual('qcow2', settings.format)
self.assertIsNone(settings.url)
self.assertEqual('/foo/bar.qcow', settings.image_file)
self.assertEqual('{\'hw_video_model\' : \'vga\'}',
settings.extra_properties)
self.assertEqual('/foo/bar', settings.nic_config_pb_loc)
self.assertTrue(settings.exists)
self.assertTrue(settings.public)
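# --- Editor's illustrative sketch (not part of the test suite): the minimal
# valid URL-based configuration that the unit tests above converge on. ---
def _example_url_image_settings():
    return ImageSettings(name='example', image_user='cirros',
                         img_format='qcow2',
                         url='http://example.com/image.qcow2')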
class CreateImageSuccessTests(OSIntegrationTestCase):
"""
Test for the CreateImage class defined in create_image.py
"""
def setUp(self):
"""
Instantiates the CreateImage object that is responsible for downloading
and creating an OS image file within OpenStack
"""
super(self.__class__, self).__start__()
guid = uuid.uuid4()
self.image_name = self.__class__.__name__ + '-' + str(guid)
self.glance = glance_utils.glance_client(self.os_creds)
self.image_creator = None
if self.image_metadata and 'glance_tests' in self.image_metadata:
glance_test_meta = self.image_metadata['glance_tests']
else:
glance_test_meta = None
self.tmp_dir = 'tmp/' + str(guid)
if not os.path.exists(self.tmp_dir):
os.makedirs(self.tmp_dir)
self.image_settings = openstack_tests.cirros_image_settings(
name=self.image_name,
image_metadata=glance_test_meta)
def tearDown(self):
"""
Cleans the image and downloaded image file
"""
if self.image_creator:
self.image_creator.clean()
if os.path.exists(self.tmp_dir) and os.path.isdir(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
super(self.__class__, self).__clean__()
def test_create_image_clean_url(self):
"""
Tests the creation of an OpenStack image from a URL.
"""
# Create Image
# Set the default image settings, then set any custom parameters sent
# from the app
self.image_creator = create_image.OpenStackImage(self.os_creds,
self.image_settings)
created_image = self.image_creator.create()
self.assertIsNotNone(created_image)
retrieved_image = glance_utils.get_image(
self.glance, image_settings=self.image_settings)
self.assertIsNotNone(retrieved_image)
self.assertEqual(created_image.size, retrieved_image.size)
self.assertEqual(get_image_size(self.image_settings),
retrieved_image.size)
self.assertEqual(created_image.name, retrieved_image.name)
self.assertEqual(created_image.id, retrieved_image.id)
def test_create_image_clean_url_properties(self):
"""
        Tests the creation of an OpenStack image from a URL and verifies its
        properties.
"""
# Create Image
# Set the default image settings, then set any custom parameters sent
# from the app
self.image_creator = create_image.OpenStackImage(self.os_creds,
self.image_settings)
created_image = self.image_creator.create()
self.assertIsNotNone(created_image)
retrieved_image = glance_utils.get_image(
self.glance, image_settings=self.image_settings)
self.assertIsNotNone(retrieved_image)
self.assertEqual(self.image_creator.get_image().size,
retrieved_image.size)
self.assertEqual(get_image_size(self.image_settings),
retrieved_image.size)
self.assertEqual(created_image.name, retrieved_image.name)
self.assertEqual(created_image.id, retrieved_image.id)
self.assertEqual(created_image.properties, retrieved_image.properties)
def test_create_image_clean_file(self):
"""
Tests the creation of an OpenStack image from a file.
"""
if not self.image_settings.image_file and self.image_settings.url:
# Download the file of the image
image_file_name = file_utils.download(self.image_settings.url,
self.tmp_dir).name
else:
image_file_name = self.image_settings.image_file
if image_file_name:
file_image_settings = openstack_tests.file_image_test_settings(
name=self.image_name, file_path=image_file_name)
self.image_creator = create_image.OpenStackImage(
self.os_creds, file_image_settings)
created_image = self.image_creator.create()
self.assertIsNotNone(created_image)
self.assertEqual(self.image_name, created_image.name)
retrieved_image = glance_utils.get_image(
self.glance, image_settings=file_image_settings)
self.assertIsNotNone(retrieved_image)
self.assertEqual(self.image_creator.get_image().size,
retrieved_image.size)
self.assertEqual(get_image_size(file_image_settings),
retrieved_image.size)
self.assertEqual(created_image.name, retrieved_image.name)
self.assertEqual(created_image.id, retrieved_image.id)
else:
            logger.warning(
                'Test not executed as the image metadata requires image files')
def test_create_delete_image(self):
"""
Tests the creation then deletion of an OpenStack image to ensure
clean() does not raise an Exception.
"""
# Create Image
self.image_creator = create_image.OpenStackImage(self.os_creds,
self.image_settings)
created_image = self.image_creator.create()
self.assertIsNotNone(created_image)
retrieved_image = glance_utils.get_image(
self.glance, image_settings=self.image_settings)
self.assertIsNotNone(retrieved_image)
self.assertEqual(self.image_creator.get_image().size,
retrieved_image.size)
self.assertEqual(get_image_size(self.image_settings),
retrieved_image.size)
# Delete Image manually
glance_utils.delete_image(self.glance, created_image)
self.assertIsNone(glance_utils.get_image(
self.glance, image_settings=self.image_creator.image_settings))
# Must not throw an exception when attempting to cleanup non-existent
# image
self.image_creator.clean()
self.assertIsNone(self.image_creator.get_image())
def test_create_same_image(self):
"""
Tests the creation of an OpenStack image when the image already exists.
"""
# Create Image
self.image_creator = create_image.OpenStackImage(self.os_creds,
self.image_settings)
image1 = self.image_creator.create()
retrieved_image = glance_utils.get_image(
self.glance, image_settings=self.image_settings)
self.assertIsNotNone(retrieved_image)
self.assertEqual(self.image_creator.get_image().size,
retrieved_image.size)
self.assertEqual(get_image_size(self.image_settings),
retrieved_image.size)
self.assertEqual(image1.name, retrieved_image.name)
self.assertEqual(image1.id, retrieved_image.id)
self.assertEqual(image1.properties, retrieved_image.properties)
# Should be retrieving the instance data
os_image_2 = create_image.OpenStackImage(self.os_creds,
self.image_settings)
image2 = os_image_2.create()
self.assertEqual(image1.id, image2.id)
def test_create_same_image_new_settings(self):
"""
Tests the creation of an OpenStack image when the image already exists
and the configuration only contains the name.
"""
# Create Image
self.image_creator = create_image.OpenStackImage(self.os_creds,
self.image_settings)
image1 = self.image_creator.create()
retrieved_image = glance_utils.get_image(
self.glance, image_settings=self.image_settings)
self.assertIsNotNone(retrieved_image)
self.assertEqual(self.image_creator.get_image().size,
retrieved_image.size)
self.assertEqual(get_image_size(self.image_settings),
retrieved_image.size)
self.assertEqual(image1.name, retrieved_image.name)
self.assertEqual(image1.id, retrieved_image.id)
self.assertEqual(image1.properties, retrieved_image.properties)
# Should be retrieving the instance data
image_2_settings = ImageSettings(name=self.image_settings.name,
image_user='foo', exists=True)
os_image_2 = create_image.OpenStackImage(self.os_creds,
image_2_settings)
image2 = os_image_2.create()
self.assertEqual(image1.id, image2.id)
class CreateImageNegativeTests(OSIntegrationTestCase):
"""
Negative test cases for the CreateImage class
"""
def setUp(self):
super(self.__class__, self).__start__()
self.image_name = self.__class__.__name__ + '-' + str(uuid.uuid4())
self.image_creator = None
def tearDown(self):
if self.image_creator:
self.image_creator.clean()
super(self.__class__, self).__clean__()
def test_bad_image_name(self):
"""
        Expect an ImageCreationError when the named image does not exist and
        neither a file nor a URL has been configured
"""
os_image_settings = ImageSettings(name='foo', image_user='bar',
exists=True)
self.image_creator = create_image.OpenStackImage(self.os_creds,
os_image_settings)
with self.assertRaises(ImageCreationError):
self.image_creator.create()
            self.fail('ImageCreationError should have been raised prior to '
                      'this line')
def test_bad_image_url(self):
"""
        Expect an HTTPBadRequest or URLError when the image download URL is bad
"""
os_image_settings = openstack_tests.cirros_image_settings(
name=self.image_name)
self.image_creator = create_image.OpenStackImage(
self.os_creds,
create_image.ImageSettings(name=os_image_settings.name,
image_user=os_image_settings.image_user,
img_format=os_image_settings.format,
url="http://foo.bar"))
try:
self.image_creator.create()
except HTTPBadRequest:
pass
except URLError:
pass
except Exception as e:
self.fail('Invalid Exception ' + str(e))
def test_bad_image_image_type(self):
"""
        Expect an exception when the image type is bad
"""
os_image_settings = openstack_tests.cirros_image_settings(
name=self.image_name)
self.image_creator = create_image.OpenStackImage(
self.os_creds,
create_image.ImageSettings(name=os_image_settings.name,
image_user=os_image_settings.image_user,
img_format='foo',
url=os_image_settings.url))
with self.assertRaises(Exception):
self.image_creator.create()
def test_bad_image_file(self):
"""
        Expect an IOError when the image file does not exist
"""
os_image_settings = openstack_tests.cirros_image_settings(
name=self.image_name)
self.image_creator = create_image.OpenStackImage(
self.os_creds,
create_image.ImageSettings(name=os_image_settings.name,
image_user=os_image_settings.image_user,
img_format=os_image_settings.format,
image_file="/foo/bar.qcow"))
with self.assertRaises(IOError):
self.image_creator.create()
class CreateMultiPartImageTests(OSIntegrationTestCase):
"""
    Tests different means of creating 3-part images
"""
def setUp(self):
"""
        Prepares the Glance client, temporary directory, and test metadata
        used to download and create the 3-part OS image files within OpenStack
"""
super(self.__class__, self).__start__()
guid = uuid.uuid4()
self.image_creators = list()
self.image_name = self.__class__.__name__ + '-' + str(guid)
self.glance = glance_utils.glance_client(self.os_creds)
self.tmp_dir = 'tmp/' + str(guid)
if not os.path.exists(self.tmp_dir):
os.makedirs(self.tmp_dir)
if self.image_metadata and 'glance_tests' in self.image_metadata:
self.glance_test_meta = self.image_metadata['glance_tests']
else:
self.glance_test_meta = dict()
def tearDown(self):
"""
Cleans the images and downloaded image file
"""
for image_creator in self.image_creators:
image_creator.clean()
if os.path.exists(self.tmp_dir) and os.path.isdir(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
super(self.__class__, self).__clean__()
def test_create_three_part_image_from_url(self):
"""
Tests the creation of a 3-part OpenStack image from a URL.
"""
# Create the kernel image
if 'disk_file' not in self.glance_test_meta:
image_settings = openstack_tests.cirros_image_settings(
name=self.image_name,
image_metadata={
'disk_url':
openstack_tests.CIRROS_DEFAULT_IMAGE_URL,
'kernel_url':
openstack_tests.CIRROS_DEFAULT_KERNEL_IMAGE_URL,
'ramdisk_url':
openstack_tests.CIRROS_DEFAULT_RAMDISK_IMAGE_URL})
image_creator = create_image.OpenStackImage(self.os_creds,
image_settings)
self.image_creators.append(image_creator)
image_creator.create()
main_image = glance_utils.get_image(self.glance,
image_settings=image_settings)
self.assertIsNotNone(main_image)
self.assertIsNotNone(image_creator.get_image())
self.assertEqual(image_creator.get_image().id, main_image.id)
kernel_image = glance_utils.get_image(
self.glance,
image_settings=image_settings.kernel_image_settings)
self.assertIsNotNone(kernel_image)
self.assertIsNotNone(image_creator.get_kernel_image())
self.assertEqual(kernel_image.id,
image_creator.get_kernel_image().id)
ramdisk_image = glance_utils.get_image(
self.glance,
image_settings=image_settings.ramdisk_image_settings)
self.assertIsNotNone(ramdisk_image)
self.assertIsNotNone(image_creator.get_ramdisk_image())
self.assertEqual(ramdisk_image.id,
image_creator.get_ramdisk_image().id)
else:
            logger.warning(
                'Test not executed as the image metadata requires image files')
def test_create_three_part_image_from_file_3_creators(self):
"""
        Tests the creation of a 3-part OpenStack image from files using three
        separate image creators.
"""
file_only = False
# Set properties
properties = {}
if self.glance_test_meta:
if 'extra_properties' in self.glance_test_meta:
properties = self.glance_test_meta['extra_properties']
if 'disk_file' in self.glance_test_meta:
file_only = True
# Create the kernel image
kernel_file_name = None
kernel_url = openstack_tests.CIRROS_DEFAULT_KERNEL_IMAGE_URL
if 'kernel_file' in self.glance_test_meta:
kernel_file_name = self.glance_test_meta['kernel_file']
elif 'kernel_url' in self.glance_test_meta:
kernel_url = self.glance_test_meta['kernel_url']
else:
kernel_url = openstack_tests.CIRROS_DEFAULT_KERNEL_IMAGE_URL
if not kernel_file_name and not file_only:
kernel_file_name = file_utils.download(kernel_url,
self.tmp_dir).name
else:
            logger.warning('Will not download the kernel image.'
                           ' Cannot execute test')
return
kernel_file_image_settings = openstack_tests.file_image_test_settings(
name=self.image_name + '_kernel', file_path=kernel_file_name)
self.image_creators.append(create_image.OpenStackImage(
self.os_creds, kernel_file_image_settings))
kernel_image = self.image_creators[-1].create()
self.assertIsNotNone(kernel_image)
self.assertEqual(get_image_size(kernel_file_image_settings),
kernel_image.size)
# Create the ramdisk image
ramdisk_file_name = None
ramdisk_url = openstack_tests.CIRROS_DEFAULT_RAMDISK_IMAGE_URL
if 'ramdisk_file' in self.glance_test_meta:
ramdisk_file_name = self.glance_test_meta['ramdisk_file']
elif 'ramdisk_url' in self.glance_test_meta:
ramdisk_url = self.glance_test_meta['ramdisk_url']
if not ramdisk_file_name and not file_only:
ramdisk_file_name = file_utils.download(ramdisk_url,
self.tmp_dir).name
else:
            logger.warning('Will not download the ramdisk image.'
                           ' Cannot execute test')
return
ramdisk_file_image_settings = openstack_tests.file_image_test_settings(
name=self.image_name + '_ramdisk', file_path=ramdisk_file_name)
self.image_creators.append(create_image.OpenStackImage(
self.os_creds, ramdisk_file_image_settings))
ramdisk_image = self.image_creators[-1].create()
self.assertIsNotNone(ramdisk_image)
self.assertEqual(get_image_size(ramdisk_file_image_settings),
ramdisk_image.size)
# Create the main disk image
disk_file_name = None
disk_url = openstack_tests.CIRROS_DEFAULT_IMAGE_URL
if 'disk_file' in self.glance_test_meta:
disk_file_name = self.glance_test_meta['disk_file']
elif 'disk_url' in self.glance_test_meta:
disk_url = self.glance_test_meta['disk_url']
if not disk_file_name and not file_only:
disk_file_name = file_utils.download(disk_url, self.tmp_dir).name
else:
            logger.warning('Will not download the disk file image.'
                           ' Cannot execute test')
return
file_image_settings = openstack_tests.file_image_test_settings(
name=self.image_name, file_path=disk_file_name)
properties['kernel_id'] = kernel_image.id
properties['ramdisk_id'] = ramdisk_image.id
file_image_settings.extra_properties = properties
self.image_creators.append(
create_image.OpenStackImage(self.os_creds, file_image_settings))
created_image = self.image_creators[-1].create()
self.assertIsNotNone(created_image)
self.assertEqual(self.image_name, created_image.name)
retrieved_image = glance_utils.get_image(
self.glance, image_settings=file_image_settings)
self.assertIsNotNone(retrieved_image)
self.assertEqual(self.image_creators[-1].get_image().size,
retrieved_image.size)
self.assertEqual(get_image_size(file_image_settings),
retrieved_image.size)
self.assertEqual(created_image.name, retrieved_image.name)
self.assertEqual(created_image.id, retrieved_image.id)
self.assertEqual(created_image.properties, retrieved_image.properties)
def test_create_three_part_image_from_url_3_creators(self):
"""
        Tests the creation of a 3-part OpenStack image from URLs using three
        separate image creators.
"""
if 'disk_file' not in self.glance_test_meta:
# Set properties
properties = {}
if self.glance_test_meta and \
'extra_properties' in self.glance_test_meta:
properties = self.glance_test_meta['extra_properties']
# Create the kernel image
kernel_image_settings = openstack_tests.cirros_image_settings(
name=self.image_name + '_kernel',
url=openstack_tests.CIRROS_DEFAULT_KERNEL_IMAGE_URL)
if self.glance_test_meta:
if 'kernel_url' in self.glance_test_meta:
kernel_image_settings.url = self.glance_test_meta[
'kernel_url']
self.image_creators.append(
create_image.OpenStackImage(self.os_creds,
kernel_image_settings))
kernel_image = self.image_creators[-1].create()
self.assertIsNotNone(kernel_image)
self.assertEqual(get_image_size(kernel_image_settings),
kernel_image.size)
# Create the ramdisk image
ramdisk_image_settings = openstack_tests.cirros_image_settings(
name=self.image_name + '_ramdisk',
url=openstack_tests.CIRROS_DEFAULT_RAMDISK_IMAGE_URL)
if self.glance_test_meta:
if 'ramdisk_url' in self.glance_test_meta:
ramdisk_image_settings.url = self.glance_test_meta[
'ramdisk_url']
self.image_creators.append(
create_image.OpenStackImage(self.os_creds,
ramdisk_image_settings))
ramdisk_image = self.image_creators[-1].create()
self.assertIsNotNone(ramdisk_image)
self.assertEqual(get_image_size(ramdisk_image_settings),
ramdisk_image.size)
# Create the main image
os_image_settings = openstack_tests.cirros_image_settings(
name=self.image_name,
url=openstack_tests.CIRROS_DEFAULT_IMAGE_URL)
if self.glance_test_meta:
if 'disk_url' in self.glance_test_meta:
os_image_settings.url = self.glance_test_meta['disk_url']
properties['kernel_id'] = kernel_image.id
properties['ramdisk_id'] = ramdisk_image.id
os_image_settings.extra_properties = properties
self.image_creators.append(
create_image.OpenStackImage(self.os_creds, os_image_settings))
created_image = self.image_creators[-1].create()
self.assertIsNotNone(created_image)
self.assertEqual(self.image_name, created_image.name)
retrieved_image = glance_utils.get_image(
self.glance, image_settings=os_image_settings)
self.assertIsNotNone(retrieved_image)
self.assertEqual(self.image_creators[-1].get_image().size,
retrieved_image.size)
self.assertEqual(get_image_size(os_image_settings),
retrieved_image.size)
self.assertEqual(created_image.name, retrieved_image.name)
self.assertEqual(created_image.id, retrieved_image.id)
self.assertEqual(created_image.properties,
retrieved_image.properties)
else:
            logger.warning(
                'Test not executed as the image metadata requires image files')
def get_image_size(image_settings):
"""
    Returns the expected image size in bytes
    :return: the size of the configured image file or the content length of
        the configured URL
"""
if image_settings.image_file:
return os.path.getsize(image_settings.image_file)
elif image_settings.url:
return int(file_utils.get_content_length(image_settings.url))
else:
raise Exception(
'Cannot retrieve expected image size. Image filename or URL has '
'not been configured')
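# A minimal sketch of invoking the pure unit tests in this module from the
# command line; the dotted module path below is an assumption based on the
# imports and the 'create_image_tests' logger name above, and the integration
# test classes additionally need OpenStack credentials supplied through
# OSIntegrationTestCase:
#
#   python -m unittest snaps.openstack.tests.create_image_tests.ImageSettingsUnitTests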
avg_line_length: 43.109903 | max_line_length: 79 | alphanum_fraction: 0.620143
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f70a721597372b0efa52b1d23b20e2d0f1387886 | size: 7859 | ext: py | lang: Python
max_stars_repo_path: benchmarks/f3_wrong_hints_permutations/scaling_nonlinear_software/10-19_7.py
max_stars_repo_name: EnricoMagnago/F3 | max_stars_repo_head_hexsha: c863215c318d7d5f258eb9be38c6962cf6863b52 | max_stars_repo_licenses: ["MIT"]
max_stars_count: 3 | stars_event_datetime: 2021-04-23T23:29:26.000Z to 2022-03-23T10:00:30.000Z
max_issues_repo_path/name/head_hexsha/licenses: same as stars | max_issues_count: null | issues_event_datetime: null
max_forks_repo_path/name/head_hexsha/licenses: same as stars | max_forks_count: 1 | forks_event_datetime: 2021-11-17T22:02:56.000Z to 2021-11-17T22:02:56.000Z
from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
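# For orientation, the cfg and labels above encode (roughly) the following
# nonlinear loop; the original source program is not part of this file, so
# this sketch is reconstructed from the comments in cfg and may differ in
# detail:
#
#   if y >= 1:                # pc 0
#       if z >= 1:            # pc 1
#           while x >= 0:     # pc 2
#               x = y * z - 1 # pc 3 -> 4
#               y = y + 1     # pc 4 -> 2
#   # any failed guard sends pc to -1, violating the fairness constraint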
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
stutter = mgr.Equals(x_x, x)
loc0 = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, i_1)))
h_x = Hint("h_x2", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.Equals(x_z, y))
loc1 = Location(env, mgr.GE(z, i_0), mgr.GE(x, i_3))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, x)))
h_z = Hint("h_z3", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(x, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, x)))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(z, i_3))
loc0.set_progress(0, mgr.GT(x_z, z))
h_z = Hint("h_z1", env, frozenset([z]), symbs)
h_z.set_locs([loc0])
res.append(h_z)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3))
loc1.set_progress(2, mgr.Equals(x_y, y))
loc2 = Location(env, mgr.GE(y, i_3))
loc2.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc = Location(env, mgr.LE(z, i_0))
loc.set_progress(0, mgr.Equals(x_z, z))
h_z = Hint("h_z0", env, frozenset([z]), symbs)
h_z.set_locs([loc])
res.append(h_z)
loc0 = Location(env, mgr.GE(z, i_0))
loc0.set_progress(1, mgr.Equals(x_z, z))
loc1 = Location(env, mgr.GE(z, i_0))
loc1.set_progress(0, mgr.Equals(x_z, mgr.Plus(z, i_3)))
h_z = Hint("h_z4", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
stutter = mgr.Equals(x_y, y)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(z, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, z)))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
return frozenset(res)
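# Usage pattern, inferred from this file rather than from the hint module
# itself: each Location couples a region predicate over the current state
# (plus an optional assumption on the other symbols) with set_progress(i, rel),
# which names the successor location index and the relation the primed
# symbols must satisfy when moving there.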
avg_line_length: 34.169565 | max_line_length: 81 | alphanum_fraction: 0.529457
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f70a73527a65c5e526a4eb9382c4dd98ceed86bc | size: 294 | ext: py | lang: Python
max_stars_repo_path: manage.py | max_stars_repo_name: dstl/lighthouse | max_stars_repo_head_hexsha: b810742d9f4cbfac02bf99096542499d25c88b58 | max_stars_repo_licenses: ["MIT"]
max_stars_count: 5 | stars_event_datetime: 2016-05-12T13:47:38.000Z to 2020-06-22T07:33:35.000Z
max_issues_repo_path/name/head_hexsha/licenses: same as stars | max_issues_count: 7 | issues_event_datetime: 2016-10-24T12:41:09.000Z to 2016-12-08T21:58:18.000Z
max_forks_repo_path/name/head_hexsha/licenses: same as stars | max_forks_count: 4 | forks_event_datetime: 2016-05-12T21:53:21.000Z to 2021-04-10T22:02:26.000Z
#!/usr/bin/env python
# (c) Crown Owned Copyright, 2016. Dstl.
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lighthouse.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
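# Typical invocations of this entry point (standard Django management
# commands; the exact set of subcommands depends on the apps configured in
# lighthouse.settings):
#
#   python manage.py migrate
#   python manage.py runserver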
avg_line_length: 24.5 | max_line_length: 74 | alphanum_fraction: 0.761905
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f70a73e7ec769ceacef155b98ab3be8009c63172 | size: 5229 | ext: py | lang: Python
max_stars_repo_path: models/project.py | max_stars_repo_name: jlgoh/labeldat | max_stars_repo_head_hexsha: 057248a22c7f022110d712dbcb61befd40e62760 | max_stars_repo_licenses: ["MIT"]
max_stars_count: 1 | stars_event_datetime: 2021-09-07T06:34:54.000Z to 2021-09-07T06:34:54.000Z
max_issues_repo_path: models/project.py | max_issues_repo_name: wilsonteng97/labeldat | max_issues_repo_head_hexsha: bdca5df0af55bdd460807808861de25d762b28da | max_issues_repo_licenses: ["MIT"]
max_issues_count: 5 | issues_event_datetime: 2021-09-08T02:44:59.000Z to 2022-02-27T10:55:29.000Z
max_forks_repo_path: models/project.py | max_forks_repo_name: wilsonteng97/labeldat | max_forks_repo_head_hexsha: bdca5df0af55bdd460807808861de25d762b28da | max_forks_repo_licenses: ["MIT"]
max_forks_count: 1 | forks_event_datetime: 2020-12-31T11:03:39.000Z to 2020-12-31T11:03:39.000Z
from extensions import db
from models.item_data_type import ItemDataType
from models.label import Label
from models.task import Task
class Project(db.Model):
id = db.Column(db.String(80), primary_key=True, nullable=False)
    # many(Project)-to-1(Organisation): each Project holds a FK to its owning organisation
org_id = db.Column(db.String(80), db.ForeignKey('organisation.id'), nullable=False)
project_name = db.Column(db.String(80), nullable=False)
item_data_type = db.Column(db.Enum(ItemDataType), nullable=False)
layout = db.Column(db.JSON, nullable=False)
outsource_labelling = db.Column(db.Boolean, nullable=False)
created_at = db.Column(db.DateTime(), nullable=False)
# parent 1-to-many w Task
tasks = db.relationship('Task', backref='task', lazy=True)
# parent 1-to-many w ProjectManager
project_managers = db.relationship('ProjectManager', backref='project', lazy=True)
def __repr__(self):
return f"<Project {self.id} | {self.project_name} | Organisation : {self.org_id}>"
def to_response(self):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasks": [t.to_response_without_item_data() for t in self.tasks],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"created_at": self.created_at
}
def to_project_for_user_response(self, user_id):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasksLabelled": [t.to_response_with_labels_from_user(user_id)
for t in self.tasks_and_labels_from_user(user_id)],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"created_at": self.created_at
}
def to_created_project_response(self):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasks": [t.to_response_without_item_data() for t in self.tasks],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"tasksCount": self.calculate_number_of_tasks(),
"overallPercentage": self.calculate_tasks_labelled_percentage(),
"created_at": self.created_at
}
def to_contributed_project_response(self, user_id):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasks": [t.to_response_without_item_data() for t in self.tasks],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"tasksCount": self.calculate_number_of_tasks(),
"overallPercentage": self.calculate_tasks_labelled_percentage(),
"contributionCount": self.calculate_tasks_labelled_by_user(user_id),
"contributionPercentage": self.calculate_tasks_labelled_percentage_by_user(user_id),
"created_at": self.created_at
}
def tasks_and_labels_from_user(self, user_id):
resulting_tasks = []
for task in self.tasks:
for label in task.labels:
if label.user_id == user_id:
resulting_tasks.append(task)
break
return resulting_tasks
def calculate_number_of_tasks(self):
return len(self.tasks)
def calculate_tasks_labelled_percentage(self):
"""
Count % of tasks that have >= 1 label
"""
number_of_tasks = self.calculate_number_of_tasks()
if not number_of_tasks: # When there are no tasks
return 0
num_labelled = len([task for task in self.tasks if len(task.labels) > 0])
return round(float((num_labelled / number_of_tasks * 100)), 1)
def calculate_tasks_labelled_percentage_by_user(self, user_id):
"""
Count % of tasks that a user has labelled
"""
number_of_tasks = self.calculate_number_of_tasks()
if not number_of_tasks: # When there are no tasks
return 0
num_labelled_by_user = self.calculate_tasks_labelled_by_user(user_id)
return round(float((num_labelled_by_user / number_of_tasks) * 100), 1)
def calculate_tasks_labelled_by_user(self, user_id):
"""
Count number of tasks that a user has labelled
"""
tasks_by_user = db.session.query(Task).filter_by(project_id=self.id).join(Label).filter_by(
user_id=user_id).all()
num_labelled = len(tasks_by_user)
return num_labelled
| 42.169355
| 99
| 0.634921
|
from extensions import db
from models.item_data_type import ItemDataType
from models.label import Label
from models.task import Task
class Project(db.Model):
id = db.Column(db.String(80), primary_key=True, nullable=False)
org_id = db.Column(db.String(80), db.ForeignKey('organisation.id'), nullable=False)
project_name = db.Column(db.String(80), nullable=False)
item_data_type = db.Column(db.Enum(ItemDataType), nullable=False)
layout = db.Column(db.JSON, nullable=False)
outsource_labelling = db.Column(db.Boolean, nullable=False)
created_at = db.Column(db.DateTime(), nullable=False)
tasks = db.relationship('Task', backref='task', lazy=True)
project_managers = db.relationship('ProjectManager', backref='project', lazy=True)
def __repr__(self):
return f"<Project {self.id} | {self.project_name} | Organisation : {self.org_id}>"
def to_response(self):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasks": [t.to_response_without_item_data() for t in self.tasks],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"created_at": self.created_at
}
def to_project_for_user_response(self, user_id):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasksLabelled": [t.to_response_with_labels_from_user(user_id)
for t in self.tasks_and_labels_from_user(user_id)],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"created_at": self.created_at
}
def to_created_project_response(self):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasks": [t.to_response_without_item_data() for t in self.tasks],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"tasksCount": self.calculate_number_of_tasks(),
"overallPercentage": self.calculate_tasks_labelled_percentage(),
"created_at": self.created_at
}
def to_contributed_project_response(self, user_id):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasks": [t.to_response_without_item_data() for t in self.tasks],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"tasksCount": self.calculate_number_of_tasks(),
"overallPercentage": self.calculate_tasks_labelled_percentage(),
"contributionCount": self.calculate_tasks_labelled_by_user(user_id),
"contributionPercentage": self.calculate_tasks_labelled_percentage_by_user(user_id),
"created_at": self.created_at
}
def tasks_and_labels_from_user(self, user_id):
resulting_tasks = []
for task in self.tasks:
for label in task.labels:
if label.user_id == user_id:
resulting_tasks.append(task)
break
return resulting_tasks
def calculate_number_of_tasks(self):
return len(self.tasks)
def calculate_tasks_labelled_percentage(self):
number_of_tasks = self.calculate_number_of_tasks()
if not number_of_tasks:
return 0
num_labelled = len([task for task in self.tasks if len(task.labels) > 0])
return round(float((num_labelled / number_of_tasks * 100)), 1)
def calculate_tasks_labelled_percentage_by_user(self, user_id):
number_of_tasks = self.calculate_number_of_tasks()
if not number_of_tasks:
return 0
num_labelled_by_user = self.calculate_tasks_labelled_by_user(user_id)
return round(float((num_labelled_by_user / number_of_tasks) * 100), 1)
def calculate_tasks_labelled_by_user(self, user_id):
tasks_by_user = db.session.query(Task).filter_by(project_id=self.id).join(Label).filter_by(
user_id=user_id).all()
num_labelled = len(tasks_by_user)
return num_labelled
| true
| true
|
f70a74917258038a2fbc62fab3d8f0fe001b74ce
| 8,773
|
py
|
Python
|
videoanalyst/model/task_model/taskmodel_impl/siamese_track.py
|
983632847/video_analyst
|
01b7ad278b828a3f7ff7a0488c5ca8f055240192
|
[
"MIT"
] | 2
|
2020-07-30T08:26:08.000Z
|
2020-11-24T07:40:46.000Z
|
videoanalyst/model/task_model/taskmodel_impl/siamese_track.py
|
983632847/video_analyst
|
01b7ad278b828a3f7ff7a0488c5ca8f055240192
|
[
"MIT"
] | null | null | null |
videoanalyst/model/task_model/taskmodel_impl/siamese_track.py
|
983632847/video_analyst
|
01b7ad278b828a3f7ff7a0488c5ca8f055240192
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
from loguru import logger
import torch
import torch.nn as nn
import torch.nn.functional as F
from videoanalyst.model.common_opr.common_block import (conv_bn_relu,
xcorr_depthwise)
from videoanalyst.model.module_base import ModuleBase
from videoanalyst.model.task_model.taskmodel_base import (TRACK_TASKMODELS,
VOS_TASKMODELS)
torch.set_printoptions(precision=8)
@TRACK_TASKMODELS.register
@VOS_TASKMODELS.register
class SiamTrack(ModuleBase):
r"""
SiamTrack model for tracking
Hyper-Parameters
----------------
pretrain_model_path: string
path to parameter to be loaded into module
head_width: int
feature width in head structure
"""
default_hyper_params = dict(pretrain_model_path="",
head_width=256,
conv_weight_std=0.01,
neck_conv_bias=[True, True, True, True],
corr_fea_output=False,
trt_mode=False,
trt_fea_model_path="",
trt_track_model_path="")
support_phases = ["train", "feature", "track", "freeze_track_fea"]
def __init__(self, backbone, head, loss=None):
super(SiamTrack, self).__init__()
self.basemodel = backbone
self.head = head
self.loss = loss
self.trt_fea_model = None
self.trt_track_model = None
self._phase = "train"
@property
def phase(self):
return self._phase
@phase.setter
def phase(self, p):
assert p in self.support_phases
self._phase = p
def forward(self, *args, phase=None):
r"""
Perform tracking process for different phases (e.g. train / init / track)
Arguments
---------
target_img: torch.Tensor
target template image patch
search_img: torch.Tensor
search region image patch
Returns
-------
fcos_score_final: torch.Tensor
predicted score for bboxes, shape=(B, HW, 1)
fcos_bbox_final: torch.Tensor
predicted bbox in the crop, shape=(B, HW, 4)
fcos_cls_prob_final: torch.Tensor
classification score, shape=(B, HW, 1)
fcos_ctr_prob_final: torch.Tensor
center-ness score, shape=(B, HW, 1)
"""
if phase is None:
phase = self._phase
# used during training
if phase == 'train':
# resolve training data
training_data = args[0]
target_img = training_data["im_z"]
search_img = training_data["im_x"]
# backbone feature
f_z = self.basemodel(target_img)
f_x = self.basemodel(search_img)
# feature adjustment
c_z_k = self.c_z_k(f_z)
r_z_k = self.r_z_k(f_z)
c_x = self.c_x(f_x)
r_x = self.r_x(f_x)
# feature matching
r_out = xcorr_depthwise(r_x, r_z_k)
c_out = xcorr_depthwise(c_x, c_z_k)
# head
fcos_cls_score_final, fcos_ctr_score_final, fcos_bbox_final, corr_fea = self.head(
c_out, r_out)
predict_data = dict(
cls_pred=fcos_cls_score_final,
ctr_pred=fcos_ctr_score_final,
box_pred=fcos_bbox_final,
)
if self._hyper_params["corr_fea_output"]:
predict_data["corr_fea"] = corr_fea
return predict_data
# used for template feature extraction (normal mode)
elif phase == 'feature':
target_img, = args
if self._hyper_params["trt_mode"]:
# extract feature with trt model
out_list = self.trt_fea_model(target_img)
else:
# backbone feature
f_z = self.basemodel(target_img)
# template as kernel
c_z_k = self.c_z_k(f_z)
r_z_k = self.r_z_k(f_z)
# output
out_list = [c_z_k, r_z_k]
# used for template feature extraction (trt mode)
elif phase == "freeze_track_fea":
search_img, = args
# backbone feature
f_x = self.basemodel(search_img)
# feature adjustment
c_x = self.c_x(f_x)
r_x = self.r_x(f_x)
# head
return [c_x, r_x]
# [Broken] used for template feature extraction (trt mode)
# currently broken due to following issue of "torch2trt" package
# c.f. https://github.com/NVIDIA-AI-IOT/torch2trt/issues/251
elif phase == "freeze_track_head":
c_out, r_out = args
# head
outputs = self.head(c_out, r_out, 0, True)
return outputs
# used for tracking one frame during test
elif phase == 'track':
if len(args) == 3:
search_img, c_z_k, r_z_k = args
if self._hyper_params["trt_mode"]:
c_x, r_x = self.trt_track_model(search_img)
else:
# backbone feature
f_x = self.basemodel(search_img)
# feature adjustment
c_x = self.c_x(f_x)
r_x = self.r_x(f_x)
elif len(args) == 4:
# c_x, r_x already computed
c_z_k, r_z_k, c_x, r_x = args
else:
raise ValueError("Illegal args length: %d" % len(args))
# feature matching
r_out = xcorr_depthwise(r_x, r_z_k)
c_out = xcorr_depthwise(c_x, c_z_k)
# head
fcos_cls_score_final, fcos_ctr_score_final, fcos_bbox_final, corr_fea = self.head(
c_out, r_out, search_img.size(-1))
# apply sigmoid
fcos_cls_prob_final = torch.sigmoid(fcos_cls_score_final)
fcos_ctr_prob_final = torch.sigmoid(fcos_ctr_score_final)
# apply centerness correction
fcos_score_final = fcos_cls_prob_final * fcos_ctr_prob_final
# register extra output
extra = dict(c_x=c_x, r_x=r_x, corr_fea=corr_fea)
# output
out_list = fcos_score_final, fcos_bbox_final, fcos_cls_prob_final, fcos_ctr_prob_final, extra
else:
raise ValueError("Phase non-implemented.")
return out_list
def update_params(self):
r"""
Load model parameters
"""
self._make_convs()
self._initialize_conv()
super().update_params()
if self._hyper_params["trt_mode"]:
logger.info("trt mode enable")
from torch2trt import TRTModule
self.trt_fea_model = TRTModule()
self.trt_fea_model.load_state_dict(
torch.load(self._hyper_params["trt_fea_model_path"]))
self.trt_track_model = TRTModule()
self.trt_track_model.load_state_dict(
torch.load(self._hyper_params["trt_track_model_path"]))
logger.info("loading trt model succefully")
def _make_convs(self):
head_width = self._hyper_params['head_width']
# feature adjustment
self.r_z_k = conv_bn_relu(head_width,
head_width,
1,
3,
0,
has_relu=False)
self.c_z_k = conv_bn_relu(head_width,
head_width,
1,
3,
0,
has_relu=False)
self.r_x = conv_bn_relu(head_width, head_width, 1, 3, 0, has_relu=False)
self.c_x = conv_bn_relu(head_width, head_width, 1, 3, 0, has_relu=False)
def _initialize_conv(self, ):
conv_weight_std = self._hyper_params['conv_weight_std']
conv_list = [
self.r_z_k.conv, self.c_z_k.conv, self.r_x.conv, self.c_x.conv
]
for ith in range(len(conv_list)):
conv = conv_list[ith]
torch.nn.init.normal_(conv.weight,
std=conv_weight_std) # conv_weight_std=0.01
def set_device(self, dev):
if not isinstance(dev, torch.device):
dev = torch.device(dev)
self.to(dev)
if self.loss is not None:
for loss_name in self.loss:
self.loss[loss_name].to(dev)
| 36.861345
| 105
| 0.53904
|
import numpy as np
from loguru import logger
import torch
import torch.nn as nn
import torch.nn.functional as F
from videoanalyst.model.common_opr.common_block import (conv_bn_relu,
xcorr_depthwise)
from videoanalyst.model.module_base import ModuleBase
from videoanalyst.model.task_model.taskmodel_base import (TRACK_TASKMODELS,
VOS_TASKMODELS)
torch.set_printoptions(precision=8)
@TRACK_TASKMODELS.register
@VOS_TASKMODELS.register
class SiamTrack(ModuleBase):
default_hyper_params = dict(pretrain_model_path="",
head_width=256,
conv_weight_std=0.01,
neck_conv_bias=[True, True, True, True],
corr_fea_output=False,
trt_mode=False,
trt_fea_model_path="",
trt_track_model_path="")
support_phases = ["train", "feature", "track", "freeze_track_fea"]
def __init__(self, backbone, head, loss=None):
super(SiamTrack, self).__init__()
self.basemodel = backbone
self.head = head
self.loss = loss
self.trt_fea_model = None
self.trt_track_model = None
self._phase = "train"
@property
def phase(self):
return self._phase
@phase.setter
def phase(self, p):
assert p in self.support_phases
self._phase = p
def forward(self, *args, phase=None):
if phase is None:
phase = self._phase
if phase == 'train':
training_data = args[0]
target_img = training_data["im_z"]
search_img = training_data["im_x"]
f_z = self.basemodel(target_img)
f_x = self.basemodel(search_img)
c_z_k = self.c_z_k(f_z)
r_z_k = self.r_z_k(f_z)
c_x = self.c_x(f_x)
r_x = self.r_x(f_x)
r_out = xcorr_depthwise(r_x, r_z_k)
c_out = xcorr_depthwise(c_x, c_z_k)
fcos_cls_score_final, fcos_ctr_score_final, fcos_bbox_final, corr_fea = self.head(
c_out, r_out)
predict_data = dict(
cls_pred=fcos_cls_score_final,
ctr_pred=fcos_ctr_score_final,
box_pred=fcos_bbox_final,
)
if self._hyper_params["corr_fea_output"]:
predict_data["corr_fea"] = corr_fea
return predict_data
elif phase == 'feature':
target_img, = args
if self._hyper_params["trt_mode"]:
out_list = self.trt_fea_model(target_img)
else:
f_z = self.basemodel(target_img)
c_z_k = self.c_z_k(f_z)
r_z_k = self.r_z_k(f_z)
out_list = [c_z_k, r_z_k]
elif phase == "freeze_track_fea":
search_img, = args
f_x = self.basemodel(search_img)
c_x = self.c_x(f_x)
r_x = self.r_x(f_x)
return [c_x, r_x]
elif phase == "freeze_track_head":
c_out, r_out = args
outputs = self.head(c_out, r_out, 0, True)
return outputs
elif phase == 'track':
if len(args) == 3:
search_img, c_z_k, r_z_k = args
if self._hyper_params["trt_mode"]:
c_x, r_x = self.trt_track_model(search_img)
else:
f_x = self.basemodel(search_img)
c_x = self.c_x(f_x)
r_x = self.r_x(f_x)
elif len(args) == 4:
c_z_k, r_z_k, c_x, r_x = args
else:
raise ValueError("Illegal args length: %d" % len(args))
r_out = xcorr_depthwise(r_x, r_z_k)
c_out = xcorr_depthwise(c_x, c_z_k)
fcos_cls_score_final, fcos_ctr_score_final, fcos_bbox_final, corr_fea = self.head(
c_out, r_out, search_img.size(-1))
fcos_cls_prob_final = torch.sigmoid(fcos_cls_score_final)
fcos_ctr_prob_final = torch.sigmoid(fcos_ctr_score_final)
fcos_score_final = fcos_cls_prob_final * fcos_ctr_prob_final
extra = dict(c_x=c_x, r_x=r_x, corr_fea=corr_fea)
out_list = fcos_score_final, fcos_bbox_final, fcos_cls_prob_final, fcos_ctr_prob_final, extra
else:
raise ValueError("Phase non-implemented.")
return out_list
def update_params(self):
self._make_convs()
self._initialize_conv()
super().update_params()
if self._hyper_params["trt_mode"]:
logger.info("trt mode enable")
from torch2trt import TRTModule
self.trt_fea_model = TRTModule()
self.trt_fea_model.load_state_dict(
torch.load(self._hyper_params["trt_fea_model_path"]))
self.trt_track_model = TRTModule()
self.trt_track_model.load_state_dict(
torch.load(self._hyper_params["trt_track_model_path"]))
logger.info("loading trt model succefully")
def _make_convs(self):
head_width = self._hyper_params['head_width']
self.r_z_k = conv_bn_relu(head_width,
head_width,
1,
3,
0,
has_relu=False)
self.c_z_k = conv_bn_relu(head_width,
head_width,
1,
3,
0,
has_relu=False)
self.r_x = conv_bn_relu(head_width, head_width, 1, 3, 0, has_relu=False)
self.c_x = conv_bn_relu(head_width, head_width, 1, 3, 0, has_relu=False)
def _initialize_conv(self, ):
conv_weight_std = self._hyper_params['conv_weight_std']
conv_list = [
self.r_z_k.conv, self.c_z_k.conv, self.r_x.conv, self.c_x.conv
]
for ith in range(len(conv_list)):
conv = conv_list[ith]
torch.nn.init.normal_(conv.weight,
std=conv_weight_std)
def set_device(self, dev):
if not isinstance(dev, torch.device):
dev = torch.device(dev)
self.to(dev)
if self.loss is not None:
for loss_name in self.loss:
self.loss[loss_name].to(dev)
| true
| true
|
f70a75256c638f4a3ce9cda3b9577176e49f3cca
| 4,042
|
py
|
Python
|
youtube_dl/extractor/redtube.py
|
aalvarito68/https-github.com-rg3-youtube-dl
|
dfc80bdd2e4ef3d30f161a93f99f3050537944ab
|
[
"Unlicense"
] | 3
|
2017-09-28T22:31:51.000Z
|
2021-09-15T07:43:07.000Z
|
youtube_dl/extractor/redtube.py
|
aalvarito68/https-github.com-rg3-youtube-dl
|
dfc80bdd2e4ef3d30f161a93f99f3050537944ab
|
[
"Unlicense"
] | null | null | null |
youtube_dl/extractor/redtube.py
|
aalvarito68/https-github.com-rg3-youtube-dl
|
dfc80bdd2e4ef3d30f161a93f99f3050537944ab
|
[
"Unlicense"
] | 3
|
2020-12-01T10:58:29.000Z
|
2021-07-22T15:57:22.000Z
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
str_to_int,
unified_strdate,
)
class RedTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.redtube.com/66418',
'md5': '7b8c22b5e7098a3e1c09709df1126d2d',
'info_dict': {
'id': '66418',
'ext': 'mp4',
'title': 'Sucked on a toilet',
'upload_date': '20120831',
'duration': 596,
'view_count': int,
'age_limit': 18,
}
}, {
'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
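        # Collect embed.redtube.com iframe URLs found in an arbitrary page;
        # protocol-relative '//' sources are matched as well.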
return re.findall(
r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
webpage)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://www.redtube.com/%s' % video_id, video_id)
if any(s in webpage for s in ['video-deleted-info', '>This video has been removed']):
raise ExtractorError('Video %s has been removed' % video_id, expected=True)
title = self._html_search_regex(
(r'<h1 class="videoTitle[^"]*">(?P<title>.+?)</h1>',
r'videoTitle\s*:\s*(["\'])(?P<title>)\1'),
webpage, 'title', group='title')
formats = []
sources = self._parse_json(
self._search_regex(
r'sources\s*:\s*({.+?})', webpage, 'source', default='{}'),
video_id, fatal=False)
if sources and isinstance(sources, dict):
for format_id, format_url in sources.items():
if format_url:
formats.append({
'url': format_url,
'format_id': format_id,
'height': int_or_none(format_id),
})
medias = self._parse_json(
self._search_regex(
r'mediaDefinition\s*:\s*(\[.+?\])', webpage,
'media definitions', default='{}'),
video_id, fatal=False)
if medias and isinstance(medias, list):
for media in medias:
format_url = media.get('videoUrl')
if not format_url or not isinstance(format_url, compat_str):
continue
format_id = media.get('quality')
formats.append({
'url': format_url,
'format_id': format_id,
'height': int_or_none(format_id),
})
if not formats:
video_url = self._html_search_regex(
r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
formats.append({'url': video_url})
self._sort_formats(formats)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(self._search_regex(
r'<span[^>]+class="added-time"[^>]*>ADDED ([^<]+)<',
webpage, 'upload date', fatal=False))
duration = int_or_none(self._search_regex(
r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
view_count = str_to_int(self._search_regex(
r'<span[^>]*>VIEWS</span></td>\s*<td>([\d,.]+)',
webpage, 'view count', fatal=False))
# No self-labeling, but they describe themselves as
# "Home of Videos Porno"
age_limit = 18
return {
'id': video_id,
'ext': 'mp4',
'title': title,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'age_limit': age_limit,
'formats': formats,
}
| 35.769912
| 101
| 0.517566
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
str_to_int,
unified_strdate,
)
class RedTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.redtube.com/66418',
'md5': '7b8c22b5e7098a3e1c09709df1126d2d',
'info_dict': {
'id': '66418',
'ext': 'mp4',
'title': 'Sucked on a toilet',
'upload_date': '20120831',
'duration': 596,
'view_count': int,
'age_limit': 18,
}
}, {
'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return re.findall(
r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
webpage)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://www.redtube.com/%s' % video_id, video_id)
if any(s in webpage for s in ['video-deleted-info', '>This video has been removed']):
raise ExtractorError('Video %s has been removed' % video_id, expected=True)
title = self._html_search_regex(
(r'<h1 class="videoTitle[^"]*">(?P<title>.+?)</h1>',
r'videoTitle\s*:\s*(["\'])(?P<title>)\1'),
webpage, 'title', group='title')
formats = []
sources = self._parse_json(
self._search_regex(
r'sources\s*:\s*({.+?})', webpage, 'source', default='{}'),
video_id, fatal=False)
if sources and isinstance(sources, dict):
for format_id, format_url in sources.items():
if format_url:
formats.append({
'url': format_url,
'format_id': format_id,
'height': int_or_none(format_id),
})
medias = self._parse_json(
self._search_regex(
r'mediaDefinition\s*:\s*(\[.+?\])', webpage,
'media definitions', default='{}'),
video_id, fatal=False)
if medias and isinstance(medias, list):
for media in medias:
format_url = media.get('videoUrl')
if not format_url or not isinstance(format_url, compat_str):
continue
format_id = media.get('quality')
formats.append({
'url': format_url,
'format_id': format_id,
'height': int_or_none(format_id),
})
if not formats:
video_url = self._html_search_regex(
r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
formats.append({'url': video_url})
self._sort_formats(formats)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(self._search_regex(
r'<span[^>]+class="added-time"[^>]*>ADDED ([^<]+)<',
webpage, 'upload date', fatal=False))
duration = int_or_none(self._search_regex(
r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
view_count = str_to_int(self._search_regex(
r'<span[^>]*>VIEWS</span></td>\s*<td>([\d,.]+)',
webpage, 'view count', fatal=False))
# No self-labeling, but they describe themselves as
# "Home of Videos Porno"
age_limit = 18
return {
'id': video_id,
'ext': 'mp4',
'title': title,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'age_limit': age_limit,
'formats': formats,
}
| true
| true
|
f70a75b50f6ab4c03d21568a0b919684fa9dd706
| 187
|
py
|
Python
|
repos/tf_ctpn_cpu/lib/utils/setup.py
|
ysglh/DeepVideoAnalytics
|
ce807cc1595c813250bb4bc7dfc6fb76cd644335
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 3
|
2019-03-05T00:46:56.000Z
|
2021-11-26T10:20:40.000Z
|
repos/tf_ctpn_cpu/lib/utils/setup.py
|
jiangxu87/DeepVideoAnalytics
|
e401b3273782409b2604657514bec293d6aa75b0
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
repos/tf_ctpn_cpu/lib/utils/setup.py
|
jiangxu87/DeepVideoAnalytics
|
e401b3273782409b2604657514bec293d6aa75b0
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 4
|
2021-09-22T07:47:27.000Z
|
2022-01-23T14:16:08.000Z
|
from Cython.Build import cythonize
import numpy as np
from distutils.core import setup
setup(ext_modules=cythonize(["bbox.pyx","cython_nms.pyx"],include_path=[np.get_include()]
))
| 20.777778
| 89
| 0.764706
|
from Cython.Build import cythonize
import numpy as np
from distutils.core import setup
setup(ext_modules=cythonize(["bbox.pyx","cython_nms.pyx"],include_path=[np.get_include()]
))
| true
| true
|
f70a75c0e2859d8527b98b051b3342213f23e151
| 6,992
|
py
|
Python
|
nicos/clients/gui/panels/base.py
|
ess-dmsc/nicos
|
755d61d403ff7123f804c45fc80c7ff4d762993b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1
|
2021-03-26T10:30:45.000Z
|
2021-03-26T10:30:45.000Z
|
nicos/clients/gui/panels/base.py
|
ess-dmsc/nicos
|
755d61d403ff7123f804c45fc80c7ff4d762993b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91
|
2020-08-18T09:20:26.000Z
|
2022-02-01T11:07:14.000Z
|
nicos/clients/gui/panels/base.py
|
ess-dmsc/nicos
|
755d61d403ff7123f804c45fc80c7ff4d762993b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 3
|
2020-08-04T18:35:05.000Z
|
2021-04-16T11:22:08.000Z
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <g.brandl@fz-juelich.de>
# Christian Felder <c.felder@fz-juelich.de>
#
# *****************************************************************************
"""Support for "auxiliary" windows containing panels."""
from time import time as currenttime
from nicos.clients.gui.config import panel
from nicos.clients.gui.utils import DlgUtils, SettingGroup
from nicos.guisupport.qt import QDialog, QHBoxLayout, QObject, QPainter, \
QPalette, QStyle, QStyleOption, QWidget, pyqtSignal
from nicos.utils import checkSetupSpec
from nicos.utils.loggers import NicosLogger
class SetupDepWindowMixin:
def __init__(self, client):
if 'session/mastersetup' not in client._reg_keys:
return
values = client.ask('getcachekeys', 'session/mastersetup',
quiet=True, default=[])
for key, value in values:
if key == 'session/mastersetup':
currtime = currenttime()
for widget in client._reg_keys[key]:
if widget():
widget().on_keyChange(key, value, currtime, False)
class PanelDialog(SetupDepWindowMixin, QDialog):
def __init__(self, parent, client, panelcfg, title, **options):
from nicos.clients.gui.panels.utils import createWindowItem
QDialog.__init__(self, parent)
self.panels = []
self.mainwindow = parent.mainwindow
self.log = NicosLogger('PanelDialog')
self.log.parent = self.mainwindow.log
self.client = client
self.user_color = self.palette().color(QPalette.Base)
self.user_font = self.font()
if isinstance(panelcfg, type) and issubclass(panelcfg, Panel):
panelcfg = panel('%s.%s' % (panelcfg.__module__,
panelcfg.__name__), **options)
elif isinstance(panelcfg, str):
panelcfg = panel(panelcfg, **options)
hbox = QHBoxLayout()
hbox.setContentsMargins(0, 0, 0, 0)
pnl = createWindowItem(panelcfg, self, self, self.mainwindow, self.log)
if pnl:
hbox.addWidget(pnl)
self.setLayout(hbox)
self.setWindowTitle(title)
SetupDepWindowMixin.__init__(self, self.client)
self.setProperty('type', 'PanelDialog')
def addPanel(self, panel, always=True):
if always or panel not in self.panels:
self.panels.append(panel)
class SetupDepPanelMixin(QObject):
"""Mixin to handle setup-dependent visibility.
    Note: You must explicitly add the following class attribute in all
    classes using this mixin (a PyQt restriction, see
https://riverbankcomputing.com/pipermail/pyqt/2013-September/033199.html):
`setWidgetVisible = SetupDepPanelMixin.setWidgetVisible`
"""
setupSpec = ()
setWidgetVisible = pyqtSignal(QWidget, bool, name='setWidgetVisible')
def __init__(self, client, options): # pylint: disable=super-init-not-called
setups = options.get('setups', '')
self.setSetups(setups)
client.register(self, 'session/mastersetup')
def setSetups(self, setupSpec):
self.setupSpec = setupSpec
self.log.debug('setups are: %r', self.setupSpec)
checkSetupSpec(self.setupSpec, '', log=self.log)
def on_keyChange(self, key, value, time, expired):
if key == 'session/mastersetup' and self.setupSpec:
if hasattr(self, 'setWidgetVisible'):
enabled = checkSetupSpec(self.setupSpec, value, log=self.log)
self.setWidgetVisible.emit(self, enabled)
class Panel(DlgUtils, QWidget, SetupDepPanelMixin):
panelName = ''
setWidgetVisible = SetupDepPanelMixin.setWidgetVisible
def __init__(self, parent, client, options):
QWidget.__init__(self, parent)
self.log = NicosLogger(self.panelName)
self.log.parent = parent.mainwindow.log
SetupDepPanelMixin.__init__(self, client, options)
DlgUtils.__init__(self, self.panelName)
self.parentwindow = parent
self.client = client
self.mainwindow = parent.mainwindow
self.actions = set()
self.sgroup = SettingGroup(self.panelName)
with self.sgroup as settings:
self.loadSettings(settings)
self.setProperty('type', 'Panel')
self.setProperty('panel', self.__class__.__name__)
def closeWindow(self):
"""Try to close the window containing this panel.
If the window is the main window, nothing will be done.
"""
from nicos.clients.gui.panels.tabwidget import DetachedWindow
from nicos.clients.gui.panels.auxwindows import AuxiliaryWindow
obj = self
while hasattr(obj, 'parent'):
obj = obj.parent()
if isinstance(obj, (DetachedWindow, AuxiliaryWindow, PanelDialog)):
obj.close()
return
def postInit(self):
"""This method can be implemented to perform actions after **all** panels
have been created. This can be useful e.g. for accessing other panels
using their unique ``panelName``.
"""
def setExpertMode(self, expert):
pass
def setViewOnly(self, viewonly):
pass
def loadSettings(self, settings):
pass
def saveSettings(self, settings):
pass
def setCustomStyle(self, font, back):
pass
def getToolbars(self):
return []
def getMenus(self):
return []
def hideTitle(self):
"""Called when the panel is shown in a dock or tab widget, which
provides its own place for the panel title.
If the panel has a title widget, it should hide it in this method.
"""
def requestClose(self):
return True
def updateStatus(self, status, exception=False):
pass
def paintEvent(self, event):
opt = QStyleOption()
opt.initFrom(self)
painter = QPainter(self)
self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)
| 36.227979
| 81
| 0.641447
|
from time import time as currenttime
from nicos.clients.gui.config import panel
from nicos.clients.gui.utils import DlgUtils, SettingGroup
from nicos.guisupport.qt import QDialog, QHBoxLayout, QObject, QPainter, \
QPalette, QStyle, QStyleOption, QWidget, pyqtSignal
from nicos.utils import checkSetupSpec
from nicos.utils.loggers import NicosLogger
class SetupDepWindowMixin:
def __init__(self, client):
if 'session/mastersetup' not in client._reg_keys:
return
values = client.ask('getcachekeys', 'session/mastersetup',
quiet=True, default=[])
for key, value in values:
if key == 'session/mastersetup':
currtime = currenttime()
for widget in client._reg_keys[key]:
if widget():
widget().on_keyChange(key, value, currtime, False)
class PanelDialog(SetupDepWindowMixin, QDialog):
def __init__(self, parent, client, panelcfg, title, **options):
from nicos.clients.gui.panels.utils import createWindowItem
QDialog.__init__(self, parent)
self.panels = []
self.mainwindow = parent.mainwindow
self.log = NicosLogger('PanelDialog')
self.log.parent = self.mainwindow.log
self.client = client
self.user_color = self.palette().color(QPalette.Base)
self.user_font = self.font()
if isinstance(panelcfg, type) and issubclass(panelcfg, Panel):
panelcfg = panel('%s.%s' % (panelcfg.__module__,
panelcfg.__name__), **options)
elif isinstance(panelcfg, str):
panelcfg = panel(panelcfg, **options)
hbox = QHBoxLayout()
hbox.setContentsMargins(0, 0, 0, 0)
pnl = createWindowItem(panelcfg, self, self, self.mainwindow, self.log)
if pnl:
hbox.addWidget(pnl)
self.setLayout(hbox)
self.setWindowTitle(title)
SetupDepWindowMixin.__init__(self, self.client)
self.setProperty('type', 'PanelDialog')
def addPanel(self, panel, always=True):
if always or panel not in self.panels:
self.panels.append(panel)
class SetupDepPanelMixin(QObject):
setupSpec = ()
setWidgetVisible = pyqtSignal(QWidget, bool, name='setWidgetVisible')
def __init__(self, client, options):
setups = options.get('setups', '')
self.setSetups(setups)
client.register(self, 'session/mastersetup')
def setSetups(self, setupSpec):
self.setupSpec = setupSpec
self.log.debug('setups are: %r', self.setupSpec)
checkSetupSpec(self.setupSpec, '', log=self.log)
def on_keyChange(self, key, value, time, expired):
if key == 'session/mastersetup' and self.setupSpec:
if hasattr(self, 'setWidgetVisible'):
enabled = checkSetupSpec(self.setupSpec, value, log=self.log)
self.setWidgetVisible.emit(self, enabled)
class Panel(DlgUtils, QWidget, SetupDepPanelMixin):
panelName = ''
setWidgetVisible = SetupDepPanelMixin.setWidgetVisible
def __init__(self, parent, client, options):
QWidget.__init__(self, parent)
self.log = NicosLogger(self.panelName)
self.log.parent = parent.mainwindow.log
SetupDepPanelMixin.__init__(self, client, options)
DlgUtils.__init__(self, self.panelName)
self.parentwindow = parent
self.client = client
self.mainwindow = parent.mainwindow
self.actions = set()
self.sgroup = SettingGroup(self.panelName)
with self.sgroup as settings:
self.loadSettings(settings)
self.setProperty('type', 'Panel')
self.setProperty('panel', self.__class__.__name__)
def closeWindow(self):
from nicos.clients.gui.panels.tabwidget import DetachedWindow
from nicos.clients.gui.panels.auxwindows import AuxiliaryWindow
obj = self
while hasattr(obj, 'parent'):
obj = obj.parent()
if isinstance(obj, (DetachedWindow, AuxiliaryWindow, PanelDialog)):
obj.close()
return
    def postInit(self):
        pass
def setExpertMode(self, expert):
pass
def setViewOnly(self, viewonly):
pass
def loadSettings(self, settings):
pass
def saveSettings(self, settings):
pass
def setCustomStyle(self, font, back):
pass
def getToolbars(self):
return []
def getMenus(self):
return []
    def hideTitle(self):
        pass
def requestClose(self):
return True
def updateStatus(self, status, exception=False):
pass
def paintEvent(self, event):
opt = QStyleOption()
opt.initFrom(self)
painter = QPainter(self)
self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)
| true
| true
|
f70a75c3e0f6aec1f478f8422ff34adbef44b487
| 3,812
|
py
|
Python
|
tests/unit/test_diffusion2d_functions.py
|
constracktor/testing-python-exercise
|
70b15a9d8e193fc518e46996cbc3e9f52cb1336d
|
[
"CC-BY-4.0"
] | null | null | null |
tests/unit/test_diffusion2d_functions.py
|
constracktor/testing-python-exercise
|
70b15a9d8e193fc518e46996cbc3e9f52cb1336d
|
[
"CC-BY-4.0"
] | null | null | null |
tests/unit/test_diffusion2d_functions.py
|
constracktor/testing-python-exercise
|
70b15a9d8e193fc518e46996cbc3e9f52cb1336d
|
[
"CC-BY-4.0"
] | null | null | null |
"""
Tests for functions in class SolveDiffusion2D
"""
import numpy as np
#import pytest
from diffusion2d import SolveDiffusion2D
from unittest import TestCase
class TestOperations(TestCase):
"""
Test suite for mathematical operations functions.
"""
def setUp(self):
# Fixture
self.w = 12.
self.h = 20.
self.dx = 0.4
self.dy = 0.2
self.D = 0.5
self.T_cold = 300.
self.T_hot = 700.
def test_initialize_domain(self):
"""
Check function SolveDiffusion2D.initialize_domain
"""
solver = SolveDiffusion2D()
expected_nx = 30 #int(self.w / self.dx)
expected_ny = 100 #int(self.h / self.dy)
solver.initialize_domain(self.w,self.h,self.dx,self.dy)
self.assertEqual(solver.nx, expected_nx)
self.assertEqual(solver.ny, expected_ny)
def test_initialize_physical_parameters(self):
"""
        Check function SolveDiffusion2D.initialize_physical_parameters
"""
solver = SolveDiffusion2D()
solver.dx = self.dx
solver.dy = self.dy
#dx**2 * dy**2 / (2 * d * (dx**2 + dy**2))
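        # = (0.4**2 * 0.2**2) / (2 * 0.5 * (0.4**2 + 0.2**2)) = 0.0064 / 0.2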
expected_dt = 0.032
solver.initialize_physical_parameters(self.D)
self.assertAlmostEqual(solver.dt, expected_dt, 6)
def test_get_initial_condition(self):
"""
        Check function SolveDiffusion2D.get_initial_condition
"""
solver = SolveDiffusion2D()
solver.T_cold = self.T_cold
solver.T_hot = self.T_hot
solver.initialize_domain(self.w,self.h,self.dx,self.dy)
expected_u = self.T_cold * np.ones((solver.nx, solver.ny))
# Initial conditions - circle of radius r centred at (cx,cy) (mm)
r, cx, cy = 2, 5, 5
r2 = r ** 2
for i in range(solver.nx):
for j in range(solver.ny):
p2 = (i * solver.dx - cx) ** 2 + (j * solver.dy - cy) ** 2
if p2 < r2:
expected_u[i, j] = self.T_hot
actual_u = solver.get_initial_condition()
for i in range(solver.nx):
for j in range(solver.ny):
self.assertEqual(actual_u[i,j], expected_u[i,j])
# def test_initialize_domain():
# """
# Check function SolveDiffusion2D.initialize_domain
# """
# solver = SolveDiffusion2D()
#
# w = 12.
# h = 20.
# dx = 0.4
# dy = 0.2
# expected_nx = 30 #int(w / dx)
# expected_ny = 100 #int(h / dy)
#
# solver.initialize_domain(w,h,dx,dy)
#
# assert solver.nx == expected_nx
# assert solver.ny == expected_ny
#
# def test_initialize_physical_parameters():
# """
# Check function SolveDiffusion2D.initialize_physical_parameters
# """
# solver = SolveDiffusion2D()
# solver.dx = 0.2
# solver.dy = 0.4
# d=5.
#
# #dx**2 * dy**2 / (2 * d * (dx**2 + dy**2))
# expected_dt = pytest.approx(0.0032, abs=0.000001)
#
# solver.initialize_physical_parameters(d)
#
# assert solver.dt == expected_dt
#
# def test_get_initial_condition():
# """
# Check function SolveDiffusion2D.get_initial_condition
# """
# solver = SolveDiffusion2D()
# solver.T_cold = 300.
# solver.T_hot = 700.
# solver.dx = 0.1
# solver.dy = 0.2
# solver.nx = 100
# solver.ny = 50
#
# expected_u = solver.T_cold * np.ones((solver.nx, solver.ny))
#
# # Initial conditions - circle of radius r centred at (cx,cy) (mm)
# r, cx, cy = 2, 5, 5
# r2 = r ** 2
# for i in range(solver.nx):
# for j in range(solver.ny):
# p2 = (i * solver.dx - cx) ** 2 + (j * solver.dy - cy) ** 2
# if p2 < r2:
# expected_u[i, j] = solver.T_hot
#
# actual_u = solver.get_initial_condition()
#
# assert np.all(actual_u == expected_u)
| 27.228571
| 74
| 0.574239
|
import numpy as np
from diffusion2d import SolveDiffusion2D
from unittest import TestCase
class TestOperations(TestCase):
def setUp(self):
self.w = 12.
self.h = 20.
self.dx = 0.4
self.dy = 0.2
self.D = 0.5
self.T_cold = 300.
self.T_hot = 700.
def test_initialize_domain(self):
solver = SolveDiffusion2D()
expected_nx = 30
expected_ny = 100
solver.initialize_domain(self.w,self.h,self.dx,self.dy)
self.assertEqual(solver.nx, expected_nx)
self.assertEqual(solver.ny, expected_ny)
def test_initialize_physical_parameters(self):
solver = SolveDiffusion2D()
solver.dx = self.dx
solver.dy = self.dy
expected_dt = 0.032
solver.initialize_physical_parameters(self.D)
self.assertAlmostEqual(solver.dt, expected_dt, 6)
def test_get_initial_condition(self):
solver = SolveDiffusion2D()
solver.T_cold = self.T_cold
solver.T_hot = self.T_hot
solver.initialize_domain(self.w,self.h,self.dx,self.dy)
expected_u = self.T_cold * np.ones((solver.nx, solver.ny))
r, cx, cy = 2, 5, 5
r2 = r ** 2
for i in range(solver.nx):
for j in range(solver.ny):
p2 = (i * solver.dx - cx) ** 2 + (j * solver.dy - cy) ** 2
if p2 < r2:
expected_u[i, j] = self.T_hot
actual_u = solver.get_initial_condition()
for i in range(solver.nx):
for j in range(solver.ny):
self.assertEqual(actual_u[i,j], expected_u[i,j])
| true
| true
|
f70a75e1d9bb17e8ca4b66e379fd93f2e4479d40
| 5,117
|
py
|
Python
|
examples/gto/20-ao_integrals.py
|
nmardirossian/pyscf
|
57c8912dcfcc1157a822feede63df54ed1067115
|
[
"BSD-2-Clause"
] | 1
|
2018-05-02T19:55:30.000Z
|
2018-05-02T19:55:30.000Z
|
examples/gto/20-ao_integrals.py
|
nmardirossian/pyscf
|
57c8912dcfcc1157a822feede63df54ed1067115
|
[
"BSD-2-Clause"
] | null | null | null |
examples/gto/20-ao_integrals.py
|
nmardirossian/pyscf
|
57c8912dcfcc1157a822feede63df54ed1067115
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Access AO integrals
Mole.intor and Mole.intor_by_shell functions can generate AO integrals.
Calling Mole.intor with the integral function name returns an integral matrix
for all basis functions defined in Mole. If the integral operator has several
components, e.g. gradients, the keyword argument comp=* needs to be specified
to tell the function how many components the integrals have.
The Mole.intor_by_shell function generates the integrals for the given shell
indices. Keyword argument comp=* is also required when the integral operator
has multiple components.
See pyscf/gto/moleintor.py file for the complete list of supported integrals.
'''
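# A minimal sketch of the two call styles described above; the H2 molecule and
# variable names here are illustrative only:
#
#     m = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
#     s_full = m.intor('cint1e_ovlp_sph')                  # full AO overlap
#     s_pair = m.intor_by_shell('cint1e_ovlp_sph', (0, 1)) # one shell pair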
import numpy
from pyscf import gto, scf
mol = gto.M(
verbose = 0,
atom = 'C 0 0 0; O 0 0 1.5',
basis = 'ccpvdz'
)
mf = scf.RHF(mol)
mf.kernel()
dm = mf.make_rdm1()
# Overlap, kinetic, nuclear attraction
s = mol.intor('cint1e_ovlp_sph')
t = mol.intor('cint1e_kin_sph')
v = mol.intor('cint1e_nuc_sph')
# Overlap, kinetic, nuclear attraction gradients (against electron coordinates)
s1 = mol.intor('cint1e_ipovlp_sph', comp=3)
t1 = mol.intor('cint1e_ipkin_sph' , comp=3)
v1 = mol.intor('cint1e_ipnuc_sph' , comp=3)
print('Dipole %s' % numpy.einsum('xij,ij->x',
mol.intor('cint1e_r_sph', comp=3), dm))
#
# AO overlap between two molecules
#
mol1 = gto.M(
verbose = 0,
atom = 'H 0 1 0; H 1 0 0',
basis = 'ccpvdz'
)
s = gto.intor_cross('cint1e_ovlp_sph', mol, mol1)
print('overlap shape (%d, %d)' % s.shape)
#
# 2e integrals. Keyword aosym is to specify the permutation symmetry in the
# AO integral matrix. s8 means 8-fold symmetry, s2kl means 2-fold symmetry
# for the symmetry between kl in (ij|kl)
#
eri = mol.intor('cint2e_sph', aosym='s8')
#
# 2e gradient integrals on first atom only
#
eri = mol.intor('cint2e_ip1_sph', aosym='s2kl')
#
# 2e integral gradients on certain atom
#
atm_id = 1 # second atom
bas_start, bas_end, ao_start, ao_end = mol.aoslice_by_atom()[atm_id]
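# aoslice_by_atom() returns, for each atom, the ranges
# [shell_start, shell_end, AO_start, AO_end] used to slice shells and AOs.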
tot_bra = ao_end - ao_start
nao = mol.nao_nr()
eri1 = numpy.empty((3,tot_bra,nao,nao,nao))
pi = 0
for i in range(mol.nbas):
if mol.bas_atom(i) == atm_id:
pj = 0
for j in range(mol.nbas):
pk = 0
for k in range(mol.nbas):
pl = 0
for l in range(mol.nbas):
shls = (i, j, k, l)
buf = mol.intor_by_shell('cint2e_ip1_sph', shls, comp=3)
di, dj, dk, dl = buf.shape[1:]
eri1[:,pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf
pl += dl
pk += dk
pj += dj
pi += di
print('integral shape %s' % str(eri1.shape))
#
# Generate a sub-block of AO integrals. The sub-block (ij|kl) contains the
# shells 2:5 for basis i, 0:2 for j, 0:4 for k and 1:3 for l
#
sub_eri = mol.intor('int2e_sph', shls_slice=(2,5,0,2,0,4,1,3))
# This statement is equivalent to
dims = []
for i in range(mol.nbas):
l = mol.bas_angular(i)
nc = mol.bas_nctr(i)
dims.append((l * 2 + 1) * nc)
nao_i = sum(dims[2:5])
nao_j = sum(dims[0:2])
nao_k = sum(dims[0:4])
nao_l = sum(dims[1:3])
sub_eri = numpy.empty((nao_i,nao_j,nao_k,nao_l))
pi = 0
for i in range(2,5):
pj = 0
for j in range(0,2):
pk = 0
for k in range(0,4):
pl = 0
for l in range(1,3):
shls = (i, j, k, l)
buf = mol.intor_by_shell('int2e_sph', shls)
di, dj, dk, dl = buf.shape
sub_eri[pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf
pl += dl
pk += dk
pj += dj
pi += di
sub_eri = sub_eri.reshape(nao_i*nao_j,nao_k*nao_l)
#
# Generate all AO integrals for a sub-system.
#
mol = gto.M(atom=[['H', 0,0,i] for i in range(10)])
atom_idx = [0,2,4] # The disjoint atoms
sub_mol = mol.copy()
sub_mol._bas = mol._bas[atom_idx]
sub_eri = sub_mol.intor('int2e_sph', aosym='s1')
# This statement is equivalent to
sub_nao = 0
for i in range(mol.nbas):
if mol.bas_atom(i) in atom_idx:
l = mol.bas_angular(i)
nc = mol.bas_nctr(i)
sub_nao += (l * 2 + 1) * nc
sub_eri = numpy.empty((sub_nao,sub_nao,sub_nao,sub_nao))
pi = 0
for i in range(mol.nbas):
if mol.bas_atom(i) in atom_idx:
pj = 0
for j in range(mol.nbas):
if mol.bas_atom(j) in atom_idx:
pk = 0
for k in range(mol.nbas):
if mol.bas_atom(k) in atom_idx:
pl = 0
for l in range(mol.nbas):
if mol.bas_atom(l) in atom_idx:
shls = (i, j, k, l)
buf = mol.intor_by_shell('int2e_sph', shls)
di, dj, dk, dl = buf.shape
sub_eri[pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf
pl += dl
pk += dk
pj += dj
pi += di
sub_eri = sub_eri.reshape(sub_nao**2,sub_nao**2)
| 30.825301
| 82
| 0.580027
|
import numpy
from pyscf import gto, scf
mol = gto.M(
verbose = 0,
atom = 'C 0 0 0; O 0 0 1.5',
basis = 'ccpvdz'
)
mf = scf.RHF(mol)
mf.kernel()
dm = mf.make_rdm1()
s = mol.intor('cint1e_ovlp_sph')
t = mol.intor('cint1e_kin_sph')
v = mol.intor('cint1e_nuc_sph')
s1 = mol.intor('cint1e_ipovlp_sph', comp=3)
t1 = mol.intor('cint1e_ipkin_sph' , comp=3)
v1 = mol.intor('cint1e_ipnuc_sph' , comp=3)
print('Dipole %s' % numpy.einsum('xij,ij->x',
mol.intor('cint1e_r_sph', comp=3), dm))
mol1 = gto.M(
verbose = 0,
atom = 'H 0 1 0; H 1 0 0',
basis = 'ccpvdz'
)
s = gto.intor_cross('cint1e_ovlp_sph', mol, mol1)
print('overlap shape (%d, %d)' % s.shape)
eri = mol.intor('cint2e_sph', aosym='s8')
eri = mol.intor('cint2e_ip1_sph', aosym='s2kl')
atm_id = 1
bas_start, bas_end, ao_start, ao_end = mol.aoslice_by_atom()[atm_id]
tot_bra = ao_end - ao_start
nao = mol.nao_nr()
eri1 = numpy.empty((3,tot_bra,nao,nao,nao))
pi = 0
for i in range(mol.nbas):
if mol.bas_atom(i) == atm_id:
pj = 0
for j in range(mol.nbas):
pk = 0
for k in range(mol.nbas):
pl = 0
for l in range(mol.nbas):
shls = (i, j, k, l)
buf = mol.intor_by_shell('cint2e_ip1_sph', shls, comp=3)
di, dj, dk, dl = buf.shape[1:]
eri1[:,pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf
pl += dl
pk += dk
pj += dj
pi += di
print('integral shape %s' % str(eri1.shape))
sub_eri = mol.intor('int2e_sph', shls_slice=(2,5,0,2,0,4,1,3))
dims = []
for i in range(mol.nbas):
l = mol.bas_angular(i)
nc = mol.bas_nctr(i)
dims.append((l * 2 + 1) * nc)
nao_i = sum(dims[2:5])
nao_j = sum(dims[0:2])
nao_k = sum(dims[0:4])
nao_l = sum(dims[1:3])
sub_eri = numpy.empty((nao_i,nao_j,nao_k,nao_l))
pi = 0
for i in range(2,5):
pj = 0
for j in range(0,2):
pk = 0
for k in range(0,4):
pl = 0
for l in range(1,3):
shls = (i, j, k, l)
buf = mol.intor_by_shell('int2e_sph', shls)
di, dj, dk, dl = buf.shape
sub_eri[pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf
pl += dl
pk += dk
pj += dj
pi += di
sub_eri = sub_eri.reshape(nao_i*nao_j,nao_k*nao_l)
mol = gto.M(atom=[['H', 0,0,i] for i in range(10)])
atom_idx = [0,2,4]
sub_mol = mol.copy()
sub_mol._bas = mol._bas[atom_idx]
sub_eri = sub_mol.intor('int2e_sph', aosym='s1')
sub_nao = 0
for i in range(mol.nbas):
if mol.bas_atom(i) in atom_idx:
l = mol.bas_angular(i)
nc = mol.bas_nctr(i)
sub_nao += (l * 2 + 1) * nc
sub_eri = numpy.empty((sub_nao,sub_nao,sub_nao,sub_nao))
pi = 0
for i in range(mol.nbas):
if mol.bas_atom(i) in atom_idx:
pj = 0
for j in range(mol.nbas):
if mol.bas_atom(j) in atom_idx:
pk = 0
for k in range(mol.nbas):
if mol.bas_atom(k) in atom_idx:
pl = 0
for l in range(mol.nbas):
if mol.bas_atom(l) in atom_idx:
shls = (i, j, k, l)
buf = mol.intor_by_shell('int2e_sph', shls)
di, dj, dk, dl = buf.shape
sub_eri[pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf
pl += dl
pk += dk
pj += dj
pi += di
sub_eri = sub_eri.reshape(sub_nao**2,sub_nao**2)
| true
| true
|
f70a767814ed94c06907f469c28d401cf661137d
| 1,461
|
py
|
Python
|
setup.py
|
mcflugen/plume
|
7fc65ba9461fece372eef4b2bee9ba6e72f42d19
|
[
"MIT"
] | null | null | null |
setup.py
|
mcflugen/plume
|
7fc65ba9461fece372eef4b2bee9ba6e72f42d19
|
[
"MIT"
] | null | null | null |
setup.py
|
mcflugen/plume
|
7fc65ba9461fece372eef4b2bee9ba6e72f42d19
|
[
"MIT"
] | 1
|
2018-08-30T17:32:26.000Z
|
2018-08-30T17:32:26.000Z
|
from setuptools import setup, find_packages
from distutils.extension import Extension
import numpy as np
import cython_gsl
import versioneer
def read_requirements():
import os
path = os.path.dirname(os.path.abspath(__file__))
requirements_file = os.path.join(path, 'requirements.txt')
try:
with open(requirements_file, 'r') as req_fp:
requires = req_fp.read().split()
except IOError:
return []
else:
        return requires  # one requirement specifier per whitespace-separated token
setup(name='plume',
version=versioneer.get_version(),
description='A hypopycnal sediment-carrying plume entering the ocean',
author='Eric Hutton',
author_email='huttone@colorado.edu',
url='http://csdms.colorado.edu',
install_requires=read_requirements(),
setup_requires=['setuptools', ],
packages=find_packages(),
include_dirs = [np.get_include(), cython_gsl.get_include()],
entry_points={
'console_scripts': [
'plume=plume.cli:main',
],
},
ext_modules = [
Extension('plume.ext.centerline',
['plume/ext/centerline.pyx'],
extra_compile_args=['-O3'],
libraries=cython_gsl.get_libraries(),
library_dirs=[cython_gsl.get_library_dir()],
include_dirs=[cython_gsl.get_cython_include_dir()])],
cmdclass=versioneer.get_cmdclass(),
)
| 31.085106
| 76
| 0.626283
|
from setuptools import setup, find_packages
from distutils.extension import Extension
import numpy as np
import cython_gsl
import versioneer
def read_requirements():
import os
path = os.path.dirname(os.path.abspath(__file__))
requirements_file = os.path.join(path, 'requirements.txt')
try:
with open(requirements_file, 'r') as req_fp:
requires = req_fp.read().split()
except IOError:
return []
else:
        return requires
setup(name='plume',
version=versioneer.get_version(),
description='A hypopycnal sediment-carrying plume entering the ocean',
author='Eric Hutton',
author_email='huttone@colorado.edu',
url='http://csdms.colorado.edu',
install_requires=read_requirements(),
setup_requires=['setuptools', ],
packages=find_packages(),
include_dirs = [np.get_include(), cython_gsl.get_include()],
entry_points={
'console_scripts': [
'plume=plume.cli:main',
],
},
ext_modules = [
Extension('plume.ext.centerline',
['plume/ext/centerline.pyx'],
extra_compile_args=['-O3'],
libraries=cython_gsl.get_libraries(),
library_dirs=[cython_gsl.get_library_dir()],
include_dirs=[cython_gsl.get_cython_include_dir()])],
cmdclass=versioneer.get_cmdclass(),
)
| true
| true
|
f70a7759e95a54a93bcca22412d4b186cb575890
| 791
|
py
|
Python
|
src/home/migrations/0001_initial.py
|
gatortechuf/gatortechuf.com
|
8d0ad5f0772a42113c41bf454e96c2fa2c22d1f3
|
[
"MIT"
] | 2
|
2016-07-18T02:11:37.000Z
|
2017-08-27T17:28:25.000Z
|
src/home/migrations/0001_initial.py
|
gatortechuf/gatortechuf.com
|
8d0ad5f0772a42113c41bf454e96c2fa2c22d1f3
|
[
"MIT"
] | 66
|
2016-06-18T04:00:01.000Z
|
2018-02-03T17:42:17.000Z
|
src/home/migrations/0001_initial.py
|
gatortechuf/gatortechuf.com
|
8d0ad5f0772a42113c41bf454e96c2fa2c22d1f3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-03-17 03:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SemesterModules',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('module_tile', models.CharField(max_length=256, verbose_name='Title')),
('module_icon', models.CharField(max_length=128, verbose_name='Font Awesome Icon')),
('module_description', models.TextField(max_length=1024, verbose_name='Description')),
],
),
]
| 30.423077
| 114
| 0.624526
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SemesterModules',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('module_tile', models.CharField(max_length=256, verbose_name='Title')),
('module_icon', models.CharField(max_length=128, verbose_name='Font Awesome Icon')),
('module_description', models.TextField(max_length=1024, verbose_name='Description')),
],
),
]
| true
| true
|
f70a79fccd50251c7ab4ed1433fa98f79020be5a
| 3,038
|
py
|
Python
|
guardabinario.py
|
paatshala1/firststeps
|
5b91e1ad9a0a1197438d827d23879701cf81afbb
|
[
"MIT"
] | null | null | null |
guardabinario.py
|
paatshala1/firststeps
|
5b91e1ad9a0a1197438d827d23879701cf81afbb
|
[
"MIT"
] | null | null | null |
guardabinario.py
|
paatshala1/firststeps
|
5b91e1ad9a0a1197438d827d23879701cf81afbb
|
[
"MIT"
] | null | null | null |
import pickle
# =============================================================================
# THE __str__ METHOD LETS US PRINT THE OBJECT'S INFO AS A STRING; OTHERWISE
# THE showp() METHOD WOULD SHOW THE OBJECTS CREATED IN MEMORY BUT NOT THEIR
# INFO: (<__main__.People object at 0x00000218F088B9C8>)
# =============================================================================
class Person:
def __init__(self , name , nac , age):
self.name = name
self.nac = nac
self.age = age
print("\nIt's been created:" , self.name)
def __str__(self):
return "{} {} {}".format(self.name , self.nac , self.age)
# =============================================================================
# THE CLASS HAD NO __init__ BUT DID HAVE ITS CONTENTS; I ADDED IT TO MAKE
# CLEAR THAT WHAT WAS BEING DONE WAS CREATING A "PROPERTY" OF THE PEOPLELIST
# OBJECT, CONSISTING OF A LIST, WHICH THEREFORE, LIKE ANY OTHER PROPERTY,
# MUST BE REFERENCED EXPLICITLY IN ORDER TO USE/MODIFY IT
# =============================================================================
class Peoplelist:
def __init__(self):
self.persons = []
def addp(self , p):
self.persons.append(p)
def showp(self):
for i in self.persons:
print(i)
# =============================================================================
x = input("Would you like to add (a) or read (r)?: \n>> ")
while True:
if x == "q":
print("\t--Process finished by user--")
del x
break
if x== "r":
try:
with open("People_info" , "rb") as pickledfile:
unpickled = pickle.load(pickledfile)
for i in unpickled:
print(i)
del unpickled
x = input("Would you like to add (a), read (r) or quit (q)?: \n>> ")
except:
print("\t--File doesn't exist, you should create one first--")
x = input("Would you like to add (a), read (r) or quit (q)?: \n>> ")
elif x== "a":
lst = Peoplelist()
p = Person(input("Name: "), input("Country: "), int(input("Age: ")))
try:
with open("People_info" , "rb") as reading2update:
lst.persons = pickle.load(reading2update)
lst.addp(p)
except:
lst.addp(p)
finally:
lst.showp()
with open("People_info" , "wb") as file:
pickle.dump(lst.persons, file)
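                # The whole updated list is re-pickled, overwriting the file.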
# del lst
print("Pickling process succesfully finished")
x = input("Would you like to add (a), read (r) or quit (q)?: \n>> ")
else:
print("\t--You must select a valid option (a, r or q--")
x = input("Would you like to add (a), read (r) or quit (q)?: \n>> ")
print("\n** THIS IS THE END **")
| 34.134831
| 81
| 0.455892
|
import pickle
class Person:
def __init__(self , name , nac , age):
self.name = name
self.nac = nac
self.age = age
print("\nIt's been created:" , self.name)
def __str__(self):
return "{} {} {}".format(self.name , self.nac , self.age)
# =============================================================================
# THE CLASS ORIGINALLY HAD NO __init__, ONLY ITS BODY; I ADDED IT TO MAKE
# CLEAR THAT WHAT IS BEING DONE IS CREATING AN ATTRIBUTE OF THE PEOPLELIST
# OBJECT CONSISTING OF A LIST, WHICH, LIKE ANY OTHER ATTRIBUTE, MUST BE
# REFERENCED EXPLICITLY IN ORDER TO BE USED OR MODIFIED
# =============================================================================
class Peoplelist:
def __init__(self):
self.persons = []
def addp(self , p):
self.persons.append(p)
def showp(self):
for i in self.persons:
print(i)
# =============================================================================
x = input("Would you like to add (a) or read (r)?: \n>> ")
while True:
if x == "q":
print("\t--Process finished by user--")
del x
break
    if x == "r":
try:
with open("People_info" , "rb") as pickledfile:
unpickled = pickle.load(pickledfile)
for i in unpickled:
print(i)
del unpickled
x = input("Would you like to add (a), read (r) or quit (q)?: \n>> ")
        except FileNotFoundError:
print("\t--File doesn't exist, you should create one first--")
x = input("Would you like to add (a), read (r) or quit (q)?: \n>> ")
    elif x == "a":
lst = Peoplelist()
p = Person(input("Name: "), input("Country: "), int(input("Age: ")))
try:
with open("People_info" , "rb") as reading2update:
lst.persons = pickle.load(reading2update)
lst.addp(p)
        except (FileNotFoundError, EOFError):
lst.addp(p)
finally:
lst.showp()
with open("People_info" , "wb") as file:
pickle.dump(lst.persons, file)
print("Pickling process succesfully finished")
x = input("Would you like to add (a), read (r) or quit (q)?: \n>> ")
else:
print("\t--You must select a valid option (a, r or q--")
x = input("Would you like to add (a), read (r) or quit (q)?: \n>> ")
print("\n** THIS IS THE END **")
| true
| true
|
f70a7a3f1fea92b6119cb3af8052d25d1baf7caf
| 737
|
py
|
Python
|
python-algorithm/common/trie_node.py
|
isudox/nerd-algorithm
|
c1fbe153953cf3fc24395f75d102016fdf9ea0fa
|
[
"MIT"
] | 5
|
2017-06-11T09:19:34.000Z
|
2019-01-16T16:58:31.000Z
|
python-algorithm/common/trie_node.py
|
isudox/leetcode-solution
|
60085e64deaf396a171367affc94b18114565c43
|
[
"MIT"
] | 5
|
2020-03-22T13:53:54.000Z
|
2020-03-23T08:49:35.000Z
|
python-algorithm/common/trie_node.py
|
isudox/nerd-algorithm
|
c1fbe153953cf3fc24395f75d102016fdf9ea0fa
|
[
"MIT"
] | 1
|
2019-03-02T15:50:43.000Z
|
2019-03-02T15:50:43.000Z
|
# Trie Tree Node
from typing import Optional
class TrieNode:
def __init__(self, char: Optional[str] = None):
self.char = char
self.children = []
self.counter = 0
self.end = False
    def add(self, word: str):
        """Insert `word` into the trie rooted at this node."""
        node = self
        for char in word:
            found_in_children = False
            # Reuse an existing child for this character if there is one.
            for child in node.children:
                if child.char == char:
                    found_in_children = True
                    # counter tracks how many inserted words revisit this node.
                    child.counter += 1
                    node = child
                    break
            if not found_in_children:
                # No child for this character yet: create one and descend.
                new_node = TrieNode(char)
                node.children.append(new_node)
                node = new_node
        # Mark the final node as the end of a complete word.
        node.end = True
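# A small usage sketch (not part of the original module): build a trie from a
# few words and inspect the shared root child. The words below are
# illustrative only.
if __name__ == "__main__":
    root = TrieNode()
    for word in ["car", "cat", "care"]:
        root.add(word)
    # All three words share 'c'; counter counts insertions after the first,
    # so it reads 2 here.
    for child in root.children:
        print(child.char, child.counter, child.end)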
| 27.296296
| 51
| 0.497965
|
from typing import Optional
class TrieNode:
def __init__(self, char: Optional[str] = None):
self.char = char
self.children = []
self.counter = 0
self.end = False
def add(self, word: str):
node = self
for char in word:
found_in_children = False
for child in node.children:
if child.char == char:
found_in_children = True
child.counter += 1
node = child
break
if not found_in_children:
new_node = TrieNode(char)
node.children.append(new_node)
node = new_node
node.end = True
| true
| true
|
f70a7a5a3c556402a7e07cce5f17dc361e7a5d74
| 7,221
|
py
|
Python
|
moto/ec2/responses/elastic_ip_addresses.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | null | null | null |
moto/ec2/responses/elastic_ip_addresses.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | 1
|
2022-03-07T07:39:03.000Z
|
2022-03-07T07:39:03.000Z
|
moto/ec2/responses/elastic_ip_addresses.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | null | null | null |
from moto.ec2.utils import add_tag_specification
from ._base_response import EC2BaseResponse
class ElasticIPAddresses(EC2BaseResponse):
def allocate_address(self):
domain = self._get_param("Domain", if_none="standard")
reallocate_address = self._get_param("Address", if_none=None)
tags = self._get_multi_param("TagSpecification")
tags = add_tag_specification(tags)
if self.is_not_dryrun("AllocateAddress"):
if reallocate_address:
address = self.ec2_backend.allocate_address(
domain, address=reallocate_address, tags=tags
)
else:
address = self.ec2_backend.allocate_address(domain, tags=tags)
template = self.response_template(ALLOCATE_ADDRESS_RESPONSE)
return template.render(address=address)
def associate_address(self):
instance = eni = None
if "InstanceId" in self.querystring:
instance = self.ec2_backend.get_instance(self._get_param("InstanceId"))
elif "NetworkInterfaceId" in self.querystring:
eni = self.ec2_backend.get_network_interface(
self._get_param("NetworkInterfaceId")
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect InstanceId/NetworkId parameter.",
)
reassociate = False
if "AllowReassociation" in self.querystring:
reassociate = self._get_param("AllowReassociation") == "true"
if self.is_not_dryrun("AssociateAddress"):
if instance or eni:
if "PublicIp" in self.querystring:
eip = self.ec2_backend.associate_address(
instance=instance,
eni=eni,
address=self._get_param("PublicIp"),
reassociate=reassociate,
)
elif "AllocationId" in self.querystring:
eip = self.ec2_backend.associate_address(
instance=instance,
eni=eni,
allocation_id=self._get_param("AllocationId"),
reassociate=reassociate,
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect PublicIp/AllocationId parameter.",
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect either instance or ENI.",
)
template = self.response_template(ASSOCIATE_ADDRESS_RESPONSE)
return template.render(address=eip)
def describe_addresses(self):
self.error_on_dryrun()
allocation_ids = self._get_multi_param("AllocationId")
public_ips = self._get_multi_param("PublicIp")
filters = self._filters_from_querystring()
addresses = self.ec2_backend.describe_addresses(
allocation_ids, public_ips, filters
)
template = self.response_template(DESCRIBE_ADDRESS_RESPONSE)
return template.render(addresses=addresses)
def disassociate_address(self):
if self.is_not_dryrun("DisAssociateAddress"):
if "PublicIp" in self.querystring:
self.ec2_backend.disassociate_address(
address=self._get_param("PublicIp")
)
elif "AssociationId" in self.querystring:
self.ec2_backend.disassociate_address(
association_id=self._get_param("AssociationId")
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect PublicIp/AssociationId parameter.",
)
return self.response_template(DISASSOCIATE_ADDRESS_RESPONSE).render()
def release_address(self):
if self.is_not_dryrun("ReleaseAddress"):
if "PublicIp" in self.querystring:
self.ec2_backend.release_address(address=self._get_param("PublicIp"))
elif "AllocationId" in self.querystring:
self.ec2_backend.release_address(
allocation_id=self._get_param("AllocationId")
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect PublicIp/AllocationId parameter.",
)
return self.response_template(RELEASE_ADDRESS_RESPONSE).render()
ALLOCATE_ADDRESS_RESPONSE = """<AllocateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<publicIp>{{ address.public_ip }}</publicIp>
<domain>{{ address.domain }}</domain>
{% if address.allocation_id %}
<allocationId>{{ address.allocation_id }}</allocationId>
{% endif %}
</AllocateAddressResponse>"""
ASSOCIATE_ADDRESS_RESPONSE = """<AssociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
{% if address.association_id %}
<associationId>{{ address.association_id }}</associationId>
{% endif %}
</AssociateAddressResponse>"""
DESCRIBE_ADDRESS_RESPONSE = """<DescribeAddressesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<addressesSet>
{% for address in addresses %}
<item>
<publicIp>{{ address.public_ip }}</publicIp>
<domain>{{ address.domain }}</domain>
{% if address.instance %}
<instanceId>{{ address.instance.id }}</instanceId>
{% else %}
<instanceId/>
{% endif %}
{% if address.eni %}
<networkInterfaceId>{{ address.eni.id }}</networkInterfaceId>
{% else %}
<networkInterfaceId/>
{% endif %}
{% if address.allocation_id %}
<allocationId>{{ address.allocation_id }}</allocationId>
{% endif %}
{% if address.association_id %}
<associationId>{{ address.association_id }}</associationId>
{% endif %}
<tagSet>
{% for tag in address.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</addressesSet>
</DescribeAddressesResponse>"""
DISASSOCIATE_ADDRESS_RESPONSE = """<DisassociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DisassociateAddressResponse>"""
RELEASE_ADDRESS_RESPONSE = """<ReleaseAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</ReleaseAddressResponse>"""
| 40.340782
| 113
| 0.596316
|
from moto.ec2.utils import add_tag_specification
from ._base_response import EC2BaseResponse
class ElasticIPAddresses(EC2BaseResponse):
def allocate_address(self):
domain = self._get_param("Domain", if_none="standard")
reallocate_address = self._get_param("Address", if_none=None)
tags = self._get_multi_param("TagSpecification")
tags = add_tag_specification(tags)
if self.is_not_dryrun("AllocateAddress"):
if reallocate_address:
address = self.ec2_backend.allocate_address(
domain, address=reallocate_address, tags=tags
)
else:
address = self.ec2_backend.allocate_address(domain, tags=tags)
template = self.response_template(ALLOCATE_ADDRESS_RESPONSE)
return template.render(address=address)
def associate_address(self):
instance = eni = None
if "InstanceId" in self.querystring:
instance = self.ec2_backend.get_instance(self._get_param("InstanceId"))
elif "NetworkInterfaceId" in self.querystring:
eni = self.ec2_backend.get_network_interface(
self._get_param("NetworkInterfaceId")
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect InstanceId/NetworkId parameter.",
)
reassociate = False
if "AllowReassociation" in self.querystring:
reassociate = self._get_param("AllowReassociation") == "true"
if self.is_not_dryrun("AssociateAddress"):
if instance or eni:
if "PublicIp" in self.querystring:
eip = self.ec2_backend.associate_address(
instance=instance,
eni=eni,
address=self._get_param("PublicIp"),
reassociate=reassociate,
)
elif "AllocationId" in self.querystring:
eip = self.ec2_backend.associate_address(
instance=instance,
eni=eni,
allocation_id=self._get_param("AllocationId"),
reassociate=reassociate,
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect PublicIp/AllocationId parameter.",
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect either instance or ENI.",
)
template = self.response_template(ASSOCIATE_ADDRESS_RESPONSE)
return template.render(address=eip)
def describe_addresses(self):
self.error_on_dryrun()
allocation_ids = self._get_multi_param("AllocationId")
public_ips = self._get_multi_param("PublicIp")
filters = self._filters_from_querystring()
addresses = self.ec2_backend.describe_addresses(
allocation_ids, public_ips, filters
)
template = self.response_template(DESCRIBE_ADDRESS_RESPONSE)
return template.render(addresses=addresses)
def disassociate_address(self):
if self.is_not_dryrun("DisAssociateAddress"):
if "PublicIp" in self.querystring:
self.ec2_backend.disassociate_address(
address=self._get_param("PublicIp")
)
elif "AssociationId" in self.querystring:
self.ec2_backend.disassociate_address(
association_id=self._get_param("AssociationId")
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect PublicIp/AssociationId parameter.",
)
return self.response_template(DISASSOCIATE_ADDRESS_RESPONSE).render()
def release_address(self):
if self.is_not_dryrun("ReleaseAddress"):
if "PublicIp" in self.querystring:
self.ec2_backend.release_address(address=self._get_param("PublicIp"))
elif "AllocationId" in self.querystring:
self.ec2_backend.release_address(
allocation_id=self._get_param("AllocationId")
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect PublicIp/AllocationId parameter.",
)
return self.response_template(RELEASE_ADDRESS_RESPONSE).render()
ALLOCATE_ADDRESS_RESPONSE = """<AllocateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<publicIp>{{ address.public_ip }}</publicIp>
<domain>{{ address.domain }}</domain>
{% if address.allocation_id %}
<allocationId>{{ address.allocation_id }}</allocationId>
{% endif %}
</AllocateAddressResponse>"""
ASSOCIATE_ADDRESS_RESPONSE = """<AssociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
{% if address.association_id %}
<associationId>{{ address.association_id }}</associationId>
{% endif %}
</AssociateAddressResponse>"""
DESCRIBE_ADDRESS_RESPONSE = """<DescribeAddressesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<addressesSet>
{% for address in addresses %}
<item>
<publicIp>{{ address.public_ip }}</publicIp>
<domain>{{ address.domain }}</domain>
{% if address.instance %}
<instanceId>{{ address.instance.id }}</instanceId>
{% else %}
<instanceId/>
{% endif %}
{% if address.eni %}
<networkInterfaceId>{{ address.eni.id }}</networkInterfaceId>
{% else %}
<networkInterfaceId/>
{% endif %}
{% if address.allocation_id %}
<allocationId>{{ address.allocation_id }}</allocationId>
{% endif %}
{% if address.association_id %}
<associationId>{{ address.association_id }}</associationId>
{% endif %}
<tagSet>
{% for tag in address.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</addressesSet>
</DescribeAddressesResponse>"""
DISASSOCIATE_ADDRESS_RESPONSE = """<DisassociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DisassociateAddressResponse>"""
RELEASE_ADDRESS_RESPONSE = """<ReleaseAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</ReleaseAddressResponse>"""
| true
| true
|
f70a7a9d650cd69fbf70293d70feac3812614d3c
| 6,344
|
py
|
Python
|
markdown/extensions/headerid.py
|
koocieyu/interactive-tutorials
|
873851b37f0a13b6218ba1e656d51169010981fe
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
markdown/extensions/headerid.py
|
koocieyu/interactive-tutorials
|
873851b37f0a13b6218ba1e656d51169010981fe
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
markdown/extensions/headerid.py
|
koocieyu/interactive-tutorials
|
873851b37f0a13b6218ba1e656d51169010981fe
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
#!/usr/bin/python
"""
HeaderID Extension for Python-Markdown
======================================
Adds ability to set HTML IDs for headers.
Basic usage:
>>> import markdown
>>> text = "# Some Header # {#some_id}"
>>> md = markdown.markdown(text, ['headerid'])
>>> md
u'<h1 id="some_id">Some Header</h1>'
All header IDs are unique:
>>> text = '''
... #Header
... #Another Header {#header}
... #Third Header {#header}'''
>>> md = markdown.markdown(text, ['headerid'])
>>> md
u'<h1 id="header">Header</h1>\\n<h1 id="header_1">Another Header</h1>\\n<h1 id="header_2">Third Header</h1>'
To fit within a html template's hierarchy, set the header base level:
>>> text = '''
... #Some Header
... ## Next Level'''
>>> md = markdown.markdown(text, ['headerid(level=3)'])
>>> md
u'<h3 id="some_header">Some Header</h3>\\n<h4 id="next_level">Next Level</h4>'
Turn off auto generated IDs:
>>> text = '''
... # Some Header
... # Header with ID # { #foo }'''
>>> md = markdown.markdown(text, ['headerid(forceid=False)'])
>>> md
u'<h1>Some Header</h1>\\n<h1 id="foo">Header with ID</h1>'
Use with MetaData extension:
>>> text = '''header_level: 2
... header_forceid: Off
...
... # A Header'''
>>> md = markdown.markdown(text, ['headerid', 'meta'])
>>> md
u'<h2>A Header</h2>'
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://www.freewisdom.org/project/python-markdown/HeaderId>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown
from markdown import etree
import re
from string import ascii_lowercase, digits, punctuation
ID_CHARS = ascii_lowercase + digits + '-_'
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
class HeaderIdProcessor(markdown.blockprocessors.BlockProcessor):
""" Replacement BlockProcessor for Header IDs. """
# Detect a header at start of any line in block
RE = re.compile(r"""(^|\n)
(?P<level>\#{1,6}) # group('level') = string of hashes
(?P<header>.*?) # group('header') = Header text
\#* # optional closing hashes
(?:[ \t]*\{[ \t]*\#(?P<id>[-_:a-zA-Z0-9]+)[ \t]*\})?
(\n|$) # ^^ group('id') = id attribute
""",
re.VERBOSE)
IDs = []
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()] # All lines before header
after = block[m.end():] # All lines after header
if before:
# As the header was not the first line of the block and the
# lines before the header must be parsed first,
# recursively parse this lines as a block.
self.parser.parseBlocks(parent, [before])
# Create header using named groups from RE
start_level, force_id = self._get_meta()
level = len(m.group('level')) + start_level
if level > 6:
level = 6
h = markdown.etree.SubElement(parent, 'h%d' % level)
h.text = m.group('header').strip()
if m.group('id'):
h.set('id', self._unique_id(m.group('id')))
elif force_id:
h.set('id', self._create_id(m.group('header').strip()))
if after:
# Insert remaining lines as first block for future parsing.
blocks.insert(0, after)
else:
# This should never happen, but just in case...
            raise ValueError("We've got a problem header!")
def _get_meta(self):
""" Return meta data suported by this ext as a tuple """
level = int(self.config['level'][0]) - 1
force = self._str2bool(self.config['forceid'][0])
if hasattr(self.md, 'Meta'):
if self.md.Meta.has_key('header_level'):
level = int(self.md.Meta['header_level'][0]) - 1
if self.md.Meta.has_key('header_forceid'):
force = self._str2bool(self.md.Meta['header_forceid'][0])
return level, force
def _str2bool(self, s, default=False):
""" Convert a string to a booleen value. """
s = str(s)
if s.lower() in ['0', 'f', 'false', 'off', 'no', 'n']:
return False
elif s.lower() in ['1', 't', 'true', 'on', 'yes', 'y']:
return True
return default
def _unique_id(self, id):
""" Ensure ID is unique. Append '_1', '_2'... if not """
while id in self.IDs:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d'% (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d'% (id, 1)
self.IDs.append(id)
return id
def _create_id(self, header):
""" Return ID from Header text. """
h = ''
for c in header.lower().replace(' ', '_'):
if c in ID_CHARS:
h += c
elif c not in punctuation:
h += '+'
return self._unique_id(h)
class HeaderIdExtension (markdown.Extension):
def __init__(self, configs):
# set defaults
self.config = {
'level' : ['1', 'Base level for headers.'],
'forceid' : ['True', 'Force all headers to have an id.']
}
        for key, value in (configs or []):
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
self.processor = HeaderIdProcessor(md.parser)
self.processor.md = md
self.processor.config = self.config
# Replace existing hasheader in place.
md.parser.blockprocessors['hashheader'] = self.processor
def reset(self):
self.processor.IDs = []
def makeExtension(configs=None):
return HeaderIdExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32.367347
| 112
| 0.539565
|
import markdown
from markdown import etree
import re
from string import ascii_lowercase, digits, punctuation
ID_CHARS = ascii_lowercase + digits + '-_'
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
class HeaderIdProcessor(markdown.blockprocessors.BlockProcessor):
RE = re.compile(r"""(^|\n)
(?P<level>\#{1,6}) # group('level') = string of hashes
(?P<header>.*?) # group('header') = Header text
\#* # optional closing hashes
(?:[ \t]*\{[ \t]*\#(?P<id>[-_:a-zA-Z0-9]+)[ \t]*\})?
(\n|$) # ^^ group('id') = id attribute
""",
re.VERBOSE)
IDs = []
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()]
after = block[m.end():]
if before:
self.parser.parseBlocks(parent, [before])
start_level, force_id = self._get_meta()
level = len(m.group('level')) + start_level
if level > 6:
level = 6
h = markdown.etree.SubElement(parent, 'h%d' % level)
h.text = m.group('header').strip()
if m.group('id'):
h.set('id', self._unique_id(m.group('id')))
elif force_id:
h.set('id', self._create_id(m.group('header').strip()))
if after:
blocks.insert(0, after)
else:
            raise ValueError("We've got a problem header!")
def _get_meta(self):
level = int(self.config['level'][0]) - 1
force = self._str2bool(self.config['forceid'][0])
if hasattr(self.md, 'Meta'):
if self.md.Meta.has_key('header_level'):
level = int(self.md.Meta['header_level'][0]) - 1
if self.md.Meta.has_key('header_forceid'):
force = self._str2bool(self.md.Meta['header_forceid'][0])
return level, force
def _str2bool(self, s, default=False):
s = str(s)
if s.lower() in ['0', 'f', 'false', 'off', 'no', 'n']:
return False
elif s.lower() in ['1', 't', 'true', 'on', 'yes', 'y']:
return True
return default
def _unique_id(self, id):
while id in self.IDs:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d'% (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d'% (id, 1)
self.IDs.append(id)
return id
def _create_id(self, header):
h = ''
for c in header.lower().replace(' ', '_'):
if c in ID_CHARS:
h += c
elif c not in punctuation:
h += '+'
return self._unique_id(h)
class HeaderIdExtension (markdown.Extension):
def __init__(self, configs):
# set defaults
self.config = {
'level' : ['1', 'Base level for headers.'],
'forceid' : ['True', 'Force all headers to have an id.']
}
        for key, value in (configs or []):
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
self.processor = HeaderIdProcessor(md.parser)
self.processor.md = md
self.processor.config = self.config
# Replace existing hasheader in place.
md.parser.blockprocessors['hashheader'] = self.processor
def reset(self):
self.processor.IDs = []
def makeExtension(configs=None):
return HeaderIdExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| true
| true
|
f70a7c482c9860f258e06df2c22abbd6f1f50e9f
| 60,529
|
py
|
Python
|
xdl-algorithm-solution/Rocket/script/rnn.py
|
Ru-Xiang/x-deeplearning
|
04cc0497150920c64b06bb8c314ef89977a3427a
|
[
"Apache-2.0"
] | 4,071
|
2018-12-13T04:17:38.000Z
|
2022-03-30T03:29:35.000Z
|
xdl-algorithm-solution/Rocket/script/rnn.py
|
laozhuang727/x-deeplearning
|
781545783a4e2bbbda48fc64318fb2c6d8bbb3cc
|
[
"Apache-2.0"
] | 359
|
2018-12-21T01:14:57.000Z
|
2022-02-15T07:18:02.000Z
|
xdl-algorithm-solution/Rocket/script/rnn.py
|
laozhuang727/x-deeplearning
|
781545783a4e2bbbda48fc64318fb2c6d8bbb3cc
|
[
"Apache-2.0"
] | 1,054
|
2018-12-20T09:57:42.000Z
|
2022-03-29T07:16:53.000Z
|
# Copyright (C) 2016-2018 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models.
@@bidirectional_dynamic_rnn
@@dynamic_rnn
@@raw_rnn
@@static_rnn
@@static_state_saving_rnn
@@static_bidirectional_rnn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
# pylint: disable=protected-access
_concat = rnn_cell_impl._concat
assert_like_rnncell = rnn_cell_impl.assert_like_rnncell
# pylint: enable=protected-access
def _transpose_batch_time(x):
"""Transpose the batch and time dimensions of a Tensor.
Retains as much of the static shape information as possible.
Args:
x: A tensor of rank 2 or higher.
Returns:
x transposed along the first two dimensions.
Raises:
ValueError: if `x` is rank 1 or lower.
"""
x_static_shape = x.get_shape()
if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
raise ValueError(
"Expected input tensor %s to have rank at least 2, but saw shape: %s" %
(x, x_static_shape))
x_rank = array_ops.rank(x)
x_t = array_ops.transpose(
x, array_ops.concat(
([1, 0], math_ops.range(2, x_rank)), axis=0))
x_t.set_shape(
tensor_shape.TensorShape([
x_static_shape[1].value, x_static_shape[0].value
]).concatenate(x_static_shape[2:]))
return x_t
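# A tiny sketch (not part of the original file) of the axis swap performed by
# _transpose_batch_time; it uses only the ops already imported above.
def _example_transpose_batch_time():
  x = array_ops.zeros([2, 3, 4])       # [batch, time, depth]
  x_t = _transpose_batch_time(x)       # [time, batch, depth]
  return x_t.get_shape().as_list()     # -> [3, 2, 4]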
def _best_effort_input_batch_size(flat_input):
"""Get static input batch size if available, with fallback to the dynamic one.
Args:
flat_input: An iterable of time major input Tensors of shape [max_time,
batch_size, ...]. All inputs should have compatible batch sizes.
Returns:
    The batch size as a Python integer if available, or a scalar Tensor otherwise.
Raises:
ValueError: if there is any input with an invalid shape.
"""
for input_ in flat_input:
shape = input_.shape
if shape.ndims is None:
continue
if shape.ndims < 2:
raise ValueError(
"Expected input tensor %s to have rank at least 2" % input_)
batch_size = shape[1].value
if batch_size is not None:
return batch_size
# Fallback to the dynamic batch size of the first input.
return array_ops.shape(flat_input[0])[1]
def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
# pylint: disable=unused-argument
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell, state_size, skip_conditionals=False):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
When skip_conditionals=False, the pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
state_size: The `cell.state_size` associated with the state.
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for `dynamic_rnn`, where the input tensor
matches `max_sequence_length`, and using conditionals just slows
everything down.
Returns:
A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
ValueError: If the cell returns a state tuple whose length does not match
that returned by `state_size`.
"""
# Convert state to a list for ease of use
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
def _copy_one_through(output, new_output):
# If the state contains a scalar value we simply pass it through.
if output.shape.ndims == 0:
return new_output
copy_cond = (time >= sequence_length)
with ops.colocate_with(new_output):
return array_ops.where(copy_cond, output, new_output)
def _copy_some_through(flat_new_output, flat_new_state):
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
flat_new_output = [
_copy_one_through(zero_output, new_output)
for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
flat_new_state = [
_copy_one_through(state, new_state)
for state, new_state in zip(flat_state, flat_new_state)]
return flat_new_output + flat_new_state
def _maybe_copy_some_through():
"""Run RNN step. Pass through either no or some past state."""
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond(
# if t < min_seq_len: calculate and return everything
time < min_sequence_length, lambda: flat_new_output + flat_new_state,
# else copy some of it through
lambda: _copy_some_through(flat_new_output, flat_new_state))
# TODO(ebrevdo): skipping these conditionals may cause a slowdown,
# but benefits from removing cond() and its gradient. We should
# profile with and without this switch here.
if skip_conditionals:
# Instead of using conditionals, perform the selective copy at all time
# steps. This is faster when max_seq_len is equal to the number of unrolls
# (which is typical for dynamic_rnn).
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = lambda: flat_zero_output + flat_state
final_output_and_state = control_flow_ops.cond(
# if t >= max_seq_len: copy all state through, output zeros
time >= max_sequence_length, empty_update,
# otherwise calculation is required: copy some or all of it through
_maybe_copy_some_through)
if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
raise ValueError("Internal error: state and output were not concatenated "
"correctly.")
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for output, flat_output in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for substate, flat_substate in zip(final_state, flat_state):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(
structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(
structure=state, flat_sequence=final_state)
return final_output, final_state
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
lengths: A `Tensor` of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(
ndims=sequence[0].get_shape().ndims)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
# Join into (time, batch_size, depth)
s_joined = array_ops.stack(sequence)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unstack(s_reversed)
for r, flat_result in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
for input_, flat_result in zip(input_seq, flat_results)]
return results
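# A small sketch (not part of the original file) of _reverse_seq on a
# 3-step, batch-of-2 sequence with per-example lengths [2, 3].
def _example_reverse_seq():
  steps = [constant_op.constant([[float(t), 0.], [float(t), 1.]])
           for t in range(3)]          # each step: [batch=2, features=2]
  lengths = constant_op.constant([2, 3])
  # Example 0 reverses only its first 2 steps; example 1 reverses all 3.
  return _reverse_seq(steps, lengths)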
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
initial_state_fw=None, initial_state_bw=None,
dtype=None, parallel_iterations=None,
swap_memory=False, time_major=False, scope=None):
"""Creates a dynamic version of bidirectional recurrent neural network.
Takes input and builds independent forward and backward RNNs. The input_size
of forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such elements.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such elements.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences in the batch.
If not provided, all batch entries are assumed to be full sequences; and
time reversal is applied from time `0` to `max_time` for each sequence.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
"""
assert_like_rnncell("cell_fw", cell_fw)
assert_like_rnncell("cell_bw", cell_bw)
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
initial_state=initial_state_fw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=fw_scope)
# Backward direction
if not time_major:
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
def _reverse(input_, seq_lengths, seq_dim, batch_dim):
if seq_lengths is not None:
return array_ops.reverse_sequence(
input=input_, seq_lengths=seq_lengths,
seq_dim=seq_dim, batch_dim=batch_dim)
else:
return array_ops.reverse(input_, axis=[seq_dim])
with vs.variable_scope("bw") as bw_scope:
inputs_reverse = _reverse(
inputs, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
initial_state=initial_state_bw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=bw_scope)
output_bw = _reverse(
tmp, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
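# A minimal usage sketch (not part of the original file) of
# bidirectional_dynamic_rnn, built from the modules already imported above;
# cell sizes and input shapes are illustrative assumptions.
def _example_bidirectional_dynamic_rnn():
  inputs = array_ops.placeholder(dtypes.float32, [None, 10, 8])  # [B, T, D]
  lengths = array_ops.placeholder(dtypes.int32, [None])
  cell_fw = rnn_cell_impl.GRUCell(16)
  cell_bw = rnn_cell_impl.GRUCell(16)
  (out_fw, out_bw), _ = bidirectional_dynamic_rnn(
      cell_fw, cell_bw, inputs, sequence_length=lengths, dtype=dtypes.float32)
  # Concatenate both directions along the depth axis: [batch, time, 32].
  return array_ops.concat([out_fw, out_bw], 2)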
def dynamic_rnn(cell, inputs, att_scores=None, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
Performs fully dynamic unrolling of `inputs`.
Example:
```python
# create a BasicRNNCell
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
# 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]
# defining initial state
initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)
# 'state' is a tensor of shape [batch_size, cell_state_size]
outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data,
initial_state=initial_state,
dtype=tf.float32)
```
```python
# create 2 LSTMCells
rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]
# create a RNN cell composed sequentially of a number of RNNCells
multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
# 'outputs' is a tensor of shape [batch_size, max_time, 256]
# 'state' is a N-tuple where N is the number of LSTMCells containing a
# tf.contrib.rnn.LSTMStateTuple for each cell
outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
inputs=data,
dtype=tf.float32)
```
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
Used to copy-through state and zero-out outputs when past a batch
element's sequence length. So it's more for correctness than performance.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes. If cells are `LSTMCells`
`state` will be a tuple containing a `LSTMStateTuple` for each cell.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
assert_like_rnncell("cell", cell)
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
flat_input = nest.flatten(inputs)
if not time_major:
# (B,T,D) => (T,B,D)
flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.get_shape())
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length, name="sequence_length")
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
batch_size = _best_effort_input_batch_size(flat_input)
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If there is no initial_state, you must give a dtype.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
if sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
att_scores = att_scores,
sequence_length=sequence_length,
dtype=dtype)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
# (T,B,D) => (B,T,D)
outputs = nest.map_structure(_transpose_batch_time, outputs)
return (outputs, final_state)
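# A usage sketch (not part of the original file) for the non-standard
# `att_scores` argument this variant threads into the cell: each step calls
# cell(input_t, state, att_score), so the cell passed in must accept a third
# positional argument (e.g. the attention-aware GRU used elsewhere in this
# project; the shapes below are illustrative assumptions).
def _example_dynamic_rnn_with_att_scores(cell, inputs, att_scores, lengths):
  # inputs:     [batch, time, depth]  (batch-major, time_major=False)
  # att_scores: [batch, time, 1] attention weight per time step
  outputs, final_state = dynamic_rnn(
      cell, inputs, att_scores=att_scores,
      sequence_length=lengths, dtype=dtypes.float32)
  return outputs, final_state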
def _dynamic_rnn_loop(cell,
inputs,
initial_state,
parallel_iterations,
swap_memory,
att_scores = None,
sequence_length=None,
dtype=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
      objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
# Construct an initial output
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = _best_effort_input_batch_size(flat_input)
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if const_time_steps != got_time_steps:
raise ValueError(
"Time steps is not the same for all the elements in the input in a "
"batch.")
if const_batch_size != got_batch_size:
raise ValueError(
"Batch_size is not the same for all the elements in the input.")
# Prepare dynamic conditional copying of state & output
def _create_zero_arrays(size):
size = _concat(batch_size, size)
return array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
def _create_ta(name, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
tensor_array_name=base_name + name)
output_ta = tuple(_create_ta("output_%d" % i,
_infer_state_dtype(dtype, state))
for i in range(len(flat_output_size)))
input_ta = tuple(_create_ta("input_%d" % i, flat_input[i].dtype)
for i in range(len(flat_input)))
input_ta = tuple(ta.unstack(input_)
for ta, input_ in zip(input_ta, flat_input))
def _time_step(time, output_ta_t, state, att_scores=None):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
input_t = tuple(ta.read(time) for ta in input_ta)
# Restore some shape information
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
if att_scores is not None:
att_score = att_scores[:, time, :]
call_cell = lambda: cell(input_t, state, att_score)
else:
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
# Pack state if using state tuples
output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
if att_scores is not None:
return (time + 1, output_ta_t, new_state, att_scores)
else:
return (time + 1, output_ta_t, new_state)
if att_scores is not None:
_, output_final_ta, final_state, _ = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta, state, att_scores),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
else:
_, output_final_ta, final_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta, state),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
# Unpack final output if not using output tuples.
final_outputs = tuple(ta.stack() for ta in output_final_ta)
# Restore some shape information
for output, output_size in zip(final_outputs, flat_output_size):
shape = _concat(
[const_time_steps, const_batch_size], output_size, static=True)
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
return (final_outputs, final_state)
def raw_rnn(cell, loop_fn,
parallel_iterations=None, swap_memory=False, scope=None):
"""Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.
**NOTE: This method is still in testing, and the API may change.**
This function is a more primitive version of `dynamic_rnn` that provides
more direct access to the inputs each iteration. It also provides more
control over when to start and finish reading the sequence, and
what to emit for the output.
For example, it can be used to implement the dynamic decoder of a seq2seq
model.
Instead of working with `Tensor` objects, most operations work with
`TensorArray` objects directly.
The operation of `raw_rnn`, in pseudo-code, is basically the following:
```python
time = tf.constant(0, dtype=tf.int32)
(finished, next_input, initial_state, _, loop_state) = loop_fn(
time=time, cell_output=None, cell_state=None, loop_state=None)
emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
state = initial_state
while not all(finished):
(output, cell_state) = cell(next_input, state)
(next_finished, next_input, next_state, emit, loop_state) = loop_fn(
time=time + 1, cell_output=output, cell_state=cell_state,
loop_state=loop_state)
# Emit zeros and copy forward state for minibatch entries that are finished.
state = tf.where(finished, state, next_state)
emit = tf.where(finished, tf.zeros_like(emit), emit)
emit_ta = emit_ta.write(time, emit)
# If any new minibatch entries are marked as finished, mark these.
finished = tf.logical_or(finished, next_finished)
time += 1
return (emit_ta, state, loop_state)
```
with the additional properties that output and state may be (possibly nested)
tuples, as determined by `cell.output_size` and `cell.state_size`, and
as a result the final `state` and `emit_ta` may themselves be tuples.
A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:
```python
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
inputs_ta = inputs_ta.unstack(inputs)
cell = tf.contrib.rnn.LSTMCell(num_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(batch_size, tf.float32)
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length)
finished = tf.reduce_all(elements_finished)
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
outputs = outputs_ta.stack()
```
Args:
cell: An instance of RNNCell.
loop_fn: A callable that takes inputs
`(time, cell_output, cell_state, loop_state)`
and returns the tuple
`(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
Here `time` is an int32 scalar `Tensor`, `cell_output` is a
`Tensor` or (possibly nested) tuple of tensors as determined by
`cell.output_size`, and `cell_state` is a `Tensor`
or (possibly nested) tuple of tensors, as determined by the `loop_fn`
on its first call (and should match `cell.state_size`).
The outputs are: `finished`, a boolean `Tensor` of
shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
`next_cell_state`: the next state to feed to `cell`,
and `emit_output`: the output to store for this iteration.
Note that `emit_output` should be a `Tensor` or (possibly nested)
tuple of tensors with shapes and structure matching `cell.output_size`
and `cell_output` above. The parameter `cell_state` and output
`next_cell_state` may be either a single or (possibly nested) tuple
of tensors. The parameter `loop_state` and
output `next_loop_state` may be either a single or (possibly nested) tuple
of `Tensor` and `TensorArray` objects. This last parameter
may be ignored by `loop_fn` and the return value may be `None`. If it
is not `None`, then the `loop_state` will be propagated through the RNN
loop, for use purely by `loop_fn` to keep track of its own state.
The `next_loop_state` parameter returned may be `None`.
The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
`cell_state = None`, and `loop_state = None`. For this call:
The `next_cell_state` value should be the value with which to initialize
the cell's state. It may be a final state from a previous RNN or it
may be the output of `cell.zero_state()`. It should be a
(possibly nested) tuple structure of tensors.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
appropriate type and shape `[batch_size] + cell.state_size`.
If `cell.state_size` is a (possibly nested) tuple of ints or
`TensorShape`, this will be a tuple having the corresponding shapes.
The `emit_output` value may be either `None` or a (possibly nested)
tuple structure of tensors, e.g.,
`(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
If this first `emit_output` return value is `None`,
then the `emit_ta` result of `raw_rnn` will have the same structure and
dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same
structure, shapes (prepended with a `batch_size` dimension), and dtypes
as `emit_output`. The actual values returned for `emit_output` at this
initializing call are ignored. Note, this emit structure must be
consistent across all time steps.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A tuple `(emit_ta, final_state, final_loop_state)` where:
`emit_ta`: The RNN output `TensorArray`.
If `loop_fn` returns a (possibly nested) set of Tensors for
      `emit_output` during initialization (inputs `time = 0`,
`cell_output = None`, and `loop_state = None`), then `emit_ta` will
have the same structure, dtypes, and shapes as `emit_output` instead.
If `loop_fn` returns `emit_output = None` during this call,
the structure of `cell.output_size` is used:
If `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `emit_ta` will be a tuple having the
same structure as `cell.output_size`, containing TensorArrays whose
elements' shapes correspond to the shape data in `cell.output_size`.
`final_state`: The final cell state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
`final_loop_state`: The final loop state as returned by `loop_fn`.
Raises:
TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
a `callable`.
"""
assert_like_rnncell("cell", cell)
if not callable(loop_fn):
raise TypeError("loop_fn must be a callable")
parallel_iterations = parallel_iterations or 32
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure,
init_loop_state) = loop_fn(
time, None, None, None) # time, cell_output, cell_state, loop_state
flat_input = nest.flatten(next_input)
# Need a surrogate loop state for the while_loop if none is available.
loop_state = (init_loop_state if init_loop_state is not None
else constant_op.constant(0, dtype=dtypes.int32))
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = input_shape[0][0]
for input_shape_i in input_shape:
# Static verification that batch sizes all match
static_batch_size.merge_with(input_shape_i[0])
batch_size = static_batch_size.value
if batch_size is None:
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state,
flat_sequence=flat_state)
if emit_structure is not None:
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.shape if emit.shape.is_fully_defined() else
array_ops.shape(emit) for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)
flat_emit_ta = [
tensor_array_ops.TensorArray(
dtype=dtype_i, dynamic_size=True, size=0, name="rnn_output_%d" % i)
for i, dtype_i in enumerate(flat_emit_dtypes)]
emit_ta = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_emit_ta)
flat_zero_emit = [
array_ops.zeros(_concat(batch_size, size_i), dtype_i)
for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_zero_emit)
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input,
emit_ta, state, loop_state):
"""Internal while loop body for raw_rnn.
Args:
time: time scalar.
elements_finished: batch-size vector.
current_input: possibly nested tuple of input tensors.
emit_ta: possibly nested tuple of output TensorArrays.
state: possibly nested tuple of state tensors.
loop_state: possibly nested tuple of loop state tensors.
Returns:
Tuple having the same size as Args but with updated values.
"""
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = time + 1
(next_finished, next_input, next_state, emit_output,
next_loop_state) = loop_fn(
next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
# If loop_fn returns None for next_loop_state, just reuse the
# previous one.
loop_state = loop_state if next_loop_state is None else next_loop_state
def _copy_some_through(current, candidate):
"""Copy some tensors through via array_ops.where."""
def copy_fn(cur_i, cand_i):
with ops.colocate_with(cand_i):
return array_ops.where(elements_finished, cur_i, cand_i)
return nest.map_structure(copy_fn, current, candidate)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_ta = nest.map_structure(
lambda ta, emit: ta.write(time, emit), emit_ta, emit_output)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
return (next_time, elements_finished, next_input,
emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(
condition, body, loop_vars=[
time, elements_finished, next_input,
emit_ta, state, loop_state],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[-3:]
if init_loop_state is None:
final_loop_state = None
return (emit_ta, final_state, final_loop_state)
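# --- Illustrative sketch (not part of the original module) ------------------
# The docstring example for raw_rnn above ignores `loop_state`; the sketch
# below shows how auxiliary values can be threaded through the loop. All
# names and shapes here are assumptions for illustration only.
def _raw_rnn_loop_state_demo(cell, inputs_ta, sequence_length,
                             batch_size, input_depth):
  """Runs `raw_rnn`, using `loop_state` to count executed time steps."""

  def loop_fn(time, cell_output, cell_state, loop_state):
    if cell_output is None:  # time == 0: initialize cell state and counter.
      next_cell_state = cell.zero_state(batch_size, dtypes.float32)
      next_loop_state = constant_op.constant(0, dtype=dtypes.int32)
    else:
      next_cell_state = cell_state
      next_loop_state = loop_state + 1  # one more step executed
    elements_finished = (time >= sequence_length)
    finished = math_ops.reduce_all(elements_finished)
    next_input = control_flow_ops.cond(
        finished,
        lambda: array_ops.zeros([batch_size, input_depth],
                                dtype=dtypes.float32),
        lambda: inputs_ta.read(time))
    return (elements_finished, next_input, next_cell_state,
            cell_output, next_loop_state)

  # Because init_loop_state is not None, the third return value is the final
  # step count.
  return raw_rnn(cell, loop_fn)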
def static_rnn(cell,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
The simplest form of RNN network generated is:
```python
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
```
However, a few other options are available:
An initial state can be provided.
If the sequence_length vector is provided, dynamic calculation is performed.
This method of calculation does not compute the RNN steps past the maximum
sequence length of the minibatch (thus saving computational time),
and properly propagates the state at an example's sequence length
to the final state output.
The dynamic calculation performed is, at time `t` for batch row `b`,
```python
(output, state)(b, t) =
(t >= sequence_length(b))
? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
: cell(input(b, t), state(b, t - 1))
```
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`, or a nested tuple of such elements.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs.
An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
- outputs is a length T list of outputs (one for each input), or a nested
tuple of such elements.
- state is the final state
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If `inputs` is `None` or an empty list, or if the input depth
(column size) cannot be inferred from inputs via shape inference.
"""
assert_like_rnncell("cell", cell)
if not nest.is_sequence(inputs):
raise TypeError("inputs must be a sequence")
if not inputs:
raise ValueError("inputs must not be empty")
outputs = []
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
# Obtain the first sequence of the input
first_input = inputs
while nest.is_sequence(first_input):
first_input = first_input[0]
# Temporarily avoid EmbeddingWrapper and seq2seq badness
# TODO(lukaszkaiser): remove EmbeddingWrapper
if first_input.get_shape().ndims != 1:
input_shape = first_input.get_shape().with_rank_at_least(2)
fixed_batch_size = input_shape[0]
flat_inputs = nest.flatten(inputs)
for flat_input in flat_inputs:
input_shape = flat_input.get_shape().with_rank_at_least(2)
batch_size, input_size = input_shape[0], input_shape[1:]
fixed_batch_size.merge_with(batch_size)
for i, size in enumerate(input_size):
if size.value is None:
raise ValueError(
"Input size (dimension %d of inputs) must be accessible via "
"shape inference, but saw value None." % i)
else:
fixed_batch_size = first_input.get_shape().with_rank_at_least(1)[0]
if fixed_batch_size.value:
batch_size = fixed_batch_size.value
else:
batch_size = array_ops.shape(first_input)[0]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, "
"dtype must be specified")
state = cell.zero_state(batch_size, dtype)
if sequence_length is not None: # Prepare variables
sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size")
def _create_zero_output(output_size):
# convert int to TensorShape if necessary
size = _concat(batch_size, output_size)
output = array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
shape = _concat(fixed_batch_size.value, output_size, static=True)
output.set_shape(tensor_shape.TensorShape(shape))
return output
output_size = cell.output_size
flat_output_size = nest.flatten(output_size)
flat_zero_output = tuple(
_create_zero_output(size) for size in flat_output_size)
zero_output = nest.pack_sequence_as(
structure=output_size, flat_sequence=flat_zero_output)
sequence_length = math_ops.to_int32(sequence_length)
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
for time, input_ in enumerate(inputs):
if time > 0:
varscope.reuse_variables()
# pylint: disable=cell-var-from-loop
call_cell = lambda: cell(input_, state)
# pylint: enable=cell-var-from-loop
if sequence_length is not None:
(output, state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=cell.state_size)
else:
(output, state) = call_cell()
outputs.append(output)
return (outputs, state)
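# Usage sketch (illustration only; `max_time`, `batch_size`, `input_depth`,
# `num_units` and `sequence_length` are assumed to be defined by the caller):
#
#   inputs = [array_ops.placeholder(dtypes.float32, [batch_size, input_depth])
#             for _ in range(max_time)]
#   cell = rnn_cell_impl.BasicLSTMCell(num_units)
#   outputs, state = static_rnn(cell, inputs, dtype=dtypes.float32,
#                               sequence_length=sequence_length)
#   # `outputs` is a length `max_time` list of [batch_size, num_units] tensors.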
def static_state_saving_rnn(cell,
inputs,
state_saver,
state_name,
sequence_length=None,
scope=None):
"""RNN that accepts a state saver for time-truncated RNN calculation.
Args:
cell: An instance of `RNNCell`.
inputs: A length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`.
state_saver: A state saver object with methods `state` and `save_state`.
state_name: Python string or tuple of strings. The name to use with the
state_saver. If the cell returns tuples of states (i.e.,
`cell.state_size` is a tuple) then `state_name` should be a tuple of
strings having the same length as `cell.state_size`. Otherwise it should
be a single string.
sequence_length: (optional) An int32/int64 vector size [batch_size].
See the documentation for rnn() for more details about sequence_length.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
states is the final state
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If `inputs` is `None` or an empty list, or if the arity and
type of `state_name` does not match that of `cell.state_size`.
"""
state_size = cell.state_size
state_is_tuple = nest.is_sequence(state_size)
state_name_tuple = nest.is_sequence(state_name)
if state_is_tuple != state_name_tuple:
raise ValueError("state_name should be the same type as cell.state_size. "
"state_name: %s, cell.state_size: %s" % (str(state_name),
str(state_size)))
if state_is_tuple:
state_name_flat = nest.flatten(state_name)
state_size_flat = nest.flatten(state_size)
if len(state_name_flat) != len(state_size_flat):
raise ValueError("#elems(state_name) != #elems(state_size): %d vs. %d" %
(len(state_name_flat), len(state_size_flat)))
initial_state = nest.pack_sequence_as(
structure=state_size,
flat_sequence=[state_saver.state(s) for s in state_name_flat])
else:
initial_state = state_saver.state(state_name)
(outputs, state) = static_rnn(
cell,
inputs,
initial_state=initial_state,
sequence_length=sequence_length,
scope=scope)
if state_is_tuple:
flat_state = nest.flatten(state)
state_name = nest.flatten(state_name)
save_state = [
state_saver.save_state(name, substate)
for name, substate in zip(state_name, flat_state)
]
else:
save_state = [state_saver.save_state(state_name, state)]
with ops.control_dependencies(save_state):
last_output = outputs[-1]
flat_last_output = nest.flatten(last_output)
flat_last_output = [
array_ops.identity(output) for output in flat_last_output
]
outputs[-1] = nest.pack_sequence_as(
structure=last_output, flat_sequence=flat_last_output)
return (outputs, state)
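# A minimal in-memory state saver sketch (illustration only, not part of the
# original module); production input pipelines normally use a queue-backed
# state saver instead. Assumes each saved state lives in a `Variable` shaped
# [batch_size, state_size] and that a dict of such variables is supplied.
class _SimpleStateSaver(object):
  """Implements the `state`/`save_state` protocol used above."""

  def __init__(self, state_variables):
    self._states = state_variables  # dict: state_name -> tf.Variable

  def state(self, name):
    return self._states[name]

  def save_state(self, name, value):
    # Return an op; static_state_saving_rnn wires it in as a control
    # dependency on the final outputs.
    return self._states[name].assign(value)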
def static_bidirectional_rnn(cell_fw,
cell_bw,
inputs,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, input_size], or a nested tuple of such elements.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
outputs is a length `T` list of outputs (one for each input), which
are depth-concatenated forward and backward outputs.
output_state_fw is the final state of the forward rnn.
output_state_bw is the final state of the backward rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
ValueError: If inputs is None or an empty list.
"""
  # `_like_rnncell` is not defined in this module; use the same check as the
  # other entry points in this file.
  assert_like_rnncell("cell_fw", cell_fw)
  assert_like_rnncell("cell_bw", cell_bw)
if not nest.is_sequence(inputs):
raise TypeError("inputs must be a sequence")
if not inputs:
raise ValueError("inputs must not be empty")
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = static_rnn(
cell_fw,
inputs,
initial_state_fw,
dtype,
sequence_length,
scope=fw_scope)
# Backward direction
with vs.variable_scope("bw") as bw_scope:
reversed_inputs = _reverse_seq(inputs, sequence_length)
tmp, output_state_bw = static_rnn(
cell_bw,
reversed_inputs,
initial_state_bw,
dtype,
sequence_length,
scope=bw_scope)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
flat_output_fw = nest.flatten(output_fw)
flat_output_bw = nest.flatten(output_bw)
flat_outputs = tuple(
array_ops.concat([fw, bw], 1)
for fw, bw in zip(flat_output_fw, flat_output_bw))
outputs = nest.pack_sequence_as(
structure=output_fw, flat_sequence=flat_outputs)
return (outputs, output_state_fw, output_state_bw)
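# Usage sketch (illustration only; `num_units`, `inputs` and
# `sequence_length` are assumed to be defined by the caller):
#
#   cell_fw = rnn_cell_impl.GRUCell(num_units)
#   cell_bw = rnn_cell_impl.GRUCell(num_units)
#   outputs, state_fw, state_bw = static_bidirectional_rnn(
#       cell_fw, cell_bw, inputs, dtype=dtypes.float32,
#       sequence_length=sequence_length)
#   # Each outputs[t] has shape [batch_size, 2 * num_units].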
| 41.316724
| 88
| 0.693321
|
f70a7c8757ec8f2334ab61df77799b4f77e92dfc
| 1,711
|
py
|
Python
|
libs/cnn/customlayers.py
|
franckfotso/kr_cnn_models
|
242f4a6650004af5849404c8e0e7b3621ba020b6
|
[
"MIT"
] | 1
|
2017-07-06T03:30:33.000Z
|
2017-07-06T03:30:33.000Z
|
libs/cnn/customlayers.py
|
romyny/kr_cnn_models
|
242f4a6650004af5849404c8e0e7b3621ba020b6
|
[
"MIT"
] | null | null | null |
libs/cnn/customlayers.py
|
romyny/kr_cnn_models
|
242f4a6650004af5849404c8e0e7b3621ba020b6
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Written by: Romuald FOTSO
# Licensed: MIT License
# Copyright (c) 2017
# Based on 'dandxy89' github repository:
# https://github.com/dandxy89/ImageModels/blob/master/KerasLayers/Custom_layers.py
# --------------------------------------------------------
from keras.engine import Layer
from keras import backend as K
class LRN2D(Layer):
def __init__(self, alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
if n % 2 == 0:
raise NotImplementedError(
"LRN2D only works with odd n. n provided: " + str(n))
super(LRN2D, self).__init__(**kwargs)
self.alpha = alpha
self.k = k
self.beta = beta
self.n = n
def get_output(self, train):
X = self.get_input(train)
b, ch, r, c = K.shape(X)
half_n = self.n // 2
input_sqr = K.square(X)
extra_channels = K.zeros((b, ch + 2 * half_n, r, c))
input_sqr = K.concatenate([extra_channels[:, :half_n, :, :],
input_sqr,
extra_channels[:, half_n + ch:, :, :]],
axis=1)
scale = self.k
for i in range(self.n):
scale += self.alpha * input_sqr[:, i:i + ch, :, :]
scale = scale ** self.beta
return X / scale
def get_config(self):
config = {"name": self.__class__.__name__,
"alpha": self.alpha,
"k": self.k,
"beta": self.beta,
"n": self.n}
base_config = super(LRN2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
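# Usage sketch (illustration only): LRN2D assumes channels-first input, as
# implied by the (batch, channels, rows, cols) unpacking in get_output above.
# Note that get_output/get_input follow the old Keras 0.x Layer API, and the
# tuple-unpacking of K.shape(X) assumes a backend whose symbolic shape is
# indexable (e.g. Theano); with Keras 2 one would implement call(self, x).
#
#   from keras.models import Sequential
#   from keras.layers import Conv2D
#
#   model = Sequential()
#   model.add(Conv2D(96, (11, 11), strides=4, input_shape=(3, 227, 227)))
#   model.add(LRN2D(alpha=1e-4, k=2, beta=0.75, n=5))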
| 33.54902
| 82
| 0.484512
|
f70a7c97aa3c694dfc2fdc8eb7fb9de62211e209
| 119
|
py
|
Python
|
carbon/client/metrics/__init__.py
|
mosquito/carbonate
|
5eca69602b9fc03dc0b982f9104c7ebb04159059
|
[
"MIT"
] | 2
|
2017-12-21T15:40:12.000Z
|
2018-02-07T10:00:14.000Z
|
carbon/client/metrics/__init__.py
|
mosquito/carbonate
|
5eca69602b9fc03dc0b982f9104c7ebb04159059
|
[
"MIT"
] | 2
|
2016-12-02T08:53:48.000Z
|
2016-12-05T21:46:04.000Z
|
carbon/client/metrics/__init__.py
|
mosquito/carbonate
|
5eca69602b9fc03dc0b982f9104c7ebb04159059
|
[
"MIT"
] | 5
|
2015-07-22T14:31:28.000Z
|
2020-09-30T08:20:29.000Z
|
from .timer import Timer
from .simple import Counter
from .heartbeat import HeartBeat
from .collector import Collector
| 23.8
| 32
| 0.831933
|
f70a7d0c89eb7ecab3a17df6d81f44d7bf8719a8
| 928
|
py
|
Python
|
examples/addons/pycsg_sphere_vs_menger_sponge.py
|
hh-wu/ezdxf
|
62509ba39b826ee9b36f19c0a5abad7f3518186a
|
[
"MIT"
] | 1
|
2021-06-05T09:15:15.000Z
|
2021-06-05T09:15:15.000Z
|
examples/addons/pycsg_sphere_vs_menger_sponge.py
|
luoyu-123/ezdxf
|
40963a2010028f87846241e08434f43ab421f3fb
|
[
"MIT"
] | null | null | null |
examples/addons/pycsg_sphere_vs_menger_sponge.py
|
luoyu-123/ezdxf
|
40963a2010028f87846241e08434f43ab421f3fb
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
from pathlib import Path
from time import perf_counter
import ezdxf
from ezdxf.render.forms import sphere
from ezdxf.addons import MengerSponge
from ezdxf.addons.pycsg import CSG
DIR = Path('~/Desktop/Outbox').expanduser()
doc = ezdxf.new()
doc.layers.new('sponge', dxfattribs={'color': 5})
doc.layers.new('sphere', dxfattribs={'color': 6})
doc.set_modelspace_vport(6, center=(5, 0))
msp = doc.modelspace()
sponge1 = MengerSponge(level=3).mesh()
sphere1 = sphere(count=32, stacks=16, radius=.5, quads=True).translate(.25, .25, 1)
t0 = perf_counter()
subtract = (CSG(sponge1, meshid=1) - CSG(sphere1, meshid=2))
t1 = perf_counter()
# get mesh result by id
subtract.mesh(1).render(msp, dxfattribs={'layer': 'sponge'})
subtract.mesh(2).render(msp, dxfattribs={'layer': 'sphere'})
print(f'runtime: {t1-t0:.3f}s')
doc.saveas(DIR / 'csg_sphere_vs_menger_sponge.dxf')
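# Union and intersection follow the same pattern; a hedged sketch, assuming
# the `+` (union) and `*` (intersection) operator overloads of the CSG class:
#
#   union = CSG(sponge1, meshid=1) + CSG(sphere1, meshid=2)
#   union.mesh(1).render(msp, dxfattribs={'layer': 'sponge'})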
| 29
| 83
| 0.727371
|
f70a7dc2646487373bb5e2077dd5a51a79e9d7fb
| 10,424
|
py
|
Python
|
Code/Model/resnet.py
|
Jinwon-DK/GaitAnalysis
|
6b7be4aae9963b8986519af5bcbff39f32ebf2cd
|
[
"MIT"
] | 5
|
2020-07-23T05:55:54.000Z
|
2021-07-09T22:15:33.000Z
|
Code/Model/resnet.py
|
Jinwon-DK/GaitAnalysis
|
6b7be4aae9963b8986519af5bcbff39f32ebf2cd
|
[
"MIT"
] | null | null | null |
Code/Model/resnet.py
|
Jinwon-DK/GaitAnalysis
|
6b7be4aae9963b8986519af5bcbff39f32ebf2cd
|
[
"MIT"
] | 2
|
2020-07-23T06:05:54.000Z
|
2021-04-13T05:55:24.000Z
|
from __future__ import division
import six
import keras
from keras.models import Model
from keras.layers import (
Input,
Activation,
Dense,
Flatten
)
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import add
from keras.layers import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
import tensorflow as tf
def _bn_relu(input):
"""Helper to build a BN -> relu block
"""
norm = BatchNormalization(axis=CHANNEL_AXIS)(input)
return Activation("relu")(norm)
def _conv_bn_relu(**conv_params):
"""Helper to build a conv -> BN -> relu block
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
conv = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(input)
return _bn_relu(conv)
return f
def _bn_relu_conv(**conv_params):
"""Helper to build a BN -> relu -> conv block.
This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
activation = _bn_relu(input)
return Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(activation)
return f
def _shortcut(input, residual):
"""Adds a shortcut between input and residual block and merges them with "sum"
"""
# Expand channels of shortcut to match residual.
# Stride appropriately to match residual (width, height)
# Should be int if network architecture is correctly configured.
input_shape = K.int_shape(input)
residual_shape = K.int_shape(residual)
stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]
shortcut = input
# 1 X 1 conv if shape is different. Else identity.
if stride_width > 1 or stride_height > 1 or not equal_channels:
shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
kernel_size=(1, 1),
strides=(stride_width, stride_height),
padding="valid",
kernel_initializer="he_normal",
kernel_regularizer=l2(0.0001))(input)
return add([shortcut, residual])
def _residual_block(block_function, filters, repetitions, is_first_layer=False):
"""Builds a residual block with repeating bottleneck blocks.
"""
def f(input):
for i in range(repetitions):
init_strides = (1, 1)
if i == 0 and not is_first_layer:
init_strides = (2, 2)
input = block_function(filters=filters, init_strides=init_strides,
is_first_block_of_first_layer=(is_first_layer and i == 0))(input)
return input
return f
def basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
"""Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
"""
def f(input):
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv1 = Conv2D(filters=filters, kernel_size=(3, 3),
strides=init_strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(input)
else:
conv1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),
strides=init_strides)(input)
residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)
return _shortcut(input, residual)
return f
def bottleneck(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
"""Bottleneck architecture for > 34 layer resnet.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
Returns:
A final conv layer of filters * 4
"""
def f(input):
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv_1_1 = Conv2D(filters=filters, kernel_size=(1, 1),
strides=init_strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(input)
else:
conv_1_1 = _bn_relu_conv(filters=filters, kernel_size=(1, 1),
strides=init_strides)(input)
conv_3_3 = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv_1_1)
residual = _bn_relu_conv(filters=filters * 4, kernel_size=(1, 1))(conv_3_3)
return _shortcut(input, residual)
return f
def _handle_dim_ordering():
global ROW_AXIS
global COL_AXIS
global CHANNEL_AXIS
if K.image_dim_ordering() == 'tf':
ROW_AXIS = 1
COL_AXIS = 2
CHANNEL_AXIS = 3
else:
CHANNEL_AXIS = 1
ROW_AXIS = 2
COL_AXIS = 3
def _get_block(identifier):
if isinstance(identifier, six.string_types):
res = globals().get(identifier)
if not res:
raise ValueError('Invalid {}'.format(identifier))
return res
return identifier
class ResnetBuilder(object):
@staticmethod
def build(input_shape, num_outputs, block_fn, repetitions, input):
"""Builds a custom ResNet like architecture.
Args:
input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
num_outputs: The number of outputs at final softmax layer
block_fn: The block function to use. This is either `basic_block` or `bottleneck`.
The original paper used basic_block for layers < 50
repetitions: Number of repetitions of various block units.
                At each block unit, the number of filters is doubled and the input size is halved
            input: The input tensor (e.g. a `keras.layers.Input`) on which to build the network.
Returns:
The keras `Model`.
"""
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")
# Permute dimension order if necessary
#if K.image_dim_ordering() == 'tf':
# input_shape = (input_shape[1], input_shape[2], input_shape[0])#???
# Load function from str if needed.
block_fn = _get_block(block_fn)
# input = Input(shape=input_shape)
conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input)
pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(conv1)
block = pool1
filters = 64
for i, r in enumerate(repetitions):
block = _residual_block(block_fn, filters=filters, repetitions=r, is_first_layer=(i == 0))(block)
filters *= 2
# Last activation
block = _bn_relu(block)
# Classifier block
block_shape = K.int_shape(block)
pool2 = AveragePooling2D(pool_size=(block_shape[ROW_AXIS], block_shape[COL_AXIS]),
strides=(1, 1))(block)
flatten1 = Flatten()(pool2)
# dense = Dense(units=num_outputs, kernel_initializer="he_normal",
# activation="softmax")(flatten1)
# model = Model(inputs=input, outputs=flatten1)
return flatten1
@staticmethod
def build_resnet_18(input_shape, num_outputs, input):
return ResnetBuilder.build(input_shape, num_outputs, basic_block, [2, 2, 2, 2], input)
    # `ResnetBuilder.build` requires an `input` tensor; thread it through the
    # deeper builders the same way build_resnet_18 does.
    @staticmethod
    def build_resnet_34(input_shape, num_outputs, input):
        return ResnetBuilder.build(input_shape, num_outputs, basic_block, [3, 4, 6, 3], input)
    @staticmethod
    def build_resnet_50(input_shape, num_outputs, input):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 6, 3], input)
    @staticmethod
    def build_resnet_101(input_shape, num_outputs, input):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 23, 3], input)
    @staticmethod
    def build_resnet_152(input_shape, num_outputs, input):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 8, 36, 3], input)
def resnet_builder(shape_list, nb_class):
input_layers = list()
resnet_layers = list()
for input_shape in shape_list:
input_layer = keras.layers.Input(shape=input_shape)
input_layers.append(input_layer)
resnet_layers.append(ResnetBuilder.build_resnet_18(input_shape, nb_class, input_layer))
merged_layer = keras.layers.concatenate(resnet_layers)
merged_dense = keras.layers.Dense(units=1000, activation='relu')(merged_layer)
merged_batchnorm = keras.layers.BatchNormalization()(merged_dense)
merged_dropout = keras.layers.Dropout(0.7)(merged_batchnorm)
merged_class_layer = keras.layers.Dense(units=nb_class, activation='softmax')(merged_dropout)
    model = keras.models.Model(inputs=input_layers, outputs=merged_class_layer)
# model.compile(optimizer=keras.optimizers.Adam(lr=0.0001),
# loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
model.compile(optimizer=keras.optimizers.Adam(lr=0.0001),
loss='categorical_crossentropy', metrics=['accuracy'])
return model
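# --- Editor's sketch, not part of the original file ---
# A minimal usage example for resnet_builder, assuming a Keras version where
# K.image_dim_ordering() exists (as the file already assumes) and channels-last
# inputs. The shapes and class count below are illustrative assumptions.
if __name__ == '__main__':
    demo_model = resnet_builder(shape_list=[(64, 64, 3), (64, 64, 3)], nb_class=10)
    demo_model.summary()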
| 37.905455
| 109
| 0.647448
|
from __future__ import division
import six
import keras
from keras.models import Model
from keras.layers import (
Input,
Activation,
Dense,
Flatten
)
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import add
from keras.layers import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
import tensorflow as tf
def _bn_relu(input):
norm = BatchNormalization(axis=CHANNEL_AXIS)(input)
return Activation("relu")(norm)
def _conv_bn_relu(**conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
conv = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(input)
return _bn_relu(conv)
return f
def _bn_relu_conv(**conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
activation = _bn_relu(input)
return Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(activation)
return f
def _shortcut(input, residual):
input_shape = K.int_shape(input)
residual_shape = K.int_shape(residual)
stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]
shortcut = input
if stride_width > 1 or stride_height > 1 or not equal_channels:
shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
kernel_size=(1, 1),
strides=(stride_width, stride_height),
padding="valid",
kernel_initializer="he_normal",
kernel_regularizer=l2(0.0001))(input)
return add([shortcut, residual])
def _residual_block(block_function, filters, repetitions, is_first_layer=False):
def f(input):
for i in range(repetitions):
init_strides = (1, 1)
if i == 0 and not is_first_layer:
init_strides = (2, 2)
input = block_function(filters=filters, init_strides=init_strides,
is_first_block_of_first_layer=(is_first_layer and i == 0))(input)
return input
return f
def basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
def f(input):
if is_first_block_of_first_layer:
conv1 = Conv2D(filters=filters, kernel_size=(3, 3),
strides=init_strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(input)
else:
conv1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),
strides=init_strides)(input)
residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)
return _shortcut(input, residual)
return f
def bottleneck(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
def f(input):
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv_1_1 = Conv2D(filters=filters, kernel_size=(1, 1),
strides=init_strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(input)
else:
conv_1_1 = _bn_relu_conv(filters=filters, kernel_size=(1, 1),
strides=init_strides)(input)
conv_3_3 = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv_1_1)
residual = _bn_relu_conv(filters=filters * 4, kernel_size=(1, 1))(conv_3_3)
return _shortcut(input, residual)
return f
def _handle_dim_ordering():
global ROW_AXIS
global COL_AXIS
global CHANNEL_AXIS
if K.image_dim_ordering() == 'tf':
ROW_AXIS = 1
COL_AXIS = 2
CHANNEL_AXIS = 3
else:
CHANNEL_AXIS = 1
ROW_AXIS = 2
COL_AXIS = 3
def _get_block(identifier):
if isinstance(identifier, six.string_types):
res = globals().get(identifier)
if not res:
raise ValueError('Invalid {}'.format(identifier))
return res
return identifier
class ResnetBuilder(object):
@staticmethod
def build(input_shape, num_outputs, block_fn, repetitions, input):
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")
block_fn = _get_block(block_fn)
conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input)
pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(conv1)
block = pool1
filters = 64
for i, r in enumerate(repetitions):
block = _residual_block(block_fn, filters=filters, repetitions=r, is_first_layer=(i == 0))(block)
filters *= 2
block = _bn_relu(block)
block_shape = K.int_shape(block)
pool2 = AveragePooling2D(pool_size=(block_shape[ROW_AXIS], block_shape[COL_AXIS]),
strides=(1, 1))(block)
flatten1 = Flatten()(pool2)
return flatten1
@staticmethod
def build_resnet_18(input_shape, num_outputs, input):
return ResnetBuilder.build(input_shape, num_outputs, basic_block, [2, 2, 2, 2], input)
@staticmethod
    def build_resnet_34(input_shape, num_outputs, input):
        return ResnetBuilder.build(input_shape, num_outputs, basic_block, [3, 4, 6, 3], input)
    @staticmethod
    def build_resnet_50(input_shape, num_outputs, input):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 6, 3], input)
    @staticmethod
    def build_resnet_101(input_shape, num_outputs, input):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 23, 3], input)
    @staticmethod
    def build_resnet_152(input_shape, num_outputs, input):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 8, 36, 3], input)
def resnet_builder(shape_list, nb_class):
input_layers = list()
resnet_layers = list()
for input_shape in shape_list:
input_layer = keras.layers.Input(shape=input_shape)
input_layers.append(input_layer)
resnet_layers.append(ResnetBuilder.build_resnet_18(input_shape, nb_class, input_layer))
merged_layer = keras.layers.concatenate(resnet_layers)
merged_dense = keras.layers.Dense(units=1000, activation='relu')(merged_layer)
merged_batchnorm = keras.layers.BatchNormalization()(merged_dense)
merged_dropout = keras.layers.Dropout(0.7)(merged_batchnorm)
merged_class_layer = keras.layers.Dense(units=nb_class, activation='softmax')(merged_dropout)
    model = keras.models.Model(inputs=input_layers, outputs=merged_class_layer)
model.compile(optimizer=keras.optimizers.Adam(lr=0.0001),
loss='categorical_crossentropy', metrics=['accuracy'])
return model
| true
| true
|
f70a7e2116a93a54134b28967766017ed21b90c0
| 633
|
py
|
Python
|
Back-End/Python/Basics/Part -4- OOP/07 - Metaprogramming/04_metaclass.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 25
|
2021-04-28T02:51:26.000Z
|
2022-03-24T13:58:04.000Z
|
Back-End/Python/Basics/Part -4- OOP/07 - Metaprogramming/04_metaclass.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 1
|
2022-03-03T23:33:41.000Z
|
2022-03-03T23:35:41.000Z
|
Back-End/Python/Basics/Part -4- OOP/07 - Metaprogramming/04_metaclass.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 15
|
2021-05-30T01:35:20.000Z
|
2022-03-25T12:38:25.000Z
|
import math
class CustomType(type):
def __new__(mcls, name, bases, class_dict):
print(f'Using custom metaclass {mcls} to create class {name}...')
cls_obj = super().__new__(mcls, name, bases, class_dict)
cls_obj.circ = lambda self: 2 * math.pi * self.r
return cls_obj
class Circle(metaclass=CustomType):
def __init__(self, x, y, r):
self.x = x
self.y = y
self.r = r
def area(self):
return math.pi * self.r ** 2
# Using custom metaclass <class '__main__.CustomType'> to create class Circle...
c = Circle(0, 0, 1)
print(c.area())
print(c.circ())
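# Editor's note (hedged sketch, not part of the original file): CustomType
# attaches `circ` to the class object itself, so it appears in the class
# namespace just like a method defined in the class body. With r=1 the two
# prints above yield pi and 2*pi:
#   3.141592653589793
#   6.283185307179586
print('circ' in vars(Circle))  # True -- injected by CustomType.__new__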
| 26.375
| 81
| 0.606635
|
import math
class CustomType(type):
def __new__(mcls, name, bases, class_dict):
print(f'Using custom metaclass {mcls} to create class {name}...')
cls_obj = super().__new__(mcls, name, bases, class_dict)
cls_obj.circ = lambda self: 2 * math.pi * self.r
return cls_obj
class Circle(metaclass=CustomType):
def __init__(self, x, y, r):
self.x = x
self.y = y
self.r = r
def area(self):
return math.pi * self.r ** 2
c = Circle(0, 0, 1)
print(c.area())
print(c.circ())
| true
| true
|
f70a7f477db698ca69684e5e9325cf10a6ff9cb3
| 1,801
|
py
|
Python
|
main.py
|
ankurankan/game_of_life
|
81cf2f7f70a05019e78206d1ee7a8205aa590186
|
[
"MIT"
] | null | null | null |
main.py
|
ankurankan/game_of_life
|
81cf2f7f70a05019e78206d1ee7a8205aa590186
|
[
"MIT"
] | null | null | null |
main.py
|
ankurankan/game_of_life
|
81cf2f7f70a05019e78206d1ee7a8205aa590186
|
[
"MIT"
] | null | null | null |
from time import sleep
import numpy as np
import matplotlib.pyplot as plt
def get_initial_state(size):
return np.random.choice([0, 1], size)
def compute_next_state(state):
new_state = np.zeros(state.shape, dtype=int)
for i in range(state.shape[0]):
for j in range(state.shape[1]):
low_x, high_x = max(0, i-1), min(i+2, state.shape[0])
low_y, high_y = max(0, j-1), min(j+2, state.shape[1])
n_live = np.sum(state[low_x: high_x, low_y: high_y]) - state[i, j]
if (state[i, j] == 1) and (n_live < 2):
new_state[i, j] = 0
elif (state[i, j] == 1) and (2 <= n_live <= 3):
new_state[i, j] = 1
elif (state[i, j] == 1) and (n_live > 3):
new_state[i, j] = 0
elif (state[i, j] == 0) and (n_live == 3):
new_state[i, j] = 1
else:
new_state[i, j] = state[i, j]
return new_state
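# Editor's note (hedged sketch, not part of the original file): a quick sanity
# check of the rules above -- a vertical blinker flips to a horizontal one
# after a single step:
#   b = np.zeros((5, 5), dtype=int); b[1:4, 2] = 1
#   assert (compute_next_state(b)[2, 1:4] == 1).all()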
def start(initial_state=None, loop_delay=1, size=(200, 200)):
if initial_state is None:
state = get_initial_state(size)
else:
state = initial_state
size = state.shape
age = np.zeros(size, dtype=int)
counter = 0
while True:
new_state = compute_next_state(state)
age += new_state
age = age * new_state
counter += 1
plt.imshow(age, cmap='Greys')
plt.xlim(right=size[1], left=0)
plt.ylim(top=0, bottom=size[0])
plt.pause(loop_delay)
if (np.sum(new_state) == 0) or (new_state == state).all():
print(counter)
state = get_initial_state(size)
age = np.zeros(size, dtype=int)
counter = 0
else:
state = new_state
if __name__ == "__main__":
start()
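# --- Editor's sketch, an assumption rather than part of the original file ---
# Seeding a known pattern such as a glider makes it easy to eyeball-check
# compute_next_state() against the standard Game of Life rules.
def demo_glider():
    glider = np.zeros((20, 20), dtype=int)
    for i, j in [(1, 2), (2, 3), (3, 1), (3, 2), (3, 3)]:
        glider[i, j] = 1
    start(initial_state=glider, loop_delay=0.2)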
| 28.587302
| 78
| 0.533592
|
from time import sleep
import numpy as np
import matplotlib.pyplot as plt
def get_initial_state(size):
return np.random.choice([0, 1], size)
def compute_next_state(state):
new_state = np.zeros(state.shape, dtype=int)
for i in range(state.shape[0]):
for j in range(state.shape[1]):
low_x, high_x = max(0, i-1), min(i+2, state.shape[0])
low_y, high_y = max(0, j-1), min(j+2, state.shape[1])
n_live = np.sum(state[low_x: high_x, low_y: high_y]) - state[i, j]
if (state[i, j] == 1) and (n_live < 2):
new_state[i, j] = 0
elif (state[i, j] == 1) and (2 <= n_live <= 3):
new_state[i, j] = 1
elif (state[i, j] == 1) and (n_live > 3):
new_state[i, j] = 0
elif (state[i, j] == 0) and (n_live == 3):
new_state[i, j] = 1
else:
new_state[i, j] = state[i, j]
return new_state
def start(initial_state=None, loop_delay=1, size=(200, 200)):
if initial_state is None:
state = get_initial_state(size)
else:
state = initial_state
size = state.shape
age = np.zeros(size, dtype=int)
counter = 0
while True:
new_state = compute_next_state(state)
age += new_state
age = age * new_state
counter += 1
plt.imshow(age, cmap='Greys')
plt.xlim(right=size[1], left=0)
plt.ylim(top=0, bottom=size[0])
plt.pause(loop_delay)
if (np.sum(new_state) == 0) or (new_state == state).all():
print(counter)
state = get_initial_state(size)
age = np.zeros(size, dtype=int)
counter = 0
else:
state = new_state
if __name__ == "__main__":
start()
| true
| true
|
f70a7f830ddc667a9fa64921ab6b3d031ed80d41
| 42,759
|
py
|
Python
|
tests/auth_tests/test_forms.py
|
markvdb/django
|
b08a18f17ba53eb0bc7fd7993924f3d7f8ed5c52
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2019-03-04T12:45:49.000Z
|
2019-03-04T12:45:49.000Z
|
tests/auth_tests/test_forms.py
|
Kiku-git/django
|
b08a18f17ba53eb0bc7fd7993924f3d7f8ed5c52
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2019-06-24T07:34:43.000Z
|
2019-06-24T07:34:43.000Z
|
tests/auth_tests/test_forms.py
|
Kiku-git/django
|
b08a18f17ba53eb0bc7fd7993924f3d7f8ed5c52
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
import datetime
import re
from unittest import mock
from django import forms
from django.contrib.auth.forms import (
AdminPasswordChangeForm, AuthenticationForm, PasswordChangeForm,
PasswordResetForm, ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget,
SetPasswordForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_login_failed
from django.contrib.sites.models import Site
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field, IntegerField
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from .models.custom_user import (
CustomUser, CustomUserWithoutIsActiveField, ExtensionUser,
)
from .models.with_custom_email_field import CustomEmailField
from .models.with_integer_username import IntegerUsernameUser
from .settings import AUTH_TEMPLATES
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username='testclient', password='password', email='testclient@example.com')
cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False)
cls.u3 = User.objects.create_user(username='staff', password='password')
cls.u4 = User.objects.create(username='empty_password', password='')
cls.u5 = User.objects.create(username='unmanageable_password', password='$')
cls.u6 = User.objects.create(username='unknown_password', password='foo$bar')
class UserCreationFormTest(TestDataMixin, TestCase):
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[str(User._meta.get_field('username').error_messages['unique'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[str(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [str(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
data = {
'username': 'jsmith@example.com',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
u = form.save()
self.assertEqual(password_changed.call_count, 1)
self.assertEqual(repr(u), '<User: jsmith@example.com>')
def test_unicode_username(self):
data = {
'username': '宝',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(u.username, '宝')
def test_normalize_username(self):
# The normalization happens in AbstractBaseUser.clean() and ModelForm
# validation calls Model.clean().
ohm_username = 'testΩ' # U+2126 OHM SIGN
data = {
'username': ohm_username,
'password1': 'pwd2',
'password2': 'pwd2',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertNotEqual(user.username, ohm_username)
self.assertEqual(user.username, 'testΩ') # U+03A9 GREEK CAPITAL LETTER OMEGA
def test_duplicate_normalized_unicode(self):
"""
To prevent almost identical usernames, visually identical but differing
by their unicode code points only, Unicode NFKC normalization should
make appear them equal to Django.
"""
omega_username = 'iamtheΩ' # U+03A9 GREEK CAPITAL LETTER OMEGA
ohm_username = 'iamtheΩ' # U+2126 OHM SIGN
self.assertNotEqual(omega_username, ohm_username)
User.objects.create_user(username=omega_username, password='pwd')
data = {
'username': ohm_username,
'password1': 'pwd2',
'password2': 'pwd2',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['username'], ["A user with that username already exists."]
)
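        # Editor's note (hedged): the two usernames above differ only in their
        # code points; NFKC folds U+2126 OHM SIGN into U+03A9 GREEK CAPITAL
        # LETTER OMEGA, e.g.:
        #   import unicodedata
        #   unicodedata.normalize('NFKC', '\u2126') == '\u03a9'  # True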
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form['password2'].errors), 2)
self.assertIn('The password is too similar to the username.', form['password2'].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form['password2'].errors
)
def test_custom_form(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = ExtensionUser
fields = UserCreationForm.Meta.fields + ('date_of_birth',)
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
'date_of_birth': '1988-02-24',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_custom_form_with_different_username_field(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUser
fields = ('email', 'date_of_birth')
data = {
'email': 'test@client222.com',
'password1': 'testclient',
'password2': 'testclient',
'date_of_birth': '1988-02-24',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_custom_form_hidden_username_field(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUserWithoutIsActiveField
fields = ('email',) # without USERNAME_FIELD
data = {
'email': 'testclient@example.com',
'password1': 'testclient',
'password2': 'testclient',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password1': ' testpassword ',
'password2': ' testpassword ',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
])
def test_password_help_text(self):
form = UserCreationForm()
self.assertEqual(
form.fields['password1'].help_text,
            '<ul><li>Your password can&#39;t be too similar to your other personal information.</li></ul>'
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
])
def test_user_create_form_validates_password_with_all_data(self):
"""UserCreationForm password validation uses all of the form's data."""
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = User
fields = ('username', 'email', 'first_name', 'last_name')
form = CustomUserCreationForm({
'username': 'testuser',
'password1': 'testpassword',
'password2': 'testpassword',
'first_name': 'testpassword',
'last_name': 'lastname',
})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['password2'],
['The password is too similar to the first name.'],
)
def test_username_field_autocapitalize_none(self):
form = UserCreationForm()
self.assertEqual(form.fields['username'].widget.attrs.get('autocapitalize'), 'none')
def test_html_autocomplete_attributes(self):
form = UserCreationForm()
tests = (
('username', 'username'),
('password1', 'new-password'),
('password2', 'new-password'),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(form.fields[field_name].widget.attrs['autocomplete'], autocomplete)
# To verify that the login form rejects inactive users, use an authentication
# backend that allows them.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
class AuthenticationFormTest(TestDataMixin, TestCase):
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.non_field_errors(), [
form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
}
]
)
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
# Use an authentication backend that rejects inactive users.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.ModelBackend'])
def test_inactive_user_incorrect_password(self):
"""An invalid login doesn't leak the inactive status of a user."""
data = {
'username': 'inactive',
'password': 'incorrect',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.non_field_errors(), [
form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
}
]
)
def test_login_failed(self):
signal_calls = []
def signal_handler(**kwargs):
signal_calls.append(kwargs)
user_login_failed.connect(signal_handler)
fake_request = object()
try:
form = AuthenticationForm(fake_request, {
'username': 'testclient',
'password': 'incorrect',
})
self.assertFalse(form.is_valid())
self.assertIs(signal_calls[0]['request'], fake_request)
finally:
user_login_failed.disconnect(signal_handler)
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
# Use an authentication backend that allows inactive users.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows them to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError("This user is disallowed.")
raise forms.ValidationError("Sorry, nobody's allowed in.")
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_unicode_username(self):
User.objects.create_user(username='Σαρα', password='pwd')
data = {
'username': 'Σαρα',
'password': 'pwd',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
@override_settings(AUTH_USER_MODEL='auth_tests.CustomEmailField')
def test_username_field_max_length_matches_user_model(self):
self.assertEqual(CustomEmailField._meta.get_field('username').max_length, 255)
data = {
'username': 'u' * 255,
'password': 'pwd',
'email': 'test@example.com',
}
CustomEmailField.objects.create_user(**data)
form = AuthenticationForm(None, data)
self.assertEqual(form.fields['username'].max_length, 255)
self.assertEqual(form.errors, {})
@override_settings(AUTH_USER_MODEL='auth_tests.IntegerUsernameUser')
def test_username_field_max_length_defaults_to_254(self):
self.assertIsNone(IntegerUsernameUser._meta.get_field('username').max_length)
data = {
'username': '0123456',
'password': 'password',
}
IntegerUsernameUser.objects.create_user(**data)
form = AuthenticationForm(None, data)
self.assertEqual(form.fields['username'].max_length, 254)
self.assertEqual(form.errors, {})
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
username_field = User._meta.get_field(User.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_autocapitalize_none(self):
form = AuthenticationForm()
self.assertEqual(form.fields['username'].widget.attrs.get('autocapitalize'), 'none')
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password': ' pass ',
}
form = AuthenticationForm(None, data)
        form.is_valid()  # Not necessary to have valid credentials for the test.
self.assertEqual(form.cleaned_data['password'], data['password'])
@override_settings(AUTH_USER_MODEL='auth_tests.IntegerUsernameUser')
def test_integer_username(self):
class CustomAuthenticationForm(AuthenticationForm):
username = IntegerField()
user = IntegerUsernameUser.objects.create_user(username=0, password='pwd')
data = {
'username': 0,
'password': 'pwd',
}
form = CustomAuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['username'], data['username'])
self.assertEqual(form.cleaned_data['password'], data['password'])
self.assertEqual(form.errors, {})
self.assertEqual(form.user_cache, user)
def test_get_invalid_login_error(self):
error = AuthenticationForm().get_invalid_login_error()
self.assertIsInstance(error, forms.ValidationError)
self.assertEqual(
error.message,
'Please enter a correct %(username)s and password. Note that both '
'fields may be case-sensitive.',
)
self.assertEqual(error.code, 'invalid_login')
self.assertEqual(error.params, {'username': 'username'})
def test_html_autocomplete_attributes(self):
form = AuthenticationForm()
tests = (
('username', 'username'),
('password', 'current-password'),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(form.fields[field_name].widget.attrs['autocomplete'], autocomplete)
class SetPasswordFormTest(TestDataMixin, TestCase):
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form["new_password2"].errors,
[str(form.error_messages['password_mismatch'])]
)
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'testclient',
'new_password2': 'testclient',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form["new_password2"].errors), 2)
self.assertIn('The password is too similar to the username.', form["new_password2"].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form["new_password2"].errors
)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': ' password ',
'new_password2': ' password ',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_help_text_translation(self):
french_help_texts = [
'Votre mot de passe ne peut pas trop ressembler à vos autres informations personnelles.',
'Votre mot de passe doit contenir au minimum 12 caractères.',
]
form = SetPasswordForm(self.u1)
with translation.override('fr'):
html = form.as_p()
for french_text in french_help_texts:
self.assertIn(french_text, html)
def test_html_autocomplete_attributes(self):
form = SetPasswordForm(self.u1)
tests = (
('new_password1', 'new-password'),
('new_password2', 'new-password'),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(form.fields[field_name].widget.attrs['autocomplete'], autocomplete)
class PasswordChangeFormTest(TestDataMixin, TestCase):
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors, [str(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors, [str(form.error_messages['password_mismatch'])])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields), ['old_password', 'new_password1', 'new_password2'])
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
user.set_password(' oldpassword ')
data = {
'old_password': ' oldpassword ',
'new_password1': ' pass ',
'new_password2': ' pass ',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['old_password'], data['old_password'])
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
def test_html_autocomplete_attributes(self):
user = User.objects.get(username='testclient')
form = PasswordChangeForm(user)
self.assertEqual(form.fields['old_password'].widget.attrs['autocomplete'], 'current-password')
class UserChangeFormTest(TestDataMixin, TestCase):
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
        # UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
# original hashed password contains $
self.assertIn('$', form.cleaned_data['password'])
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
def test_custom_form(self):
class CustomUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = ExtensionUser
fields = ('username', 'password', 'date_of_birth',)
user = User.objects.get(username='testclient')
data = {
'username': 'testclient',
'password': 'testclient',
'date_of_birth': '1998-02-24',
}
form = CustomUserChangeForm(data, instance=user)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['username'], 'testclient')
self.assertEqual(form.cleaned_data['date_of_birth'], datetime.date(1998, 2, 24))
def test_password_excluded(self):
class UserChangeFormWithoutPassword(UserChangeForm):
password = None
class Meta:
model = User
exclude = ['password']
form = UserChangeFormWithoutPassword()
self.assertNotIn('password', form.fields)
def test_username_field_autocapitalize_none(self):
form = UserChangeForm()
self.assertEqual(form.fields['username'].widget.attrs.get('autocapitalize'), 'none')
@override_settings(TEMPLATES=AUTH_TEMPLATES)
class PasswordResetFormTest(TestDataMixin, TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
def create_dummy_user(self):
"""
Create a user and return a tuple (user_object, username, email).
"""
username = 'jsmith'
email = 'jsmith@example.com'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
"""
Test nonexistent email address. This should not fail because it would
expose information about registered users.
"""
data = {'email': 'foo@bar.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': 'testclient@example.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_custom_email_constructor(self):
data = {'email': 'testclient@example.com'}
class CustomEmailPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
EmailMultiAlternatives(
"Forgot your password?",
"Sorry to hear you forgot your password.",
None, [to_email],
['site_monitor@example.com'],
headers={'Reply-To': 'webmaster@example.com'},
alternatives=[
("Really sorry to hear you forgot your password.", "text/html")
],
).send()
form = CustomEmailPasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com'])
self.assertEqual(mail.outbox[0].content_subtype, "plain")
def test_preserve_username_case(self):
"""
Preserve the case of the user name (before the @ in the email address)
when creating a user (#5605).
"""
user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
self.assertEqual(user.email, 'tesT@example.com')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
"""
        An inactive user cannot receive a password reset email.
"""
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', 'test@example.com', 'test')
data = {"email": "test@example.com"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
def test_save_html_email_template_name(self):
"""
Test the PasswordResetForm.save() method with html_email_template_name
parameter specified.
Test to ensure that a multipart email is sent with both text/plain
and text/html parts.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(re.match(
r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
message.get_payload(1).get_payload()
))
@override_settings(AUTH_USER_MODEL='auth_tests.CustomEmailField')
def test_custom_email_field(self):
email = 'test@mail.com'
CustomEmailField.objects.create_user('test name', 'test password', email)
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [email])
def test_html_autocomplete_attributes(self):
form = PasswordResetForm()
self.assertEqual(form.fields['email'].widget.attrs['autocomplete'], 'email')
class ReadOnlyPasswordHashTest(SimpleTestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.PBKDF2PasswordHasher'])
def test_render(self):
widget = ReadOnlyPasswordHashWidget()
value = 'pbkdf2_sha256$100000$a6Pucb1qSFcD$WmCkn9Hqidj48NVe5x0FEM6A9YiOqQcl/83m2Z5udm0='
self.assertHTMLEqual(
widget.render('name', value, {'id': 'id_password'}),
"""
<div id="id_password">
<strong>algorithm</strong>: pbkdf2_sha256
<strong>iterations</strong>: 100000
<strong>salt</strong>: a6Pucb******
<strong>hash</strong>: WmCkn9**************************************
</div>
"""
)
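        # Editor's note (hedged): the expected HTML above shows that the widget
        # renders a masked summary of the stored hash (algorithm, iterations,
        # truncated salt and hash) rather than the raw value itself.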
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field.has_changed('aaa', 'bbb'))
class AdminPasswordChangeFormTest(TestDataMixin, TestCase):
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'password1': 'test123',
'password2': 'test123',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'password1': ' pass ',
'password2': ' pass ',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
def test_non_matching_passwords(self):
user = User.objects.get(username='testclient')
data = {'password1': 'password1', 'password2': 'password2'}
form = AdminPasswordChangeForm(user, data)
self.assertEqual(form.errors['password2'], [form.error_messages['password_mismatch']])
def test_missing_passwords(self):
user = User.objects.get(username='testclient')
data = {'password1': '', 'password2': ''}
form = AdminPasswordChangeForm(user, data)
required_error = [Field.default_error_messages['required']]
self.assertEqual(form.errors['password1'], required_error)
self.assertEqual(form.errors['password2'], required_error)
def test_one_password(self):
user = User.objects.get(username='testclient')
form1 = AdminPasswordChangeForm(user, {'password1': '', 'password2': 'test'})
required_error = [Field.default_error_messages['required']]
self.assertEqual(form1.errors['password1'], required_error)
self.assertNotIn('password2', form1.errors)
form2 = AdminPasswordChangeForm(user, {'password1': 'test', 'password2': ''})
self.assertEqual(form2.errors['password2'], required_error)
self.assertNotIn('password1', form2.errors)
def test_html_autocomplete_attributes(self):
user = User.objects.get(username='testclient')
form = AdminPasswordChangeForm(user)
tests = (
('password1', 'new-password'),
('password2', 'new-password'),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(form.fields[field_name].widget.attrs['autocomplete'], autocomplete)
| 40.68411
| 119
| 0.634556
|
import datetime
import re
from unittest import mock
from django import forms
from django.contrib.auth.forms import (
AdminPasswordChangeForm, AuthenticationForm, PasswordChangeForm,
PasswordResetForm, ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget,
SetPasswordForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_login_failed
from django.contrib.sites.models import Site
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field, IntegerField
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from .models.custom_user import (
CustomUser, CustomUserWithoutIsActiveField, ExtensionUser,
)
from .models.with_custom_email_field import CustomEmailField
from .models.with_integer_username import IntegerUsernameUser
from .settings import AUTH_TEMPLATES
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username='testclient', password='password', email='testclient@example.com')
cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False)
cls.u3 = User.objects.create_user(username='staff', password='password')
cls.u4 = User.objects.create(username='empty_password', password='')
cls.u5 = User.objects.create(username='unmanageable_password', password='$')
cls.u6 = User.objects.create(username='unknown_password', password='foo$bar')
class UserCreationFormTest(TestDataMixin, TestCase):
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[str(User._meta.get_field('username').error_messages['unique'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_password_verification(self):
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[str(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [str(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
data = {
'username': 'jsmith@example.com',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
u = form.save()
self.assertEqual(password_changed.call_count, 1)
self.assertEqual(repr(u), '<User: jsmith@example.com>')
def test_unicode_username(self):
data = {
'username': '宝',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(u.username, '宝')
def test_normalize_username(self):
# The normalization happens in AbstractBaseUser.clean() and ModelForm
# validation calls Model.clean().
ohm_username = 'testΩ' # U+2126 OHM SIGN
data = {
'username': ohm_username,
'password1': 'pwd2',
'password2': 'pwd2',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertNotEqual(user.username, ohm_username)
self.assertEqual(user.username, 'testΩ') # U+03A9 GREEK CAPITAL LETTER OMEGA
def test_duplicate_normalized_unicode(self):
omega_username = 'iamtheΩ' # U+03A9 GREEK CAPITAL LETTER OMEGA
ohm_username = 'iamtheΩ' # U+2126 OHM SIGN
self.assertNotEqual(omega_username, ohm_username)
User.objects.create_user(username=omega_username, password='pwd')
data = {
'username': ohm_username,
'password1': 'pwd2',
'password2': 'pwd2',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['username'], ["A user with that username already exists."]
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form['password2'].errors), 2)
self.assertIn('The password is too similar to the username.', form['password2'].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form['password2'].errors
)
def test_custom_form(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = ExtensionUser
fields = UserCreationForm.Meta.fields + ('date_of_birth',)
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
'date_of_birth': '1988-02-24',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_custom_form_with_different_username_field(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUser
fields = ('email', 'date_of_birth')
data = {
'email': 'test@client222.com',
'password1': 'testclient',
'password2': 'testclient',
'date_of_birth': '1988-02-24',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_custom_form_hidden_username_field(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUserWithoutIsActiveField
fields = ('email',) # without USERNAME_FIELD
data = {
'email': 'testclient@example.com',
'password1': 'testclient',
'password2': 'testclient',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password1': ' testpassword ',
'password2': ' testpassword ',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
])
def test_password_help_text(self):
form = UserCreationForm()
self.assertEqual(
form.fields['password1'].help_text,
            '<ul><li>Your password can&#39;t be too similar to your other personal information.</li></ul>'
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
])
def test_user_create_form_validates_password_with_all_data(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = User
fields = ('username', 'email', 'first_name', 'last_name')
form = CustomUserCreationForm({
'username': 'testuser',
'password1': 'testpassword',
'password2': 'testpassword',
'first_name': 'testpassword',
'last_name': 'lastname',
})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['password2'],
['The password is too similar to the first name.'],
)
def test_username_field_autocapitalize_none(self):
form = UserCreationForm()
self.assertEqual(form.fields['username'].widget.attrs.get('autocapitalize'), 'none')
def test_html_autocomplete_attributes(self):
form = UserCreationForm()
tests = (
('username', 'username'),
('password1', 'new-password'),
('password2', 'new-password'),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(form.fields[field_name].widget.attrs['autocomplete'], autocomplete)
# To verify that the login form rejects inactive users, use an authentication
# backend that allows them.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
class AuthenticationFormTest(TestDataMixin, TestCase):
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.non_field_errors(), [
form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
}
]
)
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
# Use an authentication backend that rejects inactive users.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.ModelBackend'])
def test_inactive_user_incorrect_password(self):
data = {
'username': 'inactive',
'password': 'incorrect',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.non_field_errors(), [
form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
}
]
)
def test_login_failed(self):
signal_calls = []
def signal_handler(**kwargs):
signal_calls.append(kwargs)
user_login_failed.connect(signal_handler)
fake_request = object()
try:
form = AuthenticationForm(fake_request, {
'username': 'testclient',
'password': 'incorrect',
})
self.assertFalse(form.is_valid())
self.assertIs(signal_calls[0]['request'], fake_request)
finally:
user_login_failed.disconnect(signal_handler)
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
# Use an authentication backend that allows inactive users.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows them to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError("This user is disallowed.")
raise forms.ValidationError("Sorry, nobody's allowed in.")
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_unicode_username(self):
User.objects.create_user(username='Σαρα', password='pwd')
data = {
'username': 'Σαρα',
'password': 'pwd',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
@override_settings(AUTH_USER_MODEL='auth_tests.CustomEmailField')
def test_username_field_max_length_matches_user_model(self):
self.assertEqual(CustomEmailField._meta.get_field('username').max_length, 255)
data = {
'username': 'u' * 255,
'password': 'pwd',
'email': 'test@example.com',
}
CustomEmailField.objects.create_user(**data)
form = AuthenticationForm(None, data)
self.assertEqual(form.fields['username'].max_length, 255)
self.assertEqual(form.errors, {})
@override_settings(AUTH_USER_MODEL='auth_tests.IntegerUsernameUser')
def test_username_field_max_length_defaults_to_254(self):
self.assertIsNone(IntegerUsernameUser._meta.get_field('username').max_length)
data = {
'username': '0123456',
'password': 'password',
}
IntegerUsernameUser.objects.create_user(**data)
form = AuthenticationForm(None, data)
self.assertEqual(form.fields['username'].max_length, 254)
self.assertEqual(form.errors, {})
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
username_field = User._meta.get_field(User.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_autocapitalize_none(self):
form = AuthenticationForm()
self.assertEqual(form.fields['username'].widget.attrs.get('autocapitalize'), 'none')
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password': ' pass ',
}
form = AuthenticationForm(None, data)
        form.is_valid()  # Not necessary to have valid credentials for the test.
self.assertEqual(form.cleaned_data['password'], data['password'])
@override_settings(AUTH_USER_MODEL='auth_tests.IntegerUsernameUser')
def test_integer_username(self):
class CustomAuthenticationForm(AuthenticationForm):
username = IntegerField()
user = IntegerUsernameUser.objects.create_user(username=0, password='pwd')
data = {
'username': 0,
'password': 'pwd',
}
form = CustomAuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['username'], data['username'])
self.assertEqual(form.cleaned_data['password'], data['password'])
self.assertEqual(form.errors, {})
self.assertEqual(form.user_cache, user)
def test_get_invalid_login_error(self):
error = AuthenticationForm().get_invalid_login_error()
self.assertIsInstance(error, forms.ValidationError)
self.assertEqual(
error.message,
'Please enter a correct %(username)s and password. Note that both '
'fields may be case-sensitive.',
)
self.assertEqual(error.code, 'invalid_login')
self.assertEqual(error.params, {'username': 'username'})
def test_html_autocomplete_attributes(self):
form = AuthenticationForm()
tests = (
('username', 'username'),
('password', 'current-password'),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(form.fields[field_name].widget.attrs['autocomplete'], autocomplete)
class SetPasswordFormTest(TestDataMixin, TestCase):
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form["new_password2"].errors,
[str(form.error_messages['password_mismatch'])]
)
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'testclient',
'new_password2': 'testclient',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form["new_password2"].errors), 2)
self.assertIn('The password is too similar to the username.', form["new_password2"].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form["new_password2"].errors
)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': ' password ',
'new_password2': ' password ',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_help_text_translation(self):
french_help_texts = [
'Votre mot de passe ne peut pas trop ressembler à vos autres informations personnelles.',
'Votre mot de passe doit contenir au minimum 12 caractères.',
]
form = SetPasswordForm(self.u1)
with translation.override('fr'):
html = form.as_p()
for french_text in french_help_texts:
self.assertIn(french_text, html)
def test_html_autocomplete_attributes(self):
form = SetPasswordForm(self.u1)
tests = (
('new_password1', 'new-password'),
('new_password2', 'new-password'),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(form.fields[field_name].widget.attrs['autocomplete'], autocomplete)
class PasswordChangeFormTest(TestDataMixin, TestCase):
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors, [str(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors, [str(form.error_messages['password_mismatch'])])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields), ['old_password', 'new_password1', 'new_password2'])
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
user.set_password(' oldpassword ')
data = {
'old_password': ' oldpassword ',
'new_password1': ' pass ',
'new_password2': ' pass ',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['old_password'], data['old_password'])
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
def test_html_autocomplete_attributes(self):
user = User.objects.get(username='testclient')
form = PasswordChangeForm(user)
self.assertEqual(form.fields['old_password'].widget.attrs['autocomplete'], 'current-password')
class UserChangeFormTest(TestDataMixin, TestCase):
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
# UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_19133(self):
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
# original hashed password contains $
self.assertIn('$', form.cleaned_data['password'])
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
def test_custom_form(self):
class CustomUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = ExtensionUser
fields = ('username', 'password', 'date_of_birth',)
user = User.objects.get(username='testclient')
data = {
'username': 'testclient',
'password': 'testclient',
'date_of_birth': '1998-02-24',
}
form = CustomUserChangeForm(data, instance=user)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['username'], 'testclient')
self.assertEqual(form.cleaned_data['date_of_birth'], datetime.date(1998, 2, 24))
def test_password_excluded(self):
class UserChangeFormWithoutPassword(UserChangeForm):
password = None
class Meta:
model = User
exclude = ['password']
form = UserChangeFormWithoutPassword()
self.assertNotIn('password', form.fields)
def test_username_field_autocapitalize_none(self):
form = UserChangeForm()
self.assertEqual(form.fields['username'].widget.attrs.get('autocapitalize'), 'none')
@override_settings(TEMPLATES=AUTH_TEMPLATES)
class PasswordResetFormTest(TestDataMixin, TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
def create_dummy_user(self):
username = 'jsmith'
email = 'jsmith@example.com'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
data = {'email': 'foo@bar.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': 'testclient@example.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
        # Since we're not providing a request object, we must provide a
        # domain_override to prevent the save operation from failing in the
        # potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_custom_email_constructor(self):
data = {'email': 'testclient@example.com'}
class CustomEmailPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
EmailMultiAlternatives(
"Forgot your password?",
"Sorry to hear you forgot your password.",
None, [to_email],
['site_monitor@example.com'],
headers={'Reply-To': 'webmaster@example.com'},
alternatives=[
("Really sorry to hear you forgot your password.", "text/html")
],
).send()
form = CustomEmailPasswordResetForm(data)
self.assertTrue(form.is_valid())
        # Since we're not providing a request object, we must provide a
        # domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com'])
self.assertEqual(mail.outbox[0].content_subtype, "plain")
def test_preserve_username_case(self):
user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
self.assertEqual(user.email, 'tesT@example.com')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', 'test@example.com', 'test')
data = {"email": "test@example.com"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
def test_save_html_email_template_name(self):
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(re.match(
r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
message.get_payload(1).get_payload()
))
@override_settings(AUTH_USER_MODEL='auth_tests.CustomEmailField')
def test_custom_email_field(self):
email = 'test@mail.com'
CustomEmailField.objects.create_user('test name', 'test password', email)
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [email])
def test_html_autocomplete_attributes(self):
form = PasswordResetForm()
self.assertEqual(form.fields['email'].widget.attrs['autocomplete'], 'email')
class ReadOnlyPasswordHashTest(SimpleTestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.PBKDF2PasswordHasher'])
def test_render(self):
widget = ReadOnlyPasswordHashWidget()
value = 'pbkdf2_sha256$100000$a6Pucb1qSFcD$WmCkn9Hqidj48NVe5x0FEM6A9YiOqQcl/83m2Z5udm0='
self.assertHTMLEqual(
widget.render('name', value, {'id': 'id_password'}),
"""
<div id="id_password">
<strong>algorithm</strong>: pbkdf2_sha256
<strong>iterations</strong>: 100000
<strong>salt</strong>: a6Pucb******
<strong>hash</strong>: WmCkn9**************************************
</div>
"""
)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field.has_changed('aaa', 'bbb'))
class AdminPasswordChangeFormTest(TestDataMixin, TestCase):
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'password1': 'test123',
'password2': 'test123',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'password1': ' pass ',
'password2': ' pass ',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
def test_non_matching_passwords(self):
user = User.objects.get(username='testclient')
data = {'password1': 'password1', 'password2': 'password2'}
form = AdminPasswordChangeForm(user, data)
self.assertEqual(form.errors['password2'], [form.error_messages['password_mismatch']])
def test_missing_passwords(self):
user = User.objects.get(username='testclient')
data = {'password1': '', 'password2': ''}
form = AdminPasswordChangeForm(user, data)
required_error = [Field.default_error_messages['required']]
self.assertEqual(form.errors['password1'], required_error)
self.assertEqual(form.errors['password2'], required_error)
def test_one_password(self):
user = User.objects.get(username='testclient')
form1 = AdminPasswordChangeForm(user, {'password1': '', 'password2': 'test'})
required_error = [Field.default_error_messages['required']]
self.assertEqual(form1.errors['password1'], required_error)
self.assertNotIn('password2', form1.errors)
form2 = AdminPasswordChangeForm(user, {'password1': 'test', 'password2': ''})
self.assertEqual(form2.errors['password2'], required_error)
self.assertNotIn('password1', form2.errors)
def test_html_autocomplete_attributes(self):
user = User.objects.get(username='testclient')
form = AdminPasswordChangeForm(user)
tests = (
('password1', 'new-password'),
('password2', 'new-password'),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(form.fields[field_name].widget.attrs['autocomplete'], autocomplete)
| true
| true
|
f70a801c5d683c1dba1026dff2fe4f7c00cc9e36
| 1,524
|
py
|
Python
|
NiaPy/benchmarks/chungReynolds.py
|
tuahk/NiaPy
|
c863d801fda8e1949a3ca716a4de7c7ca3d0ea16
|
[
"MIT"
] | null | null | null |
NiaPy/benchmarks/chungReynolds.py
|
tuahk/NiaPy
|
c863d801fda8e1949a3ca716a4de7c7ca3d0ea16
|
[
"MIT"
] | null | null | null |
NiaPy/benchmarks/chungReynolds.py
|
tuahk/NiaPy
|
c863d801fda8e1949a3ca716a4de7c7ca3d0ea16
|
[
"MIT"
] | null | null | null |
# encoding=utf8
# pylint: disable=anomalous-backslash-in-string, old-style-class
import math
__all__ = ['ChungReynolds']
class ChungReynolds:
r"""Implementation of Chung Reynolds functions.
Date: 2018
Authors: Lucija Brezočnik
License: MIT
Function: **Chung Reynolds function**
:math:`f(\mathbf{x}) = \left(\sum_{i=1}^D x_i^2\right)^2`
**Input domain:**
The function can be defined on any input domain but it is usually
evaluated on the hypercube :math:`x_i ∈ [-100, 100]`, for all :math:`i = 1, 2,..., D`
**Global minimum:** :math:`f(x^*) = 0`, at :math:`x^* = (0,...,0)`
LaTeX formats:
Inline:
$f(\mathbf{x}) = \left(\sum_{i=1}^D x_i^2\right)^2$
Equation:
\begin{equation} f(\mathbf{x}) = \left(\sum_{i=1}^D x_i^2\right)^2 \end{equation}
Domain:
$-100 \leq x_i \leq 100$
Reference paper:
Jamil, M., and Yang, X. S. (2013).
A literature survey of benchmark functions for global optimisation problems.
International Journal of Mathematical Modelling and Numerical Optimisation,
4(2), 150-194.
"""
def __init__(self, Lower=-100.0, Upper=100.0):
self.Lower = Lower
self.Upper = Upper
@classmethod
def function(cls):
def evaluate(D, sol):
val = 0.0
for i in range(D):
val += math.pow(sol[i], 2)
return math.pow(val, 2)
return evaluate
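# A minimal usage sketch (an editorial addition, not part of the original
# NiaPy file): `function` returns a plain evaluator f(D, sol) that computes
# (sum_i sol[i]^2)^2, so for sol = [1.0, 2.0, 3.0] with D = 3 it yields
# (1 + 4 + 9)^2 = 196.
if __name__ == '__main__':
    chung_reynolds = ChungReynolds.function()
    print(chung_reynolds(3, [1.0, 2.0, 3.0]))  # prints 196.0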
| 25.4
| 97
| 0.566273
|
import math
__all__ = ['ChungReynolds']
class ChungReynolds:
def __init__(self, Lower=-100.0, Upper=100.0):
self.Lower = Lower
self.Upper = Upper
@classmethod
def function(cls):
def evaluate(D, sol):
val = 0.0
for i in range(D):
val += math.pow(sol[i], 2)
return math.pow(val, 2)
return evaluate
| true
| true
|
f70a811a53d750ec8a97ef9cb6bb7b23600aa0f9
| 3,184
|
bzl
|
Python
|
google/cloud/bigtable/bigtable_client.bzl
|
millerantonio810/google-cloud-cpp
|
71582d922bc22b0dcbc58234f36c726ea3b7c171
|
[
"Apache-2.0"
] | 3
|
2020-05-27T23:21:23.000Z
|
2020-05-31T22:31:53.000Z
|
google/cloud/bigtable/bigtable_client.bzl
|
millerantonio810/google-cloud-cpp
|
71582d922bc22b0dcbc58234f36c726ea3b7c171
|
[
"Apache-2.0"
] | 2
|
2020-05-31T22:26:57.000Z
|
2020-06-19T00:14:10.000Z
|
google/cloud/bigtable/bigtable_client.bzl
|
millerantonio810/google-cloud-cpp
|
71582d922bc22b0dcbc58234f36c726ea3b7c171
|
[
"Apache-2.0"
] | 1
|
2021-12-09T16:26:23.000Z
|
2021-12-09T16:26:23.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed
"""Automatically generated source lists for bigtable_client - DO NOT EDIT."""
bigtable_client_hdrs = [
"admin_client.h",
"app_profile_config.h",
"async_row_reader.h",
"cell.h",
"client_options.h",
"cluster_config.h",
"cluster_list_responses.h",
"column_family.h",
"completion_queue.h",
"data_client.h",
"expr.h",
"filters.h",
"iam_binding.h",
"iam_policy.h",
"idempotent_mutation_policy.h",
"instance_admin.h",
"instance_admin_client.h",
"instance_config.h",
"instance_list_responses.h",
"instance_update_config.h",
"internal/async_bulk_apply.h",
"internal/async_longrunning_op.h",
"internal/async_poll_op.h",
"internal/async_retry_multi_page.h",
"internal/async_retry_op.h",
"internal/async_retry_unary_rpc_and_poll.h",
"internal/bulk_mutator.h",
"internal/client_options_defaults.h",
"internal/common_client.h",
"internal/conjunction.h",
"internal/google_bytes_traits.h",
"internal/prefix_range_end.h",
"internal/readrowsparser.h",
"internal/rowreaderiterator.h",
"internal/rpc_policy_parameters.h",
"internal/rpc_policy_parameters.inc",
"internal/unary_client_utils.h",
"metadata_update_policy.h",
"mutation_batcher.h",
"mutations.h",
"polling_policy.h",
"read_modify_write_rule.h",
"row.h",
"row_key.h",
"row_key_sample.h",
"row_range.h",
"row_reader.h",
"row_set.h",
"rpc_backoff_policy.h",
"rpc_retry_policy.h",
"table.h",
"table_admin.h",
"table_config.h",
"version.h",
"version_info.h",
]
bigtable_client_srcs = [
"admin_client.cc",
"app_profile_config.cc",
"client_options.cc",
"cluster_config.cc",
"data_client.cc",
"expr.cc",
"iam_binding.cc",
"iam_policy.cc",
"idempotent_mutation_policy.cc",
"instance_admin.cc",
"instance_admin_client.cc",
"instance_config.cc",
"instance_update_config.cc",
"internal/async_bulk_apply.cc",
"internal/bulk_mutator.cc",
"internal/common_client.cc",
"internal/google_bytes_traits.cc",
"internal/prefix_range_end.cc",
"internal/readrowsparser.cc",
"internal/rowreaderiterator.cc",
"metadata_update_policy.cc",
"mutation_batcher.cc",
"mutations.cc",
"polling_policy.cc",
"row_range.cc",
"row_reader.cc",
"row_set.cc",
"rpc_backoff_policy.cc",
"rpc_retry_policy.cc",
"table.cc",
"table_admin.cc",
"table_config.cc",
"version.cc",
]
| 28.428571
| 79
| 0.682789
|
bigtable_client_hdrs = [
"admin_client.h",
"app_profile_config.h",
"async_row_reader.h",
"cell.h",
"client_options.h",
"cluster_config.h",
"cluster_list_responses.h",
"column_family.h",
"completion_queue.h",
"data_client.h",
"expr.h",
"filters.h",
"iam_binding.h",
"iam_policy.h",
"idempotent_mutation_policy.h",
"instance_admin.h",
"instance_admin_client.h",
"instance_config.h",
"instance_list_responses.h",
"instance_update_config.h",
"internal/async_bulk_apply.h",
"internal/async_longrunning_op.h",
"internal/async_poll_op.h",
"internal/async_retry_multi_page.h",
"internal/async_retry_op.h",
"internal/async_retry_unary_rpc_and_poll.h",
"internal/bulk_mutator.h",
"internal/client_options_defaults.h",
"internal/common_client.h",
"internal/conjunction.h",
"internal/google_bytes_traits.h",
"internal/prefix_range_end.h",
"internal/readrowsparser.h",
"internal/rowreaderiterator.h",
"internal/rpc_policy_parameters.h",
"internal/rpc_policy_parameters.inc",
"internal/unary_client_utils.h",
"metadata_update_policy.h",
"mutation_batcher.h",
"mutations.h",
"polling_policy.h",
"read_modify_write_rule.h",
"row.h",
"row_key.h",
"row_key_sample.h",
"row_range.h",
"row_reader.h",
"row_set.h",
"rpc_backoff_policy.h",
"rpc_retry_policy.h",
"table.h",
"table_admin.h",
"table_config.h",
"version.h",
"version_info.h",
]
bigtable_client_srcs = [
"admin_client.cc",
"app_profile_config.cc",
"client_options.cc",
"cluster_config.cc",
"data_client.cc",
"expr.cc",
"iam_binding.cc",
"iam_policy.cc",
"idempotent_mutation_policy.cc",
"instance_admin.cc",
"instance_admin_client.cc",
"instance_config.cc",
"instance_update_config.cc",
"internal/async_bulk_apply.cc",
"internal/bulk_mutator.cc",
"internal/common_client.cc",
"internal/google_bytes_traits.cc",
"internal/prefix_range_end.cc",
"internal/readrowsparser.cc",
"internal/rowreaderiterator.cc",
"metadata_update_policy.cc",
"mutation_batcher.cc",
"mutations.cc",
"polling_policy.cc",
"row_range.cc",
"row_reader.cc",
"row_set.cc",
"rpc_backoff_policy.cc",
"rpc_retry_policy.cc",
"table.cc",
"table_admin.cc",
"table_config.cc",
"version.cc",
]
| true
| true
|
f70a8219ec7071e721f28c56f09238a86a3a82ea
| 312
|
py
|
Python
|
models/devices.py
|
stephanGarland/PyNotion
|
74460e4792758c740b4e779772f734f97d7ad371
|
[
"MIT"
] | 9
|
2017-11-29T04:01:22.000Z
|
2022-02-06T09:19:24.000Z
|
models/devices.py
|
stephanGarland/PyNotion
|
74460e4792758c740b4e779772f734f97d7ad371
|
[
"MIT"
] | 3
|
2021-09-01T20:51:32.000Z
|
2021-09-03T16:30:48.000Z
|
models/devices.py
|
stephanGarland/PyNotion
|
74460e4792758c740b4e779772f734f97d7ad371
|
[
"MIT"
] | 1
|
2021-09-02T19:28:44.000Z
|
2021-09-02T19:28:44.000Z
|
class Device:
def __init__(self, id=None, token=None, platform=None, endpoint=None, created_at=None, updated_at=None):
self.id = id
self.token = token
self.platform = platform
self.endpoint = endpoint
self.created_at = created_at
self.updated_at = updated_at
| 31.2
| 108
| 0.650641
|
class Device:
def __init__(self, id=None, token=None, platform=None, endpoint=None, created_at=None, updated_at=None):
self.id = id
self.token = token
self.platform = platform
self.endpoint = endpoint
self.created_at = created_at
self.updated_at = updated_at
| true
| true
|
f70a8336bc5479f73419747b75d720f65693f002
| 3,574
|
py
|
Python
|
django_rq/management/commands/rqworker.py
|
UKTV/django_rq
|
681d8797eacda78a46db2897235b84b6929b8d16
|
[
"MIT"
] | null | null | null |
django_rq/management/commands/rqworker.py
|
UKTV/django_rq
|
681d8797eacda78a46db2897235b84b6929b8d16
|
[
"MIT"
] | null | null | null |
django_rq/management/commands/rqworker.py
|
UKTV/django_rq
|
681d8797eacda78a46db2897235b84b6929b8d16
|
[
"MIT"
] | 1
|
2017-06-07T16:03:35.000Z
|
2017-06-07T16:03:35.000Z
|
from distutils.version import LooseVersion
import os
import importlib
import logging
import sys
from django.core.management.base import BaseCommand
from django.utils.version import get_version
from django_rq.queues import get_queues
from django_rq.workers import get_exception_handlers
from redis.exceptions import ConnectionError
from rq import use_connection
from rq.utils import ColorizingStreamHandler
# Setup logging for RQWorker if not already configured
logger = logging.getLogger('rq.worker')
if not logger.handlers:
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(asctime)s %(message)s',
datefmt='%H:%M:%S')
handler = ColorizingStreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
# Copied from rq.utils
def import_attribute(name):
"""Return an attribute from a dotted path name (e.g. "path.to.func")."""
module_name, attribute = name.rsplit('.', 1)
module = importlib.import_module(module_name)
return getattr(module, attribute)
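# Illustrative note (not part of the original module): import_attribute('math.sqrt')
# imports the 'math' module and returns its 'sqrt' attribute, i.e. the same
# object as importlib.import_module('math').sqrt.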
class Command(BaseCommand):
"""
Runs RQ workers on specified queues. Note that all queues passed into a
single rqworker command must share the same connection.
Example usage:
python manage.py rqworker high medium low
"""
args = '<queue queue ...>'
def add_arguments(self, parser):
parser.add_argument('--worker-class', action='store', dest='worker_class',
default='rq.Worker', help='RQ Worker class to use')
parser.add_argument('--pid', action='store', dest='pid',
default=None, help='PID file to write the worker`s pid into')
parser.add_argument('--burst', action='store_true', dest='burst',
default=False, help='Run worker in burst mode')
parser.add_argument('--name', action='store', dest='name',
default=None, help='Name of the worker')
parser.add_argument('--queue-class', action='store', dest='queue_class',
default='django_rq.queues.DjangoRQ', help='Queues class to use')
parser.add_argument('--worker-ttl', action='store', type=int,
dest='worker_ttl', default=420,
help='Default worker timeout to be used')
if LooseVersion(get_version()) >= LooseVersion('1.10'):
parser.add_argument('args', nargs='*', type=str,
help='The queues to work on, separated by space')
def handle(self, *args, **options):
pid = options.get('pid')
if pid:
with open(os.path.expanduser(pid), "w") as fp:
fp.write(str(os.getpid()))
try:
# Instantiate a worker
worker_class = import_attribute(options['worker_class'])
queues = get_queues(*args, queue_class=import_attribute(options['queue_class']))
w = worker_class(
queues,
connection=queues[0].connection,
name=options['name'],
exception_handlers=get_exception_handlers() or None,
default_worker_ttl=options['worker_ttl']
)
# Call use_connection to push the redis connection into LocalStack
# without this, jobs using RQ's get_current_job() will fail
use_connection(w.connection)
w.work(burst=options.get('burst', False))
except ConnectionError as e:
print(e)
sys.exit(1)
| 39.274725
| 92
| 0.622832
|
from distutils.version import LooseVersion
import os
import importlib
import logging
import sys
from django.core.management.base import BaseCommand
from django.utils.version import get_version
from django_rq.queues import get_queues
from django_rq.workers import get_exception_handlers
from redis.exceptions import ConnectionError
from rq import use_connection
from rq.utils import ColorizingStreamHandler
logger = logging.getLogger('rq.worker')
if not logger.handlers:
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(asctime)s %(message)s',
datefmt='%H:%M:%S')
handler = ColorizingStreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
def import_attribute(name):
module_name, attribute = name.rsplit('.', 1)
module = importlib.import_module(module_name)
return getattr(module, attribute)
class Command(BaseCommand):
args = '<queue queue ...>'
def add_arguments(self, parser):
parser.add_argument('--worker-class', action='store', dest='worker_class',
default='rq.Worker', help='RQ Worker class to use')
parser.add_argument('--pid', action='store', dest='pid',
default=None, help='PID file to write the worker`s pid into')
parser.add_argument('--burst', action='store_true', dest='burst',
default=False, help='Run worker in burst mode')
parser.add_argument('--name', action='store', dest='name',
default=None, help='Name of the worker')
parser.add_argument('--queue-class', action='store', dest='queue_class',
default='django_rq.queues.DjangoRQ', help='Queues class to use')
parser.add_argument('--worker-ttl', action='store', type=int,
dest='worker_ttl', default=420,
help='Default worker timeout to be used')
if LooseVersion(get_version()) >= LooseVersion('1.10'):
parser.add_argument('args', nargs='*', type=str,
help='The queues to work on, separated by space')
def handle(self, *args, **options):
pid = options.get('pid')
if pid:
with open(os.path.expanduser(pid), "w") as fp:
fp.write(str(os.getpid()))
try:
worker_class = import_attribute(options['worker_class'])
queues = get_queues(*args, queue_class=import_attribute(options['queue_class']))
w = worker_class(
queues,
connection=queues[0].connection,
name=options['name'],
exception_handlers=get_exception_handlers() or None,
default_worker_ttl=options['worker_ttl']
)
use_connection(w.connection)
w.work(burst=options.get('burst', False))
except ConnectionError as e:
print(e)
sys.exit(1)
| true
| true
|
f70a8465913381e196aa41c4ceeea530e222a6a2
| 2,947
|
py
|
Python
|
src/utils/args.py
|
ioangatop/srVAE
|
dfee765c53f11f4653e7c6e7118a339832656867
|
[
"MIT"
] | 60
|
2020-06-11T11:06:15.000Z
|
2022-03-31T14:35:19.000Z
|
src/utils/args.py
|
ioangatop/srVAE
|
dfee765c53f11f4653e7c6e7118a339832656867
|
[
"MIT"
] | 9
|
2020-06-28T09:45:28.000Z
|
2020-12-30T15:20:19.000Z
|
src/utils/args.py
|
ioangatop/srVAE
|
dfee765c53f11f4653e7c6e7118a339832656867
|
[
"MIT"
] | 9
|
2020-07-28T12:03:32.000Z
|
2022-03-31T14:34:08.000Z
|
import torch
import argparse
# ----- Parser -----
def parser():
PARSER = argparse.ArgumentParser(description='Training parameters.')
# Dataset
PARSER.add_argument('--dataset', default='CIFAR10', type=str,
choices=['CIFAR10', 'CelebA', 'Imagenette', 'ImageNet32', 'ImageNet64'],
help="Data to be used.")
PARSER.add_argument('--img_resize', default=32, type=int,
help='Change image resolution.')
# Model
PARSER.add_argument('--model', default='VAE', type=str,
choices=['VAE', 'srVAE'],
help="Model to be used.")
PARSER.add_argument('--network', default='densenet32', type=str,
choices=['densenet32', 'densenet16x32'],
help="Neural Network architecture to be used.")
# Prior
PARSER.add_argument('--prior', default='MixtureOfGaussians', type=str,
choices=['StandardNormal', 'MixtureOfGaussians', 'RealNVP'],
help='Prior type.')
PARSER.add_argument('--z_dim', default=1024, type=int,
help='Dimensionality of z latent space.')
PARSER.add_argument('--u_dim', default=1024, type=int,
                        help='Dimensionality of u latent space.')
# data likelihood
PARSER.add_argument('--likelihood', default='dmol', type=str,
choices=['dmol'],
help="Type of likelihood.")
PARSER.add_argument('--iw_test', default=512, type=int,
help="Number of Importance Weighting samples used for approximating the test log-likelihood.")
# Training Parameters
PARSER.add_argument('--batch_size', default=32, type=int,
help='Batch size.')
PARSER.add_argument('--epochs', default=2000, type=int,
help='Number of training epochs.')
# General Configs
PARSER.add_argument('--seed', default=None, type=int,
help='Fix random seed.')
PARSER.add_argument('--n_samples', default=8, type=int,
help='Number of generated samples.')
PARSER.add_argument('--log_interval', default=True, type=bool,
help='Print progress on every batch.')
PARSER.add_argument('--device', default=None, type=str,
choices=['cpu', 'cuda'],
help='Device to run the experiment.')
PARSER.add_argument('--use_tb', default=True, type=bool,
help='Use TensorBoard.')
PARSER.add_argument('--tags', default='logs', type=str,
help='Run tags.')
ARGS = PARSER.parse_args()
# Check device
if ARGS.device is None:
ARGS.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return ARGS
args = parser()
if __name__ == "__main__":
pass
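# Illustrative CLI invocation (editorial sketch; the entry-point name
# `train.py` is hypothetical and the flag values are examples only):
#   python train.py --dataset CIFAR10 --model srVAE --network densenet16x32 --device cuda
# Flags left unspecified fall back to the defaults declared above; when
# --device is omitted, CUDA is selected automatically if it is available.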
| 38.272727
| 118
| 0.562606
|
import torch
import argparse
def parser():
PARSER = argparse.ArgumentParser(description='Training parameters.')
PARSER.add_argument('--dataset', default='CIFAR10', type=str,
choices=['CIFAR10', 'CelebA', 'Imagenette', 'ImageNet32', 'ImageNet64'],
help="Data to be used.")
PARSER.add_argument('--img_resize', default=32, type=int,
help='Change image resolution.')
PARSER.add_argument('--model', default='VAE', type=str,
choices=['VAE', 'srVAE'],
help="Model to be used.")
PARSER.add_argument('--network', default='densenet32', type=str,
choices=['densenet32', 'densenet16x32'],
help="Neural Network architecture to be used.")
PARSER.add_argument('--prior', default='MixtureOfGaussians', type=str,
choices=['StandardNormal', 'MixtureOfGaussians', 'RealNVP'],
help='Prior type.')
PARSER.add_argument('--z_dim', default=1024, type=int,
help='Dimensionality of z latent space.')
PARSER.add_argument('--u_dim', default=1024, type=int,
                        help='Dimensionality of u latent space.')
PARSER.add_argument('--likelihood', default='dmol', type=str,
choices=['dmol'],
help="Type of likelihood.")
PARSER.add_argument('--iw_test', default=512, type=int,
help="Number of Importance Weighting samples used for approximating the test log-likelihood.")
PARSER.add_argument('--batch_size', default=32, type=int,
help='Batch size.')
PARSER.add_argument('--epochs', default=2000, type=int,
help='Number of training epochs.')
PARSER.add_argument('--seed', default=None, type=int,
help='Fix random seed.')
PARSER.add_argument('--n_samples', default=8, type=int,
help='Number of generated samples.')
PARSER.add_argument('--log_interval', default=True, type=bool,
help='Print progress on every batch.')
PARSER.add_argument('--device', default=None, type=str,
choices=['cpu', 'cuda'],
help='Device to run the experiment.')
PARSER.add_argument('--use_tb', default=True, type=bool,
help='Use TensorBoard.')
PARSER.add_argument('--tags', default='logs', type=str,
help='Run tags.')
ARGS = PARSER.parse_args()
if ARGS.device is None:
ARGS.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return ARGS
args = parser()
if __name__ == "__main__":
pass
| true
| true
|
f70a84c63ca36138b87e96ed011c0fe5cf9d31bc
| 9,961
|
py
|
Python
|
third_party/catapult/dashboard/dashboard/models/histogram_test.py
|
zipated/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
third_party/catapult/dashboard/dashboard/models/histogram_test.py
|
cangulcan/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
third_party/catapult/dashboard/dashboard/models/histogram_test.py
|
cangulcan/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import sys
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import histogram
from tracing.value.diagnostics import reserved_infos
class SparseDiagnosticTest(testing_common.TestCase):
"""Test case for functions in SparseDiagnostic."""
def setUp(self):
super(SparseDiagnosticTest, self).setUp()
self.SetCurrentUser('foo@bar.com', is_admin=True)
def _AddMockData(self, test_key):
data_samples = {
'owners': [
{
'type': 'GenericSet',
'guid': '1',
'values': ['1']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['2']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['3']
},
],
'bugs': [
{
'type': 'GenericSet',
'guid': '1',
'values': ['a']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['b']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['c']
},
]
}
for k, diagnostic_samples in data_samples.iteritems():
for i in xrange(len(diagnostic_samples)):
start_revision = i * 10
end_revision = (i + 1) * 10 - 1
if i == len(diagnostic_samples) - 1:
end_revision = sys.maxint
e = histogram.SparseDiagnostic(
data=diagnostic_samples[i], test=test_key,
start_revision=start_revision, end_revision=end_revision,
name=k, internal_only=False)
e.put()
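  # Editorial note: _AddMockData leaves each of the 'owners' and 'bugs' names
  # with three diagnostics covering the revision ranges (0, 9), (10, 19) and
  # (20, sys.maxint). The tests below insert one more diagnostic and check how
  # FixDiagnostics splits or preserves those ranges.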
def testFixupDiagnostics_Middle_FixesRange(self):
test_key = utils.TestKey('Chromium/win7/foo')
self._AddMockData(test_key)
data = {
'type': 'GenericSet',
'guid': '1',
'values': ['10']
}
e = histogram.SparseDiagnostic(
data=data, test=test_key,
start_revision=5, end_revision=sys.maxint,
name='owners', internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
expected = {
'owners': [(0, 4), (5, 9), (10, 19), (20, sys.maxint)],
'bugs': [(0, 9), (10, 19), (20, sys.maxint)],
}
diags = histogram.SparseDiagnostic.query().fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testFixupDiagnostics_End_FixesRange(self):
test_key = utils.TestKey('Chromium/win7/foo')
self._AddMockData(test_key)
data = {
'type': 'GenericSet',
'guid': '1',
'values': ['10']
}
e = histogram.SparseDiagnostic(
data=data, test=test_key,
start_revision=100, end_revision=sys.maxint,
name='owners', internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
expected = {
'owners': [(0, 9), (10, 19), (20, 99), (100, sys.maxint)],
'bugs': [(0, 9), (10, 19), (20, sys.maxint)],
}
diags = histogram.SparseDiagnostic.query().fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testFixupDiagnostics_DifferentTestPath_NoChange(self):
test_key1 = utils.TestKey('Chromium/win7/1')
test_key2 = utils.TestKey('Chromium/win7/2')
self._AddMockData(test_key1)
self._AddMockData(test_key2)
data = {
'type': 'GenericSet',
'guid': '1',
'values': ['10']
}
e = histogram.SparseDiagnostic(
data=data, test=test_key1,
start_revision=5, end_revision=sys.maxint,
name='owners', internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key2).get_result()
expected = {
'owners': [(0, 9), (10, 19), (20, sys.maxint)],
'bugs': [(0, 9), (10, 19), (20, sys.maxint)],
}
diags = histogram.SparseDiagnostic.query(
histogram.SparseDiagnostic.test == test_key2).fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testFixupDiagnostics_NotUnique_NoChange(self):
test_key = utils.TestKey('Chromium/win7/foo')
self._AddMockData(test_key)
data = {
'type': 'GenericSet',
'guid': '1',
'values': ['1']
}
e = histogram.SparseDiagnostic(
data=data, test=test_key,
start_revision=5, end_revision=sys.maxint,
name='owners', internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
expected = {
'owners': [(0, 9), (10, 19), (20, sys.maxint)],
'bugs': [(0, 9), (10, 19), (20, sys.maxint)],
}
diags = histogram.SparseDiagnostic.query(
histogram.SparseDiagnostic.test == test_key).fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testGetMostRecentValuesByNames_ReturnAllData(self):
data_samples = [
{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'values': ['alice@chromium.org']
},
{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'values': ['abc']
}]
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_samples[0]), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_samples[0]['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
entity = histogram.SparseDiagnostic(
data=json.dumps(data_samples[1]), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_samples[1]['guid'],
name=reserved_infos.BUG_COMPONENTS.name)
entity.put()
lookup_result = histogram.SparseDiagnostic.GetMostRecentValuesByNames(
test_key, set([reserved_infos.OWNERS.name,
reserved_infos.BUG_COMPONENTS.name]))
self.assertEqual(lookup_result.get(reserved_infos.OWNERS.name),
['alice@chromium.org'])
self.assertEqual(lookup_result.get(reserved_infos.BUG_COMPONENTS.name),
['abc'])
def testGetMostRecentValuesByNames_ReturnsNoneIfNoneFound(self):
data_sample = {
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'values': ['alice@chromium.org']
}
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_sample), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_sample['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
lookup_result = histogram.SparseDiagnostic.GetMostRecentValuesByNames(
test_key, set([reserved_infos.OWNERS.name,
reserved_infos.BUG_COMPONENTS.name]))
self.assertEqual(lookup_result.get(reserved_infos.OWNERS.name),
['alice@chromium.org'])
self.assertIsNone(lookup_result.get(reserved_infos.BUG_COMPONENTS.name))
def testGetMostRecentValuesByNames_ReturnsNoneIfNoName(self):
data_sample = {
'guid': 'abc',
'osName': 'linux',
'type': 'DeviceInfo'
}
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_sample), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_sample['guid'])
entity.put()
lookup_result = histogram.SparseDiagnostic.GetMostRecentValuesByNames(
test_key, set([reserved_infos.OWNERS.name,
reserved_infos.BUG_COMPONENTS.name]))
self.assertIsNone(lookup_result.get(reserved_infos.OWNERS.name))
self.assertIsNone(lookup_result.get(reserved_infos.BUG_COMPONENTS.name))
def testGetMostRecentValuesByNames_RaisesErrorIfDuplicateName(self):
data_samples = [
{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'values': ['alice@chromium.org']
},
{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'values': ['bob@chromium.org']
}]
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_samples[0]), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_samples[0]['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
entity = histogram.SparseDiagnostic(
data=json.dumps(data_samples[1]), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_samples[1]['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
self.assertRaises(
AssertionError,
histogram.SparseDiagnostic.GetMostRecentValuesByNames,
test_key,
set([reserved_infos.OWNERS.name, reserved_infos.BUG_COMPONENTS.name]))
| 32.766447
| 78
| 0.609979
|
import json
import sys
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import histogram
from tracing.value.diagnostics import reserved_infos
class SparseDiagnosticTest(testing_common.TestCase):
def setUp(self):
super(SparseDiagnosticTest, self).setUp()
self.SetCurrentUser('foo@bar.com', is_admin=True)
def _AddMockData(self, test_key):
data_samples = {
'owners': [
{
'type': 'GenericSet',
'guid': '1',
'values': ['1']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['2']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['3']
},
],
'bugs': [
{
'type': 'GenericSet',
'guid': '1',
'values': ['a']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['b']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['c']
},
]
}
for k, diagnostic_samples in data_samples.iteritems():
for i in xrange(len(diagnostic_samples)):
start_revision = i * 10
end_revision = (i + 1) * 10 - 1
if i == len(diagnostic_samples) - 1:
end_revision = sys.maxint
e = histogram.SparseDiagnostic(
data=diagnostic_samples[i], test=test_key,
start_revision=start_revision, end_revision=end_revision,
name=k, internal_only=False)
e.put()
def testFixupDiagnostics_Middle_FixesRange(self):
test_key = utils.TestKey('Chromium/win7/foo')
self._AddMockData(test_key)
data = {
'type': 'GenericSet',
'guid': '1',
'values': ['10']
}
e = histogram.SparseDiagnostic(
data=data, test=test_key,
start_revision=5, end_revision=sys.maxint,
name='owners', internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
expected = {
'owners': [(0, 4), (5, 9), (10, 19), (20, sys.maxint)],
'bugs': [(0, 9), (10, 19), (20, sys.maxint)],
}
diags = histogram.SparseDiagnostic.query().fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testFixupDiagnostics_End_FixesRange(self):
test_key = utils.TestKey('Chromium/win7/foo')
self._AddMockData(test_key)
data = {
'type': 'GenericSet',
'guid': '1',
'values': ['10']
}
e = histogram.SparseDiagnostic(
data=data, test=test_key,
start_revision=100, end_revision=sys.maxint,
name='owners', internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
expected = {
'owners': [(0, 9), (10, 19), (20, 99), (100, sys.maxint)],
'bugs': [(0, 9), (10, 19), (20, sys.maxint)],
}
diags = histogram.SparseDiagnostic.query().fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testFixupDiagnostics_DifferentTestPath_NoChange(self):
test_key1 = utils.TestKey('Chromium/win7/1')
test_key2 = utils.TestKey('Chromium/win7/2')
self._AddMockData(test_key1)
self._AddMockData(test_key2)
data = {
'type': 'GenericSet',
'guid': '1',
'values': ['10']
}
e = histogram.SparseDiagnostic(
data=data, test=test_key1,
start_revision=5, end_revision=sys.maxint,
name='owners', internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key2).get_result()
expected = {
'owners': [(0, 9), (10, 19), (20, sys.maxint)],
'bugs': [(0, 9), (10, 19), (20, sys.maxint)],
}
diags = histogram.SparseDiagnostic.query(
histogram.SparseDiagnostic.test == test_key2).fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testFixupDiagnostics_NotUnique_NoChange(self):
test_key = utils.TestKey('Chromium/win7/foo')
self._AddMockData(test_key)
data = {
'type': 'GenericSet',
'guid': '1',
'values': ['1']
}
e = histogram.SparseDiagnostic(
data=data, test=test_key,
start_revision=5, end_revision=sys.maxint,
name='owners', internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
expected = {
'owners': [(0, 9), (10, 19), (20, sys.maxint)],
'bugs': [(0, 9), (10, 19), (20, sys.maxint)],
}
diags = histogram.SparseDiagnostic.query(
histogram.SparseDiagnostic.test == test_key).fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testGetMostRecentValuesByNames_ReturnAllData(self):
data_samples = [
{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'values': ['alice@chromium.org']
},
{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'values': ['abc']
}]
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_samples[0]), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_samples[0]['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
entity = histogram.SparseDiagnostic(
data=json.dumps(data_samples[1]), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_samples[1]['guid'],
name=reserved_infos.BUG_COMPONENTS.name)
entity.put()
lookup_result = histogram.SparseDiagnostic.GetMostRecentValuesByNames(
test_key, set([reserved_infos.OWNERS.name,
reserved_infos.BUG_COMPONENTS.name]))
self.assertEqual(lookup_result.get(reserved_infos.OWNERS.name),
['alice@chromium.org'])
self.assertEqual(lookup_result.get(reserved_infos.BUG_COMPONENTS.name),
['abc'])
def testGetMostRecentValuesByNames_ReturnsNoneIfNoneFound(self):
data_sample = {
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'values': ['alice@chromium.org']
}
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_sample), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_sample['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
lookup_result = histogram.SparseDiagnostic.GetMostRecentValuesByNames(
test_key, set([reserved_infos.OWNERS.name,
reserved_infos.BUG_COMPONENTS.name]))
self.assertEqual(lookup_result.get(reserved_infos.OWNERS.name),
['alice@chromium.org'])
self.assertIsNone(lookup_result.get(reserved_infos.BUG_COMPONENTS.name))
def testGetMostRecentValuesByNames_ReturnsNoneIfNoName(self):
data_sample = {
'guid': 'abc',
'osName': 'linux',
'type': 'DeviceInfo'
}
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_sample), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_sample['guid'])
entity.put()
lookup_result = histogram.SparseDiagnostic.GetMostRecentValuesByNames(
test_key, set([reserved_infos.OWNERS.name,
reserved_infos.BUG_COMPONENTS.name]))
self.assertIsNone(lookup_result.get(reserved_infos.OWNERS.name))
self.assertIsNone(lookup_result.get(reserved_infos.BUG_COMPONENTS.name))
def testGetMostRecentValuesByNames_RaisesErrorIfDuplicateName(self):
data_samples = [
{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'values': ['alice@chromium.org']
},
{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'values': ['bob@chromium.org']
}]
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_samples[0]), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_samples[0]['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
entity = histogram.SparseDiagnostic(
data=json.dumps(data_samples[1]), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_samples[1]['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
self.assertRaises(
AssertionError,
histogram.SparseDiagnostic.GetMostRecentValuesByNames,
test_key,
set([reserved_infos.OWNERS.name, reserved_infos.BUG_COMPONENTS.name]))
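# Hedged illustration (not part of the original test file): the FixDiagnostics
# expectations above follow a simple rule -- inserting a unique diagnostic at
# revision r splits the existing range that contains r. A minimal standalone
# sketch of that rule; _split_ranges is a hypothetical helper, not dashboard code.
def _split_ranges(ranges, r):
  """ranges: sorted (start, end) tuples; r: start revision of a new diagnostic."""
  out = []
  for start, end in ranges:
    if start < r <= end:
      out.extend([(start, r - 1), (r, end)])
    else:
      out.append((start, end))
  return out
# Example: _split_ranges([(0, 9), (10, 19)], 5) == [(0, 4), (5, 9), (10, 19)]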
| true
| true
|
f70a85945485651bb9a81ecb734bd755346a98cc
| 3,208
|
py
|
Python
|
meetnowport/settings.py
|
bonaw/Meetnow
|
02b77af78db7fa403a5ecee49ee1c64eea893a7a
|
[
"MIT"
] | null | null | null |
meetnowport/settings.py
|
bonaw/Meetnow
|
02b77af78db7fa403a5ecee49ee1c64eea893a7a
|
[
"MIT"
] | null | null | null |
meetnowport/settings.py
|
bonaw/Meetnow
|
02b77af78db7fa403a5ecee49ee1c64eea893a7a
|
[
"MIT"
] | null | null | null |
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a6j(qtzl$#pd2g^fm+=g27^^r&%gz6sh!o45ekij=--bj)^qx$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
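# Hedged example (not part of the original settings): the SECURITY WARNINGs
# above suggest keeping SECRET_KEY out of source control and DEBUG off in
# production. One common pattern, assuming hypothetical DJANGO_SECRET_KEY and
# DJANGO_DEBUG environment variables:
#
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'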
| 26.512397
| 92
| 0.668329
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'a6j(qtzl$#pd2g^fm+=g27^^r&%gz6sh!o45ekij=--bj)^qx$'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| true
| true
|
f70a859d84435c0a6414934e105c3751a53b1cad
| 161
|
py
|
Python
|
uclasm/matching/filters/__init__.py
|
cfld/uclasm
|
dbdbe99fa8bd6e85a7e90ac2e666c1e667c62d57
|
[
"MIT"
] | null | null | null |
uclasm/matching/filters/__init__.py
|
cfld/uclasm
|
dbdbe99fa8bd6e85a7e90ac2e666c1e667c62d57
|
[
"MIT"
] | null | null | null |
uclasm/matching/filters/__init__.py
|
cfld/uclasm
|
dbdbe99fa8bd6e85a7e90ac2e666c1e667c62d57
|
[
"MIT"
] | null | null | null |
"""Provide functions for filtering."""
from .stats_filter import stats_filter
from .topology_filter import topology_filter
from .run_filters import run_filters
| 26.833333
| 44
| 0.832298
|
from .stats_filter import stats_filter
from .topology_filter import topology_filter
from .run_filters import run_filters
| true
| true
|
f70a8613e1d5d47e63f3f531a3bb99f989be6e47
| 11,728
|
py
|
Python
|
libcxx/utils/libcxx/test/features.py
|
jinge90/llvm
|
1f3f9b9b1181feb559e85970155678c18a436711
|
[
"Apache-2.0"
] | null | null | null |
libcxx/utils/libcxx/test/features.py
|
jinge90/llvm
|
1f3f9b9b1181feb559e85970155678c18a436711
|
[
"Apache-2.0"
] | null | null | null |
libcxx/utils/libcxx/test/features.py
|
jinge90/llvm
|
1f3f9b9b1181feb559e85970155678c18a436711
|
[
"Apache-2.0"
] | null | null | null |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from libcxx.test.dsl import *
import re
import shutil
import sys
import subprocess
_isClang = lambda cfg: '__clang__' in compilerMacros(cfg) and '__apple_build_version__' not in compilerMacros(cfg)
_isAppleClang = lambda cfg: '__apple_build_version__' in compilerMacros(cfg)
_isGCC = lambda cfg: '__GNUC__' in compilerMacros(cfg) and '__clang__' not in compilerMacros(cfg)
_isMSVC = lambda cfg: '_MSC_VER' in compilerMacros(cfg)
_msvcVersion = lambda cfg: (int(compilerMacros(cfg)['_MSC_VER']) // 100, int(compilerMacros(cfg)['_MSC_VER']) % 100)
DEFAULT_FEATURES = [
Feature(name='fcoroutines-ts',
when=lambda cfg: hasCompileFlag(cfg, '-fcoroutines-ts') and
featureTestMacros(cfg, flags='-fcoroutines-ts').get('__cpp_coroutines', 0) >= 201703,
actions=[AddCompileFlag('-fcoroutines-ts')]),
Feature(name='thread-safety',
when=lambda cfg: hasCompileFlag(cfg, '-Werror=thread-safety'),
actions=[AddCompileFlag('-Werror=thread-safety')]),
Feature(name='diagnose-if-support',
when=lambda cfg: hasCompileFlag(cfg, '-Wuser-defined-warnings'),
actions=[AddCompileFlag('-Wuser-defined-warnings')]),
Feature(name='has-fblocks', when=lambda cfg: hasCompileFlag(cfg, '-fblocks')),
Feature(name='-fsized-deallocation', when=lambda cfg: hasCompileFlag(cfg, '-fsized-deallocation')),
Feature(name='-faligned-allocation', when=lambda cfg: hasCompileFlag(cfg, '-faligned-allocation')),
Feature(name='fdelayed-template-parsing', when=lambda cfg: hasCompileFlag(cfg, '-fdelayed-template-parsing')),
Feature(name='libcpp-no-concepts', when=lambda cfg: featureTestMacros(cfg).get('__cpp_concepts', 0) < 201907),
Feature(name='libcpp-no-coroutines', when=lambda cfg: featureTestMacros(cfg).get('__cpp_impl_coroutine', 0) < 201902),
Feature(name='has-fobjc-arc', when=lambda cfg: hasCompileFlag(cfg, '-xobjective-c++ -fobjc-arc') and
sys.platform.lower().strip() == 'darwin'), # TODO: this doesn't handle cross-compiling to Apple platforms.
Feature(name='objective-c++', when=lambda cfg: hasCompileFlag(cfg, '-xobjective-c++ -fobjc-arc')),
Feature(name='non-lockfree-atomics',
when=lambda cfg: sourceBuilds(cfg, """
#include <atomic>
struct Large { int storage[100]; };
std::atomic<Large> x;
int main(int, char**) { (void)x.load(); return 0; }
""")),
# TODO: Remove this feature once compiler-rt includes __atomic_is_lockfree()
# on all supported platforms.
Feature(name='is-lockfree-runtime-function',
when=lambda cfg: sourceBuilds(cfg, """
#include <atomic>
struct Large { int storage[100]; };
std::atomic<Large> x;
int main(int, char**) { return x.is_lock_free(); }
""")),
# Some tests rely on creating shared libraries which link in the C++ Standard Library. In some
# cases, this doesn't work (e.g. if the library was built as a static archive and wasn't compiled
# as position independent). This feature informs the test suite of whether it's possible to create
# a shared library in a shell test by using the '-shared' compiler flag.
#
# Note: To implement this check properly, we need to make sure that we use something inside the
# compiled library, not only in the headers. It should be safe to assume that all implementations
# define `operator new` in the compiled library.
Feature(name='cant-build-shared-library',
when=lambda cfg: not sourceBuilds(cfg, """
void f() { new int(3); }
""", ['-shared'])),
Feature(name='apple-clang', when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}.{__clang_minor__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}.{__clang_minor__}.{__clang_patchlevel__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name='clang', when=_isClang,
actions=[AddCompileFlag('-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER')]),
Feature(name=lambda cfg: 'clang-{__clang_major__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name=lambda cfg: 'clang-{__clang_major__}.{__clang_minor__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name=lambda cfg: 'clang-{__clang_major__}.{__clang_minor__}.{__clang_patchlevel__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name='gcc', when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}.{__GNUC_MINOR__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}.{__GNUC_MINOR__}.{__GNUC_PATCHLEVEL__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name='msvc', when=_isMSVC),
Feature(name=lambda cfg: 'msvc-{}'.format(*_msvcVersion(cfg)), when=_isMSVC),
Feature(name=lambda cfg: 'msvc-{}.{}'.format(*_msvcVersion(cfg)), when=_isMSVC),
]
# Deduce and add the test features that are implied by the #defines in
# the <__config_site> header.
#
# For each macro of the form `_LIBCPP_XXX_YYY_ZZZ` defined below that
# is defined after including <__config_site>, add a Lit feature called
# `libcpp-xxx-yyy-zzz`. When a macro is defined to a specific value
# (e.g. `_LIBCPP_ABI_VERSION=2`), the feature is `libcpp-xxx-yyy-zzz=<value>`.
macros = {
'_LIBCPP_HAS_NO_MONOTONIC_CLOCK': 'libcpp-has-no-monotonic-clock',
'_LIBCPP_HAS_NO_THREADS': 'libcpp-has-no-threads',
'_LIBCPP_HAS_THREAD_API_EXTERNAL': 'libcpp-has-thread-api-external',
'_LIBCPP_HAS_THREAD_API_PTHREAD': 'libcpp-has-thread-api-pthread',
'_LIBCPP_NO_VCRUNTIME': 'libcpp-no-vcruntime',
'_LIBCPP_ABI_VERSION': 'libcpp-abi-version',
'_LIBCPP_ABI_UNSTABLE': 'libcpp-abi-unstable',
'_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY': 'libcpp-has-no-filesystem-library',
'_LIBCPP_HAS_NO_RANDOM_DEVICE': 'libcpp-has-no-random-device',
'_LIBCPP_HAS_NO_LOCALIZATION': 'libcpp-has-no-localization',
'_LIBCPP_HAS_NO_WIDE_CHARACTERS': 'libcpp-has-no-wide-characters',
'_LIBCPP_HAS_NO_INCOMPLETE_FORMAT': 'libcpp-has-no-incomplete-format',
'_LIBCPP_HAS_NO_INCOMPLETE_RANGES': 'libcpp-has-no-incomplete-ranges',
'_LIBCPP_HAS_NO_UNICODE': 'libcpp-has-no-unicode',
}
for macro, feature in macros.items():
DEFAULT_FEATURES += [
Feature(name=lambda cfg, m=macro, f=feature: f + (
'={}'.format(compilerMacros(cfg)[m]) if compilerMacros(cfg)[m] else ''
),
when=lambda cfg, m=macro: m in compilerMacros(cfg),
# FIXME: This is a hack that should be fixed using module maps.
# If modules are enabled then we have to lift all of the definitions
# in <__config_site> onto the command line.
actions=lambda cfg, m=macro: [
AddCompileFlag('-Wno-macro-redefined -D{}'.format(m) + (
'={}'.format(compilerMacros(cfg)[m]) if compilerMacros(cfg)[m] else ''
))
]
)
]
# Mapping from canonical locale names (used in the tests) to possible locale
# names on various systems. Each locale is considered supported if any of the
# alternative names is supported.
locales = {
'en_US.UTF-8': ['en_US.UTF-8', 'en_US.utf8', 'English_United States.1252'],
'fr_FR.UTF-8': ['fr_FR.UTF-8', 'fr_FR.utf8', 'French_France.1252'],
'ru_RU.UTF-8': ['ru_RU.UTF-8', 'ru_RU.utf8', 'Russian_Russia.1251'],
'zh_CN.UTF-8': ['zh_CN.UTF-8', 'zh_CN.utf8', 'Chinese_China.936'],
'fr_CA.ISO8859-1': ['fr_CA.ISO8859-1', 'French_Canada.1252'],
'cs_CZ.ISO8859-2': ['cs_CZ.ISO8859-2', 'Czech_Czech Republic.1250']
}
for locale, alts in locales.items():
# Note: Using alts directly in the lambda body here will bind it to the value at the
# end of the loop. Assigning it to a default argument works around this issue.
DEFAULT_FEATURES.append(Feature(name='locale.{}'.format(locale),
when=lambda cfg, alts=alts: hasAnyLocale(cfg, alts)))
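# Hedged aside (not part of the original file): the ``alts=alts`` default
# argument above works around Python's late binding of closures. A minimal
# demonstration of the gotcha this avoids:
#
#   fns = [lambda: i for i in range(3)]
#   [f() for f in fns]          # [2, 2, 2] -- every lambda sees the final i
#   fns = [lambda i=i: i for i in range(3)]
#   [f() for f in fns]          # [0, 1, 2] -- i is captured per iteration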
# Add features representing the platform name: darwin, linux, windows, etc...
DEFAULT_FEATURES += [
Feature(name='darwin', when=lambda cfg: '__APPLE__' in compilerMacros(cfg)),
Feature(name='windows', when=lambda cfg: '_WIN32' in compilerMacros(cfg)),
Feature(name='windows-dll', when=lambda cfg: '_WIN32' in compilerMacros(cfg) and not '_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS' in compilerMacros(cfg)),
Feature(name='linux', when=lambda cfg: '__linux__' in compilerMacros(cfg)),
Feature(name='netbsd', when=lambda cfg: '__NetBSD__' in compilerMacros(cfg)),
Feature(name='freebsd', when=lambda cfg: '__FreeBSD__' in compilerMacros(cfg))
]
# Add features representing the build host platform name.
# The build host could differ from the target platform for cross-compilation.
DEFAULT_FEATURES += [
Feature(name='buildhost={}'.format(sys.platform.lower().strip())),
    # sys.platform can take several "sub-system" values on a Windows host, such as 'win32', 'cygwin', and 'mingw'.
    # Here is a consolidated feature for the build host platform name on Windows.
Feature(name='buildhost=windows', when=lambda cfg: platform.system().lower().startswith('windows'))
]
# Detect whether GDB is on the system, has Python scripting and supports
# adding breakpoint commands. If so add a substitution to access it.
def check_gdb(cfg):
gdb_path = shutil.which('gdb')
if gdb_path is None:
return False
# Check that we can set breakpoint commands, which was added in 8.3.
# Using the quit command here means that gdb itself exits, not just
# the "python <...>" command.
test_src = """\
try:
gdb.Breakpoint(\"main\").commands=\"foo\"
except AttributeError:
gdb.execute(\"quit 1\")
gdb.execute(\"quit\")"""
try:
stdout = subprocess.check_output(
[gdb_path, "-ex", "python " + test_src, "--batch"],
stderr=subprocess.DEVNULL, universal_newlines=True)
except subprocess.CalledProcessError:
# We can't set breakpoint commands
return False
# Check we actually ran the Python
return not "Python scripting is not supported" in stdout
DEFAULT_FEATURES += [
Feature(name='host-has-gdb-with-python',
when=check_gdb,
actions=[AddSubstitution('%{gdb}', lambda cfg: shutil.which('gdb'))]
)
]
| 56.114833
| 171
| 0.632418
|
from libcxx.test.dsl import *
import re
import shutil
import sys
import subprocess
_isClang = lambda cfg: '__clang__' in compilerMacros(cfg) and '__apple_build_version__' not in compilerMacros(cfg)
_isAppleClang = lambda cfg: '__apple_build_version__' in compilerMacros(cfg)
_isGCC = lambda cfg: '__GNUC__' in compilerMacros(cfg) and '__clang__' not in compilerMacros(cfg)
_isMSVC = lambda cfg: '_MSC_VER' in compilerMacros(cfg)
_msvcVersion = lambda cfg: (int(compilerMacros(cfg)['_MSC_VER']) // 100, int(compilerMacros(cfg)['_MSC_VER']) % 100)
DEFAULT_FEATURES = [
Feature(name='fcoroutines-ts',
when=lambda cfg: hasCompileFlag(cfg, '-fcoroutines-ts') and
featureTestMacros(cfg, flags='-fcoroutines-ts').get('__cpp_coroutines', 0) >= 201703,
actions=[AddCompileFlag('-fcoroutines-ts')]),
Feature(name='thread-safety',
when=lambda cfg: hasCompileFlag(cfg, '-Werror=thread-safety'),
actions=[AddCompileFlag('-Werror=thread-safety')]),
Feature(name='diagnose-if-support',
when=lambda cfg: hasCompileFlag(cfg, '-Wuser-defined-warnings'),
actions=[AddCompileFlag('-Wuser-defined-warnings')]),
Feature(name='has-fblocks', when=lambda cfg: hasCompileFlag(cfg, '-fblocks')),
Feature(name='-fsized-deallocation', when=lambda cfg: hasCompileFlag(cfg, '-fsized-deallocation')),
Feature(name='-faligned-allocation', when=lambda cfg: hasCompileFlag(cfg, '-faligned-allocation')),
Feature(name='fdelayed-template-parsing', when=lambda cfg: hasCompileFlag(cfg, '-fdelayed-template-parsing')),
Feature(name='libcpp-no-concepts', when=lambda cfg: featureTestMacros(cfg).get('__cpp_concepts', 0) < 201907),
Feature(name='libcpp-no-coroutines', when=lambda cfg: featureTestMacros(cfg).get('__cpp_impl_coroutine', 0) < 201902),
Feature(name='has-fobjc-arc', when=lambda cfg: hasCompileFlag(cfg, '-xobjective-c++ -fobjc-arc') and
sys.platform.lower().strip() == 'darwin'),
Feature(name='objective-c++', when=lambda cfg: hasCompileFlag(cfg, '-xobjective-c++ -fobjc-arc')),
Feature(name='non-lockfree-atomics',
when=lambda cfg: sourceBuilds(cfg, """
#include <atomic>
struct Large { int storage[100]; };
std::atomic<Large> x;
int main(int, char**) { (void)x.load(); return 0; }
""")),
# TODO: Remove this feature once compiler-rt includes __atomic_is_lockfree()
# on all supported platforms.
Feature(name='is-lockfree-runtime-function',
when=lambda cfg: sourceBuilds(cfg, """
#include <atomic>
struct Large { int storage[100]; };
std::atomic<Large> x;
int main(int, char**) { return x.is_lock_free(); }
""")),
# Some tests rely on creating shared libraries which link in the C++ Standard Library. In some
# cases, this doesn't work (e.g. if the library was built as a static archive and wasn't compiled
# as position independent). This feature informs the test suite of whether it's possible to create
Feature(name='cant-build-shared-library',
when=lambda cfg: not sourceBuilds(cfg, """
void f() { new int(3); }
""", ['-shared'])),
Feature(name='apple-clang', when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}.{__clang_minor__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}.{__clang_minor__}.{__clang_patchlevel__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name='clang', when=_isClang,
actions=[AddCompileFlag('-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER')]),
Feature(name=lambda cfg: 'clang-{__clang_major__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name=lambda cfg: 'clang-{__clang_major__}.{__clang_minor__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name=lambda cfg: 'clang-{__clang_major__}.{__clang_minor__}.{__clang_patchlevel__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name='gcc', when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}.{__GNUC_MINOR__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}.{__GNUC_MINOR__}.{__GNUC_PATCHLEVEL__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name='msvc', when=_isMSVC),
Feature(name=lambda cfg: 'msvc-{}'.format(*_msvcVersion(cfg)), when=_isMSVC),
Feature(name=lambda cfg: 'msvc-{}.{}'.format(*_msvcVersion(cfg)), when=_isMSVC),
]
macros = {
'_LIBCPP_HAS_NO_MONOTONIC_CLOCK': 'libcpp-has-no-monotonic-clock',
'_LIBCPP_HAS_NO_THREADS': 'libcpp-has-no-threads',
'_LIBCPP_HAS_THREAD_API_EXTERNAL': 'libcpp-has-thread-api-external',
'_LIBCPP_HAS_THREAD_API_PTHREAD': 'libcpp-has-thread-api-pthread',
'_LIBCPP_NO_VCRUNTIME': 'libcpp-no-vcruntime',
'_LIBCPP_ABI_VERSION': 'libcpp-abi-version',
'_LIBCPP_ABI_UNSTABLE': 'libcpp-abi-unstable',
'_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY': 'libcpp-has-no-filesystem-library',
'_LIBCPP_HAS_NO_RANDOM_DEVICE': 'libcpp-has-no-random-device',
'_LIBCPP_HAS_NO_LOCALIZATION': 'libcpp-has-no-localization',
'_LIBCPP_HAS_NO_WIDE_CHARACTERS': 'libcpp-has-no-wide-characters',
'_LIBCPP_HAS_NO_INCOMPLETE_FORMAT': 'libcpp-has-no-incomplete-format',
'_LIBCPP_HAS_NO_INCOMPLETE_RANGES': 'libcpp-has-no-incomplete-ranges',
'_LIBCPP_HAS_NO_UNICODE': 'libcpp-has-no-unicode',
}
for macro, feature in macros.items():
DEFAULT_FEATURES += [
Feature(name=lambda cfg, m=macro, f=feature: f + (
'={}'.format(compilerMacros(cfg)[m]) if compilerMacros(cfg)[m] else ''
),
when=lambda cfg, m=macro: m in compilerMacros(cfg),
actions=lambda cfg, m=macro: [
AddCompileFlag('-Wno-macro-redefined -D{}'.format(m) + (
'={}'.format(compilerMacros(cfg)[m]) if compilerMacros(cfg)[m] else ''
))
]
)
]
locales = {
'en_US.UTF-8': ['en_US.UTF-8', 'en_US.utf8', 'English_United States.1252'],
'fr_FR.UTF-8': ['fr_FR.UTF-8', 'fr_FR.utf8', 'French_France.1252'],
'ru_RU.UTF-8': ['ru_RU.UTF-8', 'ru_RU.utf8', 'Russian_Russia.1251'],
'zh_CN.UTF-8': ['zh_CN.UTF-8', 'zh_CN.utf8', 'Chinese_China.936'],
'fr_CA.ISO8859-1': ['fr_CA.ISO8859-1', 'French_Canada.1252'],
'cs_CZ.ISO8859-2': ['cs_CZ.ISO8859-2', 'Czech_Czech Republic.1250']
}
for locale, alts in locales.items():
DEFAULT_FEATURES.append(Feature(name='locale.{}'.format(locale),
when=lambda cfg, alts=alts: hasAnyLocale(cfg, alts)))
DEFAULT_FEATURES += [
Feature(name='darwin', when=lambda cfg: '__APPLE__' in compilerMacros(cfg)),
Feature(name='windows', when=lambda cfg: '_WIN32' in compilerMacros(cfg)),
Feature(name='windows-dll', when=lambda cfg: '_WIN32' in compilerMacros(cfg) and not '_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS' in compilerMacros(cfg)),
Feature(name='linux', when=lambda cfg: '__linux__' in compilerMacros(cfg)),
Feature(name='netbsd', when=lambda cfg: '__NetBSD__' in compilerMacros(cfg)),
Feature(name='freebsd', when=lambda cfg: '__FreeBSD__' in compilerMacros(cfg))
]
DEFAULT_FEATURES += [
Feature(name='buildhost={}'.format(sys.platform.lower().strip())),
Feature(name='buildhost=windows', when=lambda cfg: platform.system().lower().startswith('windows'))
]
def check_gdb(cfg):
gdb_path = shutil.which('gdb')
if gdb_path is None:
return False
test_src = """\
try:
gdb.Breakpoint(\"main\").commands=\"foo\"
except AttributeError:
gdb.execute(\"quit 1\")
gdb.execute(\"quit\")"""
try:
stdout = subprocess.check_output(
[gdb_path, "-ex", "python " + test_src, "--batch"],
stderr=subprocess.DEVNULL, universal_newlines=True)
except subprocess.CalledProcessError:
return False
# Check we actually ran the Python
return not "Python scripting is not supported" in stdout
DEFAULT_FEATURES += [
Feature(name='host-has-gdb-with-python',
when=check_gdb,
actions=[AddSubstitution('%{gdb}', lambda cfg: shutil.which('gdb'))]
)
]
| true
| true
|
f70a86e1f718625d42509fc16e98474c738aa896
| 4,104
|
py
|
Python
|
lib/composite/LiPolymerDataScaler.py
|
KanHatakeyama/annealing_project
|
eac2dfe65c480450a5d12b09db2c1c9f83d03389
|
[
"MIT"
] | null | null | null |
lib/composite/LiPolymerDataScaler.py
|
KanHatakeyama/annealing_project
|
eac2dfe65c480450a5d12b09db2c1c9f83d03389
|
[
"MIT"
] | null | null | null |
lib/composite/LiPolymerDataScaler.py
|
KanHatakeyama/annealing_project
|
eac2dfe65c480450a5d12b09db2c1c9f83d03389
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from DataUtility import get_column_names
class LiPolymerDataScaler:
"""
a special class to scale the lithium polymer database
"""
def __init__(self):
self.scaling_dict = {}
self.main_val_params = ["SMILES_wt", "wt_ratio", "inorg_contain_ratio"]
self.main_txt_params = ["structureList", "inorg_name"]
self.main_params = self.main_val_params+self.main_txt_params
self.target_param = "Conductivity"
def mutual_process(self, df):
"""
        Convert values to comparable scales (log-transform conductivity, temperature, and molecular weights) and fill missing composition values with zero.
"""
df["Conductivity"] = np.log10(df["Conductivity"].astype('float'))
df["Temperature"] = np.log10(df["Temperature"].astype('float')+273)
# fill Nan by zero
for c in self.main_params:
target_columns = get_column_names(df, c)
df[target_columns] = df[target_columns].fillna(0)
# convert molecular weight
self.mw_column_list = get_column_names(df, "MWList")
for c in self.mw_column_list:
df[c] = np.log10(df[c].astype('float'))
return df
def fit_transform(self, original_df):
"""
        Fit the scalers on the data and transform it (standardize numeric columns, one-hot encode text columns).
Parameters
----------------
original_df: dataframe
dataframe to be scaled
Returns
----------------
df: dataframe
scaled dataframe
"""
df = original_df.copy()
df = self.mutual_process(df)
# fill lacking Molecular weight with average value
self.average_mw = sum(df[self.mw_column_list].sum()) / \
sum(df[self.mw_column_list].count())
for c in self.mw_column_list:
df[c] = df[c].fillna(self.average_mw)
# scaling
for v in self.main_val_params + ["Conductivity", "Temperature"]+self.mw_column_list:
for c in get_column_names(df, v):
sc = StandardScaler()
df[c] = sc.fit_transform(
df[c].astype('float').values.reshape(-1, 1))
self.scaling_dict[c] = sc
# onehot encoding
for v in self.main_txt_params:
df = pd.get_dummies(df, columns=get_column_names(df, v))
self.use_columns = []
for c in ["Conductivity", "Temperature"]+self.main_params + self.mw_column_list+["fp_list"]:
self.use_columns.extend(get_column_names(df, c))
"""
**********************************************************
        delete some columns to simplify machine learning;
        the following parameters can be useful for machine learning (10.1021/jacs.9b11442), but are ignored in this project.
"""
for remove_targets in ["MWList", "wt_ratio", "inorg", "structure", "Temperature"]:
del_columns = get_column_names(df, remove_targets)
for i in del_columns:
self.use_columns.remove(i)
self.tr_df = df
return df
def transform(self, original_df):
"""
        Transform the dataframe using the scalers fitted in fit_transform.
Parameters
----------------
original_df: dataframe
dataframe to be scaled
Returns
----------------
df: dataframe
scaled dataframe
"""
df = original_df.copy()
df = self.mutual_process(df)
for c in self.mw_column_list:
df[c] = df[c].fillna(self.average_mw)
# scaling
for v in self.main_val_params + ["Conductivity", "Temperature"]+self.mw_column_list:
for c in get_column_names(df, v):
df[c] = self.scaling_dict[c].transform(
df[c].astype('float').values.reshape(-1, 1))
# onehot encoding
for v in self.main_txt_params:
df = pd.get_dummies(df, columns=get_column_names(df, v))
# for lacking columns, add the most frequent vals
lacking_columns = set(self.use_columns)-set(df.columns)
for i in lacking_columns:
df[i] = self.tr_df[i].mode()
return df
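# Hedged usage sketch (not part of the original file; dataframe names are
# illustrative). The intended protocol mirrors scikit-learn scalers:
#
#   scaler = LiPolymerDataScaler()
#   train_scaled = scaler.fit_transform(train_df)   # fit scalers on training data
#   test_scaled = scaler.transform(test_df)         # reuse the fitted scalers
#   features = train_scaled[scaler.use_columns]     # columns kept for learning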
| 31.813953
| 116
| 0.569444
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from DataUtility import get_column_names
class LiPolymerDataScaler:
def __init__(self):
self.scaling_dict = {}
self.main_val_params = ["SMILES_wt", "wt_ratio", "inorg_contain_ratio"]
self.main_txt_params = ["structureList", "inorg_name"]
self.main_params = self.main_val_params+self.main_txt_params
self.target_param = "Conductivity"
def mutual_process(self, df):
df["Conductivity"] = np.log10(df["Conductivity"].astype('float'))
df["Temperature"] = np.log10(df["Temperature"].astype('float')+273)
for c in self.main_params:
target_columns = get_column_names(df, c)
df[target_columns] = df[target_columns].fillna(0)
self.mw_column_list = get_column_names(df, "MWList")
for c in self.mw_column_list:
df[c] = np.log10(df[c].astype('float'))
return df
def fit_transform(self, original_df):
df = original_df.copy()
df = self.mutual_process(df)
self.average_mw = sum(df[self.mw_column_list].sum()) / \
sum(df[self.mw_column_list].count())
for c in self.mw_column_list:
df[c] = df[c].fillna(self.average_mw)
for v in self.main_val_params + ["Conductivity", "Temperature"]+self.mw_column_list:
for c in get_column_names(df, v):
sc = StandardScaler()
df[c] = sc.fit_transform(
df[c].astype('float').values.reshape(-1, 1))
self.scaling_dict[c] = sc
for v in self.main_txt_params:
df = pd.get_dummies(df, columns=get_column_names(df, v))
self.use_columns = []
for c in ["Conductivity", "Temperature"]+self.main_params + self.mw_column_list+["fp_list"]:
self.use_columns.extend(get_column_names(df, c))
for remove_targets in ["MWList", "wt_ratio", "inorg", "structure", "Temperature"]:
del_columns = get_column_names(df, remove_targets)
for i in del_columns:
self.use_columns.remove(i)
self.tr_df = df
return df
def transform(self, original_df):
df = original_df.copy()
df = self.mutual_process(df)
for c in self.mw_column_list:
df[c] = df[c].fillna(self.average_mw)
for v in self.main_val_params + ["Conductivity", "Temperature"]+self.mw_column_list:
for c in get_column_names(df, v):
df[c] = self.scaling_dict[c].transform(
df[c].astype('float').values.reshape(-1, 1))
for v in self.main_txt_params:
df = pd.get_dummies(df, columns=get_column_names(df, v))
lacking_columns = set(self.use_columns)-set(df.columns)
for i in lacking_columns:
df[i] = self.tr_df[i].mode()
return df
| true
| true
|
f70a87ce62cba398e7370660217593d337998146
| 140
|
py
|
Python
|
python/data_sutram/scraper/test.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16
|
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/data_sutram/scraper/test.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8
|
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/data_sutram/scraper/test.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5
|
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
def test_func(i):
print(i)
if i>10:
return
else:
test_func(i+1)
if __name__ == "__main__":
test_func(2)
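    # test_func(2) prints the integers 2 through 11, then the recursion stops once i > 10.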
| 15.555556
| 26
| 0.521429
|
def test_func(i):
print(i)
if i>10:
return
else:
test_func(i+1)
if __name__ == "__main__":
test_func(2)
| true
| true
|
f70a87f89311f5320018937ff535733ef8e8f539
| 10,355
|
py
|
Python
|
curtsies/formatstringarray.py
|
toolforger/curtsies
|
7f86c07d95aa22b004db9acf8f787e1abf49b581
|
[
"MIT"
] | 3
|
2015-07-13T12:53:40.000Z
|
2018-01-21T20:38:46.000Z
|
curtsies/formatstringarray.py
|
toolforger/curtsies
|
7f86c07d95aa22b004db9acf8f787e1abf49b581
|
[
"MIT"
] | null | null | null |
curtsies/formatstringarray.py
|
toolforger/curtsies
|
7f86c07d95aa22b004db9acf8f787e1abf49b581
|
[
"MIT"
] | 1
|
2018-01-21T20:38:03.000Z
|
2018-01-21T20:38:03.000Z
|
"""
Format String 2D array
2D array for compositing terminal-formatted strings
-autoexpanding vertically
-interesting get_item behavior (renders fmtstrs)
-caching behavior eventually
>>> a = FSArray(10, 14)
>>> a.shape
(10, 14)
>>> a[1] = 'i'
>>> a[3:4, :] = ['i' * 14]
>>> a[16:17, :] = ['j' * 14]
>>> a.shape, a[16, 0]
((17, 14), ['j'])
>>> a[200, 1] = ['i']
>>> a[200, 1]
['i']
"""
import sys
import logging
from .formatstring import fmtstr
from .formatstring import normalize_slice
from .formatstring import FmtStr
from typing import (
Any,
Union,
Text,
List,
Sequence,
overload,
Tuple,
cast,
no_type_check,
)
actualize = str
logger = logging.getLogger(__name__)
# TODO check that strings used in arrays don't have tabs or spaces in them!
def slicesize(s):
# type: (slice) -> int
return int((s.stop - s.start) / (s.step if s.step else 1))
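# e.g. slicesize(slice(2, 10, 2)) == 4 -- the number of indices the slice covers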
def fsarray(strings, *args, **kwargs):
# type: (List[Union[FmtStr, Text]], *Any, **Any) -> FSArray
"""fsarray(list_of_FmtStrs_or_strings, width=None) -> FSArray
    Returns a new FSArray whose width is the maximum length of the provided
    strings (or the given width) and whose height is the number of strings.
    If a width is provided, raises a ValueError if any of the strings
    is longer than that width."""
strings = list(strings)
if "width" in kwargs:
width = kwargs["width"]
del kwargs["width"]
if strings and any(len(s) > width for s in strings):
raise ValueError(f"Those strings won't fit for width {width}")
else:
width = max(len(s) for s in strings) if strings else 0
fstrings = [
s if isinstance(s, FmtStr) else fmtstr(s, *args, **kwargs) for s in strings
]
arr = FSArray(len(fstrings), width, *args, **kwargs)
rows = [
fs.setslice_with_length(0, len(s), s, width)
for fs, s in zip(arr.rows, fstrings)
]
arr.rows = rows
return arr
class FSArray(Sequence):
"""A 2D array of colored text.
Internally represented by a list of FmtStrs of identical size."""
# TODO add constructor that takes fmtstrs instead of dims
def __init__(self, num_rows, num_columns, *args, **kwargs):
# type: (int, int, *Any, **Any) -> None
self.saved_args, self.saved_kwargs = args, kwargs
self.rows = [
fmtstr("", *args, **kwargs) for _ in range(num_rows)
] # type: List[FmtStr]
self.num_columns = num_columns
@overload
def __getitem__(self, slicetuple):
# type: (int) -> FmtStr
pass
@overload
def __getitem__(self, slicetuple):
# type: (slice) -> List[FmtStr]
pass
@overload
def __getitem__(self, slicetuple):
# type: (Tuple[Union[slice, int], Union[slice, int]]) -> List[FmtStr]
pass
def __getitem__(self, slicetuple):
# type: (Union[int, slice, Tuple[Union[int, slice], Union[int, slice]]]) -> Union[FmtStr, List[FmtStr]]
if isinstance(slicetuple, int):
if slicetuple < 0:
                slicetuple = len(self.rows) + slicetuple  # normalize negative index
if slicetuple < 0 or slicetuple >= len(self.rows):
raise IndexError("out of bounds")
return self.rows[slicetuple]
if isinstance(slicetuple, slice):
rowslice = normalize_slice(len(self.rows), slicetuple)
return self.rows[rowslice]
(
row_slice_or_int,
col_slice_or_int,
) = slicetuple # type: Tuple[Union[int, slice], Union[int, slice]]
rowslice = normalize_slice(len(self.rows), row_slice_or_int)
colslice = normalize_slice(self.num_columns, col_slice_or_int)
# TODO clean up slices
return [fs[colslice] for fs in self.rows[rowslice]]
def __len__(self):
# type: () -> int
return len(self.rows)
@property
def shape(self):
# type: () -> Tuple[int, int]
"""Tuple of (len(rows, len(num_columns)) numpy-style shape"""
return len(self.rows), self.num_columns
@property
def height(self):
# type: () -> int
"""The number of rows"""
return len(self.rows)
@property
def width(self):
# type: () -> int
"""The number of columns"""
return self.num_columns
# TODO rework this next major version bump
@no_type_check
def __setitem__(self, slicetuple, value):
"""Place a FSArray in a FSArray"""
logger.debug("slice: %r", slicetuple)
if isinstance(slicetuple, slice):
rowslice, colslice = slicetuple, slice(None)
if isinstance(value, (bytes, str)):
raise ValueError(
"if slice is 2D, value must be 2D as in of list type []"
)
elif isinstance(slicetuple, int):
normalize_slice(self.height, slicetuple)
self.rows[slicetuple] = value
return
else:
rowslice, colslice = slicetuple
# temp shim to allow numpy arrays as values
if value.__class__.__name__ == "ndarray":
value = [fmtstr("".join(line)) for line in value]
rowslice = normalize_slice(sys.maxsize, rowslice)
additional_rows = max(0, rowslice.stop - len(self.rows))
self.rows.extend(
[
fmtstr("", *self.saved_args, **self.saved_kwargs)
for _ in range(additional_rows)
]
)
logger.debug("num columns: %r", self.num_columns)
logger.debug("colslice: %r", colslice)
colslice = normalize_slice(self.num_columns, colslice)
if slicesize(colslice) == 0 or slicesize(rowslice) == 0:
return
if slicesize(colslice) > 1 and isinstance(value, str):
raise ValueError(
"""You cannot replace a multi column slice with a
string please use a list [] with strings for the
contents of each row"""
)
if slicesize(rowslice) != len(value):
area = slicesize(rowslice) * slicesize(colslice)
val_len = sum(len(i) for i in value)
grid_value = [fmtstr(" ", bg="cyan") * slicesize(colslice)] * slicesize(
rowslice
)
grid_fsarray = (
self.rows[: rowslice.start]
+ [
fs.setslice_with_length(
colslice.start, colslice.stop, v, self.num_columns
)
for fs, v in zip(self.rows[rowslice], grid_value)
]
+ self.rows[rowslice.stop :]
)
msg = "You are trying to fit this value {} into the region {}: {}".format(
fmtstr("".join(value), bg="cyan"),
fmtstr("").join(grid_value),
"\n ".join(grid_fsarray[x] for x in range(len(self.rows))),
)
raise ValueError(
"""Error you are trying to replace a region of {} rows by {}
columns for and area of {} with a value of len {}. The value
used to replace the region must equal the area of the region
replace.
{}""".format(
rowslice.stop - rowslice.start,
colslice.stop - colslice.start,
area,
val_len,
msg,
)
)
self.rows = (
self.rows[: rowslice.start]
+ [
fs.setslice_with_length(
colslice.start, colslice.stop, v, self.num_columns
)
for fs, v in zip(self.rows[rowslice], value)
]
+ self.rows[rowslice.stop :]
)
def dumb_display(self):
# type: () -> None
"""Prints each row followed by a newline without regard for the terminal window size"""
for line in self.rows:
print(line)
@classmethod
def diff(cls, a, b, ignore_formatting=False):
# type: (FSArray, FSArray, bool) -> Text
"""Returns two FSArrays with differences underlined"""
def underline(x):
# type: (Text) -> Text
return f"\x1b[4m{x}\x1b[0m"
def blink(x):
# type: (Text) -> Text
return f"\x1b[5m{x}\x1b[0m"
a_rows = []
b_rows = []
max_width = max([len(row) for row in a] + [len(row) for row in b])
a_lengths = []
b_lengths = []
for a_row, b_row in zip(a, b):
a_lengths.append(len(a_row))
b_lengths.append(len(b_row))
extra_a = "`" * (max_width - len(a_row))
extra_b = "`" * (max_width - len(b_row))
a_line = ""
b_line = ""
for a_char, b_char in zip(a_row + extra_a, b_row + extra_b):
if ignore_formatting:
a_char_for_eval = a_char.s if isinstance(a_char, FmtStr) else a_char
b_char_for_eval = b_char.s if isinstance(b_char, FmtStr) else b_char
else:
a_char_for_eval = a_char
b_char_for_eval = b_char
if a_char_for_eval == b_char_for_eval:
a_line += actualize(a_char)
b_line += actualize(b_char)
else:
a_line += underline(blink(actualize(a_char)))
b_line += underline(blink(actualize(b_char)))
a_rows.append(a_line)
b_rows.append(b_line)
hdiff = "\n".join(
a_line + " %3d | %3d " % (a_len, b_len) + b_line
for a_line, b_line, a_len, b_len in zip(
a_rows, b_rows, a_lengths, b_lengths
)
)
return hdiff
def simple_format(x):
# type: (Union[FSArray, List[FmtStr]]) -> Text
return "\n".join(actualize(l) for l in x)
if __name__ == "__main__":
a = FSArray(3, 14, bg="blue")
a[0:2, 5:11] = cast(
Tuple[FmtStr, ...],
(fmtstr("hey", "on_blue") + " " + fmtstr("yo", "on_red"), fmtstr("qwe qw")),
)
a.dumb_display()
a = fsarray(["hey", "there"], bg="cyan")
a.dumb_display()
print(FSArray.diff(a, fsarray(["hey", "there "]), ignore_formatting=True))
| 33.29582
| 111
| 0.547562
|
import sys
import logging
from .formatstring import fmtstr
from .formatstring import normalize_slice
from .formatstring import FmtStr
from typing import (
Any,
Union,
Text,
List,
Sequence,
overload,
Tuple,
cast,
no_type_check,
)
actualize = str
logger = logging.getLogger(__name__)
def slicesize(s):
# type: (slice) -> int
return int((s.stop - s.start) / (s.step if s.step else 1))
def fsarray(strings, *args, **kwargs):
# type: (List[Union[FmtStr, Text]], *Any, **Any) -> FSArray
strings = list(strings)
if "width" in kwargs:
width = kwargs["width"]
del kwargs["width"]
if strings and any(len(s) > width for s in strings):
raise ValueError(f"Those strings won't fit for width {width}")
else:
width = max(len(s) for s in strings) if strings else 0
fstrings = [
s if isinstance(s, FmtStr) else fmtstr(s, *args, **kwargs) for s in strings
]
arr = FSArray(len(fstrings), width, *args, **kwargs)
rows = [
fs.setslice_with_length(0, len(s), s, width)
for fs, s in zip(arr.rows, fstrings)
]
arr.rows = rows
return arr
class FSArray(Sequence):
def __init__(self, num_rows, num_columns, *args, **kwargs):
self.saved_args, self.saved_kwargs = args, kwargs
self.rows = [
fmtstr("", *args, **kwargs) for _ in range(num_rows)
]
self.num_columns = num_columns
@overload
def __getitem__(self, slicetuple):
pass
@overload
def __getitem__(self, slicetuple):
pass
@overload
def __getitem__(self, slicetuple):
pass
def __getitem__(self, slicetuple):
if isinstance(slicetuple, int):
if slicetuple < 0:
                slicetuple = len(self.rows) + slicetuple
if slicetuple < 0 or slicetuple >= len(self.rows):
raise IndexError("out of bounds")
return self.rows[slicetuple]
if isinstance(slicetuple, slice):
rowslice = normalize_slice(len(self.rows), slicetuple)
return self.rows[rowslice]
(
row_slice_or_int,
col_slice_or_int,
) = slicetuple
rowslice = normalize_slice(len(self.rows), row_slice_or_int)
colslice = normalize_slice(self.num_columns, col_slice_or_int)
return [fs[colslice] for fs in self.rows[rowslice]]
def __len__(self):
return len(self.rows)
@property
def shape(self):
return len(self.rows), self.num_columns
@property
def height(self):
return len(self.rows)
@property
def width(self):
return self.num_columns
@no_type_check
def __setitem__(self, slicetuple, value):
logger.debug("slice: %r", slicetuple)
if isinstance(slicetuple, slice):
rowslice, colslice = slicetuple, slice(None)
if isinstance(value, (bytes, str)):
raise ValueError(
"if slice is 2D, value must be 2D as in of list type []"
)
elif isinstance(slicetuple, int):
normalize_slice(self.height, slicetuple)
self.rows[slicetuple] = value
return
else:
rowslice, colslice = slicetuple
if value.__class__.__name__ == "ndarray":
value = [fmtstr("".join(line)) for line in value]
rowslice = normalize_slice(sys.maxsize, rowslice)
additional_rows = max(0, rowslice.stop - len(self.rows))
self.rows.extend(
[
fmtstr("", *self.saved_args, **self.saved_kwargs)
for _ in range(additional_rows)
]
)
logger.debug("num columns: %r", self.num_columns)
logger.debug("colslice: %r", colslice)
colslice = normalize_slice(self.num_columns, colslice)
if slicesize(colslice) == 0 or slicesize(rowslice) == 0:
return
if slicesize(colslice) > 1 and isinstance(value, str):
raise ValueError(
"""You cannot replace a multi column slice with a
string please use a list [] with strings for the
contents of each row"""
)
if slicesize(rowslice) != len(value):
area = slicesize(rowslice) * slicesize(colslice)
val_len = sum(len(i) for i in value)
grid_value = [fmtstr(" ", bg="cyan") * slicesize(colslice)] * slicesize(
rowslice
)
grid_fsarray = (
self.rows[: rowslice.start]
+ [
fs.setslice_with_length(
colslice.start, colslice.stop, v, self.num_columns
)
for fs, v in zip(self.rows[rowslice], grid_value)
]
+ self.rows[rowslice.stop :]
)
msg = "You are trying to fit this value {} into the region {}: {}".format(
fmtstr("".join(value), bg="cyan"),
fmtstr("").join(grid_value),
"\n ".join(grid_fsarray[x] for x in range(len(self.rows))),
)
raise ValueError(
"""Error you are trying to replace a region of {} rows by {}
columns for and area of {} with a value of len {}. The value
used to replace the region must equal the area of the region
replace.
{}""".format(
rowslice.stop - rowslice.start,
colslice.stop - colslice.start,
area,
val_len,
msg,
)
)
self.rows = (
self.rows[: rowslice.start]
+ [
fs.setslice_with_length(
colslice.start, colslice.stop, v, self.num_columns
)
for fs, v in zip(self.rows[rowslice], value)
]
+ self.rows[rowslice.stop :]
)
def dumb_display(self):
for line in self.rows:
print(line)
@classmethod
def diff(cls, a, b, ignore_formatting=False):
def underline(x):
return f"\x1b[4m{x}\x1b[0m"
def blink(x):
return f"\x1b[5m{x}\x1b[0m"
a_rows = []
b_rows = []
max_width = max([len(row) for row in a] + [len(row) for row in b])
a_lengths = []
b_lengths = []
for a_row, b_row in zip(a, b):
a_lengths.append(len(a_row))
b_lengths.append(len(b_row))
extra_a = "`" * (max_width - len(a_row))
extra_b = "`" * (max_width - len(b_row))
a_line = ""
b_line = ""
for a_char, b_char in zip(a_row + extra_a, b_row + extra_b):
if ignore_formatting:
a_char_for_eval = a_char.s if isinstance(a_char, FmtStr) else a_char
b_char_for_eval = b_char.s if isinstance(b_char, FmtStr) else b_char
else:
a_char_for_eval = a_char
b_char_for_eval = b_char
if a_char_for_eval == b_char_for_eval:
a_line += actualize(a_char)
b_line += actualize(b_char)
else:
a_line += underline(blink(actualize(a_char)))
b_line += underline(blink(actualize(b_char)))
a_rows.append(a_line)
b_rows.append(b_line)
hdiff = "\n".join(
a_line + " %3d | %3d " % (a_len, b_len) + b_line
for a_line, b_line, a_len, b_len in zip(
a_rows, b_rows, a_lengths, b_lengths
)
)
return hdiff
def simple_format(x):
return "\n".join(actualize(l) for l in x)
if __name__ == "__main__":
a = FSArray(3, 14, bg="blue")
a[0:2, 5:11] = cast(
Tuple[FmtStr, ...],
(fmtstr("hey", "on_blue") + " " + fmtstr("yo", "on_red"), fmtstr("qwe qw")),
)
a.dumb_display()
a = fsarray(["hey", "there"], bg="cyan")
a.dumb_display()
print(FSArray.diff(a, fsarray(["hey", "there "]), ignore_formatting=True))
| true
| true
|
f70a8a2cc7770d9c4ef39696ddde8e9dab7893c8
| 670
|
py
|
Python
|
leetcode/[cutz]mergekarr.py
|
cutz-j/AlgorithmStudy
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
[
"MIT"
] | 3
|
2019-11-26T14:31:01.000Z
|
2020-01-10T18:19:46.000Z
|
leetcode/[cutz]mergekarr.py
|
cutz-j/AlgorithmStudy
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
[
"MIT"
] | null | null | null |
leetcode/[cutz]mergekarr.py
|
cutz-j/AlgorithmStudy
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
[
"MIT"
] | null | null | null |
import heapq
from typing import List

# Definition for singly-linked list (provided by LeetCode at runtime).
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
    def mergeKLists(self, lists: List[ListNode]) -> ListNode:
        heap = []
        root = res = ListNode(None)
        # Seed the heap with the head of each non-empty list; skipping empty
        # lists avoids dereferencing None.
        for i in range(len(lists)):
            if lists[i]:
                heapq.heappush(heap, (lists[i].val, i, lists[i]))
        while heap:
            m = heapq.heappop(heap)
            idx = m[1]
            res.next = m[2]
            res = res.next
            if res.next:
                heapq.heappush(heap, (res.next.val, idx, res.next))
        return root.next
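# Hedged usage sketch (not part of the original solution; ListNode is the
# LeetCode-provided class). Note the heap entries (val, i, node): the list
# index i breaks ties, so equal values never force Python 3 to compare
# ListNode objects (which would raise TypeError).
#
#   a = ListNode(1, ListNode(4, ListNode(5)))
#   b = ListNode(1, ListNode(3, ListNode(4)))
#   merged = Solution().mergeKLists([a, b])
#   # walking merged yields 1 -> 1 -> 3 -> 4 -> 4 -> 5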
| 25.769231
| 67
| 0.525373
|
import heapq
from typing import List

class Solution:
    def mergeKLists(self, lists: List[ListNode]) -> ListNode:
        heap = []
        root = res = ListNode(None)
        for i in range(len(lists)):
            if lists[i]:
                heapq.heappush(heap, (lists[i].val, i, lists[i]))
        while heap:
            m = heapq.heappop(heap)
            idx = m[1]
            res.next = m[2]
            res = res.next
            if res.next:
                heapq.heappush(heap, (res.next.val, idx, res.next))
        return root.next
| true
| true
|
f70a8a8f20d027af22a12a78590c0391f8a9a744
| 6,361
|
py
|
Python
|
monai/metrics/surface_distance.py
|
danielschulz/MONAI
|
54ef6e9e700f0de3d50184c0148f953be871a58e
|
[
"Apache-2.0"
] | null | null | null |
monai/metrics/surface_distance.py
|
danielschulz/MONAI
|
54ef6e9e700f0de3d50184c0148f953be871a58e
|
[
"Apache-2.0"
] | null | null | null |
monai/metrics/surface_distance.py
|
danielschulz/MONAI
|
54ef6e9e700f0de3d50184c0148f953be871a58e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Union
import numpy as np
import torch
from monai.metrics.utils import *
from monai.utils import MetricReduction
class SurfaceDistanceMetric:
"""
Compute Surface Distance between two tensors. It can support both multi-classes and multi-labels tasks.
It supports both symmetric and asymmetric surface distance calculation.
Input `y_pred` (BNHW[D] where N is number of classes) is compared with ground truth `y` (BNHW[D]).
    `y_pred` is expected to have binarized predictions and `y` should be in one-hot format.
You can use suitable transforms in ``monai.transforms.post`` first to achieve binarized values.
Args:
include_background: whether to skip distance computation on the first channel of
the predicted output. Defaults to ``False``.
symmetric: whether to calculate the symmetric average surface distance between
`seg_pred` and `seg_gt`. Defaults to ``False``.
        distance_metric: [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
the metric used to compute surface distance. Defaults to ``"euclidean"``.
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
"""
def __init__(
self,
include_background: bool = False,
symmetric: bool = False,
distance_metric: str = "euclidean",
reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
) -> None:
super().__init__()
self.include_background = include_background
self.distance_metric = distance_metric
self.symmetric = symmetric
self.reduction = reduction
def __call__(self, y_pred: torch.Tensor, y: torch.Tensor):
"""
Args:
y_pred: input data to compute, typical segmentation model output.
It must be one-hot format and first dim is batch, example shape: [16, 3, 32, 32]. The values
should be binarized.
y: ground truth to compute the distance. It must be one-hot format and first dim is batch.
The values should be binarized.
Raises:
ValueError: when `y` is not a binarized tensor.
ValueError: when `y_pred` has less than three dimensions.
"""
if not torch.all(y_pred.byte() == y_pred):
warnings.warn("y_pred is not a binarized tensor here!")
if not torch.all(y.byte() == y):
raise ValueError("y should be a binarized tensor.")
dims = y_pred.ndimension()
if dims < 3:
raise ValueError("y_pred should have at least three dimensions.")
# compute (BxC) for each channel for each batch
f = compute_average_surface_distance(
y_pred=y_pred,
y=y,
include_background=self.include_background,
symmetric=self.symmetric,
distance_metric=self.distance_metric,
)
# do metric reduction
f, not_nans = do_metric_reduction(f, self.reduction)
return f, not_nans
def compute_average_surface_distance(
y_pred: Union[np.ndarray, torch.Tensor],
y: Union[np.ndarray, torch.Tensor],
include_background: bool = False,
symmetric: bool = False,
distance_metric: str = "euclidean",
):
"""
This function is used to compute the Average Surface Distance from `y_pred` to `y`
under the default setting.
    In addition, if ``symmetric = True`` is set, the average symmetric surface distance between
these two inputs will be returned.
Args:
y_pred: input data to compute, typical segmentation model output.
It must be one-hot format and first dim is batch, example shape: [16, 3, 32, 32]. The values
should be binarized.
        y: ground truth to compute the mean distance. It must be one-hot format and first dim is batch.
The values should be binarized.
include_background: whether to skip distance computation on the first channel of
the predicted output. Defaults to ``False``.
symmetric: whether to calculate the symmetric average surface distance between
`seg_pred` and `seg_gt`. Defaults to ``False``.
        distance_metric: [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
the metric used to compute surface distance. Defaults to ``"euclidean"``.
"""
if not include_background:
y_pred, y = ignore_background(
y_pred=y_pred,
y=y,
)
y = y.float()
y_pred = y_pred.float()
if y.shape != y_pred.shape:
raise ValueError("y_pred and y should have same shapes.")
batch_size, n_class = y_pred.shape[:2]
asd = np.empty((batch_size, n_class))
for b, c in np.ndindex(batch_size, n_class):
(edges_pred, edges_gt) = get_mask_edges(y_pred[b, c], y[b, c])
surface_distance = get_surface_distance(edges_pred, edges_gt, distance_metric=distance_metric)
if surface_distance.shape == (0,):
avg_surface_distance = np.nan
else:
avg_surface_distance = surface_distance.mean()
if not symmetric:
asd[b, c] = avg_surface_distance
else:
surface_distance_2 = get_surface_distance(edges_gt, edges_pred, distance_metric=distance_metric)
if surface_distance_2.shape == (0,):
avg_surface_distance_2 = np.nan
else:
avg_surface_distance_2 = surface_distance_2.mean()
asd[b, c] = np.mean((avg_surface_distance, avg_surface_distance_2))
return torch.from_numpy(asd)
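# A minimal usage sketch (shapes and the random masks below are illustrative
# and not part of the original file; strictly one-hot inputs are assumed by
# the metric itself):
#
#     import torch
#     metric = SurfaceDistanceMetric(include_background=False, symmetric=True)
#     y_pred = torch.randint(0, 2, (2, 3, 32, 32)).float()  # binarized [B, C, H, W]
#     y = torch.randint(0, 2, (2, 3, 32, 32)).float()
#     score, not_nans = metric(y_pred, y)  # reduced per MetricReduction.MEAN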
| 42.406667
| 108
| 0.652413
|
import warnings
from typing import Union
import numpy as np
import torch
from monai.metrics.utils import *
from monai.utils import MetricReduction
class SurfaceDistanceMetric:
def __init__(
self,
include_background: bool = False,
symmetric: bool = False,
distance_metric: str = "euclidean",
reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
) -> None:
super().__init__()
self.include_background = include_background
self.distance_metric = distance_metric
self.symmetric = symmetric
self.reduction = reduction
def __call__(self, y_pred: torch.Tensor, y: torch.Tensor):
if not torch.all(y_pred.byte() == y_pred):
warnings.warn("y_pred is not a binarized tensor here!")
if not torch.all(y.byte() == y):
raise ValueError("y should be a binarized tensor.")
dims = y_pred.ndimension()
if dims < 3:
raise ValueError("y_pred should have at least three dimensions.")
f = compute_average_surface_distance(
y_pred=y_pred,
y=y,
include_background=self.include_background,
symmetric=self.symmetric,
distance_metric=self.distance_metric,
)
f, not_nans = do_metric_reduction(f, self.reduction)
return f, not_nans
def compute_average_surface_distance(
y_pred: Union[np.ndarray, torch.Tensor],
y: Union[np.ndarray, torch.Tensor],
include_background: bool = False,
symmetric: bool = False,
distance_metric: str = "euclidean",
):
if not include_background:
y_pred, y = ignore_background(
y_pred=y_pred,
y=y,
)
y = y.float()
y_pred = y_pred.float()
if y.shape != y_pred.shape:
raise ValueError("y_pred and y should have same shapes.")
batch_size, n_class = y_pred.shape[:2]
asd = np.empty((batch_size, n_class))
for b, c in np.ndindex(batch_size, n_class):
(edges_pred, edges_gt) = get_mask_edges(y_pred[b, c], y[b, c])
surface_distance = get_surface_distance(edges_pred, edges_gt, distance_metric=distance_metric)
if surface_distance.shape == (0,):
avg_surface_distance = np.nan
else:
avg_surface_distance = surface_distance.mean()
if not symmetric:
asd[b, c] = avg_surface_distance
else:
surface_distance_2 = get_surface_distance(edges_gt, edges_pred, distance_metric=distance_metric)
if surface_distance_2.shape == (0,):
avg_surface_distance_2 = np.nan
else:
avg_surface_distance_2 = surface_distance_2.mean()
asd[b, c] = np.mean((avg_surface_distance, avg_surface_distance_2))
return torch.from_numpy(asd)
| true
| true
|
f70a8ae488be6f9e83f14e6becdc73cfc39e30b3
| 611
|
py
|
Python
|
src/spyne_smev/server/wsgi.py
|
barsgroup/m3-spyne-smev
|
356d190a0f341f3b91d626eba81875cde8ff12f2
|
[
"MIT"
] | 7
|
2015-10-22T02:57:33.000Z
|
2021-08-08T16:46:48.000Z
|
src/spyne_smev/server/wsgi.py
|
barsgroup/m3-spyne-smev
|
356d190a0f341f3b91d626eba81875cde8ff12f2
|
[
"MIT"
] | 2
|
2017-05-01T05:31:41.000Z
|
2020-03-18T16:26:43.000Z
|
src/spyne_smev/server/wsgi.py
|
barsgroup/m3-spyne-smev
|
356d190a0f341f3b91d626eba81875cde8ff12f2
|
[
"MIT"
] | 8
|
2015-10-22T02:57:47.000Z
|
2021-11-08T08:28:32.000Z
|
# -*- coding: utf-8 -*-
"""
wsgi.py
:Created: 12 Jun 2014
:Author: tim
"""
from spyne.server.wsgi import WsgiApplication as _SpyneWsgiApplication
from spyne_smev.server import _AllYourInterfaceDocuments
class WsgiApplication(_SpyneWsgiApplication):
def __init__(self, app, chunked=True, max_content_length=2 * 1024 * 1024,
block_length=8 * 1024):
super(WsgiApplication, self).__init__(app, chunked, max_content_length,
block_length)
self.doc = _AllYourInterfaceDocuments(app.interface)
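# A minimal wiring sketch (the spyne Application construction is illustrative;
# the service and protocols are placeholders, not from this file):
#
#     from spyne import Application
#     app = Application([MyService], tns='urn:example',
#                       in_protocol=in_proto, out_protocol=out_proto)
#     wsgi_app = WsgiApplication(app)  # .doc wraps app.interface documents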
| 26.565217
| 79
| 0.631751
|
from spyne.server.wsgi import WsgiApplication as _SpyneWsgiApplication
from spyne_smev.server import _AllYourInterfaceDocuments
class WsgiApplication(_SpyneWsgiApplication):
def __init__(self, app, chunked=True, max_content_length=2 * 1024 * 1024,
block_length=8 * 1024):
super(WsgiApplication, self).__init__(app, chunked, max_content_length,
block_length)
self.doc = _AllYourInterfaceDocuments(app.interface)
| true
| true
|
f70a8c102b413f15a56d9719e7836be3413d7bfe
| 2,477
|
py
|
Python
|
Advent2020/23.py
|
SSteve/AdventOfCode
|
aed16209381ccd292fc02008f1f2da5d16ff1a05
|
[
"MIT"
] | null | null | null |
Advent2020/23.py
|
SSteve/AdventOfCode
|
aed16209381ccd292fc02008f1f2da5d16ff1a05
|
[
"MIT"
] | null | null | null |
Advent2020/23.py
|
SSteve/AdventOfCode
|
aed16209381ccd292fc02008f1f2da5d16ff1a05
|
[
"MIT"
] | null | null | null |
class Node:
def __init__(self, next: int):
self.next = next
self.up = False
def MakeNodes(data: str):
values = [int(ch) - 1 for ch in data]
nodes = []
for value in range(len(values)):
index = values.index(value)
next = values[(index + 1) % len(values)]
nodes.append(Node(next))
return nodes, values[0]
def MakeNodes2(data: str):
nodes, current = MakeNodes(data)
next = nodes[current].next
for _ in range(len(nodes) - 2):
next = nodes[next].next
nodes[next].next = len(nodes)
for value in range(len(nodes), 1_000_000):
nodes.append(Node(value + 1))
nodes[999_999].next = current
return nodes, current
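# The cups form an implicit singly linked list: nodes[value].next holds the
# (zero-based) value of the cup clockwise of `value`, and the `up` flag marks
# the three cups currently picked up so the destination scan in Turn() can
# skip them cheaply.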
def Turn(current: int, nodes):
up = nodes[current].next
firstUp = up
for _ in range(3):
nodes[up].up = True
lastUp = up
up = nodes[up].next
destination = (current - 1) % len(nodes)
while nodes[destination].up:
destination = (destination - 1) % len(nodes)
nodes[current].next = nodes[lastUp].next
nodes[lastUp].next = nodes[destination].next
nodes[destination].next = firstUp
up = firstUp
for _ in range(3):
nodes[up].up = False
up = nodes[up].next
return nodes[current].next
def PrintNodes(current: int, nodes):
print(f"({current + 1})", end='')
index = nodes[current].next
for _ in range(min(len(nodes) - 1, 20)):
print(f" {index + 1}", end='')
index = nodes[index].next
print()
def Answer(nodes):
answer = ''
node = nodes[0].next
for _ in range(len(nodes) - 1):
answer += str(node + 1)
node = nodes[node].next
return answer
def Answer2(nodes):
cup1 = nodes[0].next
cup2 = nodes[cup1].next
return (cup1 + 1) * (cup2 + 1)
TEST = "389125467"
DATA = "487912365"
testNodes, current = MakeNodes(TEST)
for _ in range(100):
current = Turn(current, testNodes)
assert Answer(testNodes) == '67384529'
nodes, current = MakeNodes(DATA)
for _ in range(100):
current = Turn(current, nodes)
print(Answer(nodes))
assert Answer(nodes) == '89573246'
testNodes, current = MakeNodes2(TEST)
for _ in range(10_000_000):
current = Turn(current, testNodes)
assert Answer2(testNodes) == 149245887792
nodes, current = MakeNodes2(DATA)
for _ in range(10_000_000):
current = Turn(current, nodes)
print(Answer2(nodes))
assert Answer2(nodes) == 2029056128
| 26.634409
| 52
| 0.608801
|
class Node:
def __init__(self, next: int):
self.next = next
self.up = False
def MakeNodes(data: str):
values = [int(ch) - 1 for ch in data]
nodes = []
for value in range(len(values)):
index = values.index(value)
next = values[(index + 1) % len(values)]
nodes.append(Node(next))
return nodes, values[0]
def MakeNodes2(data: str):
nodes, current = MakeNodes(data)
next = nodes[current].next
for _ in range(len(nodes) - 2):
next = nodes[next].next
nodes[next].next = len(nodes)
for value in range(len(nodes), 1_000_000):
nodes.append(Node(value + 1))
nodes[999_999].next = current
return nodes, current
def Turn(current: int, nodes):
up = nodes[current].next
firstUp = up
for _ in range(3):
nodes[up].up = True
lastUp = up
up = nodes[up].next
destination = (current - 1) % len(nodes)
while nodes[destination].up:
destination = (destination - 1) % len(nodes)
nodes[current].next = nodes[lastUp].next
nodes[lastUp].next = nodes[destination].next
nodes[destination].next = firstUp
up = firstUp
for _ in range(3):
nodes[up].up = False
up = nodes[up].next
return nodes[current].next
def PrintNodes(current: int, nodes):
print(f"({current + 1})", end='')
index = nodes[current].next
for _ in range(min(len(nodes) - 1, 20)):
print(f" {index + 1}", end='')
index = nodes[index].next
print()
def Answer(nodes):
answer = ''
node = nodes[0].next
for _ in range(len(nodes) - 1):
answer += str(node + 1)
node = nodes[node].next
return answer
def Answer2(nodes):
cup1 = nodes[0].next
cup2 = nodes[cup1].next
return (cup1 + 1) * (cup2 + 1)
TEST = "389125467"
DATA = "487912365"
testNodes, current = MakeNodes(TEST)
for _ in range(100):
current = Turn(current, testNodes)
assert Answer(testNodes) == '67384529'
nodes, current = MakeNodes(DATA)
for _ in range(100):
current = Turn(current, nodes)
print(Answer(nodes))
assert Answer(nodes) == '89573246'
testNodes, current = MakeNodes2(TEST)
for _ in range(10_000_000):
current = Turn(current, testNodes)
assert Answer2(testNodes) == 149245887792
nodes, current = MakeNodes2(DATA)
for _ in range(10_000_000):
current = Turn(current, nodes)
print(Answer2(nodes))
assert Answer2(nodes) == 2029056128
| true
| true
|
f70a8c1d9d385ae5c5b01cb27f773a0610725826
| 4,907
|
py
|
Python
|
nextdl/extractor/palcomp3.py
|
devenu85/nextdl
|
0b458f556e2e0be80cb94bd9a9b1405ad2e9182d
|
[
"MIT"
] | 1
|
2021-12-19T13:55:20.000Z
|
2021-12-19T13:55:20.000Z
|
nextdl/extractor/palcomp3.py
|
devenu85/nextdl
|
0b458f556e2e0be80cb94bd9a9b1405ad2e9182d
|
[
"MIT"
] | null | null | null |
nextdl/extractor/palcomp3.py
|
devenu85/nextdl
|
0b458f556e2e0be80cb94bd9a9b1405ad2e9182d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
import re
from ..compat import compat_str
from ..utils import int_or_none, str_or_none, try_get
from .common import InfoExtractor
class PalcoMP3BaseIE(InfoExtractor):
_GQL_QUERY_TMPL = """{
artist(slug: "%s") {
%s
}
}"""
_ARTIST_FIELDS_TMPL = """music(slug: "%%s") {
%s
}"""
_MUSIC_FIELDS = """duration
hls
mp3File
musicID
plays
title"""
def _call_api(self, artist_slug, artist_fields):
return self._download_json(
"https://www.palcomp3.com.br/graphql/",
artist_slug,
query={
"query": self._GQL_QUERY_TMPL % (artist_slug, artist_fields),
},
)["data"]
def _parse_music(self, music):
music_id = compat_str(music["musicID"])
title = music["title"]
formats = []
hls_url = music.get("hls")
if hls_url:
formats.append(
{
"url": hls_url,
"protocol": "m3u8_native",
"ext": "mp4",
}
)
mp3_file = music.get("mp3File")
if mp3_file:
formats.append(
{
"url": mp3_file,
}
)
return {
"id": music_id,
"title": title,
"formats": formats,
"duration": int_or_none(music.get("duration")),
"view_count": int_or_none(music.get("plays")),
}
def _real_initialize(self):
self._ARTIST_FIELDS_TMPL = self._ARTIST_FIELDS_TMPL % self._MUSIC_FIELDS
def _real_extract(self, url):
artist_slug, music_slug = re.match(self._VALID_URL, url).groups()
artist_fields = self._ARTIST_FIELDS_TMPL % music_slug
music = self._call_api(artist_slug, artist_fields)["artist"]["music"]
return self._parse_music(music)
class PalcoMP3IE(PalcoMP3BaseIE):
IE_NAME = "PalcoMP3:song"
_VALID_URL = (
r"https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<artist>[^/]+)/(?P<id>[^/?&#]+)"
)
_TESTS = [
{
"url": "https://www.palcomp3.com/maiaraemaraisaoficial/nossas-composicoes-cuida-bem-dela/",
"md5": "99fd6405b2d8fd589670f6db1ba3b358",
"info_dict": {
"id": "3162927",
"ext": "mp3",
"title": "Nossas Composições - CUIDA BEM DELA",
"duration": 210,
"view_count": int,
},
}
]
@classmethod
def suitable(cls, url):
return (
False
if PalcoMP3VideoIE.suitable(url)
else super(PalcoMP3IE, cls).suitable(url)
)
class PalcoMP3ArtistIE(PalcoMP3BaseIE):
IE_NAME = "PalcoMP3:artist"
_VALID_URL = r"https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<id>[^/?&#]+)"
_TESTS = [
{
"url": "https://www.palcomp3.com.br/condedoforro/",
"info_dict": {
"id": "358396",
"title": "Conde do Forró",
},
"playlist_mincount": 188,
}
]
_ARTIST_FIELDS_TMPL = """artistID
musics {
nodes {
%s
}
}
name"""
@classmethod
def suitable(cls, url):
return (
False
if re.match(PalcoMP3IE._VALID_URL, url)
else super(PalcoMP3ArtistIE, cls).suitable(url)
)
def _real_extract(self, url):
artist_slug = self._match_id(url)
artist = self._call_api(artist_slug, self._ARTIST_FIELDS_TMPL)["artist"]
def entries():
for music in try_get(artist, lambda x: x["musics"]["nodes"], list) or []:
yield self._parse_music(music)
return self.playlist_result(
entries(), str_or_none(artist.get("artistID")), artist.get("name")
)
class PalcoMP3VideoIE(PalcoMP3BaseIE):
IE_NAME = "PalcoMP3:video"
_VALID_URL = r"https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<artist>[^/]+)/(?P<id>[^/?&#]+)/?#clipe"
_TESTS = [
{
"url": "https://www.palcomp3.com/maiaraemaraisaoficial/maiara-e-maraisa-voce-faz-falta-aqui-ao-vivo-em-vicosa-mg/#clipe",
"add_ie": ["Youtube"],
"info_dict": {
"id": "_pD1nR2qqPg",
"ext": "mp4",
"title": "Maiara e Maraisa - Você Faz Falta Aqui - DVD Ao Vivo Em Campo Grande",
"description": "md5:7043342c09a224598e93546e98e49282",
"upload_date": "20161107",
"uploader_id": "maiaramaraisaoficial",
"uploader": "Maiara e Maraisa",
},
}
]
_MUSIC_FIELDS = "youtubeID"
def _parse_music(self, music):
youtube_id = music["youtubeID"]
return self.url_result(youtube_id, "Youtube", youtube_id)
| 29.035503
| 133
| 0.525372
|
from __future__ import unicode_literals
import re
from ..compat import compat_str
from ..utils import int_or_none, str_or_none, try_get
from .common import InfoExtractor
class PalcoMP3BaseIE(InfoExtractor):
_GQL_QUERY_TMPL = """{
artist(slug: "%s") {
%s
}
}"""
_ARTIST_FIELDS_TMPL = """music(slug: "%%s") {
%s
}"""
_MUSIC_FIELDS = """duration
hls
mp3File
musicID
plays
title"""
def _call_api(self, artist_slug, artist_fields):
return self._download_json(
"https://www.palcomp3.com.br/graphql/",
artist_slug,
query={
"query": self._GQL_QUERY_TMPL % (artist_slug, artist_fields),
},
)["data"]
def _parse_music(self, music):
music_id = compat_str(music["musicID"])
title = music["title"]
formats = []
hls_url = music.get("hls")
if hls_url:
formats.append(
{
"url": hls_url,
"protocol": "m3u8_native",
"ext": "mp4",
}
)
mp3_file = music.get("mp3File")
if mp3_file:
formats.append(
{
"url": mp3_file,
}
)
return {
"id": music_id,
"title": title,
"formats": formats,
"duration": int_or_none(music.get("duration")),
"view_count": int_or_none(music.get("plays")),
}
def _real_initialize(self):
self._ARTIST_FIELDS_TMPL = self._ARTIST_FIELDS_TMPL % self._MUSIC_FIELDS
def _real_extract(self, url):
artist_slug, music_slug = re.match(self._VALID_URL, url).groups()
artist_fields = self._ARTIST_FIELDS_TMPL % music_slug
music = self._call_api(artist_slug, artist_fields)["artist"]["music"]
return self._parse_music(music)
class PalcoMP3IE(PalcoMP3BaseIE):
IE_NAME = "PalcoMP3:song"
_VALID_URL = (
r"https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<artist>[^/]+)/(?P<id>[^/?&#]+)"
)
_TESTS = [
{
"url": "https://www.palcomp3.com/maiaraemaraisaoficial/nossas-composicoes-cuida-bem-dela/",
"md5": "99fd6405b2d8fd589670f6db1ba3b358",
"info_dict": {
"id": "3162927",
"ext": "mp3",
"title": "Nossas Composições - CUIDA BEM DELA",
"duration": 210,
"view_count": int,
},
}
]
@classmethod
def suitable(cls, url):
return (
False
if PalcoMP3VideoIE.suitable(url)
else super(PalcoMP3IE, cls).suitable(url)
)
class PalcoMP3ArtistIE(PalcoMP3BaseIE):
IE_NAME = "PalcoMP3:artist"
_VALID_URL = r"https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<id>[^/?&#]+)"
_TESTS = [
{
"url": "https://www.palcomp3.com.br/condedoforro/",
"info_dict": {
"id": "358396",
"title": "Conde do Forró",
},
"playlist_mincount": 188,
}
]
_ARTIST_FIELDS_TMPL = """artistID
musics {
nodes {
%s
}
}
name"""
@classmethod
def suitable(cls, url):
return (
False
if re.match(PalcoMP3IE._VALID_URL, url)
else super(PalcoMP3ArtistIE, cls).suitable(url)
)
def _real_extract(self, url):
artist_slug = self._match_id(url)
artist = self._call_api(artist_slug, self._ARTIST_FIELDS_TMPL)["artist"]
def entries():
for music in try_get(artist, lambda x: x["musics"]["nodes"], list) or []:
yield self._parse_music(music)
return self.playlist_result(
entries(), str_or_none(artist.get("artistID")), artist.get("name")
)
class PalcoMP3VideoIE(PalcoMP3BaseIE):
IE_NAME = "PalcoMP3:video"
_VALID_URL = r"https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<artist>[^/]+)/(?P<id>[^/?&#]+)/?#clipe"
_TESTS = [
{
"url": "https://www.palcomp3.com/maiaraemaraisaoficial/maiara-e-maraisa-voce-faz-falta-aqui-ao-vivo-em-vicosa-mg/#clipe",
"add_ie": ["Youtube"],
"info_dict": {
"id": "_pD1nR2qqPg",
"ext": "mp4",
"title": "Maiara e Maraisa - Você Faz Falta Aqui - DVD Ao Vivo Em Campo Grande",
"description": "md5:7043342c09a224598e93546e98e49282",
"upload_date": "20161107",
"uploader_id": "maiaramaraisaoficial",
"uploader": "Maiara e Maraisa",
},
}
]
_MUSIC_FIELDS = "youtubeID"
def _parse_music(self, music):
youtube_id = music["youtubeID"]
return self.url_result(youtube_id, "Youtube", youtube_id)
| true
| true
|
f70a8cead3e6860b7b1976560adb6005e93da51d
| 11,829
|
py
|
Python
|
vissl/data/dataset_catalog.py
|
NKI-AI/vissl
|
ddf5a97572c6640438faabba1f91426028520c4b
|
[
"MIT"
] | null | null | null |
vissl/data/dataset_catalog.py
|
NKI-AI/vissl
|
ddf5a97572c6640438faabba1f91426028520c4b
|
[
"MIT"
] | null | null | null |
vissl/data/dataset_catalog.py
|
NKI-AI/vissl
|
ddf5a97572c6640438faabba1f91426028520c4b
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data and labels file for various datasets.
"""
import json
import logging
import os
from typing import List
import numpy as np
from fvcore.common.file_io import PathManager
from vissl.data.datasets import get_coco_imgs_labels_info, get_voc_images_labels_info
from vissl.utils.misc import get_json_data_catalog_file
from vissl.utils.slurm import get_slurm_dir
class VisslDatasetCatalog(object):
"""
A catalog that stores information about the datasets and how to obtain them.
It contains a mapping from strings (which are names that identify a dataset,
e.g. "imagenet1k") to a `dict` which contains:
1) mapping of various data splits (train, test, val) to the data source
(path on the disk whether a folder path or a filelist)
2) source of the data (disk_filelist | disk_folder)
The purpose of having this catalog is to make it easy to choose different datasets,
by just using the strings in the config.
"""
__REGISTERED_DATASETS = {}
@staticmethod
def register_json(json_catalog_path):
"""
Args:
filepath: a .json filepath that contains the data to be registered
"""
with PathManager.open(json_catalog_path) as fopen:
data_catalog = json.load(fopen)
for key, value in data_catalog.items():
VisslDatasetCatalog.register_data(key, value)
@staticmethod
def register_dict(dict_catalog):
"""
Args:
dict: a dict with a bunch of datasets to be registered
"""
for key, value in dict_catalog.items():
VisslDatasetCatalog.register_data(key, value)
@staticmethod
def register_data(name, data_dict):
"""
Args:
name (str): the name that identifies a dataset, e.g. "imagenet1k_folder".
func (callable): a callable which takes no arguments and returns a list of dicts.
It must return the same results if called multiple times.
"""
assert isinstance(
data_dict, dict
), "You must register a dictionary with VisslDatasetCatalog.register_dict"
assert (
name not in VisslDatasetCatalog.__REGISTERED_DATASETS
), "Dataset '{}' is already registered!".format(name)
VisslDatasetCatalog.__REGISTERED_DATASETS[name] = data_dict
@staticmethod
def get(name):
"""
Get the registered dict and return it.
Args:
name (str): the name that identifies a dataset, e.g. "imagenet1k".
Returns:
dict: dataset information (paths, source)
"""
try:
info = VisslDatasetCatalog.__REGISTERED_DATASETS[name]
except KeyError:
raise KeyError(
"Dataset '{}' is not registered! Available datasets are: {}".format(
name, ", ".join(VisslDatasetCatalog.__REGISTERED_DATASETS.keys())
)
)
return info
@staticmethod
def list() -> List[str]:
"""
List all registered datasets.
Returns:
list[str]
"""
return list(VisslDatasetCatalog.__REGISTERED_DATASETS.keys())
@staticmethod
def clear():
"""
        Remove all registered datasets.
"""
VisslDatasetCatalog.__REGISTERED_DATASETS.clear()
@staticmethod
def remove(name):
"""
Remove the dataset registered by ``name``.
"""
VisslDatasetCatalog.__REGISTERED_DATASETS.pop(name)
@staticmethod
def has_data(name):
"""
Check whether the data with ``name`` exists.
"""
data_found = name in VisslDatasetCatalog.__REGISTERED_DATASETS
return data_found
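# A minimal registration/lookup sketch (the dataset name and paths are
# hypothetical, used only to illustrate the catalog API above):
#
#     VisslDatasetCatalog.register_data(
#         "my_dataset_folder",
#         {"train": ["/data/train", "/data/train_labels.npy"]},
#     )
#     info = VisslDatasetCatalog.get("my_dataset_folder")  # -> the dict above
#     assert VisslDatasetCatalog.has_data("my_dataset_folder")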
def get_local_path(input_file, dest_dir):
"""
If user specified copying data to a local directory,
get the local path where the data files were copied.
- If input_file is just a file, we return the dest_dir/filename
    - If the input_file is a directory, then we check if the
      environment is SLURM (using slurm_dir) or otherwise dest_dir,
      and look up whether the copy_complete file is available.
      If available, we return the directory.
    - If both of the above fail, we return the input_file as is.
"""
out = ""
if PathManager.isfile(input_file):
out = os.path.join(dest_dir, os.path.basename(input_file))
elif PathManager.isdir(input_file):
data_name = input_file.strip("/").split("/")[-1]
if "SLURM_JOBID" in os.environ:
dest_dir = get_slurm_dir(dest_dir)
dest_dir = os.path.join(dest_dir, data_name)
complete_flag = os.path.join(dest_dir, "copy_complete")
if PathManager.isfile(complete_flag):
out = dest_dir
if PathManager.exists(out):
return out
else:
return input_file
def get_local_output_filepaths(input_files, dest_dir):
"""
If we have copied the files to local disk as specified in the config, we
return those local paths. Otherwise return the original paths.
"""
output_files = []
for item in input_files:
if isinstance(item, list):
out = get_local_output_filepaths(item, dest_dir)
else:
out = get_local_path(item, dest_dir)
output_files.append(out)
return output_files
def check_data_exists(data_files):
"""
Check that the input data files exist. If the data_files is a list,
we iteratively check for each file in the list.
"""
if isinstance(data_files, list):
return np.all([PathManager.exists(item) for item in data_files])
else:
return PathManager.exists(data_files)
def register_pascal_voc():
"""
Register PASCAL VOC 2007 and 2012 datasets to the data catalog.
    We first look up these dataset paths in the dataset catalog;
    if the paths exist, we register them, otherwise we remove the voc_data
from the catalog registry.
"""
voc_datasets = ["voc2007_folder", "voc2012_folder"]
for voc_data in voc_datasets:
data_info = VisslDatasetCatalog.get(voc_data)
data_folder = data_info["train"][0]
if PathManager.exists(data_folder):
train_data_info = get_voc_images_labels_info("train", data_folder)
test_data_info = get_voc_images_labels_info("val", data_folder)
data_info["train"] = train_data_info
data_info["val"] = test_data_info
VisslDatasetCatalog.remove(voc_data)
VisslDatasetCatalog.register_data(voc_data, data_info)
else:
VisslDatasetCatalog.remove(voc_data)
def register_coco():
"""
    Register COCO 2014 datasets to the data catalog.
    We first look up these dataset paths in the dataset catalog;
    if the paths exist, we register them, otherwise we remove the
coco2014_folder from the catalog registry.
"""
data_info = VisslDatasetCatalog.get("coco2014_folder")
data_folder = data_info["train"][0]
if PathManager.exists(data_folder):
train_data_info = get_coco_imgs_labels_info("train", data_folder)
test_data_info = get_coco_imgs_labels_info("val", data_folder)
data_info["train"] = train_data_info
data_info["val"] = test_data_info
VisslDatasetCatalog.remove("coco2014_folder")
VisslDatasetCatalog.register_data("coco2014_folder", data_info)
else:
VisslDatasetCatalog.remove("coco2014_folder")
def register_datasets(json_catalog_path):
"""
If the json dataset_catalog file is found, we register
the datasets specified in the catalog with VISSL.
    If the catalog also specifies VOC or COCO datasets, we register them.
Args:
json_catalog_path (str): the path to the json dataset catalog
"""
if PathManager.exists(json_catalog_path):
logging.info(f"Registering datasets: {json_catalog_path}")
VisslDatasetCatalog.clear()
VisslDatasetCatalog.register_json(json_catalog_path)
if VisslDatasetCatalog.has_data("voc2007_folder") or VisslDatasetCatalog.has_data(
"voc2012_folder"
):
register_pascal_voc()
if VisslDatasetCatalog.has_data("coco2014_folder"):
register_coco()
def get_data_files(split, dataset_config):
"""
Get the path to the dataset (images and labels).
1. If the user has explicitly specified the data_sources, we simply
use those and don't do lookup in the datasets registered with VISSL
from the dataset catalog.
2. If the user hasn't specified the path, look for the dataset in
the datasets catalog registered with VISSL. For a given list of datasets
and a given partition (train/test), we first verify that we have the
dataset and the correct source as specified by the user.
Then for each dataset in the list, we get the data path (make sure it
exists, sources match). For the label file, the file is optional.
Once we have the dataset original paths, we replace the path with the local paths
if the data was copied to local disk.
"""
assert len(dataset_config[split].DATASET_NAMES) == len(
dataset_config[split].DATA_SOURCES
), "len(data_sources) != len(dataset_names)"
if len(dataset_config[split].DATA_PATHS) > 0:
assert len(dataset_config[split].DATA_SOURCES) == len(
dataset_config[split].DATA_PATHS
), "len(data_sources) != len(data_paths)"
data_files, label_files = [], []
data_names = dataset_config[split].DATASET_NAMES
data_sources = dataset_config[split].DATA_SOURCES
data_split = "train" if split == "TRAIN" else "val"
for idx in range(len(data_sources)):
# if there are synthetic data sources, we set the filepaths as none
if data_sources[idx] == "synthetic":
data_files.append("")
continue
# if user has specified the data path explicitly, we use it
elif len(dataset_config[split].DATA_PATHS) > 0:
data_files.append(dataset_config[split].DATA_PATHS[idx])
# otherwise retrieve from the cataloag based on the dataset name
else:
data_info = VisslDatasetCatalog.get(data_names[idx])
assert (
len(data_info[data_split]) > 0
), f"data paths list for split: { data_split } is empty"
            assert check_data_exists(
                data_info[data_split][0]
            ), f"Some data files don't exist: {data_info[data_split][0]}"
data_files.append(data_info[data_split][0])
# labels are optional and hence we append if we find them
if len(dataset_config[split].LABEL_PATHS) > 0:
if check_data_exists(dataset_config[split].LABEL_PATHS[idx]):
label_files.append(dataset_config[split].LABEL_PATHS[idx])
else:
label_data_info = VisslDatasetCatalog.get(data_names[idx])
if check_data_exists(label_data_info[data_split][1]):
label_files.append(label_data_info[data_split][1])
output = [data_files, label_files]
if dataset_config[split].COPY_TO_LOCAL_DISK:
dest_dir = dataset_config[split]["COPY_DESTINATION_DIR"]
local_data_files = get_local_output_filepaths(data_files, dest_dir)
local_label_files = get_local_output_filepaths(label_files, dest_dir)
output = [local_data_files, local_label_files]
return output
# get the path to dataset_catalog.json file
json_catalog_file = get_json_data_catalog_file()
# register the datasets specified in the catalog with VISSL
register_datasets(json_catalog_file)
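# A sketch of the config shape get_data_files() expects (key names follow the
# accesses above; an attribute-style dict is assumed since both .KEY and
# ["KEY"] accesses appear, and the concrete values are hypothetical):
#
#     cfg_split = AttrDict({
#         "DATASET_NAMES": ["imagenet1k_folder"],
#         "DATA_SOURCES": ["disk_folder"],
#         "DATA_PATHS": [], "LABEL_PATHS": [],
#         "COPY_TO_LOCAL_DISK": False, "COPY_DESTINATION_DIR": "/tmp",
#     })
#     data_files, label_files = get_data_files("TRAIN", {"TRAIN": cfg_split})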
| 38.03537
| 93
| 0.668442
|
import json
import logging
import os
from typing import List
import numpy as np
from fvcore.common.file_io import PathManager
from vissl.data.datasets import get_coco_imgs_labels_info, get_voc_images_labels_info
from vissl.utils.misc import get_json_data_catalog_file
from vissl.utils.slurm import get_slurm_dir
class VisslDatasetCatalog(object):
__REGISTERED_DATASETS = {}
@staticmethod
def register_json(json_catalog_path):
with PathManager.open(json_catalog_path) as fopen:
data_catalog = json.load(fopen)
for key, value in data_catalog.items():
VisslDatasetCatalog.register_data(key, value)
@staticmethod
def register_dict(dict_catalog):
for key, value in dict_catalog.items():
VisslDatasetCatalog.register_data(key, value)
@staticmethod
def register_data(name, data_dict):
assert isinstance(
data_dict, dict
), "You must register a dictionary with VisslDatasetCatalog.register_dict"
assert (
name not in VisslDatasetCatalog.__REGISTERED_DATASETS
), "Dataset '{}' is already registered!".format(name)
VisslDatasetCatalog.__REGISTERED_DATASETS[name] = data_dict
@staticmethod
def get(name):
try:
info = VisslDatasetCatalog.__REGISTERED_DATASETS[name]
except KeyError:
raise KeyError(
"Dataset '{}' is not registered! Available datasets are: {}".format(
name, ", ".join(VisslDatasetCatalog.__REGISTERED_DATASETS.keys())
)
)
return info
@staticmethod
def list() -> List[str]:
return list(VisslDatasetCatalog.__REGISTERED_DATASETS.keys())
@staticmethod
def clear():
VisslDatasetCatalog.__REGISTERED_DATASETS.clear()
@staticmethod
def remove(name):
VisslDatasetCatalog.__REGISTERED_DATASETS.pop(name)
@staticmethod
def has_data(name):
data_found = name in VisslDatasetCatalog.__REGISTERED_DATASETS
return data_found
def get_local_path(input_file, dest_dir):
out = ""
if PathManager.isfile(input_file):
out = os.path.join(dest_dir, os.path.basename(input_file))
elif PathManager.isdir(input_file):
data_name = input_file.strip("/").split("/")[-1]
if "SLURM_JOBID" in os.environ:
dest_dir = get_slurm_dir(dest_dir)
dest_dir = os.path.join(dest_dir, data_name)
complete_flag = os.path.join(dest_dir, "copy_complete")
if PathManager.isfile(complete_flag):
out = dest_dir
if PathManager.exists(out):
return out
else:
return input_file
def get_local_output_filepaths(input_files, dest_dir):
output_files = []
for item in input_files:
if isinstance(item, list):
out = get_local_output_filepaths(item, dest_dir)
else:
out = get_local_path(item, dest_dir)
output_files.append(out)
return output_files
def check_data_exists(data_files):
if isinstance(data_files, list):
return np.all([PathManager.exists(item) for item in data_files])
else:
return PathManager.exists(data_files)
def register_pascal_voc():
voc_datasets = ["voc2007_folder", "voc2012_folder"]
for voc_data in voc_datasets:
data_info = VisslDatasetCatalog.get(voc_data)
data_folder = data_info["train"][0]
if PathManager.exists(data_folder):
train_data_info = get_voc_images_labels_info("train", data_folder)
test_data_info = get_voc_images_labels_info("val", data_folder)
data_info["train"] = train_data_info
data_info["val"] = test_data_info
VisslDatasetCatalog.remove(voc_data)
VisslDatasetCatalog.register_data(voc_data, data_info)
else:
VisslDatasetCatalog.remove(voc_data)
def register_coco():
data_info = VisslDatasetCatalog.get("coco2014_folder")
data_folder = data_info["train"][0]
if PathManager.exists(data_folder):
train_data_info = get_coco_imgs_labels_info("train", data_folder)
test_data_info = get_coco_imgs_labels_info("val", data_folder)
data_info["train"] = train_data_info
data_info["val"] = test_data_info
VisslDatasetCatalog.remove("coco2014_folder")
VisslDatasetCatalog.register_data("coco2014_folder", data_info)
else:
VisslDatasetCatalog.remove("coco2014_folder")
def register_datasets(json_catalog_path):
if PathManager.exists(json_catalog_path):
logging.info(f"Registering datasets: {json_catalog_path}")
VisslDatasetCatalog.clear()
VisslDatasetCatalog.register_json(json_catalog_path)
if VisslDatasetCatalog.has_data("voc2007_folder") or VisslDatasetCatalog.has_data(
"voc2012_folder"
):
register_pascal_voc()
if VisslDatasetCatalog.has_data("coco2014_folder"):
register_coco()
def get_data_files(split, dataset_config):
assert len(dataset_config[split].DATASET_NAMES) == len(
dataset_config[split].DATA_SOURCES
), "len(data_sources) != len(dataset_names)"
if len(dataset_config[split].DATA_PATHS) > 0:
assert len(dataset_config[split].DATA_SOURCES) == len(
dataset_config[split].DATA_PATHS
), "len(data_sources) != len(data_paths)"
data_files, label_files = [], []
data_names = dataset_config[split].DATASET_NAMES
data_sources = dataset_config[split].DATA_SOURCES
data_split = "train" if split == "TRAIN" else "val"
for idx in range(len(data_sources)):
if data_sources[idx] == "synthetic":
data_files.append("")
continue
elif len(dataset_config[split].DATA_PATHS) > 0:
data_files.append(dataset_config[split].DATA_PATHS[idx])
else:
data_info = VisslDatasetCatalog.get(data_names[idx])
assert (
len(data_info[data_split]) > 0
), f"data paths list for split: { data_split } is empty"
            assert check_data_exists(
                data_info[data_split][0]
            ), f"Some data files don't exist: {data_info[data_split][0]}"
data_files.append(data_info[data_split][0])
if len(dataset_config[split].LABEL_PATHS) > 0:
if check_data_exists(dataset_config[split].LABEL_PATHS[idx]):
label_files.append(dataset_config[split].LABEL_PATHS[idx])
else:
label_data_info = VisslDatasetCatalog.get(data_names[idx])
if check_data_exists(label_data_info[data_split][1]):
label_files.append(label_data_info[data_split][1])
output = [data_files, label_files]
if dataset_config[split].COPY_TO_LOCAL_DISK:
dest_dir = dataset_config[split]["COPY_DESTINATION_DIR"]
local_data_files = get_local_output_filepaths(data_files, dest_dir)
local_label_files = get_local_output_filepaths(label_files, dest_dir)
output = [local_data_files, local_label_files]
return output
json_catalog_file = get_json_data_catalog_file()
register_datasets(json_catalog_file)
| true
| true
|
f70a8d36f31dcd5350e51b402a11c45acf9c1b33
| 100
|
py
|
Python
|
Alys/src/alys/alys.py
|
PikaBlue107/alys-pronouns
|
ff86648bdc9a5bc82beaf5c007ad88be94961324
|
[
"MIT"
] | null | null | null |
Alys/src/alys/alys.py
|
PikaBlue107/alys-pronouns
|
ff86648bdc9a5bc82beaf5c007ad88be94961324
|
[
"MIT"
] | null | null | null |
Alys/src/alys/alys.py
|
PikaBlue107/alys-pronouns
|
ff86648bdc9a5bc82beaf5c007ad88be94961324
|
[
"MIT"
] | null | null | null |
'''
Created on Nov 20, 2019
@author: Melody Griesen
'''
if __name__ == '__main__':
pass
| 12.5
| 27
| 0.59
|
if __name__ == '__main__':
pass
| true
| true
|
f70a8d437ab6062a2810b247f87863917ccd942b
| 4,412
|
py
|
Python
|
dags/python_scripts/load_staging_genre.py
|
jrderek/Movie_Analytics-Data-Engineering-
|
9789b2d4a13964b93f7f99b010137e9c4e6cc807
|
[
"MIT"
] | null | null | null |
dags/python_scripts/load_staging_genre.py
|
jrderek/Movie_Analytics-Data-Engineering-
|
9789b2d4a13964b93f7f99b010137e9c4e6cc807
|
[
"MIT"
] | null | null | null |
dags/python_scripts/load_staging_genre.py
|
jrderek/Movie_Analytics-Data-Engineering-
|
9789b2d4a13964b93f7f99b010137e9c4e6cc807
|
[
"MIT"
] | null | null | null |
import sys
import os
from datetime import datetime
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.types import (StructType, StructField as Fld, DoubleType as Dbl,
IntegerType as Int, DateType as Date,
BooleanType as Boolean, FloatType as Float,
LongType as Long, StringType as String,
ArrayType as Array)
from pyspark.sql.functions import (col, year, month, dayofmonth, weekofyear, quarter,
explode, from_json)
def create_spark_session(aws_key, aws_secret_key):
"""
Description: Creates spark session.
Returns:
spark session object
"""
spark = SparkSession \
.builder \
.config("spark.executor.heartbeatInterval", "40s") \
.getOrCreate()
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.impl",
"org.apache.hadoop.fs.s3a.S3AFileSystem")
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.access.key", aws_key)
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.secret.key", aws_secret_key)
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.endpoint", "s3.amazonaws.com")
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.connection.timeout", "100")
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.connection.maximum", "5000")
spark.conf.set("spark.sql.shuffle.partitions", 4)
return spark
def format_datetime(ts):
return datetime.fromtimestamp(ts/1000.0)
if __name__ == "__main__":
s3_bucket = sys.argv[1]
s3_key = sys.argv[2]
aws_key = sys.argv[3]
aws_secret_key = sys.argv[4]
redshift_conn_string = sys.argv[5]
db_user = sys.argv[6]
db_pass = sys.argv[7]
spark = create_spark_session(aws_key, aws_secret_key)
movies_schema = StructType([
Fld("adult", String()),
Fld("belongs_to_collection", Long()),
Fld("budget", Long()),
Fld("genres", String()),
Fld("homepage", String()),
Fld("id", Int()),
Fld("imdb_id", String()),
Fld("original_language", String()),
Fld("original_title", String()),
Fld("overview", String()),
Fld("popularity", Dbl()),
Fld("poster_path", String()),
Fld("production_company", String()),
Fld("production_country", String()),
Fld("release_date", Date()),
Fld("revenue", Long()),
Fld("runtime", Float()),
Fld("spoken_languages", String()),
Fld("status", String()),
Fld("tagline", String()),
Fld("title", String()),
Fld("video", Boolean()),
Fld("vote_average", Float()),
Fld("vote_count", Int())
])
movies_df = spark.read.option("header", "true") \
.csv("s3a://{}/{}/movies_metadata.csv".format(s3_bucket, s3_key),
schema=movies_schema)
genre_schema = Array(StructType([Fld("id", Int()), Fld("name", String())]))
movies_df = movies_df.withColumn("genres", explode(from_json("genres", genre_schema))) \
.withColumn("genre_id", col("genres.id")) \
.withColumn("genre_name", col("genres.name")) \
movie_genre = movies_df.select("id", "genre_id").distinct()
movie_genre = movie_genre.select(col("id").alias("movie_id"), col("genre_id"))
genre = movies_df.select("genre_id", "genre_name").distinct()
genre = genre.na.drop()
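    # The "genres" column in the source metadata CSV is a JSON-encoded list of
    # {"id": ..., "name": ...} objects, e.g. (illustrative value):
    #   [{"id": 16, "name": "Animation"}, {"id": 35, "name": "Comedy"}]
    # hence the from_json() parse and explode() flattening above.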
# Load data into staging:
genre.write \
.format("jdbc") \
.option("url", redshift_conn_string) \
.option("dbtable", "movies.stage_genre") \
.option("user", sys.argv[6]) \
.option("password", sys.argv[7]) \
.option("driver", "com.amazon.redshift.jdbc42.Driver") \
.mode("append") \
.save()
movie_genre.write \
.format("jdbc") \
.option("url", redshift_conn_string) \
.option("dbtable", "movies.stage_movie_genre") \
.option("user", sys.argv[6]) \
.option("password", sys.argv[7]) \
.option("driver", "com.amazon.redshift.jdbc42.Driver") \
.mode("append") \
.save()
| 38.365217
| 95
| 0.577289
|
import sys
import os
from datetime import datetime
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.types import (StructType, StructField as Fld, DoubleType as Dbl,
IntegerType as Int, DateType as Date,
BooleanType as Boolean, FloatType as Float,
LongType as Long, StringType as String,
ArrayType as Array)
from pyspark.sql.functions import (col, year, month, dayofmonth, weekofyear, quarter,
explode, from_json)
def create_spark_session(aws_key, aws_secret_key):
spark = SparkSession \
.builder \
.config("spark.executor.heartbeatInterval", "40s") \
.getOrCreate()
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.impl",
"org.apache.hadoop.fs.s3a.S3AFileSystem")
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.access.key", aws_key)
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.secret.key", aws_secret_key)
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.endpoint", "s3.amazonaws.com")
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.connection.timeout", "100")
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.connection.maximum", "5000")
spark.conf.set("spark.sql.shuffle.partitions", 4)
return spark
def format_datetime(ts):
return datetime.fromtimestamp(ts/1000.0)
if __name__ == "__main__":
s3_bucket = sys.argv[1]
s3_key = sys.argv[2]
aws_key = sys.argv[3]
aws_secret_key = sys.argv[4]
redshift_conn_string = sys.argv[5]
db_user = sys.argv[6]
db_pass = sys.argv[7]
spark = create_spark_session(aws_key, aws_secret_key)
movies_schema = StructType([
Fld("adult", String()),
Fld("belongs_to_collection", Long()),
Fld("budget", Long()),
Fld("genres", String()),
Fld("homepage", String()),
Fld("id", Int()),
Fld("imdb_id", String()),
Fld("original_language", String()),
Fld("original_title", String()),
Fld("overview", String()),
Fld("popularity", Dbl()),
Fld("poster_path", String()),
Fld("production_company", String()),
Fld("production_country", String()),
Fld("release_date", Date()),
Fld("revenue", Long()),
Fld("runtime", Float()),
Fld("spoken_languages", String()),
Fld("status", String()),
Fld("tagline", String()),
Fld("title", String()),
Fld("video", Boolean()),
Fld("vote_average", Float()),
Fld("vote_count", Int())
])
movies_df = spark.read.option("header", "true") \
.csv("s3a://{}/{}/movies_metadata.csv".format(s3_bucket, s3_key),
schema=movies_schema)
genre_schema = Array(StructType([Fld("id", Int()), Fld("name", String())]))
movies_df = movies_df.withColumn("genres", explode(from_json("genres", genre_schema))) \
.withColumn("genre_id", col("genres.id")) \
.withColumn("genre_name", col("genres.name")) \
movie_genre = movies_df.select("id", "genre_id").distinct()
movie_genre = movie_genre.select(col("id").alias("movie_id"), col("genre_id"))
genre = movies_df.select("genre_id", "genre_name").distinct()
genre = genre.na.drop()
genre.write \
.format("jdbc") \
.option("url", redshift_conn_string) \
.option("dbtable", "movies.stage_genre") \
.option("user", sys.argv[6]) \
.option("password", sys.argv[7]) \
.option("driver", "com.amazon.redshift.jdbc42.Driver") \
.mode("append") \
.save()
movie_genre.write \
.format("jdbc") \
.option("url", redshift_conn_string) \
.option("dbtable", "movies.stage_movie_genre") \
.option("user", sys.argv[6]) \
.option("password", sys.argv[7]) \
.option("driver", "com.amazon.redshift.jdbc42.Driver") \
.mode("append") \
.save()
| true
| true
|
f70a8ee941ed8c8f91318a8c247810c74106e4af
| 8,951
|
py
|
Python
|
scripts/enip-logix/gen_pull.py
|
Vadoola/pulr
|
d276b94b4ffcc7381b661654cc004c5b8ebc2776
|
[
"Apache-2.0"
] | 13
|
2020-08-28T17:20:23.000Z
|
2022-02-03T06:23:51.000Z
|
scripts/enip-logix/gen_pull.py
|
Vadoola/pulr
|
d276b94b4ffcc7381b661654cc004c5b8ebc2776
|
[
"Apache-2.0"
] | 1
|
2021-05-06T10:43:42.000Z
|
2021-05-12T13:21:19.000Z
|
scripts/enip-logix/gen_pull.py
|
Vadoola/pulr
|
d276b94b4ffcc7381b661654cc004c5b8ebc2776
|
[
"Apache-2.0"
] | 3
|
2020-09-02T08:10:12.000Z
|
2021-05-06T03:37:57.000Z
|
#!/usr/bin/env python3
"""
Generates Pulr "pull" config section from JSON, created with fetch-tags.py
"""
import sys
import argparse
from textwrap import dedent
try:
import rapidjson as json
except ImportError:
import json
import yaml
DEFAULT_FREQ = 1
DEFAULT_PATH = '1,0'
DEFAULT_CPU = 'LGX'
DEFAULT_TIMEOUT = 2
def generate(tag_list,
tag_file=None,
tag_data=None,
exclude=None,
config=None,
id_prefix='',
id_suffix='',
print_stats=False,
print_config=False):
def find_tag_in_struct(tag, data):
if '.' in tag:
tag_to_find, rest = tag.split('.', 1)
else:
tag_to_find = tag
rest = None
t = data[tag_to_find]
if rest is None:
return t
else:
if t['tag_type'] != 'struct':
raise ValueError(f'{tag_to_find} is not a struct!')
return find_tag_in_struct(
rest,
t['data_type']['internal_tags'],
)
def find_tag(tag, data):
if '.' in tag:
tag_to_find, rest = tag.split('.', 1)
else:
tag_to_find = tag
rest = None
for t in data:
if t['tag_name'] == tag_to_find:
if rest is None:
return t
else:
if t['tag_type'] != 'struct':
raise ValueError(f'{tag_to_find} is not a struct!')
else:
return find_tag_in_struct(
rest, t['data_type']['internal_tags'])
if tag_data is None:
if tag_file:
with open(tag_file) as fh:
tags = json.loads(fh.read())
else:
tags = json.loads(sys.stdin.read())
else:
tags = tag_data
DATA_TYPES = {
'BOOL': 'uint8',
'BYTE': 'byte',
'WORD': 'word',
'DWORD': 'dword',
'LWORD': 'qword',
'SINT': 'sint8',
'USINT': 'uint8',
'INT': 'sint16',
'UINT': 'uint16',
'DINT': 'sint32',
'UDINT': 'uint32',
'LINT': 'sint64',
'ULINT': 'uint64',
'REAL': 'real32',
'LREAL': 'real64'
}
DATA_TYPE_SIZE = {
'BOOL': 1,
'BYTE': 1,
'WORD': 2,
'DWORD': 4,
'LWORD': 8,
'SINT': 1,
'USINT': 1,
'INT': 2,
'UINT': 2,
'DINT': 4,
'UDINT': 4,
'LINT': 8,
'ULINT': 8,
'REAL': 4,
'LREAL': 8
}
def gen_offset(o1, o2, int_if_possible=False):
if o1:
o = f'{o1}+{o2}'
else:
o = o2 if int_if_possible else f'{o2}'
return o
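    # e.g. gen_offset(0, 4, int_if_possible=True) -> 4, while gen_offset('8', 4)
    # -> '8+4'; parse_offset() below folds the '+'-joined form back to an int.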
def add_tag_info(tag_name, tag_data, coll, offset=0, base_offset=0):
nonlocal tags_count
if exclude:
for x in exclude:
if x.startswith('*'):
if tag_name.endswith(x[1:]):
return
elif x.endswith('*'):
if tag_name.startswith(x[:-1]):
return
else:
if tag_name == x:
return
arr = tag_data.get('array', 0)
if arr:
for aofs in range(0, arr):
tags_count += 1
coll.append({
'offset':
gen_offset(base_offset,
offset +
aofs * DATA_TYPE_SIZE[tag_data['data_type']],
int_if_possible=True),
'set-id':
f'{id_prefix}{tag_name}{id_suffix}[{aofs}]',
'type':
DATA_TYPES[tag_data['data_type']]
})
else:
tags_count += 1
coll.append({
'offset': gen_offset(base_offset, offset, int_if_possible=True),
'set-id': f'{id_prefix}{tag_name}{id_suffix}',
'type': DATA_TYPES[tag_data['data_type']]
})
tags_count = 0
pulls = []
def parse_offset(offset):
if isinstance(offset, int):
return offset
elif '+' in offset:
o = offset.split('+')
result = 0
for i in o:
                result += int(i)
return result
else:
return int(offset)
def gen_process(data, offset, tag_name, result=[]):
for tag, d in data.items():
if d['tag_type'] == 'struct':
if d['array'] == 0:
gen_process(d['data_type']['internal_tags'],
gen_offset(offset, d['offset']),
tag_name + '.' + tag, result)
else:
for aofs in range(0, d['array']):
gen_process(
d['data_type']['internal_tags'],
gen_offset(
parse_offset(offset) + aofs *
d['data_type']['template']['structure_size'],
d['offset']), f'{tag_name}.{tag}[{aofs}]',
result)
else:
add_tag_info(f'{tag_name}.{tag}',
d,
result,
offset=d['offset'],
base_offset=offset)
return result
for TAG in tag_list:
data = find_tag(TAG, tags)
if data is None:
raise ValueError(f'Tag not found: {TAG}')
if data['tag_type'] == 'struct':
pulls.append({
'1tag':
TAG,
'process':
gen_process(data['data_type']['internal_tags'], 0, TAG, [])
})
else:
result = []
add_tag_info(TAG, data, result)
pulls.append({'1tag': TAG, 'process': result})
CFG = ''
if config:
CFG += dedent(f"""
version: 2
timeout: {config.get("timeout", DEFAULT_TIMEOUT)}
freq: {config.get("freq", DEFAULT_FREQ)}
proto:
name: enip/ab_eip
source: {config["source"]}
path: {config.get("path", DEFAULT_PATH)}
cpu: {config.get("cpu", DEFAULT_CPU)}
""").lstrip()
CFG += yaml.dump(dict(pull=pulls),
default_flow_style=False).replace('\n- 1tag', '\n- tag')
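    # Note: '1tag' is a key-ordering trick - yaml.dump sorts keys
    # alphabetically, so '1tag' lands before 'process'; the replace() above
    # then renames it back to 'tag' in the emitted YAML.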
if print_config:
print(CFG)
if print_stats:
print(f'{tags_count} tag(s) generated', file=sys.stderr)
return CFG
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('tag',
metavar='TAG',
help='Tags to parse (comma separated)')
ap.add_argument('-F',
'--tag_file',
metavar='FILE',
help='JSON tags file (default: stdin)')
ap.add_argument('-s',
'--source',
metavar='ADDR',
                    help='PLC IP[:port] (full config is generated if defined)')
ap.add_argument(
'-x',
'--exclude',
metavar='TAGS',
help='Tags to exclude (comma separated, star masks possible)')
ap.add_argument('-f',
'--freq',
metavar='HERZ',
help='Pull frequency',
default=DEFAULT_FREQ,
type=int)
ap.add_argument('--path',
metavar='PATH',
help='PLC path',
default=DEFAULT_PATH)
ap.add_argument('--cpu', metavar='CPU', help='CPU', default=DEFAULT_CPU)
ap.add_argument('--timeout',
metavar='SEC',
help='PLC TIMEOUT',
type=float,
default=DEFAULT_TIMEOUT)
ap.add_argument('--id-prefix',
metavar='VALUE',
help='ID prefix',
default='')
ap.add_argument('--id-suffix',
metavar='VALUE',
help='ID suffix',
default='')
a = ap.parse_args()
if a.source:
config = dict(source=a.source,
freq=a.freq,
path=a.path,
cpu=a.cpu,
timeout=a.timeout)
else:
config = None
generate(tag_file=a.tag_file,
tag_list=a.tag.split(','),
config=config,
exclude=a.exclude.split(',') if a.exclude else None,
id_prefix=a.id_prefix,
id_suffix=a.id_suffix,
print_stats=True,
print_config=True)
| 28.597444
| 80
| 0.431013
|
import sys
import argparse
from textwrap import dedent
try:
import rapidjson as json
except ImportError:
import json
import yaml
DEFAULT_FREQ = 1
DEFAULT_PATH = '1,0'
DEFAULT_CPU = 'LGX'
DEFAULT_TIMEOUT = 2
def generate(tag_list,
tag_file=None,
tag_data=None,
exclude=None,
config=None,
id_prefix='',
id_suffix='',
print_stats=False,
print_config=False):
def find_tag_in_struct(tag, data):
if '.' in tag:
tag_to_find, rest = tag.split('.', 1)
else:
tag_to_find = tag
rest = None
t = data[tag_to_find]
if rest is None:
return t
else:
if t['tag_type'] != 'struct':
raise ValueError(f'{tag_to_find} is not a struct!')
return find_tag_in_struct(
rest,
t['data_type']['internal_tags'],
)
def find_tag(tag, data):
if '.' in tag:
tag_to_find, rest = tag.split('.', 1)
else:
tag_to_find = tag
rest = None
for t in data:
if t['tag_name'] == tag_to_find:
if rest is None:
return t
else:
if t['tag_type'] != 'struct':
raise ValueError(f'{tag_to_find} is not a struct!')
else:
return find_tag_in_struct(
rest, t['data_type']['internal_tags'])
if tag_data is None:
if tag_file:
with open(tag_file) as fh:
tags = json.loads(fh.read())
else:
tags = json.loads(sys.stdin.read())
else:
tags = tag_data
DATA_TYPES = {
'BOOL': 'uint8',
'BYTE': 'byte',
'WORD': 'word',
'DWORD': 'dword',
'LWORD': 'qword',
'SINT': 'sint8',
'USINT': 'uint8',
'INT': 'sint16',
'UINT': 'uint16',
'DINT': 'sint32',
'UDINT': 'uint32',
'LINT': 'sint64',
'ULINT': 'uint64',
'REAL': 'real32',
'LREAL': 'real64'
}
DATA_TYPE_SIZE = {
'BOOL': 1,
'BYTE': 1,
'WORD': 2,
'DWORD': 4,
'LWORD': 8,
'SINT': 1,
'USINT': 1,
'INT': 2,
'UINT': 2,
'DINT': 4,
'UDINT': 4,
'LINT': 8,
'ULINT': 8,
'REAL': 4,
'LREAL': 8
}
def gen_offset(o1, o2, int_if_possible=False):
if o1:
o = f'{o1}+{o2}'
else:
o = o2 if int_if_possible else f'{o2}'
return o
def add_tag_info(tag_name, tag_data, coll, offset=0, base_offset=0):
nonlocal tags_count
if exclude:
for x in exclude:
if x.startswith('*'):
if tag_name.endswith(x[1:]):
return
elif x.endswith('*'):
if tag_name.startswith(x[:-1]):
return
else:
if tag_name == x:
return
arr = tag_data.get('array', 0)
if arr:
for aofs in range(0, arr):
tags_count += 1
coll.append({
'offset':
gen_offset(base_offset,
offset +
aofs * DATA_TYPE_SIZE[tag_data['data_type']],
int_if_possible=True),
'set-id':
f'{id_prefix}{tag_name}{id_suffix}[{aofs}]',
'type':
DATA_TYPES[tag_data['data_type']]
})
else:
tags_count += 1
coll.append({
'offset': gen_offset(base_offset, offset, int_if_possible=True),
'set-id': f'{id_prefix}{tag_name}{id_suffix}',
'type': DATA_TYPES[tag_data['data_type']]
})
tags_count = 0
pulls = []
def parse_offset(offset):
if isinstance(offset, int):
return offset
elif '+' in offset:
o = offset.split('+')
result = 0
for i in o:
                result += int(i)
return result
else:
return int(offset)
def gen_process(data, offset, tag_name, result=[]):
for tag, d in data.items():
if d['tag_type'] == 'struct':
if d['array'] == 0:
gen_process(d['data_type']['internal_tags'],
gen_offset(offset, d['offset']),
tag_name + '.' + tag, result)
else:
for aofs in range(0, d['array']):
gen_process(
d['data_type']['internal_tags'],
gen_offset(
parse_offset(offset) + aofs *
d['data_type']['template']['structure_size'],
d['offset']), f'{tag_name}.{tag}[{aofs}]',
result)
else:
add_tag_info(f'{tag_name}.{tag}',
d,
result,
offset=d['offset'],
base_offset=offset)
return result
for TAG in tag_list:
data = find_tag(TAG, tags)
if data is None:
raise ValueError(f'Tag not found: {TAG}')
if data['tag_type'] == 'struct':
pulls.append({
'1tag':
TAG,
'process':
gen_process(data['data_type']['internal_tags'], 0, TAG, [])
})
else:
result = []
add_tag_info(TAG, data, result)
pulls.append({'1tag': TAG, 'process': result})
CFG = ''
if config:
CFG += dedent(f"""
version: 2
timeout: {config.get("timeout", DEFAULT_TIMEOUT)}
freq: {config.get("freq", DEFAULT_FREQ)}
proto:
name: enip/ab_eip
source: {config["source"]}
path: {config.get("path", DEFAULT_PATH)}
cpu: {config.get("cpu", DEFAULT_CPU)}
""").lstrip()
CFG += yaml.dump(dict(pull=pulls),
default_flow_style=False).replace('\n- 1tag', '\n- tag')
if print_config:
print(CFG)
if print_stats:
print(f'{tags_count} tag(s) generated', file=sys.stderr)
return CFG
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('tag',
metavar='TAG',
help='Tags to parse (comma separated)')
ap.add_argument('-F',
'--tag_file',
metavar='FILE',
help='JSON tags file (default: stdin)')
ap.add_argument('-s',
'--source',
metavar='ADDR',
                    help='PLC IP[:port] (full config is generated if defined)')
ap.add_argument(
'-x',
'--exclude',
metavar='TAGS',
help='Tags to exclude (comma separated, star masks possible)')
ap.add_argument('-f',
'--freq',
metavar='HERZ',
help='Pull frequency',
default=DEFAULT_FREQ,
type=int)
ap.add_argument('--path',
metavar='PATH',
help='PLC path',
default=DEFAULT_PATH)
ap.add_argument('--cpu', metavar='CPU', help='CPU', default=DEFAULT_CPU)
ap.add_argument('--timeout',
metavar='SEC',
help='PLC TIMEOUT',
type=float,
default=DEFAULT_TIMEOUT)
ap.add_argument('--id-prefix',
metavar='VALUE',
help='ID prefix',
default='')
ap.add_argument('--id-suffix',
metavar='VALUE',
help='ID suffix',
default='')
a = ap.parse_args()
if a.source:
config = dict(source=a.source,
freq=a.freq,
path=a.path,
cpu=a.cpu,
timeout=a.timeout)
else:
config = None
generate(tag_file=a.tag_file,
tag_list=a.tag.split(','),
config=config,
exclude=a.exclude.split(',') if a.exclude else None,
id_prefix=a.id_prefix,
id_suffix=a.id_suffix,
print_stats=True,
print_config=True)
| true
| true
|
f70a9017583af2e35cffeb78d26cdec6a68df2ec
| 9,648
|
py
|
Python
|
makeReadmeMD.py
|
freehackquest/2016-tasks
|
3d4a1525213d9ef106bcfa8c5c6e33938489366d
|
[
"MIT"
] | null | null | null |
makeReadmeMD.py
|
freehackquest/2016-tasks
|
3d4a1525213d9ef106bcfa8c5c6e33938489366d
|
[
"MIT"
] | null | null | null |
makeReadmeMD.py
|
freehackquest/2016-tasks
|
3d4a1525213d9ef106bcfa8c5c6e33938489366d
|
[
"MIT"
] | 1
|
2019-01-22T18:05:26.000Z
|
2019-01-22T18:05:26.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import os
import sys
import os.path
import re
from pprint import pprint
from subprocess import Popen, PIPE
readme = open('README.md', 'w')
readme.write("# Free Hack Quest 2016\n")
def getListOfDirsWithTasks():
result = []
dirs = os.listdir('./');
for d in dirs:
print(d);
if os.path.isdir(d):
subdirs = os.listdir('./' + d)
subdirs.sort()
for sd in subdirs:
path = './' + d + '/' + sd
if os.path.isdir(path):
if os.path.isfile(path + '/main.json'):
result.append(path)
print("Found: " + path);
return result
dirs = getListOfDirsWithTasks();
dirs.sort()
game_name = 'Free Hack Quest 2016'
stat_tasks = []
table_tasks = []
errors = {}
def append_errors(path, text):
if path not in errors:
errors[path] = []
errors[path].append(text)
possible_categories = ["admin", "web", "pwn", "crypto", "forensic", "misc", "ppc", "recon", "reverse", "stego"]
def detectEncoding(path):
p = Popen(['file', '-i', path], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
pattern = re.compile('.*charset=(.*).*')
m = pattern.match(output)
if m:
return m.group(1)
return 'unknown'
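# `file -i` prints e.g. "foo.json: text/plain; charset=utf-8" (illustrative),
# which the charset regex above extracts; anything unparsable yields 'unknown'.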
def parseAuthor(path):
author = ''
with open(path) as f:
content = ''.join(f.readlines())
content = content.replace('\r', '')
content = content.replace('\n', '')
content = content.replace('\t', '')
pattern = re.compile('.*"nick"[ ]*\:[ ]*"([A-Z-a-z@!._]*)".*')
m = pattern.match(content)
if m:
author = m.group(1)
contacts = []
pattern = re.compile('.*"contacts"[ ]*\:[ ]*\[[ ]*"([A-Z-a-z@/!._]*)"[ ]*,[ ]*"([A-Z-a-z@/!._]*)".*')
m = pattern.match(content)
if m:
contacts.append(m.group(1));
contacts.append(m.group(2));
return author + '(' + ', '.join(contacts) + ')'
def appendStatCat(category, value):
for cat in stat_tasks:
if cat['category'] == category:
cat['count'] = cat['count'] + 1
cat['value'] = cat['value'] + value
return
stat_tasks.append({'category': category, 'count': 1, 'value': value})
def checkWriteUpFile(folder):
path = folder + '/WRITEUP.md'
if not os.path.isfile(path):
append_errors(folder, 'Missing file WRITEUP.md')
def getCategoryFromTask(data, folder):
category = 'unknown'
if 'category' not in data:
append_errors(folder, 'main.json: Missing field "category"')
else:
category = data['category']
if category not in possible_categories:
append_errors(folder, 'main.json: Field "category" has wrong value')
return category;
def getStatusFromTask(data, folder):
status = 'need verify'
if 'status' not in data:
append_errors(folder, 'main.json: Missing field "status"')
else:
status = data['status']
return status;
def getValueFromTask(data, folder):
value = 0
if 'value' not in data:
append_errors(folder, 'main.json: Missing field "value"')
else:
value = data['value']
if value == 0:
append_errors(folder, 'main.json: Task has 0 value')
return value
def getDescriptionFromTask(data, folder):
description = {"RU" : "", "EN": ""}
if 'name' not in data:
append_errors(folder, 'main.json: Missing field "name"')
else:
description = data['description']
if 'RU' not in description:
append_errors(folder, 'main.json: Missing subfield description "RU"')
else:
if description["RU"] == "":
append_errors(folder, 'main.json: Empty field in description "RU"')
if 'EN' not in description:
append_errors(folder, 'main.json: Missing subfield description "EN"')
else:
if description["EN"] == "":
append_errors(folder, 'main.json: Empty field in description "EN"')
return description
def getAuthorsFromTask(data, path):
authors = []
if 'authors' not in data:
append_errors(path, 'main.json: Missing field "authors"')
else:
if not isinstance(data['authors'], list):
append_errors(path, 'main.json: Field "authors" must be list')
else:
authors_ = data['authors']
for author in authors_:
name = ""
team = ""
contacts = []
if "name" not in author:
append_errors(path, 'main.json: Missing subfield author "name"')
else:
name = author["name"]
if name == "":
append_errors(path, 'main.json: Subfield author "name" is empty')
if "team" not in author:
append_errors(path, 'main.json: Missing subfield author "team"')
else:
team = author["team"]
if team == "":
append_errors(path, 'main.json: Subfield author "team" is empty')
if "contacts" not in author:
append_errors(path, 'main.json: Missing subfield author "contacts"')
else:
if not isinstance(author['contacts'], list):
append_errors(path, 'main.json: Subfield author "contacts" must be list')
else:
for c in author['contacts']:
if c == "":
append_errors(path, 'main.json: Empty field in author "contacts"')
else:
contacts.append(c);
contacts = ', '.join(contacts)
if contacts == "":
append_errors(path, 'main.json: Missing data in subfield authors "contacts"')
authors.append('[' + team + '] ' + name + ' (' + contacts + ')')
return authors
def getNameFromTask(data, folder):
    name = folder
if 'name' not in data:
append_errors(folder, 'main.json: Missing field "name"')
else:
name = data['name']
if name == "":
append_errors(folder, 'main.json: Field "name" is empty')
dirname = folder.split("/")[-1];
if name != dirname:
append_errors(folder, 'main.json: Field "name" has wrong value must like dirname "' + dirname + '" be "' + folder + '"')
return name
def getFlagKeyFromTask(data, folder):
flag_key = ''
if 'flag_key' not in data:
append_errors(path, 'main.json: Missing field "flag_key"')
else:
flag_key = data['flag_key']
    pattern = re.compile(r'FHQ\(.*\)')
    pattern2 = re.compile(r'FHQ\{.*\}')
m = pattern.match(flag_key)
m2 = pattern2.match(flag_key)
if flag_key == "":
append_errors(folder, 'main.json: Field "flag_key" is empty')
elif not m and not m2:
        append_errors(folder, 'main.json: Wrong value of field "flag_key", expected format "FHQ(md5)" or "FHQ{sometext}"')
return flag_key
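# Note (added, illustrative): flags accepted by the patterns above look like
#   FHQ(d41d8cd98f00b204e9800998ecf8427e)   or   FHQ{sometext}
# (the md5 digest shown is only an example value).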
def getGameFromTask(data, folder):
game = ''
if 'game' not in data:
append_errors(folder, 'main.json: Missing field "game"')
else:
game = data['game']
if game != game_name:
append_errors(folder, 'main.json: Wrong game name "' + game + '" Please change to "' + game_name + '"')
return game
def getHintsFromTask(data, folder):
hints = []
if 'hints' not in data:
append_errors(d, 'main.json: Missing field "hints"')
else:
if not isinstance(data['hints'], list):
append_errors(d, 'main.json: Field "hints" must be list')
else:
hints = data['hints']
for hint in hints:
if 'RU' not in hint:
append_errors(folder, 'main.json: Missing subfield hint "RU"')
else:
if hint["RU"] == "":
append_errors(folder, 'main.json: Empty field in hint "RU"')
if 'EN' not in hint:
append_errors(folder, 'main.json: Missing subfield hint "EN"')
else:
if hint["EN"] == "":
append_errors(folder, 'main.json: Empty field in hint "EN"')
return hints;
for d in dirs:
path = d + '/main.json'
#encoding = detectEncoding(path);
if os.path.isfile(path):
try:
checkWriteUpFile(d);
with open(path) as main_json:
data = json.load(main_json)
category = getCategoryFromTask(data, d)
value = getValueFromTask(data, d)
status = getStatusFromTask(data, d);
authors = getAuthorsFromTask(data, d)
name = getNameFromTask(data, d)
getDescriptionFromTask(data, d)
getFlagKeyFromTask(data, d)
appendStatCat(category, value);
table_tasks.append({
'category': category,
'value': value,
'name': name,
'path': d,
'status': status,
'authors': ', '.join(authors) } )
getGameFromTask(data, d)
getHintsFromTask(data, d)
except Exception:
status = ''
encoding = detectEncoding(path);
if encoding != 'utf-8':
status = encoding
append_errors(path, 'Wrong encoding in "' + path + '", expected "utf-8", got "' + encoding + '"')
author = parseAuthor(path);
# print sys.exc_info()
table_tasks.append({'category': 'unknown', 'value': 0, 'name': d, 'status': 'invalid json', 'authors': author } )
appendStatCat('unknown', 0);
readme.write("\n## Short list of tasks\n\n")
for row in table_tasks:
readme.write(' * ' + row['category'] + ' ' + str(row['value']) + ' "' + row['name'] + '" by ' + row['authors'] + "\n")
if len(errors) > 0:
readme.write("\n\n## Errors\n\n")
for path in errors:
print(' * ' + path)
readme.write(' * ' + path + "\n")
for e in errors[path]:
print("\t * " + e)
readme.write('\t * ' + e + "\n")
readme.write("\n## Statistics by categories\n\n")
readme.write("|Category|Count|Summary value\n")
readme.write("|---|---|---\n")
stat_tasks.sort(key=lambda x: x['category'])
tasks_count_all=0
tasks_value_all=0
for cat in stat_tasks:
readme.write("|" + cat['category'] + "|" + str(cat['count']) + "|" + str(cat['value']) + "\n")
tasks_count_all = tasks_count_all + cat['count'];
tasks_value_all = tasks_value_all + cat['value'];
readme.write("|All|" + str(tasks_count_all) + "|" + str(tasks_value_all) + "\n")
# sort table
table_tasks.sort(key=lambda x: x['category'] + ' ' + str(x['value']).zfill(4))
readme.write("\n\n## Status table\n\n")
readme.write("|Category&Value|Name|Status|Author(s)\n")
readme.write("|---|---|---|---\n")
for row in table_tasks:
readme.write('|' + row['category'] + ' ' + str(row['value']) + '|' + row['name'] + '|' + row['status'] + '|' + row['authors'] + "\n")
| 29.595092
| 134
| 0.632566
|
import json
import os
import sys
import os.path
import re
from pprint import pprint
from subprocess import Popen, PIPE
readme = open('README.md', 'w')
readme.write("# Free Hack Quest 2016\n")
def getListOfDirsWithTasks():
result = []
dirs = os.listdir('./');
for d in dirs:
print(d);
if os.path.isdir(d):
subdirs = os.listdir('./' + d)
subdirs.sort()
for sd in subdirs:
path = './' + d + '/' + sd
if os.path.isdir(path):
if os.path.isfile(path + '/main.json'):
result.append(path)
print("Found: " + path);
return result
dirs = getListOfDirsWithTasks();
dirs.sort()
game_name = 'Free Hack Quest 2016'
stat_tasks = []
table_tasks = []
errors = {}
def append_errors(path, text):
if path not in errors:
errors[path] = []
errors[path].append(text)
possible_categories = ["admin", "web", "pwn", "crypto", "forensic", "misc", "ppc", "recon", "reverse", "stego"]
def detectEncoding(path):
    p = Popen(['file', '-i', path], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = p.communicate()
    pattern = re.compile('.*charset=(.*).*')
    m = pattern.match(output.decode('utf-8', errors='replace'))
if m:
return m.group(1)
return 'unknown'
def parseAuthor(path):
author = ''
with open(path) as f:
content = ''.join(f.readlines())
content = content.replace('\r', '')
content = content.replace('\n', '')
content = content.replace('\t', '')
pattern = re.compile('.*"nick"[ ]*\:[ ]*"([A-Z-a-z@!._]*)".*')
m = pattern.match(content)
if m:
author = m.group(1)
contacts = []
pattern = re.compile('.*"contacts"[ ]*\:[ ]*\[[ ]*"([A-Z-a-z@/!._]*)"[ ]*,[ ]*"([A-Z-a-z@/!._]*)".*')
m = pattern.match(content)
if m:
contacts.append(m.group(1));
contacts.append(m.group(2));
return author + '(' + ', '.join(contacts) + ')'
def appendStatCat(category, value):
for cat in stat_tasks:
if cat['category'] == category:
cat['count'] = cat['count'] + 1
cat['value'] = cat['value'] + value
return
stat_tasks.append({'category': category, 'count': 1, 'value': value})
def checkWriteUpFile(folder):
path = folder + '/WRITEUP.md'
if not os.path.isfile(path):
append_errors(folder, 'Missing file WRITEUP.md')
def getCategoryFromTask(data, folder):
category = 'unknown'
if 'category' not in data:
append_errors(folder, 'main.json: Missing field "category"')
else:
category = data['category']
if category not in possible_categories:
append_errors(folder, 'main.json: Field "category" has wrong value')
return category;
def getStatusFromTask(data, folder):
status = 'need verify'
if 'status' not in data:
append_errors(folder, 'main.json: Missing field "status"')
else:
status = data['status']
return status;
def getValueFromTask(data, folder):
value = 0
if 'value' not in data:
append_errors(folder, 'main.json: Missing field "value"')
else:
value = data['value']
if value == 0:
append_errors(folder, 'main.json: Task has 0 value')
return value
def getDescriptionFromTask(data, folder):
description = {"RU" : "", "EN": ""}
    if 'description' not in data:
        append_errors(folder, 'main.json: Missing field "description"')
else:
description = data['description']
if 'RU' not in description:
append_errors(folder, 'main.json: Missing subfield description "RU"')
else:
if description["RU"] == "":
append_errors(folder, 'main.json: Empty field in description "RU"')
if 'EN' not in description:
append_errors(folder, 'main.json: Missing subfield description "EN"')
else:
if description["EN"] == "":
append_errors(folder, 'main.json: Empty field in description "EN"')
return description
def getAuthorsFromTask(data, path):
authors = []
if 'authors' not in data:
append_errors(path, 'main.json: Missing field "authors"')
else:
if not isinstance(data['authors'], list):
append_errors(path, 'main.json: Field "authors" must be list')
else:
authors_ = data['authors']
for author in authors_:
name = ""
team = ""
contacts = []
if "name" not in author:
append_errors(path, 'main.json: Missing subfield author "name"')
else:
name = author["name"]
if name == "":
append_errors(path, 'main.json: Subfield author "name" is empty')
if "team" not in author:
append_errors(path, 'main.json: Missing subfield author "team"')
else:
team = author["team"]
if team == "":
append_errors(path, 'main.json: Subfield author "team" is empty')
if "contacts" not in author:
append_errors(path, 'main.json: Missing subfield author "contacts"')
else:
if not isinstance(author['contacts'], list):
append_errors(path, 'main.json: Subfield author "contacts" must be list')
else:
for c in author['contacts']:
if c == "":
append_errors(path, 'main.json: Empty field in author "contacts"')
else:
contacts.append(c);
contacts = ', '.join(contacts)
if contacts == "":
append_errors(path, 'main.json: Missing data in subfield authors "contacts"')
authors.append('[' + team + '] ' + name + ' (' + contacts + ')')
return authors
def getNameFromTask(data, folder):
    name = folder
if 'name' not in data:
append_errors(folder, 'main.json: Missing field "name"')
else:
name = data['name']
if name == "":
append_errors(folder, 'main.json: Field "name" is empty')
dirname = folder.split("/")[-1];
if name != dirname:
append_errors(folder, 'main.json: Field "name" has wrong value must like dirname "' + dirname + '" be "' + folder + '"')
return name
def getFlagKeyFromTask(data, folder):
flag_key = ''
if 'flag_key' not in data:
append_errors(path, 'main.json: Missing field "flag_key"')
else:
flag_key = data['flag_key']
    pattern = re.compile(r'FHQ\(.*\)')
    pattern2 = re.compile(r'FHQ\{.*\}')
m = pattern.match(flag_key)
m2 = pattern2.match(flag_key)
if flag_key == "":
append_errors(folder, 'main.json: Field "flag_key" is empty')
elif not m and not m2:
        append_errors(folder, 'main.json: Wrong value of field "flag_key", expected format "FHQ(md5)" or "FHQ{sometext}"')
return flag_key
def getGameFromTask(data, folder):
game = ''
if 'game' not in data:
append_errors(folder, 'main.json: Missing field "game"')
else:
game = data['game']
if game != game_name:
append_errors(folder, 'main.json: Wrong game name "' + game + '" Please change to "' + game_name + '"')
return game
def getHintsFromTask(data, folder):
hints = []
if 'hints' not in data:
append_errors(d, 'main.json: Missing field "hints"')
else:
if not isinstance(data['hints'], list):
append_errors(d, 'main.json: Field "hints" must be list')
else:
hints = data['hints']
for hint in hints:
if 'RU' not in hint:
append_errors(folder, 'main.json: Missing subfield hint "RU"')
else:
if hint["RU"] == "":
append_errors(folder, 'main.json: Empty field in hint "RU"')
if 'EN' not in hint:
append_errors(folder, 'main.json: Missing subfield hint "EN"')
else:
if hint["EN"] == "":
append_errors(folder, 'main.json: Empty field in hint "EN"')
return hints;
for d in dirs:
path = d + '/main.json'
#encoding = detectEncoding(path);
if os.path.isfile(path):
try:
checkWriteUpFile(d);
with open(path) as main_json:
data = json.load(main_json)
category = getCategoryFromTask(data, d)
value = getValueFromTask(data, d)
status = getStatusFromTask(data, d);
authors = getAuthorsFromTask(data, d)
name = getNameFromTask(data, d)
getDescriptionFromTask(data, d)
getFlagKeyFromTask(data, d)
appendStatCat(category, value);
table_tasks.append({
'category': category,
'value': value,
'name': name,
'path': d,
'status': status,
'authors': ', '.join(authors) } )
getGameFromTask(data, d)
getHintsFromTask(data, d)
except Exception:
status = ''
encoding = detectEncoding(path);
if encoding != 'utf-8':
status = encoding
append_errors(path, 'Wrong encoding in "' + path + '", expected "utf-8", got "' + encoding + '"')
author = parseAuthor(path);
# print sys.exc_info()
table_tasks.append({'category': 'unknown', 'value': 0, 'name': d, 'status': 'invalid json', 'authors': author } )
appendStatCat('unknown', 0);
readme.write("\n## Short list of tasks\n\n")
for row in table_tasks:
readme.write(' * ' + row['category'] + ' ' + str(row['value']) + ' "' + row['name'] + '" by ' + row['authors'] + "\n")
if len(errors) > 0:
readme.write("\n\n## Errors\n\n")
for path in errors:
print(' * ' + path)
readme.write(' * ' + path + "\n")
for e in errors[path]:
print("\t * " + e)
readme.write('\t * ' + e + "\n")
readme.write("\n## Statistics by categories\n\n")
readme.write("|Category|Count|Summary value\n")
readme.write("|---|---|---\n")
stat_tasks.sort(key=lambda x: x['category'])
tasks_count_all=0
tasks_value_all=0
for cat in stat_tasks:
readme.write("|" + cat['category'] + "|" + str(cat['count']) + "|" + str(cat['value']) + "\n")
tasks_count_all = tasks_count_all + cat['count'];
tasks_value_all = tasks_value_all + cat['value'];
readme.write("|All|" + str(tasks_count_all) + "|" + str(tasks_value_all) + "\n")
# sort table
table_tasks.sort(key=lambda x: x['category'] + ' ' + str(x['value']).zfill(4))
readme.write("\n\n## Status table\n\n")
readme.write("|Category&Value|Name|Status|Author(s)\n")
readme.write("|---|---|---|---\n")
for row in table_tasks:
readme.write('|' + row['category'] + ' ' + str(row['value']) + '|' + row['name'] + '|' + row['status'] + '|' + row['authors'] + "\n")
| true
| true
|
f70a90c74a64770e6bdc44f68bb42a89c0778438
| 1,441
|
py
|
Python
|
sms.py
|
Lyokolux/smsNotificationFree
|
e82290c1d643bc249c9e70bf9df54c05005da789
|
[
"MIT"
] | null | null | null |
sms.py
|
Lyokolux/smsNotificationFree
|
e82290c1d643bc249c9e70bf9df54c05005da789
|
[
"MIT"
] | null | null | null |
sms.py
|
Lyokolux/smsNotificationFree
|
e82290c1d643bc249c9e70bf9df54c05005da789
|
[
"MIT"
] | null | null | null |
import sys
import click
import json
from urllib.request import urlopen
from urllib.parse import quote
RESPONSES_CODE = {
200 : "SMS sent",
400 : "One parameter is missing (identifier, password or message).",
402 : "Too many SMS sent.",
403 : "Service not activated or false login/key.",
500 : "Server Error. Please try again later."
}
#---------------------------------------
# CREATION & CONFIGURATION DU MESSAGE
#---------------------------------------
@click.command()
@click.option("-m", "--message",
prompt="SMS content: ",
help="the message to be sent")
@click.option("-c", "--config",
type=click.Path(exists=True),
prompt="Path of the config file",
help="parse JSON file to get id and password keys")
@click.option("-v", "--verbose",
is_flag=True,
help="Print the HTTP response code of the request")
def sms(message, config, verbose):
(user, password) = getKeys(config)
url = f"https://smsapi.free-mobile.fr/sendmsg?&user={user}&pass={password}&msg={quote(message)}"
response = urlopen(url)
if verbose:
status = response.getcode()
print(f"{status} : {RESPONSES_CODE[status]}")
def getKeys(config):
with open(config) as f:
credential = json.loads(f.read())
return (credential["user"], credential["password"])
if __name__ == "__main__":
sms()
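# Usage sketch (added for illustration; the file name and values below are
# examples, not part of the original script):
#   config.json -> {"user": "12345678", "password": "your-api-key"}
#   python sms.py -m "server down" -c config.json -v
# Note that urlopen raises urllib.error.HTTPError for non-2xx responses, so
# the 400/402/403/500 entries of RESPONSES_CODE are only reached if that
# exception is caught and its code looked up.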
| 32.022222
| 100
| 0.581541
|
import sys
import click
import json
from urllib.request import urlopen
from urllib.parse import quote
RESPONSES_CODE = {
200 : "SMS sent",
400 : "One parameter is missing (identifier, password or message).",
402 : "Too many SMS sent.",
403 : "Service not activated or false login/key.",
500 : "Server Error. Please try again later."
}
@click.command()
@click.option("-m", "--message",
prompt="SMS content: ",
help="the message to be sent")
@click.option("-c", "--config",
type=click.Path(exists=True),
prompt="Path of the config file",
help="parse JSON file to get id and password keys")
@click.option("-v", "--verbose",
is_flag=True,
help="Print the HTTP response code of the request")
def sms(message, config, verbose):
(user, password) = getKeys(config)
url = f"https://smsapi.free-mobile.fr/sendmsg?&user={user}&pass={password}&msg={quote(message)}"
response = urlopen(url)
if verbose:
status = response.getcode()
print(f"{status} : {RESPONSES_CODE[status]}")
def getKeys(config):
with open(config) as f:
credential = json.loads(f.read())
return (credential["user"], credential["password"])
if __name__ == "__main__":
sms()
| true
| true
|
f70a91aa7357a9d5cc499152a4eeee87757d1d2b
| 1,086
|
py
|
Python
|
Experiments/ARIMA/RunnerARIMA.py
|
nj-czy/UCTB
|
bddb8b47953bef1f44cb06f1a57a3d7efbd31c3a
|
[
"MIT"
] | null | null | null |
Experiments/ARIMA/RunnerARIMA.py
|
nj-czy/UCTB
|
bddb8b47953bef1f44cb06f1a57a3d7efbd31c3a
|
[
"MIT"
] | null | null | null |
Experiments/ARIMA/RunnerARIMA.py
|
nj-czy/UCTB
|
bddb8b47953bef1f44cb06f1a57a3d7efbd31c3a
|
[
"MIT"
] | null | null | null |
import os
from tqdm import tqdm
# dataset = [['Bike','NYC','all','365','sum','0.1'],['DiDi','Xian','all','all','sum','0.1'],
# ['Metro','Chongqing','all','all','sum','0.1'],['ChargeStation','Beijing','all','all','max','0.1'],
# ['METR','LA','all','all','average','0.2'],['PEMS','BAY','all','all','average','0.2']]
dataset = [['METR','LA','all','all','average','0.2'],['PEMS','BAY','all','all','average','0.2']]
with open("ARIMAresult3.txt","w") as fp:
for index in tqdm(range(len(dataset))):
fp.write("*********************************************************\n")
fp.write("Processing city----------------{}---using ARIMA-------MergeIndex 12 --".format(dataset[index]))
f_tmp = os.popen("python -W ignore ARIMA.py --dataset {} --city {} --MergeIndex 12 --DataRange {} --TrainDays {} --MergeWay {} --test_ratio {}".format(dataset[index][0],dataset[index][1],dataset[index][2],dataset[index][3],dataset[index][4],dataset[index][5]), "r")
# to record ouput
fp.write(f_tmp.read())
fp.flush()
f_tmp.close()
fp.write("\n")
| 51.714286
| 273
| 0.529466
|
import os
from tqdm import tqdm
dataset = [['METR','LA','all','all','average','0.2'],['PEMS','BAY','all','all','average','0.2']]
with open("ARIMAresult3.txt","w") as fp:
for index in tqdm(range(len(dataset))):
fp.write("*********************************************************\n")
fp.write("Processing city----------------{}---using ARIMA-------MergeIndex 12 --".format(dataset[index]))
f_tmp = os.popen("python -W ignore ARIMA.py --dataset {} --city {} --MergeIndex 12 --DataRange {} --TrainDays {} --MergeWay {} --test_ratio {}".format(dataset[index][0],dataset[index][1],dataset[index][2],dataset[index][3],dataset[index][4],dataset[index][5]), "r")
fp.write(f_tmp.read())
fp.flush()
f_tmp.close()
fp.write("\n")
| true
| true
|
f70a94376224e4e62b4838d7f26f665e40767945
| 1,015
|
py
|
Python
|
openstack_dashboard/dashboards/project/overview/urls.py
|
hashsos/hashcloudos-horizon
|
0cc080ca6777e4a1dac5cbcc6143202baddab176
|
[
"Apache-2.0"
] | 930
|
2015-01-04T08:06:03.000Z
|
2022-03-13T18:47:13.000Z
|
openstack_dashboard/dashboards/project/overview/urls.py
|
hashsos/hashcloudos-horizon
|
0cc080ca6777e4a1dac5cbcc6143202baddab176
|
[
"Apache-2.0"
] | 106
|
2019-01-18T03:06:55.000Z
|
2019-11-29T05:06:18.000Z
|
openstack_dashboard/dashboards/project/overview/urls.py
|
hashsos/hashcloudos-horizon
|
0cc080ca6777e4a1dac5cbcc6143202baddab176
|
[
"Apache-2.0"
] | 1,040
|
2015-01-01T18:48:28.000Z
|
2022-03-19T08:35:18.000Z
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from openstack_dashboard.dashboards.project.overview import views
urlpatterns = [
url(r'^$', views.ProjectOverview.as_view(), name='index'),
url(r'^warning$', views.WarningView.as_view(), name='warning'),
]
| 36.25
| 78
| 0.737931
|
from django.conf.urls import url
from openstack_dashboard.dashboards.project.overview import views
urlpatterns = [
url(r'^$', views.ProjectOverview.as_view(), name='index'),
url(r'^warning$', views.WarningView.as_view(), name='warning'),
]
| true
| true
|
f70a944f916b145fc345dc057278b25d671d9ad4
| 170
|
py
|
Python
|
app/projects/TestProject.py
|
cchernn/ProjectsWebsite
|
ad5a23539f5034956076259b55f628542241d9d8
|
[
"MIT"
] | null | null | null |
app/projects/TestProject.py
|
cchernn/ProjectsWebsite
|
ad5a23539f5034956076259b55f628542241d9d8
|
[
"MIT"
] | null | null | null |
app/projects/TestProject.py
|
cchernn/ProjectsWebsite
|
ad5a23539f5034956076259b55f628542241d9d8
|
[
"MIT"
] | null | null | null |
class handler():
def __init__(self):
self.greeting = "Hello World"
def __repr__(self):
return self.greeting
if __name__ == "__main__":
pass
| 17
| 37
| 0.611765
|
class handler():
def __init__(self):
self.greeting = "Hello World"
def __repr__(self):
return self.greeting
if __name__ == "__main__":
pass
| true
| true
|
f70a94c0e06d3fd4dafc82aa2df645c9cb1dba9e
| 916
|
py
|
Python
|
script/gimp_histemul.py
|
matteli/histemul
|
61f1ea8e1263b92fd2bead0c808f67940faad802
|
[
"BSD-2-Clause"
] | 1
|
2019-07-05T09:40:50.000Z
|
2019-07-05T09:40:50.000Z
|
script/gimp_histemul.py
|
matteli/histemul
|
61f1ea8e1263b92fd2bead0c808f67940faad802
|
[
"BSD-2-Clause"
] | null | null | null |
script/gimp_histemul.py
|
matteli/histemul
|
61f1ea8e1263b92fd2bead0c808f67940faad802
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
from gimpfu import *
from gimpenums import *
import sys
import os
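# Note (added; behavior inferred from the bit operations below, not from any
# upstream documentation): color2id appears to pack the RGB triple into a
# 24-bit integer and then apply a fixed nibble/bit permutation to derive a
# 16-bit object id from the foreground color.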
def color2id(color):
a = (color[0]<<16) | (color[1]<<8) | color[2]
b = (a & 0xF00000) >> 12 | (a & 0xF000) >> 8 | (a & 0xF00) << 4 | \
(a & 0xF0) >> 4
c = (b & 0xF000) | (b & 0x800) >> 11 | (b & 0x400) >> 7 | \
(b & 0x200) >> 3 | (b & 0x100) << 1 | (b & 0x80) >> 6 | \
(b & 0x40) >> 2 | (b & 0x20) << 2 | (b & 0x10) << 6 | \
(b & 0x8) >> 1 | (b & 0x4) << 3 | (b & 0x2) << 7 | (b & 0x1) << 11
return (c)
def gimp_histemul(img, layer):
idd = color2id(gimp.get_foreground())
    gimp.pdb.gimp_message_set_handler(MESSAGE_BOX)
    gimp.pdb.gimp_message(str(idd))  # gimp_message expects a string argument
register(
"python_fu_histemul_id",
"",
"",
"matteli",
"matteli",
"",
"<Image>/Filters/Histemul/_id",
"RGB*",
[],
[],
gimp_histemul)
main()
| 24.756757
| 71
| 0.469432
|
from gimpfu import *
from gimpenums import *
import sys
import os
def color2id(color):
a = (color[0]<<16) | (color[1]<<8) | color[2]
b = (a & 0xF00000) >> 12 | (a & 0xF000) >> 8 | (a & 0xF00) << 4 | \
(a & 0xF0) >> 4
c = (b & 0xF000) | (b & 0x800) >> 11 | (b & 0x400) >> 7 | \
(b & 0x200) >> 3 | (b & 0x100) << 1 | (b & 0x80) >> 6 | \
(b & 0x40) >> 2 | (b & 0x20) << 2 | (b & 0x10) << 6 | \
(b & 0x8) >> 1 | (b & 0x4) << 3 | (b & 0x2) << 7 | (b & 0x1) << 11
return (c)
def gimp_histemul(img, layer):
idd = color2id(gimp.get_foreground())
    gimp.pdb.gimp_message_set_handler(MESSAGE_BOX)
    gimp.pdb.gimp_message(str(idd))
register(
"python_fu_histemul_id",
"",
"",
"matteli",
"matteli",
"",
"<Image>/Filters/Histemul/_id",
"RGB*",
[],
[],
gimp_histemul)
main()
| true
| true
|
f70a95254e35822d1e937560e483bbab7dc9a08f
| 1,510
|
py
|
Python
|
docker_engine/komand_docker_engine/actions/container_remove/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
docker_engine/komand_docker_engine/actions/container_remove/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
docker_engine/komand_docker_engine/actions/container_remove/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
FORCE = "force"
ID = "id"
LINK = "link"
V = "v"
class Output:
SUCCESS = "success"
class ContainerRemoveInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"force": {
"type": "boolean",
"title": "Force Removal",
"description": "Force the removal of a running container (uses SIGKILL)",
"default": true,
"order": 4
},
"id": {
"type": "string",
"title": "ID",
"description": "Container ID",
"order": 1
},
"link": {
"type": "boolean",
"title": "Link Removal",
"description": "Remove the specified link and not the underlying container",
"default": false,
"order": 3
},
"v": {
"type": "boolean",
"title": "Volume Removal",
"description": "Remove the volumes associated with the container",
"default": false,
"order": 2
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class ContainerRemoveOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"success": {
"type": "boolean",
"title": "Success",
"description": "True if successful",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 19.868421
| 82
| 0.542384
|
import komand
import json
class Input:
FORCE = "force"
ID = "id"
LINK = "link"
V = "v"
class Output:
SUCCESS = "success"
class ContainerRemoveInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"force": {
"type": "boolean",
"title": "Force Removal",
"description": "Force the removal of a running container (uses SIGKILL)",
"default": true,
"order": 4
},
"id": {
"type": "string",
"title": "ID",
"description": "Container ID",
"order": 1
},
"link": {
"type": "boolean",
"title": "Link Removal",
"description": "Remove the specified link and not the underlying container",
"default": false,
"order": 3
},
"v": {
"type": "boolean",
"title": "Volume Removal",
"description": "Remove the volumes associated with the container",
"default": false,
"order": 2
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class ContainerRemoveOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"success": {
"type": "boolean",
"title": "Success",
"description": "True if successful",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| true
| true
|
f70a95cc159cace0f85857909a871669972e3f9e
| 31,375
|
py
|
Python
|
scipy/integrate/quadrature.py
|
maxi-marufo/my-scipy
|
be6c2597fcee86419592ac512319301c7ddfc118
|
[
"BSD-3-Clause"
] | 1
|
2020-07-22T17:29:25.000Z
|
2020-07-22T17:29:25.000Z
|
scipy/integrate/quadrature.py
|
maxi-marufo/my-scipy
|
be6c2597fcee86419592ac512319301c7ddfc118
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/integrate/quadrature.py
|
maxi-marufo/my-scipy
|
be6c2597fcee86419592ac512319301c7ddfc118
|
[
"BSD-3-Clause"
] | null | null | null |
import functools
import numpy as np
import math
import types
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a NumPy function.
from numpy import trapz
from scipy.special import roots_legendre
from scipy.special import gammaln
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
# Make See Also linking for our local copy work properly
def _copy_func(f):
"""Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)"""
g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__, closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
trapz = _copy_func(trapz)
if trapz.__doc__:
trapz.__doc__ = trapz.__doc__.replace('sum, cumsum', 'numpy.cumsum')
class AccuracyWarning(Warning):
pass
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
If integrating a vector-valued function, the returned array must have
shape ``(..., len(x))``.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
Examples
--------
>>> from scipy import integrate
>>> f = lambda x: x**8
>>> integrate.fixed_quad(f, 0.0, 1.0, n=4)
(0.1110884353741496, None)
>>> integrate.fixed_quad(f, 0.0, 1.0, n=5)
(0.11111111111111102, None)
>>> print(1/9.0) # analytical result
0.1111111111111111
>>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4)
(0.9999999771971152, None)
>>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5)
(1.000000000039565, None)
>>> np.sin(np.pi/2)-np.sin(0) # analytical result
1.0
"""
x, w = _cached_roots_legendre(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
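# Note (explanatory comment, added): fixed_quad maps the Legendre nodes x from
# [-1, 1] onto [a, b] via the affine change of variables
#   y = (b - a) * (x + 1) / 2 + a
# and the factor (b - a) / 2 in the return value is the Jacobian of that map.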
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in range(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
Examples
--------
>>> from scipy import integrate
>>> f = lambda x: x**8
>>> integrate.quadrature(f, 0.0, 1.0)
(0.11111111111111106, 4.163336342344337e-17)
>>> print(1/9.0) # analytical result
0.1111111111111111
>>> integrate.quadrature(np.cos, 0.0, np.pi/2)
(0.9999999999999536, 3.9611425250996035e-11)
>>> np.sin(np.pi/2)-np.sin(0) # analytical result
1.0
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in range(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : float, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, insert this value at the beginning of the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-D or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
if x is None: # Even-spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
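# Note (explanatory comment, added): for unevenly spaced points (x0, x1, x2)
# with h0 = x1 - x0 and h1 = x2 - x1, the three-point rule used above is
#   (h0 + h1) / 6 * [ (2 - h1/h0) * f(x0)
#                     + (h0 + h1)**2 / (h0 * h1) * f(x1)
#                     + (2 - h0/h1) * f(x2) ]
# which reduces to Simpson's classic 1/3 rule when h0 == h1.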
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `x`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
'avg' : Average two results:1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(0, 10)
>>> y = np.arange(0, 10)
>>> integrate.simps(y, x)
40.5
>>> y = np.power(x, 3)
>>> integrate.simps(y, x)
1642.5
>>> integrate.quad(lambda x: x**3, 0, 9)[0]
1640.25
>>> integrate.simps(y, x, even='first')
1644.5
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-D or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(10, 14.25, 0.25)
>>> y = np.arange(3, 12)
>>> integrate.romb(y)
56.0
>>> y = np.sin(np.power(x, 2.5))
>>> integrate.romb(y)
-0.742561336672229
>>> integrate.romb(y, show=True)
Richardson Extrapolation Table for Romberg Integration
====================================================================
-0.81576
4.63862 6.45674
-1.10581 -3.02062 -3.65245
-2.57379 -3.06311 -3.06595 -3.05664
-1.34093 -0.92997 -0.78776 -0.75160 -0.74256
====================================================================
-0.742561336672229
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in range(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in range(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="\n")
for i in range(k+1):
for j in range(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to SciPy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
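# Note (explanatory comment, added): for numtraps == 2**k the points evaluated
# above are exactly the midpoints of the subintervals of the previous level,
# so `romberg` below refines its trapezoid sum by accumulating only these new
# ordinates instead of re-evaluating the whole grid.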
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
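# Note (explanatory comment, added): this is one step of Richardson
# extrapolation. With c = R(i, j-1) and b = R(i-1, j-1) it computes
#   R(i, j) = (4**j * c - b) / (4**j - 1)
# which cancels the leading O(h**(2j)) error term of the trapezoid estimates.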
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in range(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in range(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e., whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in range(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in range(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
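# Note (added, illustrative): decoding the N = 2 entry of the table above
# reproduces Simpson's 1/3 rule, e.g.
#   na, da, vi, nb, db = _builtincoeffs[2]   # (1, 3, [1, 4, 1], -1, 90)
#   an = na * np.array(vi, dtype=float) / da # -> [1/3, 4/3, 1/3]
#   B = float(nb) / db                       # -> -1/90, the error coefficient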
def newton_cotes(rn, equal=0):
r"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
+ B_N (\Delta x)^{N+2} f^{N+1} (\xi)`
where :math:`\xi \in [x_0,x_N]`
and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average samples spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Examples
--------
Compute the integral of sin(x) in [0, :math:`\pi`]:
>>> from scipy.integrate import newton_cotes
>>> def f(x):
... return np.sin(x)
>>> a = 0
>>> b = np.pi
>>> exact = 2
>>> for N in [2, 4, 6, 8, 10]:
... x = np.linspace(a, b, N + 1)
... an, B = newton_cotes(N, 1)
... dx = (b - a) / N
... quad = dx * np.sum(an * f(x))
... error = abs(quad - exact)
... print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error))
...
2 2.094395102 9.43951e-02
4 1.998570732 1.42927e-03
6 2.000017814 1.78136e-05
8 1.999999835 1.64725e-07
10 2.000000001 1.14677e-09
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except Exception:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
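# Note (explanatory comment, added): the last lines of newton_cotes evaluate
# N**power / power! in log space, fac = exp(power * log(N) - gammaln(power + 1)),
# using that gammaln(power + 1) == log(power!) for integer power; this avoids
# overflow for large N.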
| 32.278807
| 103
| 0.578135
|
import functools
import numpy as np
import math
import types
import warnings
from numpy import trapz
from scipy.special import roots_legendre
from scipy.special import gammaln
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
# Make See Also linking for our local copy work properly
def _copy_func(f):
g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__, closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
trapz = _copy_func(trapz)
if trapz.__doc__:
trapz.__doc__ = trapz.__doc__.replace('sum, cumsum', 'numpy.cumsum')
class AccuracyWarning(Warning):
pass
def _cached_roots_legendre(n):
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
x, w = _cached_roots_legendre(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
def vectorize1(func, args=(), vec_func=False):
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in range(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in range(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-D or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
    if x is None:  # Evenly spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-D or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
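    # Romberg needs 2**k + 1 samples; find k by doubling n until it
    # reaches Ninterv, and reject sample counts that do not fit.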
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in range(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in range(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="\n")
for i in range(k+1):
for j in range(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to SciPy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
        numtosum = numtraps // 2  # integer count, so np.arange below gets an int
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
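# Richardson extrapolation step used by romberg(): given a coarse estimate
# b and a finer estimate c from column k-1 of the Romberg table, the entry
# (4**k * c - b) / (4**k - 1) cancels the leading O(h**(2k)) error term.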
def _romberg_diff(b, c, k):
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in range(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in range(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in range(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in range(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points used to construct the local interpolating
# polynomial: a holds the weights for Newton-Cotes integration and
# B is the error coefficient. The error in these coefficients grows
# as N gets larger, or as the samples get closer and closer together.
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
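# Worked example, for illustration: the N=2 entry below,
# (1, 3, [1, 4, 1], -1, 90), gives weights
#     a = 1 * array([1, 4, 1]) / 3 = [1/3, 4/3, 1/3]
# and error coefficient B = -1/90, i.e. Simpson's rule:
#     integrate(f(x), x, x_0, x_2) = dx*(f_0 + 4*f_1 + f_2)/3
#                                    - dx**5/90 * f''''(x*)
#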
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except Exception:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
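    # Moment matching on t in [-1, 1]: row k of C below holds t_i**k, so
    # the weights w must satisfy C @ w = moments, where the k-th moment
    # integral(t**k, t, -1, 1) is 2/(k+1) for even k and 0 for odd k;
    # that is why only the even columns of Cinv are used, and the final
    # N/2 factor rescales from t back to the sample-index variable.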
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
    # Improve precision of the inverse with two Newton-Schulz iterations
    # (X <- 2*X - X*C*X), which converge quadratically to C**-1 and damp
    # the rounding error left by np.linalg.inv on this ill-conditioned
    # Vandermonde-type matrix.
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
| true
| true
|
f70a96e43b69336d19518c9d33a4c86634d2adbd
| 3,541
|
py
|
Python
|
indy_common/authorize/auth_cons_strategies.py
|
Rob-S/indy-node
|
0aefbda62c5a7412d7e03b2fb9795c500ea67e9f
|
[
"Apache-2.0"
] | 627
|
2017-07-06T12:38:08.000Z
|
2022-03-30T13:18:43.000Z
|
indy_common/authorize/auth_cons_strategies.py
|
Rob-S/indy-node
|
0aefbda62c5a7412d7e03b2fb9795c500ea67e9f
|
[
"Apache-2.0"
] | 580
|
2017-06-29T17:59:57.000Z
|
2022-03-29T21:37:52.000Z
|
indy_common/authorize/auth_cons_strategies.py
|
Rob-S/indy-node
|
0aefbda62c5a7412d7e03b2fb9795c500ea67e9f
|
[
"Apache-2.0"
] | 704
|
2017-06-29T17:45:34.000Z
|
2022-03-30T07:08:58.000Z
|
from abc import abstractmethod, ABCMeta
from indy_common.authorize.auth_actions import split_action_id
from indy_common.authorize.auth_constraints import AbstractAuthConstraint, AbstractConstraintSerializer
from indy_common.state import config
from plenum.common.metrics_collector import MetricsName, MetricsCollector
from state.pruning_state import PruningState
from stp_core.common.log import getlogger
logger = getlogger()
class AbstractAuthStrategy(metaclass=ABCMeta):
def __init__(self, auth_map):
self.auth_map = auth_map
@abstractmethod
def get_auth_constraint(self, action_id) -> AbstractAuthConstraint:
raise NotImplementedError()
@abstractmethod
def _find_auth_constraint_key(self, action_id, auth_map):
raise NotImplementedError()
@staticmethod
def is_accepted_action_id(from_auth_map, from_req):
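        # An action id splits into (prefix, txn_type, field, old_value,
        # new_value); a request matches a rule from the auth map when all
        # five components agree, with '*' in the rule acting as a
        # wildcard for field, old_value and new_value.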
am = split_action_id(from_auth_map)
r = split_action_id(from_req)
if r.prefix != am.prefix:
return False
if r.txn_type != am.txn_type:
return False
if r.field != am.field and \
am.field != '*':
return False
if r.old_value != am.old_value and \
am.old_value != '*':
return False
if r.new_value != am.new_value and \
am.new_value != '*':
return False
return True
class LocalAuthStrategy(AbstractAuthStrategy):
def get_auth_constraint(self, action_id) -> AbstractAuthConstraint:
am_id = self._find_auth_constraint_key(action_id, self.auth_map)
return self.auth_map.get(am_id)
def _find_auth_constraint_key(self, action_id, auth_map):
for am_id in auth_map.keys():
if self.is_accepted_action_id(am_id, action_id):
return am_id
class ConfigLedgerAuthStrategy(AbstractAuthStrategy):
def __init__(self,
auth_map,
state: PruningState,
serializer: AbstractConstraintSerializer,
metrics: MetricsCollector = None):
super().__init__(auth_map=auth_map)
self.state = state
self.serializer = serializer
self.metrics = metrics
self.from_state_count = 0
def get_auth_constraint(self, action_id: str) -> AbstractAuthConstraint:
"""
Find rule_id for incoming action_id and return AuthConstraint instance
"""
return self._find_auth_constraint(action_id, self.auth_map)
def _find_auth_constraint(self, action_id, auth_map):
am_id = self._find_auth_constraint_key(action_id, auth_map)
if am_id:
constraint = self.get_from_state(key=config.make_state_path_for_auth_rule(am_id))
if not constraint:
return auth_map.get(am_id)
logger.debug("Using auth constraint from state")
if self.metrics:
self.from_state_count += 1
self.metrics.add_event(MetricsName.AUTH_RULES_FROM_STATE_COUNT, self.from_state_count)
return constraint
def _find_auth_constraint_key(self, action_id, auth_map):
for am_id in auth_map.keys():
if self.is_accepted_action_id(am_id, action_id):
return am_id
def get_from_state(self, key, isCommitted=False):
from_state = self.state.get(key=key,
isCommitted=isCommitted)
if not from_state:
return None
return self.serializer.deserialize(from_state)
| 35.767677
| 103
| 0.663372
|
from abc import abstractmethod, ABCMeta
from indy_common.authorize.auth_actions import split_action_id
from indy_common.authorize.auth_constraints import AbstractAuthConstraint, AbstractConstraintSerializer
from indy_common.state import config
from plenum.common.metrics_collector import MetricsName, MetricsCollector
from state.pruning_state import PruningState
from stp_core.common.log import getlogger
logger = getlogger()
class AbstractAuthStrategy(metaclass=ABCMeta):
def __init__(self, auth_map):
self.auth_map = auth_map
@abstractmethod
def get_auth_constraint(self, action_id) -> AbstractAuthConstraint:
raise NotImplementedError()
@abstractmethod
def _find_auth_constraint_key(self, action_id, auth_map):
raise NotImplementedError()
@staticmethod
def is_accepted_action_id(from_auth_map, from_req):
am = split_action_id(from_auth_map)
r = split_action_id(from_req)
if r.prefix != am.prefix:
return False
if r.txn_type != am.txn_type:
return False
if r.field != am.field and \
am.field != '*':
return False
if r.old_value != am.old_value and \
am.old_value != '*':
return False
if r.new_value != am.new_value and \
am.new_value != '*':
return False
return True
class LocalAuthStrategy(AbstractAuthStrategy):
def get_auth_constraint(self, action_id) -> AbstractAuthConstraint:
am_id = self._find_auth_constraint_key(action_id, self.auth_map)
return self.auth_map.get(am_id)
def _find_auth_constraint_key(self, action_id, auth_map):
for am_id in auth_map.keys():
if self.is_accepted_action_id(am_id, action_id):
return am_id
class ConfigLedgerAuthStrategy(AbstractAuthStrategy):
def __init__(self,
auth_map,
state: PruningState,
serializer: AbstractConstraintSerializer,
metrics: MetricsCollector = None):
super().__init__(auth_map=auth_map)
self.state = state
self.serializer = serializer
self.metrics = metrics
self.from_state_count = 0
def get_auth_constraint(self, action_id: str) -> AbstractAuthConstraint:
return self._find_auth_constraint(action_id, self.auth_map)
def _find_auth_constraint(self, action_id, auth_map):
am_id = self._find_auth_constraint_key(action_id, auth_map)
if am_id:
constraint = self.get_from_state(key=config.make_state_path_for_auth_rule(am_id))
if not constraint:
return auth_map.get(am_id)
logger.debug("Using auth constraint from state")
if self.metrics:
self.from_state_count += 1
self.metrics.add_event(MetricsName.AUTH_RULES_FROM_STATE_COUNT, self.from_state_count)
return constraint
def _find_auth_constraint_key(self, action_id, auth_map):
for am_id in auth_map.keys():
if self.is_accepted_action_id(am_id, action_id):
return am_id
def get_from_state(self, key, isCommitted=False):
from_state = self.state.get(key=key,
isCommitted=isCommitted)
if not from_state:
return None
return self.serializer.deserialize(from_state)
| true
| true
|
f70a9784cb666d54aa8b8ed0284ab8fdc2ba59d2
| 3,518
|
py
|
Python
|
supervisor/backups/validate.py
|
mib1185/homeassistant-supervisor
|
d536ac8604e1b5a0f5008c92e3d98fcc8ab16bb5
|
[
"Apache-2.0"
] | 597
|
2017-04-27T15:10:08.000Z
|
2019-12-18T16:02:57.000Z
|
supervisor/backups/validate.py
|
mib1185/homeassistant-supervisor
|
d536ac8604e1b5a0f5008c92e3d98fcc8ab16bb5
|
[
"Apache-2.0"
] | 799
|
2017-05-02T00:26:07.000Z
|
2019-12-18T21:40:18.000Z
|
supervisor/backups/validate.py
|
mib1185/homeassistant-supervisor
|
d536ac8604e1b5a0f5008c92e3d98fcc8ab16bb5
|
[
"Apache-2.0"
] | 173
|
2017-04-26T17:03:42.000Z
|
2019-12-15T10:41:57.000Z
|
"""Validate some things around restore."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from ..backups.const import BackupType
from ..const import (
ATTR_ADDONS,
ATTR_COMPRESSED,
ATTR_CRYPTO,
ATTR_DATE,
ATTR_DOCKER,
ATTR_FOLDERS,
ATTR_HOMEASSISTANT,
ATTR_NAME,
ATTR_PROTECTED,
ATTR_REPOSITORIES,
ATTR_SIZE,
ATTR_SLUG,
ATTR_TYPE,
ATTR_VERSION,
CRYPTO_AES128,
FOLDER_ADDONS,
FOLDER_HOMEASSISTANT,
FOLDER_MEDIA,
FOLDER_SHARE,
FOLDER_SSL,
)
from ..validate import SCHEMA_DOCKER_CONFIG, repositories, version_tag
ALL_FOLDERS = [
FOLDER_SHARE,
FOLDER_ADDONS,
FOLDER_SSL,
FOLDER_MEDIA,
]
def unique_addons(addons_list):
"""Validate that an add-on is unique."""
single = {addon[ATTR_SLUG] for addon in addons_list}
if len(single) != len(addons_list):
raise vol.Invalid("Invalid addon list in backup!") from None
return addons_list
def v1_homeassistant(
homeassistant_data: dict[str, Any] | None
) -> dict[str, Any] | None:
"""Cleanup homeassistant artefacts from v1."""
if not homeassistant_data:
return None
if homeassistant_data.get(ATTR_VERSION) is None:
return None
return homeassistant_data
def v1_folderlist(folder_data: list[str]) -> list[str]:
"""Cleanup folder artefacts from v1."""
if FOLDER_HOMEASSISTANT in folder_data:
folder_data.remove(FOLDER_HOMEASSISTANT)
return folder_data
def v1_protected(protected: bool | str) -> bool:
"""Cleanup old protected handling."""
if isinstance(protected, bool):
return protected
return True
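# The v1_* helpers above normalise fields written by the old (version 1)
# backup format, so the single schema below can validate both v1 and v2
# backup metadata.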
# pylint: disable=no-value-for-parameter
SCHEMA_BACKUP = vol.Schema(
{
vol.Optional(ATTR_VERSION, default=1): vol.All(vol.Coerce(int), vol.In((1, 2))),
vol.Required(ATTR_SLUG): str,
vol.Required(ATTR_TYPE): vol.Coerce(BackupType),
vol.Required(ATTR_NAME): str,
vol.Required(ATTR_DATE): str,
vol.Optional(ATTR_COMPRESSED, default=True): vol.Boolean(),
vol.Optional(ATTR_PROTECTED, default=False): vol.All(
v1_protected, vol.Boolean()
),
vol.Optional(ATTR_CRYPTO, default=None): vol.Maybe(CRYPTO_AES128),
vol.Optional(ATTR_HOMEASSISTANT, default=None): vol.All(
v1_homeassistant,
vol.Maybe(
vol.Schema(
{
vol.Required(ATTR_VERSION): version_tag,
vol.Optional(ATTR_SIZE, default=0): vol.Coerce(float),
},
extra=vol.REMOVE_EXTRA,
)
),
),
vol.Optional(ATTR_DOCKER, default=dict): SCHEMA_DOCKER_CONFIG,
vol.Optional(ATTR_FOLDERS, default=list): vol.All(
v1_folderlist, [vol.In(ALL_FOLDERS)], vol.Unique()
),
vol.Optional(ATTR_ADDONS, default=list): vol.All(
[
vol.Schema(
{
vol.Required(ATTR_SLUG): str,
vol.Required(ATTR_NAME): str,
vol.Required(ATTR_VERSION): version_tag,
vol.Optional(ATTR_SIZE, default=0): vol.Coerce(float),
},
extra=vol.REMOVE_EXTRA,
)
],
unique_addons,
),
vol.Optional(ATTR_REPOSITORIES, default=list): repositories,
},
extra=vol.ALLOW_EXTRA,
)
| 28.370968
| 88
| 0.608016
|
from __future__ import annotations
from typing import Any
import voluptuous as vol
from ..backups.const import BackupType
from ..const import (
ATTR_ADDONS,
ATTR_COMPRESSED,
ATTR_CRYPTO,
ATTR_DATE,
ATTR_DOCKER,
ATTR_FOLDERS,
ATTR_HOMEASSISTANT,
ATTR_NAME,
ATTR_PROTECTED,
ATTR_REPOSITORIES,
ATTR_SIZE,
ATTR_SLUG,
ATTR_TYPE,
ATTR_VERSION,
CRYPTO_AES128,
FOLDER_ADDONS,
FOLDER_HOMEASSISTANT,
FOLDER_MEDIA,
FOLDER_SHARE,
FOLDER_SSL,
)
from ..validate import SCHEMA_DOCKER_CONFIG, repositories, version_tag
ALL_FOLDERS = [
FOLDER_SHARE,
FOLDER_ADDONS,
FOLDER_SSL,
FOLDER_MEDIA,
]
def unique_addons(addons_list):
single = {addon[ATTR_SLUG] for addon in addons_list}
if len(single) != len(addons_list):
raise vol.Invalid("Invalid addon list in backup!") from None
return addons_list
def v1_homeassistant(
homeassistant_data: dict[str, Any] | None
) -> dict[str, Any] | None:
if not homeassistant_data:
return None
if homeassistant_data.get(ATTR_VERSION) is None:
return None
return homeassistant_data
def v1_folderlist(folder_data: list[str]) -> list[str]:
if FOLDER_HOMEASSISTANT in folder_data:
folder_data.remove(FOLDER_HOMEASSISTANT)
return folder_data
def v1_protected(protected: bool | str) -> bool:
if isinstance(protected, bool):
return protected
return True
SCHEMA_BACKUP = vol.Schema(
{
vol.Optional(ATTR_VERSION, default=1): vol.All(vol.Coerce(int), vol.In((1, 2))),
vol.Required(ATTR_SLUG): str,
vol.Required(ATTR_TYPE): vol.Coerce(BackupType),
vol.Required(ATTR_NAME): str,
vol.Required(ATTR_DATE): str,
vol.Optional(ATTR_COMPRESSED, default=True): vol.Boolean(),
vol.Optional(ATTR_PROTECTED, default=False): vol.All(
v1_protected, vol.Boolean()
),
vol.Optional(ATTR_CRYPTO, default=None): vol.Maybe(CRYPTO_AES128),
vol.Optional(ATTR_HOMEASSISTANT, default=None): vol.All(
v1_homeassistant,
vol.Maybe(
vol.Schema(
{
vol.Required(ATTR_VERSION): version_tag,
vol.Optional(ATTR_SIZE, default=0): vol.Coerce(float),
},
extra=vol.REMOVE_EXTRA,
)
),
),
vol.Optional(ATTR_DOCKER, default=dict): SCHEMA_DOCKER_CONFIG,
vol.Optional(ATTR_FOLDERS, default=list): vol.All(
v1_folderlist, [vol.In(ALL_FOLDERS)], vol.Unique()
),
vol.Optional(ATTR_ADDONS, default=list): vol.All(
[
vol.Schema(
{
vol.Required(ATTR_SLUG): str,
vol.Required(ATTR_NAME): str,
vol.Required(ATTR_VERSION): version_tag,
vol.Optional(ATTR_SIZE, default=0): vol.Coerce(float),
},
extra=vol.REMOVE_EXTRA,
)
],
unique_addons,
),
vol.Optional(ATTR_REPOSITORIES, default=list): repositories,
},
extra=vol.ALLOW_EXTRA,
)
| true
| true
|
f70a978dd0049c27244b57c32fae3ca446d6330a
| 574
|
py
|
Python
|
utils/heap_queue.py
|
yeshwanthv5/PruneFL
|
ad1f7f33b0605d1d79abfbe42ef287fcc613a943
|
[
"MIT"
] | 6
|
2021-07-01T05:35:08.000Z
|
2022-03-04T18:53:31.000Z
|
utils/heap_queue.py
|
yeshwanthv5/PruneFL
|
ad1f7f33b0605d1d79abfbe42ef287fcc613a943
|
[
"MIT"
] | null | null | null |
utils/heap_queue.py
|
yeshwanthv5/PruneFL
|
ad1f7f33b0605d1d79abfbe42ef287fcc613a943
|
[
"MIT"
] | 1
|
2021-06-21T14:24:47.000Z
|
2021-06-21T14:24:47.000Z
|
import heapq
from typing import Iterable
class HeapQueue:
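    # heapq implements a min-heap, so values are stored negated as
    # (-val, index) pairs: the smallest stored tuple then corresponds to
    # the largest original value, giving max-heap behaviour at the root.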
def __init__(self, init_h: Iterable):
self.h = [(-val, index) for index, val in init_h]
heapq.heapify(self.h)
def replace_largest(self, new_val):
heapq.heapreplace(self.h, (-new_val, self.max_index))
def pop(self):
heapq.heappop(self.h)
@property
def max_index(self):
return self.h[0][1]
@property
def max_val(self):
return -self.h[0][0]
def __repr__(self):
return "HeapQueue instance containing data {}.".format(self.h)
| 22.076923
| 70
| 0.625436
|
import heapq
from typing import Iterable
class HeapQueue:
def __init__(self, init_h: Iterable):
self.h = [(-val, index) for index, val in init_h]
heapq.heapify(self.h)
def replace_largest(self, new_val):
heapq.heapreplace(self.h, (-new_val, self.max_index))
def pop(self):
heapq.heappop(self.h)
@property
def max_index(self):
return self.h[0][1]
@property
def max_val(self):
return -self.h[0][0]
def __repr__(self):
return "HeapQueue instance containing data {}.".format(self.h)
| true
| true
|
f70a9949d4166222fbcd0e7d65ca4dd9d870cbb4
| 628
|
py
|
Python
|
dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/gfe/request/LockChangeRequest.py
|
mjames-upc/python-awips
|
e2b05f5587b02761df3b6dd5c6ee1f196bd5f11c
|
[
"BSD-3-Clause"
] | null | null | null |
dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/gfe/request/LockChangeRequest.py
|
mjames-upc/python-awips
|
e2b05f5587b02761df3b6dd5c6ee1f196bd5f11c
|
[
"BSD-3-Clause"
] | null | null | null |
dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/gfe/request/LockChangeRequest.py
|
mjames-upc/python-awips
|
e2b05f5587b02761df3b6dd5c6ee1f196bd5f11c
|
[
"BSD-3-Clause"
] | null | null | null |
##
##
# File auto-generated against equivalent DynamicSerialize Java class
class LockChangeRequest(object):
def __init__(self):
self.requests = None
self.workstationID = None
self.siteID = None
def getRequests(self):
return self.requests
def setRequests(self, requests):
self.requests = requests
def getWorkstationID(self):
return self.workstationID
def setWorkstationID(self, workstationID):
self.workstationID = workstationID
def getSiteID(self):
return self.siteID
def setSiteID(self, siteID):
self.siteID = siteID
| 20.258065
| 68
| 0.664013
|
class LockChangeRequest(object):
def __init__(self):
self.requests = None
self.workstationID = None
self.siteID = None
def getRequests(self):
return self.requests
def setRequests(self, requests):
self.requests = requests
def getWorkstationID(self):
return self.workstationID
def setWorkstationID(self, workstationID):
self.workstationID = workstationID
def getSiteID(self):
return self.siteID
def setSiteID(self, siteID):
self.siteID = siteID
| true
| true
|
f70a998e45a9cd53af285a0aff5b0be6fe9d545d
| 1,642
|
py
|
Python
|
xua/build_tools.py
|
kmirzavaziri/xua-cli
|
e442f7522665cf6a4605acce3c023e8194f07176
|
[
"MIT"
] | null | null | null |
xua/build_tools.py
|
kmirzavaziri/xua-cli
|
e442f7522665cf6a4605acce3c023e8194f07176
|
[
"MIT"
] | null | null | null |
xua/build_tools.py
|
kmirzavaziri/xua-cli
|
e442f7522665cf6a4605acce3c023e8194f07176
|
[
"MIT"
] | null | null | null |
import os
from xua import helpers
from xua.constants import CLI, BUILD
from xua.exceptions import UserError
from xua.builders.doc import htmlOld
def getBuildEngine(project, config):
if project == CLI.PROJECT_SERVER_PHP:
# @TODO
return None
elif project == CLI.PROJECT_MARSHAL_DART:
# @TODO
return None
elif project == CLI.PROJECT_DOC_HTML:
return htmlOld.BuildEngine(config)
# return html.engine(config)
elif project == CLI.PROJECT_DOC_LATEX:
# @TODO
return None
else:
raise UserError(f"Unknown project {project}.")
def buildRecursive(path, buildEngine):
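    # Walk the tree rooted at `path`: files the config marks for building
    # are compiled to their mapped destination, files marked for copying
    # are copied verbatim, and directories recurse into their children.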
if os.path.isfile(path):
if buildEngine.config.isToBuild(path, buildEngine.project):
destination = buildEngine.config.getCorrespondingPath(
buildEngine.project, path, BUILD.MAP_PROJECT_EXTENSION[buildEngine.project])
try:
helpers.write(buildEngine.build(path), destination)
except UserError as e:
helpers.Logger.log(helpers.Logger.ERROR,
buildEngine.project, path + ": " + str(e))
else:
helpers.Logger.log(helpers.Logger.SUCCESS,
buildEngine.project, destination + ' built.')
elif buildEngine.config.isToCopy(path, buildEngine.project):
helpers.copy(path, buildEngine.config.getCorrespondingPath(
buildEngine.project, path))
elif os.path.isdir(path):
for child in os.listdir(path):
buildRecursive(os.path.join(path, child), buildEngine)
| 37.318182
| 92
| 0.629111
|
import os
from xua import helpers
from xua.constants import CLI, BUILD
from xua.exceptions import UserError
from xua.builders.doc import htmlOld
def getBuildEngine(project, config):
if project == CLI.PROJECT_SERVER_PHP:
return None
elif project == CLI.PROJECT_MARSHAL_DART:
return None
elif project == CLI.PROJECT_DOC_HTML:
return htmlOld.BuildEngine(config)
elif project == CLI.PROJECT_DOC_LATEX:
return None
else:
raise UserError(f"Unknown project {project}.")
def buildRecursive(path, buildEngine):
if os.path.isfile(path):
if buildEngine.config.isToBuild(path, buildEngine.project):
destination = buildEngine.config.getCorrespondingPath(
buildEngine.project, path, BUILD.MAP_PROJECT_EXTENSION[buildEngine.project])
try:
helpers.write(buildEngine.build(path), destination)
except UserError as e:
helpers.Logger.log(helpers.Logger.ERROR,
buildEngine.project, path + ": " + str(e))
else:
helpers.Logger.log(helpers.Logger.SUCCESS,
buildEngine.project, destination + ' built.')
elif buildEngine.config.isToCopy(path, buildEngine.project):
helpers.copy(path, buildEngine.config.getCorrespondingPath(
buildEngine.project, path))
elif os.path.isdir(path):
for child in os.listdir(path):
buildRecursive(os.path.join(path, child), buildEngine)
| true
| true
|
f70a9cb73c105b012ba90f9d50a5890ed86a8e48
| 14,491
|
py
|
Python
|
homeassistant/components/notify/html5.py
|
glogiotatidis/home-assistant
|
3b83a64f7cdf8a3b90f7f445869155c549c631b0
|
[
"Apache-2.0"
] | 3
|
2019-01-24T20:32:14.000Z
|
2022-03-22T14:25:48.000Z
|
homeassistant/components/notify/html5.py
|
abusalimov/home-assistant
|
5b53bd6aa02a45ddcd4bf4358e74ddbc0285d8d3
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/notify/html5.py
|
abusalimov/home-assistant
|
5b53bd6aa02a45ddcd4bf4358e74ddbc0285d8d3
|
[
"Apache-2.0"
] | 1
|
2022-03-22T14:25:52.000Z
|
2022-03-22T14:25:52.000Z
|
"""
HTML5 Push Messaging notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.html5/
"""
import datetime
import json
import logging
import time
import uuid
from aiohttp.hdrs import AUTHORIZATION
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.util.json import load_json, save_json
from homeassistant.exceptions import HomeAssistantError
from homeassistant.components.frontend import add_manifest_json_key
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.notify import (
ATTR_DATA, ATTR_TITLE, ATTR_TARGET, PLATFORM_SCHEMA, ATTR_TITLE_DEFAULT,
BaseNotificationService)
from homeassistant.const import (
URL_ROOT, HTTP_BAD_REQUEST, HTTP_UNAUTHORIZED, HTTP_INTERNAL_SERVER_ERROR)
from homeassistant.helpers import config_validation as cv
from homeassistant.util import ensure_unique_string
REQUIREMENTS = ['pywebpush==1.6.0']
DEPENDENCIES = ['frontend']
_LOGGER = logging.getLogger(__name__)
REGISTRATIONS_FILE = 'html5_push_registrations.conf'
ATTR_GCM_SENDER_ID = 'gcm_sender_id'
ATTR_GCM_API_KEY = 'gcm_api_key'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(ATTR_GCM_SENDER_ID): cv.string,
vol.Optional(ATTR_GCM_API_KEY): cv.string,
})
ATTR_SUBSCRIPTION = 'subscription'
ATTR_BROWSER = 'browser'
ATTR_NAME = 'name'
ATTR_ENDPOINT = 'endpoint'
ATTR_KEYS = 'keys'
ATTR_AUTH = 'auth'
ATTR_P256DH = 'p256dh'
ATTR_EXPIRATIONTIME = 'expirationTime'
ATTR_TAG = 'tag'
ATTR_ACTION = 'action'
ATTR_ACTIONS = 'actions'
ATTR_TYPE = 'type'
ATTR_URL = 'url'
ATTR_JWT = 'jwt'
# The number of days after the moment a notification is sent that a JWT
# is valid.
JWT_VALID_DAYS = 7
KEYS_SCHEMA = vol.All(
dict, vol.Schema({
vol.Required(ATTR_AUTH): cv.string,
vol.Required(ATTR_P256DH): cv.string,
})
)
SUBSCRIPTION_SCHEMA = vol.All(
dict, vol.Schema({
# pylint: disable=no-value-for-parameter
vol.Required(ATTR_ENDPOINT): vol.Url(),
vol.Required(ATTR_KEYS): KEYS_SCHEMA,
vol.Optional(ATTR_EXPIRATIONTIME): vol.Any(None, cv.positive_int),
})
)
REGISTER_SCHEMA = vol.Schema({
vol.Required(ATTR_SUBSCRIPTION): SUBSCRIPTION_SCHEMA,
vol.Required(ATTR_BROWSER): vol.In(['chrome', 'firefox']),
vol.Optional(ATTR_NAME): cv.string
})
CALLBACK_EVENT_PAYLOAD_SCHEMA = vol.Schema({
vol.Required(ATTR_TAG): cv.string,
vol.Required(ATTR_TYPE): vol.In(['received', 'clicked', 'closed']),
vol.Required(ATTR_TARGET): cv.string,
vol.Optional(ATTR_ACTION): cv.string,
vol.Optional(ATTR_DATA): dict,
})
NOTIFY_CALLBACK_EVENT = 'html5_notification'
# Badge and timestamp are Chrome specific (not in official spec)
HTML5_SHOWNOTIFICATION_PARAMETERS = (
'actions', 'badge', 'body', 'dir', 'icon', 'image', 'lang',
'renotify', 'requireInteraction', 'tag', 'timestamp', 'vibrate')
def get_service(hass, config, discovery_info=None):
"""Get the HTML5 push notification service."""
json_path = hass.config.path(REGISTRATIONS_FILE)
registrations = _load_config(json_path)
if registrations is None:
return None
hass.http.register_view(
HTML5PushRegistrationView(registrations, json_path))
hass.http.register_view(HTML5PushCallbackView(registrations))
gcm_api_key = config.get(ATTR_GCM_API_KEY)
gcm_sender_id = config.get(ATTR_GCM_SENDER_ID)
if gcm_sender_id is not None:
add_manifest_json_key(
ATTR_GCM_SENDER_ID, config.get(ATTR_GCM_SENDER_ID))
return HTML5NotificationService(gcm_api_key, registrations, json_path)
def _load_config(filename):
"""Load configuration."""
try:
return load_json(filename)
except HomeAssistantError:
pass
return {}
class HTML5PushRegistrationView(HomeAssistantView):
"""Accepts push registrations from a browser."""
url = '/api/notify.html5'
name = 'api:notify.html5'
def __init__(self, registrations, json_path):
"""Init HTML5PushRegistrationView."""
self.registrations = registrations
self.json_path = json_path
async def post(self, request):
"""Accept the POST request for push registrations from a browser."""
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
try:
data = REGISTER_SCHEMA(data)
except vol.Invalid as ex:
return self.json_message(
humanize_error(data, ex), HTTP_BAD_REQUEST)
devname = data.get(ATTR_NAME)
data.pop(ATTR_NAME, None)
name = self.find_registration_name(data, devname)
previous_registration = self.registrations.get(name)
self.registrations[name] = data
try:
hass = request.app['hass']
await hass.async_add_job(save_json, self.json_path,
self.registrations)
return self.json_message(
'Push notification subscriber registered.')
except HomeAssistantError:
if previous_registration is not None:
self.registrations[name] = previous_registration
else:
self.registrations.pop(name)
return self.json_message(
'Error saving registration.', HTTP_INTERNAL_SERVER_ERROR)
def find_registration_name(self, data, suggested=None):
"""Find a registration name matching data or generate a unique one."""
endpoint = data.get(ATTR_SUBSCRIPTION).get(ATTR_ENDPOINT)
for key, registration in self.registrations.items():
subscription = registration.get(ATTR_SUBSCRIPTION)
if subscription.get(ATTR_ENDPOINT) == endpoint:
return key
return ensure_unique_string(suggested or 'unnamed device',
self.registrations)
async def delete(self, request):
"""Delete a registration."""
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
subscription = data.get(ATTR_SUBSCRIPTION)
found = None
for key, registration in self.registrations.items():
if registration.get(ATTR_SUBSCRIPTION) == subscription:
found = key
break
if not found:
# If not found, unregistering was already done. Return 200
return self.json_message('Registration not found.')
reg = self.registrations.pop(found)
try:
hass = request.app['hass']
await hass.async_add_job(save_json, self.json_path,
self.registrations)
except HomeAssistantError:
self.registrations[found] = reg
return self.json_message(
'Error saving registration.', HTTP_INTERNAL_SERVER_ERROR)
return self.json_message('Push notification subscriber unregistered.')
class HTML5PushCallbackView(HomeAssistantView):
"""Accepts push registrations from a browser."""
requires_auth = False
url = '/api/notify.html5/callback'
name = 'api:notify.html5/callback'
def __init__(self, registrations):
"""Init HTML5PushCallbackView."""
self.registrations = registrations
def decode_jwt(self, token):
"""Find the registration that signed this JWT and return it."""
import jwt
# 1. Check claims w/o verifying to see if a target is in there.
# 2. If target in claims, attempt to verify against the given name.
# 2a. If decode is successful, return the payload.
# 2b. If decode is unsuccessful, return a 401.
target_check = jwt.decode(token, verify=False)
if target_check.get(ATTR_TARGET) in self.registrations:
possible_target = self.registrations[target_check[ATTR_TARGET]]
key = possible_target[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
try:
return jwt.decode(token, key, algorithms=["ES256", "HS256"])
except jwt.exceptions.DecodeError:
pass
return self.json_message('No target found in JWT',
status_code=HTTP_UNAUTHORIZED)
# The following is based on code from Auth0
# https://auth0.com/docs/quickstart/backend/python
def check_authorization_header(self, request):
"""Check the authorization header."""
import jwt
auth = request.headers.get(AUTHORIZATION, None)
if not auth:
return self.json_message('Authorization header is expected',
status_code=HTTP_UNAUTHORIZED)
parts = auth.split()
if parts[0].lower() != 'bearer':
return self.json_message('Authorization header must '
'start with Bearer',
status_code=HTTP_UNAUTHORIZED)
if len(parts) != 2:
return self.json_message('Authorization header must '
'be Bearer token',
status_code=HTTP_UNAUTHORIZED)
token = parts[1]
try:
payload = self.decode_jwt(token)
except jwt.exceptions.InvalidTokenError:
return self.json_message('token is invalid',
status_code=HTTP_UNAUTHORIZED)
return payload
async def post(self, request):
"""Accept the POST request for push registrations event callback."""
auth_check = self.check_authorization_header(request)
if not isinstance(auth_check, dict):
return auth_check
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
event_payload = {
ATTR_TAG: data.get(ATTR_TAG),
ATTR_TYPE: data[ATTR_TYPE],
ATTR_TARGET: auth_check[ATTR_TARGET],
}
if data.get(ATTR_ACTION) is not None:
event_payload[ATTR_ACTION] = data.get(ATTR_ACTION)
if data.get(ATTR_DATA) is not None:
event_payload[ATTR_DATA] = data.get(ATTR_DATA)
try:
event_payload = CALLBACK_EVENT_PAYLOAD_SCHEMA(event_payload)
except vol.Invalid as ex:
_LOGGER.warning("Callback event payload is not valid: %s",
humanize_error(event_payload, ex))
event_name = '{}.{}'.format(NOTIFY_CALLBACK_EVENT,
event_payload[ATTR_TYPE])
request.app['hass'].bus.fire(event_name, event_payload)
return self.json({'status': 'ok', 'event': event_payload[ATTR_TYPE]})
class HTML5NotificationService(BaseNotificationService):
"""Implement the notification service for HTML5."""
def __init__(self, gcm_key, registrations, json_path):
"""Initialize the service."""
self._gcm_key = gcm_key
self.registrations = registrations
self.registrations_json_path = json_path
@property
def targets(self):
"""Return a dictionary of registered targets."""
targets = {}
for registration in self.registrations:
targets[registration] = registration
return targets
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
import jwt
from pywebpush import WebPusher
timestamp = int(time.time())
tag = str(uuid.uuid4())
payload = {
'badge': '/static/images/notification-badge.png',
'body': message,
ATTR_DATA: {},
'icon': '/static/icons/favicon-192x192.png',
ATTR_TAG: tag,
'timestamp': (timestamp*1000), # Javascript ms since epoch
ATTR_TITLE: kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
}
data = kwargs.get(ATTR_DATA)
if data:
# Pick out fields that should go into the notification directly vs
# into the notification data dictionary.
data_tmp = {}
for key, val in data.items():
if key in HTML5_SHOWNOTIFICATION_PARAMETERS:
payload[key] = val
else:
data_tmp[key] = val
payload[ATTR_DATA] = data_tmp
if (payload[ATTR_DATA].get(ATTR_URL) is None and
payload.get(ATTR_ACTIONS) is None):
payload[ATTR_DATA][ATTR_URL] = URL_ROOT
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = self.registrations.keys()
for target in list(targets):
info = self.registrations.get(target)
if info is None:
_LOGGER.error("%s is not a valid HTML5 push notification"
" target", target)
continue
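            # The JWT is signed with this subscription's auth secret and
            # embedded in the push payload; the browser side is expected
            # to echo it back as a Bearer token so HTML5PushCallbackView
            # can verify which registration an event came from.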
jwt_exp = (datetime.datetime.fromtimestamp(timestamp) +
datetime.timedelta(days=JWT_VALID_DAYS))
jwt_secret = info[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
jwt_claims = {'exp': jwt_exp, 'nbf': timestamp,
'iat': timestamp, ATTR_TARGET: target,
ATTR_TAG: payload[ATTR_TAG]}
jwt_token = jwt.encode(jwt_claims, jwt_secret).decode('utf-8')
payload[ATTR_DATA][ATTR_JWT] = jwt_token
# Only pass the gcm key if we're actually using GCM
# If we don't, notifications break on FireFox
gcm_key = self._gcm_key \
if 'googleapis.com' in info[ATTR_SUBSCRIPTION][ATTR_ENDPOINT] \
else None
response = WebPusher(info[ATTR_SUBSCRIPTION]).send(
json.dumps(payload), gcm_key=gcm_key, ttl='86400'
)
if response.status_code == 410:
_LOGGER.info("Notification channel has expired")
reg = self.registrations.pop(target)
if not save_json(self.registrations_json_path,
self.registrations):
self.registrations[target] = reg
_LOGGER.error("Error saving registration")
else:
_LOGGER.info("Configuration saved")
| 34.502381
| 79
| 0.630736
|
import datetime
import json
import logging
import time
import uuid
from aiohttp.hdrs import AUTHORIZATION
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.util.json import load_json, save_json
from homeassistant.exceptions import HomeAssistantError
from homeassistant.components.frontend import add_manifest_json_key
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.notify import (
ATTR_DATA, ATTR_TITLE, ATTR_TARGET, PLATFORM_SCHEMA, ATTR_TITLE_DEFAULT,
BaseNotificationService)
from homeassistant.const import (
URL_ROOT, HTTP_BAD_REQUEST, HTTP_UNAUTHORIZED, HTTP_INTERNAL_SERVER_ERROR)
from homeassistant.helpers import config_validation as cv
from homeassistant.util import ensure_unique_string
REQUIREMENTS = ['pywebpush==1.6.0']
DEPENDENCIES = ['frontend']
_LOGGER = logging.getLogger(__name__)
REGISTRATIONS_FILE = 'html5_push_registrations.conf'
ATTR_GCM_SENDER_ID = 'gcm_sender_id'
ATTR_GCM_API_KEY = 'gcm_api_key'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(ATTR_GCM_SENDER_ID): cv.string,
vol.Optional(ATTR_GCM_API_KEY): cv.string,
})
ATTR_SUBSCRIPTION = 'subscription'
ATTR_BROWSER = 'browser'
ATTR_NAME = 'name'
ATTR_ENDPOINT = 'endpoint'
ATTR_KEYS = 'keys'
ATTR_AUTH = 'auth'
ATTR_P256DH = 'p256dh'
ATTR_EXPIRATIONTIME = 'expirationTime'
ATTR_TAG = 'tag'
ATTR_ACTION = 'action'
ATTR_ACTIONS = 'actions'
ATTR_TYPE = 'type'
ATTR_URL = 'url'
ATTR_JWT = 'jwt'
JWT_VALID_DAYS = 7
KEYS_SCHEMA = vol.All(
dict, vol.Schema({
vol.Required(ATTR_AUTH): cv.string,
vol.Required(ATTR_P256DH): cv.string,
})
)
SUBSCRIPTION_SCHEMA = vol.All(
dict, vol.Schema({
vol.Required(ATTR_ENDPOINT): vol.Url(),
vol.Required(ATTR_KEYS): KEYS_SCHEMA,
vol.Optional(ATTR_EXPIRATIONTIME): vol.Any(None, cv.positive_int),
})
)
REGISTER_SCHEMA = vol.Schema({
vol.Required(ATTR_SUBSCRIPTION): SUBSCRIPTION_SCHEMA,
vol.Required(ATTR_BROWSER): vol.In(['chrome', 'firefox']),
vol.Optional(ATTR_NAME): cv.string
})
CALLBACK_EVENT_PAYLOAD_SCHEMA = vol.Schema({
vol.Required(ATTR_TAG): cv.string,
vol.Required(ATTR_TYPE): vol.In(['received', 'clicked', 'closed']),
vol.Required(ATTR_TARGET): cv.string,
vol.Optional(ATTR_ACTION): cv.string,
vol.Optional(ATTR_DATA): dict,
})
NOTIFY_CALLBACK_EVENT = 'html5_notification'
HTML5_SHOWNOTIFICATION_PARAMETERS = (
'actions', 'badge', 'body', 'dir', 'icon', 'image', 'lang',
'renotify', 'requireInteraction', 'tag', 'timestamp', 'vibrate')
def get_service(hass, config, discovery_info=None):
json_path = hass.config.path(REGISTRATIONS_FILE)
registrations = _load_config(json_path)
if registrations is None:
return None
hass.http.register_view(
HTML5PushRegistrationView(registrations, json_path))
hass.http.register_view(HTML5PushCallbackView(registrations))
gcm_api_key = config.get(ATTR_GCM_API_KEY)
gcm_sender_id = config.get(ATTR_GCM_SENDER_ID)
if gcm_sender_id is not None:
add_manifest_json_key(
ATTR_GCM_SENDER_ID, config.get(ATTR_GCM_SENDER_ID))
return HTML5NotificationService(gcm_api_key, registrations, json_path)
def _load_config(filename):
try:
return load_json(filename)
except HomeAssistantError:
pass
return {}
class HTML5PushRegistrationView(HomeAssistantView):
url = '/api/notify.html5'
name = 'api:notify.html5'
def __init__(self, registrations, json_path):
self.registrations = registrations
self.json_path = json_path
async def post(self, request):
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
try:
data = REGISTER_SCHEMA(data)
except vol.Invalid as ex:
return self.json_message(
humanize_error(data, ex), HTTP_BAD_REQUEST)
devname = data.get(ATTR_NAME)
data.pop(ATTR_NAME, None)
name = self.find_registration_name(data, devname)
previous_registration = self.registrations.get(name)
self.registrations[name] = data
try:
hass = request.app['hass']
await hass.async_add_job(save_json, self.json_path,
self.registrations)
return self.json_message(
'Push notification subscriber registered.')
except HomeAssistantError:
if previous_registration is not None:
self.registrations[name] = previous_registration
else:
self.registrations.pop(name)
return self.json_message(
'Error saving registration.', HTTP_INTERNAL_SERVER_ERROR)
def find_registration_name(self, data, suggested=None):
endpoint = data.get(ATTR_SUBSCRIPTION).get(ATTR_ENDPOINT)
for key, registration in self.registrations.items():
subscription = registration.get(ATTR_SUBSCRIPTION)
if subscription.get(ATTR_ENDPOINT) == endpoint:
return key
return ensure_unique_string(suggested or 'unnamed device',
self.registrations)
async def delete(self, request):
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
subscription = data.get(ATTR_SUBSCRIPTION)
found = None
for key, registration in self.registrations.items():
if registration.get(ATTR_SUBSCRIPTION) == subscription:
found = key
break
if not found:
return self.json_message('Registration not found.')
reg = self.registrations.pop(found)
try:
hass = request.app['hass']
await hass.async_add_job(save_json, self.json_path,
self.registrations)
except HomeAssistantError:
self.registrations[found] = reg
return self.json_message(
'Error saving registration.', HTTP_INTERNAL_SERVER_ERROR)
return self.json_message('Push notification subscriber unregistered.')
class HTML5PushCallbackView(HomeAssistantView):
requires_auth = False
url = '/api/notify.html5/callback'
name = 'api:notify.html5/callback'
def __init__(self, registrations):
self.registrations = registrations
def decode_jwt(self, token):
import jwt
target_check = jwt.decode(token, verify=False)
if target_check.get(ATTR_TARGET) in self.registrations:
possible_target = self.registrations[target_check[ATTR_TARGET]]
key = possible_target[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
try:
return jwt.decode(token, key, algorithms=["ES256", "HS256"])
except jwt.exceptions.DecodeError:
pass
return self.json_message('No target found in JWT',
status_code=HTTP_UNAUTHORIZED)
def check_authorization_header(self, request):
import jwt
auth = request.headers.get(AUTHORIZATION, None)
if not auth:
return self.json_message('Authorization header is expected',
status_code=HTTP_UNAUTHORIZED)
parts = auth.split()
if parts[0].lower() != 'bearer':
return self.json_message('Authorization header must '
'start with Bearer',
status_code=HTTP_UNAUTHORIZED)
if len(parts) != 2:
return self.json_message('Authorization header must '
'be Bearer token',
status_code=HTTP_UNAUTHORIZED)
token = parts[1]
try:
payload = self.decode_jwt(token)
except jwt.exceptions.InvalidTokenError:
return self.json_message('token is invalid',
status_code=HTTP_UNAUTHORIZED)
return payload
async def post(self, request):
auth_check = self.check_authorization_header(request)
if not isinstance(auth_check, dict):
return auth_check
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
event_payload = {
ATTR_TAG: data.get(ATTR_TAG),
ATTR_TYPE: data[ATTR_TYPE],
ATTR_TARGET: auth_check[ATTR_TARGET],
}
if data.get(ATTR_ACTION) is not None:
event_payload[ATTR_ACTION] = data.get(ATTR_ACTION)
if data.get(ATTR_DATA) is not None:
event_payload[ATTR_DATA] = data.get(ATTR_DATA)
try:
event_payload = CALLBACK_EVENT_PAYLOAD_SCHEMA(event_payload)
except vol.Invalid as ex:
_LOGGER.warning("Callback event payload is not valid: %s",
humanize_error(event_payload, ex))
event_name = '{}.{}'.format(NOTIFY_CALLBACK_EVENT,
event_payload[ATTR_TYPE])
request.app['hass'].bus.fire(event_name, event_payload)
return self.json({'status': 'ok', 'event': event_payload[ATTR_TYPE]})
class HTML5NotificationService(BaseNotificationService):
def __init__(self, gcm_key, registrations, json_path):
self._gcm_key = gcm_key
self.registrations = registrations
self.registrations_json_path = json_path
@property
def targets(self):
targets = {}
for registration in self.registrations:
targets[registration] = registration
return targets
def send_message(self, message="", **kwargs):
import jwt
from pywebpush import WebPusher
timestamp = int(time.time())
tag = str(uuid.uuid4())
payload = {
'badge': '/static/images/notification-badge.png',
'body': message,
ATTR_DATA: {},
'icon': '/static/icons/favicon-192x192.png',
ATTR_TAG: tag,
'timestamp': (timestamp*1000),
ATTR_TITLE: kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
}
data = kwargs.get(ATTR_DATA)
if data:
data_tmp = {}
for key, val in data.items():
if key in HTML5_SHOWNOTIFICATION_PARAMETERS:
payload[key] = val
else:
data_tmp[key] = val
payload[ATTR_DATA] = data_tmp
if (payload[ATTR_DATA].get(ATTR_URL) is None and
payload.get(ATTR_ACTIONS) is None):
payload[ATTR_DATA][ATTR_URL] = URL_ROOT
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = self.registrations.keys()
for target in list(targets):
info = self.registrations.get(target)
if info is None:
_LOGGER.error("%s is not a valid HTML5 push notification"
" target", target)
continue
jwt_exp = (datetime.datetime.fromtimestamp(timestamp) +
datetime.timedelta(days=JWT_VALID_DAYS))
jwt_secret = info[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
jwt_claims = {'exp': jwt_exp, 'nbf': timestamp,
'iat': timestamp, ATTR_TARGET: target,
ATTR_TAG: payload[ATTR_TAG]}
jwt_token = jwt.encode(jwt_claims, jwt_secret).decode('utf-8')
payload[ATTR_DATA][ATTR_JWT] = jwt_token
# If we don't, notifications break on FireFox
gcm_key = self._gcm_key \
if 'googleapis.com' in info[ATTR_SUBSCRIPTION][ATTR_ENDPOINT] \
else None
response = WebPusher(info[ATTR_SUBSCRIPTION]).send(
json.dumps(payload), gcm_key=gcm_key, ttl='86400'
)
if response.status_code == 410:
_LOGGER.info("Notification channel has expired")
reg = self.registrations.pop(target)
if not save_json(self.registrations_json_path,
self.registrations):
self.registrations[target] = reg
_LOGGER.error("Error saving registration")
else:
_LOGGER.info("Configuration saved")
| true
| true
|
f70a9ce853cb9ce01b71a3e215447fff235084b1
| 386
|
py
|
Python
|
mysit/urls.py
|
GhasemMatoo/Mysite_Restaurants
|
f44e0b0374016850cc47f212db0d5693d6de2ee6
|
[
"MIT"
] | null | null | null |
mysit/urls.py
|
GhasemMatoo/Mysite_Restaurants
|
f44e0b0374016850cc47f212db0d5693d6de2ee6
|
[
"MIT"
] | null | null | null |
mysit/urls.py
|
GhasemMatoo/Mysite_Restaurants
|
f44e0b0374016850cc47f212db0d5693d6de2ee6
|
[
"MIT"
] | null | null | null |
from django.urls import path
from mysit.views import *
app_name = 'mysit'
urlpatterns = [
    path('', index_views, name='index'),
    path('about', about_views, name='about'),
    path('contact', contact_views, name='contact'),
    path('gallery', gallery_views, name='gallery'),
    path('menu', menu_views, name='menu'),
    path('reservation', reservation_views, name='reservation'),
]
| 27.571429
| 62
| 0.683938
|
from django.urls import path
from mysit.views import *
app_name = 'mysit'
urlpatterns = [
    path('', index_views, name='index'),
    path('about', about_views, name='about'),
    path('contact', contact_views, name='contact'),
    path('gallery', gallery_views, name='gallery'),
    path('menu', menu_views, name='menu'),
    path('reservation', reservation_views, name='reservation'),
]
| true
| true
|
f70a9def8d0fc3fb9e3a591155df239d7c97521c
| 971
|
py
|
Python
|
fizzbuzz/fizzbuzz/number_publisher_node.py
|
ericboehlke/ros_fizzbuzz
|
c1bf95a154f78c050be255caa29e6454942ff6f6
|
[
"MIT"
] | null | null | null |
fizzbuzz/fizzbuzz/number_publisher_node.py
|
ericboehlke/ros_fizzbuzz
|
c1bf95a154f78c050be255caa29e6454942ff6f6
|
[
"MIT"
] | null | null | null |
fizzbuzz/fizzbuzz/number_publisher_node.py
|
ericboehlke/ros_fizzbuzz
|
c1bf95a154f78c050be255caa29e6454942ff6f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from std_msgs.msg import Int64
class NumberPublisher(Node):
def __init__(self):
super().__init__('number_publisher')
self.publisher_ = self.create_publisher(Int64, 'numbers', 10)
timer_period = 0.5 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
self.i = 0
def timer_callback(self):
msg = Int64()
msg.data = self.i
self.publisher_.publish(msg)
self.get_logger().info('Publishing: "%s"' % msg.data)
self.i += 1
def main(args=None):
rclpy.init(args=args)
number_publisher = NumberPublisher()
rclpy.spin(number_publisher)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
number_publisher.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| 23.119048
| 73
| 0.661174
|
import rclpy
from rclpy.node import Node
from std_msgs.msg import Int64
class NumberPublisher(Node):
def __init__(self):
super().__init__('number_publisher')
self.publisher_ = self.create_publisher(Int64, 'numbers', 10)
timer_period = 0.5
self.timer = self.create_timer(timer_period, self.timer_callback)
self.i = 0
def timer_callback(self):
msg = Int64()
msg.data = self.i
self.publisher_.publish(msg)
self.get_logger().info('Publishing: "%s"' % msg.data)
self.i += 1
def main(args=None):
rclpy.init(args=args)
number_publisher = NumberPublisher()
rclpy.spin(number_publisher)
number_publisher.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| true
| true
|
f70a9fa422465f06e33a73da4d25cfab7390d3b8
| 2,624
|
py
|
Python
|
libs/dscache/odc/dscache/apps/dstiler.py
|
MatthewJA/odc-tools
|
4bf902701b858c15f2a5f27974d05daf96df42c3
|
[
"Apache-2.0"
] | null | null | null |
libs/dscache/odc/dscache/apps/dstiler.py
|
MatthewJA/odc-tools
|
4bf902701b858c15f2a5f27974d05daf96df42c3
|
[
"Apache-2.0"
] | null | null | null |
libs/dscache/odc/dscache/apps/dstiler.py
|
MatthewJA/odc-tools
|
4bf902701b858c15f2a5f27974d05daf96df42c3
|
[
"Apache-2.0"
] | null | null | null |
from functools import partial
import click
from odc import dscache
from odc.dscache.tools.tiling import (
bin_by_native_tile,
web_gs,
extract_native_albers_tile,
parse_gridspec)
from odc.dscache._dscache import mk_group_name
from odc.index import bin_dataset_stream
@click.command('dstiler')
@click.option('--native', is_flag=True, help='Use Landsat Path/Row as grouping')
@click.option('--native-albers', is_flag=True, help='When datasets are in Albers (AU) grid already')
@click.option('--web', type=int, help='Use web map tiling regime at supplied zoom level')
@click.option('--grid', type=str,
help="Grid spec or name 'crs;pixel_resolution;shape_in_pixels'|albers_au_25",
default='albers_au_25')
@click.argument('dbfile', type=str, nargs=1)
def cli(native, native_albers, web, grid, dbfile):
"""Add spatial grouping to file db.
Default grid is Australian Albers (EPSG:3577) with 100k by 100k tiles. But
you can also group by Landsat path/row (--native), or Google's map tiling
regime (--web zoom_level)
\b
Example for custom --grid:
- rectangular: 'epsg:6933;-10x10;2000x3000'
^crs ^y ^x ^ny ^nx
- square : 'epsg:3857;10;10000'
- named : albers_au_25
albers_africa_10 (20,30,60 are also available)
"""
cache = dscache.open_rw(dbfile)
label = 'Processing {} ({:,d} datasets)'.format(dbfile, cache.count)
group_prefix = 'grid'
gs = None
cells = {}
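    # Each binner consumes the dataset stream lazily and fills `cells`,
    # mapping a tile index to the group of datasets that fall in it.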
if native:
group_prefix = 'native'
binner = partial(bin_by_native_tile, cells=cells)
elif native_albers:
group_prefix = 'albers'
binner = lambda dss: bin_by_native_tile(dss, cells, native_tile_id=extract_native_albers_tile)
elif web is not None:
gs = web_gs(web)
group_prefix = 'web_' + str(web)
binner = lambda dss: bin_dataset_stream(gs, dss, cells)
else:
gs = parse_gridspec(grid)
group_prefix = f"epsg{gs.crs.epsg:d}"
binner = lambda dss: bin_dataset_stream(gs, dss, cells)
if gs is not None:
click.echo(f'Using gridspec: {gs}')
cache.add_grid(gs, group_prefix)
with click.progressbar(cache.get_all(), length=cache.count, label=label) as dss:
for ds in binner(dss):
pass
click.echo('Total bins: {:d}'.format(len(cells)))
with click.progressbar(cells.values(), length=len(cells), label='Saving') as groups:
for group in groups:
cache.add_grid_tile(group_prefix, group.idx, group.dss)
if __name__ == '__main__':
cli()
| 34.986667
| 102
| 0.653201
|
from functools import partial
import click
from odc import dscache
from odc.dscache.tools.tiling import (
bin_by_native_tile,
web_gs,
extract_native_albers_tile,
parse_gridspec)
from odc.dscache._dscache import mk_group_name
from odc.index import bin_dataset_stream
@click.command('dstiler')
@click.option('--native', is_flag=True, help='Use Landsat Path/Row as grouping')
@click.option('--native-albers', is_flag=True, help='When datasets are in Albers (AU) grid already')
@click.option('--web', type=int, help='Use web map tiling regime at supplied zoom level')
@click.option('--grid', type=str,
help="Grid spec or name 'crs;pixel_resolution;shape_in_pixels'|albers_au_25",
default='albers_au_25')
@click.argument('dbfile', type=str, nargs=1)
def cli(native, native_albers, web, grid, dbfile):
cache = dscache.open_rw(dbfile)
label = 'Processing {} ({:,d} datasets)'.format(dbfile, cache.count)
group_prefix = 'grid'
gs = None
cells = {}
if native:
group_prefix = 'native'
binner = partial(bin_by_native_tile, cells=cells)
elif native_albers:
group_prefix = 'albers'
binner = lambda dss: bin_by_native_tile(dss, cells, native_tile_id=extract_native_albers_tile)
elif web is not None:
gs = web_gs(web)
group_prefix = 'web_' + str(web)
binner = lambda dss: bin_dataset_stream(gs, dss, cells)
else:
gs = parse_gridspec(grid)
group_prefix = f"epsg{gs.crs.epsg:d}"
binner = lambda dss: bin_dataset_stream(gs, dss, cells)
if gs is not None:
click.echo(f'Using gridspec: {gs}')
cache.add_grid(gs, group_prefix)
with click.progressbar(cache.get_all(), length=cache.count, label=label) as dss:
for ds in binner(dss):
pass
click.echo('Total bins: {:d}'.format(len(cells)))
with click.progressbar(cells.values(), length=len(cells), label='Saving') as groups:
for group in groups:
cache.add_grid_tile(group_prefix, group.idx, group.dss)
if __name__ == '__main__':
cli()
| true
| true
|
f70aa11c73981f29fb4af7f7835b063d5d965fa2
| 917
|
py
|
Python
|
flex.py
|
johndemlon/c-and-c-server
|
562e5fd21b9b93f68f4e65a4c032f20128eb9c2d
|
[
"MIT"
] | 2
|
2021-09-01T16:39:46.000Z
|
2021-09-08T16:44:56.000Z
|
flex.py
|
johndemlon/c-and-c-server
|
562e5fd21b9b93f68f4e65a4c032f20128eb9c2d
|
[
"MIT"
] | null | null | null |
flex.py
|
johndemlon/c-and-c-server
|
562e5fd21b9b93f68f4e65a4c032f20128eb9c2d
|
[
"MIT"
] | null | null | null |
# Date: 09/28/2017
# Author: Ethical-H4CK3R
# Description: A Simple C&C Server
from core.prompt import Prompt
from core.server import Server
from template.design import Designer
from core.console import MainController
from core.communicate import Communicate
__version__ = 0.1
class Flex(Prompt, Server, Designer, MainController, Communicate):
''' A Simple C&C Server '''
def __init__(self):
self.ip = '127.0.0.1'
self.port = 4444
self.botnet = []
Prompt.__init__(self)
Server.__init__(self)
Designer.__init__(self)
Communicate.__init__(self)
MainController.__init__(self)
self.wait = False
self.ping = False
self.alive = True
self.debug = True
self.activeIP = None
self.activePort = None
self.default_to_shell = True
self.prompt = self.getprompt()
def start(self):
        try:
            self.cmdloop()
        finally:
            self.disconnect(True)
if __name__ == '__main__':
Flex().start()
| 21.833333
| 66
| 0.718648
|
from core.prompt import Prompt
from core.server import Server
from template.design import Designer
from core.console import MainController
from core.communicate import Communicate
__version__ = 0.1
class Flex(Prompt, Server, Designer, MainController, Communicate):
def __init__(self):
self.ip = '127.0.0.1'
self.port = 4444
self.botnet = []
Prompt.__init__(self)
Server.__init__(self)
Designer.__init__(self)
Communicate.__init__(self)
MainController.__init__(self)
self.wait = False
self.ping = False
self.alive = True
self.debug = True
self.activeIP = None
self.activePort = None
self.default_to_shell = True
self.prompt = self.getprompt()
def start(self):
        try:
            self.cmdloop()
        finally:
            self.disconnect(True)
if __name__ == '__main__':
Flex().start()
| true
| true
|