code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
from django import forms
from django.contrib import admin
from django.contrib.admin import AdminSite
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.contenttypes.admin import GenericStackedInline
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.core import checks
from django.test import SimpleTestCase, override_settings
from .models import Album, Author, Book, City, Influence, Song, State, TwoAlbumFKAndAnE
class SongForm(forms.ModelForm):
    """Minimal ModelForm used as the custom form in the ModelAdmin fixtures below."""
    pass
class ValidFields(admin.ModelAdmin):
    """ModelAdmin pairing a custom form with an explicit 'fields' list."""
    form = SongForm
    fields = ["title"]
class ValidFormFieldsets(admin.ModelAdmin):
    """ModelAdmin that overrides get_form(); its fieldsets name a form-only field."""

    def get_form(self, request, obj=None, **kwargs):
        # The extra "name" field exists only on the form, not on the model,
        # so the fieldsets below are only valid because get_form() is overridden.
        class ExtraFieldForm(SongForm):
            name = forms.CharField(max_length=50)

        return ExtraFieldForm

    fieldsets = (
        (
            None,
            {
                "fields": ("name",),
            },
        ),
    )
class MyAdmin(admin.ModelAdmin):
    """ModelAdmin whose check() always reports a single fixed error string."""

    def check(self, **kwargs):
        return ["error!"]
class AuthenticationMiddlewareSubclass(AuthenticationMiddleware):
    """Subclass used to verify the admin checks accept middleware subclasses."""
    pass
class MessageMiddlewareSubclass(MessageMiddleware):
    """Subclass used to verify the admin checks accept middleware subclasses."""
    pass
class ModelBackendSubclass(ModelBackend):
    """Subclass used to verify the admin checks accept ModelBackend subclasses."""
    pass
class SessionMiddlewareSubclass(SessionMiddleware):
    """Subclass used to verify the admin checks accept middleware subclasses."""
    pass
@override_settings(
SILENCED_SYSTEM_CHECKS=["fields.W342"], # ForeignKey(unique=True)
INSTALLED_APPS=[
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"admin_checks",
],
)
class SystemChecksTestCase(SimpleTestCase):
databases = "__all__"
def test_checks_are_performed(self):
    """checks.run_checks() surfaces errors reported by a registered ModelAdmin."""
    admin.site.register(Song, MyAdmin)
    try:
        errors = checks.run_checks()
        expected = ["error!"]
        self.assertEqual(errors, expected)
    finally:
        # Always unregister so later tests see a clean default admin site.
        admin.site.unregister(Song)
@override_settings(INSTALLED_APPS=["django.contrib.admin"])
def test_apps_dependencies(self):
errors = admin.checks.check_dependencies()
expected = [
checks.Error(
"'django.contrib.contenttypes' must be in "
"INSTALLED_APPS in order to use the admin application.",
id="admin.E401",
),
checks.Error(
"'django.contrib.auth' must be in INSTALLED_APPS in order "
"to use the admin application.",
id="admin.E405",
),
checks.Error(
"'django.contrib.messages' must be in INSTALLED_APPS in order "
"to use the admin application.",
id="admin.E406",
),
]
self.assertEqual(errors, expected)
@override_settings(TEMPLATES=[])
def test_no_template_engines(self):
self.assertEqual(
admin.checks.check_dependencies(),
[
checks.Error(
"A 'django.template.backends.django.DjangoTemplates' "
"instance must be configured in TEMPLATES in order to use "
"the admin application.",
id="admin.E403",
)
],
)
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [],
},
}
],
)
def test_context_processor_dependencies(self):
expected = [
checks.Error(
"'django.contrib.auth.context_processors.auth' must be "
"enabled in DjangoTemplates (TEMPLATES) if using the default "
"auth backend in order to use the admin application.",
id="admin.E402",
),
checks.Error(
"'django.contrib.messages.context_processors.messages' must "
"be enabled in DjangoTemplates (TEMPLATES) in order to use "
"the admin application.",
id="admin.E404",
),
checks.Warning(
"'django.template.context_processors.request' must be enabled "
"in DjangoTemplates (TEMPLATES) in order to use the admin "
"navigation sidebar.",
id="admin.W411",
),
]
self.assertEqual(admin.checks.check_dependencies(), expected)
# The first error doesn't happen if
# 'django.contrib.auth.backends.ModelBackend' isn't in
# AUTHENTICATION_BACKENDS.
with self.settings(AUTHENTICATION_BACKENDS=[]):
self.assertEqual(admin.checks.check_dependencies(), expected[1:])
@override_settings(
AUTHENTICATION_BACKENDS=["admin_checks.tests.ModelBackendSubclass"],
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
def test_context_processor_dependencies_model_backend_subclass(self):
self.assertEqual(
admin.checks.check_dependencies(),
[
checks.Error(
"'django.contrib.auth.context_processors.auth' must be "
"enabled in DjangoTemplates (TEMPLATES) if using the default "
"auth backend in order to use the admin application.",
id="admin.E402",
),
],
)
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.dummy.TemplateStrings",
"DIRS": [],
"APP_DIRS": True,
},
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
],
)
def test_several_templates_backends(self):
self.assertEqual(admin.checks.check_dependencies(), [])
@override_settings(MIDDLEWARE=[])
def test_middleware_dependencies(self):
errors = admin.checks.check_dependencies()
expected = [
checks.Error(
"'django.contrib.auth.middleware.AuthenticationMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
id="admin.E408",
),
checks.Error(
"'django.contrib.messages.middleware.MessageMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
id="admin.E409",
),
checks.Error(
"'django.contrib.sessions.middleware.SessionMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
hint=(
"Insert "
"'django.contrib.sessions.middleware.SessionMiddleware' "
"before "
"'django.contrib.auth.middleware.AuthenticationMiddleware'."
),
id="admin.E410",
),
]
self.assertEqual(errors, expected)
@override_settings(
MIDDLEWARE=[
"admin_checks.tests.AuthenticationMiddlewareSubclass",
"admin_checks.tests.MessageMiddlewareSubclass",
"admin_checks.tests.SessionMiddlewareSubclass",
]
)
def test_middleware_subclasses(self):
self.assertEqual(admin.checks.check_dependencies(), [])
@override_settings(
MIDDLEWARE=[
"django.contrib.does.not.Exist",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
]
)
def test_admin_check_ignores_import_error_in_middleware(self):
self.assertEqual(admin.checks.check_dependencies(), [])
def test_custom_adminsite(self):
class CustomAdminSite(admin.AdminSite):
pass
custom_site = CustomAdminSite()
custom_site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ["error!"]
self.assertEqual(errors, expected)
finally:
custom_site.unregister(Song)
def test_allows_checks_relying_on_other_modeladmins(self):
class MyBookAdmin(admin.ModelAdmin):
def check(self, **kwargs):
errors = super().check(**kwargs)
if not self.admin_site.is_registered(Author):
errors.append("AuthorAdmin missing!")
return errors
class MyAuthorAdmin(admin.ModelAdmin):
pass
admin.site.register(Book, MyBookAdmin)
admin.site.register(Author, MyAuthorAdmin)
try:
self.assertEqual(admin.site.check(None), [])
finally:
admin.site.unregister(Book)
admin.site.unregister(Author)
def test_field_name_not_in_list_display(self):
    """A list_editable entry absent from list_display raises admin.E122."""
    class SongAdmin(admin.ModelAdmin):
        list_editable = ["original_release"]

    errors = SongAdmin(Song, AdminSite()).check()
    expected = [
        checks.Error(
            "The value of 'list_editable[0]' refers to 'original_release', "
            "which is not contained in 'list_display'.",
            obj=SongAdmin,
            id="admin.E122",
        )
    ]
    self.assertEqual(errors, expected)
def test_list_editable_not_a_list_or_tuple(self):
    """A string-valued list_editable is rejected with admin.E120."""
    class SongAdmin(admin.ModelAdmin):
        list_editable = "test"

    expected = [
        checks.Error(
            "The value of 'list_editable' must be a list or tuple.",
            obj=SongAdmin,
            id="admin.E120",
        )
    ]
    actual = SongAdmin(Song, AdminSite()).check()
    self.assertEqual(actual, expected)
def test_list_editable_missing_field(self):
class SongAdmin(admin.ModelAdmin):
list_editable = ("test",)
self.assertEqual(
SongAdmin(Song, AdminSite()).check(),
[
checks.Error(
"The value of 'list_editable[0]' refers to 'test', which is "
"not a field of 'admin_checks.Song'.",
obj=SongAdmin,
id="admin.E121",
)
],
)
def test_readonly_and_editable(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ["original_release"]
list_display = ["pk", "original_release"]
list_editable = ["original_release"]
fieldsets = [
(
None,
{
"fields": ["title", "original_release"],
},
),
]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_editable[0]' refers to 'original_release', "
"which is not editable through the admin.",
obj=SongAdmin,
id="admin.E125",
)
]
self.assertEqual(errors, expected)
def test_pk_not_editable(self):
# PKs cannot be edited in the list.
class SongAdmin(admin.ModelAdmin):
list_display = ["title", "id"]
list_editable = ["id"]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_editable[0]' refers to 'id', which is not editable "
"through the admin.",
obj=SongAdmin,
id="admin.E125",
)
]
self.assertEqual(errors, expected)
def test_editable(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(
None,
{
"fields": ["title", "original_release"],
},
),
]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_custom_modelforms_with_fields_fieldsets(self):
"""
# Regression test for #8027: custom ModelForms with fields/fieldsets
"""
errors = ValidFields(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_custom_get_form_with_fieldsets(self):
"""
The fieldsets checks are skipped when the ModelAdmin.get_form() method
is overridden.
"""
errors = ValidFormFieldsets(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_fieldsets_fields_non_tuple(self):
"""
The first fieldset's fields must be a list/tuple.
"""
class NotATupleAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {"fields": "title"}), # not a tuple
]
errors = NotATupleAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[0][1]['fields']' must be a list or tuple.",
obj=NotATupleAdmin,
id="admin.E008",
)
]
self.assertEqual(errors, expected)
def test_nonfirst_fieldset(self):
"""
The second fieldset's fields must be a list/tuple.
"""
class NotATupleAdmin(admin.ModelAdmin):
fieldsets = [
(None, {"fields": ("title",)}),
("foo", {"fields": "author"}), # not a tuple
]
errors = NotATupleAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[1][1]['fields']' must be a list or tuple.",
obj=NotATupleAdmin,
id="admin.E008",
)
]
self.assertEqual(errors, expected)
def test_exclude_values(self):
"""
Tests for basic system checks of 'exclude' option values (#12689)
"""
class ExcludedFields1(admin.ModelAdmin):
exclude = "foo"
errors = ExcludedFields1(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
obj=ExcludedFields1,
id="admin.E014",
)
]
self.assertEqual(errors, expected)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ("name", "name")
errors = ExcludedFields2(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
hint="Remove duplicates of 'name'.",
obj=ExcludedFields2,
id="admin.E015",
)
]
self.assertEqual(errors, expected)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = "foo"
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
errors = ExcludedFieldsAlbumAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
obj=ExcludedFieldsInline,
id="admin.E014",
)
]
self.assertEqual(errors, expected)
def test_exclude_inline_model_admin(self):
"""
Regression test for #9932 - exclude in InlineModelAdmin should not
contain the ForeignKey field used in ModelAdmin.model
"""
class SongInline(admin.StackedInline):
model = Song
exclude = ["album"]
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
errors = AlbumAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"Cannot exclude the field 'album', because it is the foreign key "
"to the parent model 'admin_checks.Album'.",
obj=SongInline,
id="admin.E201",
)
]
self.assertEqual(errors, expected)
def test_valid_generic_inline_model_admin(self):
"""
Regression test for #22034 - check that generic inlines don't look for
normal ForeignKey relations.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_generic_inline_model_admin_non_generic_model(self):
"""
A model without a GenericForeignKey raises problems if it's included
in a GenericInlineModelAdmin definition.
"""
class BookInline(GenericStackedInline):
model = Book
class SongAdmin(admin.ModelAdmin):
inlines = [BookInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Book' has no GenericForeignKey.",
obj=BookInline,
id="admin.E301",
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_ct_field(self):
"""
A GenericInlineModelAdmin errors if the ct_field points to a
nonexistent field.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = "nonexistent"
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'ct_field' references 'nonexistent', which is not a field on "
"'admin_checks.Influence'.",
obj=InfluenceInline,
id="admin.E302",
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_fk_field(self):
"""
A GenericInlineModelAdmin errors if the ct_fk_field points to a
nonexistent field.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = "nonexistent"
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'ct_fk_field' references 'nonexistent', which is not a field on "
"'admin_checks.Influence'.",
obj=InfluenceInline,
id="admin.E303",
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_ct_field(self):
"""
A GenericInlineModelAdmin raises problems if the ct_field points to a
field that isn't part of a GenericForeignKey.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = "name"
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using "
"content type field 'name' and object ID field 'object_id'.",
obj=InfluenceInline,
id="admin.E304",
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_fk_field(self):
"""
A GenericInlineModelAdmin raises problems if the ct_fk_field points to
a field that isn't part of a GenericForeignKey.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = "name"
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using "
"content type field 'content_type' and object ID field 'name'.",
obj=InfluenceInline,
id="admin.E304",
)
]
self.assertEqual(errors, expected)
def test_app_label_in_admin_checks(self):
class RawIdNonexistentAdmin(admin.ModelAdmin):
raw_id_fields = ("nonexistent",)
errors = RawIdNonexistentAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"The value of 'raw_id_fields[0]' refers to 'nonexistent', "
"which is not a field of 'admin_checks.Album'.",
obj=RawIdNonexistentAdmin,
id="admin.E002",
)
]
self.assertEqual(errors, expected)
def test_fk_exclusion(self):
"""
Regression test for #11709 - when testing for fk excluding (when
exclude is given) make sure fk_name is honored or things blow up when
there is more than one fk to the parent model.
"""
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
exclude = ("e",)
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
self.assertEqual(errors, [])
def test_inline_self_check(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey "
"to 'admin_checks.Album'. You must specify a 'fk_name' "
"attribute.",
obj=TwoAlbumFKAndAnEInline,
id="admin.E202",
)
]
self.assertEqual(errors, expected)
def test_inline_with_specified(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
self.assertEqual(errors, [])
def test_inlines_property(self):
    """ModelAdmin.inlines may be defined as a property, not only a class attribute."""
    class CitiesInline(admin.TabularInline):
        model = City

    class StateAdmin(admin.ModelAdmin):
        @property
        def inlines(self):
            return [CitiesInline]

    errors = StateAdmin(State, AdminSite()).check()
    self.assertEqual(errors, [])
def test_readonly(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_on_method(self):
@admin.display
def my_function(obj):
pass
class SongAdmin(admin.ModelAdmin):
readonly_fields = (my_function,)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_modeladmin",)
@admin.display
def readonly_method_on_modeladmin(self, obj):
pass
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_dynamic_attribute_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("dynamic_method",)
def __getattr__(self, item):
if item == "dynamic_method":
@admin.display
def method(obj):
pass
return method
raise AttributeError
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_method_on_model(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_model",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_nonexistent_field(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title", "nonexistent")
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'readonly_fields[1]' refers to 'nonexistent', which is "
"not a callable, an attribute of 'SongAdmin', or an attribute of "
"'admin_checks.Song'.",
obj=SongAdmin,
id="admin.E035",
)
]
self.assertEqual(errors, expected)
def test_nonexistent_field_on_inline(self):
class CityInline(admin.TabularInline):
model = City
readonly_fields = ["i_dont_exist"] # Missing attribute
errors = CityInline(State, AdminSite()).check()
expected = [
checks.Error(
"The value of 'readonly_fields[0]' refers to 'i_dont_exist', which is "
"not a callable, an attribute of 'CityInline', or an attribute of "
"'admin_checks.City'.",
obj=CityInline,
id="admin.E035",
)
]
self.assertEqual(errors, expected)
def test_readonly_fields_not_list_or_tuple(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = "test"
self.assertEqual(
SongAdmin(Song, AdminSite()).check(),
[
checks.Error(
"The value of 'readonly_fields' must be a list or tuple.",
obj=SongAdmin,
id="admin.E034",
)
],
)
def test_extra(self):
class SongAdmin(admin.ModelAdmin):
@admin.display
def awesome_song(self, instance):
if instance.title == "Born to Run":
return "Best Ever!"
return "Status unknown."
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_lambda(self):
    """A lambda is an acceptable readonly_fields entry (any callable is allowed)."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = (lambda obj: "test",)

    errors = SongAdmin(Song, AdminSite()).check()
    self.assertEqual(errors, [])
def test_graceful_m2m_fail(self):
"""
Regression test for #12203/#12237 - Fail more gracefully when a M2M
field that specifies the 'through' option is included in the 'fields'
or the 'fieldsets' ModelAdmin options.
"""
class BookAdmin(admin.ModelAdmin):
fields = ["authors"]
errors = BookAdmin(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fields' cannot include the ManyToManyField 'authors', "
"because that field manually specifies a relationship model.",
obj=BookAdmin,
id="admin.E013",
)
]
self.assertEqual(errors, expected)
def test_cannot_include_through(self):
class FieldsetBookAdmin(admin.ModelAdmin):
fieldsets = (
("Header 1", {"fields": ("name",)}),
("Header 2", {"fields": ("authors",)}),
)
errors = FieldsetBookAdmin(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[1][1][\"fields\"]' cannot include the "
"ManyToManyField 'authors', because that field manually specifies a "
"relationship model.",
obj=FieldsetBookAdmin,
id="admin.E013",
)
]
self.assertEqual(errors, expected)
def test_nested_fields(self):
class NestedFieldsAdmin(admin.ModelAdmin):
fields = ("price", ("name", "subtitle"))
errors = NestedFieldsAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_nested_fieldsets(self):
class NestedFieldsetAdmin(admin.ModelAdmin):
fieldsets = (("Main", {"fields": ("price", ("name", "subtitle"))}),)
errors = NestedFieldsetAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_explicit_through_override(self):
"""
Regression test for #12209 -- If the explicitly provided through model
is specified as a string, the admin should still be able use
Model.m2m_field.through
"""
class AuthorsInline(admin.TabularInline):
model = Book.authors.through
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorsInline]
errors = BookAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_non_model_fields(self):
"""
Regression for ensuring ModelAdmin.fields can contain non-model fields
that broke with r11737
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ["title", "extra_data"]
errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_non_model_first_field(self):
"""
Regression for ensuring ModelAdmin.field can handle first elem being a
non-model field (test fix for UnboundLocalError introduced with
r16225).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
fields = "__all__"
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ["extra_data", "title"]
errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_check_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fields = ["state", ["state"]]
errors = MyModelAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
hint="Remove duplicates of 'state'.",
obj=MyModelAdmin,
id="admin.E006",
)
]
self.assertEqual(errors, expected)
def test_check_fieldset_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fieldsets = [
(None, {"fields": ["title", "album", ("title", "album")]}),
]
errors = MyModelAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"There are duplicate field(s) in 'fieldsets[0][1]'.",
hint="Remove duplicates of 'title', 'album'.",
obj=MyModelAdmin,
id="admin.E012",
)
]
self.assertEqual(errors, expected)
def test_check_multiple_duplicates_across_fieldsets(self):
class MyModelAdmin(admin.ModelAdmin):
fieldsets = [
("Header 1", {"fields": ["title", "album"]}),
("Header 2", {"fields": ["album", "name"]}),
("Header 3", {"fields": ["name", "other", "title"]}),
]
errors = MyModelAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"There are duplicate field(s) in 'fieldsets[1][1]'.",
hint="Remove duplicates of 'album'.",
obj=MyModelAdmin,
id="admin.E012",
),
checks.Error(
"There are duplicate field(s) in 'fieldsets[2][1]'.",
hint="Remove duplicates of 'title', 'name'.",
obj=MyModelAdmin,
id="admin.E012",
),
]
self.assertEqual(errors, expected)
def test_list_filter_works_on_through_field_even_when_apps_not_ready(self):
"""
Ensure list_filter can access reverse fields even when the app registry
is not ready; refs #24146.
"""
class BookAdminWithListFilter(admin.ModelAdmin):
list_filter = ["authorsbooks__featured"]
# Temporarily pretending apps are not ready yet. This issue can happen
# if the value of 'list_filter' refers to a 'through__field'.
Book._meta.apps.ready = False
try:
errors = BookAdminWithListFilter(Book, AdminSite()).check()
self.assertEqual(errors, [])
finally:
Book._meta.apps.ready = True
def test_related_field_list_display(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "original_release", "album__title"]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_related_field_list_display_wrong_field(self):
    """A related lookup in list_display must name an existing field (admin.E108).

    The closing line of this method carried dataset-extraction residue
    ("| python | github | ..."); it has been removed so the module parses.
    """
    class SongAdmin(admin.ModelAdmin):
        list_display = ["pk", "original_release", "album__hello"]

    errors = SongAdmin(Song, AdminSite()).check()
    expected = [
        checks.Error(
            "The value of 'list_display[2]' refers to 'album__hello', which is not "
            "a callable or attribute of 'SongAdmin', or an attribute, method, or "
            "field on 'admin_checks.Song'.",
            obj=SongAdmin,
            id="admin.E108",
        )
    ]
    self.assertEqual(errors, expected)
# This script generates a Python interface for an Apple Macintosh Manager.
# It uses the "bgen" package to generate C code.
# The function specifications are generated by scanning the manager's header file,
# using the "scantools" package (customized for this particular manager).
import string
# Declarations that change for each manager
MACHEADERFILE = 'Appearance.h' # The Apple header file
MODNAME = '_App' # The name of the module
OBJECTNAME = 'ThemeDrawingState' # The basic name of the objects used here
KIND = '' # Usually 'Ptr' or 'Handle'
# The following is *usually* unchanged but may still require tuning
MODPREFIX = 'App' # The prefix for module-wide routines
OBJECTTYPE = OBJECTNAME + KIND # The C type used to represent them
OBJECTPREFIX = OBJECTNAME + 'Obj' # The prefix for object methods
INPUTFILE = string.lower(MODPREFIX) + 'gen.py' # The file generated by the scanner
OUTPUTFILE = MODNAME + "module.c" # The file generated by this program
from macsupport import *
# Create the type objects
#MenuRef = OpaqueByValueType("MenuRef", "MenuObj")
#WindowPeek = OpaqueByValueType("WindowPeek", OBJECTPREFIX)
RgnHandle = FakeType("(RgnHandle)0")
NULL = FakeType("NULL")
# XXXX Should be next, but this will break a lot of code...
# RgnHandle = OpaqueByValueType("RgnHandle", "OptResObj")
#KeyMap = ArrayOutputBufferType("KeyMap")
#MacOSEventKind = Type("MacOSEventKind", "h") # Old-style
#MacOSEventMask = Type("MacOSEventMask", "h") # Old-style
#EventMask = Type("EventMask", "h")
#EventKind = Type("EventKind", "h")
ThemeBrush = Type("ThemeBrush", "h")
ThemeColor = Type("ThemeColor", "h")
ThemeTextColor = Type("ThemeTextColor", "h")
ThemeMenuBarState = Type("ThemeMenuBarState", "H")
ThemeMenuState = Type("ThemeMenuState", "H")
ThemeMenuType = Type("ThemeMenuType", "H")
ThemeMenuItemType = Type("ThemeMenuItemType", "H")
ThemeFontID = Type("ThemeFontID", "H")
ThemeTabStyle = Type("ThemeTabStyle", "H")
ThemeTabDirection = Type("ThemeTabDirection", "H")
ThemeDrawState = Type("ThemeDrawState", "l")
ThemeCursor = Type("ThemeCursor", "l")
ThemeCheckBoxStyle = Type("ThemeCheckBoxStyle", "H")
ThemeScrollBarArrowStyle = Type("ThemeScrollBarArrowStyle", "H")
ThemeScrollBarThumbStyle = Type("ThemeScrollBarThumbStyle", "H")
CTabHandle = OpaqueByValueType("CTabHandle", "ResObj")
ThemeTrackEnableState = Type("ThemeTrackEnableState", "b")
ThemeTrackPressState = Type("ThemeTrackPressState", "b")
ThemeThumbDirection = Type("ThemeThumbDirection", "b")
ThemeTrackAttributes = Type("ThemeTrackAttributes", "H")
ControlPartCode = Type("ControlPartCode", "h")
ThemeWindowAttributes = Type("ThemeWindowAttributes", "l")
ThemeWindowType = Type("ThemeWindowType", "H")
ThemeTitleBarWidget = Type("ThemeTitleBarWidget", "H")
ThemeArrowOrientation = Type("ThemeArrowOrientation", "H")
ThemePopupArrowSize = Type("ThemePopupArrowSize", "H")
ThemeGrowDirection = Type("ThemeGrowDirection", "H")
ThemeSoundKind = OSTypeType("ThemeSoundKind")
ThemeDragSoundKind = OSTypeType("ThemeDragSoundKind")
ThemeBackgroundKind = Type("ThemeBackgroundKind", "l")
ThemeMetric = Type("ThemeMetric", "l")
RGBColor = OpaqueType("RGBColor", "QdRGB")
TruncCode = Type("TruncCode", "h")
ThemeButtonKind = UInt16
ThemeButtonDrawInfo_ptr = OpaqueType("ThemeButtonDrawInfo", "ThemeButtonDrawInfo")
ThemeEraseUPP = FakeType("NULL")
ThemeButtonDrawUPP = FakeType("NULL")
includestuff = includestuff + """
#include <Carbon/Carbon.h>
int ThemeButtonDrawInfo_Convert(PyObject *v, ThemeButtonDrawInfo *p_itself)
{
return PyArg_Parse(v, "(iHH)", &p_itself->state, &p_itself->value, &p_itself->adornment);
}
"""
class MyObjectDefinition(PEP253Mixin, GlobalObjectDefinition):
    """Object definition for ThemeDrawingState; relies entirely on bgen defaults.

    The commented-out hooks below show the overrides other managers
    typically supply (outputCheckNewArg / outputCheckConvertArg).
    """
    pass
## def outputCheckNewArg(self):
## Output("if (itself == NULL) return PyMac_Error(resNotFound);")
## def outputCheckConvertArg(self):
## OutLbrace("if (DlgObj_Check(v))")
## Output("*p_itself = ((WindowObject *)v)->ob_itself;")
## Output("return 1;")
## OutRbrace()
## Out("""
## if (v == Py_None) { *p_itself = NULL; return 1; }
## if (PyInt_Check(v)) { *p_itself = (WindowPtr)PyInt_AsLong(v); return 1; }
## """)
# From here on it's basically all boiler plate...
# Create the generator groups and link them
module = MacModule(MODNAME, MODPREFIX, includestuff, finalstuff, initstuff)
object = MyObjectDefinition(OBJECTNAME, OBJECTPREFIX, OBJECTTYPE)
module.addobject(object)
ThemeDrawingState = OpaqueByValueType("ThemeDrawingState", "ThemeDrawingStateObj")
Method = WeakLinkMethodGenerator
# Create the generator classes used to populate the lists
Function = OSErrWeakLinkFunctionGenerator
##Method = OSErrWeakLinkMethodGenerator
# Create and populate the lists
functions = []
methods = []
execfile(INPUTFILE)
# add the populated lists to the generator groups
# (in a different world the scan program would generate this)
for f in functions: module.add(f)
for f in methods: object.add(f)
# Generate output (open the output file as late as possible).
# NOTE: the original final line carried dataset-extraction residue
# ("| python | codeparrot/..."); it has been removed so the script runs.
SetOutputFileName(OUTPUTFILE)
module.generate()
from django_filters import rest_framework as filters
from entitlements.models import CourseEntitlement
class CharListFilter(filters.CharFilter):
    """ Filters a field via a comma-delimited list of values. """

    def filter(self, qs, value):  # pylint: disable=method-hidden
        # Split only when a value was actually supplied; None/'' are passed
        # through to the parent filter untouched.
        if value not in (None, ''):
            value = value.split(',')
        return super(CharListFilter, self).filter(qs, value)
class UUIDListFilter(CharListFilter):
    """ Filters a field via a comma-delimited list of UUIDs. """

    def __init__(self, name='uuid', label=None, widget=None, method=None, lookup_expr='in', required=False,
                 distinct=False, exclude=False, **kwargs):
        # Defaults target the 'uuid' field with an "in" lookup so one query
        # parameter can match any of several comma-separated UUIDs.
        # NOTE(review): 'name=' is the pre-2.0 django-filter kwarg (renamed
        # 'field_name' later) — confirm against the pinned version.
        super(UUIDListFilter, self).__init__(
            name=name,
            label=label,
            widget=widget,
            method=method,
            lookup_expr=lookup_expr,
            required=required,
            distinct=distinct,
            exclude=exclude,
            **kwargs
        )
class CourseEntitlementFilter(filters.FilterSet):
    """FilterSet for CourseEntitlement: filter by a UUID list and/or username.

    The closing line of this class carried dataset-extraction residue
    ("| python | codeparrot/..."); it has been removed so the module parses.
    """
    uuid = UUIDListFilter()
    user = filters.CharFilter(name='user__username')

    class Meta:
        model = CourseEntitlement
        fields = ('uuid', 'user')
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility functions for invoking RPCs."""
import sys
import threading
from grpc.framework.base import interfaces as base_interfaces
from grpc.framework.base import util as base_util
from grpc.framework.face import _control
from grpc.framework.face import interfaces
from grpc.framework.foundation import callable_util
from grpc.framework.foundation import future
_ITERATOR_EXCEPTION_LOG_MESSAGE = 'Exception iterating over requests!'
_DONE_CALLBACK_LOG_MESSAGE = 'Exception calling Future "done" callback!'
class _RendezvousServicedIngestor(base_interfaces.ServicedIngestor):
  """ServicedIngestor that funnels every response into one Rendezvous."""

  def __init__(self, rendezvous):
    self._rendezvous = rendezvous

  def consumer(self, operation_context):
    """See base_interfaces.ServicedIngestor.consumer for specification."""
    return self._rendezvous
class _EventServicedIngestor(base_interfaces.ServicedIngestor):
  """ServicedIngestor delivering results to a consumer plus an abortion hook."""

  def __init__(self, result_consumer, abortion_callback):
    self._result_consumer = result_consumer
    self._abortion_callback = abortion_callback

  def consumer(self, operation_context):
    """See base_interfaces.ServicedIngestor.consumer for specification."""
    termination_callback = _control.as_operation_termination_callback(
        self._abortion_callback)
    operation_context.add_termination_callback(termination_callback)
    return self._result_consumer
def _rendezvous_subscription(rendezvous):
  """Creates a full serviced subscription backed by a single rendezvous."""
  ingestor = _RendezvousServicedIngestor(rendezvous)
  return base_util.full_serviced_subscription(ingestor)
def _unary_event_subscription(completion_callback, abortion_callback):
  """Creates a subscription delivering a single result via callbacks."""
  ingestor = _EventServicedIngestor(
      _control.UnaryConsumer(completion_callback), abortion_callback)
  return base_util.full_serviced_subscription(ingestor)
def _stream_event_subscription(result_consumer, abortion_callback):
  """Creates a subscription streaming results into the given consumer."""
  ingestor = _EventServicedIngestor(result_consumer, abortion_callback)
  return base_util.full_serviced_subscription(ingestor)
# NOTE(nathaniel): This class has some extremely special semantics around
# cancellation that allow it to be used by both "blocking" APIs and "futures"
# APIs.
#
# Since futures.Future defines its own exception for cancellation, we want these
# objects, when returned by methods of a returning-Futures-from-other-methods
# object, to raise the same exception for cancellation. But that's weird in a
# blocking API - why should this object, also returned by methods of blocking
# APIs, raise exceptions from the "future" module? Should we do something like
# have this class be parameterized by the type of exception that it raises in
# cancellation circumstances?
#
# We don't have to take such a dramatic step: since blocking APIs define no
# cancellation semantics whatsoever, there is no supported way for
# blocking-API-users of these objects to cancel RPCs, and thus no supported way
# for them to see an exception the type of which would be weird to them.
#
# Bonus: in both blocking and futures APIs, this object still properly raises
# exceptions.CancellationError for any *server-side cancellation* of an RPC.
class _OperationCancellableIterator(interfaces.CancellableIterator):
  """An interfaces.CancellableIterator for response-streaming operations."""

  def __init__(self, rendezvous, operation):
    # The lock serializes next() against cancel().
    self._lock = threading.Lock()
    self._rendezvous = rendezvous
    self._operation = operation
    self._cancelled = False

  def __iter__(self):
    return self

  def next(self):
    # Python 2 iterator protocol. Note that the draw from the rendezvous
    # happens while the lock is held, so a concurrent cancel() waits for it.
    with self._lock:
      if self._cancelled:
        # See the module-level NOTE above: local cancellation raises the
        # "futures"-style error in both blocking and futures APIs.
        raise future.CancelledError()
      return next(self._rendezvous)

  def cancel(self):
    # Marks this iterator cancelled, cancels the underlying operation, and
    # forces the rendezvous into the CANCELLED outcome so blocked readers
    # wake up.
    with self._lock:
      self._cancelled = True
      self._operation.cancel()
      self._rendezvous.set_outcome(base_interfaces.Outcome.CANCELLED)
class _OperationFuture(future.Future):
  """A future.Future interface to an operation.

  All mutable state is guarded by self._condition. self._callbacks doubles
  as a terminal-state marker: it holds the registered "done" callbacks until
  the future reaches a terminal state, at which point it is drained and set
  to None exactly once (in on_operation_termination).
  """

  def __init__(self, rendezvous, operation):
    self._condition = threading.Condition()
    self._rendezvous = rendezvous
    self._operation = operation
    self._cancelled = False
    self._computed = False
    # _payload is None exactly when the operation terminated exceptionally;
    # result()/exception() rely on that invariant.
    self._payload = None
    self._exception = None
    self._traceback = None
    self._callbacks = []

  def cancel(self):
    """See future.Future.cancel for specification."""
    with self._condition:
      if not self._cancelled and not self._computed:
        self._operation.cancel()
        self._cancelled = True
        self._condition.notify_all()
    # NOTE(review): False is returned even when cancellation was initiated
    # just above; future.Future.cancel implementations commonly return True
    # in that case -- confirm this is intentional.
    return False

  def cancelled(self):
    """See future.Future.cancelled for specification."""
    with self._condition:
      return self._cancelled

  def running(self):
    """See future.Future.running for specification."""
    with self._condition:
      return not self._cancelled and not self._computed

  def done(self):
    """See future.Future.done for specification."""
    with self._condition:
      return self._cancelled or self._computed

  def result(self, timeout=None):
    """See future.Future.result for specification."""
    with self._condition:
      if self._cancelled:
        raise future.CancelledError()
      if self._computed:
        # A None payload means the operation terminated exceptionally.
        if self._payload is None:
          raise self._exception  # pylint: disable=raising-bad-type
        else:
          return self._payload
      # Not yet terminal: register a fresh per-call condition as a "done"
      # callback, then wait on it (at most timeout seconds) outside the lock.
      condition = threading.Condition()
      def notify_condition(unused_future):
        with condition:
          condition.notify()
      self._callbacks.append(notify_condition)
    with condition:
      condition.wait(timeout=timeout)
    with self._condition:
      if self._cancelled:
        raise future.CancelledError()
      elif self._computed:
        if self._payload is None:
          raise self._exception  # pylint: disable=raising-bad-type
        else:
          return self._payload
      else:
        # Woke up without reaching a terminal state: the wait timed out.
        raise future.TimeoutError()

  def exception(self, timeout=None):
    """See future.Future.exception for specification."""
    with self._condition:
      if self._cancelled:
        raise future.CancelledError()
      if self._computed:
        # None when the operation completed with a payload.
        return self._exception
      # Same wait protocol as result(): per-call condition used as callback.
      condition = threading.Condition()
      def notify_condition(unused_future):
        with condition:
          condition.notify()
      self._callbacks.append(notify_condition)
    with condition:
      condition.wait(timeout=timeout)
    with self._condition:
      if self._cancelled:
        raise future.CancelledError()
      elif self._computed:
        return self._exception
      else:
        raise future.TimeoutError()

  def traceback(self, timeout=None):
    """See future.Future.traceback for specification."""
    with self._condition:
      if self._cancelled:
        raise future.CancelledError()
      if self._computed:
        return self._traceback
      # Same wait protocol as result(): per-call condition used as callback.
      condition = threading.Condition()
      def notify_condition(unused_future):
        with condition:
          condition.notify()
      self._callbacks.append(notify_condition)
    with condition:
      condition.wait(timeout=timeout)
    with self._condition:
      if self._cancelled:
        raise future.CancelledError()
      elif self._computed:
        return self._traceback
      else:
        raise future.TimeoutError()

  def add_done_callback(self, fn):
    """See future.Future.add_done_callback for specification."""
    with self._condition:
      if self._callbacks is not None:
        self._callbacks.append(fn)
        return
    # Already terminal (callbacks drained): invoke fn immediately, outside
    # the lock, with any exception it raises logged rather than propagated.
    callable_util.call_logging_exceptions(fn, _DONE_CALLBACK_LOG_MESSAGE, self)

  def on_operation_termination(self, operation_outcome):
    """Indicates to this object that the operation has terminated.

    Args:
      operation_outcome: A base_interfaces.Outcome value indicating the
        outcome of the operation.
    """
    with self._condition:
      cancelled = self._cancelled
      if cancelled:
        callbacks = list(self._callbacks)
        self._callbacks = None
      else:
        rendezvous = self._rendezvous
    if not cancelled:
      # Compute the outcome outside the lock: drawing from the rendezvous
      # may block or raise.
      payload = None
      exception = None
      traceback = None
      if operation_outcome == base_interfaces.Outcome.COMPLETED:
        try:
          payload = next(rendezvous)
        except Exception as e:  # pylint: disable=broad-except
          exception = e
          traceback = sys.exc_info()[2]
      else:
        try:
          # We raise and then immediately catch in order to create a traceback.
          raise _control.abortion_outcome_to_exception(operation_outcome)
        except Exception as e:  # pylint: disable=broad-except
          exception = e
          traceback = sys.exc_info()[2]
      with self._condition:
        if not self._cancelled:
          self._computed = True
          self._payload = payload
          self._exception = exception
          self._traceback = traceback
        callbacks = list(self._callbacks)
        self._callbacks = None
    # Run the drained "done" callbacks outside the lock.
    for callback in callbacks:
      callable_util.call_logging_exceptions(
          callback, _DONE_CALLBACK_LOG_MESSAGE, self)
class _Call(interfaces.Call):
  """Trivial interfaces.Call implementation wrapping a base operation."""

  def __init__(self, operation):
    self._operation = operation
    self.context = _control.RpcContext(operation.context)

  def cancel(self):
    """See interfaces.Call.cancel for specification."""
    self._operation.cancel()
def blocking_value_in_value_out(front, name, payload, timeout, trace_id):
  """Services in a blocking fashion a value-in value-out servicer method."""
  rendezvous = _control.Rendezvous()
  operation = front.operate(
      name, payload, True, timeout, _rendezvous_subscription(rendezvous),
      trace_id)
  operation.context.add_termination_callback(rendezvous.set_outcome)
  # Block until the single response (or an abortion outcome) is drawn.
  return next(rendezvous)
def future_value_in_value_out(front, name, payload, timeout, trace_id):
  """Services a value-in value-out servicer method by returning a Future."""
  rendezvous = _control.Rendezvous()
  operation = front.operate(
      name, payload, True, timeout, _rendezvous_subscription(rendezvous),
      trace_id)
  operation.context.add_termination_callback(rendezvous.set_outcome)
  # The future observes operation termination to compute its payload or
  # exception.
  operation_future = _OperationFuture(rendezvous, operation)
  operation.context.add_termination_callback(
      operation_future.on_operation_termination)
  return operation_future
def inline_value_in_stream_out(front, name, payload, timeout, trace_id):
  """Services a value-in stream-out servicer method."""
  rendezvous = _control.Rendezvous()
  operation = front.operate(
      name, payload, True, timeout, _rendezvous_subscription(rendezvous),
      trace_id)
  operation.context.add_termination_callback(rendezvous.set_outcome)
  # Responses are drawn lazily through the cancellable iterator.
  return _OperationCancellableIterator(rendezvous, operation)
def blocking_stream_in_value_out(
    front, name, payload_iterator, timeout, trace_id):
  """Services in a blocking fashion a stream-in value-out servicer method."""
  rendezvous = _control.Rendezvous()
  operation = front.operate(
      name, None, False, timeout, _rendezvous_subscription(rendezvous),
      trace_id)
  operation.context.add_termination_callback(rendezvous.set_outcome)
  # Feed the whole request stream before blocking on the single response.
  for payload in payload_iterator:
    operation.consumer.consume(payload)
  operation.consumer.terminate()
  return next(rendezvous)
def future_stream_in_value_out(
    front, name, payload_iterator, timeout, trace_id, pool):
  """Services a stream-in value-out servicer method by returning a Future."""
  rendezvous = _control.Rendezvous()
  operation = front.operate(
      name, None, False, timeout, _rendezvous_subscription(rendezvous),
      trace_id)
  operation.context.add_termination_callback(rendezvous.set_outcome)
  # Pump the request iterator on the pool so this call returns immediately.
  pool.submit(
      callable_util.with_exceptions_logged(
          _control.pipe_iterator_to_consumer, _ITERATOR_EXCEPTION_LOG_MESSAGE),
      payload_iterator, operation.consumer, lambda: True, True)
  operation_future = _OperationFuture(rendezvous, operation)
  operation.context.add_termination_callback(
      operation_future.on_operation_termination)
  return operation_future
def inline_stream_in_stream_out(
    front, name, payload_iterator, timeout, trace_id, pool):
  """Services a stream-in stream-out servicer method."""
  rendezvous = _control.Rendezvous()
  operation = front.operate(
      name, None, False, timeout, _rendezvous_subscription(rendezvous),
      trace_id)
  operation.context.add_termination_callback(rendezvous.set_outcome)
  # Pump the request iterator on the pool so responses can be drawn at once.
  pool.submit(
      callable_util.with_exceptions_logged(
          _control.pipe_iterator_to_consumer, _ITERATOR_EXCEPTION_LOG_MESSAGE),
      payload_iterator, operation.consumer, lambda: True, True)
  return _OperationCancellableIterator(rendezvous, operation)
def event_value_in_value_out(
    front, name, payload, completion_callback, abortion_callback, timeout,
    trace_id):
  """Services a value-in value-out servicer method via event callbacks."""
  subscription = _unary_event_subscription(
      completion_callback, abortion_callback)
  operation = front.operate(
      name, payload, True, timeout, subscription, trace_id)
  return _Call(operation)
def event_value_in_stream_out(
    front, name, payload, result_payload_consumer, abortion_callback, timeout,
    trace_id):
  """Services a value-in stream-out servicer method via event callbacks."""
  subscription = _stream_event_subscription(
      result_payload_consumer, abortion_callback)
  operation = front.operate(
      name, payload, True, timeout, subscription, trace_id)
  return _Call(operation)
def event_stream_in_value_out(
    front, name, completion_callback, abortion_callback, timeout, trace_id):
  """Services a stream-in value-out servicer method via event callbacks."""
  subscription = _unary_event_subscription(
      completion_callback, abortion_callback)
  operation = front.operate(name, None, False, timeout, subscription, trace_id)
  # The caller feeds requests through the returned consumer.
  return _Call(operation), operation.consumer
def event_stream_in_stream_out(
    front, name, result_payload_consumer, abortion_callback, timeout, trace_id):
  """Services a stream-in stream-out servicer method via event callbacks."""
  subscription = _stream_event_subscription(
      result_payload_consumer, abortion_callback)
  operation = front.operate(name, None, False, timeout, subscription, trace_id)
  # The caller feeds requests through the returned consumer.
  return _Call(operation), operation.consumer
# Copyright 2025 The Nari Labs and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dia model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class DiaEncoderConfig(PreTrainedConfig):
    r"""
    Configuration class for a [`DiaEncoder`]. It is used to instantiate a Dia encoder according to the specified
    arguments, defining the encoder architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the encoder layers.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer.
        num_key_value_heads (`int`, *optional*, defaults to 16):
            Number of key and value heads for each attention layer.
        head_dim (`int`, *optional*, defaults to 128):
            Dimensionality of each attention head.
        intermediate_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the feed-forward layer.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the normalization layers.
        vocab_size (`int`, *optional*, defaults to 256):
            Vocabulary size: the number of different tokens representable by the `inputs_ids` passed to [`DiaModel`].
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function in the encoder; `"gelu"`, `"relu"`, `"swish"` and `"gelu_new"`
            are supported as strings.
        rope_parameters (`RopeParameters`, *optional*):
            Configuration of the RoPE embeddings: `rope_theta` plus optional scaling parameters for longer
            `max_position_embeddings`.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """

    model_type = "dia_encoder"

    def __init__(
        self,
        max_position_embeddings: int = 1024,
        num_hidden_layers: int = 12,
        hidden_size: int = 1024,
        num_attention_heads: int = 16,
        num_key_value_heads: int = 16,
        head_dim: int = 128,
        intermediate_size: int = 4096,
        norm_eps: float = 1e-5,
        vocab_size: int = 256,
        hidden_act: str = "silu",
        rope_parameters: RopeParameters | None = None,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        # Transformer stack sizing.
        self.max_position_embeddings = max_position_embeddings
        self.num_hidden_layers = num_hidden_layers
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        # Attention geometry.
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.rope_parameters = rope_parameters
        # Embedding / activation / initialization details.
        self.vocab_size = vocab_size
        self.hidden_act = hidden_act
        self.norm_eps = norm_eps
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class DiaDecoderConfig(PreTrainedConfig):
    r"""
    Configuration class for a [`DiaDecoder`]. It is used to instantiate a Dia decoder according to the specified
    arguments, defining the decoder architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        max_position_embeddings (`int`, *optional*, defaults to 3072):
            The maximum sequence length that this model might ever be used with.
        num_hidden_layers (`int`, *optional*, defaults to 18):
            Number of hidden layers in the Transformer decoder.
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the decoder layers.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimensionality of the feed-forward layer.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of self-attention heads for each layer.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            Number of key and value heads for each self-attention layer.
        head_dim (`int`, *optional*, defaults to 128):
            Dimensionality of each self-attention head.
        cross_num_attention_heads (`int`, *optional*, defaults to 16):
            Number of cross-attention heads for each layer.
        cross_head_dim (`int`, *optional*, defaults to 128):
            Dimensionality of each cross-attention head.
        cross_num_key_value_heads (`int`, *optional*, defaults to 16):
            Number of key and value heads for each cross-attention layer.
        cross_hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the cross-attention layers.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the normalization layers.
        vocab_size (`int`, *optional*, defaults to 1028):
            Vocabulary size: the number of different tokens representable by the `inputs_ids` passed to [`DiaModel`].
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function in the decoder; `"gelu"`, `"relu"`, `"swish"` and `"gelu_new"`
            are supported as strings.
        num_channels (`int`, *optional*, defaults to 9):
            Number of channels for the Dia decoder.
        rope_parameters (`RopeParameters`, *optional*):
            Configuration of the RoPE embeddings: `rope_theta` plus optional scaling parameters for longer
            `max_position_embeddings`.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Indicating that this model is part of an encoder-decoder architecture.
        pad_token_id (`int`, *optional*, defaults to 1025):
            The token id used for padding sequences to the same length within a batch.
        eos_token_id (`int`, *optional*, defaults to 1024):
            The token id representing the end-of-sequence token.
        bos_token_id (`int`, *optional*, defaults to 1026):
            The token id representing the beginning-of-sequence token.
    """

    model_type = "dia_decoder"

    def __init__(
        self,
        max_position_embeddings: int = 3072,
        num_hidden_layers: int = 18,
        hidden_size: int = 2048,
        intermediate_size: int = 8192,
        num_attention_heads: int = 16,
        num_key_value_heads: int = 4,
        head_dim: int = 128,
        cross_num_attention_heads: int = 16,
        cross_head_dim: int = 128,
        cross_num_key_value_heads: int = 16,
        cross_hidden_size: int = 1024,
        norm_eps: float = 1e-5,
        vocab_size: int = 1028,
        hidden_act: str = "silu",
        num_channels: int = 9,
        rope_parameters: RopeParameters | None = None,
        initializer_range: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder: bool = True,
        pad_token_id: int = 1025,
        eos_token_id: int = 1024,
        bos_token_id: int = 1026,
        **kwargs,
    ):
        # Transformer stack sizing.
        self.max_position_embeddings = max_position_embeddings
        self.num_hidden_layers = num_hidden_layers
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        # Self-attention geometry.
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.rope_parameters = rope_parameters
        # Cross-attention geometry.
        self.cross_num_attention_heads = cross_num_attention_heads
        self.cross_num_key_value_heads = cross_num_key_value_heads
        self.cross_head_dim = cross_head_dim
        self.cross_hidden_size = cross_hidden_size
        # Embedding / activation / initialization details.
        self.vocab_size = vocab_size
        self.hidden_act = hidden_act
        self.num_channels = num_channels
        self.norm_eps = norm_eps
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        # Special token ids live on the decoder config (see DiaConfig notes).
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
class DiaConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DiaModel`]. It is used to instantiate a
    Dia model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the
    [nari-labs/Dia-1.6B](https://huggingface.co/nari-labs/Dia-1.6B) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        encoder_config (`DiaEncoderConfig`, *optional*):
            Configuration for the encoder part of the model. If not provided, a default `DiaEncoderConfig` is used.
        decoder_config (`DiaDecoderConfig`, *optional*):
            Configuration for the decoder part of the model. If not provided, a default `DiaDecoderConfig` is used.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the normalization layers.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Indicating that this model uses an encoder-decoder architecture.
        pad_token_id (`int`, *optional*):
            Deprecated. Please set this on `DiaDecoderConfig` directly. If provided, it will be forwarded
            to `decoder_config`.
        eos_token_id (`int`, *optional*):
            Deprecated. Please set this on `DiaDecoderConfig` directly. If provided, it will be forwarded
            to `decoder_config`.
        bos_token_id (`int`, *optional*):
            Deprecated. Please set this on `DiaDecoderConfig` directly. If provided, it will be forwarded
            to `decoder_config`.
        delay_pattern (`list[int]`, *optional*, defaults to `[0, 8, 9, 10, 11, 12, 13, 14, 15]`):
            The delay pattern for the decoder. The length of this list must match `decoder_config.num_channels`.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions.

    Raises:
        ValueError: If `len(delay_pattern)` does not equal `decoder_config.num_channels`.

    Example:

    ```python
    >>> from transformers import DiaConfig, DiaModel

    >>> # Initializing a DiaConfig with default values
    >>> configuration = DiaConfig()

    >>> # Initializing a DiaModel (with random weights) from the configuration
    >>> model = DiaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "dia"
    keys_to_ignore_at_inference = ["past_key_values"]
    sub_configs = {"encoder_config": DiaEncoderConfig, "decoder_config": DiaDecoderConfig}

    def __init__(
        self,
        encoder_config: DiaEncoderConfig | None = None,
        decoder_config: DiaDecoderConfig | None = None,
        norm_eps: float = 1e-5,
        is_encoder_decoder: bool = True,
        pad_token_id: int | None = None,
        eos_token_id: int | None = None,
        bos_token_id: int | None = None,
        delay_pattern: list[int] | None = None,
        initializer_range: float = 0.02,
        use_cache: bool = True,
        **kwargs,
    ):
        # Sub-configs may arrive as plain dicts (e.g. from a serialized config).
        if isinstance(encoder_config, dict):
            encoder_config = DiaEncoderConfig(**encoder_config)
        if isinstance(decoder_config, dict):
            decoder_config = DiaDecoderConfig(**decoder_config)
        self.encoder_config = encoder_config if encoder_config is not None else DiaEncoderConfig()
        self.decoder_config = decoder_config if decoder_config is not None else DiaDecoderConfig()
        self.norm_eps = norm_eps
        self.delay_pattern = delay_pattern if delay_pattern is not None else [0, 8, 9, 10, 11, 12, 13, 14, 15]
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        # TODO: Remove token ID forwarding once the `nari-labs/Dia-1.6B`
        # checkpoint is updated
        if pad_token_id is not None:
            logger.warning_once(
                "Passing `pad_token_id` to `DiaConfig` is deprecated. "
                "Please set it directly on `DiaDecoderConfig` instead."
            )
            self.decoder_config.pad_token_id = pad_token_id
        if eos_token_id is not None:
            logger.warning_once(
                "Passing `eos_token_id` to `DiaConfig` is deprecated. "
                "Please set it directly on `DiaDecoderConfig` instead."
            )
            self.decoder_config.eos_token_id = eos_token_id
        if bos_token_id is not None:
            logger.warning_once(
                "Passing `bos_token_id` to `DiaConfig` is deprecated. "
                "Please set it directly on `DiaDecoderConfig` instead."
            )
            self.decoder_config.bos_token_id = bos_token_id

        # Validate with an explicit exception rather than `assert`: assertions
        # are stripped when Python runs with -O, which would silently skip
        # this check.
        if len(self.delay_pattern) != self.decoder_config.num_channels:
            raise ValueError(
                f"Number of channels ({self.decoder_config.num_channels}) must match "
                f"delay pattern length ({len(self.delay_pattern)})."
            )
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    def get_text_config(self, *args, **kwargs):
        """Defaulting to audio config as it's the decoder in this case which is usually the text backbone"""
        return self.decoder_config
__all__ = ["DiaConfig", "DiaEncoderConfig", "DiaDecoderConfig"] | python | github | https://github.com/huggingface/transformers | src/transformers/models/dia/configuration_dia.py |
import sys
from artiq.experiment import *
class Mandelbrot(EnvExperiment):
    """Mandelbrot set demo"""

    def build(self):
        self.setattr_device("core")

    def col(self, i):
        # Render an escape-iteration count as one ASCII "brightness" character.
        sys.stdout.write(" .,-:;i+hHM$*#@ "[i])

    def row(self):
        print("")

    # based on: http://warp.povusers.org/MandScripts/python.html
    @kernel
    def run(self):
        min_x = -2.0
        max_x = 1.0
        width = 78
        height = 36
        aspect_ratio = 2
        y_scale = (max_x - min_x) * (height / width) * aspect_ratio
        for y in range(height):
            for x in range(width):
                c_r = min_x + x * (max_x - min_x) / width
                c_i = y * y_scale / height - y_scale / 2
                z_r = c_r
                z_i = c_i
                i = 0
                for i in range(16):
                    if z_r * z_r + z_i * z_i > 4:
                        break
                    next_z_r = (z_r * z_r) - (z_i * z_i) + c_r
                    z_i = 2 * z_r * z_i + c_i
                    z_r = next_z_r
                self.col(i)
            self.row()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
@test_util.with_c_api
class ImportGraphDefTest(test.TestCase):
def _MakeGraphDef(self,
text,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
text = "versions: { producer: %d min_consumer: %d };\n%s" % (producer,
min_consumer,
text)
ret = graph_pb2.GraphDef()
text_format.Merge(text, ret)
return ret
  def testBasic(self):
    """Imports a 4-node graph and verifies topology, dtypes, and naming."""
    with ops.Graph().as_default():
      a, b, c, d = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'IntOutputFloatOutput' }
          node { name: 'B' op: 'ListOutput'
                 attr { key: 'T'
                        value { list { type: DT_INT32 type: DT_FLOAT } } } }
          node { name: 'C' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:0' input: 'B:0' }
          node { name: 'D' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_FLOAT } }
                 input: 'A:1' input: 'B:1' }
          """),
          return_elements=["A", "B", "C", "D"],
          name="import")
      # Assert that the import process creates distinct tensors.
      self.assertNotEqual(a.outputs[0].name, a.outputs[1].name)
      self.assertNotEqual(b.outputs[0].name, b.outputs[1].name)
      self.assertNotEqual(a.outputs[0].name, b.outputs[0].name)
      self.assertNotEqual(a.outputs[0].name, b.outputs[1].name)
      self.assertNotEqual(a.outputs[1].name, b.outputs[0].name)
      self.assertNotEqual(a.outputs[1].name, b.outputs[1].name)
      # Assert that the ops are connected according to the GraphDef topology.
      self.assertEqual(c.inputs[0], a.outputs[0])
      self.assertEqual(c.inputs[1], b.outputs[0])
      self.assertEqual(d.inputs[0], a.outputs[1])
      self.assertEqual(d.inputs[1], b.outputs[1])
      # Check the types of the returned ops and tensors.
      self.assertEqual(a.type, "IntOutputFloatOutput")
      self.assertEqual(b.type, "ListOutput")
      self.assertEqual(c.type, "ListInput")
      self.assertEqual(d.type, "ListInput")
      self.assertEqual(a.outputs[0].dtype, dtypes.int32)
      self.assertEqual(a.outputs[1].dtype, dtypes.float32)
      self.assertEqual(b.outputs[0].dtype, dtypes.int32)
      self.assertEqual(b.outputs[1].dtype, dtypes.float32)
      # Check the names of the returned ops (prefixed by the import scope).
      self.assertEqual(a.name, "import/A")
      self.assertEqual(b.name, "import/B")
      self.assertEqual(c.name, "import/C")
      self.assertEqual(d.name, "import/D")
      # Check that the op_def is still available.
      self.assertNotEqual(None, a.op_def)
  def testMultipleImport(self):
    """Repeated imports into one graph get de-duplicated, predictable names.

    Exercises name uniquification across: repeated imports with name="",
    an import whose `name` collides with an existing op, nodes whose names
    already look de-duped (e.g. "A_1"), and collisions with name scopes.
    """
    graph_def = self._MakeGraphDef("""
    node { name: 'A' op: 'IntOutput' }
    node { name: 'B' op: 'IntInput' input: 'A:0' }
    """)

    with ops.Graph().as_default():
      # Initial import
      a, b = importer.import_graph_def(
          graph_def,
          return_elements=["A", "B"],
          name="")
      self.assertEqual(a.name, "A")
      self.assertEqual(b.name, "B")
      self.assertEqual(list(b.inputs), [a.outputs[0]])

      # Repeat the same import
      a1, b1 = importer.import_graph_def(
          graph_def,
          return_elements=["A", "B"],
          name="")
      self.assertEqual(a1.name, "A_1")
      self.assertEqual(b1.name, "B_1")
      self.assertEqual(list(b1.inputs), [a1.outputs[0]])

      # Repeat the same import again
      a2, b2 = importer.import_graph_def(
          graph_def,
          return_elements=["A", "B"],
          name="")
      self.assertEqual(a2.name, "A_2")
      self.assertEqual(b2.name, "B_2")
      self.assertEqual(list(b2.inputs), [a2.outputs[0]])

      # Import with an already-used name
      a3, b3 = importer.import_graph_def(
          graph_def,
          return_elements=["A", "B"],
          name="A")
      self.assertEqual(a3.name, "A_3/A")
      self.assertEqual(b3.name, "A_3/B")
      self.assertEqual(list(b3.inputs), [a3.outputs[0]])

      # Import with existing de-duped node names
      a1_1, b1_1 = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A_1' op: 'IntOutput' }
          node { name: 'B_1' op: 'IntInput' input: 'A_1:0' }
          """),
          return_elements=["A_1", "B_1"],
          name="")
      self.assertEqual(a1_1.name, "A_1_1")
      self.assertEqual(b1_1.name, "B_1_1")
      self.assertEqual(list(b1_1.inputs), [a1_1.outputs[0]])

      # Create a name scope and then import node with same name
      with ops.name_scope("foo"):
        constant_op.constant(1)
      foo, = importer.import_graph_def(
          self._MakeGraphDef("node { name: 'foo' op: 'IntOutput' }"),
          return_elements=["foo"],
          name="")
      self.assertEqual(foo.name, "foo_1")

      # Imported node name can't conflict with intermediate name scope (but can
      # conflict with outer scope and full name scope)
      with ops.name_scope("outer"):
        with ops.name_scope("inner"):
          c = constant_op.constant(1, name="c")
          self.assertEqual(c.op.name, "outer/inner/c")

      outer, inner, new_c, outer_inner, outer_inner_c = (
          importer.import_graph_def(
              self._MakeGraphDef(
                  "node { name: 'outer' op: 'IntOutput' }"
                  "node { name: 'inner' op: 'IntOutput' }"
                  "node { name: 'c' op: 'IntOutput' }"
                  "node { name: 'outer/inner' op: 'IntOutput' }"
                  "node { name: 'outer/inner/c' op: 'IntOutput' }"),
              return_elements=["outer", "inner", "c", "outer/inner",
                               "outer/inner/c"],
              name=""))
      self.assertEqual(outer.name, "outer_1")
      self.assertEqual(inner.name, "inner")
      self.assertEqual(new_c.name, "c")
      self.assertEqual(outer_inner.name, "outer/inner_1")
      self.assertEqual(outer_inner_c.name, "outer/inner/c_1")
  def testInputMap(self):
    """input_map with str keys replaces the mapped edges, leaves others alone."""
    with ops.Graph().as_default():
      feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
      feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)

      a, b, c, d = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'TwoIntOutputs' }
          node { name: 'B' op: 'TwoIntOutputs' }
          node { name: 'C' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:0' input: 'B:0' }
          node { name: 'D' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:1' input: 'B:1' }
          """),
          input_map={"A:0": feed_a_0,
                     "B:1": feed_b_1},
          return_elements=["A", "B", "C", "D"])

      # Mapped edges come from the feeds; unmapped edges keep imported sources.
      self.assertEqual(c.inputs[0], feed_a_0)
      self.assertEqual(c.inputs[1], b.outputs[0])
      self.assertEqual(d.inputs[0], a.outputs[1])
      self.assertEqual(d.inputs[1], feed_b_1)
  def testInputMapBytes(self):
    """Same as testInputMap, but input_map keys and return_elements are bytes."""
    with ops.Graph().as_default():
      feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
      feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)

      a, b, c, d = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'TwoIntOutputs' }
          node { name: 'B' op: 'TwoIntOutputs' }
          node { name: 'C' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:0' input: 'B:0' }
          node { name: 'D' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:1' input: 'B:1' }
          """),
          input_map={b"A:0": feed_a_0,
                     b"B:1": feed_b_1},
          return_elements=[b"A", b"B", b"C", b"D"])

      self.assertEqual(c.inputs[0], feed_a_0)
      self.assertEqual(c.inputs[1], b.outputs[0])
      self.assertEqual(d.inputs[0], a.outputs[1])
      self.assertEqual(d.inputs[1], feed_b_1)
  def testInputMapUnicode(self):
    """Same as testInputMap, but keys and return_elements are unicode strings."""
    with ops.Graph().as_default():
      feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
      feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)

      a, b, c, d = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'TwoIntOutputs' }
          node { name: 'B' op: 'TwoIntOutputs' }
          node { name: 'C' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:0' input: 'B:0' }
          node { name: 'D' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:1' input: 'B:1' }
          """),
          input_map={u"A:0": feed_a_0,
                     u"B:1": feed_b_1},
          return_elements=[u"A", u"B", u"C", u"D"])

      self.assertEqual(c.inputs[0], feed_a_0)
      self.assertEqual(c.inputs[1], b.outputs[0])
      self.assertEqual(d.inputs[0], a.outputs[1])
      self.assertEqual(d.inputs[1], feed_b_1)
def testImplicitZerothOutput(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'IntInput' input: 'A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.inputs[0], a.outputs[0])
  def testInputMapImplicitZerothOutput(self):
    """An input_map key without an output index ("A") also maps output 0."""
    with ops.Graph().as_default():
      feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
      b, = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'TwoIntOutputs' }
          node { name: 'B' op: 'IntInput' input: 'A:0' }
          """),
          input_map={"A": feed_a_0},
          return_elements=["B"])

      self.assertEqual(b.inputs[0], feed_a_0)
def testWithControlDependency(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' input: '^A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.control_inputs, [a])
  def testWithRefs(self):
    """Ref-typed outputs can feed both ref and non-ref inputs after import."""
    with ops.Graph().as_default():
      a, b, c, d = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'RefOutput' }
          node { name: 'B' op: 'IntOutput' }
          node { name: 'C' op: 'TwoIntInputs' input: 'A:0' input: 'B:0' }
          node { name: 'D' op: 'RefInputIntInput' input: 'A:0' input: 'B:0' }
          """),
          return_elements=["A", "B", "C", "D"])

      self.assertEqual(c.inputs[0], a.outputs[0])
      self.assertEqual(c.inputs[1], b.outputs[0])
      self.assertEqual(d.inputs[0], a.outputs[0])
      self.assertEqual(d.inputs[1], b.outputs[0])

      # A's ref output stays a ref tensor; C's non-ref input dtypes show the
      # implicit ref-to-value conversion, while D's first input stays a ref.
      self.assertEqual(a.outputs[0].dtype, dtypes.int32_ref)
      self.assertEqual(c._input_dtypes, [dtypes.int32, dtypes.int32])
      self.assertEqual(c.outputs, [])
      self.assertEqual(d._input_dtypes, [dtypes.int32_ref, dtypes.int32])
      self.assertEqual(d.outputs, [])
  def testWhileLoop(self):
    """A GraphDef containing a while loop imports and runs to completion."""
    # Produce GraphDef containing while loop.
    graph = ops.Graph()
    with graph.as_default():
      r = control_flow_ops.while_loop(lambda i: i < 10, lambda i: i + 1, [0])
    graph_def = graph.as_graph_def()

    # Import the GraphDef and make sure it runs.
    with ops.Graph().as_default():
      imported_r, = importer.import_graph_def(graph_def,
                                              return_elements=[r.name])
      self.assertEqual(imported_r.name, "import/" + r.name)
      with self.test_session() as sess:
        # Loop increments from 0 until i < 10 fails, so the result is 10.
        self.assertEqual(sess.run(imported_r), 10)
  def testTypeMismatchInGraphDef(self):
    """Feeding an int32 output to a float input raises ValueError.

    The expected message differs between the C API and the Python-only
    import path, hence the _USE_C_API branch.
    """
    if ops._USE_C_API:
      # TODO(skyewm): improve error message
      error_msg = ("Input 0 of node import/B was passed int32 from import/A:0 "
                   "incompatible with expected float.")
    else:
      error_msg = ("Cannot convert a tensor of type int32 to an input of type "
                   "float")
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            node { name: 'B' op: 'FloatInput' input: 'A:0' }
            """))
  def testShapeWhitelist(self):
    """Whitelisted ops may import with a stale _output_shapes attr."""
    # Barrier's shape is an output vector of 2, but the
    # graph says it's a scalar.  This is currently whitelisted.
    with ops.Graph().as_default():
      _ = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Barrier'
                 attr { key: '_output_shapes'
                        value { list { shape { } } } }
                 attr { key: 'component_types'
                        value { list { type: DT_FLOAT } } } }
          """),
          return_elements=["A"],
          name="import")
  def testShapeWhitelistViolation(self):
    """Non-whitelisted ops with a wrong _output_shapes attr fail to import."""
    # L2 loss produces a scalar shape, but the graph
    # has the wrong shape, so raise an error.
    with ops.Graph().as_default():
      with self.assertRaises(ValueError) as e:
        _ = importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'FloatOutput' }
            node { name: 'B' op: 'L2Loss'
                   input: 'A:0'
                   attr { key: 'T' value { type: DT_FLOAT } }
                   attr { key: '_output_shapes'
                          value { list { shape { dim { size: 43 } } } } } }
            """),
            return_elements=["B"],
            name="import")
        self.assertTrue(
            "Shapes () and (43,) are not compatible" in str(e.exception))
  def testInvalidSignatureTooManyInputsInGraphDef(self):
    """A node with more inputs than its op signature allows is rejected."""
    if ops._USE_C_API:
      # TODO(skyewm): improve error message
      error_msg = "NodeDef expected inputs '' do not match 1 inputs specified"
    else:
      error_msg = r"More inputs specified \('A:0'\) than the op expects"
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            node { name: 'B' op: 'None' input: 'A:0' }
            """))
  def testInvalidSignatureNotEnoughInputsInGraphDef(self):
    """A node with fewer inputs than its op signature requires is rejected."""
    if ops._USE_C_API:
      # TODO(skyewm): improve error message
      error_msg = ("NodeDef expected inputs 'int32, float' do not match 1 "
                   "inputs specified")
    else:
      error_msg = (r"Input types mismatch \(expected 'int32, float32' but "
                   r"got 'int32'\)")
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            node { name: 'B' op: 'IntInputFloatInput' input: 'A:0' }
            """))
  def testMissingInputOpInGraphDef(self):
    """Referencing an input node absent from the GraphDef raises ValueError."""
    if ops._USE_C_API:
      error_msg = "Node 'B': Unknown input node 'A:0'"
    else:
      error_msg = "Input tensor 'A:0' not found"
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'B' op: 'FloatInput' input: 'A:0' }
            """))
  def testMissingInputOpInGraphDefButAppearsInInputMap(self):
    """A missing input node is allowed when input_map supplies that tensor."""
    with ops.Graph().as_default():
      feed_a_0 = constant_op.constant(5.0)
      b, = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'B' op: 'FloatInput' input: 'A:0' }
          """),
          input_map={"A:0": feed_a_0},
          return_elements=["B"])
      self.assertEqual(b.inputs[0], feed_a_0)
  def testMissingInputTensorInGraphDef(self):
    """Referencing an output index beyond a node's outputs raises ValueError."""
    if ops._USE_C_API:
      error_msg = ("Node 'B': Connecting to invalid output 1 of source node A "
                   "which has 1 outputs")
    else:
      error_msg = "Input tensor 'A:1' not found"
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'FloatOutput' }
            node { name: 'B' op: 'FloatInput' input: 'A:1' }
            """))
  def testMissingControlInputInGraphDef(self):
    """A control input ('^A') to a node absent from the GraphDef is rejected."""
    if ops._USE_C_API:
      error_msg = r"Node 'B': Unknown input node '\^A'"
    else:
      error_msg = r"Control input '\^A' not found"
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'B' op: 'None' input: '^A' }
            """))
  def testInvalidTensorNameOutputIndexInGraphDef(self):
    """A non-numeric output index ('A:B') in an input spec is rejected."""
    if ops._USE_C_API:
      error_msg = "Node 'B': Unknown input node 'A:B'"
    else:
      error_msg = "Cannot convert 'A:B' to a tensor name."
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'B' op: 'None' input: 'A:B' }
            """))
  def testInvalidTensorNameInGraphDef(self):
    """A doubly-qualified input spec ('A:B:0') is rejected as malformed."""
    if ops._USE_C_API:
      error_msg = "Node 'B': Unknown input node 'A:B:0'"
    else:
      error_msg = "Cannot convert 'A:B:0' to a tensor name."
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'B' op: 'None' input: 'A:B:0' }
            """))
  def testMissingReturnOperation(self):
    """Requesting a return op not present in the GraphDef raises ValueError."""
    if ops._USE_C_API:
      error_msg = "Requested return node 'B' not found in graph def"
    else:
      error_msg = "return_element 'B' not found in graph_def."
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'None' }
            """),
            return_elements=["B"])
  def testMissingReturnTensor(self):
    """Invalid return tensors fail: bad output index, unknown node, bad name."""
    # Case 1: output index beyond the node's number of outputs.
    if ops._USE_C_API:
      error_msg = (r"Invalid return output 1 of node 'A', which has 1 "
                   r"output\(s\)")
    else:
      error_msg = "return_element 'A:1' not found in graph_def."
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            """),
            return_elements=["A:1"])

      # Case 2: tensor of a node that does not exist in the GraphDef.
      if ops._USE_C_API:
        error_msg = "Requested return tensor 'B:0' not found in graph def"
      else:
        error_msg = "return_element 'B:0' not found in graph_def."
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            """),
            return_elements=["B:0"])

      # Case 3: malformed (doubly-qualified) tensor name.
      if ops._USE_C_API:
        error_msg = "Cannot convert 'A:B:0' to a tensor name."
      else:
        error_msg = "return_element 'A:B:0' not found in graph_def."
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            """),
            return_elements=["A:B:0"])
  def testMissingInputMap(self):
    """input_map keys that match nothing in the GraphDef raise ValueError."""
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          ValueError,
          r"Attempted to map inputs that were not found in graph_def: \[B:0\]"):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'None' }
            """),
            input_map={"B:0": constant_op.constant(5.0)})
  def testInputMapUnusedAsInput(self):
    """Mapping an unconsumed-but-existing output is OK; a bad index is not."""
    with ops.Graph().as_default():
      # Mapping an unused node output should succeed.
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'IntOutput' }
          """),
          input_map={"A:0": constant_op.constant(5.0)})

      # Mapping a non-existent output of an existing node should fail.
      with self.assertRaisesRegexp(
          ValueError,
          r"Attempted to map inputs that were not found in graph_def: \[A:2\]"):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            """),
            input_map={"A:2": constant_op.constant(5.0)})
  def testInputMapTypeMismatch(self):
    """A mapped tensor whose dtype conflicts with the consumer is rejected."""
    if ops._USE_C_API:
      error_msg = ("Input 0 of node import/B was passed float from Const:0 "
                   "incompatible with expected int32.")
    else:
      error_msg = ("Cannot convert a tensor of type float32 to an input of "
                   "type int32.")
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            node { name: 'B' op: 'IntInput' input: 'A:0' }
            """),
            input_map={"A:0": constant_op.constant(5.0)})
def testNoReturns(self):
with ops.Graph().as_default() as g:
ret = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""))
self.assertEqual(ret, None)
a = g.get_operation_by_name("import/A")
self.assertEqual(a.type, "None")
def testOverrideNamePrefix(self):
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"],
name="imported_graph")
self.assertEqual(a.name, "imported_graph/A")
def testDefaultNamePrefix(self):
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"],
name=None)
self.assertEqual(a.name, "import/A")
  def testNamePrefixColocationAttrs(self):
    """Colocation class attrs ('loc:@A') are rewritten with the import prefix."""
    original_graph_def = self._MakeGraphDef("""
          node { name: 'A' op: 'None' }
          node { name: 'B' op: 'None'  attr {
            key: '_class'
            value { list { s: 'loc:@A' } }
          } }""")

    with ops.Graph().as_default():
      b, = importer.import_graph_def(
          original_graph_def, return_elements=["B"], name="imported_graph")
      self.assertTrue("_class" in b.node_def.attr)
      # The colocation target must now point at the prefixed node name.
      self.assertProtoEquals(
          "list { s: 'loc:@imported_graph/A' }",
          b.node_def.attr["_class"])
  def testColocationWithDeviceFn(self):
    """Colocation constraints override per-op device functions on import.

    Covers three placements of the same two-node colocated graph:
    both nodes given devices (B follows A), neither given a device, and
    only A given a device (B inherits it).
    """
    original_graph_def = self._MakeGraphDef("""
          node { name: 'A' op: 'None' attr {
            key: '_class'
            value { list { s: 'loc:@A' } }
          } }
          node { name: 'B' op: 'None' attr {
            key: '_class'
            value { list { s: 'loc:@A' } }
          } }""")

    # A device function that places "A" on one device and "B" on
    # another device.  Because B is colocated with A, we test that B's
    # device function is overridden by A.
    def CustomDeviceFn(op):
      if "A" in op.name:
        return "/device:A:0"
      else:
        return "/device:B:0"

    with ops.Graph().as_default():
      with ops.device(CustomDeviceFn):
        a, b = importer.import_graph_def(original_graph_def,
                                         return_elements=["A", "B"],
                                         name="imported_graph")
      self.assertEqual(a.device, "/device:A:0")
      self.assertEqual(b.device, "/device:A:0")
      self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
      self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])

    # Test a scenario where 'A' doesn't get a device; 'A' should not have a
    # device, but during runtime will get colocated with 'B' because of the
    # colocation attribute. B's device function is still overridden by A.
    def BDeviceFn(op):
      if "B" in op.name:
        return "/device:B:0"
      return ""

    with ops.Graph().as_default():
      with ops.device(BDeviceFn):
        a, b = importer.import_graph_def(original_graph_def,
                                         return_elements=["A", "B"],
                                         name="imported_graph")
      self.assertEqual(a.device, "")
      self.assertEqual(b.device, "")
      self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
      self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])

    # Only A gets a device, so B inherits it implicitly.
    def ADeviceFn(op):
      if "A" in op.name:
        return "/device:A:0"
      return ""

    with ops.Graph().as_default():
      with ops.device(ADeviceFn):
        a, b = importer.import_graph_def(original_graph_def,
                                         return_elements=["A", "B"],
                                         name="imported_graph")
      self.assertEqual(a.device, "/device:A:0")
      self.assertEqual(b.device, "/device:A:0")
      self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
      self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
  def testMultipleColocationWithDeviceFn(self):
    """A node colocated with multiple targets keeps both groups after import."""
    original_graph_def = self._MakeGraphDef("""
          node { name: 'A' op: 'None'}
          node { name: 'B' op: 'None'}
          node { name: 'C' op: 'None' attr {
            key: '_class'
            value { list { s: 'loc:@A' s: 'loc:@B' } }
          } }""")

    # A device function that places "B" on a device, and "A" is empty.
    #
    # B and C should contain "/device:B".  A will not right now.  But
    # because of the colocation property, at runtime it would be
    # placed with B and C.
    def CustomDeviceFn(op):
      if "B" in op.name:
        return "/device:B:0"
      return ""

    with ops.Graph().as_default():
      with ops.device(CustomDeviceFn):
        a, b, c = importer.import_graph_def(original_graph_def,
                                            return_elements=["A", "B", "C"],
                                            name="imported_graph")
      self.assertEqual(a.device, "")
      self.assertEqual(b.device, "/device:B:0")
      self.assertEqual(c.device, "/device:B:0")
      self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
      self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/B"])
      # C keeps both colocation targets, each rewritten with the prefix.
      self.assertEqual(c.colocation_groups(),
                       [b"loc:@imported_graph/A", b"loc:@imported_graph/B"])
  def testNamePrefixColocationAttrsMultipleImport(self):
    """Colocation targets track de-duplicated names on repeated imports."""
    original_graph_def = self._MakeGraphDef("""
          node { name: 'A' op: 'None' }
          node { name: 'B' op: 'None'  attr {
            key: '_class'
            value { list { s: 'loc:@A' } }
          } }""")

    with ops.Graph().as_default():
      a, b = importer.import_graph_def(
          original_graph_def, return_elements=["A", "B"], name="")
      a_1, b_1 = importer.import_graph_def(
          original_graph_def, return_elements=["A", "B"], name="")

      self.assertEqual(a.name, "A")
      self.assertEqual(b.name, "B")
      self.assertEqual(b.colocation_groups(), [b"loc:@A"])

      # Second import is uniquified to A_1/B_1; B_1 must colocate with A_1,
      # not the first import's A.
      self.assertEqual(a_1.name, "A_1")
      self.assertEqual(b_1.name, "B_1")
      self.assertEqual(b_1.colocation_groups(), [b"loc:@A_1"])
  def testNamePrefixColocationAttrsNotFound(self):
    """Colocating with a node missing from the GraphDef raises ValueError."""
    original_graph_def = self._MakeGraphDef("""
          node { name: 'B' op: 'None'  attr {
            key: '_class'
            value { list { s: 'loc:@A' } }
          } }""")
    if ops._USE_C_API:
      error_msg = "Node 'B' expects to be colocated with unknown node 'A'"
    else:
      error_msg = "does not exist during import"
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            original_graph_def, return_elements=["B"], name="imported_graph")
def testEmptyGraph(self):
with ops.Graph().as_default() as g:
init_version = g.version
importer.import_graph_def(self._MakeGraphDef(""))
self.assertEqual(init_version, g.version)
  def testInvalidInputForGraphDef(self):
    """Passing a non-GraphDef (here a str) raises TypeError."""
    with ops.Graph().as_default():
      with self.assertRaises(TypeError) as e:
        importer.import_graph_def("")
      self.assertEqual("graph_def must be a GraphDef proto.", str(e.exception))
  def testInvalidInputForInputMap(self):
    """Invalid input_map values are rejected; convertible ones are accepted.

    A list instead of a dict raises TypeError; non-Tensor values (a Variable)
    with an empty name prefix raise ValueError; a Tensor value with an empty
    name works and feeds through to the imported graph.
    """
    with ops.Graph().as_default():
      with self.assertRaises(TypeError) as e:
        importer.import_graph_def(
            self._MakeGraphDef(""), input_map=[constant_op.constant(5.0)])
      self.assertEqual("input_map must be a dictionary mapping strings to "
                       "Tensor objects.", str(e.exception))
    graph_def = self._MakeGraphDef("""
         node { name: 'a' op: 'Placeholder'
                attr { key: 'dtype' value { type: DT_FLOAT } }}
         node { name: 'id' op: 'Identity' input: 'a:0'
                attr { key: 'T' value { type: DT_FLOAT } }}""")
    with ops.Graph().as_default():
      with self.assertRaises(ValueError) as e:
        importer.import_graph_def(
            graph_def,
            input_map={"a:0": variables.Variable(5.0)},
            name="")
      self.assertStartsWith(str(e.exception),
                            "tf.import_graph_def() requires a non-empty `name` "
                            "if `input_map` contains non-Tensor values.")
    with ops.Graph().as_default():
      t, = importer.import_graph_def(
          graph_def,
          input_map={"a:0": constant_op.constant(5.0)},
          name="",
          return_elements=["id:0"])
      with self.test_session():
        self.assertEqual(5.0, t.eval())
  def testInvalidInputForReturnOperations(self):
    """Non-string and malformed return_elements entries are rejected."""
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          TypeError, "return_elements must be a list of strings."):
        importer.import_graph_def(self._MakeGraphDef(""), return_elements=[7])

      if ops._USE_C_API:
        error_msg = "Cannot convert 'a:b:c' to a tensor name."
      else:
        error_msg = "Requested return_element 'a:b:c' not found in graph_def."
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(self._MakeGraphDef(""),
                                  return_elements=["a:b:c"])
  def testDuplicateOperationNames(self):
    """A GraphDef with two nodes of the same name fails to import."""
    if ops._USE_C_API:
      error_msg = "Node 'A' is not unique"
    else:
      error_msg = "Duplicate name 'A' in GraphDef."
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            node { name: 'B' op: 'IntOutput' }
            node { name: 'A' op: 'IntOutput' }
            """))
  def testWithExtensionAndAttr(self):
    """An exported graph with list-typed attrs (stack/Pack) round-trips."""
    with ops.Graph().as_default() as g:
      c = constant_op.constant(5.0, dtype=dtypes.float32, name="c")
      array_ops.stack([c, c], name="pack")
    gdef = g.as_graph_def()

    with self.test_session():
      pack, = importer.import_graph_def(gdef, return_elements=["pack"])
      self.assertAllEqual(pack.outputs[0].eval(), [5.0, 5.0])
  def testWithDevice(self):
    """Imported device strings merge with the surrounding ops.device scope.

    Ops exported with no device, "/cpu:0", and "/job:worker" are re-imported
    under various merge_device scopes; already-set fields of an op's device
    win over the scope's fields.
    """
    with ops.Graph().as_default() as g:
      # No device.
      a = constant_op.constant(3.0, name="a")

      with ops.device("/cpu:0"):
        b = constant_op.constant(4.0, name="b")
      with ops.device("/job:worker"):
        c = constant_op.constant(5.0, name="c")

    gdef = g.as_graph_def()

    with ops.Graph().as_default():
      a2, b2, c2 = importer.import_graph_def(
          gdef, return_elements=["a", "b", "c"])
      self.assertEqual(a.device, a2.device)
      self.assertEqual(b.device, b2.device)
      self.assertEqual(c.device, c2.device)

    with ops.Graph().as_default():
      with ops.device(device.merge_device("/task:0")):
        a3, b3, c3 = importer.import_graph_def(
            gdef, return_elements=["a", "b", "c"])
        self.assertEqual("/task:0", a3.device)
        self.assertEqual("/task:0/device:CPU:0", b3.device)  # canonicalized.
        self.assertEqual(c.device + "/task:0", c3.device)

    with ops.Graph().as_default():
      with ops.device(device.merge_device("/job:ps")):
        a4, b4, c4 = importer.import_graph_def(
            gdef, return_elements=["a", "b", "c"])
        self.assertEqual("/job:ps", a4.device)
        self.assertEqual("/job:ps/device:CPU:0", b4.device)  # canonicalized.
        self.assertEqual(c.device, c4.device)  # worker overrides ps.

    with ops.Graph().as_default():
      with ops.device(device.merge_device("/device:GPU:0")):
        a5, b5, c5 = importer.import_graph_def(
            gdef, return_elements=["a", "b", "c"])
        self.assertEqual("/device:GPU:0", a5.device)
        self.assertEqual("/device:CPU:0", b5.device)  # cpu overrides gpu.
        self.assertEqual(c.device + "/device:GPU:0", c5.device)
  def testWithDeviceFunctionDependingOnInputs(self):
    """Device functions observe fully-wired inputs of imported ops."""
    with ops.Graph().as_default() as g:
      with ops.device("/job:ps"):
        v1 = constant_op.constant(1.0)
        v2 = constant_op.constant(1.0)
      _ = v1 + v2
      _ = v1 - v2
      _ = array_ops.identity(v1)
    gdef = g.as_graph_def()

    # We'll use the following device function to observe ops with two inputs.
    ops_with_two_inputs = []

    def InputCounter(op):
      if len(op.inputs) == 2:
        ops_with_two_inputs.append(op)
      return ""

    with ops.Graph().as_default() as g:
      with ops.device(InputCounter):
        importer.import_graph_def(gdef)

    # We expect to see the add and subtract, but not identity.
    self.assertEqual(2, len(ops_with_two_inputs))
  def testGradient(self):
    """Gradients can be computed through tensors of an imported graph.

    Builds a small relu(matmul+bias) network, exports it, re-imports with
    placeholders/variables fed via input_map, and checks shape inference of
    both activations and their gradients.
    """
    with ops.Graph().as_default() as g:
      inputs = array_ops.placeholder(
          dtypes.float32, shape=[None, 100], name="input")
      weights = array_ops.placeholder(
          dtypes.float32, shape=[100, 10], name="weights")
      biases = array_ops.placeholder(dtypes.float32, shape=[10], name="biases")
      activations = nn_ops.relu(
          math_ops.matmul(inputs, weights) + biases, name="activations")
      loss = math_ops.reduce_mean(activations, name="loss")
    gdef = g.as_graph_def()

    with ops.Graph().as_default() as g:
      input_placeholder = array_ops.placeholder(dtypes.float32, shape=[32, 100])
      weights_var = variables.Variable(
          random_ops.truncated_normal([100, 10]), name="weights")
      biases_var = variables.Variable(array_ops.zeros([10]), name="biases")
      activations, loss = importer.import_graph_def(
          gdef,
          input_map={
              "input:0": input_placeholder,
              "weights:0": weights_var,
              "biases:0": biases_var
          },
          return_elements=["activations:0", "loss:0"])
      self.assertEqual([32, 10], activations.get_shape())
      self.assertEqual([], loss.get_shape())
      weights_grad, biases_grad = gradients_impl.gradients(
          loss, [weights_var, biases_var])
      self.assertEqual([100, 10], weights_grad.get_shape())
      self.assertEqual([10], biases_grad.get_shape())
  def testLargeGraph(self):
    """A graph with a ~520MB constant evaluates without hitting proto limits."""
    with self.test_session():
      # The default message byte limit is 64M. Ours is 2G with a warning at 512.
      # Adding a 130M entries float32 tensor should exceed the warning, but not
      # the hard limit.
      input_shape = [130, 1000, 1000]
      tensor_input = np.ones(input_shape, dtype=np.float32)
      t = constant_op.constant(tensor_input, shape=input_shape)
      g = array_ops.identity(t)
      g.eval()
  def testVersion(self):
    """GraphDef producer/min_consumer versions are preserved on import."""
    v0 = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
    v2 = versions.GRAPH_DEF_VERSION
    v1 = (v0 + v2) // 2  # a valid intermediate version between the extremes
    for producer in v0, v1, v2:
      for min_consumer in v0, v1, v2:
        with ops.Graph().as_default():
          a, = importer.import_graph_def(
              self._MakeGraphDef(
                  "node { name: 'A' op: 'TwoIntOutputs' }",
                  producer=producer,
                  min_consumer=min_consumer),
              return_elements=["A"])
          self.assertEqual(a.graph.graph_def_versions.producer, producer)
          self.assertEqual(a.graph.graph_def_versions.min_consumer,
                           min_consumer)
  def testVersionLow(self):
    """A producer version below the supported minimum is rejected.

    The C API raises at import time; the Python-only path defers the error
    until the graph is run.
    """
    with ops.Graph().as_default() as g:
      pat = (r"GraphDef producer version -1 below min producer %d supported "
             r"by TensorFlow \S+\.  Please regenerate your graph.$" %
             versions.GRAPH_DEF_VERSION_MIN_PRODUCER)
      # C API throws error during import, Python-only throws error during run
      if ops._USE_C_API:
        with self.assertRaisesRegexp(Exception, pat):
          importer.import_graph_def(self._MakeGraphDef("", producer=-1))
      else:
        importer.import_graph_def(self._MakeGraphDef("", producer=-1))
        x = constant_op.constant(
            7)  # Need at least one op to get a C++ graph generated
        with self.test_session(graph=g) as sess:
          with self.assertRaisesRegexp(Exception, pat):
            sess.run(x)
  def testVersionHigh(self):
    """A min_consumer version above the current version is rejected.

    The C API raises at import time; the Python-only path defers the error
    until the graph is run.
    """
    with ops.Graph().as_default() as g:
      pat = (r"GraphDef min consumer version %d above current version %d "
             r"for TensorFlow \S+\.  Please upgrade TensorFlow\.$" %
             (1 << 30, versions.GRAPH_DEF_VERSION))
      if ops._USE_C_API:
        with self.assertRaisesRegexp(ValueError, pat):
          importer.import_graph_def(self._MakeGraphDef("",
                                                       min_consumer=1 << 30))
      else:
        # Python API only throws when graph is run
        importer.import_graph_def(self._MakeGraphDef("", min_consumer=1 << 30))
        x = constant_op.constant(
            7)  # Need at least one op to get a C++ graph generated
        with self.test_session(graph=g) as sess:
          with self.assertRaisesRegexp(Exception, pat):
            sess.run(x)
  def testVersionAppliesToOpConstruction(self):
    """These tests rely on shape fns in test_ops.cc.

    An op whose shape fn requires an older producer version imports cleanly
    with an older version, and fails with the current one.
    """
    with ops.Graph().as_default():
      importer.import_graph_def(
          self._MakeGraphDef(
              "node { name: 'A' op: 'RequiresOlderGraphVersion' }",
              producer=versions.GRAPH_DEF_VERSION - 1),
          return_elements=["A"])

    with ops.Graph().as_default():
      with self.assertRaisesWithPredicateMatch(ValueError,
                                               "Wrong graph version.*"):
        importer.import_graph_def(
            self._MakeGraphDef(
                "node { name: 'A' op: 'RequiresOlderGraphVersion' }",
                producer=versions.GRAPH_DEF_VERSION),
            return_elements=["A"])
  def testDefaultAttrsAdded(self):
    """Attrs missing from the NodeDef are filled with the op's defaults."""
    with ops.Graph().as_default():
      a = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'OpWithDefaultAttr' }
          """),
          return_elements=["A"])
      self.assertEqual(123.0, a[0].get_attr("default_float"))
  def testDefaultAttrsRemoved(self):
    """Attrs known only to the producer are stripped when set to the default.

    producer_op_list declares 'default_int' with default 456; a NodeDef
    carrying the default value loses the attr on import, while a
    non-default value is preserved (Python-only path).
    """
    producer_op_list = op_def_pb2.OpList()
    text_format.Merge("""
      op {
        name: 'OpWithFutureDefaultAttr'
        attr { name: 'default_int' type: 'int' default_value { i: 456 } }
      }
    """, producer_op_list)
    # Attr only in producer_op_list with default value gets removed.
    with ops.Graph().as_default():
      a = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'OpWithFutureDefaultAttr'
                 attr { key: 'default_int' value { i: 456 } } }
          """),
          return_elements=["A"],
          producer_op_list=producer_op_list)
      if ops._USE_C_API:
        error_msg = "Operation 'import/A' has no attr named 'default_int'."
      else:
        error_msg = "No attr named 'default_int'"
      with self.assertRaisesRegexp(ValueError, error_msg):
        a[0].get_attr("default_int")

    # Unknown attrs cannot be imported using C API. This test will eventually be
    # deleted.
    if not ops._USE_C_API:
      # Attr only in producer_op_list with non-default value is preserved.
      with ops.Graph().as_default():
        a = importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'OpWithFutureDefaultAttr'
                   attr { key: 'default_int' value { i: 987 } } }
            """),
            return_elements=["A"],
            producer_op_list=producer_op_list)
        self.assertEqual(987, a[0].get_attr("default_int"))
  def testFunctions(self):
    """Defun-based functions survive export/import, including gradients,
    captured external tensors, and nested functions, across two round trips.
    """
    dtype = dtypes.float32
    @function.Defun(dtype, dtype, dtype, dtype)
    def Grad(x, y, dout1, dout2):  # pylint: disable=unused-argument
      # Return the inputs for simplicity of testing. The correct return value
      # would be (dout1 + dout2, dout1 - dout2)
      return x, y

    @function.Defun(dtype, dtype, grad_func=Grad)
    def FuncWithGrad(x, y):
      return x + y, x - y

    @function.Defun(dtypes.int32)
    def ExternalTensorFunc(x):
      # c must be defined in the containing graph
      return x + c

    @function.Defun(dtypes.int32, dtypes.int32)
    def OuterFunc(x, y):

      @function.Defun(dtypes.int32)
      def InnerFunc(x):
        return x + x

      return InnerFunc(x) + y

    # Create graph with function calls and export to GraphDef
    with ops.Graph().as_default() as g1:
      p1 = array_ops.placeholder(dtype, name="p1")
      p2 = array_ops.placeholder(dtype, name="p2")
      # pylint: disable=unexpected-keyword-arg
      a, b = FuncWithGrad(p1, p2, name="f")

      c = constant_op.constant(10, dtype=dtypes.int32)
      ExternalTensorFunc(1, name="external")

      OuterFunc(10, 1, name="outer")
      # pylint: enable=unexpected-keyword-arg

    gdef = g1.as_graph_def()

    # Import GraphDef into new graph, add imported gradients, and test that
    # imported functions can be run
    with ops.Graph().as_default() as g2:
      p1, p2, a, b = importer.import_graph_def(
          gdef, return_elements=["p1:0", "p2:0", "f:0", "f:1"], name="")
      grad = gradients_impl.gradients([a], [p1, p2])

      with self.test_session(graph=g2) as sess:
        feed_dict = {p1: 1, p2: 2}
        a_val, b_val, grad_val = sess.run([a, b, grad], feed_dict=feed_dict)
        self.assertEqual(a_val, 3.0)
        self.assertEqual(b_val, -1.0)
        # Grad function returns inputs values for testing
        self.assertEqual(grad_val, [1.0, 2.0])
        self.assertEqual(sess.run("external:0"), 11)
        self.assertEqual(sess.run("outer:0"), 21)

    # Export the new graph and reimport to test that imported functions can be
    # successfully exported/imported again
    gdef = g2.as_graph_def()
    with ops.Graph().as_default() as g3:
      p1, p2, a, b = importer.import_graph_def(
          gdef, return_elements=["p1:0", "p2:0", "f:0", "f:1"], name="")
      # Create new gradient functions (in additional to the imported gradient
      # functions created in g2).
      grad = gradients_impl.gradients([a], [p1, p2])

      with self.test_session(graph=g3) as sess:
        feed_dict = {p1: 1, p2: 2}
        a_val, b_val, grad_val = sess.run([a, b, grad], feed_dict=feed_dict)
        self.assertEqual(a_val, 3.0)
        self.assertEqual(b_val, -1.0)
        self.assertEqual(grad_val, [1.0, 2.0])
        self.assertEqual(sess.run("external:0"), 11)
        self.assertEqual(sess.run("outer:0"), 21)
def testImportInsideDefun(self):
g = ops.Graph()
with g.as_default():
@function.Defun()
def Add2(x, y):
return math_ops.add(x, y)
x = constant_op.constant(3.0, dtype=dtypes.float32)
y = constant_op.constant(-5.0, dtype=dtypes.float32)
z = Add2(x, y, name="z") # pylint: disable=unexpected-keyword-arg
gdef = g.as_graph_def()
@function.Defun()
def TestFunc():
return importer.import_graph_def(gdef, return_elements=["z:0"])[0]
z = TestFunc()
with self.test_session():
z_val = z.eval()
self.assertEqual(z_val, -2.0)
def testImportGraphWithFunctionTwice(self):
g = ops.Graph()
with g.as_default():
@function.Defun()
def Add2(x, y):
return math_ops.add(x, y)
x = array_ops.placeholder(dtype=dtypes.float32, name="x")
y = array_ops.placeholder(dtype=dtypes.float32, name="y")
_ = Add2(x, y, name="z") # pylint: disable=unexpected-keyword-arg
gdef = g.as_graph_def()
x = random_ops.random_uniform(dtype=dtypes.float32, shape=())
y = random_ops.random_uniform(dtype=dtypes.float32, shape=())
input_map = {"x:0": x, "y:0": y}
with ops.name_scope("first"):
z1 = importer.import_graph_def(gdef, return_elements=["z:0"],
input_map=input_map)[0]
with ops.name_scope("second"):
z2 = importer.import_graph_def(gdef, return_elements=["z:0"],
input_map=input_map)[0]
with self.test_session() as sess:
z1_val, z2_val = sess.run((z1, z2))
self.assertAllEqual(z1_val, z2_val)
if __name__ == "__main__":
test.main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# file lyx_pot.py
# This file is part of LyX, the document processor.
# Licence details can be found in the file COPYING.
#
# \author Bo Peng
#
# Full author contact details are available in file CREDITS
# Usage: use
# lyx_pot.py -h
# to get usage message
# This script will extract translatable strings from input files and write
# to output in gettext .pot format.
#
import sys, os, re, getopt
if sys.version_info < (2, 4, 0):
from sets import Set as set
def relativePath(path, base):
'''return relative path from top source dir'''
# full pathname of path
path1 = os.path.normpath(os.path.realpath(path)).split(os.sep)
path2 = os.path.normpath(os.path.realpath(base)).split(os.sep)
if path1[:len(path2)] != path2:
print "Path %s is not under top source directory" % path
path3 = os.path.join(*path1[len(path2):]);
# replace all \ by / such that we get the same comments on Windows and *nix
path3 = path3.replace('\\', '/')
return path3
def writeString(outfile, infile, basefile, lineno, string):
string = string.replace('\\', '\\\\').replace('"', '')
if string == "":
return
print >> outfile, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(infile, basefile), lineno, string)
def ui_l10n(input_files, output, base):
'''Generate pot file from lib/ui/*'''
output = open(output, 'w')
Submenu = re.compile(r'^[^#]*Submenu\s+"([^"]*)"', re.IGNORECASE)
Popupmenu = re.compile(r'^[^#]*PopupMenu\s+"[^"]+"\s+"([^"]*)"', re.IGNORECASE)
IconPalette = re.compile(r'^[^#]*IconPalette\s+"[^"]+"\s+"([^"]*)"', re.IGNORECASE)
Toolbar = re.compile(r'^[^#]*Toolbar\s+"[^"]+"\s+"([^"]*)"', re.IGNORECASE)
Item = re.compile(r'[^#]*Item\s+"([^"]*)"', re.IGNORECASE)
TableInsert = re.compile(r'[^#]*TableInsert\s+"([^"]*)"', re.IGNORECASE)
for src in input_files:
input = open(src)
for lineno, line in enumerate(input.readlines()):
if Submenu.match(line):
(string,) = Submenu.match(line).groups()
string = string.replace('_', ' ')
elif Popupmenu.match(line):
(string,) = Popupmenu.match(line).groups()
elif IconPalette.match(line):
(string,) = IconPalette.match(line).groups()
elif Toolbar.match(line):
(string,) = Toolbar.match(line).groups()
elif Item.match(line):
(string,) = Item.match(line).groups()
elif TableInsert.match(line):
(string,) = TableInsert.match(line).groups()
else:
continue
string = string.replace('"', '')
if string != "":
print >> output, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(src, base), lineno+1, string)
input.close()
output.close()
def layouts_l10n(input_files, output, base, layouttranslations):
'''Generate pot file from lib/layouts/*.{layout,inc,module}'''
ClassDescription = re.compile(r'^\s*#\s*\\Declare(LaTeX|DocBook)Class.*\{(.*)\}$', re.IGNORECASE)
ClassCategory = re.compile(r'^\s*#\s*\\DeclareCategory\{(.*)\}$', re.IGNORECASE)
Style = re.compile(r'^\s*Style\s+(.*\S)\s*$', re.IGNORECASE)
# match LabelString, EndLabelString, LabelStringAppendix and maybe others but no comments
LabelString = re.compile(r'^[^#]*LabelString\S*\s+(.*\S)\s*$', re.IGNORECASE)
MenuString = re.compile(r'^[^#]*MenuString\S*\s+(.*\S)\s*$', re.IGNORECASE)
Tooltip = re.compile(r'^[^#]*Tooltip\S*\s+(.*\S)\s*$', re.IGNORECASE)
GuiName = re.compile(r'^\s*GuiName\s+(.*\S)\s*$', re.IGNORECASE)
ListName = re.compile(r'^\s*ListName\s+(.*\S)\s*$', re.IGNORECASE)
CategoryName = re.compile(r'^\s*Category\s+(.*\S)\s*$', re.IGNORECASE)
NameRE = re.compile(r'^\s*#\s*\\DeclareLyXModule.*{(.*)}$', re.IGNORECASE)
InsetLayout = re.compile(r'^InsetLayout\s+\"?(.*)\"?\s*$', re.IGNORECASE)
FlexCheck = re.compile(r'^Flex:(.*)', re.IGNORECASE)
CaptionCheck = re.compile(r'^Caption:(.*)', re.IGNORECASE)
DescBegin = re.compile(r'^\s*#DescriptionBegin\s*$', re.IGNORECASE)
DescEnd = re.compile(r'^\s*#\s*DescriptionEnd\s*$', re.IGNORECASE)
Category = re.compile(r'^\s*#\s*Category:\s+(.*\S)\s*$', re.IGNORECASE)
I18nPreamble = re.compile(r'^\s*((Lang)|(Babel))Preamble\s*$', re.IGNORECASE)
EndI18nPreamble = re.compile(r'^\s*End((Lang)|(Babel))Preamble\s*$', re.IGNORECASE)
I18nString = re.compile(r'_\(([^\)]+)\)')
CounterFormat = re.compile(r'^\s*PrettyFormat\s+"?(.*)"?\s*$', re.IGNORECASE)
CiteFormat = re.compile(r'^\s*CiteFormat', re.IGNORECASE)
KeyVal = re.compile(r'^\s*_\w+\s+(.*\S)\s*$')
Float = re.compile(r'^\s*Float\s*$', re.IGNORECASE)
UsesFloatPkg = re.compile(r'^\s*UsesFloatPkg\s+(.*\S)\s*$', re.IGNORECASE)
IsPredefined = re.compile(r'^\s*IsPredefined\s+(.*\S)\s*$', re.IGNORECASE)
End = re.compile(r'^\s*End', re.IGNORECASE)
Comment = re.compile(r'^(.*)#')
Translation = re.compile(r'^\s*Translation\s+(.*\S)\s*$', re.IGNORECASE)
KeyValPair = re.compile(r'\s*"(.*)"\s+"(.*)"')
oldlanguages = []
languages = []
keyset = set()
oldtrans = dict()
if layouttranslations:
linguas_file = os.path.join(base, 'po/LINGUAS')
for line in open(linguas_file).readlines():
res = Comment.search(line)
if res:
line = res.group(1)
if line.strip() != '':
languages.extend(line.split())
# read old translations if available
try:
input = open(output)
lang = ''
for line in input.readlines():
res = Comment.search(line)
if res:
line = res.group(1)
if line.strip() == '':
continue
res = Translation.search(line)
if res:
lang = res.group(1)
if lang not in languages:
oldlanguages.append(lang)
languages.append(lang)
oldtrans[lang] = dict()
continue
res = End.search(line)
if res:
lang = ''
continue
res = KeyValPair.search(line)
if res and lang != '':
key = res.group(1).decode('utf-8')
val = res.group(2).decode('utf-8')
key = key.replace('\\"', '"').replace('\\\\', '\\')
val = val.replace('\\"', '"').replace('\\\\', '\\')
oldtrans[lang][key] = val
keyset.add(key)
continue
print "Error: Unable to handle line:"
print line
except IOError:
print "Warning: Unable to open %s for reading." % output
print " Old translations will be lost."
# walon is not a known document language
# FIXME: Do not hardcode, read from lib/languages!
if 'wa' in languages:
languages.remove('wa')
out = open(output, 'w')
for src in input_files:
readingDescription = False
readingI18nPreamble = False
readingFloat = False
readingCiteFormats = False
isPredefined = False
usesFloatPkg = True
listname = ''
floatname = ''
descStartLine = -1
descLines = []
lineno = 0
for line in open(src).readlines():
lineno += 1
res = ClassDescription.search(line)
if res != None:
string = res.group(2)
if not layouttranslations:
writeString(out, src, base, lineno + 1, string)
continue
res = ClassCategory.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno + 1, string)
continue
if readingDescription:
res = DescEnd.search(line)
if res != None:
readingDescription = False
desc = " ".join(descLines)
if not layouttranslations:
writeString(out, src, base, lineno + 1, desc)
continue
descLines.append(line[1:].strip())
continue
res = DescBegin.search(line)
if res != None:
readingDescription = True
descStartLine = lineno
continue
if readingI18nPreamble:
res = EndI18nPreamble.search(line)
if res != None:
readingI18nPreamble = False
continue
res = I18nString.search(line)
if res != None:
string = res.group(1)
if layouttranslations:
keyset.add(string)
else:
writeString(out, src, base, lineno, string)
continue
res = I18nPreamble.search(line)
if res != None:
readingI18nPreamble = True
continue
res = NameRE.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno + 1, string)
continue
res = Style.search(line)
if res != None:
string = res.group(1)
string = string.replace('_', ' ')
# Style means something else inside a float definition
if not readingFloat:
if not layouttranslations:
writeString(out, src, base, lineno, string)
continue
res = LabelString.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno, string)
continue
res = MenuString.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno, string)
continue
res = Tooltip.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno, string)
continue
res = GuiName.search(line)
if res != None:
string = res.group(1)
if layouttranslations:
# gui name must only be added for floats
if readingFloat:
floatname = string
else:
writeString(out, src, base, lineno, string)
continue
res = CategoryName.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno, string)
continue
res = ListName.search(line)
if res != None:
string = res.group(1)
if layouttranslations:
listname = string.strip('"')
else:
writeString(out, src, base, lineno, string)
continue
res = InsetLayout.search(line)
if res != None:
string = res.group(1)
string = string.replace('_', ' ')
#Flex:xxx is not used in translation
#if not layouttranslations:
# writeString(out, src, base, lineno, string)
m = FlexCheck.search(string)
if m:
if not layouttranslations:
writeString(out, src, base, lineno, m.group(1))
m = CaptionCheck.search(string)
if m:
if not layouttranslations:
writeString(out, src, base, lineno, m.group(1))
continue
res = Category.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno, string)
continue
res = CounterFormat.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno, string)
continue
res = Float.search(line)
if res != None:
readingFloat = True
continue
res = IsPredefined.search(line)
if res != None:
string = res.group(1).lower()
if string == 'true':
isPredefined = True
else:
isPredefined = False
continue
res = UsesFloatPkg.search(line)
if res != None:
string = res.group(1).lower()
if string == 'true':
usesFloatPkg = True
else:
usesFloatPkg = False
continue
res = CiteFormat.search(line)
if res != None:
readingCiteFormats = True
continue
res = End.search(line)
if res != None:
# If a float is predefined by the package and it does not need
# the float package then it uses the standard babel translations.
# This is even true for MarginFigure, MarginTable (both from
# tufte-book.layout) and Planotable, Plate (both from aguplus.inc).
if layouttranslations and readingFloat and usesFloatPkg and not isPredefined:
if floatname != '':
keyset.add(floatname)
if listname != '':
keyset.add(listname)
isPredefined = False
usesFloatPkg = True
listname = ''
floatname = ''
readingCiteFormats = False
readingFloat = False
continue
if readingCiteFormats:
res = KeyVal.search(line)
if res != None:
val = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno, val)
if layouttranslations:
# Extract translations of layout files
import polib
# Sort languages and key to minimize the diff between different runs
# with changed translations
languages.sort()
keys = []
for key in keyset:
keys.append(key)
keys.sort()
ContextRe = re.compile(r'(.*)(\[\[.*\]\])')
print >> out, '''# This file has been automatically generated by po/lyx_pot.py.
# PLEASE MODIFY ONLY THE LAGUAGES HAVING NO .po FILE! If you want to regenerate
# this file from the translations, run `make ../lib/layouttranslations' in po.
# Python polib library is needed for building the output file.
#
# This file should remain fixed during minor LyX releases.
# For more comments see README.localization file.'''
for lang in languages:
print >> out, '\nTranslation %s' % lang
if lang in oldtrans.keys():
trans = oldtrans[lang]
else:
trans = dict()
if not lang in oldlanguages:
poname = os.path.join(base, 'po/' + lang + '.po')
po = polib.pofile(poname)
# Iterate through po entries and not keys for speed reasons.
# FIXME: The code is still too slow
for entry in po:
if not entry.translated():
continue
if entry.msgid in keys:
key = entry.msgid
val = entry.msgstr
# some translators keep untranslated entries
if val != key:
trans[key] = val
for key in keys:
if key in trans.keys():
val = trans[key].replace('\\', '\\\\').replace('"', '\\"')
key = key.replace('\\', '\\\\').replace('"', '\\"')
print >> out, '\t"%s" "%s"' % \
(key.encode('utf-8'), val.encode('utf-8'))
# also print untranslated entries to help translators
elif not lang in oldlanguages:
key = key.replace('\\', '\\\\').replace('"', '\\"')
res = ContextRe.search(key)
if res != None:
val = res.group(1)
else:
val = key
print >> out, '\t"%s" "%s"' % \
(key.encode('utf-8'), val.encode('utf-8'))
print >> out, 'End'
out.close()
def qt4_l10n(input_files, output, base):
'''Generate pot file from src/frontends/qt4/ui/*.ui'''
output = open(output, 'w')
pat = re.compile(r'\s*<string>(.*)</string>')
prop = re.compile(r'\s*<property.*name.*=.*shortcut')
for src in input_files:
input = open(src)
skipNextLine = False
for lineno, line in enumerate(input.readlines()):
# skip the line after <property name=shortcut>
if skipNextLine:
skipNextLine = False
continue
if prop.match(line):
skipNextLine = True
continue
# get lines that match <string>...</string>
if pat.match(line):
(string,) = pat.match(line).groups()
string = string.replace('&', '&').replace('"', '"')
string = string.replace('<', '<').replace('>', '>')
string = string.replace('\\', '\\\\').replace('"', r'\"')
string = string.replace('
', r'\n')
print >> output, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(src, base), lineno+1, string)
input.close()
output.close()
def languages_l10n(input_files, output, base):
'''Generate pot file from lib/languages'''
out = open(output, 'w')
GuiName = re.compile(r'^[^#]*GuiName\s+(.*)', re.IGNORECASE)
for src in input_files:
descStartLine = -1
descLines = []
lineno = 0
for line in open(src).readlines():
lineno += 1
res = GuiName.search(line)
if res != None:
string = res.group(1)
writeString(out, src, base, lineno, string)
continue
out.close()
def latexfonts_l10n(input_files, output, base):
'''Generate pot file from lib/latexfonts'''
out = open(output, 'w')
GuiName = re.compile(r'^[^#]*GuiName\s+(.*)', re.IGNORECASE)
for src in input_files:
descStartLine = -1
descLines = []
lineno = 0
for line in open(src).readlines():
lineno += 1
res = GuiName.search(line)
if res != None:
string = res.group(1)
writeString(out, src, base, lineno, string)
continue
out.close()
def external_l10n(input_files, output, base):
'''Generate pot file from lib/external_templates'''
output = open(output, 'w')
Template = re.compile(r'^Template\s+(.*)', re.IGNORECASE)
GuiName = re.compile(r'\s*GuiName\s+(.*)', re.IGNORECASE)
HelpTextStart = re.compile(r'\s*HelpText\s', re.IGNORECASE)
HelpTextSection = re.compile(r'\s*(\S.*\S)\s*$')
HelpTextEnd = re.compile(r'\s*HelpTextEnd\s', re.IGNORECASE)
i = -1
for src in input_files:
input = open(src)
inHelp = False
hadHelp = False
prev_help_string = ''
for lineno, line in enumerate(input.readlines()):
if Template.match(line):
(string,) = Template.match(line).groups()
elif GuiName.match(line):
(string,) = GuiName.match(line).groups()
elif inHelp:
if HelpTextEnd.match(line):
if hadHelp:
print >> output, '\nmsgstr ""\n'
inHelp = False
hadHelp = False
prev_help_string = ''
elif HelpTextSection.match(line):
(help_string,) = HelpTextSection.match(line).groups()
help_string = help_string.replace('"', '')
if help_string != "" and prev_help_string == '':
print >> output, '#: %s:%d\nmsgid ""\n"%s\\n"' % \
(relativePath(src, base), lineno+1, help_string)
hadHelp = True
elif help_string != "":
print >> output, '"%s\\n"' % help_string
prev_help_string = help_string
elif HelpTextStart.match(line):
inHelp = True
prev_help_string = ''
else:
continue
string = string.replace('"', '')
if string != "" and not inHelp:
print >> output, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(src, base), lineno+1, string)
input.close()
output.close()
def formats_l10n(input_files, output, base):
'''Generate pot file from configure.py'''
output = open(output, 'w')
GuiName = re.compile(r'.*\\Format\s+\S+\s+\S+\s+"([^"]*)"\s+(\S*)\s+.*', re.IGNORECASE)
GuiName2 = re.compile(r'.*\\Format\s+\S+\s+\S+\s+([^"]\S+)\s+(\S*)\s+.*', re.IGNORECASE)
input = open(input_files[0])
for lineno, line in enumerate(input.readlines()):
label = ""
labelsc = ""
if GuiName.match(line):
label = GuiName.match(line).group(1)
shortcut = GuiName.match(line).group(2).replace('"', '')
elif GuiName2.match(line):
label = GuiName2.match(line).group(1)
shortcut = GuiName2.match(line).group(2).replace('"', '')
else:
continue
label = label.replace('\\', '\\\\').replace('"', '')
if shortcut != "":
labelsc = label + "|" + shortcut
if label != "":
print >> output, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(input_files[0], base), lineno+1, label)
if labelsc != "":
print >> output, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(input_files[0], base), lineno+1, labelsc)
input.close()
output.close()
def encodings_l10n(input_files, output, base):
'''Generate pot file from lib/encodings'''
output = open(output, 'w')
# assuming only one encodings file
# Encoding utf8 utf8 "Unicode (utf8)" UTF-8 variable inputenc
reg = re.compile('Encoding [\w-]+\s+[\w-]+\s+"([\w \-\(\)]+)"\s+[\w-]+\s+(fixed|variable|variableunsafe)\s+\w+.*')
input = open(input_files[0])
for lineno, line in enumerate(input.readlines()):
if not line.startswith('Encoding'):
continue
if reg.match(line):
print >> output, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(input_files[0], base), lineno+1, reg.match(line).groups()[0])
else:
print "Error: Unable to handle line:"
print line
# No need to abort if the parsing fails
# sys.exit(1)
input.close()
output.close()
Usage = '''
lyx_pot.py [-b|--base top_src_dir] [-o|--output output_file] [-h|--help] [-s|src_file filename] -t|--type input_type input_files
where
--base:
path to the top source directory. default to '.'
--output:
output pot file, default to './lyx.pot'
--src_file
filename that contains a list of input files in each line
--input_type can be
ui: lib/ui/*
layouts: lib/layouts/*
layouttranslations: create lib/layouttranslations from po/*.po and lib/layouts/*
qt4: qt4 ui files
languages: file lib/languages
latexfonts: file lib/latexfonts
encodings: file lib/encodings
external: external templates file
formats: formats predefined in lib/configure.py
'''
if __name__ == '__main__':
input_type = None
output = 'lyx.pot'
base = '.'
input_files = []
#
optlist, args = getopt.getopt(sys.argv[1:], 'ht:o:b:s:',
['help', 'type=', 'output=', 'base=', 'src_file='])
for (opt, value) in optlist:
if opt in ['-h', '--help']:
print Usage
sys.exit(0)
elif opt in ['-o', '--output']:
output = value
elif opt in ['-b', '--base']:
base = value
elif opt in ['-t', '--type']:
input_type = value
elif opt in ['-s', '--src_file']:
input_files = [f.strip() for f in open(value)]
if input_type not in ['ui', 'layouts', 'layouttranslations', 'qt4', 'languages', 'latexfonts', 'encodings', 'external', 'formats'] or output is None:
print 'Wrong input type or output filename.'
sys.exit(1)
input_files += args
if input_type == 'ui':
ui_l10n(input_files, output, base)
elif input_type == 'latexfonts':
latexfonts_l10n(input_files, output, base)
elif input_type == 'layouts':
layouts_l10n(input_files, output, base, False)
elif input_type == 'layouttranslations':
layouts_l10n(input_files, output, base, True)
elif input_type == 'qt4':
qt4_l10n(input_files, output, base)
elif input_type == 'external':
external_l10n(input_files, output, base)
elif input_type == 'formats':
formats_l10n(input_files, output, base)
elif input_type == 'encodings':
encodings_l10n(input_files, output, base)
else:
languages_l10n(input_files, output, base) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = r"""
attributes:
check_mode:
description: Can run in check_mode and return changed status prediction without modifying target, if not supported the action will be skipped.
diff_mode:
description: Will return details on what has changed (or possibly needs changing in check_mode), when in diff mode
platform:
description: Target OS/families that can be operated against
support: N/A
"""
ACTIONGROUPS = r"""
attributes:
action_group:
description: Action is part of action_group(s), for convenient setting of module_defaults.
support: N/A
membership: []
"""
CONN = r"""
attributes:
become:
description: Is usable alongside become keywords
connection:
description: Uses the target's configured connection information to execute code on it
delegation:
description: Can be used in conjunction with delegate_to and related keywords
"""
FACTS = r"""
attributes:
facts:
description: Action returns an C(ansible_facts) dictionary that will update existing host facts
"""
FILES = r"""
attributes:
safe_file_operations:
description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption
vault:
description: Can automatically decrypt Ansible vaulted files
"""
FLOW = r"""
attributes:
action:
description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller
async:
description: Supports being used with the C(async) keyword
bypass_host_loop:
description:
- Forces a 'global' task that does not execute per host, this bypasses per host templating and serial,
throttle and other loop considerations
- Conditionals will work as if C(run_once) is being used, variables used will be from the first available host
- This action will not work normally outside of lockstep strategies
"""
RAW = r"""
attributes:
raw:
description: Indicates if an action takes a 'raw' or 'free form' string as an option and has it's own special parsing of it
""" | python | github | https://github.com/ansible/ansible | lib/ansible/plugins/doc_fragments/action_common_attributes.py |
- hosts: localhost
gather_facts: false
tasks:
- name: test missing role rescue
vars:
rescue_1: false
block:
- name: Include a role that doesn't exist
include_role:
name: missing_role
rescuable: '{{ rescueme | default(omit) }}'
rescue:
- set_fact:
rescue_1: true
always:
- assert:
that:
- rescue_1 == rescueme|default(True)
- name: Test _from rescue
vars:
rescue_2: false
block:
- name: Include a task file that doesn't exist, but role exists
include_role:
name: include_roles
tasks_from: missing_task_list
rescuable: '{{ rescueme | default(omit) }}'
rescue:
- set_fact:
rescue_2: true
always:
- assert:
that:
- rescue_2 == rescueme|default(True) | unknown | github | https://github.com/ansible/ansible | test/integration/targets/includes/include_role_error_handling.yml |
from awacs.aws import (
Allow,
Condition,
Policy,
Statement,
StringEquals,
StringLike,
)
import awacs.logs
import awacs.s3
import awacs.firehose
import awacs.kms
from awacs.helpers.trust import make_simple_assume_statement
from stacker.blueprints.base import Blueprint
from troposphere import (
iam,
logs,
firehose,
Join,
GetAtt,
Output,
Ref,
Sub,
)
from ..policies import (
s3_arn,
write_to_cloudwatch_logs_stream_statements,
)
from ..cloudwatch_logs import (
LOG_RETENTION_STRINGS,
validate_cloudwatch_log_retention,
)
LOG_GROUP = "LogGroup"
S3_LOG_STREAM = "S3LogStream"
ROLE = "Role"
REGION = Ref("AWS::Region")
NOVALUE = Ref("AWS::NoValue")
def make_simple_assume_policy(*principals):
return Policy(
Statement=[
make_simple_assume_statement(*principals)
]
)
def s3_write_statements(bucket_name):
return [
Statement(
Effect=Allow,
Action=[
awacs.s3.AbortMultipartUpload,
awacs.s3.GetBucketLocation,
awacs.s3.GetObject,
awacs.s3.ListBucket,
awacs.s3.ListBucketMultipartUploads,
awacs.s3.PutObject,
],
Resource=[
s3_arn(bucket_name),
s3_arn(Join("/", [bucket_name, "*"]))
],
),
]
def kms_key_statements(key_arn, bucket_arn, bucket_prefix):
s3_endpoint = Join(
'',
[
"s3.", REGION, "amazonaws.com"
]
)
return [
Statement(
Effect=Allow,
Action=[
awacs.kms.Decrypt,
awacs.kms.GenerateDataKey,
],
Resource=[key_arn],
Condition=Condition(
[
StringEquals(
"kms:ViaService", s3_endpoint
),
StringLike(
"kms:EncryptionContext:aws:s3:arn",
Join('', [bucket_arn, bucket_prefix, "*"])
)
]
)
)
]
class BaseDeliveryStream(Blueprint):
VARIABLES = {
"BucketName": {
"type": str,
"description": "Name of existing bucket to stream firehose "
"data to."
},
"S3Prefix": {
"type": str,
"description": "The prefix used when writing objects in the s3 "
"bucket.",
"default": "/",
},
"EncryptionKeyArn": {
"type": str,
"description": "ARN of the KMS key to use to encrypt objects in "
"the s3 bucket.",
"default": "",
},
"BufferingHints": {
"type": dict,
"description": "A dictionary with buffering hints for writing "
"objects to the s3 bucket. Valid keys are: "
"IntervalInSeconds, SizeInMBs",
"default": {"IntervalInSeconds": 300, "SizeInMBs": 5},
},
"CompressionFormat": {
"type": str,
"description": "The compression format used by the Firehose when "
"writing objects in the s3 bucket.",
"default": "UNCOMPRESSED",
},
"LogRetentionDays": {
"type": int,
"description": "Time in days to retain Cloudwatch Logs. Accepted "
"values: %s. Default 0 - retain forever." % (
', '.join(LOG_RETENTION_STRINGS)),
"default": 0,
"validator": validate_cloudwatch_log_retention,
}
}
def buffering_hints(self):
hints_config = self.get_variables()["BufferingHints"]
return firehose.BufferingHints(**hints_config)
def encryption_config(self):
key_arn = self.get_variables()["EncryptionKeyArn"]
if key_arn:
return firehose.EncryptionConfiguration(
KMSEncryptionConfig=firehose.KMSEncryptionConfig(
AWSKMSKeyARN=key_arn
)
)
else:
return NOVALUE
def s3_bucket_arn(self):
bucket_name = self.get_variables()["BucketName"]
return s3_arn(bucket_name)
def cloudwatch_logging_options(self, log_group, log_stream):
return firehose.CloudWatchLoggingOptions(
Enabled=True,
LogGroupName=Ref(log_group),
LogStreamName=Ref(log_stream),
)
def s3_destination_config_dict(self):
t = self.template
variables = self.get_variables()
t.add_output(Output("BucketName", Value=variables["BucketName"]))
return {
"BucketARN": self.s3_bucket_arn(),
"RoleARN": GetAtt(self.role, "Arn"),
"CompressionFormat": variables['CompressionFormat'],
"BufferingHints": self.buffering_hints(),
"Prefix": variables["S3Prefix"],
"EncryptionConfiguration": self.encryption_config(),
"CloudWatchLoggingOptions": self.cloudwatch_logging_options(
self.log_group,
self.s3_log_stream
)
}
def generate_iam_policy_statements(self):
variables = self.get_variables()
bucket_name = variables["BucketName"]
bucket_arn = self.s3_bucket_arn()
s3_prefix = variables["S3Prefix"]
key_arn = variables["EncryptionKeyArn"]
statements = []
statements.extend(s3_write_statements(bucket_name))
statements.extend(
write_to_cloudwatch_logs_stream_statements(
Ref(self.log_group), Ref(self.s3_log_stream)
)
)
if key_arn:
statements.extend(
kms_key_statements(
key_arn, bucket_arn, s3_prefix
)
)
return statements
def generate_iam_policy(self):
return iam.Policy(
PolicyName=Sub("${AWS::StackName}-policy"),
PolicyDocument=Policy(
Statement=self.generate_iam_policy_statements()
)
)
def create_role(self):
t = self.template
self.role = t.add_resource(
iam.Role(
ROLE,
AssumeRolePolicyDocument=make_simple_assume_policy(
"firehose.amazonaws.com"
),
Path="/",
Policies=[self.generate_iam_policy()]
)
)
t.add_output(Output("RoleName", Value=Ref(self.role)))
t.add_output(Output("RoleArn", Value=GetAtt(self.role, "Arn")))
def create_log_group(self):
t = self.template
variables = self.get_variables()
log_retention = variables["LogRetentionDays"] or NOVALUE
self.log_group = t.add_resource(
logs.LogGroup(
LOG_GROUP,
RetentionInDays=log_retention,
)
)
t.add_output(Output("LogGroupName", Value=Ref(self.log_group)))
t.add_output(
Output("LogGroupArn", Value=GetAtt(self.log_group, "Arn"))
)
def create_s3_log_stream(self):
t = self.template
self.s3_log_stream = t.add_resource(
logs.LogStream(
S3_LOG_STREAM,
LogGroupName=Ref(self.log_group),
DependsOn=self.log_group.title
)
)
t.add_output(Output("S3LogStreamName", Value=Ref(self.s3_log_stream)))
def create_log_stream(self):
self.create_s3_log_stream()
def create_delivery_stream(self):
raise NotImplementedError("create_delivery_stream must be implemented "
"by a subclass.")
def create_delivery_stream_output(self):
t = self.template
t.add_output(
Output("DeliveryStreamName", Value=Ref(self.delivery_stream))
)
def create_template(self):
self.create_log_group()
self.create_log_stream()
self.create_role()
self.create_delivery_stream()
self.create_delivery_stream_output() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
#
# Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
#
# this script attempts to turn doc comment attributes (#[doc = "..."])
# into sugared-doc-comments (/** ... */ and /// ...)
#
# it sugarises all .rs/.rc files underneath the working directory
#
import sys, os, fnmatch, re
# Matches a `#[doc = "..."]` attribute: captures its indentation, the quoted
# text (escape sequences left intact) and an optional trailing semicolon
# (a trailing `;` marks an inner attribute, i.e. `#[doc = "..."];`).
DOC_PATTERN = '^(?P<indent>[\\t ]*)#\\[(\\s*)doc(\\s*)=' + \
              '(\\s*)"(?P<text>(\\"|[^"])*?)"(\\s*)\\]' + \
              '(?P<semi>;)?'
# Escape sequences that may appear inside a doc attribute's string literal,
# paired with the character each one denotes.
ESCAPES = [("\\'", "'"),
           ('\\"', '"'),
           ("\\n", "\n"),
           ("\\r", "\r"),
           ("\\t", "\t")]

def unescape(s):
    """Expand every escape sequence in ESCAPES within *s* and return the result."""
    result = s
    for sequence, char in ESCAPES:
        result = result.replace(sequence, char)
    return result
def block_trim(s):
    """Split *s* into lines, dropping blank edge lines and the common
    leading-whitespace prefix, and return the right-stripped lines.

    Returns a (possibly empty) list of strings.
    """
    lns = s.splitlines()
    # Remove leading/trailing whitespace-only lines.
    while lns and not lns[0].strip():
        lns = lns[1:]
    while lns and not lns[-1].strip():
        lns = lns[:-1]
    # Find the width of the common leading whitespace over non-blank lines.
    # sys.maxsize replaces the Python-2-only sys.maxint (NameError on Py3);
    # the pattern is a raw string to avoid the invalid-escape warning.
    n = sys.maxsize
    for ln in lns:
        if ln.strip():
            n = min(n, len(re.search(r'^\s*', ln).group()))
    if n != sys.maxsize:
        lns = [ln[n:] for ln in lns]
    # Strip trailing whitespace from every line.
    return [ln.rstrip() for ln in lns]
def replace_doc(m):
    """Render one matched doc attribute as a sugared doc comment string.

    Multi-line text becomes a block comment (/** ... */ or /*! ... */ for
    inner attributes); single-line text becomes /// or //!.
    """
    indent = m.group('indent')
    lines = block_trim(unescape(m.group('text')))
    is_inner = bool(m.group('semi'))  # trailing `;` marks an inner attribute
    if len(lines) > 1:
        marker = '!' if is_inner else '*'
        body = '\n'.join(
            indent + ' *' + (' ' + ln if ln else '') for ln in lines)
        return indent + '/*' + marker + '\n' + body + '\n' + indent + ' */'
    marker = '!' if is_inner else '/'
    return indent + '//' + marker + ' ' + lines[0]
def sugarise_file(path):
    """Rewrite doc attributes in *path* as sugared doc comments, in place.

    The file is only rewritten when at least one substitution occurred.
    `with` blocks ensure the handles are closed (the original relied on
    CPython refcounting to close them).
    """
    with open(path) as f:
        original = f.read()
    pattern = re.compile(DOC_PATTERN, re.MULTILINE | re.DOTALL)
    sugared = pattern.sub(replace_doc, original)
    if sugared != original:
        with open(path, 'w') as f:
            f.write(sugared)
# Recursively sugarise every .rs / .rc file beneath the working directory.
for (dirpath, dirnames, filenames) in os.walk('.'):
    for name in fnmatch.filter(filenames, '*.r[sc]'):
        sugarise_file(os.path.join(dirpath, name))
# -*- coding: utf-8 -*-
"""Base classes for statistical test results
Created on Mon Apr 22 14:03:21 2013
Author: Josef Perktold
"""
from statsmodels.compat.python import lzip, zip
import numpy as np
class AllPairsResults(object):
    '''Results class for pairwise comparisons, based on p-values

    Parameters
    ----------
    pvals_raw : array_like, 1-D
        p-values from a pairwise comparison test
    all_pairs : list of tuples
        list of indices, one pair for each comparison
    multitest_method : string
        method that is used by default for p-value correction. This is used
        as default by the methods like if the multiple-testing method is not
        specified as argument.
    levels : None or list of strings
        optional names of the levels or groups
    n_levels : None or int
        If None, then the number of levels or groups is inferred from the
        other arguments. It can be explicitly specified, if the inferred
        number is incorrect.

    Notes
    -----
    This class can also be used for other pairwise comparisons, for example
    comparing several treatments to a control (as in Dunnet's test).

    '''

    def __init__(self, pvals_raw, all_pairs, multitest_method='hs',
                 levels=None, n_levels=None):
        self.pvals_raw = pvals_raw
        self.all_pairs = all_pairs
        if n_levels is None:
            # for all_pairs nobs*(nobs-1)/2; largest index + 1 gives the count
            self.n_levels = np.max(all_pairs) + 1
        else:
            self.n_levels = n_levels

        self.multitest_method = multitest_method
        self.levels = levels
        if levels is None:
            self.all_pairs_names = ['%r' % (pairs,) for pairs in all_pairs]
        else:
            self.all_pairs_names = ['%s-%s' % (levels[pairs[0]],
                                               levels[pairs[1]])
                                    for pairs in all_pairs]

    def pval_corrected(self, method=None):
        '''p-values corrected for multiple testing problem

        This uses the default p-value correction of the instance stored in
        ``self.multitest_method`` if method is None.

        '''
        import statsmodels.stats.multitest as smt
        if method is None:
            method = self.multitest_method
        # TODO: breaks with method=None
        return smt.multipletests(self.pvals_raw, method=method)[1]

    def __str__(self):
        return self.summary()

    def pval_table(self):
        '''create a (n_levels, n_levels) array with corrected p_values

        this needs to improve, similar to R pairwise output

        '''
        k = self.n_levels
        pvals_mat = np.zeros((k, k))
        # if we don't assume we have all pairs
        # NOTE: index with a *tuple* of row/column index sequences. The
        # original indexed with a plain list of tuples (lzip), which relied
        # on non-tuple sequence indexing that NumPy deprecated in 1.15 and
        # later removed.
        pvals_mat[tuple(zip(*self.all_pairs))] = self.pval_corrected()
        return pvals_mat

    def summary(self):
        '''returns text summarizing the results

        uses the default pvalue correction of the instance stored in
        ``self.multitest_method``
        '''
        import statsmodels.stats.multitest as smt
        maxlevel = max((len(ss) for ss in self.all_pairs_names))

        text = 'Corrected p-values using %s p-value correction\n\n' % \
               smt.multitest_methods_names[self.multitest_method]
        text += 'Pairs' + (' ' * (maxlevel - 5 + 1)) + 'p-values\n'
        text += '\n'.join(('%s %6.4g' % (pairs, pv) for (pairs, pv) in
                           zip(self.all_pairs_names, self.pval_corrected())))
        return text
"""passlib.exc -- exceptions & warnings raised by passlib"""
#=============================================================================
# exceptions
#=============================================================================
class MissingBackendError(RuntimeError):
    """Raised when a multi-backend handler has no available backend, or when a
    specifically requested backend is unavailable.

    Deriving from :exc:`RuntimeError` reflects that the usual cause is a
    missing external library or OS feature; currently this mainly concerns
    handlers with external dependencies such as :class:`~passlib.hash.bcrypt`.
    """
class PasswordSizeError(ValueError):
    """Raised when a password exceeds the maximum size allowed by Passlib
    (4096 characters).

    Many hash algorithms cost time and/or memory proportional to password
    length, so an unbounded password is a potential denial-of-service vector;
    the cap is far larger than any legitimate password.

    .. note::
        Applications wishing to use a different limit should set the
        ``PASSLIB_MAX_PASSWORD_SIZE`` environmental variable before
        Passlib is loaded. The value can be any large positive integer.

    .. versionadded:: 1.6
    """

    def __init__(self):
        # Fixed message; the size cap also sidesteps a glibc crypt() segfault,
        # detailed at http://www.openwall.com/lists/oss-security/2011/11/15/1
        ValueError.__init__(self, "password exceeds maximum allowed size")
#=============================================================================
# warnings
#=============================================================================
class PasslibWarning(UserWarning):
    """Common base class for all of Passlib's user warnings; derives from the
    builtin :exc:`UserWarning`.

    .. versionadded:: 1.6
    """
class PasslibConfigWarning(PasslibWarning):
    """Issued for non-fatal issues in a
    :class:`~passlib.context.CryptContext` configuration.

    The two primary triggers are:

    * the CryptContext contains rounds limits which exceed the hard limits
      imposed by the underlying algorithm;
    * an explicit rounds value was provided which exceeds the limits imposed
      by the CryptContext.

    Either way the code still performs correctly and securely; the warning is
    a sign the configuration may need updating.

    .. versionadded:: 1.6
    """

class PasslibHashWarning(PasslibWarning):
    """Issued for non-fatal issues with parameters or hash strings passed to
    a passlib hash class.

    The two primary triggers are:

    * an explicit rounds value (or other setting) exceeded the handler's
      limits and was clamped via the :ref:`relaxed<relaxed-keyword>` flag;
    * a malformed-but-parsable hash string was encountered which should be
      re-encoded.

    .. versionadded:: 1.6
    """

class PasslibRuntimeWarning(PasslibWarning):
    """Issued when something unexpected happens during runtime.

    Being a warning rather than an error means Passlib corrected for the
    issue, but it is anomalous enough that the developers would love to hear
    under what conditions it occurred.

    .. versionadded:: 1.6
    """

class PasslibSecurityWarning(PasslibWarning):
    """Special warning issued when Passlib encounters something that might
    affect security.

    .. versionadded:: 1.6
    """
#=============================================================================
# error constructors
#
# note: these functions are used by the hashes in Passlib to raise common
# error messages. They are currently just functions which return ValueError,
# rather than subclasses of ValueError, since the specificity isn't needed
# yet; and who wants to import a bunch of error classes when catching
# ValueError will do?
#=============================================================================
def _get_name(handler):
return handler.name if handler else "<unnamed>"
#------------------------------------------------------------------------
# generic helpers
#------------------------------------------------------------------------
def type_name(value):
    "return pretty-printed string containing name of value's type"
    cls = value.__class__
    module = cls.__module__
    # Fully qualify non-builtin types; builtins keep the short name.
    if module and module not in ("__builtin__", "builtins"):
        return "%s.%s" % (module, cls.__name__)
    if value is None:
        return 'None'
    return cls.__name__

def type_name(value):
    "return pretty-printed string containing name of value's type"
    cls = value.__class__
    module = cls.__module__
    if module and module not in ("__builtin__", "builtins"):
        return "%s.%s" % (module, cls.__name__)
    if value is None:
        return 'None'
    return cls.__name__

def ExpectedTypeError(value, expected, param):
    "error message when param was supposed to be one type, but found another"
    # NOTE: value is never displayed, since it may sometimes be a password.
    return TypeError("%s must be %s, not %s" % (param, expected, type_name(value)))

def ExpectedStringError(value, param):
    "error message when param was supposed to be unicode or bytes"
    return ExpectedTypeError(value, "unicode or bytes", param)
#------------------------------------------------------------------------
# encrypt/verify parameter errors
#------------------------------------------------------------------------
def MissingDigestError(handler=None):
    "raised when verify() method gets passed config string instead of hash"
    name = _get_name(handler)
    return ValueError("expected %s hash, got %s config string instead" %
                      (name, name))

def NullPasswordError(handler=None):
    "raised by OS crypt() supporting hashes, which forbid NULLs in password"
    return ValueError("%s does not allow NULL bytes in password" %
                      _get_name(handler))
#------------------------------------------------------------------------
# errors when parsing hashes
#------------------------------------------------------------------------
def InvalidHashError(handler=None):
    "error raised if unrecognized hash provided to handler"
    return ValueError("not a valid %s hash" % _get_name(handler))

def MalformedHashError(handler=None, reason=None):
    "error raised if recognized-but-malformed hash provided to handler"
    message = "malformed %s hash" % _get_name(handler)
    if reason:
        message += " (%s)" % (reason,)
    return ValueError(message)

def ZeroPaddedRoundsError(handler=None):
    "error raised if hash was recognized but contained zero-padded rounds field"
    return MalformedHashError(handler, "zero-padded rounds")
#------------------------------------------------------------------------
# settings / hash component errors
#------------------------------------------------------------------------
def ChecksumSizeError(handler, raw=False):
    "error raised if hash was recognized, but checksum was wrong size"
    # TODO: if handler.use_defaults is set, this came from app-provided value,
    # not from parsing a hash string, might want different error msg.
    unit = "bytes" if raw else "chars"
    reason = "checksum must be exactly %d %s" % (handler.checksum_size, unit)
    return MalformedHashError(handler, reason)
#=============================================================================
# eof
#============================================================================= | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.web.websocket.stomp.websocketstomporderedmessages
import org.springframework.context.annotation.Configuration
import org.springframework.messaging.simp.config.MessageBrokerRegistry
import org.springframework.web.socket.config.annotation.EnableWebSocketMessageBroker
import org.springframework.web.socket.config.annotation.WebSocketMessageBrokerConfigurer
// tag::snippet[]
/**
 * WebSocket STOMP broker configuration that asks the broker to preserve the
 * order in which messages are published to clients
 * (via [MessageBrokerRegistry.setPreservePublishOrder]).
 */
@Configuration
@EnableWebSocketMessageBroker
class PublishOrderWebSocketConfiguration : WebSocketMessageBrokerConfigurer {

	override fun configureMessageBroker(registry: MessageBrokerRegistry) {
		// ...
		registry.setPreservePublishOrder(true)
	}
}
// end::snippet[] | kotlin | github | https://github.com/spring-projects/spring-framework | framework-docs/src/main/kotlin/org/springframework/docs/web/websocket/stomp/websocketstomporderedmessages/PublishOrderWebSocketConfiguration.kt |
"""
VimPdb.py
Pdb simulation within Vim (in an IDE like fashion).
Author:
Yaron Budowski
"""
import bdb
import vim
import time
import sys
import os
class PdbIDE(bdb.Bdb):
"""Simulates a Python debugger in an IDE-like mode (unlike PDB, which acts as a command-line console debugger)."""
#
# Constants
#
# The number of seconds to wait in the wait_in_debug() waiting loop.
PAUSE_DEBUG_WAIT_TIME = 0.2
# Various messages displayed to the user.
MESSAGE_NOT_IN_DEBUG_MODE = 'Error: Debugging not started yet'
MESSAGE_STARTING_DEBUG = 'Starting debugging...'
MESSAGE_PROGRAM_ENDED = 'Program ended. Restart debug to rerun program'
MESSAGE_ALREADY_AT_OLDEST_FRAME = 'Error: Already at oldest stack frame'
MESSAGE_ALREADY_AT_NEWEST_FRAME = 'Error: Already at newest stack frame'
MESSAGE_PROGRAM_ENDED_VIA_SYS_EXIT = 'Program ended via sys.exit(). Exit status: %d'
MESSAGE_PROGRAM_ENDED_UNCAUGHT_EXCEPTION = 'Program ended due to an uncaught exception.'
MESSAGE_NO_CONDITIONAL_BREAKPOINT = 'Error: No conditional breakpoint in current line'
MESSAGE_BREAKPOINT_CONDITION = 'Breakpoint Condition: %s'
MESSAGE_JUMP_ONLY_AT_BOTTOM_FRAME = 'Error: Can only jump to line within the bottom stack frame'
MESSAGE_JUMP_ONLY_IN_CURRENT_FILE = 'Error: Can only jump to line within the currently debugged file'
# Breakpoint types (used when saving\loading breakpoints from files).
BREAKPOINT_TYPE_REGULAR = 'regular'
BREAKPOINT_TYPE_TEMPORARY = 'temporary'
BREAKPOINT_TYPE_CONDITIONAL = 'conditional'
BREAKPOINT_TYPES = [BREAKPOINT_TYPE_REGULAR, BREAKPOINT_TYPE_CONDITIONAL, BREAKPOINT_TYPE_TEMPORARY]
    def __init__(self):
        """Initialize debugger state; the session itself starts via start_debugging()."""
        # Initialize the parent Bdb class.
        bdb.Bdb.__init__(self)
        # Used so we won't pause until the main script is loaded completely.
        self.wait_for_script_start = False
        self.main_filename = None
        # wait_in_debug() loops until pause_debug becomes False.
        self.pause_debug = False
        # Currently debugged filename & line.
        self.current_filename = None
        self.current_line = -1
        # Currently selected frame and its position within self.stack.
        self.current_frame = None
        self.current_stack_index = 0
        self.stack = []
        # Queue of [method_name, parameters] items. Vim-side commands
        # (VimPdb.vim) enqueue Bdb calls here instead of invoking them
        # directly, so they run from this instance's thread via
        # run_queued_methods() -- calling Bdb methods from the main Vim
        # thread can freeze or crash Vim.
        self.methods_to_run = []
        # The return value of the most recently executed queued method.
        self.last_method_return_value = None
def start_debugging(self, filename, stop_immediately = True, args = []):
"""Starts a debug session for a file. If stop_immediately is set, session is paused on the first line of program."""
self.print_message(self.MESSAGE_STARTING_DEBUG)
new_globals = { '__name__': '__main__' }
new_locals = new_globals
self.wait_for_script_start = True # So we won't break before we reach the first line of the script being debugged.
self.stop_immediately = stop_immediately
self.main_filename = self.canonic(filename)
self.current_filename = self.main_filename
self.current_line = 1
# Highlight the breakpoints.
self.highlight_breakpoints(self.main_filename, *self.get_breakpoints_for_file(self.main_filename))
# Replace main directory with running script's directory in front of module search path.
sys.path[0] = os.path.dirname(self.main_filename)
try:
# Set command line arguments.
sys.argv = [self.main_filename] + args
# Run the script.
statement = 'execfile(r"%s")' % (self.main_filename)
self.run(statement, globals = new_globals, locals = new_locals)
# Program ended.
self.print_message(self.MESSAGE_PROGRAM_ENDED)
self.clear_current_line_highlighting()
self.clear_breakpoints_highlighting()
except SystemExit:
self.print_message(self.MESSAGE_PROGRAM_ENDED_VIA_SYS_EXIT % (sys.exc_info()[1]))
self.clear_current_line_highlighting()
self.clear_breakpoints_highlighting()
except:
self.print_message(self.MESSAGE_PROGRAM_ENDED_UNCAUGHT_EXCEPTION)
raise
self.clear_current_line_highlighting()
self.clear_breakpoints_highlighting()
def stop_debugging(self):
"""Stops the debugging session."""
if (not self.is_debugged()):
self.print_message(self.MESSAGE_NOT_IN_DEBUG_MODE)
return
self.quitting = True
#
# Debugging methods
#
def do_continue(self):
"""Continues the deugging session until reaching a breakpoint, etc."""
if (self.current_frame is None):
self.print_message(self.MESSAGE_NOT_IN_DEBUG_MODE)
return
self.set_continue()
self.pause_debug = False
def do_continue_until_return(self):
"""Continues running until returning from the current frame."""
if (self.current_frame is None):
self.print_message(self.MESSAGE_NOT_IN_DEBUG_MODE)
return
self.set_return(self.current_frame)
self.pause_debug = False
def do_step_into(self):
"""Does step into."""
if (self.current_frame is None):
self.print_message(self.MESSAGE_NOT_IN_DEBUG_MODE)
return
self.set_step()
self.pause_debug = False
def do_step_over(self):
"""Does step over (doesn't enter any functions in between)."""
if (self.current_frame is None):
self.print_message(self.MESSAGE_NOT_IN_DEBUG_MODE)
return
self.set_next(self.current_frame)
self.pause_debug = False
    def do_move_up_in_stack_frame(self):
        """Moves up one level in the stack frame (toward older frames)."""
        if (not self.is_debugged()):
            self.print_message(self.MESSAGE_NOT_IN_DEBUG_MODE)
            return
        # Index 2 is treated as the oldest user frame: the first two stack
        # entries belong to the debugger's own run() machinery rather than to
        # the debugged program (do_print_stack_trace skips them as well).
        if (self.current_stack_index <= 2):
            self.print_message(self.MESSAGE_ALREADY_AT_OLDEST_FRAME)
            return
        self.current_stack_index -= 1
        self.current_frame = self.stack[self.current_stack_index][0]
        # Move the editor to the newly selected frame's current line.
        self.goto_current_line(self.current_frame)
def do_move_down_in_stack_frame(self):
"""Moves down one level in the stack frame."""
if (not self.is_debugged()):
self.print_message(self.MESSAGE_NOT_IN_DEBUG_MODE)
return
if (self.current_stack_index + 1== len(self.stack)):
self.print_message(self.MESSAGE_ALREADY_AT_NEWEST_FRAME)
return
self.current_stack_index += 1
self.current_frame = self.stack[self.current_stack_index][0]
self.goto_current_line(self.current_frame)
def do_toggle_breakpoint(self, filename, line_number, condition = None, temporary = False):
"""Sets\unsets a breakpoint."""
if (not self.is_debugged()):
self.print_message(self.MESSAGE_NOT_IN_DEBUG_MODE)
return
if (not self.is_code_line(filename, line_number)):
# Not a code line.
return
# First, prepare a list of all available breakpoints for this file.
breakpoints = self.get_file_breaks(filename)[:] # Make a copy so we won't be affected by changes.
if (line_number in breakpoints):
# Unset breakpoint.
self.clear_break(filename, line_number)
else:
# Set the breakpoint.
self.set_break(filename, line_number, int(temporary), condition)
# Re-Highlight the breakpoints.
self.highlight_breakpoints(filename, *self.get_breakpoints_for_file(filename))
def do_print_breakpoint_condition(self, filename, line_number):
"""Prints the condition of a breakpoint at the specified line number."""
if (not self.is_debugged()):
self.print_message(self.MESSAGE_NOT_IN_DEBUG_MODE)
return
# First, prepare a list of all available breakpoints for this file.
conditional_breakpoints = self.get_conditional_breakpoints(filename)
if (line_number not in conditional_breakpoints):
self.print_message(self.MESSAGE_NO_CONDITIONAL_BREAKPOINT)
return
breakpoint_instances = self.get_breaks(filename, line_number)
for breakpoint in breakpoint_instances:
if (breakpoint.cond):
self.print_message(self.MESSAGE_BREAKPOINT_CONDITION % (breakpoint.cond))
return
    def do_clear_all_breakpoints(self, filename = None):
        """Clears all breakpoints. If filename is specified, only breakpoints for that filename are cleared."""
        if (filename is None):
            self.clear_all_breaks()
            # Re-Highlight the breakpoints.
            # NOTE(review): filename is None on this path, so this re-highlights
            # breakpoints for a None file -- looks wrong (the active file was
            # probably intended). Verify against highlight_breakpoints() /
            # get_breakpoints_for_file() handling of None before changing.
            self.highlight_breakpoints(filename, *self.get_breakpoints_for_file(filename))
            return
        # Get all breakpoints for specified file and clear them one by one.
        file_breaks = self.get_file_breaks(filename)
        for line_number in file_breaks:
            self.clear_break(filename, line_number)
        # Re-Highlight the breakpoints.
        self.highlight_breakpoints(filename, *self.get_breakpoints_for_file(filename))
def do_clear(self, breakpoint_number):
"""Clears a specified breakpoint by number."""
self.clear_bpbynumber(breakpoint_number)
# Re-Highlight the breakpoints.
self.highlight_breakpoints(self.current_filename, *self.get_breakpoints_for_file(self.current_filename))
def do_eval(self, expression):
"""Evaluates an expression in the current debugging context."""
if (self.current_frame is None):
self.print_message(self.MESSAGE_NOT_IN_DEBUG_MODE)
return
try:
value = eval(expression, self.current_frame.f_globals, self.current_frame.f_locals)
self.print_message(value)
except:
(exc_type, value, traceback) = sys.exc_info()
if (not isinstance(exc_type, str)):
exc_type_name = exc_type.__name__
else:
exc_type_name = exc_type
self.print_message('%s: %s' % (exc_type_name, value))
def do_exec(self, statement):
"""Executes a statement in the current debugging context."""
if (self.current_frame is None):
self.print_message(self.MESSAGE_NOT_IN_DEBUG_MODE)
return
exec_locals = self.current_frame.f_locals
exec_globals = self.current_frame.f_locals
try:
code = compile(statement + '\n', '<stdin>', 'single')
exec code in exec_globals, exec_locals
except:
(exc_type, value, traceback) = sys.exc_info()
if (not isinstance(exc_type, str)):
exc_type_name = exc_type.__name__
else:
exc_type_name = exc_type
self.print_message('%s: %s' % (exc_type_name, value))
def do_jump(self, filename, line_number):
"""Jumps to a specified line in the currently debugged file."""
if (self.current_stack_index + 1 != len(self.stack)):
self.print_message(self.MESSAGE_JUMP_ONLY_AT_BOTTOM_FRAME)
return
if (self.canonic(filename) != self.current_filename):
self.print_message(self.MESSAGE_JUMP_ONLY_IN_CURRENT_FILE)
return
try:
self.current_frame.f_lineno = line_number
self.stack[self.current_stack_index] = (self.stack[self.current_stack_index][0], line_number)
self.goto_current_line(self.current_frame)
except ValueError, exc:
self.print_message('Error: %s' % (exc))
    def do_print_stack_trace(self):
        """Prints the stack trace, marking the currently selected frame."""
        output_stack_traces = []
        # Prepare the stack trace string.
        # NOTE(review): self.current_stack_entry_prefix, self.stack_entry_prefix
        # and self.stack_entries_joiner are not set in __init__ within this
        # file chunk; presumably configured elsewhere (e.g. from the Vim side).
        # Confirm they exist before this method can be called.
        for current_stack_frame in self.stack[2:]: # Skip the first two entries (which aren't really part of the debugged code)
            (frame, line_number) = current_stack_frame
            if (frame is self.current_frame):
                output_stack_traces.append(self.current_stack_entry_prefix + self.format_stack_entry(current_stack_frame))
            else:
                output_stack_traces.append(self.stack_entry_prefix + self.format_stack_entry(current_stack_frame))
        final_stack_trace = self.stack_entries_joiner.join(output_stack_traces)
        self.print_message('Stack Trace:\n' + final_stack_trace)
def goto_current_line(self, frame, display = True):
"""Moves the cursor to the currently debugged line, in the appropriate file. If display == False, don't highlight or move the cursor."""
if (not self.is_debugged()):
return
# Get the line number & filename.
line_number = frame.f_lineno
filename = self.canonic(frame.f_code.co_filename)
self.current_filename = filename
self.current_line = line_number
if (display):
# Load the file for editing (even if the file is not currently opened).
self.open_file(filename)
self.set_cursor_position(self.current_line, 0)
self.highlight_current_line(self.current_filename, self.current_line)
#
# Queue related methods
#
def add_queued_method(self, function_name, *parameters):
"""Adds a method to the methods to run queue. It will be called indirectly by run_queued_methods"""
self.methods_to_run.append([function_name, parameters])
def run_queued_methods(self):
"""Executes any methods queued for execution. Used so that the methods will be executed from this instance's
thread context (and not from the main Vim thread)."""
while (len(self.methods_to_run) > 0):
# Get the next method to run.
method_to_run = self.methods_to_run[0]
self.methods_to_run = self.methods_to_run[1:]
(function_name, parameters) = method_to_run
if (not hasattr(self, function_name)):
# Function doesn't exist.
raise
# TODO
#continue
# Run the function.
function_pointer = getattr(self, function_name)
self.last_method_return_value = function_pointer(*parameters)
def wait_in_debug(self, frame, traceback = None):
"""Loops as long as self.pause_debug is True."""
# Save the current frame, etc.
(self.stack, self.current_stack_index) = self.get_stack(frame, traceback)
self.current_frame = self.stack[self.current_stack_index][0]
self.goto_current_line(frame)
while ((self.pause_debug) and (not self.quitting)):
time.sleep(self.PAUSE_DEBUG_WAIT_TIME)
# Run any queued methods.
self.run_queued_methods()
self.pause_debug = True
#
# Saving\Restoring breakpoints methods
#
def is_breakpoint_enabled(self, filename, line):
"""Returns True if a breakpoint is enabled at the specified filename & line. False otherwise."""
if (self.get_breaks(filename, line)):
return True
else:
return False
def highlight_breakpoints_for_file(self, filename):
"""Highlights breakpoints for a given filename."""
self.highlight_breakpoints(self.canonic(filename), *self.get_breakpoints_for_file(self.canonic(filename)))
def highlight_current_line_for_file(self, filename):
"""Highlights current line for a given filename."""
canonic_filename = self.canonic(filename)
if (self.current_filename != canonic_filename):
# The given filename is not the currently debugged file.
return
self.highlight_current_line(canonic_filename, self.current_line)
def get_breakpoints(self):
"""Returns a list of active breakpoints."""
file_breakpoints = self.get_all_breaks()
returned_breakpoints = []
for filename in file_breakpoints.keys():
for line_number in file_breakpoints[filename]:
for breakpoint in self.get_breaks(filename, line_number):
new_breakpoint = {}
new_breakpoint['filename'] = filename
new_breakpoint['line'] = breakpoint.line
if (breakpoint.cond):
new_breakpoint['type'] = self.BREAKPOINT_TYPE_CONDITIONAL
new_breakpoint['condition'] = breakpoint.cond
elif (breakpoint.temporary):
new_breakpoint['type'] = self.BREAKPOINT_TYPE_TEMPORARY
else:
new_breakpoint['type'] = self.BREAKPOINT_TYPE_REGULAR
returned_breakpoints.append(new_breakpoint)
return returned_breakpoints
def set_breakpoints(self, breakpoints):
"""Sets\Adds breakpoints from a list of breakpoints."""
for breakpoint in breakpoints:
condition = None
temporary = False
if (breakpoint['type'] == self.BREAKPOINT_TYPE_CONDITIONAL):
condition = breakpoint['condition']
elif (breakpoint['type'] == self.BREAKPOINT_TYPE_TEMPORARY):
temporary = True
# Set the breakpoint
self.set_break(breakpoint['filename'], breakpoint['line'], int(temporary), condition)
# Re-highlight all of the breakpoints.
self.highlight_breakpoints(self.get_active_filename(), *self.get_breakpoints_for_file(self.get_active_filename()))
def load_breakpoints_from_file(self, filename):
"""Loads breakpoints from a file."""
if (not os.path.exists(filename)):
self.print_message('Error: File "%s" does not exist!' % (filename))
return
new_breakpoints = []
# First, clear all breakpoints.
#self.do_clear_all_breakpoints()
breakpoints_file = open(filename, 'rb')
# Load the breakpoints from the given file.
index = 0
for line in breakpoints_file.xreadlines():
line = line.strip()
index += 1
if (len(line) == 0):
continue
breakpoint_properties = line.split('\t')
if ((len(breakpoint_properties) < 3) or (len(breakpoint_properties) > 4)):
self.print_message('Error: Invalid line #%d at file "%s"' % (index, filename))
return
(breakpoint_filename, breakpoint_line, breakpoint_type) = breakpoint_properties[:3]
breakpoint_type = breakpoint_type.lower()
try:
breakpoint_line = int(breakpoint_line)
except ValueError:
self.print_message('Error: Invalid breakpoint line number in line #%d at file "%s"' % (index, filename))
return
if (breakpoint_type not in self.BREAKPOINT_TYPES):
self.print_message('Error: Invalid breakpoint type in line #%d at file "%s"' % (index, filename))
return
if ((breakpoint_type == self.BREAKPOINT_TYPE_CONDITIONAL) and (len(breakpoint_properties) != 4)):
self.print_message('Error: Missing/invalid breakpoint condition in line #%d at file "%s"' % (index, filename))
return
condition = None
temporary = False
if (breakpoint_type == self.BREAKPOINT_TYPE_CONDITIONAL):
condition = breakpoint_properties[3]
elif (breakpoint_type == self.BREAKPOINT_TYPE_TEMPORARY):
temporary = True
new_breakpoint = {}
new_breakpoint['filename'] = breakpoint_filename
new_breakpoint['line'] = breakpoint_line
new_breakpoint['type'] = breakpoint_type
new_breakpoint['condition'] = condition
new_breakpoint['temporary'] = temporary
new_breakpoints.append(new_breakpoint)
breakpoints_file.close()
# Set the loaded breakpoints.
self.set_breakpoints(new_breakpoints)
def save_breakpoints_to_file(self, filename):
"""Saves all active breakpoints to a file."""
breakpoints_file = open(filename, 'wb')
breakpoints = self.get_breakpoints()
for breakpoint in breakpoints:
line = '%s\t%s\t%s' % (breakpoint['filename'], breakpoint['line'], breakpoint['type'])
if (breakpoint['type'] == self.BREAKPOINT_TYPE_CONDITIONAL):
line += '\t' + breakpoint['condition']
breakpoints_file.write(line + '\n')
breakpoints_file.close()
#
# Helper methods
#
def is_debugged(self):
"""Checks whether or not there active debugging currently enabled."""
#if ((not hasattr(self, 'quitting')) or (self.quitting) or (not self.current_frame)):
if ((not hasattr(self, 'quitting')) or (self.quitting)):
return False
else:
return True
def is_exit_frame(self, frame):
"""Tests whether or not the current frame is of the exit frame."""
if (self.canonic(frame.f_code.co_filename) == '<string>'):
return True
else:
return False
def get_conditional_breakpoints(self, filename):
"""Returns a list of line numbers with conditional breakpoints for a given filename."""
conditional_breakpoints = []
# First, get the line numbers which have breakpoints set in them.
file_breaks = self.get_file_breaks(filename)
for line_number in file_breaks:
breakpoint_instances = self.get_breaks(filename, line_number)
for breakpoint in breakpoint_instances:
if (breakpoint.cond):
# Found a conditional breakpoint - add it to the list.
conditional_breakpoints.append(line_number)
return conditional_breakpoints
def get_temporary_breakpoints(self, filename):
"""Returns a list of line numbers with temporary breakpoints for a given filename."""
temporary_breakpoints = []
# First, get the line numbers which have breakpoints set in them.
file_breaks = self.get_file_breaks(filename)
for line_number in file_breaks:
breakpoint_instances = self.get_breaks(filename, line_number)
for breakpoint in breakpoint_instances:
if (breakpoint.temporary):
# Found a temporary breakpoint - add it to the list.
temporary_breakpoints.append(line_number)
return temporary_breakpoints
def get_breakpoints_for_file(self, filename):
    """Return (regular, conditional, temporary) breakpoint line numbers for
    *filename*.

    Lines carrying a conditional or temporary breakpoint are excluded from
    the regular list.
    """
    conditional_breakpoints = self.get_conditional_breakpoints(filename)
    temporary_breakpoints = self.get_temporary_breakpoints(filename)
    # Build a fresh filtered list instead of calling list.remove() while
    # iterating the same list (the original skipped the element following
    # each removal, so consecutive conditional/temporary lines leaked into
    # the regular list).
    special = set(conditional_breakpoints) | set(temporary_breakpoints)
    regular_breakpoints = [line for line in self.get_file_breaks(filename)
                           if line not in special]
    return (regular_breakpoints, conditional_breakpoints, temporary_breakpoints)
def is_code_line(self, filename, line):
    """Return True if *line* of *filename* looks like executable code.

    Heuristic only (as the original warned): blank lines, '#' comments and
    lines opening with a triple quote are rejected; everything else passes.
    """
    import linecache
    text = linecache.getline(self.canonic(filename), line)
    if not text:
        # Past end of file, unreadable file, or an unreadable line.
        return False
    text = text.strip()
    if not text or text.startswith(('#', '"""', "'''")):
        return False
    return True
#
# Overridden Bdb methods
#
def format_stack_entry(self, stack_frame):
    """Render a (frame, line_number) pair through self.stack_entry_format.

    The format string receives a mapping with the keys: filename, dir, line,
    function, args, return_value and source_line.
    """
    import linecache
    frame, line_number = stack_frame
    directory, filename = os.path.split(self.canonic(frame.f_code.co_filename))
    function_name = frame.f_code.co_name or '<lambda>'
    args = frame.f_locals.get('__args__', '')
    if '__return__' in frame.f_locals:
        return_value = '-> %s' % (frame.f_locals['__return__'])
    else:
        return_value = ''
    # NOTE(review): getline is handed the basename, not the full path, so the
    # source line is typically empty unless the file is in the cwd --
    # historical behavior, deliberately kept as-is.
    source_line = linecache.getline(filename, line_number)
    source_line = source_line.strip() if source_line else ''
    return self.stack_entry_format % {
        'filename': filename, 'dir': directory, 'line': line_number,
        'function': function_name, 'args': args,
        'return_value': return_value, 'source_line': source_line}
def user_call(self, frame, args):
    """Bdb callback: a function call event in the debuggee.

    Ignored until the main script has actually started (and after quitting);
    otherwise pauses the debugger if this frame is a stop point.
    """
    if ((self.wait_for_script_start) or (self.quitting)):
        # Haven't reached the start of the script yet.
        return
    if (self.stop_here(frame)):
        # Change the cursor position to the currently debugged line.
        self.wait_in_debug(frame)
def user_line(self, frame):
    """Called when we stop or break at this line."""
    if (self.quitting):
        return
    if (self.wait_for_script_start):
        # Still waiting for execution to enter the main script.
        if ((self.main_filename != self.canonic(frame.f_code.co_filename)) or (frame.f_lineno <= 0)):
            # Haven't reached the start of the script yet.
            return
        # Reached the start of the main script being debugged.
        self.wait_for_script_start = False
        if (not self.stop_immediately):
            # Debugging should start without pausing immediately.
            self.set_continue()
            self.pause_debug = False
        else:
            self.pause_debug = True
    # Move to the current line being debugged.
    self.wait_in_debug(frame)
def user_return(self, frame, return_value):
    """Called when a return trap is set here.

    Stores the return value in the frame locals (as '__return__') so the
    stack formatter can display it, then pauses in the debugger.
    """
    if (self.quitting):
        return
    if (self.is_exit_frame(frame)):
        # It's the last frame: the debugged program has finished.
        self.print_message(self.MESSAGE_PROGRAM_ENDED)
        self.clear_current_line_highlighting()
        self.clear_breakpoints_highlighting()
        return
    frame.f_locals['__return__'] = return_value
    self.pause_debug = False
    self.wait_in_debug(frame)
def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
    """Called if an exception occurs, but only if we are to stop at or just below this level."""
    if (self.quitting):
        return
    # Record the exception in the frame locals for later inspection.
    frame.f_locals['__exception__'] = exc_type, exc_value
    if (type(exc_type) == type('')):
        # Legacy string exceptions carry their name directly.
        exc_type_name = exc_type
    else:
        exc_type_name = exc_type.__name__
    if (self.is_exit_frame(frame)):
        # It's the last frame: the debugged program has finished.
        self.print_message(self.MESSAGE_PROGRAM_ENDED)
        self.clear_current_line_highlighting()
        self.clear_breakpoints_highlighting()
        return
    self.print_message("%s: %s" % (exc_type_name, exc_value))
    self.wait_in_debug(frame)
#
# Methods to be overridden by the editor-specific child class.
#
def print_message(self, message):
    """Prints a message to the editor console. Editor subclasses must override."""
    raise NotImplementedError()
def set_cursor_position(self, row, column):
    """Sets the cursor position for the current editor window. Must be overridden."""
    raise NotImplementedError()
def highlight_breakpoints(self, filename, regular_breakpoints, conditional_breakpoints, temporary_breakpoints):
    """Highlights the active breakpoints in the given file. Must be overridden."""
    raise NotImplementedError()
def highlight_current_line(self, filename, line):
    """Highlights the current debugged line. Must be overridden."""
    raise NotImplementedError()
def clear_current_line_highlighting(self):
    """Clears the highlighting of the current debugged line. Must be overridden."""
    raise NotImplementedError()
def clear_breakpoints_highlighting(self):
    """Clears the highlighting for the breakpoints. Must be overridden."""
    raise NotImplementedError()
def open_file(self, filename):
    """Opens a file for editing. Must be overridden."""
    raise NotImplementedError()
def get_active_filename(self):
    """Returns the filename of the active window. Must be overridden."""
    raise NotImplementedError()
class VimPdb(PdbIDE):
"""Integrates the Pdb IDE into Vim."""
#
# Constants
#
# The Vim group name used for highlighting the currently debugged line.
CURRENT_LINE_GROUP = 'PdbCurrentLineTemp'
USER_DEFINED_CURRENT_LINE_GROUP = 'PdbCurrentLine'
# The Vim group name used for highlighting the breakpoint line.
BREAKPOINT_GROUP = 'PdbBreakpoint'
# The Vim group name used for highlighting the conditional breakpoint line.
CONDITIONAL_BREAKPOINT_GROUP = 'PdbConditionalBreakpoint'
# The Vim group name used for highlighting the temporary breakpoint line.
TEMPORARY_BREAKPOINT_GROUP = 'PdbTemporaryBreakpoint'
def __init__(self):
# Initialize the parent PdbIDE class.
PdbIDE.__init__(self)
# The output buffer used when print_message() is called.
self.output_buffer = None
self.save_to_output_buffer = False
#
# Overridden methods, which implement the editor-specific functionalities.
#
def print_message(self, message):
"""Prints a message to the Vim console."""
if (self.save_to_output_buffer):
self.output_buffer = message
else:
print message
def set_cursor_position(self, row, column):
"""Sets the cursor position for the current Vim buffer."""
# Move to the right line.
self.normal_command('%dG' % (row))
# Move to the right column.
self.normal_command('0%dl' % (column))
def highlight_breakpoints(self, filename, regular_breakpoints, conditional_breakpoints, temporary_breakpoints):
"""Highlights the active breakpoints in the given file."""
self.clear_breakpoints_highlighting()
self._set_lines_highlighting(regular_breakpoints, self.BREAKPOINT_GROUP)
self._set_lines_highlighting(conditional_breakpoints, self.CONDITIONAL_BREAKPOINT_GROUP)
self._set_lines_highlighting(temporary_breakpoints, self.TEMPORARY_BREAKPOINT_GROUP)
def highlight_current_line(self, filename, line):
"""Highlights the current debugged line."""
if (self.canonic(vim.current.buffer.name) != filename):
# Current buffer isn't the last debugged filename.
return
self.command(r'highlight link %s %s' % (self.CURRENT_LINE_GROUP, self.USER_DEFINED_CURRENT_LINE_GROUP))
self.command(r'match %s "\%%%dl.\+"' % (self.CURRENT_LINE_GROUP, line))
def clear_current_line_highlighting(self):
"""Clears the highlighting of the current debugged line."""
self.command(r'highlight link %s NONE' % (self.CURRENT_LINE_GROUP))
def clear_breakpoints_highlighting(self):
"""Clears the highlighting for the breakpoints."""
self.command(r'syntax clear %s' % (self.BREAKPOINT_GROUP))
self.command(r'syntax clear %s' % (self.CONDITIONAL_BREAKPOINT_GROUP))
self.command(r'syntax clear %s' % (self.TEMPORARY_BREAKPOINT_GROUP))
def open_file(self, filename):
"""Opens a file for editing."""
if (self.canonic(vim.current.buffer.name) != filename):
vim_filename = filename.replace(' ', r'\ ')
self.command('e ' + filename)
def get_active_filename(self):
"""Returns the filename of the active buffer."""
return vim.current.buffer.name.replace(r'\ ', ' ')
def set_cursor_to_current_line(self):
"""Moves the cursor to the current debugged line."""
self.open_file(self.current_filename)
self.set_cursor_position(self.current_line, 0)
#
# Queue related methods
#
def run_method(self, function_name, *parameters):
"""Runs a method (using add_queued_method) and waits for its output; then prints it onto the screen."""
self.output_buffer = None
self.save_to_output_buffer = True
self.add_queued_method(function_name, *parameters)
while (self.output_buffer == None):
time.sleep(self.PAUSE_DEBUG_WAIT_TIME)
self.save_to_output_buffer = False
self.print_message(self.output_buffer)
def run_method_and_return_output(self, function_name, *parameters):
"""Runs a method (using add_queued_method) and waits for it to finish running;
then returns its return value."""
self.save_to_output_buffer = False
self.last_method_return_value = None
self.add_queued_method(function_name, *parameters)
while (self.last_method_return_value == None):
time.sleep(self.PAUSE_DEBUG_WAIT_TIME)
return self.last_method_return_value
#
# Helper methods
#
def normal_command(self, command):
"""Runs a command in normal mode."""
self.command('normal ' + command)
def command(self, command):
"""Runs a Vim (ex-mode) command"""
vim.command(command)
def _set_lines_highlighting(self, line_numbers, group_name):
"""Sets highlighting for a group of line numbers (given a group name)."""
for line_number in line_numbers:
self.command(r'syntax match %s "\%%%dl.\+"' % (group_name, line_number))
# Old method - doesn't work for line #1, and when the previous line ends with a quotation mark
# of the end of a string, for example.
# Highlight each group of lines.
#for line_range in line_ranges:
# self.command(r'syntax region %s start="\%%%dl$" end="\%%%dl.\+"' %
# (group_name, line_range['start'] - 1, line_range['end'])) | unknown | codeparrot/codeparrot-clean | ||
"""
destroy_cached_images.py
This script is used to clean up Glance images that are cached in the SR. By
default, this script will only clean up unused cached images.
Options:
--dry_run - Don't actually destroy the VDIs
--all_cached - Destroy all cached images instead of just unused cached
images.
"""
import eventlet
eventlet.monkey_patch()
import os
import sys
from oslo_config import cfg
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
# NOTE(review): walks three directory levels up from this script -- assumes
# the script lives three levels below the nova checkout root; verify.
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                   os.pardir,
                                   os.pardir,
                                   os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
    sys.path.insert(0, POSSIBLE_TOPDIR)
import nova.conf
from nova import config
from nova import utils
from nova.virt.xenapi.client import session
from nova.virt.xenapi import vm_utils
# Command-line flags controlling which cached images are destroyed and
# whether the deletion is actually performed.
destroy_opts = [
    cfg.BoolOpt('all_cached',
                default=False,
                help='Destroy all cached images instead of just unused cached'
                ' images.'),
    cfg.BoolOpt('dry_run',
                default=False,
                help='Don\'t actually delete the VDIs.')
]

CONF = nova.conf.CONF
CONF.register_cli_opts(destroy_opts)
def main():
    """Entry point: parse flags, connect to XenServer and purge cached images.

    Prints each destroyed VDI when '--verbose' is passed, then a summary count.
    """
    config.parse_args(sys.argv)
    utils.monkey_patch()
    _session = session.XenAPISession(CONF.xenserver.connection_url,
                                     CONF.xenserver.connection_username,
                                     CONF.xenserver.connection_password)
    sr_ref = vm_utils.safe_find_sr(_session)
    destroyed = vm_utils.destroy_cached_images(
        _session, sr_ref, all_cached=CONF.all_cached,
        dry_run=CONF.dry_run)
    if '--verbose' in sys.argv:
        print '\n'.join(destroyed)
    print "Destroyed %d cached VDIs" % len(destroyed)


if __name__ == "__main__":
    main()
"""
WSGI config for icecreamratings_project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
# Ensure the project directory itself is importable, regardless of the
# server's working directory.
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "icecreamratings_project.settings.production")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
###########################################################
#
# Copyright (c) 2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['PipelineInterpreter']
from xml.dom.minidom import parseString
from tactic_client_lib.common import Common
from handler import Handler
class PipelineInterpreter(object):
    """Walks a pipeline definition (XML) and executes the handler attached to
    each process node, threading each handler's output into the next
    handler's input.  On failure, already-executed handlers are undone in
    reverse order.

    NOTE: this codebase uses 'my' where conventional Python uses 'self'.
    """
    def __init__(my, pipeline_xml, first_process=None):
        # pipeline_xml: XML document describing the pipeline.
        # first_process: optional process name to start from; when omitted
        # the pipeline's own first process is used.
        my.pipeline_xml = pipeline_xml
        my.first_process = first_process
        my.handlers = []        # handlers in execution order (used for undo)
        my.handlers_dict = {}   # process name -> handler instance
        my.package = {}         # initial data delivered to the first handler
    def set_server(my, server):
        """Sets the server connection handed to every handler."""
        my.server = server
    def set_package(my, package):
        """Sets the initial data package delivered to the first process."""
        my.package = package
    def get_handler(my, process_name):
        """Returns the handler executed for *process_name*, or None."""
        return my.handlers_dict.get(process_name)
    def get_handlers(my):
        """Returns every handler executed so far, in execution order."""
        return my.handlers
    def execute(my):
        """Runs the pipeline; on any failure, undoes executed handlers in
        reverse order and re-raises the original exception."""
        from pipeline import Pipeline
        my.pipeline = Pipeline(my.pipeline_xml)
        try:
            # if an initial process is not specified, use an implicit one
            if not my.first_process:
                my.first_process = my.pipeline.get_first_process_name()
            my.handle_process(my.first_process)
        except Exception, e:
            if not my.handlers:
                raise
            print("Failed at handler: ", my.handlers[-1])
            try:
                # make a copy and reverse the handlers
                handlers = my.handlers[:]
                handlers.reverse()
                for handler in handlers:
                    handler.undo()
            except Exception, e:
                print("Could not undo:", str(e) )
                raise
            raise
    def handle_process(my, process, input_process=None):
        '''handle the individual process
        @params
        process - the name of the process to be handled
        input_process - the name of the input process that called
        this process
        '''
        # get the handler and instantiate it
        handler_class = my.pipeline.get_handler_class(process)
        if handler_class:
            try:
                handler = Common.create_from_class_path(handler_class)
            except ImportError:
                raise ImportError("Could not import handler class [%s]" % handler_class)
        else:
            # No explicit handler class: use the pass-through base Handler.
            handler = Handler()
        # pass the options to the handler
        options = my.pipeline.get_action_options(process)
        handler.set_options(options)
        # pass relevant information to the handler
        handler.set_process_name(process)
        handler.set_server(my.server)
        handler.set_pipeline(my.pipeline)
        handler.set_package(my.package)
        # if this is the first process (no input process, then the package
        # is the input
        if not input_process:
            output = my.package.copy()
        else:
            # get input processes and hand over the delivery
            input_handler = my.handlers_dict.get(input_process)
            if input_handler:
                output = input_handler.get_output()
            else:
                output = {}
        # By default, inputs travel through
        handler.set_input( output )
        handler.set_output( output )
        # store the handler and execute
        my.handlers.append(handler)
        my.handlers_dict[process] = handler
        handler.execute()
        # process all of the output handlers. First ask the current handler
        # for the next process
        output_processes = handler.get_next_processes()
        # if output processes is None, then stop this branch completely
        if output_processes == None:
            return
        # otherwise, use the pipeline
        if not output_processes:
            output_processes = my.pipeline.get_output_process_names(process)
        for output_process in output_processes:
            # Recurse into each downstream process, naming this process as
            # the input so its output is handed over.
            my.handle_process(output_process, process)
"""
def execute(my):
dom = parseString(my.pipeline_xml)
root = dom.documentElement
nodes = root.childNodes
try:
for node in nodes:
node_name = node.nodeName
if node_name == "process":
my.handle_process(node)
elif node_name == "pipeline":
my.handle_process(node)
elif node_name == "package":
my.handle_package(node)
except Exception, e:
if not my.handlers:
raise
print("Failed at handler: ", my.handlers[-1])
try:
# make a copy and reverse the handlers
handlers = my.handlers[:]
handlers.reverse()
for handler in handlers:
handler.undo()
except Exception, e:
print("Could not undo:", e.__str())
raise
raise
def handle_process(my, process_node):
# intantiate the package to be delivered to this handler
package = my.package
nodes = process_node.childNodes
for node in nodes:
node_name = node.nodeName
if node_name == "action":
my.handle_action(node, package)
elif node_name == "#text":
continue
else:
attrs = {}
for attr, value in node.attributes.items():
attrs[attr] = value
package[node_name] = attrs
def handle_package(my, package_node):
# intantiate the package to be delivered to this handler
package = my.package
nodes = package_node.childNodes
for node in nodes:
node_name = node.nodeName
if node_name == "#text":
continue
else:
# handle the attributes
attrs = {}
for attr, value in node.attributes.items():
attrs[attr] = value
# handle the vale
if node.firstChild:
value = node.firstChild.nodeValue
attrs["__VALUE__"] = value
package[node_name] = attrs
def handle_action(my, action_node, package):
handler_cls = action_node.getAttribute("class")
try:
handler = Common.create_from_class_path(handler_cls)
except ImportError:
raise ImportError("Could not import handler class [%s]" % handler_cls)
handler.set_server(my.server)
handler.set_package(package)
# hand over the delivery
if my.handlers:
handler.set_input( my.handlers[-1].get_output() )
my.handlers.append(handler)
handler.execute()
""" | unknown | codeparrot/codeparrot-clean | ||
class DictDiff:
    """Holds the outcome of comparing an old dictionary against a new one."""
    __slots__ = ('added', 'removed', 'intersection', 'changed', 'unchanged')

    def __init__(self, added, removed, intersection, changed, unchanged):
        # Keys that appear only in the new dictionary.
        self.added = added
        # Keys that appear only in the old dictionary.
        self.removed = removed
        # Keys common to both dictionaries.
        self.intersection = intersection
        # Common keys whose values differ between the dictionaries.
        self.changed = changed
        # Common keys whose values are equivalent.
        self.unchanged = unchanged


def diff_dicts(a, b):
    """Compute the key-level difference between dictionaries *a* and *b*.

    :Parameters:
        a : `dict`
            The old dictionary, or `None` (treated as empty)
        b : `dict`
            The new dictionary
    """
    old = a or {}
    added = b.keys() - old.keys()
    removed = old.keys() - b.keys()
    common = old.keys() & b.keys()
    # Partition the shared keys by whether their values match.
    unchanged = {key for key in common if old[key] == b[key]}
    changed = common - unchanged
    return DictDiff(added, removed, common, changed, unchanged)
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""Base classes handy for use with PB clients.
"""
from __future__ import absolute_import
from __future__ import print_function
from twisted.cred import error
from twisted.internet import protocol
from twisted.internet import reactor
from twisted.python import log
from twisted.spread import pb
from twisted.spread.pb import PBClientFactory
class ReconnectingPBClientFactory(PBClientFactory,
                                  protocol.ReconnectingClientFactory):
    """Reconnecting client factory for PB brokers.

    Like PBClientFactory, but if the connection fails or is lost, the factory
    will attempt to reconnect.

    Instead of using f.getRootObject (which gives a Deferred that can only
    be fired once), override the gotRootObject method.

    Instead of using the newcred f.login (which is also one-shot), call
    f.startLogin() with the credentials and client, and override the
    gotPerspective method.

    gotRootObject and gotPerspective will be called each time the object is
    received (once per successful connection attempt). You will probably want
    to use obj.notifyOnDisconnect to find out when the connection is lost.

    If an authorization error occurs, failedToGetPerspective() will be
    invoked.

    To use me, subclass, then hand an instance to a connector (like
    TCPClient).
    """

    def clientConnectionFailed(self, connector, reason):
        # Let PBClientFactory fail any pending deferreds first, then let
        # ReconnectingClientFactory schedule a retry with backoff.
        PBClientFactory.clientConnectionFailed(self, connector, reason)
        if self.continueTrying:
            self.connector = connector
            self.retry()

    def clientConnectionLost(self, connector, reason):
        # reconnecting=True tells PBClientFactory to keep its state for the
        # next attempt rather than treating the loss as final.
        PBClientFactory.clientConnectionLost(self, connector, reason,
                                             reconnecting=True)
        RCF = protocol.ReconnectingClientFactory
        RCF.clientConnectionLost(self, connector, reason)

    def clientConnectionMade(self, broker):
        # A successful connection resets the reconnect backoff delay.
        self.resetDelay()
        PBClientFactory.clientConnectionMade(self, broker)
        self.doLogin(self._root, broker)
        self.gotRootObject(self._root)

    def buildProtocol(self, addr):
        return PBClientFactory.buildProtocol(self, addr)

    # newcred methods

    def login(self, *args):
        # The parent's one-shot login() cannot survive reconnects.
        raise RuntimeError("login is one-shot: use startLogin instead")

    def startLogin(self, credentials, client=None):
        # Store the credentials so every (re)connection can log in again.
        self._credentials = credentials
        self._client = client

    def doLogin(self, root, broker):
        # newcred login()
        d = self._cbSendUsername(root, self._credentials.username,
                                 self._credentials.password, self._client)
        d.addCallbacks(self.gotPerspective, self.failedToGetPerspective,
                       errbackArgs=(broker,))

    # methods to override

    def gotPerspective(self, perspective):
        """The remote avatar or perspective (obtained each time this factory
        connects) is now available."""

    def gotRootObject(self, root):
        """The remote root object (obtained each time this factory connects)
        is now available. This method will be called each time the connection
        is established and the object reference is retrieved."""

    def failedToGetPerspective(self, why, broker):
        """The login process failed, most likely because of an authorization
        failure (bad password), but it is also possible that we lost the new
        connection before we managed to send our credentials.
        """
        log.msg("ReconnectingPBClientFactory.failedToGetPerspective")
        # put something useful in the logs
        if why.check(pb.PBConnectionLost):
            log.msg("we lost the brand-new connection")
            # fall through
        elif why.check(error.UnauthorizedLogin):
            log.msg("unauthorized login; check worker name and password")
            # fall through
        else:
            log.err(why, 'While trying to connect:')
            self.stopTrying()
            reactor.stop()
            return
        # lose the current connection, which will trigger a retry
        broker.transport.loseConnection()
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# Derived from source in PyFlag developed by:
# Copyright 2004: Commonwealth of Australia.
# Michael Cohen <scudette@users.sourceforge.net>
# David Collett <daveco@users.sourceforge.net>
#
# Subclassing plugin code developed by:
#
# Mike Auty <mike.auty@gmail.com>
#
# ******************************************************
# Version: FLAG $Version: 0.84RC4 Date: Wed May 30 20:48:31 EST 2007$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# *****************************************************
#pylint: disable-msg=C0111
""" This module implements a class registry.
We scan the memory_plugins directory for all python files and add those
classes which should be registered into their own lookup tables. These
are then ordered as required. The rest of Volatility will then call onto the
registered classes when needed.
This mechanism allows us to reorganise the code according to
functionality. For example we may include a Scanner, Report and File
classes in the same plugin and have them all automatically loaded.
"""
import os, zipfile
import volatility.debug as debug
import volatility.plugins as plugins
class PluginImporter(object):
    """This class searches through a comma-separated list of plugins and
    imports all classes found, based on their path and a fixed prefix.
    """
    def __init__(self):
        """Gathers all the plugins from config.PLUGINS
        Determines their namespaces and maintains a dictionary of modules to filepaths
        Then imports all modules found
        """
        self.modnames = {}
        # Handle additional plugins
        for path in plugins.__path__:
            path = os.path.abspath(path)
            for relfile in self.walkzip(path):
                module_path, ext = os.path.splitext(relfile)
                # Derive the dotted module namespace from the relative path.
                namespace = ".".join(['volatility.plugins'] + [ x for x in module_path.split(os.path.sep) if x ])
                #Lose the extension for the module name
                if ext in [".py", ".pyc", ".pyo"]:
                    filepath = os.path.join(path, relfile)
                    # Handle Init files: a package's __init__ maps to the
                    # package namespace itself.
                    initstr = '.__init__'
                    if namespace.endswith(initstr):
                        self.modnames[namespace[:-len(initstr)]] = filepath
                    else:
                        self.modnames[namespace] = filepath
        self.run_imports()
    def walkzip(self, path):
        """Walks a path independent of whether it includes a zipfile or not"""
        if os.path.exists(path) and os.path.isdir(path):
            for dirpath, _dirnames, filenames in os.walk(path):
                for filename in filenames:
                    # Run through files as we always used to
                    yield os.path.join(dirpath[len(path) + len(os.path.sep):], filename)
        else:
            # The path may point inside a zip archive; locate the archive by
            # testing each path prefix (the while/else runs when no prefix
            # matched and tests the full path itself).
            index = -1
            zippath = None
            while path.find(os.path.sep, index + 1) > -1:
                index = path.find(os.path.sep, index + 1)
                if zipfile.is_zipfile(path[:index]):
                    zippath = path[:index]
                    break
            else:
                if zipfile.is_zipfile(path):
                    zippath = path
            # Now yield the files
            if zippath:
                zipf = zipfile.ZipFile(zippath)
                prefix = path[len(zippath):].strip(os.path.sep)
                # If there's a prefix, ensure it ends in a slash
                if len(prefix):
                    prefix += os.path.sep
                for fn in zipf.namelist():
                    # Zipfiles seem to always list contents using / as their separator
                    fn = fn.replace('/', os.path.sep)
                    if fn.startswith(prefix) and not fn.endswith(os.path.sep):
                        # We're a file in the zipfile
                        yield fn[len(prefix):]
    def run_imports(self):
        """Imports all the already found modules"""
        for i in self.modnames.keys():
            if self.modnames[i] is not None:
                try:
                    __import__(i)
                except Exception, e:
                    print "*** Failed to import " + i + " (" + str(e.__class__.__name__) + ": " + str(e) + ")"
                    # This is too early to have had the debug filter lowered to include debugging messages
                    debug.post_mortem(2)
def _get_subclasses(cls):
""" Run through subclasses of a particular class
This returns all classes descended from the main class,
_including_ the main class itself. If showall is set to
False (the default) then classes starting with Abstract
will not be returned.
"""
for i in cls.__subclasses__():
for c in _get_subclasses(i):
yield c
yield cls
def get_plugin_classes(cls, showall = False, lower = False):
"""Returns a dictionary of plugins"""
# Plugins all make use of the Abstract concept
result = {}
for plugin in set(_get_subclasses(cls)):
if showall or not (plugin.__name__.startswith("Abstract") or plugin == cls):
# FIXME: This is due to not having done things correctly at the start
if not showall and plugin.__name__ in ['BufferAddressSpace', 'HiveFileAddressSpace', 'HiveAddressSpace']:
continue
name = plugin.__name__.split('.')[-1]
if lower:
name = name.lower()
if name not in result:
result[name] = plugin
else:
raise Exception("Object {0} has already been defined by {1}".format(name, plugin))
return result
def register_global_options(config, cls):
## Register all register_options for the various classes
for m in get_plugin_classes(cls, True).values():
if hasattr(m, 'register_options'):
m.register_options(config) | unknown | codeparrot/codeparrot-clean | ||
use rustc_hir::def_id::LocalDefId;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{AllocInit, Allocation, GlobalAlloc, InterpResult, Pointer};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{TyCtxt, TypeVisitable, TypeVisitableExt};
use tracing::debug;
use super::{InterpCx, MPlaceTy, MemoryKind, interp_ok, throw_inval};
use crate::const_eval::{CompileTimeInterpCx, CompileTimeMachine, InterpretationResult};
/// Checks whether a type contains generic parameters which must be instantiated.
///
/// In case it does, returns a `TooGeneric` const eval error.
pub(crate) fn ensure_monomorphic_enough<'tcx, T>(_tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx>
where
    T: TypeVisitable<TyCtxt<'tcx>>,
{
    debug!("ensure_monomorphic_enough: ty={:?}", ty);
    // Any remaining generic parameter means the value cannot be evaluated yet.
    if ty.has_param() {
        throw_inval!(TooGeneric);
    }
    interp_ok(())
}
impl<'tcx> InterpretationResult<'tcx> for mir::interpret::ConstAllocation<'tcx> {
    /// Turns the final interpreter place into an interned constant allocation.
    fn make_result(
        mplace: MPlaceTy<'tcx>,
        ecx: &mut InterpCx<'tcx, CompileTimeMachine<'tcx>>,
    ) -> Self {
        // Detach the backing allocation from the interpreter's allocation map
        // (the place is expected to have provenance and a live allocation),
        // then intern it in the tcx.
        let alloc_id = mplace.ptr().provenance.unwrap().alloc_id();
        let alloc = ecx.memory.alloc_map.swap_remove(&alloc_id).unwrap().1;
        ecx.tcx.mk_const_alloc(alloc)
    }
}
/// Allocates and registers the backing memory for evaluating the static item
/// `static_def_id`, returning a place that covers the whole allocation.
pub(crate) fn create_static_alloc<'tcx>(
    ecx: &mut CompileTimeInterpCx<'tcx>,
    static_def_id: LocalDefId,
    layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
    // Inherit size and align from the `GlobalAlloc::Static` so we can avoid duplicating
    // the alignment attribute logic.
    let (size, align) =
        GlobalAlloc::Static(static_def_id.into()).size_and_align(*ecx.tcx, ecx.typing_env);
    assert_eq!(size, layout.size);
    assert!(align >= layout.align.abi);
    let alloc = Allocation::try_new(size, align, AllocInit::Uninit, ())?;
    let alloc_id = ecx.tcx.reserve_and_set_static_alloc(static_def_id.into());
    // Only one static root may be set per interpreter instance.
    assert_eq!(ecx.machine.static_root_ids, None);
    ecx.machine.static_root_ids = Some((alloc_id, static_def_id));
    assert!(ecx.memory.alloc_map.insert(alloc_id, (MemoryKind::Stack, alloc)).is_none());
    interp_ok(ecx.ptr_to_mplace(Pointer::from(alloc_id).into(), layout))
}
/// A marker trait returned by [crate::interpret::Machine::enter_trace_span], identifying either a
/// real [tracing::span::EnteredSpan] in case tracing is enabled, or the dummy type `()` when
/// tracing is disabled. Also see [crate::enter_trace_span!] below.
pub trait EnteredTraceSpan {
/// Allows executing an alternative function when tracing is disabled. Useful for example if you
/// want to open a trace span when tracing is enabled, and alternatively just log a line when
/// tracing is disabled.
fn or_if_tracing_disabled(self, f: impl FnOnce()) -> Self;
}
/// The tracing-disabled case: the "span" is just `()`.
impl EnteredTraceSpan for () {
    fn or_if_tracing_disabled(self, f: impl FnOnce()) -> Self {
        f(); // tracing is disabled, execute the function
        self
    }
}
/// The tracing-enabled case: a real entered span; the fallback is ignored.
impl EnteredTraceSpan for tracing::span::EnteredSpan {
    fn or_if_tracing_disabled(self, _f: impl FnOnce()) -> Self {
        self // tracing is enabled, don't execute anything
    }
}
/// Shorthand for calling [crate::interpret::Machine::enter_trace_span] on a [tracing::info_span!].
/// This is supposed to be compiled out when [crate::interpret::Machine::enter_trace_span] has the
/// default implementation (i.e. when it does not actually enter the span but instead returns `()`).
/// This macro takes a type implementing the [crate::interpret::Machine] trait as its first argument
/// and otherwise accepts the same syntax as [tracing::span!] (see some tips below).
/// Note: the result of this macro **must be used** because the span is exited when it's dropped.
///
/// ### Syntax accepted by this macro
///
/// The full documentation for the [tracing::span!] syntax can be found at [tracing] under "Using the
/// Macros". A few possibly confusing syntaxes are listed here:
/// ```rust
/// # use rustc_const_eval::enter_trace_span;
/// # type M = rustc_const_eval::const_eval::CompileTimeMachine<'static>;
/// # let my_display_var = String::new();
/// # let my_debug_var = String::new();
/// // logs a span named "hello" with a field named "arg" of value 42 (works only because
/// // 42 implements the tracing::Value trait, otherwise use one of the options below)
/// let _trace = enter_trace_span!(M, "hello", arg = 42);
/// // logs a field called "my_display_var" using the Display implementation
/// let _trace = enter_trace_span!(M, "hello", %my_display_var);
/// // logs a field called "my_debug_var" using the Debug implementation
/// let _trace = enter_trace_span!(M, "hello", ?my_debug_var);
/// ```
///
/// ### `NAME::SUBNAME` syntax
///
/// In addition to the syntax accepted by [tracing::span!], this macro optionally allows passing
/// the span name (i.e. the first macro argument) in the form `NAME::SUBNAME` (without quotes) to
/// indicate that the span has name "NAME" (usually the name of the component) and has an additional
/// more specific name "SUBNAME" (usually the function name). The latter is passed to the [tracing]
/// infrastructure as a span field with the name "NAME". This allows not being distracted by
/// subnames when looking at the trace in <https://ui.perfetto.dev>, but when deeper introspection
/// is needed within a component, it's still possible to view the subnames directly in the UI by
/// selecting a span, clicking on the "NAME" argument on the right, and clicking on "Visualize
/// argument values".
/// ```rust
/// # use rustc_const_eval::enter_trace_span;
/// # type M = rustc_const_eval::const_eval::CompileTimeMachine<'static>;
/// // for example, the first will expand to the second
/// let _trace = enter_trace_span!(M, borrow_tracker::on_stack_pop, /* ... */);
/// let _trace = enter_trace_span!(M, "borrow_tracker", borrow_tracker = "on_stack_pop", /* ... */);
/// ```
///
/// ### `tracing_separate_thread` parameter
///
/// This macro was introduced to obtain better traces of Miri without impacting release performance.
/// Miri saves traces using the `tracing_chrome` `tracing::Layer` so that they can be visualized
/// in <https://ui.perfetto.dev>. To instruct `tracing_chrome` to put some spans on a separate trace
/// thread/line than other spans when viewed in <https://ui.perfetto.dev>, you can pass
/// `tracing_separate_thread = tracing::field::Empty` to the tracing macros. This is useful to
/// separate out spans which just indicate the current step or program frame being processed by the
/// interpreter. You should use a value of [tracing::field::Empty] so that other tracing layers
/// (e.g. the logger) will ignore the `tracing_separate_thread` field. For example:
/// ```rust
/// # use rustc_const_eval::enter_trace_span;
/// # type M = rustc_const_eval::const_eval::CompileTimeMachine<'static>;
/// let _trace = enter_trace_span!(M, step::eval_statement, tracing_separate_thread = tracing::field::Empty);
/// ```
///
/// ### Executing something else when tracing is disabled
///
/// [crate::interpret::Machine::enter_trace_span] returns [EnteredTraceSpan], on which you can call
/// [EnteredTraceSpan::or_if_tracing_disabled], to e.g. log a line as an alternative to the tracing
/// span for when tracing is disabled. For example:
/// ```rust
/// # use rustc_const_eval::enter_trace_span;
/// # use rustc_const_eval::interpret::EnteredTraceSpan;
/// # type M = rustc_const_eval::const_eval::CompileTimeMachine<'static>;
/// let _trace = enter_trace_span!(M, step::eval_statement)
/// .or_if_tracing_disabled(|| tracing::info!("eval_statement"));
/// ```
#[macro_export]
macro_rules! enter_trace_span {
    // `NAME::SUBNAME` form (see the doc comment above): expands to a span
    // named "NAME" carrying the subname as a field that is also called "NAME".
    ($machine:ty, $name:ident :: $subname:ident $($tt:tt)*) => {
        $crate::enter_trace_span!($machine, stringify!($name), $name = %stringify!($subname) $($tt)*)
    };
    // General form: forward everything to `tracing::info_span!`, wrapped in a
    // closure so the span is only constructed if the machine actually enters it.
    ($machine:ty, $($tt:tt)*) => {
        <$machine as $crate::interpret::Machine>::enter_trace_span(|| tracing::info_span!($($tt)*))
    };
}
from ryu.services.protocols.bgp.rtconf.base import ConfWithStats
from ryu.services.protocols.bgp.rtconf.common import CommonConfListener
from ryu.services.protocols.bgp.rtconf.neighbors import NeighborsConfListener
from ryu.services.protocols.bgp.rtconf import vrfs
from ryu.services.protocols.bgp.rtconf.vrfs import VrfConf
from ryu.services.protocols.bgp.rtconf.vrfs import VrfsConfListener
import logging
LOG = logging.getLogger('bgpspeaker.core_managers.table_mixin')
class ConfigurationManager(CommonConfListener, VrfsConfListener,
                           NeighborsConfListener):
    """Dispatches runtime configuration-change events (common, VRF and
    neighbor) to the core service's peer/table/RT managers.
    """

    def __init__(self, core_service, common_conf, vrfs_conf, neighbors_conf):
        self._signal_bus = core_service.signal_bus
        self._common_config = common_conf
        self._peer_manager = core_service.peer_manager
        self._table_manager = core_service.table_manager
        self._rt_manager = core_service.rt_manager
        # Register this instance as listener on each configuration sub-tree.
        CommonConfListener.__init__(self, common_conf)
        VrfsConfListener.__init__(self, vrfs_conf)
        NeighborsConfListener.__init__(self, neighbors_conf)

    def on_update_common_conf(self, evt):
        # Changing common/global configuration at runtime is not supported.
        raise NotImplementedError()

    def on_add_neighbor_conf(self, evt):
        # A new neighbor was configured: create and start managing its peer.
        neigh_conf = evt.value
        self._peer_manager.add_peer(neigh_conf, self._common_config)

    def on_remove_neighbor_conf(self, evt):
        # A neighbor was removed from configuration: tear down its peer.
        neigh_conf = evt.value
        self._peer_manager.remove_peer(neigh_conf)

    def on_chg_vrf_conf(self, evt):
        """Handles a change to an existing VRF's configuration.

        Reconciles changed import/export route-targets, import maps and
        re-import/re-export requests against the VRF's table.
        """
        evt_value = evt.value
        vrf_conf = evt.src
        new_imp_rts, removed_imp_rts, import_maps, re_export, re_import = \
            evt_value
        route_family = vrf_conf.route_family
        vrf_table = self._table_manager.get_vrf_table(
            vrf_conf.route_dist, route_family
        )
        assert vrf_table
        # If we have new import RTs we have to update RTC table and make route
        # refresh request to peers not participating in RT address-family
        self._table_manager.update_vrf_table_links(
            vrf_table, new_imp_rts, removed_imp_rts
        )
        # If other properties of VRF changed we re-install local paths.
        if re_export:
            self._table_manager.re_install_net_ctrl_paths(vrf_table)
        # We have to withdraw paths that do not have any RT of interest.
        vrf_table.clean_uninteresting_paths()
        if import_maps is not None:
            vrf_table.init_import_maps(import_maps)
            changed_dests = vrf_table.apply_import_maps()
            for dest in changed_dests:
                self._signal_bus.dest_changed(dest)
        # import new rts
        if re_import:
            LOG.debug(
                "RE-importing prefixes from VPN table to VRF %r", vrf_table
            )
            self._table_manager.import_all_vpn_paths_to_vrf(vrf_table)
        else:
            self._table_manager.import_all_vpn_paths_to_vrf(
                vrf_table, new_imp_rts
            )
        # Update local/global RT NLRIs
        self._rt_manager.update_local_rt_nlris()

    def on_remove_vrf_conf(self, evt):
        """Removes VRF table associated with given `vrf_conf`.

        Cleans up other links to this table as well.
        """
        vrf_conf = evt.value
        # Detach VrfConf change listener.
        vrf_conf.remove_listener(VrfConf.VRF_CHG_EVT, self.on_chg_vrf_conf)
        self._table_manager.remove_vrf_by_vrf_conf(vrf_conf)
        # Update local RT NLRIs
        self._rt_manager.update_local_rt_nlris()
        self._signal_bus.vrf_removed(vrf_conf.route_dist)
        # Remove AttributeMaps under the removed vrf
        rd = vrf_conf.route_dist
        rf = vrf_conf.route_family
        peers = self._peer_manager.iterpeers
        for peer in peers:
            # Attribute maps are keyed "route_dist:route_family".
            key = ':'.join([rd, rf])
            peer.attribute_maps.pop(key, None)

    def on_add_vrf_conf(self, evt):
        """Event handler for new VrfConf.

        Creates a VrfTable to store routing information related to new Vrf.
        Also arranges for related paths to be imported to this VrfTable.
        """
        vrf_conf = evt.value
        route_family = vrf_conf.route_family
        assert route_family in vrfs.SUPPORTED_VRF_RF
        # Create VRF table with given configuration.
        vrf_table = self._table_manager.create_and_link_vrf_table(vrf_conf)
        # Attach VrfConf change listeners.
        vrf_conf.add_listener(ConfWithStats.UPDATE_STATS_LOG_ENABLED_EVT,
                              self.on_stats_config_change)
        vrf_conf.add_listener(ConfWithStats.UPDATE_STATS_TIME_EVT,
                              self.on_stats_config_change)
        vrf_conf.add_listener(VrfConf.VRF_CHG_EVT, self.on_chg_vrf_conf)
        # Import paths from VPN table that match this VRF/VPN.
        self._table_manager.import_all_vpn_paths_to_vrf(vrf_table)
        # Update local RT NLRIs
        self._rt_manager.update_local_rt_nlris()
        self._signal_bus.vrf_added(vrf_conf)

    def on_stats_config_change(self, evt):
        # Stats logging settings changed for a VRF; notify via the signal bus.
        vrf_conf = evt.src
        self._signal_bus.stats_config_changed(vrf_conf)
import numpy as np
from skimage.feature.util import (FeatureDetector, DescriptorExtractor,
_mask_border_keypoints,
_prepare_grayscale_input_2D)
from skimage.feature import (corner_fast, corner_orientations, corner_peaks,
corner_harris)
from skimage.transform import pyramid_gaussian
from .orb_cy import _orb_loop
# Half-heights of the circular patch, per column offset 0..15: column
# `15 + c` of the 31x31 mask covers rows `15 - OFAST_UMAX[|c|]` through
# `15 + OFAST_UMAX[|c|]`, giving the disc used for oriented-FAST moments.
OFAST_UMAX = [15, 15, 15, 15, 14, 14, 14, 13, 13, 12, 11, 10, 9, 8, 6, 3]
OFAST_MASK = np.zeros((31, 31))
for col_offset in range(-15, 16):
    half_height = OFAST_UMAX[abs(col_offset)]
    for row_offset in range(-half_height, half_height + 1):
        OFAST_MASK[15 + row_offset, 15 + col_offset] = 1
class ORB(FeatureDetector, DescriptorExtractor):
    """Oriented FAST and rotated BRIEF feature detector and binary descriptor
    extractor.

    Parameters
    ----------
    n_keypoints : int, optional
        Number of keypoints to be returned. The function will return the best
        `n_keypoints` according to the Harris corner response if more than
        `n_keypoints` are detected. If not, then all the detected keypoints
        are returned.
    fast_n : int, optional
        The `n` parameter in `skimage.feature.corner_fast`. Minimum number of
        consecutive pixels out of 16 pixels on the circle that should all be
        either brighter or darker w.r.t test-pixel. A point c on the circle is
        darker w.r.t test pixel p if ``Ic < Ip - threshold`` and brighter if
        ``Ic > Ip + threshold``. Also stands for the n in ``FAST-n`` corner
        detector.
    fast_threshold : float, optional
        The ``threshold`` parameter in ``feature.corner_fast``. Threshold used
        to decide whether the pixels on the circle are brighter, darker or
        similar w.r.t. the test pixel. Decrease the threshold when more
        corners are desired and vice-versa.
    harris_k : float, optional
        The `k` parameter in `skimage.feature.corner_harris`. Sensitivity
        factor to separate corners from edges, typically in range ``[0, 0.2]``.
        Small values of `k` result in detection of sharp corners.
    downscale : float, optional
        Downscale factor for the image pyramid. Default value 1.2 is chosen so
        that there are more dense scales which enable robust scale invariance
        for a subsequent feature description.
    n_scales : int, optional
        Maximum number of scales from the bottom of the image pyramid to
        extract the features from.

    Attributes
    ----------
    keypoints : (N, 2) array
        Keypoint coordinates as ``(row, col)``.
    scales : (N, ) array
        Corresponding scales.
    orientations : (N, ) array
        Corresponding orientations in radians.
    responses : (N, ) array
        Corresponding Harris corner responses.
    descriptors : (Q, `descriptor_size`) array of dtype bool
        2D array of binary descriptors of size `descriptor_size` for Q
        keypoints after filtering out border keypoints with value at an
        index ``(i, j)`` either being ``True`` or ``False`` representing
        the outcome of the intensity comparison for i-th keypoint on j-th
        decision pixel-pair. It is ``Q == np.sum(mask)``.

    References
    ----------
    .. [1] Ethan Rublee, Vincent Rabaud, Kurt Konolige and Gary Bradski
          "ORB: An efficient alternative to SIFT and SURF"
          http://www.vision.cs.chubu.ac.jp/CV-R/pdf/Rublee_iccv2011.pdf

    Examples
    --------
    >>> from skimage.feature import ORB, match_descriptors
    >>> img1 = np.zeros((100, 100))
    >>> img2 = np.zeros_like(img1)
    >>> np.random.seed(1)
    >>> square = np.random.rand(20, 20)
    >>> img1[40:60, 40:60] = square
    >>> img2[53:73, 53:73] = square
    >>> detector_extractor1 = ORB(n_keypoints=5)
    >>> detector_extractor2 = ORB(n_keypoints=5)
    >>> detector_extractor1.detect_and_extract(img1)
    >>> detector_extractor2.detect_and_extract(img2)
    >>> matches = match_descriptors(detector_extractor1.descriptors,
    ...                             detector_extractor2.descriptors)
    >>> matches
    array([[0, 0],
           [1, 1],
           [2, 2],
           [3, 3],
           [4, 4]])
    >>> detector_extractor1.keypoints[matches[:, 0]]
    array([[ 42.,  40.],
           [ 47.,  58.],
           [ 44.,  40.],
           [ 59.,  42.],
           [ 45.,  44.]])
    >>> detector_extractor2.keypoints[matches[:, 1]]
    array([[ 55.,  53.],
           [ 60.,  71.],
           [ 57.,  53.],
           [ 72.,  55.],
           [ 58.,  57.]])
    """

    def __init__(self, downscale=1.2, n_scales=8,
                 n_keypoints=500, fast_n=9, fast_threshold=0.08,
                 harris_k=0.04):
        self.downscale = downscale
        self.n_scales = n_scales
        self.n_keypoints = n_keypoints
        self.fast_n = fast_n
        self.fast_threshold = fast_threshold
        self.harris_k = harris_k

        # Result attributes, populated by detect / extract / detect_and_extract.
        self.keypoints = None
        self.scales = None
        self.responses = None
        self.orientations = None
        self.descriptors = None

    def _build_pyramid(self, image):
        # Gaussian pyramid with n_scales levels; max_layer is n_scales - 1
        # because the original image counts as level 0.
        image = _prepare_grayscale_input_2D(image)
        return list(pyramid_gaussian(image, self.n_scales - 1, self.downscale))

    def _detect_octave(self, octave_image):
        # Extract keypoints for current octave
        fast_response = corner_fast(octave_image, self.fast_n,
                                    self.fast_threshold)
        keypoints = corner_peaks(fast_response, min_distance=1)

        if len(keypoints) == 0:
            return (np.zeros((0, 2), dtype=np.double),
                    np.zeros((0, ), dtype=np.double),
                    np.zeros((0, ), dtype=np.double))

        # Discard keypoints too close to the border for the 31x31 patch.
        mask = _mask_border_keypoints(octave_image.shape, keypoints,
                                      distance=16)
        keypoints = keypoints[mask]

        orientations = corner_orientations(octave_image, keypoints,
                                           OFAST_MASK)

        harris_response = corner_harris(octave_image, method='k',
                                        k=self.harris_k)
        responses = harris_response[keypoints[:, 0], keypoints[:, 1]]

        return keypoints, orientations, responses

    def detect(self, image):
        """Detect oriented FAST keypoints along with the corresponding scale.

        Parameters
        ----------
        image : 2D array
            Input image.
        """
        pyramid = self._build_pyramid(image)

        keypoints_list = []
        orientations_list = []
        scales_list = []
        responses_list = []

        for octave in range(len(pyramid)):
            octave_image = np.ascontiguousarray(pyramid[octave])

            keypoints, orientations, responses = \
                self._detect_octave(octave_image)

            # Rescale coordinates back into the original image's frame.
            keypoints_list.append(keypoints * self.downscale ** octave)
            orientations_list.append(orientations)
            scales_list.append(self.downscale ** octave
                               * np.ones(keypoints.shape[0], dtype=np.intp))
            responses_list.append(responses)

        keypoints = np.vstack(keypoints_list)
        orientations = np.hstack(orientations_list)
        scales = np.hstack(scales_list)
        responses = np.hstack(responses_list)

        if keypoints.shape[0] < self.n_keypoints:
            self.keypoints = keypoints
            self.scales = scales
            self.orientations = orientations
            self.responses = responses
        else:
            # Choose best n_keypoints according to Harris corner response
            best_indices = responses.argsort()[::-1][:self.n_keypoints]
            self.keypoints = keypoints[best_indices]
            self.scales = scales[best_indices]
            self.orientations = orientations[best_indices]
            self.responses = responses[best_indices]

    def _extract_octave(self, octave_image, keypoints, orientations):
        # Descriptor sampling needs a larger border than detection (20 px).
        mask = _mask_border_keypoints(octave_image.shape, keypoints,
                                      distance=20)
        keypoints = np.array(keypoints[mask], dtype=np.intp, order='C',
                             copy=False)
        orientations = np.array(orientations[mask], dtype=np.double, order='C',
                                copy=False)

        descriptors = _orb_loop(octave_image, keypoints, orientations)

        return descriptors, mask

    def extract(self, image, keypoints, scales, orientations):
        """Extract rBRIEF binary descriptors for given keypoints in image.

        Note that the keypoints must be extracted using the same `downscale`
        and `n_scales` parameters. Additionally, if you want to extract both
        keypoints and descriptors you should use the faster
        `detect_and_extract`.

        Parameters
        ----------
        image : 2D array
            Input image.
        keypoints : (N, 2) array
            Keypoint coordinates as ``(row, col)``.
        scales : (N, ) array
            Corresponding scales.
        orientations : (N, ) array
            Corresponding orientations in radians.
        """
        pyramid = self._build_pyramid(image)

        descriptors_list = []
        mask_list = []

        # Determine octaves from scales
        octaves = (np.log(scales) / np.log(self.downscale)).astype(np.intp)

        for octave in range(len(pyramid)):
            # Mask for all keypoints in current octave
            octave_mask = octaves == octave

            if np.sum(octave_mask) > 0:
                octave_image = np.ascontiguousarray(pyramid[octave])

                octave_keypoints = keypoints[octave_mask]
                # Fancy indexing above copies, so this in-place rescale does
                # not mutate the caller's array.
                octave_keypoints /= self.downscale ** octave

                octave_orientations = orientations[octave_mask]

                descriptors, mask = self._extract_octave(octave_image,
                                                         octave_keypoints,
                                                         octave_orientations)

                descriptors_list.append(descriptors)
                mask_list.append(mask)

        # `np.bool` was removed from NumPy; the builtin `bool` is the dtype.
        self.descriptors = np.vstack(descriptors_list).view(bool)
        self.mask_ = np.hstack(mask_list)

    def detect_and_extract(self, image):
        """Detect oriented FAST keypoints and extract rBRIEF descriptors.

        Note that this is faster than first calling `detect` and then
        `extract`.

        Parameters
        ----------
        image : 2D array
            Input image.
        """
        pyramid = self._build_pyramid(image)

        keypoints_list = []
        responses_list = []
        scales_list = []
        orientations_list = []
        descriptors_list = []

        for octave in range(len(pyramid)):
            octave_image = np.ascontiguousarray(pyramid[octave])

            keypoints, orientations, responses = \
                self._detect_octave(octave_image)

            if len(keypoints) == 0:
                # Append empty entries to *every* per-keypoint list so they
                # stay the same length and the hstack/vstack below cannot
                # fail or desynchronize when an octave yields no keypoints.
                keypoints_list.append(keypoints)
                responses_list.append(responses)
                orientations_list.append(orientations)
                scales_list.append(np.zeros(0, dtype=np.intp))
                descriptors_list.append(np.zeros((0, 256), dtype=bool))
                continue

            descriptors, mask = self._extract_octave(octave_image, keypoints,
                                                     orientations)

            keypoints_list.append(keypoints[mask] * self.downscale ** octave)
            responses_list.append(responses[mask])
            orientations_list.append(orientations[mask])
            # Bug fix: count only keypoints that survived the border mask;
            # using the unmasked `keypoints.shape[0]` made `scales` longer
            # than the other per-keypoint arrays.
            scales_list.append(self.downscale ** octave
                               * np.ones(np.sum(mask), dtype=np.intp))
            descriptors_list.append(descriptors)

        keypoints = np.vstack(keypoints_list)
        responses = np.hstack(responses_list)
        scales = np.hstack(scales_list)
        orientations = np.hstack(orientations_list)
        descriptors = np.vstack(descriptors_list).view(bool)

        if keypoints.shape[0] < self.n_keypoints:
            self.keypoints = keypoints
            self.scales = scales
            self.orientations = orientations
            self.responses = responses
            self.descriptors = descriptors
        else:
            # Choose best n_keypoints according to Harris corner response
            best_indices = responses.argsort()[::-1][:self.n_keypoints]
            self.keypoints = keypoints[best_indices]
            self.scales = scales[best_indices]
            self.orientations = orientations[best_indices]
            self.responses = responses[best_indices]
            self.descriptors = descriptors[best_indices]
"""Unit tests for feature_flag_tags_check.py."""
import unittest
from unittest.mock import patch
from buildscripts import feature_flag_tags_check
class TestFindTestsInGitDiff(unittest.TestCase):
    """Tests for feature_flag_tags_check.get_tests_missing_fcv_tag."""

    @classmethod
    def setUpClass(cls):
        # Pin REQUIRES_FCV_TAG_LATEST to a fixed value so the assertions do
        # not depend on whatever the current latest FCV happens to be.
        cls.requires_fcv_tag = "requires_fcv_51"
        cls.original_requires_fcv_tag = feature_flag_tags_check.REQUIRES_FCV_TAG_LATEST
        feature_flag_tags_check.REQUIRES_FCV_TAG_LATEST = cls.requires_fcv_tag

    @classmethod
    def tearDownClass(cls):
        # Restore the module-level constant patched in setUpClass.
        feature_flag_tags_check.REQUIRES_FCV_TAG_LATEST = cls.original_requires_fcv_tag

    def test_get_tests_missing_fcv_tag_no_tag(self):
        # A test file with no tags at all is reported as missing the FCV tag.
        tests = ["dummy_jstest_file.js"]
        with patch.object(feature_flag_tags_check.jscomment, "get_tags", return_value=[]):
            result = feature_flag_tags_check.get_tests_missing_fcv_tag(tests)
        self.assertCountEqual(tests, result)

    def test_get_tests_missing_fcv_tag_have_tag(self):
        # A test file already carrying the latest FCV tag is not reported.
        tests = ["dummy_jstest_file.js"]
        with patch.object(
            feature_flag_tags_check.jscomment, "get_tags", return_value=[self.requires_fcv_tag]
        ):
            result = feature_flag_tags_check.get_tests_missing_fcv_tag(tests)
        self.assertCountEqual([], result)

    def test_get_tests_missing_fcv_tag_test_file_deleted(self):
        # Nonexistent (e.g. deleted) files are skipped rather than reported.
        tests = ["some/non/existent/jstest_file.js"]
        result = feature_flag_tags_check.get_tests_missing_fcv_tag(tests)
        self.assertCountEqual([], result)
import base64
import json
import os
import uuid
from multiprocessing import Process
from multiprocessing.managers import BaseManager, DictProxy
class ServerDictManager(BaseManager):
    # Server-side manager: `shared_data` is the single dict shared with every
    # connecting client process.
    shared_data = {}

def _get_shared():
    # Accessor registered with the manager; returns the shared dict.
    return ServerDictManager.shared_data

# Expose the shared dict to clients as "get_dict", proxied as a DictProxy.
ServerDictManager.register("get_dict",
                           callable=_get_shared,
                           proxytype=DictProxy)
class ClientDictManager(BaseManager):
    # Client-side manager: connects to a running ServerDictManager.
    pass

# Mirror the server's registration so clients can call get_dict().
ClientDictManager.register("get_dict")
class StashServer(object):
    """Context manager that runs a shared-dict stash server.

    On entry the server is started and its (address, authkey) pair is
    published via the WPT_STASH_CONFIG environment variable so that other
    processes can connect to it; on exit the server is shut down.
    """

    def __init__(self, address=None, authkey=None):
        self.address = address
        self.authkey = authkey
        self.manager = None

    def __enter__(self):
        # NOTE(review): returns None, so `with StashServer() as x` binds x to
        # None; callers are expected to use load_env_config() — confirm.
        self.manager, self.address, self.authkey = start_server(self.address, self.authkey)
        store_env_config(self.address, self.authkey)

    def __exit__(self, *args, **kwargs):
        if self.manager is not None:
            self.manager.shutdown()
def load_env_config():
    """Load the stash server's connection info from WPT_STASH_CONFIG.

    Reads the (address, authkey) pair previously serialized by
    store_env_config.

    :return: tuple of (address, authkey), where address is either a
        (host, port) tuple or a string path, and authkey is raw bytes.
    """
    address, authkey = json.loads(os.environ["WPT_STASH_CONFIG"])
    if isinstance(address, list):
        # JSON has no tuple type; a (host, port) pair round-trips as a list.
        address = tuple(address)
    else:
        address = str(address)
    # base64.b64decode replaces base64.decodestring, which was deprecated and
    # then removed (Python 3.9); it also accepts encodestring-style output
    # containing newlines.
    authkey = base64.b64decode(authkey)
    return address, authkey
def store_env_config(address, authkey):
    """Serialize (address, authkey) into the WPT_STASH_CONFIG env variable.

    :param address: server address — a (host, port) tuple or a string path.
    :param authkey: raw authentication key bytes.
    """
    # base64.b64encode replaces the removed base64.encodestring; decode to
    # ASCII text so the value is JSON-serializable on Python 3 as well.
    authkey = base64.b64encode(authkey).decode("ascii")
    os.environ["WPT_STASH_CONFIG"] = json.dumps((address, authkey))
def start_server(address=None, authkey=None):
    """Start a ServerDictManager in a child process.

    :return: tuple of (manager, address, authkey) for the running server.
    """
    manager = ServerDictManager(address, authkey)
    manager.start()
    # _address and _authkey are populated by BaseManager.start(); they are the
    # actual values to hand to connecting clients.
    return (manager, manager._address, manager._authkey)
#TODO: Consider expiring values after some fixed time for long-running
#servers
class Stash(object):
    """Key-value store for persisting data across HTTP/S and WS/S requests.

    This data store is specifically designed for persisting data across server
    requests. The synchronization is achieved by using the BaseManager from
    the multiprocessing module so different processes can access the same data.

    Stash can be used interchangeably between HTTP, HTTPS, WS and WSS servers.
    A thing to note about WS/S servers is that they require additional steps in
    the handlers for accessing the same underlying shared data in the Stash.
    This can usually be achieved by using load_env_config(). When using Stash
    interchangeably between HTTP/S and WS/S request, the path part of the key
    should be explicitly specified if accessing the same key/value subset.

    The store has several unusual properties. Keys are of the form (path,
    uuid), where path is, by default, the path in the HTTP request and
    uuid is a unique id. In addition, the store is write-once, read-once,
    i.e. the value associated with a particular key cannot be changed once
    written and the read operation (called "take") is destructive. Taken together,
    these properties make it difficult for data to accidentally leak
    between different resources or different requests for the same
    resource.
    """

    # Class-level cache: all Stash instances in a process share one backing
    # store (a local dict, or a DictProxy connected to the stash server).
    _proxy = None

    def __init__(self, default_path, address=None, authkey=None):
        self.default_path = default_path
        self.data = self._get_proxy(address, authkey)

    def _get_proxy(self, address=None, authkey=None):
        """Return the shared backing store.

        With no address/authkey a plain process-local dict is used; otherwise
        a connection to a ServerDictManager is made (once) and cached.
        """
        if address is None and authkey is None:
            Stash._proxy = {}
        if Stash._proxy is None:
            manager = ClientDictManager(address, authkey)
            manager.connect()
            Stash._proxy = manager.get_dict()
        return Stash._proxy

    def _wrap_key(self, key, path):
        if path is None:
            path = self.default_path
        # This key format is required to support using the path. Since the data
        # passed into the stash can be a DictProxy which wouldn't detect changes
        # when writing to a subdict.
        return (str(path), str(uuid.UUID(key)))

    def put(self, key, value, path=None):
        """Place a value in the shared stash.

        :param key: A UUID to use as the data's key.
        :param value: The data to store. This can be any python object.
        :param path: The path that has access to read the data (by default
                     the current request path)"""
        if value is None:
            raise ValueError("SharedStash value may not be set to None")
        internal_key = self._wrap_key(key, path)
        if internal_key in self.data:
            # Bug fix: the old value is keyed by the tuple itself; indexing
            # with str(internal_key) raised KeyError while formatting this
            # very error message.
            raise StashError("Tried to overwrite existing shared stash value "
                             "for key %s (old value was %s, new value is %s)" %
                             (internal_key, self.data[internal_key], value))
        else:
            self.data[internal_key] = value

    def take(self, key, path=None):
        """Remove a value from the shared stash and return it.

        :param key: A UUID to use as the data's key.
        :param path: The path that has access to read the data (by default
                     the current request path)"""
        internal_key = self._wrap_key(key, path)
        value = self.data.get(internal_key, None)
        if value is not None:
            try:
                self.data.pop(internal_key)
            except KeyError:
                # Entry already removed (e.g. by a concurrent take); the read
                # remains best-effort destructive.
                pass
        return value
class StashError(Exception):
    """Raised on attempts to overwrite an existing stash value."""
    pass
# -*- coding: utf-8 -*-
'''
Copyright (c) 2015 Jacob Mendt
Created on 22.01.16
@author: mendt
The following scripts compresses all files within a tms cache. It is based on the PIL library.
'''
import argparse
import os
from PIL import Image
def compressTMSCache(path):
    """ Functions runs a pngs compression on the given tms cache.

    :type path: str
    :return:
    """
    print 'Run png compression on %s ...' % path
    pngs = getPngsInDirTree(path)
    for png in pngs:
        # Quantize to a palette (method=2) and overwrite the file in place.
        Image.open(png).convert('RGBA').quantize(method=2).save(png)
def getPngsInDirTree(baseDir):
    """ Functions iterates over the baseDir and the subdirectory tree and
    returns a list of png paths found in the directory structure. Directories
    whose name starts with "." are skipped.

    :type baseDir: str
    :return: list<str>
    """
    def getAllPngsFromFilesList(dirPath, files):
        """ Functions returns all pngs within a files list, joined on dirPath.

        :type dirPath: str
        :type files: list<str>
        :return: list<str>
        """
        pngs = []
        for fileName in files:
            if os.path.splitext(fileName)[1][1:] == 'png':
                pngs.append(os.path.join(dirPath, fileName))
        return pngs

    allPngs = []
    for root, dirs, files in os.walk(baseDir):
        # first check that directory doesn't start with "."
        dirName = str(root).rsplit('/')[-1]
        # Bug fix: compare with "!=" — the previous "is not" compared object
        # identity, which only worked by accident of CPython string interning.
        if dirName[0] != '.':
            allPngs.extend(getAllPngsFromFilesList(root, files))
    return allPngs
if __name__ == '__main__':
    # Command-line entry point: validate the given TMS cache directory and
    # run the png compression on it.
    script_name = 'compresstms.py'
    parser = argparse.ArgumentParser(description = 'The scripts tooks a TMS cache as input and runs a compression of the contained pngs files.', prog = 'Script %s'%script_name)
    parser.add_argument('tms', metavar='TMS_DIR', type=str, help='Path to the TMS cache.')
    arguments = parser.parse_args()
    tmsPath = os.path.abspath(arguments.tms) if os.path.exists(arguments.tms) else None
    if tmsPath is None or not os.path.exists(tmsPath):
        # Bug fix: raising a plain string ("raise 'msg'") is invalid and
        # produces a TypeError at runtime; raise a real exception instead.
        raise IOError('TMS directory doesn\'t exists!')
    compressTMSCache(tmsPath)
import gtk
class PageDrawer (gtk.DrawingArea):
    """GTK widget that draws a scaled page outline with dashed sub-area
    rectangles, centered/aligned within the widget's allocation.
    """

    def __init__(self, page_width=None, page_height=None,
                 sub_areas=(), xalign=0.5, yalign=0.5
                 ):
        """Draw a page based on page areas given to us.

        The areas can be given in any scale they like.

        sub_areas are each (X1,Y1,WIDTH,HEIGHT) where the point defines
        the upper-left corner of the rectangle.

        Note: the default for sub_areas is an (immutable) empty tuple —
        a mutable [] default would be shared across instances.
        """
        self.xalign = xalign
        self.yalign = yalign
        gtk.DrawingArea.__init__(self)
        self.gc = None     # initialized in realize-event handler
        self.width = 0     # updated in size-allocate handler
        self.height = 0    # idem
        if page_width and page_height:
            self.set_page_area(page_width, page_height, sub_areas)
        self.connect('size-allocate', self.on_size_allocate)
        self.connect('expose-event', self.on_expose_event)
        self.connect('realize', self.on_realize)

    def set_page_area(self, page_width, page_height, sub_areas=()):
        """Store the page aspect ratio and the sub-areas normalized to
        fractions of the page size."""
        self.xy_ratio = page_width / page_height
        self.areas = []
        for x1, y1, w, h in sub_areas:
            width = float(w) / page_width
            height = float(h) / page_height
            x = float(x1) / page_width
            y = float(y1) / page_height
            self.areas.append(
                (x, y, width, height)
                )

    def on_realize(self, widget):
        # A graphics context can only be created once the window exists.
        self.gc = widget.window.new_gc()

    def on_size_allocate(self, widget, allocation):
        self.width = allocation.width
        self.height = allocation.height

    def on_expose_event(self, widget, event):
        # Nothing to draw until set_page_area has been called.
        if not hasattr(self, 'xy_ratio'):
            return
        # Fit the page (keeping its aspect ratio) into 90% of the widget.
        if self.xy_ratio * self.height > self.width:
            width = int(self.width * 0.9)
            height = int((self.width / self.xy_ratio) * 0.9)
        else:
            width = int(self.xy_ratio * self.height * 0.9)
            height = int(self.height * 0.9)
        # Distribute the leftover space according to the alignment factors.
        xpadding = int((self.width - width) * self.xalign)
        ypadding = int((self.height - height) * self.yalign)
        # Page outline: solid.
        self.gc.set_line_attributes(3,
                                    gtk.gdk.LINE_SOLID,
                                    gtk.gdk.CAP_BUTT,
                                    gtk.gdk.JOIN_MITER)
        widget.window.draw_rectangle(self.gc, False,
                                     xpadding, ypadding, width, height)
        # Sub-areas: dashed. (Consistently use widget.window here; the old
        # code mixed widget.window and self.window.)
        self.gc.set_line_attributes(1,
                                    gtk.gdk.LINE_ON_OFF_DASH,
                                    gtk.gdk.CAP_BUTT, gtk.gdk.JOIN_MITER)
        for sub_area in self.areas:
            x, y, w, h = sub_area
            widget.window.draw_rectangle(
                self.gc, False,
                int(xpadding + (x * width)), int(ypadding + (y * height)),
                int(w * width), int(h * height)
                )
if __name__ == '__main__':
    # Demo window: a US-letter page (8.5x11) with two column areas.
    w = gtk.Window()
    w.add(PageDrawer(8.5,11,[(1,1,3,9.5),
                             (4.5,1,3,9.5),
                             ]))
    w.show_all()
    w.connect('delete-event',lambda *args: gtk.main_quit())
    gtk.main()
import logging
import sys
import openerp
from openerp import tools
from openerp.modules import module
_logger = logging.getLogger(__name__)
# Registry of all defined subcommands, keyed by subcommand name.
commands = {}

class CommandType(type):
    """Metaclass that registers each Command subclass in `commands`."""

    def __init__(cls, name, bases, attrs):
        super(CommandType, cls).__init__(name, bases, attrs)
        # A subclass may override its registry key with an explicit `name`
        # class attribute; default to the lowercased class name.
        # (Bug fix: the old `getattr(cls, name, ...)` looked up an attribute
        # *named after the class*, which almost never exists; `attrs` is
        # consulted so the parent's already-set `name` is not inherited.)
        name = attrs.get('name', cls.__name__.lower())
        cls.name = name
        # The abstract base "command" itself is not a runnable subcommand.
        if name != 'command':
            commands[name] = cls
class Command(object):
    """Subclass this class to define new openerp subcommands """
    __metaclass__ = CommandType  # Python 2 metaclass syntax; registers subclasses

    def run(self, args):
        # Entry point of the subcommand; `args` is the remaining argv list.
        pass
class Help(Command):
    """Built-in subcommand that lists all registered subcommands."""
    def run(self, args):
        # Python 2 print statements.
        print "Available commands:\n"
        for k, v in commands.items():
            print "    %s" % k
import server
def main():
    """Command-line dispatcher: parse the optional --addons-path, discover
    subcommands contributed by addon modules, then run the selected
    subcommand (defaulting to the legacy "server" command).
    """
    args = sys.argv[1:]

    # The only shared option is '--addons-path=' needed to discover additional
    # commands from modules
    if len(args) > 1 and args[0].startswith('--addons-path=') and not args[1].startswith("-"):
        # parse only the addons-path, do not setup the logger...
        tools.config._parse_config([args[0]])
        args = args[1:]

    # Default legacy command
    command = "server"

    # Subcommand discovery
    if len(args) and not args[0].startswith("-"):
        # Import every addon so module-level Command subclasses register
        # themselves in `commands` via the CommandType metaclass.
        for m in module.get_modules():
            m = 'openerp.addons.' + m
            __import__(m)
            #try:
            #except Exception, e:
            #    raise
            #    print e
        command = args[0]
        args = args[1:]

    if command in commands:
        o = commands[command]()
        o.run(args)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Template tags for parsing date strings.
"""
import datetime
from django import template
from dateutil import tz
register = template.Library()
def _parse_datetime(dtstr):
fmts = ["%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%d %H:%M:%S.%f",
"%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"]
for fmt in fmts:
try:
return datetime.datetime.strptime(dtstr, fmt)
except:
pass
class ParseDateNode(template.Node):
    """Template node that re-renders an ISO timestamp string."""
    def render(self, context):
        """Turn an iso formatted time back into a datetime."""
        # Fixed: identity test (`is None`) instead of `== None`; equality
        # against None is both un-idiomatic (PEP 8) and can misbehave for
        # objects with a custom __eq__.
        if context is None:
            return "None"
        date_obj = _parse_datetime(context)
        return date_obj.strftime("%m/%d/%y at %H:%M:%S")
@register.filter(name='parse_date')
def parse_date(value):
    """Render an ISO timestamp string as 'MM/DD/YY at HH:MM:SS'."""
    node = ParseDateNode()
    return node.render(value)
@register.filter(name='parse_datetime')
def parse_datetime(value):
    """Filter wrapper around the module-level timestamp parser."""
    parsed = _parse_datetime(value)
    return parsed
@register.filter(name='parse_local_datetime')
def parse_local_datetime(value):
    """Parse an ISO timestamp, treat it as UTC, convert to local time."""
    parsed = _parse_datetime(value)
    # The stored string carries no zone info; stamp it as UTC first.
    utc_value = parsed.replace(tzinfo=tz.gettz('UTC'))
    return utc_value.astimezone(tz.tzlocal())
@register.filter(name='pretty_date')
def pretty_date(value):
    """Format a datetime as 'DD/MM/YY at HH:MM:SS'."""
    fmt = "%d/%m/%y at %H:%M:%S"
    return value.strftime(fmt)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_message_routing_protocol
short_description: Manage generic message parser profile.
description:
- Manages generic message parser profile for use with the message routing framework.
version_added: 2.9
options:
name:
description:
- Specifies the name of the generic parser profile.
required: True
type: str
description:
description:
- The user defined description of the generic parser profile.
type: str
parent:
description:
- The parent template of this parser profile. Once this value has been set, it cannot be changed.
- When creating a new profile, if this parameter is not specified,
the default is the system-supplied C(genericmsg) profile.
type: str
disable_parser:
description:
- When C(yes), the generic message parser will be disabled ignoring all incoming packets and not directly
send message data.
- This mode supports iRule script protocol implementations that will generate messages from the incoming transport
stream and send outgoing messages on the outgoing transport stream.
type: bool
max_egress_buffer:
description:
- Specifies the maximum size of the send buffer in bytes. If the number of bytes in the send buffer for a
connection exceeds this value, the generic message protocol will stop receiving outgoing messages from the
        router until the size of the buffer drops below this setting.
- The accepted range is between 0 and 4294967295 inclusive.
type: int
max_msg_size:
description:
- Specifies the maximum size of a received message. If a message exceeds this size, the connection will be reset.
- The accepted range is between 0 and 4294967295 inclusive.
type: int
msg_terminator:
description:
- The string of characters used to terminate a message. If the message-terminator is not specified,
the generic message parser will not separate the input stream into messages.
type: str
no_response:
description:
- When set, matching of responses to requests is disabled.
type: bool
partition:
description:
      - Device partition on which to create the parser profile object.
type: str
default: Common
state:
description:
      - When C(present), ensures that the parser profile exists.
      - When C(absent), ensures the parser profile is removed.
type: str
choices:
- present
- absent
default: present
notes:
- Requires BIG-IP >= 14.0.0
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a generic parser
bigip_message_routing_protocol:
name: foo
description: 'This is parser'
no_response: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Modify a generic parser
bigip_message_routing_protocol:
name: foo
no_response: no
max_egress_buffer: 10000
max_msg_size: 2000
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Remove generic parser
bigip_message_routing_protocol:
name: foo
state: absent
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: The user defined description of the parser profile.
returned: changed
type: str
sample: My description
parent:
description: The parent template of this parser profile.
returned: changed
type: str
sample: /Common/genericmsg
disable_parser:
description: Disables generic message parser.
returned: changed
type: bool
sample: yes
max_egress_buffer:
description: The maximum size of the send buffer in bytes.
returned: changed
type: int
sample: 10000
max_msg_size:
description: The maximum size of a received message.
returned: changed
type: int
sample: 4000
msg_terminator:
description: The string of characters used to terminate a message.
returned: changed
type: str
sample: '%%%%'
no_response:
description: Disables matching of responses to requests.
returned: changed
type: bool
sample: yes
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.compare import cmp_str_with_none
from library.module_utils.network.f5.icontrol import tmos_version
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.compare import cmp_str_with_none
from ansible.module_utils.network.f5.icontrol import tmos_version
class Parameters(AnsibleF5Parameters):
    """Shared parameter definitions for the generic parser profile."""
    # Maps REST API attribute names (camelCase) to module option names.
    api_map = {
        'defaultsFrom': 'parent',
        'disableParser': 'disable_parser',
        'maxEgressBuffer': 'max_egress_buffer',
        'maxMessageSize': 'max_msg_size',
        'messageTerminator': 'msg_terminator',
        'noResponse': 'no_response',
    }
    # API-side attribute names sent to the device on create/update.
    api_attributes = [
        'description',
        'defaultsFrom',
        'disableParser',
        'maxEgressBuffer',
        'maxMessageSize',
        'messageTerminator',
        'noResponse',
    ]
    # Keys reported back to the user in the module result.
    returnables = [
        'description',
        'parent',
        'disable_parser',
        'max_egress_buffer',
        'max_msg_size',
        'msg_terminator',
        'no_response',
    ]
    # Keys considered when diffing desired vs. current state.
    updatables = [
        'description',
        'parent',
        'disable_parser',
        'max_egress_buffer',
        'max_msg_size',
        'msg_terminator',
        'no_response',
    ]
    @property
    def no_response(self):
        # flatten_boolean normalizes Ansible truthy input — presumably to
        # 'yes'/'no' strings; see the shared f5 module_utils to confirm.
        return flatten_boolean(self._values['no_response'])
    @property
    def disable_parser(self):
        # Same normalization as `no_response`.
        return flatten_boolean(self._values['disable_parser'])
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API (no extra munging)."""
    pass
class ModuleParameters(Parameters):
    """Parameters as supplied by the user, with validation applied."""
    @property
    def parent(self):
        if self._values['parent'] is None:
            return None
        # Qualify the parent profile name with the target partition.
        return fq_name(self.partition, self._values['parent'])
    @property
    def max_msg_size(self):
        size = self._values['max_msg_size']
        if size is None:
            return None
        if size < 0 or size > 4294967295:
            raise F5ModuleError(
                "Valid 'max_msg_size' must be in range 0 - 4294967295."
            )
        return size
    @property
    def max_egress_buffer(self):
        buffer_size = self._values['max_egress_buffer']
        if buffer_size is None:
            return None
        if buffer_size < 0 or buffer_size > 4294967295:
            raise F5ModuleError(
                "Valid 'max_egress_buffer' must be in range 0 - 4294967295."
            )
        return buffer_size
class Changes(Parameters):
    """Parameter subset representing pending changes."""
    def to_return(self):
        # Gather every returnable property; if any property raises,
        # fall through with whatever was collected so far.  This is a
        # deliberate best-effort pattern shared by the F5 modules.
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            # _filter_params presumably drops unset/None values — see
            # AnsibleF5Parameters in module_utils to confirm.
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    """Changes shaped for transmission to the BIG-IP REST API."""
    pass
class ReportableChanges(Changes):
    """Changes shaped for the module's user-facing result."""
    pass
class Difference(object):
    """Computes per-parameter differences between desired (want) and
    current (have) state."""
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        # Dispatch: use a same-named property on this class when one
        # exists (custom comparison); AttributeError signals "no custom
        # comparator", falling back to the generic inequality check.
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)
    def __default(self, param):
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                # Values differ: report the desired value as the change.
                return attr1
        except AttributeError:
            # `have` lacks the attribute entirely; treat as changed.
            return attr1
    @property
    def parent(self):
        if self.want.parent is None:
            return None
        if self.want.parent != self.have.parent:
            # Re-parenting an existing profile is not supported.
            raise F5ModuleError(
                "The parent router profile cannot be changed."
            )
    @property
    def description(self):
        # None-aware string comparison helper from f5 module_utils.
        return cmp_str_with_none(self.want.description, self.have.description)
    @property
    def msg_terminator(self):
        return cmp_str_with_none(self.want.msg_terminator, self.have.msg_terminator)
class ModuleManager(object):
    """Drives the create/update/delete workflow for the generic message
    parser profile via the BIG-IP iControl REST API."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        # Used on create: every option the user supplied is a "change".
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        # Used on update: collect only genuine want-vs-have differences.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def _announce_deprecations(self, result):
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            # Bugfix: deprecate() is a method of the AnsibleModule object
            # (self.module).  The REST client is constructed from
            # ``self.module.params`` alone, so ``self.client.module`` is
            # never populated and the previous ``self.client.module
            # .deprecate(...)`` call would fail as soon as a deprecation
            # warning was actually queued.
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def version_less_than_14(self):
        # The message-routing framework only exists on TMOS >= 14.0.0.
        version = tmos_version(self.client)
        if LooseVersion(version) < LooseVersion('14.0.0'):
            return True
        return False
    def exec_module(self):
        """Entry point called by main(); returns the Ansible result dict."""
        if self.version_less_than_14():
            raise F5ModuleError('Message routing is not supported on TMOS version below 14.x')
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()
    def absent(self):
        if self.exists():
            return self.remove()
        return False
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Report "would change" without touching the device.
            return True
        self.update_on_device()
        return True
    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the delete actually took effect.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def exists(self):
        # A 404 status/code (or an unparsable body) means "absent".
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def create_on_device(self):
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # 409 covers "already exists" races; 400 is a validation failure.
        if 'code' in response and response['code'] in [400, 409]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return True
    def update_on_device(self):
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def remove_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
    def read_current_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Builds the Ansible argument spec for this module."""
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = {
            'name': {'required': True},
            'description': {},
            'parent': {},
            'disable_parser': {'type': 'bool'},
            'max_egress_buffer': {'type': 'int'},
            'max_msg_size': {'type': 'int'},
            'msg_terminator': {},
            'no_response': {'type': 'bool'},
            'partition': {
                'default': 'Common',
                'fallback': (env_fallback, ['F5_PARTITION'])
            },
            'state': {
                'default': 'present',
                'choices': ['present', 'absent']
            }
        }
        # Start from the shared F5 spec, then layer the module-specific
        # options on top.
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, report."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    try:
        results = ModuleManager(module=module).exec_module()
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
    else:
        module.exit_json(**results)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
use crate::loom::sync::Arc;
use crate::runtime::context;
use crate::runtime::scheduler::{self, current_thread, Inject};
use crate::task::Id;
use backtrace::BacktraceFrame;
use std::cell::Cell;
use std::collections::VecDeque;
use std::ffi::c_void;
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::ptr::{self, NonNull};
use std::task::{self, Poll};
mod symbol;
mod tree;
use symbol::Symbol;
use tree::Tree;
use super::{Notified, OwnedTasks, Schedule};
/// One linear backtrace, as captured by a single `trace_leaf` hit.
type Backtrace = Vec<BacktraceFrame>;
/// A backtrace resolved to [`Symbol`]s — presumably consumed by the
/// `tree`/`symbol` sibling modules; it is not used in this file.
type SymbolTrace = Vec<Symbol>;
/// The ambient backtracing context.
///
/// Lives in runtime thread-local storage and is reached through
/// `runtime::context::with_trace` (see `Context::try_with_current`).
pub(crate) struct Context {
    /// The address of [`Trace::root`] establishes an upper unwinding bound on
    /// the backtraces in `Trace`.
    active_frame: Cell<Option<NonNull<Frame>>>,
    /// The place to stash backtraces.
    collector: Cell<Option<Trace>>,
}
/// A [`Frame`] in an intrusive, doubly-linked tree of [`Frame`]s.
struct Frame {
    /// The location associated with this frame.
    ///
    /// This is the address of `Root::<T>::poll`; `trace_leaf` compares
    /// unwound symbol addresses against it to know where to stop.
    inner_addr: *const c_void,
    /// The parent frame, if any.
    parent: Option<NonNull<Frame>>,
}
/// A tree execution trace.
///
/// Traces are captured with [`Trace::capture`], rooted with [`Trace::root`]
/// and leaved with [`trace_leaf`].
#[derive(Clone, Debug)]
pub(crate) struct Trace {
    // The linear backtraces that comprise this trace. These linear traces can
    // be re-knitted into a tree.
    backtraces: Vec<Backtrace>,
}
pin_project_lite::pin_project! {
    #[derive(Debug, Clone)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    /// A future wrapper that roots traces (captured with [`Trace::capture`]).
    ///
    /// Constructed by [`Trace::root`]; its `Future` impl (below) pushes a
    /// [`Frame`] onto the thread-local frame list around each poll.
    pub struct Root<T> {
        #[pin]
        future: T,
    }
}
// Panic/absence message used when the Tokio thread-local has already been
// destroyed (e.g. during thread shutdown) — see `Context::try_with_current`.
const FAIL_NO_THREAD_LOCAL: &str = "The Tokio thread-local has been destroyed \
                                    as part of shutting down the current \
                                    thread, so collecting a taskdump is not \
                                    possible.";
impl Context {
    pub(crate) const fn new() -> Self {
        Context {
            active_frame: Cell::new(None),
            collector: Cell::new(None),
        }
    }

    /// SAFETY: Callers of this function must ensure that trace frames always
    /// form a valid linked list.
    unsafe fn try_with_current<F, R>(f: F) -> Option<R>
    where
        F: FnOnce(&Self) -> R,
    {
        // Yields `None` when the thread-local has already been torn down
        // (see `FAIL_NO_THREAD_LOCAL`).
        unsafe { crate::runtime::context::with_trace(f) }
    }

    /// SAFETY: Callers of this function must ensure that trace frames always
    /// form a valid linked list.
    unsafe fn with_current_frame<F, R>(f: F) -> R
    where
        F: FnOnce(&Cell<Option<NonNull<Frame>>>) -> R,
    {
        unsafe {
            Self::try_with_current(|context| f(&context.active_frame)).expect(FAIL_NO_THREAD_LOCAL)
        }
    }

    fn with_current_collector<F, R>(f: F) -> R
    where
        F: FnOnce(&Cell<Option<Trace>>) -> R,
    {
        // SAFETY: This call can only access the collector field, so it cannot
        // break the trace frame linked list.
        unsafe {
            Self::try_with_current(|context| f(&context.collector)).expect(FAIL_NO_THREAD_LOCAL)
        }
    }

    /// Produces `true` if the current task is being traced; otherwise false.
    pub(crate) fn is_tracing() -> bool {
        Self::with_current_collector(|maybe_collector| {
            // `Cell` offers no borrow of a non-`Copy` payload, so take the
            // collector out, test it, and put it straight back.
            let collector = maybe_collector.take();
            let result = collector.is_some();
            maybe_collector.set(collector);
            result
        })
    }
}
impl Trace {
    /// Invokes `f`, returning both its result and the collection of backtraces
    /// captured at each sub-invocation of [`trace_leaf`].
    #[inline(never)]
    pub(crate) fn capture<F, R>(f: F) -> (R, Trace)
    where
        F: FnOnce() -> R,
    {
        let collector = Trace { backtraces: vec![] };
        // Install a fresh collector for the duration of `f`, stashing any
        // previously installed one so captures can nest.
        let previous = Context::with_current_collector(|current| current.replace(Some(collector)));
        let result = f();
        // Swap the previous collector back in and recover ours; `trace_leaf`
        // always restores the collector it takes, so it is still present.
        let collector =
            Context::with_current_collector(|current| current.replace(previous)).unwrap();
        (result, collector)
    }

    /// The root of a trace.
    #[inline(never)]
    pub(crate) fn root<F>(future: F) -> Root<F> {
        Root { future }
    }

    /// The linear backtraces collected so far, in capture order.
    pub(crate) fn backtraces(&self) -> &[Backtrace] {
        &self.backtraces
    }
}
/// If this is a sub-invocation of [`Trace::capture`], capture a backtrace.
///
/// The captured backtrace will be returned by [`Trace::capture`].
///
/// Invoking this function does nothing when it is not a sub-invocation
/// [`Trace::capture`].
// This function is marked `#[inline(never)]` to ensure that it gets a distinct `Frame` in the
// backtrace, below which frames should not be included in the backtrace (since they reflect the
// internal implementation details of this crate).
#[inline(never)]
pub(crate) fn trace_leaf(cx: &mut task::Context<'_>) -> Poll<()> {
    // Safety: We don't manipulate the current context's active frame.
    let did_trace = unsafe {
        Context::try_with_current(|context_cell| {
            if let Some(mut collector) = context_cell.collector.take() {
                let mut frames = vec![];
                let mut above_leaf = false;
                if let Some(active_frame) = context_cell.active_frame.get() {
                    let active_frame = active_frame.as_ref();
                    backtrace::trace(|frame| {
                        // `inner_addr` is the address of `Root::poll`;
                        // reaching it means we have unwound up to the root.
                        let below_root = !ptr::eq(frame.symbol_address(), active_frame.inner_addr);
                        // only capture frames above `Trace::leaf` and below
                        // `Trace::root`.
                        if above_leaf && below_root {
                            frames.push(frame.to_owned().into());
                        }
                        // Frames are visited innermost-first, so everything
                        // after this function's own frame is "above" the leaf.
                        if ptr::eq(frame.symbol_address(), trace_leaf as *const _) {
                            above_leaf = true;
                        }
                        // only continue unwinding if we're below `Trace::root`
                        below_root
                    });
                }
                collector.backtraces.push(frames);
                // Put the collector back for subsequent leaf captures.
                context_cell.collector.set(Some(collector));
                true
            } else {
                false
            }
        })
        .unwrap_or(false)
    };
    if did_trace {
        // Use the same logic that `yield_now` uses to send out wakeups after
        // the task yields.
        context::with_scheduler(|scheduler| {
            if let Some(scheduler) = scheduler {
                match scheduler {
                    scheduler::Context::CurrentThread(s) => s.defer.defer(cx.waker()),
                    #[cfg(feature = "rt-multi-thread")]
                    scheduler::Context::MultiThread(s) => s.defer.defer(cx.waker()),
                }
            }
        });
        // Yield here; the deferred waker above re-schedules the task.
        Poll::Pending
    } else {
        Poll::Ready(())
    }
}
impl fmt::Display for Trace {
    /// Renders the trace as a tree, re-knitting the linear backtraces.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let tree = Tree::from_trace(self.clone());
        tree.fmt(f)
    }
}
/// Minimal scope guard: runs `f` when the returned value is dropped.
fn defer<F: FnOnce() -> R, R>(f: F) -> impl Drop {
    use std::mem::ManuallyDrop;

    struct Guard<F: FnOnce() -> R, R>(ManuallyDrop<F>);

    impl<F: FnOnce() -> R, R> Drop for Guard<F, R> {
        #[inline(always)]
        fn drop(&mut self) {
            // SAFETY: `drop` runs at most once, so the closure is taken
            // out of the `ManuallyDrop` exactly once.
            unsafe {
                ManuallyDrop::take(&mut self.0)();
            }
        }
    }

    Guard(ManuallyDrop::new(f))
}
impl<T: Future> Future for Root<T> {
    type Output = T::Output;

    #[inline(never)]
    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        // SAFETY: The context's current frame is restored to its original state
        // before `frame` is dropped.
        unsafe {
            // This frame's address is the upper unwinding bound that
            // `trace_leaf` compares against.
            let mut frame = Frame {
                inner_addr: Self::poll as *const c_void,
                parent: None,
            };
            // Push our frame onto the thread-local frame list...
            Context::with_current_frame(|current| {
                frame.parent = current.take();
                current.set(Some(NonNull::from(&frame)));
            });
            // ...and guarantee it is popped on every exit path (including
            // unwinding) before `frame` goes out of scope.
            let _restore = defer(|| {
                Context::with_current_frame(|current| {
                    current.set(frame.parent);
                });
            });
            let this = self.project();
            this.future.poll(cx)
        }
    }
}
/// Trace and poll all tasks of the `current_thread` runtime.
pub(in crate::runtime) fn trace_current_thread(
    owned: &OwnedTasks<Arc<current_thread::Handle>>,
    local: &mut VecDeque<Notified<Arc<current_thread::Handle>>>,
    injection: &Inject<Arc<current_thread::Handle>>,
) -> Vec<(Id, Trace)> {
    // Drain the local and injection queues so that every runnable task
    // is held exclusively by us.
    let mut drained = Vec::new();
    drained.extend(std::iter::from_fn(|| local.pop_back()));
    drained.extend(std::iter::from_fn(|| injection.pop()));

    // precondition: We have drained the tasks from the injection queue.
    trace_owned(owned, drained)
}
cfg_rt_multi_thread! {
    use crate::loom::sync::Mutex;
    use crate::runtime::scheduler::multi_thread;
    use crate::runtime::scheduler::multi_thread::Synced;
    use crate::runtime::scheduler::inject::Shared;

    /// Trace and poll all tasks of the `multi_thread` runtime.
    ///
    /// ## Safety
    ///
    /// Must be called with the same `synced` that `injection` was created with.
    pub(in crate::runtime) unsafe fn trace_multi_thread(
        owned: &OwnedTasks<Arc<multi_thread::Handle>>,
        local: &mut multi_thread::queue::Local<Arc<multi_thread::Handle>>,
        synced: &Mutex<Synced>,
        injection: &Shared<Arc<multi_thread::Handle>>,
    ) -> Vec<(Id, Trace)> {
        let mut dequeued = Vec::new();

        // clear the local queue
        while let Some(notified) = local.pop() {
            dequeued.push(notified);
        }

        // clear the injection queue
        let mut synced = synced.lock();
        // Safety: exactly the same safety requirements as `trace_multi_thread` function.
        while let Some(notified) = unsafe { injection.pop(&mut synced.inject) } {
            dequeued.push(notified);
        }

        // Release the lock before polling; `trace_owned` runs task code.
        drop(synced);

        // precondition: we have drained the tasks from the local and injection
        // queues.
        trace_owned(owned, dequeued)
    }
}
/// Trace the `OwnedTasks`.
///
/// # Preconditions
///
/// This helper presumes exclusive access to each task. The tasks must not exist
/// in any other queue.
fn trace_owned<S: Schedule>(owned: &OwnedTasks<S>, dequeued: Vec<Notified<S>>) -> Vec<(Id, Trace)> {
let mut tasks = dequeued;
// Notify and trace all un-notified tasks. The dequeued tasks are already
// notified and so do not need to be re-notified.
owned.for_each(|task| {
// Notify the task (and thus make it poll-able) and stash it. This fails
// if the task is already notified. In these cases, we skip tracing the
// task.
if let Some(notified) = task.notify_for_tracing() {
tasks.push(notified);
}
// We do not poll tasks here, since we hold a lock on `owned` and the
// task may complete and need to remove itself from `owned`. Polling
// such a task here would result in a deadlock.
});
tasks
.into_iter()
.map(|task| {
let local_notified = owned.assert_owner(task);
let id = local_notified.task.id();
let ((), trace) = Trace::capture(|| local_notified.run());
(id, trace)
})
.collect()
} | rust | github | https://github.com/tokio-rs/tokio | tokio/src/runtime/task/trace/mod.rs |
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import subprocess
import requests
import sys
parser = argparse.ArgumentParser(description='Script to get coins from a faucet.', epilog='You may need to start with double-dash (--) when providing bitcoin-cli arguments.')
parser.add_argument('-c', '--cmd', dest='cmd', default='bitcoin-cli', help='bitcoin-cli command to use')
parser.add_argument('-f', '--faucet', dest='faucet', default='https://signetfaucet.com/claim', help='URL of the faucet')
parser.add_argument('-a', '--addr', dest='addr', default='', help='Bitcoin address to which the faucet should send')
parser.add_argument('-p', '--password', dest='password', default='', help='Faucet password, if any')
parser.add_argument('bitcoin_cli_args', nargs='*', help='Arguments to pass on to bitcoin-cli (default: -signet)')
args = parser.parse_args()

if args.addr == '':
    if args.bitcoin_cli_args == []:
        args.bitcoin_cli_args = ['-signet']
    # get address for receiving coins
    try:
        # check_output returns bytes; decode so the address is posted
        # as text rather than a bytes repr.
        args.addr = subprocess.check_output(
            [args.cmd] + args.bitcoin_cli_args + ['getnewaddress', 'faucet', 'bech32']
        ).strip().decode()
    except FileNotFoundError:
        print('The binary', args.cmd, 'could not be found.')
        sys.exit(1)

data = {'address': args.addr, 'password': args.password}
try:
    res = requests.post(args.faucet, data=data)
except Exception:
    # Narrowed from a bare `except:` which also swallowed
    # KeyboardInterrupt/SystemExit; exit non-zero on failure.
    print('Unexpected error when contacting faucet:', sys.exc_info()[0])
    sys.exit(1)
print(res.text)
//===--- FunctionConvention.swift - function conventions ------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2023 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
import AST
import SILBridging
/// SIL function parameter and result conventions based on the AST
/// function type and SIL stage.
///
/// Provides Results and Parameters collections. This does not know
/// anything about FunctionArguments. Use ArgumentConventions instead to
/// maps FunctionArguments down to these conventions.
///
/// The underlying FunctionType must be contextual and expanded. SIL
/// has no use for interface types or unexpanded types.
public struct FunctionConvention : CustomStringConvertible {
  let functionType: CanonicalType
  // Whether the enclosing function is in lowered-address mode; this flips
  // which results/parameters count as SIL-indirect below.
  let hasLoweredAddresses: Bool

  public init(for functionType: CanonicalType, in function: Function) {
    self.init(for: functionType, hasLoweredAddresses: function.hasLoweredAddresses)
  }

  public init(for functionType: CanonicalType, hasLoweredAddresses: Bool) {
    assert(!functionType.hasTypeParameter, "requires contextual type")
    self.functionType = functionType
    self.hasLoweredAddresses = hasLoweredAddresses
  }

  /// All results including the error.
  public var results: Results {
    Results(bridged: SILFunctionType_getResultsWithError(functionType.bridged),
            hasLoweredAddresses: hasLoweredAddresses)
  }

  // NOTE(review): presumably nil for non-throwing functions — the bridged
  // ResultInfo initializer is defined outside this file; confirm.
  public var errorResult: ResultInfo? {
    return ResultInfo(
      bridged: SILFunctionType_getErrorResult(functionType.bridged),
      hasLoweredAddresses: hasLoweredAddresses)
  }

  /// Number of indirect results including the error.
  /// This avoids quadratic lazy iteration on indirectResults.count.
  public var indirectSILResultCount: Int {
    // TODO: Return packs directly in lowered-address mode
    return hasLoweredAddresses
      ? SILFunctionType_getNumIndirectFormalResultsWithError(functionType.bridged)
      : SILFunctionType_getNumPackResults(functionType.bridged)
  }

  /// Returns the indirect result - including the error - at `index`.
  public func indirectSILResult(at index: Int) -> ResultInfo {
    let indirectResults = results.lazy.filter {
      hasLoweredAddresses ? $0.isSILIndirect : $0.convention == .pack
    }
    // Note that subscripting a LazyFilterCollection (with the base index, e.g. `Int`) does not work
    // as expected, because it returns the nth element of the base collection!
    // Therefore we need to implement the subscript "manually".
    return indirectResults.enumerated().first{ $0.offset == index }!.element
  }

  public var parameters: Parameters {
    Parameters(bridged: SILFunctionType_getParameters(functionType.bridged),
               hasLoweredAddresses: hasLoweredAddresses)
  }

  public var hasSelfParameter: Bool {
    SILFunctionType_hasSelfParam(functionType.bridged)
  }

  public var yields: Yields {
    Yields(bridged: SILFunctionType_getYields(functionType.bridged),
           hasLoweredAddresses: hasLoweredAddresses)
  }

  /// If the function result depends on any parameters, return a Collection of LifetimeDependenceConventions for the
  /// dependence source parameters.
  public var resultDependencies: LifetimeDependencies? {
    // The helper (defined elsewhere) treats target index == parameters.count
    // as "the result" — see parameterDependencies for parameter targets.
    lifetimeDependencies(for: parameters.count)
  }

  /// If the parameter indexed by 'targetParameterIndex' is the target of any dependencies on other parameters, return a
  /// Collection of LifetimeDependenceConventions for the dependence source parameters.
  public func parameterDependencies(for targetParameterIndex: Int) -> LifetimeDependencies? {
    lifetimeDependencies(for: targetParameterIndex)
  }

  public func hasLifetimeDependencies() -> Bool {
    return SILFunctionType_getLifetimeDependencies(functionType.bridged).count() != 0
  }

  /// True for single-result functions returning with a guaranteed
  /// convention (including .guaranteedAddress in opaque-values mode).
  public var hasGuaranteedResult: Bool {
    if results.count != 1 {
      return false
    }
    if hasLoweredAddresses {
      return results[0].convention == .guaranteed
    }
    return results[0].convention == .guaranteed || results[0].convention == .guaranteedAddress
  }

  /// True for single-result functions whose result is address-like
  /// (.inout always; .guaranteedAddress only in lowered-address mode).
  public var hasAddressResult: Bool {
    if results.count != 1 {
      return false
    }
    if hasLoweredAddresses {
      return results[0].convention == .guaranteedAddress || results[0].convention == .inout
    }
    return results[0].convention == .inout
  }

  public var description: String {
    var str = functionType.description
    for paramIdx in 0..<parameters.count {
      str += "\nparameter: " + parameters[paramIdx].description
      if let deps = parameterDependencies(for: paramIdx) {
        str += "\n lifetime: \(deps)"
      }
    }
    results.forEach { str += "\n result: " + $0.description }
    if let deps = resultDependencies {
      str += "\n lifetime: \(deps)"
    }
    return str
  }
}
/// A function result type and the rules for returning it in SIL.
public struct ResultInfo : CustomStringConvertible {
  /// The unsubstituted parameter type that describes the abstract
  /// calling convention of the parameter.
  ///
  /// TODO: For most purposes, you probably want \c returnValueType.
  public let type: CanonicalType
  public let convention: ResultConvention
  // Presumably a bitfield of `Flag` raw values (kept consistent with
  // SILResultInfo) — confirm against the C++ side.
  public let options: UInt8
  public let hasLoweredAddresses: Bool

  // Must be kept consistent with 'SILResultInfo::Flag'
  public enum Flag : UInt8 {
    case notDifferentiable = 0x1
    case isSending = 0x2
  };

  public init(type: CanonicalType, convention: ResultConvention, options: UInt8, hasLoweredAddresses: Bool) {
    self.type = type
    self.convention = convention
    self.options = options
    self.hasLoweredAddresses = hasLoweredAddresses
  }

  /// Is this result returned indirectly in SIL? Most formally
  /// indirect results can be returned directly in SIL. This depends
  /// on whether the calling function has lowered addresses.
  public var isSILIndirect: Bool {
    switch convention {
    case .indirect:
      // Without lowered addresses, only existential archetypes with an
      // error constraint stay indirect.
      return hasLoweredAddresses || type.isExistentialArchetypeWithError
    case .pack:
      return true
    case .owned, .unowned, .unownedInnerPointer, .autoreleased, .guaranteed, .guaranteedAddress, .inout:
      return false
    }
  }

  public var description: String {
    convention.description + ": " + type.description
  }

  public func getReturnValueType(function: Function) -> CanonicalType {
    // NOTE(review): `_bridged` is provided by an extension outside this
    // file; it reconstructs the bridged SILResultInfo for this result.
    CanonicalType(bridged: self._bridged.getReturnValueType(function.bridged))
  }
}
extension FunctionConvention {
  /// Indexed view over the bridged result-info array.
  public struct Results : Collection {
    let bridged: BridgedResultInfoArray
    // Forwarded into each ResultInfo so it can compute `isSILIndirect`.
    let hasLoweredAddresses: Bool

    public var startIndex: Int { 0 }
    public var endIndex: Int { bridged.count() }

    public func index(after index: Int) -> Int {
      return index + 1
    }

    public subscript(_ index: Int) -> ResultInfo {
      return ResultInfo(bridged: bridged.at(index),
                        hasLoweredAddresses: hasLoweredAddresses)
    }
  }
}
/// A function parameter type and the rules for passing it in SIL.
public struct ParameterInfo : CustomStringConvertible {
  /// The parameter type that describes the abstract calling
  /// convention of the parameter.
  public let type: CanonicalType
  /// How the argument is passed (owned, guaranteed, indirect, ...).
  public let convention: ArgumentConvention
  /// Raw flag bits; see `Flag` for the individual bit values.
  public let options: UInt8
  /// Whether the enclosing function operates on lowered addresses;
  /// this decides whether a formally indirect parameter is indirect in SIL.
  public let hasLoweredAddresses: Bool

  // Must be kept consistent with 'SILParameterInfo::Flag'
  public enum Flag : UInt8 {
    case notDifferentiable = 0x1
    case sending = 0x2
    case isolated = 0x4
    case implicitLeading = 0x8
    case const = 0x10
  };

  public init(type: CanonicalType, convention: ArgumentConvention, options: UInt8, hasLoweredAddresses: Bool) {
    self.type = type
    self.convention = convention
    self.options = options
    self.hasLoweredAddresses = hasLoweredAddresses
  }

  /// Is this parameter passed indirectly in SIL? Most formally
  /// indirect results can be passed directly in SIL (opaque values
  /// mode). This depends on whether the calling function has lowered
  /// addresses.
  public var isSILIndirect: Bool {
    switch convention {
    case .indirectIn, .indirectInGuaranteed:
      // Direct in opaque-values mode, except existential archetypes
      // with error, which stay indirect.
      return hasLoweredAddresses || type.isExistentialArchetypeWithError
    case .indirectInout, .indirectInoutAliasable, .indirectInCXX:
      return true
    case .directOwned, .directUnowned, .directGuaranteed:
      return false
    case .packInout, .packOwned, .packGuaranteed:
      // Pack arguments are always addresses.
      return true
    case .indirectOut, .packOut:
      // Out-conventions describe results, never parameters.
      fatalError("invalid parameter convention")
    }
  }

  public var description: String {
    "\(convention): \(type)"
  }

  /// True if the given flag bit is set in `options`.
  public func hasOption(_ flag: Flag) -> Bool {
    return options & flag.rawValue != 0
  }

  /// The substituted type of the argument as seen within `function`.
  public func getArgumentType(function: Function) -> CanonicalType {
    CanonicalType(bridged: self._bridged.getArgumentType(function.bridged))
  }
}
extension FunctionConvention {
  /// Indexed view over the bridged parameter-info array.
  public struct Parameters : RandomAccessCollection {
    let bridged: BridgedParameterInfoArray
    // Forwarded into each ParameterInfo for `isSILIndirect`.
    let hasLoweredAddresses: Bool

    public var startIndex: Int { 0 }
    public var endIndex: Int { bridged.count() }

    public func index(after index: Int) -> Int {
      return index + 1
    }

    public subscript(_ index: Int) -> ParameterInfo {
      return ParameterInfo(bridged: bridged.at(index),
                           hasLoweredAddresses: hasLoweredAddresses)
    }
  }
}
extension FunctionConvention {
  /// Indexed view over the bridged yield-info array. Yields reuse
  /// `ParameterInfo` as their element type.
  public struct Yields : Collection {
    let bridged: BridgedYieldInfoArray
    // Forwarded into each ParameterInfo for `isSILIndirect`.
    let hasLoweredAddresses: Bool

    public var startIndex: Int { 0 }
    public var endIndex: Int { bridged.count() }

    public func index(after index: Int) -> Int {
      return index + 1
    }

    public subscript(_ index: Int) -> ParameterInfo {
      return ParameterInfo(bridged: bridged.at(index),
                           hasLoweredAddresses: hasLoweredAddresses)
    }
  }
}
/// How a lifetime-dependent value relates to its source: it either
/// inherits the source's lifetime or is scoped within it.
public enum LifetimeDependenceConvention : CustomStringConvertible {
  case inherit
  case scope(addressable: Bool, addressableForDeps: Bool)

  /// True for the `.scope` case.
  public var isScoped: Bool {
    if case .scope = self {
      return true
    }
    return false
  }

  /// Whether the scoped dependence is on the source's address, either
  /// unconditionally (`addressable`) or because the value's type is
  /// addressable-for-dependencies in this function.
  public func isAddressable(for value: Value) -> Bool {
    guard case let .scope(addressable, addressableForDeps) = self else {
      return false
    }
    return addressable || (addressableForDeps && value.type.isAddressableForDeps(in: value.parentFunction))
  }

  public var description: String {
    switch self {
    case .inherit: return "inherit"
    case .scope: return "scope"
    }
  }
}
extension FunctionConvention {
  /// Look up the lifetime dependence info whose target is `targetIndex`,
  /// or nil if that target carries no dependencies.
  ///
  /// 'targetIndex' is either the parameter index or parameters.count for the function result.
  private func lifetimeDependencies(for targetIndex: Int) -> LifetimeDependencies? {
    let bridgedDependenceInfoArray = SILFunctionType_getLifetimeDependencies(functionType.bridged)
    for infoIndex in 0..<bridgedDependenceInfoArray.count() {
      let bridgedDependenceInfo = bridgedDependenceInfoArray.at(infoIndex)
      if bridgedDependenceInfo.targetIndex == targetIndex {
        return LifetimeDependencies(bridged: bridgedDependenceInfo,
                                    parameterCount: parameters.count,
                                    hasSelfParameter: hasSelfParameter)
      }
    }
    return nil
  }

  /// Collection of LifetimeDependenceConvention? that parallels parameters.
  public struct LifetimeDependencies : Collection, CustomStringConvertible {
    let bridged: BridgedLifetimeDependenceInfo
    let paramCount: Int
    let hasSelfParam: Bool

    init(bridged: BridgedLifetimeDependenceInfo, parameterCount: Int,
         hasSelfParameter: Bool) {
      assert(!bridged.empty())
      self.bridged = bridged
      self.paramCount = parameterCount
      self.hasSelfParam = hasSelfParameter
    }

    public var startIndex: Int { 0 }
    public var endIndex: Int { paramCount }

    public func index(after index: Int) -> Int {
      return index + 1
    }

    /// The dependence convention of the parameter at `index`, or nil
    /// if that parameter carries no lifetime dependence.
    public subscript(_ index: Int) -> LifetimeDependenceConvention? {
      let inherit = bridged.checkInherit(bridgedIndex(parameterIndex: index))
      let scope = bridged.checkScope(bridgedIndex(parameterIndex: index))
      if inherit {
        // A dependence is either inherited or scoped, never both.
        // (Fixed typo "mutualy" in the diagnostic.)
        assert(!scope, "mutually exclusive lifetime specifiers")
        return .inherit
      }
      if scope {
        let addressable = bridged.checkAddressable(bridgedIndex(parameterIndex: index))
        let addressableForDeps = bridged.checkConditionallyAddressable(bridgedIndex(parameterIndex: index))
        return .scope(addressable: addressable, addressableForDeps: addressableForDeps)
      }
      return nil
    }

    // Currently the identity mapping; kept as a hook in case SIL
    // parameter indices and bridged indices ever diverge.
    private func bridgedIndex(parameterIndex: Int) -> Int {
      return parameterIndex
    }

    public var description: String {
      String(taking: bridged.getDebugDescription()) +
      "\nparamCount: \(paramCount) self: \(hasSelfParam)"
    }
  }
}
/// How a SIL function returns a result to its caller.
public enum ResultConvention : CustomStringConvertible {
  /// This result is returned indirectly, i.e. by passing the address of an uninitialized object in memory. The callee is responsible for leaving an initialized object at this address. The callee may assume that the address does not alias any valid object.
  case indirect

  /// The caller is responsible for destroying this return value. Its type is non-trivial.
  case owned

  /// The caller is responsible for using the returned address within a valid
  /// scope. This is valid only for borrow accessors.
  case guaranteedAddress

  /// The caller is responsible for using the returned value within a valid
  /// scope. This is valid only for borrow accessors.
  case guaranteed

  /// The caller is responsible for mutating the returned address within a valid
  /// scope. This is valid only for mutate accessors.
  case `inout`

  /// The caller is not responsible for destroying this return value. Its type may be trivial, or it may simply be offered unsafely. It is valid at the instant of the return, but further operations may invalidate it.
  case unowned

  /// The caller is not responsible for destroying this return value. The validity of the return value is dependent on the 'self' parameter, so it may be invalidated if that parameter is released.
  case unownedInnerPointer

  /// This value has been (or may have been) returned autoreleased. The caller should make an effort to reclaim the autorelease. The type must be a class or class existential type, and this must be the only return value.
  case autoreleased

  /// This value is a pack that is returned indirectly by passing a pack address (which may or may not be further indirected, depending on the pack type). The callee is responsible for leaving an initialized object in each element of the pack.
  case pack

  /// Does this result convention require indirect storage? This reflects a FunctionType's conventions, as opposed to the SIL conventions that dictate SILValue types.
  public var isASTIndirect: Bool {
    switch self {
    case .indirect, .pack:
      return true
    default:
      return false
    }
  }

  public var isASTDirect: Bool {
    return !isASTIndirect
  }

  public var description: String {
    switch self {
    case .indirect:
      return "indirect"
    case .owned:
      return "owned"
    case .unowned:
      return "unowned"
    case .unownedInnerPointer:
      return "unownedInnerPointer"
    case .autoreleased:
      return "autoreleased"
    case .pack:
      return "pack"
    case .guaranteed:
      return "guaranteed"
    case .guaranteedAddress:
      return "guaranteedAddress"
    case .inout:
      return "inout"
    }
  }
}
// Bridging utilities
extension ResultInfo {
  /// Construct from the C++ representation, capturing the enclosing
  /// function's address-lowering state.
  init(bridged: BridgedResultInfo, hasLoweredAddresses: Bool) {
    self.type = CanonicalType(bridged: bridged.type)
    self.convention = ResultConvention(bridged: bridged.convention)
    self.hasLoweredAddresses = hasLoweredAddresses
    self.options = bridged.options
  }

  /// Failable variant for optional bridged result infos; fails when
  /// the bridged type is null.
  init?(bridged: OptionalBridgedResultInfo, hasLoweredAddresses: Bool) {
    if bridged.type.getRawType().type == nil {
      return nil
    }
    self.type = CanonicalType(bridged: bridged.type)
    self.convention = ResultConvention(bridged: bridged.convention)
    self.hasLoweredAddresses = hasLoweredAddresses
    self.options = bridged.options
  }

  /// Round-trip back to the C++ representation. `hasLoweredAddresses`
  /// is SIL-side state and is not part of the bridged value.
  public var _bridged: BridgedResultInfo {
    BridgedResultInfo(type.bridged, convention.bridged, options)
  }
}
extension ResultConvention {
  /// Construct from the C++ enum.
  init(bridged: BridgedResultConvention) {
    switch bridged {
    case .Indirect: self = .indirect
    case .Owned: self = .owned
    case .Unowned: self = .unowned
    case .UnownedInnerPointer: self = .unownedInnerPointer
    case .Autoreleased: self = .autoreleased
    case .Pack: self = .pack
    case .Guaranteed: self = .guaranteed
    case .GuaranteedAddress: self = .guaranteedAddress
    case .Inout: self = .inout
    default:
      // The bridged enum is non-exhaustive from Swift's point of view.
      fatalError("unsupported result convention")
    }
  }

  /// Convert back to the C++ enum; exhaustive, so no default case.
  var bridged: BridgedResultConvention {
    switch self {
    case .indirect: return .Indirect
    case .owned: return .Owned
    case .unowned: return .Unowned
    case .unownedInnerPointer: return .UnownedInnerPointer
    case .autoreleased: return .Autoreleased
    case .pack: return .Pack
    case .guaranteed: return .Guaranteed
    case .guaranteedAddress: return .GuaranteedAddress
    case .inout: return .Inout
    }
  }
}
extension ParameterInfo {
/// Construct from the C++ representation, capturing the enclosing
/// function's address-lowering state.
init(bridged: BridgedParameterInfo, hasLoweredAddresses: Bool) {
  self.type = CanonicalType(bridged: bridged.type)
  self.convention = bridged.convention.convention
  self.options = bridged.options
  self.hasLoweredAddresses = hasLoweredAddresses
}
/// Round-trip back to the C++ representation; `hasLoweredAddresses`
/// is SIL-side state and is not part of the bridged value.
public var _bridged: BridgedParameterInfo {
  BridgedParameterInfo(type.bridged, convention.bridged, options)
}
} | swift | github | https://github.com/apple/swift | SwiftCompilerSources/Sources/SIL/FunctionConvention.swift |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from keystone.common import json_home
from keystone.common import wsgi
from keystone.contrib.federation import controllers
# Helpers that build JSON Home relation URLs scoped to the
# OS-FEDERATION extension (version 1.0).
build_resource_relation = functools.partial(
    json_home.build_v3_extension_resource_relation,
    extension_name='OS-FEDERATION', extension_version='1.0')
build_parameter_relation = functools.partial(
    json_home.build_v3_extension_parameter_relation,
    extension_name='OS-FEDERATION', extension_version='1.0')

# Relation documents for the path parameters shared by several routes.
IDP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='idp_id')
PROTOCOL_ID_PARAMETER_RELATION = build_parameter_relation(
    parameter_name='protocol_id')
SP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='sp_id')
class FederationExtension(wsgi.V3ExtensionRouter):
"""API Endpoints for the Federation extension.
The API looks like::
PUT /OS-FEDERATION/identity_providers/{idp_id}
GET /OS-FEDERATION/identity_providers
GET /OS-FEDERATION/identity_providers/{idp_id}
DELETE /OS-FEDERATION/identity_providers/{idp_id}
PATCH /OS-FEDERATION/identity_providers/{idp_id}
PUT /OS-FEDERATION/identity_providers/
{idp_id}/protocols/{protocol_id}
GET /OS-FEDERATION/identity_providers/
{idp_id}/protocols
GET /OS-FEDERATION/identity_providers/
{idp_id}/protocols/{protocol_id}
PATCH /OS-FEDERATION/identity_providers/
{idp_id}/protocols/{protocol_id}
DELETE /OS-FEDERATION/identity_providers/
{idp_id}/protocols/{protocol_id}
PUT /OS-FEDERATION/mappings
GET /OS-FEDERATION/mappings
PATCH /OS-FEDERATION/mappings/{mapping_id}
GET /OS-FEDERATION/mappings/{mapping_id}
DELETE /OS-FEDERATION/mappings/{mapping_id}
GET /OS-FEDERATION/projects
GET /OS-FEDERATION/domains
PUT /OS-FEDERATION/service_providers/{sp_id}
GET /OS-FEDERATION/service_providers
GET /OS-FEDERATION/service_providers/{sp_id}
DELETE /OS-FEDERATION/service_providers/{sp_id}
PATCH /OS-FEDERATION/service_providers/{sp_id}
GET /OS-FEDERATION/identity_providers/{identity_provider}/
protocols/{protocol}/auth
POST /OS-FEDERATION/identity_providers/{identity_provider}/
protocols/{protocol}/auth
GET /auth/OS-FEDERATION/identity_providers/
{idp_id}/protocols/{protocol_id}/websso
?origin=https%3A//horizon.example.com
POST /auth/OS-FEDERATION/identity_providers/
{idp_id}/protocols/{protocol_id}/websso
?origin=https%3A//horizon.example.com
POST /auth/OS-FEDERATION/saml2
POST /auth/OS-FEDERATION/saml2/ecp
GET /OS-FEDERATION/saml2/metadata
GET /auth/OS-FEDERATION/websso/{protocol_id}
?origin=https%3A//horizon.example.com
POST /auth/OS-FEDERATION/websso/{protocol_id}
?origin=https%3A//horizon.example.com
"""
def _construct_url(self, suffix):
    """Return *suffix* anchored under the OS-FEDERATION URL prefix."""
    return "/OS-FEDERATION/" + suffix
def add_routes(self, mapper):
    """Register every OS-FEDERATION route on *mapper*.

    Wires each federation controller to its CRUD and auth endpoints and
    attaches JSON Home relation metadata for API discoverability.
    """
    auth_controller = controllers.Auth()
    idp_controller = controllers.IdentityProvider()
    protocol_controller = controllers.FederationProtocol()
    mapping_controller = controllers.MappingController()
    project_controller = controllers.ProjectAssignmentV3()
    domain_controller = controllers.DomainV3()
    saml_metadata_controller = controllers.SAMLMetadataV3()
    sp_controller = controllers.ServiceProvider()
    # Identity Provider CRUD operations
    self._add_resource(
        mapper, idp_controller,
        path=self._construct_url('identity_providers/{idp_id}'),
        get_action='get_identity_provider',
        put_action='create_identity_provider',
        patch_action='update_identity_provider',
        delete_action='delete_identity_provider',
        rel=build_resource_relation(resource_name='identity_provider'),
        path_vars={
            'idp_id': IDP_ID_PARAMETER_RELATION,
        })
    self._add_resource(
        mapper, idp_controller,
        path=self._construct_url('identity_providers'),
        get_action='list_identity_providers',
        rel=build_resource_relation(resource_name='identity_providers'))
    # Protocol CRUD operations
    self._add_resource(
        mapper, protocol_controller,
        path=self._construct_url('identity_providers/{idp_id}/protocols/'
                                 '{protocol_id}'),
        get_action='get_protocol',
        put_action='create_protocol',
        patch_action='update_protocol',
        delete_action='delete_protocol',
        rel=build_resource_relation(
            resource_name='identity_provider_protocol'),
        path_vars={
            'idp_id': IDP_ID_PARAMETER_RELATION,
            'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
        })
    self._add_resource(
        mapper, protocol_controller,
        path=self._construct_url('identity_providers/{idp_id}/protocols'),
        get_action='list_protocols',
        rel=build_resource_relation(
            resource_name='identity_provider_protocols'),
        path_vars={
            'idp_id': IDP_ID_PARAMETER_RELATION,
        })
    # Mapping CRUD operations
    self._add_resource(
        mapper, mapping_controller,
        path=self._construct_url('mappings/{mapping_id}'),
        get_action='get_mapping',
        put_action='create_mapping',
        patch_action='update_mapping',
        delete_action='delete_mapping',
        rel=build_resource_relation(resource_name='mapping'),
        path_vars={
            'mapping_id': build_parameter_relation(
                parameter_name='mapping_id'),
        })
    self._add_resource(
        mapper, mapping_controller,
        path=self._construct_url('mappings'),
        get_action='list_mappings',
        rel=build_resource_relation(resource_name='mappings'))
    # Service Providers CRUD operations
    self._add_resource(
        mapper, sp_controller,
        path=self._construct_url('service_providers/{sp_id}'),
        get_action='get_service_provider',
        put_action='create_service_provider',
        patch_action='update_service_provider',
        delete_action='delete_service_provider',
        rel=build_resource_relation(resource_name='service_provider'),
        path_vars={
            'sp_id': SP_ID_PARAMETER_RELATION,
        })
    self._add_resource(
        mapper, sp_controller,
        path=self._construct_url('service_providers'),
        get_action='list_service_providers',
        rel=build_resource_relation(resource_name='service_providers'))
    # Domain/project listings for federated groups; each is exposed
    # both under OS-FEDERATION and under the newer /auth path.
    self._add_resource(
        mapper, domain_controller,
        path=self._construct_url('domains'),
        new_path='/auth/domains',
        get_action='list_domains_for_groups',
        rel=build_resource_relation(resource_name='domains'))
    self._add_resource(
        mapper, project_controller,
        path=self._construct_url('projects'),
        new_path='/auth/projects',
        get_action='list_projects_for_groups',
        rel=build_resource_relation(resource_name='projects'))
    # Auth operations
    self._add_resource(
        mapper, auth_controller,
        path=self._construct_url('identity_providers/{identity_provider}/'
                                 'protocols/{protocol}/auth'),
        get_post_action='federated_authentication',
        rel=build_resource_relation(
            resource_name='identity_provider_protocol_auth'),
        path_vars={
            'identity_provider': IDP_ID_PARAMETER_RELATION,
            'protocol': PROTOCOL_ID_PARAMETER_RELATION,
        })
    self._add_resource(
        mapper, auth_controller,
        path='/auth' + self._construct_url('saml2'),
        post_action='create_saml_assertion',
        rel=build_resource_relation(resource_name='saml2'))
    self._add_resource(
        mapper, auth_controller,
        path='/auth' + self._construct_url('saml2/ecp'),
        post_action='create_ecp_assertion',
        rel=build_resource_relation(resource_name='ecp'))
    self._add_resource(
        mapper, auth_controller,
        path='/auth' + self._construct_url('websso/{protocol_id}'),
        get_post_action='federated_sso_auth',
        rel=build_resource_relation(resource_name='websso'),
        path_vars={
            'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
        })
    self._add_resource(
        mapper, auth_controller,
        path='/auth' + self._construct_url(
            'identity_providers/{idp_id}/protocols/{protocol_id}/websso'),
        get_post_action='federated_idp_specific_sso_auth',
        rel=build_resource_relation(resource_name='identity_providers'),
        path_vars={
            'idp_id': IDP_ID_PARAMETER_RELATION,
            'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
        })
    # Keystone-Identity-Provider metadata endpoint
    self._add_resource(
        mapper, saml_metadata_controller,
        path=self._construct_url('saml2/metadata'),
        get_action='get_metadata',
rel=build_resource_relation(resource_name='metadata')) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# Copyright 2014-16 Akretion - Alexis de Lattre <alexis.delattre@akretion.com>
# Copyright 2014 Serv. Tecnol. Avanzados - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import models, fields, api
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
# Payment mode applied to this invoice; editable only while draft and
# protected from deletion while referenced (ondelete='restrict').
payment_mode_id = fields.Many2one(
    comodel_name='account.payment.mode', string="Payment Mode",
    ondelete='restrict',
    readonly=True, states={'draft': [('readonly', False)]})
# Mirrors the payment method's flag; lets the UI decide whether a bank
# account must be selected.
bank_account_required = fields.Boolean(
    related='payment_mode_id.payment_method_id.bank_account_required',
    readonly=True)
# Tighten the deletion policy of the standard partner_bank_id field.
partner_bank_id = fields.Many2one(ondelete='restrict')
@api.onchange('partner_id', 'company_id')
def _onchange_partner_id(self):
    """Propagate the partner's default payment mode to the invoice.

    Supplier invoices get the partner's supplier payment mode and, when
    that mode is outbound and requires a bank account, the commercial
    partner's first bank account. Customer invoices only get the
    customer payment mode.
    """
    res = super(AccountInvoice, self)._onchange_partner_id()
    if self.partner_id:
        if self.type == 'in_invoice':
            pay_mode = self.partner_id.supplier_payment_mode_id
            self.payment_mode_id = pay_mode
            if (
                    pay_mode and
                    pay_mode.payment_type == 'outbound' and
                    pay_mode.payment_method_id.bank_account_required and
                    self.commercial_partner_id.bank_ids):
                self.partner_bank_id = \
                    self.commercial_partner_id.bank_ids[0]
        elif self.type == 'out_invoice':
            # No bank account assignation is done here as this is only
            # needed for printing purposes and it can conflict with
            # SEPA direct debit payments. Current report prints it.
            self.payment_mode_id = self.partner_id.customer_payment_mode_id
    else:
        # No partner selected: reset the mode (and, for supplier
        # invoices, the bank account) so stale defaults don't linger.
        self.payment_mode_id = False
        if self.type == 'in_invoice':
            self.partner_bank_id = False
    return res
@api.model
def create(self, vals):
    """Fill the payment_mode_id from the partner if none is provided on
    creation, using same method as upstream."""
    # Onchange methods to replay, mapped to the fields they may set.
    onchanges = {
        '_onchange_partner_id': ['payment_mode_id'],
    }
    for onchange_method, changed_fields in onchanges.items():
        if any(f not in vals for f in changed_fields):
            # Run the onchange on a virtual record, then copy any
            # still-missing computed values back into vals.
            invoice = self.new(vals)
            getattr(invoice, onchange_method)()
            for field in changed_fields:
                if field not in vals and invoice[field]:
                    vals[field] = invoice._fields[field].convert_to_write(
                        invoice[field], invoice,
                    )
    return super(AccountInvoice, self).create(vals)
@api.onchange('payment_mode_id')
def payment_mode_id_change(self):
    """Clear the partner bank when the selected mode doesn't use one."""
    mode = self.payment_mode_id
    if not mode:
        # No payment mode: a bank account selection makes no sense.
        self.partner_bank_id = False
    elif (mode.payment_type == 'outbound'
            and not mode.payment_method_id.bank_account_required):
        # Outbound mode whose method doesn't need a bank account:
        # drop any previously selected one.
        self.partner_bank_id = False
@api.model
def line_get_convert(self, line, part):
    """Copy payment mode from invoice to account move line"""
    res = super(AccountInvoice, self).line_get_convert(line, part)
    # Only the counterpart ('dest') line of an invoice carries the
    # payment mode.
    if line.get('type') == 'dest' and line.get('invoice_id'):
        invoice = self.browse(line['invoice_id'])
        res['payment_mode_id'] = invoice.payment_mode_id.id or False
    return res
# Copying the payment mode from the invoice to its refund by default
# is a good idea because the most common way of "paying" a refund is to
# deduct it from the payment of the next invoice (and OCA/bank-payment
# has allowed negative payment lines since March 2016).
@api.model
def _prepare_refund(
        self, invoice, date_invoice=None, date=None, description=None,
        journal_id=None):
    """Propagate the payment mode (and, for supplier invoices, the
    bank account) from the original invoice to its refund."""
    vals = super(AccountInvoice, self)._prepare_refund(
        invoice, date_invoice=date_invoice, date=date,
        description=description, journal_id=journal_id)
    vals['payment_mode_id'] = invoice.payment_mode_id.id
    if invoice.type == 'in_invoice':
        vals['partner_bank_id'] = invoice.partner_bank_id.id
    return vals
def partner_banks_to_show(self):
    """Return the bank account(s) to display on the invoice report.

    Priority: the invoice's explicit partner_bank_id, then the
    journal account(s) when the payment mode says to show them, then
    the SEPA direct debit mandate's account.
    """
    self.ensure_one()
    if self.partner_bank_id:
        return self.partner_bank_id
    if self.payment_mode_id.show_bank_account_from_journal:
        if self.payment_mode_id.bank_account_link == 'fixed':
            return self.payment_mode_id.fixed_journal_id.bank_account_id
        else:
            return self.payment_mode_id.variable_journal_ids.mapped(
                'bank_account_id')
    if self.payment_mode_id.payment_method_id.code == \
            'sepa_direct_debit':  # pragma: no cover
        return (self.mandate_id.partner_bank_id or
                self.partner_id.valid_mandate_id.partner_bank_id)
    # Return this as empty recordset
return self.partner_bank_id | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkg5
author: "Peter Oliver (@mavit)"
short_description: Manages packages with the Solaris 11 Image Packaging System
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
notes:
- The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
options:
name:
description:
- An FRMI of the package(s) to be installed/removed/updated.
- Multiple packages may be specified, separated by C(,).
required: true
state:
description:
- Whether to install (I(present), I(latest)), or remove (I(absent)) a
package.
required: false
default: present
choices: [ present, latest, absent ]
accept_licenses:
description:
- Accept any licences.
required: false
default: false
choices: [ true, false ]
aliases: [ accept_licences, accept ]
'''
EXAMPLES = '''
# Install Vim:
- pkg5:
name: editor/vim
# Remove finger daemon:
- pkg5:
name: service/network/finger
state: absent
# Install several packages at once:
- pkg5:
name:
- /file/gnu-findutils
- /text/gnu-grep
'''
def main():
    """Entry point: parse module arguments and converge package state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type='list'),
            state=dict(
                default='present',
                choices=[
                    'present',
                    'installed',
                    'latest',
                    'absent',
                    'uninstalled',
                    'removed',
                ]
            ),
            accept_licenses=dict(
                type='bool',
                default=False,
                aliases=['accept_licences', 'accept'],
            ),
        ),
        supports_check_mode=True,
    )

    params = module.params
    packages = []

    # pkg(5) FRMIs include a comma before the release number, but
    # AnsibleModule will have split this into multiple items for us.
    # Try to spot where this has happened and fix it.
    # NOTE(review): `re` is presumably provided by the wildcard import
    # from ansible.module_utils.basic at the bottom of this file --
    # confirm, since there is no explicit `import re` here.
    for fragment in params['name']:
        if (
            re.search('^\d+(?:\.\d+)*', fragment)
            and packages and re.search('@[^,]*$', packages[-1])
        ):
            # The fragment looks like a version number and the previous
            # item ends in '@...': re-join with the stripped comma.
            packages[-1] += ',' + fragment
        else:
            packages.append(fragment)

    if params['state'] in ['present', 'installed']:
        ensure(module, 'present', packages, params)
    elif params['state'] in ['latest']:
        ensure(module, 'latest', packages, params)
    elif params['state'] in ['absent', 'uninstalled', 'removed']:
        ensure(module, 'absent', packages, params)
def ensure(module, state, packages, params):
    """Install, upgrade, or remove *packages* so they match *state*.

    :param module: the AnsibleModule driving this run (run_command,
        check mode, exit/fail reporting)
    :param state: one of 'present', 'latest', 'absent'
    :param packages: list of pkg(5) FMRIs to reconcile
    :param params: module parameters (read for 'accept_licenses')

    Exits via ``module.exit_json`` on success, ``module.fail_json``
    when pkg(5) returns a non-zero status.
    """
    response = {
        'results': [],
        'msg': '',
    }

    # For each target state: which packages need work, and which pkg(5)
    # subcommand performs that work.
    behaviour = {
        'present': {
            'filter': lambda p: not is_installed(module, p),
            'subcommand': 'install',
        },
        'latest': {
            'filter': lambda p: (
                not is_installed(module, p) or not is_latest(module, p)
            ),
            'subcommand': 'install',
        },
        'absent': {
            'filter': lambda p: is_installed(module, p),
            'subcommand': 'uninstall',
        },
    }

    if module.check_mode:
        dry_run = ['-n']
    else:
        dry_run = []

    if params['accept_licenses']:
        accept_licenses = ['--accept']
    else:
        accept_licenses = []

    # BUG FIX: materialize the selection. On Python 3, filter() returns
    # a lazy iterator which is *always* truthy, so the original
    # `to_modify = filter(...)` made the `if to_modify:` below run
    # pkg(5) and report changed=True even when nothing needed doing.
    to_modify = [p for p in packages if behaviour[state]['filter'](p)]

    if to_modify:
        rc, out, err = module.run_command(
            [
                'pkg', behaviour[state]['subcommand']
            ]
            + dry_run
            + accept_licenses
            + [
                '-q', '--'
            ] + to_modify
        )
        response['rc'] = rc
        response['results'].append(out)
        response['msg'] += err
        response['changed'] = True
        if rc != 0:
            module.fail_json(**response)

    module.exit_json(**response)
def is_installed(module, package):
    """Return True if *package* is present in the image.

    ``pkg list`` exits with status 0 exactly when the package is
    installed.
    """
    rc, _out, _err = module.run_command(['pkg', 'list', '--', package])
    return rc == 0
def is_latest(module, package):
    """Return True if no newer version of *package* is available.

    ``pkg list -u`` exits non-zero when there is nothing to update,
    i.e. when the installed version is already the latest.
    """
    rc, _out, _err = module.run_command(['pkg', 'list', '-u', '--', package])
    return rc != 0
from ansible.module_utils.basic import *
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
//// [tests/cases/compiler/classMemberInitializerWithLamdaScoping3.ts] ////
//// [classMemberInitializerWithLamdaScoping3_0.ts]
var field1: string;
//// [classMemberInitializerWithLamdaScoping3_1.ts]
declare var console: {
log(msg?: any): void;
};
export class Test1 {
constructor(private field1: string) {
}
messageHandler = () => {
console.log(field1); // But this should be error as the field1 will resolve to var field1
// but since this code would be generated inside constructor, in generated js
// it would resolve to private field1 and thats not what user intended here.
};
}
//// [classMemberInitializerWithLamdaScoping3_0.js]
"use strict";
var field1;
//// [classMemberInitializerWithLamdaScoping3_1.js]
export class Test1 {
constructor(field1) {
this.field1 = field1;
this.messageHandler = () => {
console.log(field1); // But this should be error as the field1 will resolve to var field1
// but since this code would be generated inside constructor, in generated js
// it would resolve to private field1 and thats not what user intended here.
};
}
} | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/classMemberInitializerWithLamdaScoping3.js |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_target_canonical_facts
short_description: Return canonical (NAA) from an ESXi host system
description:
- This module can be used to gather facts about canonical (NAA) from an ESXi host based on SCSI target ID.
version_added: "2.0"
author:
- Joseph Callen (@jcpowermac)
- Abhijeet Kasurde (@Akasurde)
notes:
requirements:
- Tested on vSphere 5.5 and 6.5
- PyVmomi installed
options:
target_id:
description:
- The target id based on order of scsi device.
- version 2.6 onwards, this parameter is optional.
required: False
cluster_name:
description:
- Name of the cluster.
- Facts about all SCSI devices for all host system in the given cluster is returned.
- This parameter is required, if C(esxi_hostname) is not provided.
version_added: 2.6
esxi_hostname:
description:
- Name of the ESXi host system.
- Facts about all SCSI devices for the given ESXi host system is returned.
- This parameter is required, if C(cluster_name) is not provided.
version_added: 2.6
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Get Canonical name of particular target on particular ESXi host system
vmware_target_canonical_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
target_id: 7
esxi_hostname: esxi_hostname
delegate_to: localhost
- name: Get Canonical name of all target on particular ESXi host system
vmware_target_canonical_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
delegate_to: localhost
- name: Get Canonical name of all ESXi hostname on particular Cluster
vmware_target_canonical_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
'''
RETURN = r"""
canonical:
description: metadata about SCSI Target device
returned: if host system and target id is given
type: str
sample: "mpx.vmhba0:C0:T0:L0"
scsi_tgt_facts:
description: metadata about all SCSI Target devices
returned: if host system or cluster is given
type: dict
sample: {
"DC0_C0_H0": {
"scsilun_canonical": {
"key-vim.host.ScsiDisk-0000000000766d686261303a303a30": "mpx.vmhba0:C0:T0:L0",
"key-vim.host.ScsiLun-0005000000766d686261313a303a30": "mpx.vmhba1:C0:T0:L0"
},
"target_lun_uuid": {
"0": "key-vim.host.ScsiDisk-0000000000766d686261303a303a30"
}
},
"DC0_C0_H1": {
"scsilun_canonical": {
"key-vim.host.ScsiDisk-0000000000766d686261303a303a30": "mpx.vmhba0:C0:T0:L0",
"key-vim.host.ScsiLun-0005000000766d686261313a303a30": "mpx.vmhba1:C0:T0:L0"
},
"target_lun_uuid": {
"0": "key-vim.host.ScsiDisk-0000000000766d686261303a303a30"
}
},
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
class ScsiTargetFactsManager(PyVmomi):
def __init__(self, module):
    """Resolve the host systems to inspect.

    Collects either every host of ``cluster_name`` or the single host
    named by ``esxi_hostname`` (the argument spec requires one of the
    two to be present).
    """
    super(ScsiTargetFactsManager, self).__init__(module)
    cluster_name = self.module.params.get('cluster_name')
    self.esxi_hostname = self.module.params.get('esxi_hostname')
    self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=self.esxi_hostname)
def gather_scsi_device_facts(self):
"""
Function to gather facts about SCSI target devices
"""
scsi_tgt_facts = {}
target_lun_uuid = {}
scsilun_canonical = {}
target_id = self.module.params['target_id']
for host in self.hosts:
# Associate the scsiLun key with the canonicalName (NAA)
for scsilun in host.config.storageDevice.scsiLun:
scsilun_canonical[scsilun.key] = scsilun.canonicalName
# Associate target number with LUN uuid
for target in host.config.storageDevice.scsiTopology.adapter[0].target:
for lun in target.lun:
target_lun_uuid[target.target] = lun.scsiLun
scsi_tgt_facts[host.name] = dict(scsilun_canonical=scsilun_canonical,
target_lun_uuid=target_lun_uuid)
if target_id is not None and self.esxi_hostname is not None:
canonical = ''
temp_lun_data = scsi_tgt_facts[self.esxi_hostname]['target_lun_uuid']
if self.esxi_hostname in scsi_tgt_facts and \
target_id in temp_lun_data:
temp_scsi_data = scsi_tgt_facts[self.esxi_hostname]['scsilun_canonical']
temp_target = temp_lun_data[target_id]
canonical = temp_scsi_data[temp_target]
self.module.exit_json(changed=False, canonical=canonical)
self.module.exit_json(changed=False, scsi_tgt_facts=scsi_tgt_facts)
def main():
    """Module entry point: build the argument spec and gather SCSI facts."""
    spec = vmware_argument_spec()
    spec.update(
        target_id=dict(required=False, type='int'),
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
    )
    # The manager exits the module itself via exit_json.
    ScsiTargetFactsManager(module).gather_scsi_device_facts()


if __name__ == '__main__':
    main()
#!/bin/sh
#
# This program resolves merge conflicts in git
#
# Copyright (c) 2006 Theodore Y. Ts'o
# Copyright (c) 2009-2016 David Aguilar
#
# This file is licensed under the GPL v2, or a later version
# at the discretion of Junio C Hamano.
#
USAGE='[--tool=tool] [--tool-help] [-y|--no-prompt|--prompt] [-g|--gui|--no-gui] [-O<orderfile>] [file to merge] ...'
SUBDIRECTORY_OK=Yes
NONGIT_OK=Yes
OPTIONS_SPEC=
TOOL_MODE=merge
. git-sh-setup
. git-mergetool--lib
# True when the given index mode denotes a symbolic link.
is_symlink () {
	case "$1" in
	120000)
		return 0
		;;
	*)
		return 1
		;;
	esac
}
# True when the given index mode denotes a submodule (gitlink).
is_submodule () {
	case "$1" in
	160000)
		return 0
		;;
	*)
		return 1
		;;
	esac
}
# True when a local (stage 2) version of the path exists.
local_present () {
	[ -n "$local_mode" ]
}
# True when a remote (stage 3) version of the path exists.
remote_present () {
	[ -n "$remote_mode" ]
}
# True when a base (stage 1) version of the path exists.
base_present () {
	[ -n "$base_mode" ]
}
# Decide where temporary merge files live and set MERGETOOL_TMPDIR.
# When mergetool.writeToTemp is true, a throwaway directory is created
# via mktemp; otherwise the current directory is used.  Dies when
# mktemp is required but unavailable.
mergetool_tmpdir_init () {
	if test "$(git config --bool mergetool.writeToTemp)" != true
	then
		MERGETOOL_TMPDIR=.
		return 0
	fi
	if MERGETOOL_TMPDIR=$(mktemp -d -t "git-mergetool-XXXXXX" 2>/dev/null)
	then
		return 0
	fi
	die "error: mktemp is needed when 'mergetool.writeToTemp' is true"
}
# Remove the per-path temporary files ($LOCAL, $REMOTE, $BASE, $BACKUP).
# With --save-backup the backup is kept as "$MERGED.orig" instead of
# being deleted.  Also removes the temp directory when one was used
# (rmdir only succeeds once it is empty).
cleanup_temp_files () {
	if test "$1" = --save-backup
	then
		rm -rf -- "$MERGED.orig"
		test -e "$BACKUP" && mv -- "$BACKUP" "$MERGED.orig"
		rm -f -- "$LOCAL" "$REMOTE" "$BASE"
	else
		rm -f -- "$LOCAL" "$REMOTE" "$BASE" "$BACKUP"
	fi
	if test "$MERGETOOL_TMPDIR" != "."
	then
		rmdir "$MERGETOOL_TMPDIR"
	fi
}
# Print a one-line description of one side of a conflicted path:
#   describe_file <mode> <branch-label> <file>
# Reports deleted / symlink / submodule / modified / created based on
# the mode and on whether a base (stage 1) version exists.
describe_file () {
	mode="$1"
	branch="$2"
	file="$3"
	printf " {%s}: " "$branch"
	if test -z "$mode"
	then
		echo "deleted"
	elif is_symlink "$mode"
	then
		# For symlinks the temp file holds the link target.
		echo "a symbolic link -> '$(cat "$file")'"
	elif is_submodule "$mode"
	then
		# For submodules, $file is the commit SHA-1, not a path.
		echo "submodule commit $file"
	elif base_present
	then
		echo "modified file"
	else
		echo "created file"
	fi
}
# Interactively resolve a symlink conflict by checking out either the
# local (stage 2) or remote (stage 3) version of $MERGED and staging it.
# Returns 0 on resolution, 1 on abort or EOF.
resolve_symlink_merge () {
	while true
	do
		printf "Use (l)ocal or (r)emote, or (a)bort? "
		read ans || return 1
		case "$ans" in
		[lL]*)
			git checkout-index -f --stage=2 -- "$MERGED"
			git add -- "$MERGED"
			cleanup_temp_files --save-backup
			return 0
			;;
		[rR]*)
			git checkout-index -f --stage=3 -- "$MERGED"
			git add -- "$MERGED"
			cleanup_temp_files --save-backup
			return 0
			;;
		[aA]*)
			return 1
			;;
		esac
	done
}
# Interactively resolve a modify/delete (or create/delete) conflict on
# $MERGED: keep the surviving file (git add) or delete it (git rm).
# The prompt wording depends on whether a base version exists.
# Returns 0 on resolution, 1 on abort or EOF.
resolve_deleted_merge () {
	while true
	do
		if base_present
		then
			printf "Use (m)odified or (d)eleted file, or (a)bort? "
		else
			printf "Use (c)reated or (d)eleted file, or (a)bort? "
		fi
		read ans || return 1
		case "$ans" in
		[mMcC]*)
			git add -- "$MERGED"
			# Honour mergetool.keepBackup when cleaning up.
			if test "$merge_keep_backup" = "true"
			then
				cleanup_temp_files --save-backup
			else
				cleanup_temp_files
			fi
			return 0
			;;
		[dD]*)
			git rm -- "$MERGED" > /dev/null
			cleanup_temp_files
			return 0
			;;
		[aA]*)
			# On abort, keep the temporaries only if the user asked
			# for them via mergetool.keepTemporaries.
			if test "$merge_keep_temporaries" = "false"
			then
				cleanup_temp_files
			fi
			return 1
			;;
		esac
	done
}
# Interactively resolve a submodule conflict on $MERGED by taking the
# local or remote side.  Handles three shapes per side: the side is
# absent (deleted, or replaced by a plain directory tree), the side is a
# submodule commit, or the side is a regular file.  Returns 0 on
# resolution, 1 on abort or EOF.
resolve_submodule_merge () {
	while true
	do
		printf "Use (l)ocal or (r)emote, or (a)bort? "
		read ans || return 1
		case "$ans" in
		[lL]*)
			if ! local_present
			then
				if test -n "$(git ls-tree HEAD -- "$MERGED")"
				then
					# Local isn't present, but it's a subdirectory
					git ls-tree --full-name -r HEAD -- "$MERGED" |
					git update-index --index-info || exit $?
				else
					# Local side deleted the submodule: remove it,
					# saving any working-tree content as a backup.
					test -e "$MERGED" && mv -- "$MERGED" "$BACKUP"
					git update-index --force-remove "$MERGED"
					cleanup_temp_files --save-backup
				fi
			elif is_submodule "$local_mode"
			then
				stage_submodule "$MERGED" "$local_sha1"
			else
				# Local side is a regular file: check it out and stage.
				git checkout-index -f --stage=2 -- "$MERGED"
				git add -- "$MERGED"
			fi
			return 0
			;;
		[rR]*)
			if ! remote_present
			then
				if test -n "$(git ls-tree MERGE_HEAD -- "$MERGED")"
				then
					# Remote isn't present, but it's a subdirectory
					git ls-tree --full-name -r MERGE_HEAD -- "$MERGED" |
					git update-index --index-info || exit $?
				else
					test -e "$MERGED" && mv -- "$MERGED" "$BACKUP"
					git update-index --force-remove "$MERGED"
				fi
			elif is_submodule "$remote_mode"
			then
				# If the local side was not a submodule, preserve the
				# working-tree file before staging the submodule commit.
				! is_submodule "$local_mode" &&
				test -e "$MERGED" &&
				mv -- "$MERGED" "$BACKUP"
				stage_submodule "$MERGED" "$remote_sha1"
			else
				test -e "$MERGED" && mv -- "$MERGED" "$BACKUP"
				git checkout-index -f --stage=3 -- "$MERGED"
				git add -- "$MERGED"
			fi
			cleanup_temp_files --save-backup
			return 0
			;;
		[aA]*)
			return 1
			;;
		esac
	done
}
# Stage a submodule commit in the index:
#   stage_submodule <path> <sha1>
# Creates the directory if needed and records the gitlink (mode 160000)
# using a path relative to the work-tree root.
stage_submodule () {
	path="$1"
	submodule_sha1="$2"
	mkdir -p "$path" ||
	die "fatal: unable to create directory for module at $path"
	# Find $path relative to work tree
	work_tree_root=$(cd_to_toplevel && pwd)
	work_rel_path=$(cd "$path" &&
		GIT_WORK_TREE="${work_tree_root}" git rev-parse --show-prefix
	)
	test -n "$work_rel_path" ||
	die "fatal: unable to get path of module $path relative to work tree"
	git update-index --add --replace --cacheinfo 160000 "$submodule_sha1" "${work_rel_path%/}" || die
}
# Extract one stage of a conflicted path from the index:
#   checkout_staged_file <stage 1|2|3> <path> <destination>
# checkout-index prints "tempname<separator>path"; the suffix is
# stripped and the temp file moved to <destination>.  When the stage
# does not exist, an empty <destination> is created instead.
checkout_staged_file () {
	tmpfile="$(git checkout-index --temp --stage="$1" "$2" 2>/dev/null)" &&
	tmpfile=${tmpfile%%' '*}
	if test $? -eq 0 && test -n "$tmpfile"
	then
		# checkout-index writes relative to the repo top level; prefix
		# with the path back up from the current directory.
		mv -- "$(git rev-parse --show-cdup)$tmpfile" "$3"
	else
		>"$3"
	fi
}
# Pre-merge the hunks that merge cleanly into both the LOCAL and REMOTE
# temp files, so the merge tool only shows the hunks that genuinely
# conflict.  Clean hunks take "ours" in LOCAL and "theirs" in REMOTE.
hide_resolved () {
	git merge-file --ours -q -p "$LOCAL" "$BASE" "$REMOTE" >"$LCONFL"
	git merge-file --theirs -q -p "$LOCAL" "$BASE" "$REMOTE" >"$RCONFL"
	mv -- "$LCONFL" "$LOCAL"
	mv -- "$RCONFL" "$REMOTE"
}
# Resolve the conflict on a single path:
#   merge_file <path>
# Reads the unmerged index entries for the path, dispatches submodule /
# deleted / symlink conflicts to the interactive helpers, and otherwise
# extracts BASE/LOCAL/REMOTE temp files and runs the configured merge
# tool.  Returns 0 when the path was resolved, 1 otherwise.
merge_file () {
	MERGED="$1"
	f=$(git ls-files -u -- "$MERGED")
	if test -z "$f"
	then
		if test ! -f "$MERGED"
		then
			echo "$MERGED: file not found"
		else
			echo "$MERGED: file does not need merging"
		fi
		return 1
	fi
	# extract file extension from the last path component
	case "${MERGED##*/}" in
	*.*)
		ext=.${MERGED##*.}
		BASE=${MERGED%"$ext"}
		;;
	*)
		BASE=$MERGED
		ext=
	esac
	initialize_merge_tool "$merge_tool" || return
	mergetool_tmpdir_init
	if test "$MERGETOOL_TMPDIR" != "."
	then
		# If we're using a temporary directory then write to the
		# top-level of that directory.
		BASE=${BASE##*/}
	fi
	# Temp file names embed $$ so concurrent mergetool runs don't clash.
	BACKUP="$MERGETOOL_TMPDIR/${BASE}_BACKUP_$$$ext"
	LOCAL="$MERGETOOL_TMPDIR/${BASE}_LOCAL_$$$ext"
	LCONFL="$MERGETOOL_TMPDIR/${BASE}_LOCAL_LCONFL_$$$ext"
	REMOTE="$MERGETOOL_TMPDIR/${BASE}_REMOTE_$$$ext"
	RCONFL="$MERGETOOL_TMPDIR/${BASE}_REMOTE_RCONFL_$$$ext"
	BASE="$MERGETOOL_TMPDIR/${BASE}_BASE_$$$ext"
	base_mode= local_mode= remote_mode=
	# here, $IFS is just a LF
	# Parse the "mode sha1 stage\tpath" lines from ls-files -u into
	# the per-stage mode/sha1 variables.
	for line in $f
	do
		mode=${line%% *} # 1st word
		sha1=${line#"$mode "}
		sha1=${sha1%% *} # 2nd word
		case "${line#$mode $sha1 }" in # remainder
		'1 '*)
			base_mode=$mode
			;;
		'2 '*)
			local_mode=$mode local_sha1=$sha1
			;;
		'3 '*)
			remote_mode=$mode remote_sha1=$sha1
			;;
		esac
	done
	if is_submodule "$local_mode" || is_submodule "$remote_mode"
	then
		echo "Submodule merge conflict for '$MERGED':"
		describe_file "$local_mode" "local" "$local_sha1"
		describe_file "$remote_mode" "remote" "$remote_sha1"
		resolve_submodule_merge
		return
	fi
	# Keep a pristine copy of the conflicted file as the backup.
	if test -f "$MERGED"
	then
		mv -- "$MERGED" "$BACKUP"
		cp -- "$BACKUP" "$MERGED"
	fi
	# Create a parent directory to handle delete/delete conflicts
	# where the base's directory no longer exists.
	mkdir -p "$(dirname "$MERGED")"
	checkout_staged_file 1 "$MERGED" "$BASE"
	checkout_staged_file 2 "$MERGED" "$LOCAL"
	checkout_staged_file 3 "$MERGED" "$REMOTE"
	# hideResolved preferences hierarchy.
	global_config="mergetool.hideResolved"
	tool_config="mergetool.${merge_tool}.hideResolved"
	if enabled=$(git config --type=bool "$tool_config")
	then
		# The user has a specific preference for a specific tool and no
		# other preferences should override that.
		: ;
	elif enabled=$(git config --type=bool "$global_config")
	then
		# The user has a general preference for all tools.
		#
		# 'true' means the user likes the feature so we should use it
		# where possible but tool authors can still override.
		#
		# 'false' means the user doesn't like the feature so we should
		# not use it anywhere.
		if test "$enabled" = true && hide_resolved_enabled
		then
			enabled=true
		else
			enabled=false
		fi
	else
		# The user does not have a preference. Default to disabled.
		enabled=false
	fi
	if test "$enabled" = true
	then
		hide_resolved
	fi
	if test -z "$local_mode" || test -z "$remote_mode"
	then
		echo "Deleted merge conflict for '$MERGED':"
		describe_file "$local_mode" "local" "$LOCAL"
		describe_file "$remote_mode" "remote" "$REMOTE"
		resolve_deleted_merge
		status=$?
		# Remove now-empty parent directories left by delete/delete.
		rmdir -p "$(dirname "$MERGED")" 2>/dev/null
		return $status
	fi
	if is_symlink "$local_mode" || is_symlink "$remote_mode"
	then
		echo "Symbolic link merge conflict for '$MERGED':"
		describe_file "$local_mode" "local" "$LOCAL"
		describe_file "$remote_mode" "remote" "$REMOTE"
		resolve_symlink_merge
		return
	fi
	echo "Normal merge conflict for '$MERGED':"
	describe_file "$local_mode" "local" "$LOCAL"
	describe_file "$remote_mode" "remote" "$REMOTE"
	if test "$guessed_merge_tool" = true || test "$prompt" = true
	then
		printf "Hit return to start merge resolution tool (%s): " "$merge_tool"
		read ans || return 1
	fi
	if base_present
	then
		present=true
	else
		present=false
	fi
	if ! run_merge_tool "$merge_tool" "$present"
	then
		echo "merge of $MERGED failed" 1>&2
		# Restore the original conflicted file from the backup.
		mv -- "$BACKUP" "$MERGED"
		if test "$merge_keep_temporaries" = "false"
		then
			cleanup_temp_files
		fi
		return 1
	fi
	if test "$merge_keep_backup" = "true"
	then
		mv -- "$BACKUP" "$MERGED.orig"
	else
		rm -- "$BACKUP"
	fi
	git add -- "$MERGED"
	cleanup_temp_files
	return 0
}
# Ask whether to keep going after one path failed to merge.
# Returns 0 to continue, 1 to stop (also on EOF).
prompt_after_failed_merge () {
	while :
	do
		printf "Continue merging other unresolved paths [y/n]? "
		read ans || return 1
		case "$ans" in
		[yY]*) return 0 ;;
		[nN]*) return 1 ;;
		esac
	done
}
# Inform the user and terminate successfully when nothing is unmerged.
print_noop_and_exit () {
	printf '%s\n' "No files need merging"
	exit 0
}
# Entry point: parse options, pick a merge tool, build the list of
# unmerged paths (optionally filtered by arguments and an -O order
# file), and run merge_file on each.  Exits 0 only when every path was
# resolved.
main () {
	prompt=$(git config --bool mergetool.prompt)
	GIT_MERGETOOL_GUI=
	guessed_merge_tool=false
	orderfile=
	while test $# != 0
	do
		case "$1" in
		--tool-help=*)
			TOOL_MODE=${1#--tool-help=}
			show_tool_help
			;;
		--tool-help)
			show_tool_help
			;;
		-t|--tool*)
			# Accept -t <tool>, --tool <tool> and --tool=<tool>.
			case "$#,$1" in
			*,*=*)
				merge_tool=${1#*=}
				;;
			1,*)
				usage ;;
			*)
				merge_tool="$2"
				shift ;;
			esac
			;;
		--no-gui)
			GIT_MERGETOOL_GUI=false
			;;
		-g|--gui)
			GIT_MERGETOOL_GUI=true
			;;
		-y|--no-prompt)
			prompt=false
			;;
		--prompt)
			prompt=true
			;;
		-O*)
			orderfile="${1#-O}"
			;;
		--)
			shift
			break
			;;
		-*)
			usage
			;;
		*)
			break
			;;
		esac
		shift
	done
	git_dir_init
	require_work_tree
	if test -z "$merge_tool"
	then
		# No tool given on the command line: let the lib guess one.
		# Exit status 1 means "guessed"; anything higher is an error.
		merge_tool=$(get_merge_tool)
		subshell_exit_status=$?
		if test $subshell_exit_status = 1
		then
			guessed_merge_tool=true
		elif test $subshell_exit_status -gt 1
		then
			exit $subshell_exit_status
		fi
	fi
	merge_keep_backup="$(git config --bool mergetool.keepBackup || echo true)"
	merge_keep_temporaries="$(git config --bool mergetool.keepTemporaries || echo false)"
	prefix=$(git rev-parse --show-prefix) || exit 1
	cd_to_toplevel
	if test -n "$orderfile"
	then
		# Re-anchor the order file path now that we cd'd to the top level.
		orderfile=$(
			git rev-parse --prefix "$prefix" -- "$orderfile" |
			sed -e 1d
		)
	fi
	if test $# -eq 0 && test -e "$GIT_DIR/MERGE_RR"
	then
		# No paths given: take what rerere could not auto-resolve.
		set -- $(git rerere remaining)
		if test $# -eq 0
		then
			print_noop_and_exit
		fi
	elif test $# -ge 0
	then
		# rev-parse provides the -- needed for 'set'
		eval "set $(git rev-parse --sq --prefix "$prefix" -- "$@")"
	fi
	files=$(git -c core.quotePath=false \
		diff --name-only --diff-filter=U \
		${orderfile:+"-O$orderfile"} -- "$@")
	if test -z "$files"
	then
		print_noop_and_exit
	fi
	printf "Merging:\n"
	printf "%s\n" "$files"
	rc=0
	set -- $files
	while test $# -ne 0
	do
		printf "\n"
		if ! merge_file "$1"
		then
			rc=1
			# Offer to continue unless this was the last path.
			test $# -ne 1 && prompt_after_failed_merge || exit 1
		fi
		shift
	done
	exit $rc
}
main "$@"
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
# (c) 2016, Steve Baker <sbaker@redhat.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_stack
short_description: Add/Remove Heat Stack
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Mathieu Bultel (matbu), Steve Baker (steveb)"
description:
- Add or Remove a Stack to an OpenStack Heat
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
required: false
default: present
name:
description:
- Name of the stack that should be created, name could be char and digit, no space
required: true
template:
description:
- Path of the template file to use for the stack creation
required: false
default: None
environment:
description:
- List of environment files that should be used for the stack creation
required: false
default: None
parameters:
description:
- Dictionary of parameters for the stack creation
required: false
default: None
rollback:
description:
- Rollback stack creation
required: false
default: false
timeout:
description:
- Maximum number of seconds to wait for the stack creation
required: false
default: 3600
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
---
- name: create stack
ignore_errors: True
register: stack_create
os_stack:
name: "{{ stack_name }}"
state: present
template: "/path/to/my_stack.yaml"
environment:
- /path/to/resource-registry.yaml
- /path/to/environment.yaml
parameters:
bmc_flavor: m1.medium
bmc_image: CentOS
key_name: default
private_net: "{{ private_net_param }}"
node_count: 2
name: undercloud
image: CentOS
my_flavor: m1.large
external_net: "{{ external_net_param }}"
'''
RETURN = '''
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
returned: always
stack:
description: stack info
type: complex
returned: always
contains:
action:
description: Action, could be Create or Update.
type: string
sample: "CREATE"
creation_time:
description: Time when the action has been made.
type: string
sample: "2016-07-05T17:38:12Z"
description:
description: Description of the Stack provided in the heat template.
type: string
sample: "HOT template to create a new instance and networks"
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
name:
description: Name of the Stack
type: string
sample: "test-stack"
identifier:
description: Identifier of the current Stack action.
type: string
sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
links:
description: Links to the current Stack.
type: list of dict
sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6']"
outputs:
description: Output returned by the Stack.
type: list of dict
sample: "{'description': 'IP address of server1 in private network',
'output_key': 'server1_private_ip',
'output_value': '10.1.10.103'}"
parameters:
description: Parameters of the current Stack
type: dict
sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
'OS::stack_name': 'test-stack',
'stack_status': 'CREATE_COMPLETE',
'stack_status_reason': 'Stack CREATE completed successfully',
'status': 'COMPLETE',
'template_description': 'HOT template to create a new instance and networks',
'timeout_mins': 60,
'updated_time': null}"
'''
from time import sleep
from distutils.version import StrictVersion
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _create_stack(module, stack, cloud):
try:
stack = cloud.create_stack(module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
wait=True,
rollback=module.params['rollback'],
**module.params['parameters'])
stack = cloud.get_stack(stack.id, None)
if stack.stack_status == 'CREATE_COMPLETE':
return stack
else:
return False
module.fail_json(msg="Failure in creating stack: {0}".format(stack))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
def _update_stack(module, stack, cloud):
try:
stack = cloud.update_stack(
module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
rollback=module.params['rollback'],
wait=module.params['wait'],
**module.params['parameters'])
if stack['stack_status'] == 'UPDATE_COMPLETE':
return stack
else:
module.fail_json(msg = "Failure in updating stack: %s" %
stack['stack_status_reason'])
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
def _system_state_change(module, stack, cloud):
state = module.params['state']
if state == 'present':
if not stack:
return True
if state == 'absent' and stack:
return True
return False
def main():
    """Module entry point: create, update, or delete a Heat stack."""
    # Base OpenStack options (auth, wait, etc.) plus stack-specific ones.
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        template=dict(default=None),
        environment=dict(default=None, type='list'),
        parameters=dict(default={}, type='dict'),
        rollback=dict(default=False, type='bool'),
        timeout=dict(default=3600, type='int'),
        state=dict(default='present', choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    # stack API introduced in 1.8.0
    if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.8.0')):
        module.fail_json(msg='shade 1.8.0 or higher is required for this module')

    state = module.params['state']
    name = module.params['name']
    # Check for required parameters when state == 'present'
    if state == 'present':
        for p in ['template']:
            if not module.params[p]:
                module.fail_json(msg='%s required with present state' % p)

    try:
        cloud = shade.openstack_cloud(**module.params)
        stack = cloud.get_stack(name)

        # Check mode: report only whether a change would happen.
        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, stack,
                                                          cloud))

        if state == 'present':
            if not stack:
                stack = _create_stack(module, stack, cloud)
            else:
                stack = _update_stack(module, stack, cloud)
            # NOTE(review): changed is always reported True here, even if
            # the update was effectively a no-op -- confirm whether that
            # matters to callers.  stack.id presumably works because shade
            # returns Munch objects with attribute access; verify for the
            # update path, which returns a mapping.
            changed = True
            module.exit_json(changed=changed,
                             stack=stack,
                             id=stack.id)
        elif state == 'absent':
            if not stack:
                changed = False
            else:
                changed = True
                if not cloud.delete_stack(name, wait=module.params['wait']):
                    module.fail_json(msg='delete stack failed for stack: %s' % name)
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
use crate::spec::{Abi, Arch, FloatAbi, Target, TargetMetadata, TargetOptions, base};
/// Target specification for `arm-unknown-linux-musleabihf`:
/// 32-bit Armv6 Linux with musl libc and hard-float ABI.
pub(crate) fn target() -> Target {
    Target {
        llvm_target: "arm-unknown-linux-musleabihf".into(),
        metadata: TargetMetadata {
            description: Some("Armv6 Linux with musl 1.2.5, hardfloat".into()),
            tier: Some(2),
            host_tools: Some(false),
            std: Some(true),
        },
        pointer_width: 32,
        data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
        arch: Arch::Arm,
        options: TargetOptions {
            // Hard-float calling convention (FP args in VFP registers).
            abi: Abi::EabiHf,
            llvm_floatabi: Some(FloatAbi::Hard),
            // Most of these settings are copied from the arm_unknown_linux_gnueabihf
            // target.
            features: "+strict-align,+v6,+vfp2".into(),
            max_atomic_width: Some(64),
            mcount: "\u{1}mcount".into(),
            // FIXME(compiler-team#422): musl targets should be dynamically linked by default.
            crt_static_default: true,
            ..base::linux_musl::opts()
        },
    }
}
test_kind: js_test
selector:
roots:
- jstests/change_streams/**/*.js
exclude_files:
# This test creates a collection (and index) inside a transaction. Even though the collections are
# unsharded this suite enables sharding in the test database which makes transactions against
# it distributed. This causes the following tests to fail since creating a collection in a
# distributed transaction is not allowed.
- jstests/change_streams/ddl_create_index_txn.js
# This test exercises the internal behavior of $changeStream v1 and is not compatible with v2.
- jstests/change_streams/create_event_from_chunk_migration.js
# TODO: SERVER-114511 re-enable this test.
- jstests/change_streams/migrate_last_chunk_from_shard_event.js
# TODO: SERVER-117391 Ensure change_streams/timeseries.js test passes when running change streams in v2 mode.
- jstests/change_streams/timeseries.js
exclude_with_any_tags:
##
# The next tags correspond to the special errors thrown by the
# set_read_and_write_concerns.js override when it refuses to replace the readConcern or
# writeConcern of a particular command. Above each tag are the message(s) that cause the tag to be
# warranted.
##
# "Cowardly refusing to override write concern of command: ..."
- assumes_write_concern_unchanged
# Exclude any that assume sharding is disabled
- assumes_against_mongod_not_mongos
executor:
archive:
hooks:
- CheckReplDBHash
- CheckMetadataConsistencyInBackground
- ValidateCollections
config:
shell_options:
global_vars:
TestData:
defaultReadConcernLevel: null
# Enable causal consistency for change streams suites using 1 node replica sets. See
# change_streams.yml for detailed explanation.
eval: >-
globalThis.testingReplication = true;
await import('jstests/libs/override_methods/set_read_and_write_concerns.js');
await import('jstests/libs/override_methods/enable_sessions.js');
await import('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
await import('jstests/libs/override_methods/implicit_change_stream_v2.js');
# Set longer host discovery time to handle change stream resumable errors.
setShellParameter: defaultFindReplicaSetHostTimeoutMS=120000
hooks:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: RunQueryStats
- class: ValidateCollections
- class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
class: ShardedClusterFixture
# Use two shards to make sure we will only talk to the primary shard for the database and will
# not delay changes to wait for notifications or a clock advancement from other shards.
num_shards: 2
num_mongos: 3
mongos_options:
bind_ip_all: ""
set_parameters:
enableTestCommands: 1
mongod_options:
bind_ip_all: ""
set_parameters:
enableTestCommands: 1
periodicNoopIntervalSecs: 1
writePeriodicNoops: true | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/suites/change_streams_mongos_sessions_passthrough_v2.yml |
# coding: utf-8
from __future__ import unicode_literals
import os
import re
import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_etree_fromstring,
compat_str,
compat_urllib_parse_unquote,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
HEADRequest,
is_html,
js_to_json,
KNOWN_EXTENSIONS,
merge_dicts,
mimetype2ext,
orderedSet,
sanitized_Request,
smuggle_url,
unescapeHTML,
unified_strdate,
unsmuggle_url,
UnsupportedError,
xpath_text,
)
from .commonprotocols import RtmpIE
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .nexx import (
NexxIE,
NexxEmbedIE,
)
from .nbc import NBCSportsVPlayerIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .tvc import TVCIE
from .sportbox import SportBoxIE
from .smotri import SmotriIE
from .myvi import MyviIE
from .condenast import CondeNastIE
from .udn import UDNEmbedIE
from .senateisvp import SenateISVPIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .tnaflix import TNAFlixNetworkEmbedIE
from .drtuber import DrTuberIE
from .redtube import RedTubeIE
from .tube8 import Tube8IE
from .mofosex import MofosexEmbedIE
from .spankwire import SpankwireIE
from .youporn import YouPornIE
from .vimeo import VimeoIE
from .dailymotion import DailymotionIE
from .dailymail import DailyMailIE
from .onionstudios import OnionStudiosIE
from .viewlift import ViewLiftEmbedIE
from .mtv import MTVServicesEmbeddedIE
from .pladform import PladformIE
from .videomore import VideomoreIE
from .webcaster import WebcasterFeedIE
from .googledrive import GoogleDriveIE
from .jwplatform import JWPlatformIE
from .digiteka import DigitekaIE
from .arkena import ArkenaIE
from .instagram import InstagramIE
from .liveleak import LiveLeakIE
from .threeqsdn import ThreeQSDNIE
from .theplatform import ThePlatformIE
from .kaltura import KalturaIE
from .eagleplatform import EaglePlatformIE
from .facebook import FacebookIE
from .soundcloud import SoundcloudEmbedIE
from .tunein import TuneInBaseIE
from .vbox7 import Vbox7IE
from .dbtv import DBTVIE
from .piksel import PikselIE
from .videa import VideaIE
from .twentymin import TwentyMinutenIE
from .ustream import UstreamIE
from .videopress import VideoPressIE
from .rutube import RutubeIE
from .limelight import LimelightBaseIE
from .anvato import AnvatoIE
from .washingtonpost import WashingtonPostIE
from .wistia import WistiaIE
from .mediaset import MediasetIE
from .joj import JojIE
from .megaphone import MegaphoneIE
from .vzaar import VzaarIE
from .channel9 import Channel9IE
from .vshare import VShareIE
from .mediasite import MediasiteIE
from .springboardplatform import SpringboardPlatformIE
from .yapfiles import YapFilesIE
from .vice import ViceIE
from .xfileshare import XFileShareIE
from .cloudflarestream import CloudflareStreamIE
from .peertube import PeerTubeIE
from .teachable import TeachableIE
from .indavideo import IndavideoEmbedIE
from .apa import APAIE
from .foxnews import FoxNewsIE
from .viqeo import ViqeoIE
from .expressen import ExpressenIE
from .zype import ZypeIE
from .odnoklassniki import OdnoklassnikiIE
from .kinja import KinjaEmbedIE
class GenericIE(InfoExtractor):
IE_DESC = 'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = 'generic'
_TESTS = [
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'upload_date': '20100513',
}
},
# Direct link to media delivered compressed (until Accept-Encoding is *)
{
'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac',
'md5': '128c42e68b13950268b648275386fc74',
'info_dict': {
'id': 'FictionJunction-Parallel_Hearts',
'ext': 'flac',
'title': 'FictionJunction-Parallel_Hearts',
'upload_date': '20140522',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
],
'skip': 'URL invalid',
},
# Direct download with broken HEAD
{
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'params': {
'skip_download': True, # infinite live stream
},
'expected_warnings': [
r'501.*Not Implemented',
r'400.*Bad Request',
],
},
# Direct link with incorrect MIME type
{
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'md5': '4ccbebe5f36706d85221f204d7eb5913',
'info_dict': {
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'id': '5_Lennart_Poettering_-_Systemd',
'ext': 'webm',
'title': '5_Lennart_Poettering_-_Systemd',
'upload_date': '20141120',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# RSS feed
{
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 're:.*groundbreaking video review series.*'
},
'playlist_mincount': 11,
},
# RSS feed with enclosure
{
'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'info_dict': {
'id': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
'ext': 'm4v',
'upload_date': '20150228',
'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
}
},
# RSS feed with enclosures and unsupported link URLs
{
'url': 'http://www.hellointernet.fm/podcast?format=rss',
'info_dict': {
'id': 'http://www.hellointernet.fm/podcast?format=rss',
'description': 'CGP Grey and Brady Haran talk about YouTube, life, work, whatever.',
'title': 'Hello Internet',
},
'playlist_mincount': 100,
},
# SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
'info_dict': {
'id': 'smil',
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'upload_date': '20130627',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
'params': {
'force_generic_extractor': True,
'skip_download': True,
},
},
# SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html
{
'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
'info_dict': {
'id': 'hds',
'ext': 'flv',
'title': 'hds',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from https://www.restudy.dk/video/play/id/1637
{
'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
'info_dict': {
'id': 'video_1637',
'ext': 'flv',
'title': 'video_1637',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
{
'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
'info_dict': {
'id': 'smil-service',
'ext': 'flv',
'title': 'smil-service',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370
{
'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html
{
'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
'info_dict': {
'id': 'mZlp2ctYIUEB',
'ext': 'mp4',
'title': 'Tikibad ontruimd wegens brand',
'description': 'md5:05ca046ff47b931f9b04855015e163a4',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 33,
},
'params': {
'skip_download': True,
},
},
# MPD from http://dash-mse-test.appspot.com/media.html
{
'url': 'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/car-20120827-manifest.mpd',
'md5': '4b57baab2e30d6eb3a6a09f0ba57ef53',
'info_dict': {
'id': 'car-20120827-manifest',
'ext': 'mp4',
'title': 'car-20120827-manifest',
'formats': 'mincount:9',
'upload_date': '20130904',
},
'params': {
'format': 'bestvideo',
},
},
# m3u8 served with Content-Type: audio/x-mpegURL; charset=utf-8
{
'url': 'http://once.unicornmedia.com/now/master/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/93677179-2d99-4ef4-9e17-fe70d49abfbf/content.m3u8',
'info_dict': {
'id': 'content',
'ext': 'mp4',
'title': 'content',
'formats': 'mincount:8',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'skip': 'video gone',
},
# m3u8 served with Content-Type: text/plain
{
'url': 'http://www.nacentapps.com/m3u8/index.m3u8',
'info_dict': {
'id': 'index',
'ext': 'mp4',
'title': 'index',
'upload_date': '20140720',
'formats': 'mincount:11',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'skip': 'video gone',
},
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
'info_dict': {
'id': 'cmQHVoWB5FY',
'ext': 'mp4',
'upload_date': '20130224',
'uploader_id': 'TheVerge',
'description': r're:^Chris Ziegler takes a look at the\.*',
'uploader': 'The Verge',
'title': 'First Firefox OS phones side-by-side',
},
'params': {
'skip_download': False,
}
},
{
# redirect in Refresh HTTP header
'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1',
'info_dict': {
'id': 'pO8h3EaFRdo',
'ext': 'mp4',
'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set',
'description': 'md5:6294cc1af09c4049e0652b51a2df10d5',
'upload_date': '20150917',
'uploader_id': 'brtvofficial',
'uploader': 'Boiler Room',
},
'params': {
'skip_download': False,
},
},
{
'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
'info_dict': {
'id': '13601338388002',
'ext': 'mp4',
'uploader': 'www.hodiho.fr',
'title': 'R\u00e9gis plante sa Jeep',
}
},
# bandcamp page with custom domain
{
'add_ie': ['Bandcamp'],
'url': 'http://bronyrock.com/track/the-pony-mash',
'info_dict': {
'id': '3235767654',
'ext': 'mp3',
'title': 'The Pony Mash',
'uploader': 'M_Pallante',
},
'skip': 'There is a limit of 200 free downloads / month for the test song',
},
{
# embedded brightcove video
# it also tests brightcove videos that need to set the 'Referer'
# in the http requests
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
'info_dict': {
'id': '2765128793001',
'ext': 'mp4',
'title': 'Le cours de bourse : l’analyse technique',
'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
'uploader': 'BFM BUSINESS',
},
'params': {
'skip_download': True,
},
},
{
    # embedded with itemprop embedURL and video id spelled as `idVideo`
    # FIX: key was misspelled 'add_id'; the test framework only recognizes
    # 'add_ie' (as used by every other entry), so the hint was silently ignored.
    'add_ie': ['BrightcoveLegacy'],
    'url': 'http://bfmbusiness.bfmtv.com/mediaplayer/chroniques/olivier-delamarche/',
    'info_dict': {
        'id': '5255628253001',
        'ext': 'mp4',
        'title': 'md5:37c519b1128915607601e75a87995fc0',
        'description': 'md5:37f7f888b434bb8f8cc8dbd4f7a4cf26',
        'uploader': 'BFM BUSINESS',
        'uploader_id': '876450612001',
        'timestamp': 1482255315,
        'upload_date': '20161220',
    },
    'params': {
        'skip_download': True,
    },
},
{
# https://github.com/ytdl-org/youtube-dl/issues/2253
'url': 'http://bcove.me/i6nfkrc3',
'md5': '0ba9446db037002366bab3b3eb30c88c',
'info_dict': {
'id': '3101154703001',
'ext': 'mp4',
'title': 'Still no power',
'uploader': 'thestar.com',
'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
},
'add_ie': ['BrightcoveLegacy'],
'skip': 'video gone',
},
{
'url': 'http://www.championat.com/video/football/v/87/87499.html',
'md5': 'fb973ecf6e4a78a67453647444222983',
'info_dict': {
'id': '3414141473001',
'ext': 'mp4',
'title': 'Видео. Удаление Дзагоева (ЦСКА)',
'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
'uploader': 'Championat',
},
},
{
# https://github.com/ytdl-org/youtube-dl/issues/3541
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
'ext': 'mp4',
'title': 'Leer mij vrouwen kennen: Aflevering 1',
'description': 'Leer mij vrouwen kennen: Aflevering 1',
'uploader': 'SBS Broadcasting',
},
'skip': 'Restricted to Netherlands',
'params': {
'skip_download': True, # m3u8 download
},
},
{
# Brightcove video in <iframe>
'url': 'http://www.un.org/chinese/News/story.asp?NewsID=27724',
'md5': '36d74ef5e37c8b4a2ce92880d208b968',
'info_dict': {
'id': '5360463607001',
'ext': 'mp4',
'title': '叙利亚失明儿童在废墟上演唱《心跳》 呼吁获得正常童年生活',
'description': '联合国儿童基金会中东和北非区域大使、作曲家扎德·迪拉尼(Zade Dirani)在3月15日叙利亚冲突爆发7周年纪念日之际发布了为叙利亚谱写的歌曲《心跳》(HEARTBEAT),为受到六年冲突影响的叙利亚儿童发出强烈呐喊,呼吁世界做出共同努力,使叙利亚儿童重新获得享有正常童年生活的权利。',
'uploader': 'United Nations',
'uploader_id': '1362235914001',
'timestamp': 1489593889,
'upload_date': '20170315',
},
'add_ie': ['BrightcoveLegacy'],
},
{
# Brightcove with alternative playerID key
'url': 'http://www.nature.com/nmeth/journal/v9/n7/fig_tab/nmeth.2062_SV1.html',
'info_dict': {
'id': 'nmeth.2062_SV1',
'title': 'Simultaneous multiview imaging of the Drosophila syncytial blastoderm : Quantitative high-speed imaging of entire developing embryos with simultaneous multiview light-sheet microscopy : Nature Methods : Nature Research',
},
'playlist': [{
'info_dict': {
'id': '2228375078001',
'ext': 'mp4',
'title': 'nmeth.2062-sv1',
'description': 'nmeth.2062-sv1',
'timestamp': 1363357591,
'upload_date': '20130315',
'uploader': 'Nature Publishing Group',
'uploader_id': '1964492299001',
},
}],
},
{
# Brightcove with UUID in videoPlayer
'url': 'http://www8.hp.com/cn/zh/home.html',
'info_dict': {
'id': '5255815316001',
'ext': 'mp4',
'title': 'Sprocket Video - China',
'description': 'Sprocket Video - China',
'uploader': 'HP-Video Gallery',
'timestamp': 1482263210,
'upload_date': '20161220',
'uploader_id': '1107601872001',
},
'params': {
'skip_download': True, # m3u8 download
},
'skip': 'video rotates...weekly?',
},
{
# Brightcove:new type [2].
'url': 'http://www.delawaresportszone.com/video-st-thomas-more-earns-first-trip-to-basketball-semis',
'md5': '2b35148fcf48da41c9fb4591650784f3',
'info_dict': {
'id': '5348741021001',
'ext': 'mp4',
'upload_date': '20170306',
'uploader_id': '4191638492001',
'timestamp': 1488769918,
'title': 'VIDEO: St. Thomas More earns first trip to basketball semis',
},
},
{
# Alternative brightcove <video> attributes
'url': 'http://www.programme-tv.net/videos/extraits/81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche/',
'info_dict': {
'id': '81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche',
'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche, Extraits : toutes les vidéos avec Télé-Loisirs",
},
'playlist': [{
'md5': '732d22ba3d33f2f3fc253c39f8f36523',
'info_dict': {
'id': '5311302538001',
'ext': 'mp4',
'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche",
'description': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche (France 2, 5 février 2017)",
'timestamp': 1486321708,
'upload_date': '20170205',
'uploader_id': '800000640001',
},
'only_matching': True,
}],
},
{
# NOTE(review): this entry appears to be an exact duplicate of the earlier
# "Brightcove with UUID in videoPlayer" test above (same URL and info_dict),
# only missing that entry's 'skip' reason — consider removing one of the two.
# Brightcove with UUID in videoPlayer
'url': 'http://www8.hp.com/cn/zh/home.html',
'info_dict': {
'id': '5255815316001',
'ext': 'mp4',
'title': 'Sprocket Video - China',
'description': 'Sprocket Video - China',
'uploader': 'HP-Video Gallery',
'timestamp': 1482263210,
'upload_date': '20161220',
'uploader_id': '1107601872001',
},
'params': {
'skip_download': True,  # m3u8 download
},
},
# ooyala video
{
'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
'md5': '166dd577b433b4d4ebfee10b0824d8ff',
'info_dict': {
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
'ext': 'mp4',
'title': '2cc213299525360.mov', # that's what we get
'duration': 238.231,
},
'add_ie': ['Ooyala'],
},
{
# ooyala video embedded with http://player.ooyala.com/iframe.js
'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/',
'info_dict': {
'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB',
'ext': 'mp4',
'title': '"Steve Jobs: Man in the Machine" trailer',
'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
'duration': 135.427,
},
'params': {
'skip_download': True,
},
'skip': 'movie expired',
},
# ooyala video embedded with http://player.ooyala.com/static/v4/production/latest/core.min.js
{
'url': 'http://wnep.com/2017/07/22/steampunk-fest-comes-to-honesdale/',
'info_dict': {
'id': 'lwYWYxYzE6V5uJMjNGyKtwwiw9ZJD7t2',
'ext': 'mp4',
'title': 'Steampunk Fest Comes to Honesdale',
'duration': 43.276,
},
'params': {
'skip_download': True,
}
},
# embed.ly video
{
'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
'info_dict': {
'id': '9ODmcdjQcHQ',
'ext': 'mp4',
'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
'upload_date': '20140225',
'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
'uploader': 'Tested',
'uploader_id': 'testedcom',
},
# No need to test YoutubeIE here
'params': {
'skip_download': True,
},
},
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
},
# HEAD requests lead to endless 301, while GET is OK
'expected_warnings': ['301'],
},
# RUTV embed
{
'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
'info_dict': {
'id': '776940',
'ext': 'mp4',
'title': 'Охотское море стало целиком российским',
'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# TVC embed
{
'url': 'http://sch1298sz.mskobr.ru/dou_edu/karamel_ki/filial_galleries/video/iframe_src_http_tvc_ru_video_iframe_id_55304_isplay_false_acc_video_id_channel_brand_id_11_show_episodes_episode_id_32307_frameb/',
'info_dict': {
'id': '55304',
'ext': 'mp4',
'title': 'Дошкольное воспитание',
},
},
# SportBox embed
{
'url': 'http://www.vestifinance.ru/articles/25753',
'info_dict': {
'id': '25753',
'title': 'Прямые трансляции с Форума-выставки "Госзаказ-2013"',
},
'playlist': [{
'info_dict': {
'id': '370908',
'title': 'Госзаказ. День 3',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370905',
'title': 'Госзаказ. День 2',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370902',
'title': 'Госзаказ. День 1',
'ext': 'mp4',
}
}],
'params': {
# m3u8 download
'skip_download': True,
},
},
# Myvi.ru embed
{
'url': 'http://www.kinomyvi.tv/news/detail/Pervij-dublirovannij-trejler--Uzhastikov-_nOw1',
'info_dict': {
'id': 'f4dafcad-ff21-423d-89b5-146cfd89fa1e',
'ext': 'mp4',
'title': 'Ужастики, русский трейлер (2015)',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 153,
}
},
# XHamster embed
{
'url': 'http://www.numisc.com/forum/showthread.php?11696-FM15-which-pumiscer-was-this-%28-vid-%29-%28-alfa-as-fuck-srx-%29&s=711f5db534502e22260dec8c5e2d66d8',
'info_dict': {
'id': 'showthread',
'title': '[NSFL] [FM15] which pumiscer was this ( vid ) ( alfa as fuck srx )',
},
'playlist_mincount': 7,
# This forum does not allow <iframe> syntaxes anymore
# Now HTML tags are displayed as-is
'skip': 'No videos on this page',
},
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '1969',
'ext': 'mp4',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
# nowvideo embed hidden behind percent encoding
{
'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
'md5': '2baf4ddd70f697d94b1c18cf796d5107',
'info_dict': {
'id': '06e53103ca9aa',
'ext': 'flv',
'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
'description': 'No description',
},
},
# arte embed
{
'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html',
'md5': '7653032cbb25bf6c80d80f217055fa43',
'info_dict': {
'id': '048195-004_PLUS7-F',
'ext': 'flv',
'title': 'X:enius',
'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168',
'upload_date': '20140320',
},
'params': {
'skip_download': 'Requires rtmpdump'
},
'skip': 'video gone',
},
# francetv embed
{
'url': 'http://www.tsprod.com/replay-du-concert-alcaline-de-calogero',
'info_dict': {
'id': 'EV_30231',
'ext': 'mp4',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'expected_warnings': [
'Forbidden'
]
},
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
'md5': 'ba0dfe966fa007657bd1443ee672db0f',
'info_dict': {
'id': '53501be369702d3275860000',
'ext': 'mp4',
'title': 'Honda’s New Asimo Robot Is More Human Than Ever',
}
},
# Dailymotion embed
{
'url': 'http://www.spi0n.com/zap-spi0n-com-n216/',
'md5': '441aeeb82eb72c422c7f14ec533999cd',
'info_dict': {
'id': 'k2mm4bCdJ6CQ2i7c8o2',
'ext': 'mp4',
'title': 'Le Zap de Spi0n n°216 - Zapping du Web',
'description': 'md5:faf028e48a461b8b7fad38f1e104b119',
'uploader': 'Spi0n',
'uploader_id': 'xgditw',
'upload_date': '20140425',
'timestamp': 1398441542,
},
'add_ie': ['Dailymotion'],
},
# DailyMail embed
{
'url': 'http://www.bumm.sk/krimi/2017/07/05/biztonsagi-kamera-buktatta-le-az-agg-ferfit-utlegelo-apolot',
'info_dict': {
'id': '1495629',
'ext': 'mp4',
'title': 'Care worker punches elderly dementia patient in head 11 times',
'description': 'md5:3a743dee84e57e48ec68bf67113199a5',
},
'add_ie': ['DailyMail'],
'params': {
'skip_download': True,
},
},
# YouTube embed
{
'url': 'http://www.badzine.de/ansicht/datum/2014/06/09/so-funktioniert-die-neue-englische-badminton-liga.html',
'info_dict': {
'id': 'FXRb4ykk4S0',
'ext': 'mp4',
'title': 'The NBL Auction 2014',
'uploader': 'BADMINTON England',
'uploader_id': 'BADMINTONEvents',
'upload_date': '20140603',
'description': 'md5:9ef128a69f1e262a700ed83edb163a73',
},
'add_ie': ['Youtube'],
'params': {
'skip_download': True,
}
},
# MTVServices embed
{
'url': 'http://www.vulture.com/2016/06/new-key-peele-sketches-released.html',
'md5': 'ca1aef97695ef2c1d6973256a57e5252',
'info_dict': {
'id': '769f7ec0-0692-4d62-9b45-0d88074bffc1',
'ext': 'mp4',
'title': 'Key and Peele|October 10, 2012|2|203|Liam Neesons - Uncensored',
'description': 'Two valets share their love for movie star Liam Neesons.',
'timestamp': 1349922600,
'upload_date': '20121011',
},
},
# YouTube embed via <data-embed-url="">
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
}
},
# YouTube <object> embed
{
'url': 'http://www.improbable.com/2017/04/03/untrained-modern-youths-and-ancient-masters-in-selfie-portraits/',
'md5': '516718101ec834f74318df76259fb3cc',
'info_dict': {
'id': 'msN87y-iEx0',
'ext': 'webm',
'title': 'Feynman: Mirrors FUN TO IMAGINE 6',
'upload_date': '20080526',
'description': 'md5:0ffc78ea3f01b2e2c247d5f8d1d3c18d',
'uploader': 'Christopher Sykes',
'uploader_id': 'ChristopherJSykes',
},
'add_ie': ['Youtube'],
},
# Camtasia studio
{
'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
'playlist': [{
'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
'ext': 'flv',
'duration': 2235.90,
}
}, {
'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
'ext': 'flv',
'duration': 2235.93,
}
}],
'info_dict': {
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
}
},
# Flowplayer
{
'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
'md5': '9d65602bf31c6e20014319c7d07fba27',
'info_dict': {
'id': '5123ea6d5e5a7',
'ext': 'mp4',
'age_limit': 18,
'uploader': 'www.handjobhub.com',
'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
}
},
# Multiple brightcove videos
# https://github.com/ytdl-org/youtube-dl/issues/2283
{
'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
'info_dict': {
'id': 'always-never',
'title': 'Always / Never - The New Yorker',
},
'playlist_count': 3,
'params': {
'extract_flat': False,
'skip_download': True,
}
},
# MLB embed
{
'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
'md5': '96f09a37e44da40dd083e12d9a683327',
'info_dict': {
'id': '33322633',
'ext': 'mp4',
'title': 'Ump changes call to ball',
'description': 'md5:71c11215384298a172a6dcb4c2e20685',
'duration': 48,
'timestamp': 1401537900,
'upload_date': '20140531',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
# Wistia embed
{
'url': 'http://study.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '1953f3a698ab51cfc948ed3992a0b7ff',
'info_dict': {
'id': '6e2wtrbdaf',
'ext': 'mov',
'title': 'paywall_north-american-exploration-failed-colonies-of-spain-france-england',
'description': 'a Paywall Videos video from Remilon',
'duration': 644.072,
'uploader': 'study.com',
'timestamp': 1459678540,
'upload_date': '20160403',
'filesize': 24687186,
},
},
{
'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
'info_dict': {
'id': 'uxjb0lwrcz',
'ext': 'mp4',
'title': 'Conversation about Hexagonal Rails Part 1',
'description': 'a Martin Fowler video from ThoughtWorks',
'duration': 1715.0,
'uploader': 'thoughtworks.wistia.com',
'timestamp': 1401832161,
'upload_date': '20140603',
},
},
# Wistia standard embed (async)
{
'url': 'https://www.getdrip.com/university/brennan-dunn-drip-workshop/',
'info_dict': {
'id': '807fafadvk',
'ext': 'mp4',
'title': 'Drip Brennan Dunn Workshop',
'description': 'a JV Webinars video from getdrip-1',
'duration': 4986.95,
'timestamp': 1463607249,
'upload_date': '20160518',
},
'params': {
'skip_download': True,
}
},
# Soundcloud embed
{
'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
'info_dict': {
'id': '174391317',
'ext': 'mp3',
'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
'uploader': 'Sophos Security',
'title': 'Chet Chat 171 - Oct 29, 2014',
'upload_date': '20141029',
}
},
# Soundcloud multiple embeds
{
'url': 'http://www.guitarplayer.com/lessons/1014/legato-workout-one-hour-to-more-fluid-performance---tab/52809',
'info_dict': {
'id': '52809',
'title': 'Guitar Essentials: Legato Workout—One-Hour to Fluid Performance | TAB + AUDIO',
},
'playlist_mincount': 7,
},
# TuneIn station embed
{
'url': 'http://radiocnrv.com/promouvoir-radio-cnrv/',
'info_dict': {
'id': '204146',
'ext': 'mp3',
'title': 'CNRV',
'location': 'Paris, France',
'is_live': True,
},
'params': {
# Live stream
'skip_download': True,
},
},
# Livestream embed
{
'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
'info_dict': {
'id': '67864563',
'ext': 'flv',
'upload_date': '20141112',
'title': 'Rosetta #CometLanding webcast HL 10',
}
},
# Another Livestream embed, without 'new.' in URL
{
'url': 'https://www.freespeech.org/',
'info_dict': {
'id': '123537347',
'ext': 'mp4',
'title': 're:^FSTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
},
'params': {
# Live stream
'skip_download': True,
},
},
# LazyYT
{
'url': 'https://skiplagged.com/',
'info_dict': {
'id': 'skiplagged',
'title': 'Skiplagged: The smart way to find cheap flights',
},
'playlist_mincount': 1,
'add_ie': ['Youtube'],
},
# Cinchcast embed
{
'url': 'http://undergroundwellness.com/podcasts/306-5-steps-to-permanent-gut-healing/',
'info_dict': {
'id': '7141703',
'ext': 'mp3',
'upload_date': '20141126',
'title': 'Jack Tips: 5 Steps to Permanent Gut Healing',
}
},
# Cinerama player
{
'url': 'http://www.abc.net.au/7.30/content/2015/s4164797.htm',
'info_dict': {
'id': '730m_DandD_1901_512k',
'ext': 'mp4',
'uploader': 'www.abc.net.au',
'title': 'Game of Thrones with dice - Dungeons and Dragons fantasy role-playing game gets new life - 19/01/2015',
}
},
# embedded viddler video
{
'url': 'http://deadspin.com/i-cant-stop-watching-john-wall-chop-the-nuggets-with-th-1681801597',
'info_dict': {
'id': '4d03aad9',
'ext': 'mp4',
'uploader': 'deadspin',
'title': 'WALL-TO-GORTAT',
'timestamp': 1422285291,
'upload_date': '20150126',
},
'add_ie': ['Viddler'],
},
# Libsyn embed
{
'url': 'http://thedailyshow.cc.com/podcast/episodetwelve',
'info_dict': {
'id': '3377616',
'ext': 'mp3',
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
},
'skip': 'All The Daily Show URLs now redirect to http://www.cc.com/shows/',
},
# jwplayer YouTube
{
'url': 'http://media.nationalarchives.gov.uk/index.php/webinar-using-discovery-national-archives-online-catalogue/',
'info_dict': {
'id': 'Mrj4DVp2zeA',
'ext': 'mp4',
'upload_date': '20150212',
'uploader': 'The National Archives UK',
'description': 'md5:8078af856dca76edc42910b61273dbbf',
'uploader_id': 'NationalArchives08',
'title': 'Webinar: Using Discovery, The National Archives’ online catalogue',
},
},
# jwplayer rtmp
{
'url': 'http://www.suffolk.edu/sjc/live.php',
'info_dict': {
'id': 'live',
'ext': 'flv',
'title': 'Massachusetts Supreme Judicial Court Oral Arguments',
'uploader': 'www.suffolk.edu',
},
'params': {
'skip_download': True,
},
'skip': 'Only has video a few mornings per month, see http://www.suffolk.edu/sjc/',
},
# Complex jwplayer
{
'url': 'http://www.indiedb.com/games/king-machine/videos',
'info_dict': {
'id': 'videos',
'ext': 'mp4',
'title': 'king machine trailer 1',
'description': 'Browse King Machine videos & audio for sweet media. Your eyes will thank you.',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
# JWPlayer config passed as variable
'url': 'http://www.txxx.com/videos/3326530/ariele/',
'info_dict': {
'id': '3326530_hq',
'ext': 'mp4',
'title': 'ARIELE | Tube Cup',
'uploader': 'www.txxx.com',
'age_limit': 18,
},
'params': {
'skip_download': True,
}
},
{
    # JWPlatform iframe
    'url': 'https://www.mediaite.com/tv/dem-senator-claims-gary-cohn-faked-a-bad-connection-during-trump-call-to-get-him-off-the-phone/',
    'md5': 'ca00a040364b5b439230e7ebfd02c4e9',
    'info_dict': {
        'id': 'O0c5JcKT',
        'ext': 'mp4',
        'upload_date': '20171122',
        'timestamp': 1511366290,
        'title': 'Dem Senator Claims Gary Cohn Faked a Bad Connection During Trump Call to Get Him Off the Phone',
    },
    # Plain string for consistency with every other entry in this list;
    # JWPlatformIE.ie_key() evaluates to 'JWPlatform' (class name minus 'IE'),
    # so this is behaviorally identical while avoiding the class reference.
    'add_ie': ['JWPlatform'],
},
{
# Video.js embed, multiple formats
'url': 'http://ortcam.com/solidworks-урок-6-настройка-чертежа_33f9b7351.html',
'info_dict': {
'id': 'yygqldloqIk',
'ext': 'mp4',
'title': 'SolidWorks. Урок 6 Настройка чертежа',
'description': 'md5:baf95267792646afdbf030e4d06b2ab3',
'upload_date': '20130314',
'uploader': 'PROстое3D',
'uploader_id': 'PROstoe3D',
},
'params': {
'skip_download': True,
},
},
{
# Video.js embed, single format
'url': 'https://www.vooplayer.com/v3/watch/watch.php?v=NzgwNTg=',
'info_dict': {
'id': 'watch',
'ext': 'mp4',
'title': 'Step 1 - Good Foundation',
'description': 'md5:d1e7ff33a29fc3eb1673d6c270d344f4',
},
'params': {
'skip_download': True,
},
},
# rtl.nl embed
{
'url': 'http://www.rtlnieuws.nl/nieuws/buitenland/aanslagen-kopenhagen',
'playlist_mincount': 5,
'info_dict': {
'id': 'aanslagen-kopenhagen',
'title': 'Aanslagen Kopenhagen',
}
},
# Zapiks embed
{
'url': 'http://www.skipass.com/news/116090-bon-appetit-s5ep3-baqueira-mi-cor.html',
'info_dict': {
'id': '118046',
'ext': 'mp4',
'title': 'EP3S5 - Bon Appétit - Baqueira Mi Corazon !',
}
},
# Kaltura embed (different embed code)
{
'url': 'http://www.premierchristianradio.com/Shows/Saturday/Unbelievable/Conference-Videos/Os-Guinness-Is-It-Fools-Talk-Unbelievable-Conference-2014',
'info_dict': {
'id': '1_a52wc67y',
'ext': 'flv',
'upload_date': '20150127',
'uploader_id': 'PremierMedia',
'timestamp': int,
'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? Conference 2014',
},
},
# Kaltura embed with single quotes
{
'url': 'http://fod.infobase.com/p_ViewPlaylist.aspx?AssignmentID=NUN8ZY',
'info_dict': {
'id': '0_izeg5utt',
'ext': 'mp4',
'title': '35871',
'timestamp': 1355743100,
'upload_date': '20121217',
'uploader_id': 'cplapp@learn360.com',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura embedded via quoted entry_id
'url': 'https://www.oreilly.com/ideas/my-cloud-makes-pretty-pictures',
'info_dict': {
'id': '0_utuok90b',
'ext': 'mp4',
'title': '06_matthew_brender_raj_dutt',
'timestamp': 1466638791,
'upload_date': '20160622',
},
'add_ie': ['Kaltura'],
'expected_warnings': [
'Could not send HEAD request'
],
'params': {
'skip_download': True,
}
},
{
# Kaltura embedded, some fileExt broken (#11480)
'url': 'http://www.cornell.edu/video/nima-arkani-hamed-standard-models-of-particle-physics',
'info_dict': {
'id': '1_sgtvehim',
'ext': 'mp4',
'title': 'Our "Standard Models" of particle physics and cosmology',
'description': 'md5:67ea74807b8c4fea92a6f38d6d323861',
'timestamp': 1321158993,
'upload_date': '20111113',
'uploader_id': 'kps1',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura iframe embed
'url': 'http://www.gsd.harvard.edu/event/i-m-pei-a-centennial-celebration/',
'md5': 'ae5ace8eb09dc1a35d03b579a9c2cc44',
'info_dict': {
'id': '0_f2cfbpwy',
'ext': 'mp4',
'title': 'I. M. Pei: A Centennial Celebration',
'description': 'md5:1db8f40c69edc46ca180ba30c567f37c',
'upload_date': '20170403',
'uploader_id': 'batchUser',
'timestamp': 1491232186,
},
'add_ie': ['Kaltura'],
},
{
# Kaltura iframe embed, more sophisticated
'url': 'http://www.cns.nyu.edu/~eero/math-tools/Videos/lecture-05sep2017.html',
'info_dict': {
'id': '1_9gzouybz',
'ext': 'mp4',
'title': 'lecture-05sep2017',
'description': 'md5:40f347d91fd4ba047e511c5321064b49',
'upload_date': '20170913',
'uploader_id': 'eps2',
'timestamp': 1505340777,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
},
{
# meta twitter:player
'url': 'http://thechive.com/2017/12/08/all-i-want-for-christmas-is-more-twerk/',
'info_dict': {
'id': '0_01b42zps',
'ext': 'mp4',
'title': 'Main Twerk (Video)',
'upload_date': '20171208',
'uploader_id': 'sebastian.salinas@thechive.com',
'timestamp': 1512713057,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
},
# referrer protected EaglePlatform embed
{
'url': 'https://tvrain.ru/lite/teleshow/kak_vse_nachinalos/namin-418921/',
'info_dict': {
'id': '582306',
'ext': 'mp4',
'title': 'Стас Намин: «Мы нарушили девственность Кремля»',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 3382,
'view_count': int,
},
'params': {
'skip_download': True,
},
},
# ClipYou (EaglePlatform) embed (custom URL)
{
'url': 'http://muz-tv.ru/play/7129/',
# Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is unavailable.',
},
# Pladform embed
{
'url': 'http://muz-tv.ru/kinozal/view/7400/',
'info_dict': {
'id': '100183293',
'ext': 'mp4',
'title': 'Тайны перевала Дятлова • 1 серия 2 часть',
'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 694,
'age_limit': 0,
},
'skip': 'HTTP Error 404: Not Found',
},
# Playwire embed
{
'url': 'http://www.cinemablend.com/new/First-Joe-Dirt-2-Trailer-Teaser-Stupid-Greatness-70874.html',
'info_dict': {
'id': '3519514',
'ext': 'mp4',
'title': 'Joe Dirt 2 Beautiful Loser Teaser Trailer',
'thumbnail': r're:^https?://.*\.png$',
'duration': 45.115,
},
},
# 5min embed
{
'url': 'http://techcrunch.com/video/facebook-creates-on-this-day-crunch-report/518726732/',
'md5': '4c6f127a30736b59b3e2c19234ee2bf7',
'info_dict': {
'id': '518726732',
'ext': 'mp4',
'title': 'Facebook Creates "On This Day" | Crunch Report',
'description': 'Amazon updates Fire TV line, Tesla\'s Model X spotted in the wild',
'timestamp': 1427237531,
'uploader': 'Crunch Report',
'upload_date': '20150324',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# Crooks and Liars embed
{
'url': 'http://crooksandliars.com/2015/04/fox-friends-says-protecting-atheists',
'info_dict': {
'id': '8RUoRhRi',
'ext': 'mp4',
'title': "Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!",
'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
'timestamp': 1428207000,
'upload_date': '20150405',
'uploader': 'Heather',
},
},
# Crooks and Liars external embed
{
'url': 'http://theothermccain.com/2010/02/02/video-proves-that-bill-kristol-has-been-watching-glenn-beck/comment-page-1/',
'info_dict': {
'id': 'MTE3MjUtMzQ2MzA',
'ext': 'mp4',
'title': 'md5:5e3662a81a4014d24c250d76d41a08d5',
'description': 'md5:9b8e9542d6c3c5de42d6451b7d780cec',
'timestamp': 1265032391,
'upload_date': '20100201',
'uploader': 'Heather',
},
},
# NBC Sports vplayer embed
{
'url': 'http://www.riderfans.com/forum/showthread.php?121827-Freeman&s=e98fa1ea6dc08e886b1678d35212494a',
'info_dict': {
'id': 'ln7x1qSThw4k',
'ext': 'flv',
'title': "PFT Live: New leader in the 'new-look' defense",
'description': 'md5:65a19b4bbfb3b0c0c5768bed1dfad74e',
'uploader': 'NBCU-SPORTS',
'upload_date': '20140107',
'timestamp': 1389118457,
},
'skip': 'Invalid Page URL',
},
# NBC News embed
{
'url': 'http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html',
'md5': '1aa589c675898ae6d37a17913cf68d66',
'info_dict': {
'id': 'x_dtl_oa_LettermanliftPR_160608',
'ext': 'mp4',
'title': 'David Letterman: A Preview',
'description': 'A preview of Tom Brokaw\'s interview with David Letterman as part of the On Assignment series powered by Dateline. Airs Sunday June 12 at 7/6c.',
'upload_date': '20160609',
'timestamp': 1465431544,
'uploader': 'NBCU-NEWS',
},
},
# UDN embed
{
'url': 'https://video.udn.com/news/300346',
'md5': 'fd2060e988c326991037b9aff9df21a6',
'info_dict': {
'id': '300346',
'ext': 'mp4',
'title': '中一中男師變性 全校師生力挺',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Failed to parse JSON Expecting value'],
},
# Brightcove URL in single quotes
{
'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',
'md5': '4ae374f1f8b91c889c4b9203c8c752af',
'info_dict': {
'id': '4255764656001',
'ext': 'mp4',
'title': 'SN Presents: Russell Martin, World Citizen',
'description': 'To understand why he was the Toronto Blue Jays’ top off-season priority is to appreciate his background and upbringing in Montreal, where he first developed his baseball skills. Written and narrated by Stephen Brunt.',
'uploader': 'Rogers Sportsnet',
'uploader_id': '1704050871',
'upload_date': '20150525',
'timestamp': 1432570283,
},
},
# Kinja embed
{
'url': 'http://www.clickhole.com/video/dont-understand-bitcoin-man-will-mumble-explanatio-2537',
'info_dict': {
'id': '106351',
'ext': 'mp4',
'title': 'Don’t Understand Bitcoin? This Man Will Mumble An Explanation At You',
'description': 'Migrated from OnionStudios',
'thumbnail': r're:^https?://.*\.jpe?g$',
'uploader': 'clickhole',
'upload_date': '20150527',
'timestamp': 1432744860,
}
},
# SnagFilms embed
{
'url': 'http://whilewewatch.blogspot.ru/2012/06/whilewewatch-whilewewatch-gripping.html',
'info_dict': {
'id': '74849a00-85a9-11e1-9660-123139220831',
'ext': 'mp4',
'title': '#whilewewatch',
}
},
# AdobeTVVideo embed
{
'url': 'https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
},
# BrightcoveInPageEmbed embed
{
'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/',
'info_dict': {
'id': '4238694884001',
'ext': 'flv',
'title': 'Tabletop: Dread, Last Thoughts',
'description': 'Tabletop: Dread, Last Thoughts',
'duration': 51690,
},
},
# Brightcove embed, with no valid 'renditions' but valid 'IOSRenditions'
# This video can't be played in browsers if Flash disabled and UA set to iPhone, which is actually a false alarm
{
'url': 'https://dl.dropboxusercontent.com/u/29092637/interview.html',
'info_dict': {
'id': '4785848093001',
'ext': 'mp4',
'title': 'The Cardinal Pell Interview',
'description': 'Sky News Contributor Andrew Bolt interviews George Pell in Rome, following the Cardinal\'s evidence before the Royal Commission into Child Abuse. ',
'uploader': 'GlobeCast Australia - GlobeStream',
'uploader_id': '2733773828001',
'upload_date': '20160304',
'timestamp': 1457083087,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
{
# Brightcove embed with whitespace around attribute names
'url': 'http://www.stack.com/video/3167554373001/learn-to-hit-open-three-pointers-with-damian-lillard-s-baseline-drift-drill',
'info_dict': {
'id': '3167554373001',
'ext': 'mp4',
'title': "Learn to Hit Open Three-Pointers With Damian Lillard's Baseline Drift Drill",
'description': 'md5:57bacb0e0f29349de4972bfda3191713',
'uploader_id': '1079349493',
'upload_date': '20140207',
'timestamp': 1391810548,
},
'params': {
'skip_download': True,
},
},
# Another form of arte.tv embed
{
'url': 'http://www.tv-replay.fr/redirection/09-04-16/arte-reportage-arte-11508975.html',
'md5': '850bfe45417ddf221288c88a0cffe2e2',
'info_dict': {
'id': '030273-562_PLUS7-F',
'ext': 'mp4',
'title': 'ARTE Reportage - Nulle part, en France',
'description': 'md5:e3a0e8868ed7303ed509b9e3af2b870d',
'upload_date': '20160409',
},
},
# LiveLeak embed
{
'url': 'http://www.wykop.pl/link/3088787/',
'md5': '7619da8c820e835bef21a1efa2a0fc71',
'info_dict': {
'id': '874_1459135191',
'ext': 'mp4',
'title': 'Man shows poor quality of new apartment building',
'description': 'The wall is like a sand pile.',
'uploader': 'Lake8737',
},
'add_ie': [LiveLeakIE.ie_key()],
},
# Another LiveLeak embed pattern (#13336)
{
'url': 'https://milo.yiannopoulos.net/2017/06/concealed-carry-robbery/',
'info_dict': {
'id': '2eb_1496309988',
'ext': 'mp4',
'title': 'Thief robs place where everyone was armed',
'description': 'md5:694d73ee79e535953cf2488562288eee',
'uploader': 'brazilwtf',
},
'add_ie': [LiveLeakIE.ie_key()],
},
# Duplicated embedded video URLs
{
'url': 'http://www.hudl.com/athlete/2538180/highlights/149298443',
'info_dict': {
'id': '149298443_480_16c25b74_2',
'ext': 'mp4',
'title': 'vs. Blue Orange Spring Game',
'uploader': 'www.hudl.com',
},
},
# twitter:player:stream embed
{
'url': 'http://www.rtl.be/info/video/589263.aspx?CategoryID=288',
'info_dict': {
'id': 'master',
'ext': 'mp4',
'title': 'Une nouvelle espèce de dinosaure découverte en Argentine',
'uploader': 'www.rtl.be',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
# twitter:player embed
{
'url': 'http://www.theatlantic.com/video/index/484130/what-do-black-holes-sound-like/',
'md5': 'a3e0df96369831de324f0778e126653c',
'info_dict': {
'id': '4909620399001',
'ext': 'mp4',
'title': 'What Do Black Holes Sound Like?',
'description': 'what do black holes sound like',
'upload_date': '20160524',
'uploader_id': '29913724001',
'timestamp': 1464107587,
'uploader': 'TheAtlantic',
},
'add_ie': ['BrightcoveLegacy'],
},
# Facebook <iframe> embed
{
'url': 'https://www.hostblogger.de/blog/archives/6181-Auto-jagt-Betonmischer.html',
'md5': 'fbcde74f534176ecb015849146dd3aee',
'info_dict': {
'id': '599637780109885',
'ext': 'mp4',
'title': 'Facebook video #599637780109885',
},
},
# Facebook <iframe> embed, plugin video
{
'url': 'http://5pillarsuk.com/2017/06/07/tariq-ramadan-disagrees-with-pr-exercise-by-imams-refusing-funeral-prayers-for-london-attackers/',
'info_dict': {
'id': '1754168231264132',
'ext': 'mp4',
'title': 'About the Imams and Religious leaders refusing to perform funeral prayers for...',
'uploader': 'Tariq Ramadan (official)',
'timestamp': 1496758379,
'upload_date': '20170606',
},
'params': {
'skip_download': True,
},
},
# Facebook API embed
{
'url': 'http://www.lothype.com/blue-stars-2016-preview-standstill-full-show/',
'md5': 'a47372ee61b39a7b90287094d447d94e',
'info_dict': {
'id': '10153467542406923',
'ext': 'mp4',
'title': 'Facebook video #10153467542406923',
},
},
# Wordpress "YouTube Video Importer" plugin
{
'url': 'http://www.lothype.com/blue-devils-drumline-stanford-lot-2016/',
'md5': 'd16797741b560b485194eddda8121b48',
'info_dict': {
'id': 'HNTXWDXV9Is',
'ext': 'mp4',
'title': 'Blue Devils Drumline Stanford lot 2016',
'upload_date': '20160627',
'uploader_id': 'GENOCIDE8GENERAL10',
'uploader': 'cylus cyrus',
},
},
{
# video stored on custom kaltura server
'url': 'http://www.expansion.com/multimedia/videos.html?media=EQcM30NHIPv',
'md5': '537617d06e64dfed891fa1593c4b30cc',
'info_dict': {
'id': '0_1iotm5bh',
'ext': 'mp4',
'title': 'Elecciones británicas: 5 lecciones para Rajoy',
'description': 'md5:435a89d68b9760b92ce67ed227055f16',
'uploader_id': 'videos.expansion@el-mundo.net',
'upload_date': '20150429',
'timestamp': 1430303472,
},
'add_ie': ['Kaltura'],
},
{
# multiple kaltura embeds, nsfw
'url': 'https://www.quartier-rouge.be/prive/femmes/kamila-avec-video-jaime-sadomie.html',
'info_dict': {
'id': 'kamila-avec-video-jaime-sadomie',
'title': "Kamila avec vídeo “J'aime sadomie”",
},
'playlist_count': 8,
},
{
# Non-standard Vimeo embed
'url': 'https://openclassrooms.com/courses/understanding-the-web',
'md5': '64d86f1c7d369afd9a78b38cbb88d80a',
'info_dict': {
'id': '148867247',
'ext': 'mp4',
'title': 'Understanding the web - Teaser',
'description': 'This is "Understanding the web - Teaser" by openclassrooms on Vimeo, the home for high quality videos and the people who love them.',
'upload_date': '20151214',
'uploader': 'OpenClassrooms',
'uploader_id': 'openclassrooms',
},
'add_ie': ['Vimeo'],
},
{
# generic vimeo embed that requires original URL passed as Referer
'url': 'http://racing4everyone.eu/2016/07/30/formula-1-2016-round12-germany/',
'only_matching': True,
},
{
'url': 'https://support.arkena.com/display/PLAY/Ways+to+embed+your+video',
'md5': 'b96f2f71b359a8ecd05ce4e1daa72365',
'info_dict': {
'id': 'b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe',
'ext': 'mp4',
'title': 'Big Buck Bunny',
'description': 'Royalty free test video',
'timestamp': 1432816365,
'upload_date': '20150528',
'is_live': False,
},
'params': {
'skip_download': True,
},
'add_ie': [ArkenaIE.ie_key()],
},
{
'url': 'http://nova.bg/news/view/2016/08/16/156543/%D0%BD%D0%B0-%D0%BA%D0%BE%D1%81%D1%8A%D0%BC-%D0%BE%D1%82-%D0%B2%D0%B7%D1%80%D0%B8%D0%B2-%D0%BE%D1%82%D1%86%D0%B5%D0%BF%D0%B8%D1%85%D0%B0-%D1%86%D1%8F%D0%BB-%D0%BA%D0%B2%D0%B0%D1%80%D1%82%D0%B0%D0%BB-%D0%B7%D0%B0%D1%80%D0%B0%D0%B4%D0%B8-%D0%B8%D0%B7%D1%82%D0%B8%D1%87%D0%B0%D0%BD%D0%B5-%D0%BD%D0%B0-%D0%B3%D0%B0%D0%B7-%D0%B2-%D0%BF%D0%BB%D0%BE%D0%B2%D0%B4%D0%B8%D0%B2/',
'info_dict': {
'id': '1c7141f46c',
'ext': 'mp4',
'title': 'НА КОСЪМ ОТ ВЗРИВ: Изтичане на газ на бензиностанция в Пловдив',
},
'params': {
'skip_download': True,
},
'add_ie': [Vbox7IE.ie_key()],
},
{
# DBTV embeds
'url': 'http://www.dagbladet.no/2016/02/23/nyheter/nordlys/ski/troms/ver/43254897/',
'info_dict': {
'id': '43254897',
'title': 'Etter ett års planlegging, klaffet endelig alt: - Jeg måtte ta en liten dans',
},
'playlist_mincount': 3,
},
{
# Videa embeds
'url': 'http://forum.dvdtalk.com/movie-talk/623756-deleted-magic-star-wars-ot-deleted-alt-scenes-docu-style.html',
'info_dict': {
'id': '623756-deleted-magic-star-wars-ot-deleted-alt-scenes-docu-style',
'title': 'Deleted Magic - Star Wars: OT Deleted / Alt. Scenes Docu. Style - DVD Talk Forum',
},
'playlist_mincount': 2,
},
{
# 20 minuten embed
'url': 'http://www.20min.ch/schweiz/news/story/So-kommen-Sie-bei-Eis-und-Schnee-sicher-an-27032552',
'info_dict': {
'id': '523629',
'ext': 'mp4',
'title': 'So kommen Sie bei Eis und Schnee sicher an',
'description': 'md5:117c212f64b25e3d95747e5276863f7d',
},
'params': {
'skip_download': True,
},
'add_ie': [TwentyMinutenIE.ie_key()],
},
{
# VideoPress embed
'url': 'https://en.support.wordpress.com/videopress/',
'info_dict': {
'id': 'OcobLTqC',
'ext': 'm4v',
'title': 'IMG_5786',
'timestamp': 1435711927,
'upload_date': '20150701',
},
'params': {
'skip_download': True,
},
'add_ie': [VideoPressIE.ie_key()],
},
{
# Rutube embed
'url': 'http://magazzino.friday.ru/videos/vipuski/kazan-2',
'info_dict': {
'id': '9b3d5bee0a8740bf70dfd29d3ea43541',
'ext': 'flv',
'title': 'Магаззино: Казань 2',
'description': 'md5:99bccdfac2269f0e8fdbc4bbc9db184a',
'uploader': 'Магаззино',
'upload_date': '20170228',
'uploader_id': '996642',
},
'params': {
'skip_download': True,
},
'add_ie': [RutubeIE.ie_key()],
},
{
# ThePlatform embedded with whitespaces in URLs
'url': 'http://www.golfchannel.com/topics/shows/golftalkcentral.htm',
'only_matching': True,
},
{
# Senate ISVP iframe https
'url': 'https://www.hsgac.senate.gov/hearings/canadas-fast-track-refugee-plan-unanswered-questions-and-implications-for-us-national-security',
'md5': 'fb8c70b0b515e5037981a2492099aab8',
'info_dict': {
'id': 'govtaff020316',
'ext': 'mp4',
'title': 'Integrated Senate Video Player',
},
'add_ie': [SenateISVPIE.ie_key()],
},
{
# Limelight embeds (1 channel embed + 4 media embeds)
'url': 'http://www.sedona.com/FacilitatorTraining2017',
'info_dict': {
'id': 'FacilitatorTraining2017',
'title': 'Facilitator Training 2017',
},
'playlist_mincount': 5,
},
{
# Limelight embed (LimelightPlayerUtil.embed)
'url': 'https://tv5.ca/videos?v=xuu8qowr291ri',
'info_dict': {
'id': '95d035dc5c8a401588e9c0e6bd1e9c92',
'ext': 'mp4',
'title': '07448641',
'timestamp': 1499890639,
'upload_date': '20170712',
},
'params': {
'skip_download': True,
},
'add_ie': ['LimelightMedia'],
},
{
'url': 'http://kron4.com/2017/04/28/standoff-with-walnut-creek-murder-suspect-ends-with-arrest/',
'info_dict': {
'id': 'standoff-with-walnut-creek-murder-suspect-ends-with-arrest',
'title': 'Standoff with Walnut Creek murder suspect ends',
'description': 'md5:3ccc48a60fc9441eeccfc9c469ebf788',
},
'playlist_mincount': 4,
},
{
# WashingtonPost embed
'url': 'http://www.vanityfair.com/hollywood/2017/04/donald-trump-tv-pitches',
'info_dict': {
'id': '8caf6e88-d0ec-11e5-90d3-34c2c42653ac',
'ext': 'mp4',
'title': "No one has seen the drama series based on Trump's life \u2014 until now",
'description': 'Donald Trump wanted a weekly TV drama based on his life. It never aired. But The Washington Post recently obtained a scene from the pilot script — and enlisted actors.',
'timestamp': 1455216756,
'uploader': 'The Washington Post',
'upload_date': '20160211',
},
'add_ie': [WashingtonPostIE.ie_key()],
},
{
# Mediaset embed
'url': 'http://www.tgcom24.mediaset.it/politica/serracchiani-voglio-vivere-in-una-societa-aperta-reazioni-sproporzionate-_3071354-201702a.shtml',
'info_dict': {
'id': '720642',
'ext': 'mp4',
'title': 'Serracchiani: "Voglio vivere in una società aperta, con tutela del patto di fiducia"',
},
'params': {
'skip_download': True,
},
'add_ie': [MediasetIE.ie_key()],
},
{
# JOJ.sk embeds
'url': 'https://www.noviny.sk/slovensko/238543-slovenskom-sa-prehnala-vlna-silnych-burok',
'info_dict': {
'id': '238543-slovenskom-sa-prehnala-vlna-silnych-burok',
'title': 'Slovenskom sa prehnala vlna silných búrok',
},
'playlist_mincount': 5,
'add_ie': [JojIE.ie_key()],
},
{
# AMP embed (see https://www.ampproject.org/docs/reference/components/amp-video)
'url': 'https://tvrain.ru/amp/418921/',
'md5': 'cc00413936695987e8de148b67d14f1d',
'info_dict': {
'id': '418921',
'ext': 'mp4',
'title': 'Стас Намин: «Мы нарушили девственность Кремля»',
},
},
{
# vzaar embed
'url': 'http://help.vzaar.com/article/165-embedding-video',
'md5': '7e3919d9d2620b89e3e00bec7fe8c9d4',
'info_dict': {
'id': '8707641',
'ext': 'mp4',
'title': 'Building A Business Online: Principal Chairs Q & A',
},
},
{
# multiple HTML5 videos on one page
'url': 'https://www.paragon-software.com/home/rk-free/keyscenarios.html',
'info_dict': {
'id': 'keyscenarios',
'title': 'Rescue Kit 14 Free Edition - Getting started',
},
'playlist_count': 4,
},
{
# vshare embed
'url': 'https://youtube-dl-demo.neocities.org/vshare.html',
'md5': '17b39f55b5497ae8b59f5fbce8e35886',
'info_dict': {
'id': '0f64ce6',
'title': 'vl14062007715967',
'ext': 'mp4',
}
},
{
'url': 'http://www.heidelberg-laureate-forum.org/blog/video/lecture-friday-september-23-2016-sir-c-antony-r-hoare/',
'md5': 'aecd089f55b1cb5a59032cb049d3a356',
'info_dict': {
'id': '90227f51a80c4d8f86c345a7fa62bd9a1d',
'ext': 'mp4',
'title': 'Lecture: Friday, September 23, 2016 - Sir Tony Hoare',
'description': 'md5:5a51db84a62def7b7054df2ade403c6c',
'timestamp': 1474354800,
'upload_date': '20160920',
}
},
{
'url': 'http://www.kidzworld.com/article/30935-trolls-the-beat-goes-on-interview-skylar-astin-and-amanda-leighton',
'info_dict': {
'id': '1731611',
'ext': 'mp4',
'title': 'Official Trailer | TROLLS: THE BEAT GOES ON!',
'description': 'md5:eb5f23826a027ba95277d105f248b825',
'timestamp': 1516100691,
'upload_date': '20180116',
},
'params': {
'skip_download': True,
},
'add_ie': [SpringboardPlatformIE.ie_key()],
},
{
'url': 'https://www.youtube.com/shared?ci=1nEzmT-M4fU',
'info_dict': {
'id': 'uPDB5I9wfp8',
'ext': 'webm',
'title': 'Pocoyo: 90 minutos de episódios completos Português para crianças - PARTE 3',
'description': 'md5:d9e4d9346a2dfff4c7dc4c8cec0f546d',
'upload_date': '20160219',
'uploader': 'Pocoyo - Português (BR)',
'uploader_id': 'PocoyoBrazil',
},
'add_ie': [YoutubeIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.yapfiles.ru/show/1872528/690b05d3054d2dbe1e69523aa21bb3b1.mp4.html',
'info_dict': {
'id': 'vMDE4NzI1Mjgt690b',
'ext': 'mp4',
'title': 'Котята',
},
'add_ie': [YapFilesIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# CloudflareStream embed
'url': 'https://www.cloudflare.com/products/cloudflare-stream/',
'info_dict': {
'id': '31c9291ab41fac05471db4e73aa11717',
'ext': 'mp4',
'title': '31c9291ab41fac05471db4e73aa11717',
},
'add_ie': [CloudflareStreamIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# PeerTube embed
'url': 'https://joinpeertube.org/fr/home/',
'info_dict': {
'id': 'home',
'title': 'Reprenez le contrôle de vos vidéos ! #JoinPeertube',
},
'playlist_count': 2,
},
{
# Indavideo embed
'url': 'https://streetkitchen.hu/receptek/igy_kell_otthon_hamburgert_sutni/',
'info_dict': {
'id': '1693903',
'ext': 'mp4',
'title': 'Így kell otthon hamburgert sütni',
'description': 'md5:f5a730ecf900a5c852e1e00540bbb0f7',
'timestamp': 1426330212,
'upload_date': '20150314',
'uploader': 'StreetKitchen',
'uploader_id': '546363',
},
'add_ie': [IndavideoEmbedIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# APA embed via JWPlatform embed
'url': 'http://www.vol.at/blue-man-group/5593454',
'info_dict': {
'id': 'jjv85FdZ',
'ext': 'mp4',
'title': '"Blau ist mysteriös": Die Blue Man Group im Interview',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 254,
'timestamp': 1519211149,
'upload_date': '20180221',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://share-videos.se/auto/video/83645793?uid=13',
'md5': 'b68d276de422ab07ee1d49388103f457',
'info_dict': {
'id': '83645793',
'title': 'Lock up and get excited',
'ext': 'mp4'
},
'skip': 'TODO: fix nested playlists processing in tests',
},
{
# Viqeo embeds
'url': 'https://viqeo.tv/',
'info_dict': {
'id': 'viqeo',
'title': 'All-new video platform',
},
'playlist_count': 6,
},
{
# Squarespace video embed, 2019-08-28
'url': 'http://ootboxford.com',
'info_dict': {
'id': 'Tc7b_JGdZfw',
'title': 'Out of the Blue, at Childish Things 10',
'ext': 'mp4',
'description': 'md5:a83d0026666cf5ee970f8bd1cfd69c7f',
'uploader_id': 'helendouglashouse',
'uploader': 'Helen & Douglas House',
'upload_date': '20140328',
},
'params': {
'skip_download': True,
},
},
{
# Zype embed
'url': 'https://www.cookscountry.com/episode/554-smoky-barbecue-favorites',
'info_dict': {
'id': '5b400b834b32992a310622b9',
'ext': 'mp4',
'title': 'Smoky Barbecue Favorites',
'thumbnail': r're:^https?://.*\.jpe?g',
'description': 'md5:5ff01e76316bd8d46508af26dc86023b',
'upload_date': '20170909',
'timestamp': 1504915200,
},
'add_ie': [ZypeIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# videojs embed
'url': 'https://video.sibnet.ru/shell.php?videoid=3422904',
'info_dict': {
'id': 'shell',
'ext': 'mp4',
'title': 'Доставщик пиццы спросил разрешения сыграть на фортепиано',
'description': 'md5:89209cdc587dab1e4a090453dbaa2cb1',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Failed to download MPD manifest'],
},
{
# DailyMotion embed with DM.player
'url': 'https://www.beinsports.com/us/copa-del-rey/video/the-locker-room-valencia-beat-barca-in-copa/1203804',
'info_dict': {
'id': 'k6aKkGHd9FJs4mtJN39',
'ext': 'mp4',
'title': 'The Locker Room: Valencia Beat Barca In Copa del Rey Final',
'description': 'This video is private.',
'uploader_id': 'x1jf30l',
'uploader': 'beIN SPORTS USA',
'upload_date': '20190528',
'timestamp': 1559062971,
},
'params': {
'skip_download': True,
},
},
# {
# # TODO: find another test
# # http://schema.org/VideoObject
# 'url': 'https://flipagram.com/f/nyvTSJMKId',
# 'md5': '888dcf08b7ea671381f00fab74692755',
# 'info_dict': {
# 'id': 'nyvTSJMKId',
# 'ext': 'mp4',
# 'title': 'Flipagram by sjuria101 featuring Midnight Memories by One Direction',
# 'description': '#love for cats.',
# 'timestamp': 1461244995,
# 'upload_date': '20160421',
# },
# 'params': {
# 'force_generic_extractor': True,
# },
# }
]
def report_following_redirect(self, new_url):
    """Announce on screen that extraction is following a redirect."""
    message = '[redirect] Following redirect to %s' % new_url
    self._downloader.to_screen(message)
def _extract_rss(self, url, video_id, doc):
playlist_title = doc.find('./channel/title').text
playlist_desc_el = doc.find('./channel/description')
playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text
entries = []
for it in doc.findall('./channel/item'):
next_url = None
enclosure_nodes = it.findall('./enclosure')
for e in enclosure_nodes:
next_url = e.attrib.get('url')
if next_url:
break
if not next_url:
next_url = xpath_text(it, 'link', fatal=False)
if not next_url:
continue
entries.append({
'_type': 'url_transparent',
'url': next_url,
'title': it.find('title').text,
})
return {
'_type': 'playlist',
'id': url,
'title': playlist_title,
'description': playlist_desc,
'entries': entries,
}
def _extract_camtasia(self, url, video_id, webpage):
""" Returns None if no camtasia video can be found. """
camtasia_cfg = self._search_regex(
r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
webpage, 'camtasia configuration file', default=None)
if camtasia_cfg is None:
return None
title = self._html_search_meta('DC.title', webpage, fatal=True)
camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
camtasia_cfg = self._download_xml(
camtasia_url, video_id,
note='Downloading camtasia configuration',
errnote='Failed to download camtasia configuration')
fileset_node = camtasia_cfg.find('./playlist/array/fileset')
entries = []
for n in fileset_node.getchildren():
url_n = n.find('./uri')
if url_n is None:
continue
entries.append({
'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
'title': '%s - %s' % (title, n.tag),
'url': compat_urlparse.urljoin(url, url_n.text),
'duration': float_or_none(n.find('./duration').text),
})
return {
'_type': 'playlist',
'entries': entries,
'title': title,
}
def _real_extract(self, url):
if url.startswith('//'):
return self.url_result(self.http_scheme() + url)
parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
default_search = self._downloader.params.get('default_search')
if default_search is None:
default_search = 'fixup_error'
if default_search in ('auto', 'auto_warning', 'fixup_error'):
if re.match(r'^[^\s/]+\.[^\s/]+/', url):
self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
return self.url_result('http://' + url)
elif default_search != 'fixup_error':
if default_search == 'auto_warning':
if re.match(r'^(?:url|URL)$', url):
raise ExtractorError(
'Invalid URL: %r . Call youtube-dl like this: youtube-dl -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
expected=True)
else:
self._downloader.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
return self.url_result('ytsearch:' + url)
if default_search in ('error', 'fixup_error'):
raise ExtractorError(
'%r is not a valid URL. '
'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
% (url, url), expected=True)
else:
if ':' not in default_search:
default_search += ':'
return self.url_result(default_search + url)
url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
else:
video_id = self._generic_id(url)
self.to_screen('%s: Requesting header' % video_id)
head_req = HEADRequest(url)
head_response = self._request_webpage(
head_req, video_id,
note=False, errnote='Could not send HEAD request to %s' % url,
fatal=False)
if head_response is not False:
# Check for redirect
new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(
new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)
full_response = None
if head_response is False:
request = sanitized_Request(url)
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
head_response = full_response
info_dict = {
'id': video_id,
'title': self._generic_title(url),
'upload_date': unified_strdate(head_response.headers.get('Last-Modified'))
}
# Check for direct link to a video
content_type = head_response.headers.get('Content-Type', '').lower()
m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
if m:
format_id = compat_str(m.group('format_id'))
if format_id.endswith('mpegurl'):
formats = self._extract_m3u8_formats(url, video_id, 'mp4')
elif format_id == 'f4m':
formats = self._extract_f4m_formats(url, video_id)
else:
formats = [{
'format_id': format_id,
'url': url,
'vcodec': 'none' if m.group('type') == 'audio' else None
}]
info_dict['direct'] = True
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
if not self._downloader.params.get('test', False) and not is_intentional:
force = self._downloader.params.get('force_generic_extractor', False)
self._downloader.report_warning(
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
if not full_response:
request = sanitized_Request(url)
# Some webservers may serve compressed content of rather big size (e.g. gzipped flac)
# making it impossible to download only chunk of the file (yet we need only 512kB to
# test whether it's HTML or not). According to youtube-dl default Accept-Encoding
# that will always result in downloading the whole file that is not desirable.
# Therefore for extraction pass we have to override Accept-Encoding to any in order
# to accept raw bytes and being able to download only a chunk.
# It may probably better to solve this by checking Content-Type for application/octet-stream
# after HEAD request finishes, but not sure if we can rely on this.
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
first_bytes = full_response.read(512)
# Is it an M3U playlist?
if first_bytes.startswith(b'#EXTM3U'):
info_dict['formats'] = self._extract_m3u8_formats(url, video_id, 'mp4')
self._sort_formats(info_dict['formats'])
return info_dict
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
if not is_html(first_bytes):
self._downloader.report_warning(
'URL could be a direct video link, returning it as such.')
info_dict.update({
'direct': True,
'url': url,
})
return info_dict
webpage = self._webpage_read_content(
full_response, url, video_id, prefix=first_bytes)
self.report_extraction(video_id)
# Is it an RSS feed, a SMIL file, an XSPF playlist or a MPD manifest?
try:
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
elif doc.tag == 'SmoothStreamingMedia':
info_dict['formats'] = self._parse_ism_formats(doc, url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
smil = self._parse_smil(doc, url, video_id)
self._sort_formats(smil['formats'])
return smil
elif doc.tag == '{http://xspf.org/ns/0/}playlist':
return self.playlist_result(
self._parse_xspf(
doc, video_id, xspf_url=url,
xspf_base_url=full_response.geturl()),
video_id)
elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
info_dict['formats'] = self._parse_mpd_formats(
doc,
mpd_base_url=full_response.geturl().rpartition('/')[0],
mpd_url=url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^{http://ns\.adobe\.com/f4m/[12]\.0}manifest$', doc.tag):
info_dict['formats'] = self._parse_f4m_formats(doc, url, video_id)
self._sort_formats(info_dict['formats'])
return info_dict
except compat_xml_parse_error:
pass
# Is it a Camtasia project?
camtasia_res = self._extract_camtasia(url, video_id, webpage)
if camtasia_res is not None:
return camtasia_res
# Sometimes embedded video player is hidden behind percent encoding
# (e.g. https://github.com/ytdl-org/youtube-dl/issues/2448)
# Unescaping the whole page allows to handle those cases in a generic way
webpage = compat_urllib_parse_unquote(webpage)
# Unescape squarespace embeds to be detected by generic extractor,
# see https://github.com/ytdl-org/youtube-dl/issues/21294
webpage = re.sub(
r'<div[^>]+class=[^>]*?\bsqs-video-wrapper\b[^>]*>',
lambda x: unescapeHTML(x.group(0)), webpage)
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
video_title = self._og_search_title(
webpage, default=None) or self._html_search_regex(
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# video uploader is domain name
video_uploader = self._search_regex(
r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
video_description = self._og_search_description(webpage, default=None)
video_thumbnail = self._og_search_thumbnail(webpage, default=None)
info_dict.update({
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'age_limit': age_limit,
})
# Look for Brightcove Legacy Studio embeds
bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage)
if bc_urls:
entries = [{
'_type': 'url',
'url': smuggle_url(bc_url, {'Referer': url}),
'ie_key': 'BrightcoveLegacy'
} for bc_url in bc_urls]
return {
'_type': 'playlist',
'title': video_title,
'id': video_id,
'entries': entries,
}
# Look for Brightcove New Studio embeds
bc_urls = BrightcoveNewIE._extract_urls(self, webpage)
if bc_urls:
return self.playlist_from_matches(
bc_urls, video_id, video_title,
getter=lambda x: smuggle_url(x, {'referrer': url}),
ie='BrightcoveNew')
# Look for Nexx embeds
nexx_urls = NexxIE._extract_urls(webpage)
if nexx_urls:
return self.playlist_from_matches(nexx_urls, video_id, video_title, ie=NexxIE.ie_key())
# Look for Nexx iFrame embeds
nexx_embed_urls = NexxEmbedIE._extract_urls(webpage)
if nexx_embed_urls:
return self.playlist_from_matches(nexx_embed_urls, video_id, video_title, ie=NexxEmbedIE.ie_key())
# Look for ThePlatform embeds
tp_urls = ThePlatformIE._extract_urls(webpage)
if tp_urls:
return self.playlist_from_matches(tp_urls, video_id, video_title, ie='ThePlatform')
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:(?:www|static)\.)?rtl\.nl/(?:system/videoplayer/[^"]+(?:video_)?)?embed[^"]+)"',
webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title, ie='RtlNl')
vimeo_urls = VimeoIE._extract_urls(url, webpage)
if vimeo_urls:
return self.playlist_from_matches(vimeo_urls, video_id, video_title, ie=VimeoIE.ie_key())
vid_me_embed_url = self._search_regex(
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
webpage, 'vid.me embed', default=None)
if vid_me_embed_url is not None:
return self.url_result(vid_me_embed_url, 'Vidme')
# Look for YouTube embeds
youtube_urls = YoutubeIE._extract_urls(webpage)
if youtube_urls:
return self.playlist_from_matches(
youtube_urls, video_id, video_title, ie=YoutubeIE.ie_key())
matches = DailymotionIE._extract_urls(webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title)
# Look for embedded Dailymotion playlist player (#3822)
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
if m:
playlists = re.findall(
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
if playlists:
return self.playlist_from_matches(
playlists, video_id, video_title, lambda p: '//dailymotion.com/playlist/%s' % p)
# Look for DailyMail embeds
dailymail_urls = DailyMailIE._extract_urls(webpage)
if dailymail_urls:
return self.playlist_from_matches(
dailymail_urls, video_id, video_title, ie=DailyMailIE.ie_key())
# Look for Teachable embeds, must be before Wistia
teachable_url = TeachableIE._extract_url(webpage, url)
if teachable_url:
return self.url_result(teachable_url)
# Look for embedded Wistia player
wistia_urls = WistiaIE._extract_urls(webpage)
if wistia_urls:
playlist = self.playlist_from_matches(wistia_urls, video_id, video_title, ie=WistiaIE.ie_key())
for entry in playlist['entries']:
entry.update({
'_type': 'url_transparent',
'uploader': video_uploader,
})
return playlist
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
return self.url_result(svt_url, 'SVT')
# Look for Bandcamp pages with custom domain
mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
if mobj is not None:
burl = unescapeHTML(mobj.group(1))
# Don't set the extractor because it can be a track url or an album
return self.url_result(burl)
# Look for embedded Vevo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Viddler player
mobj = re.search(
r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NYTimes player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Libsyn player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//html5-player\.libsyn\.com/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage)
or re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
embed_token = self._search_regex(
r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
webpage, 'ooyala embed token', default=None)
return OoyalaIE._build_url_result(smuggle_url(
mobj.group('ec'), {
'domain': url,
'embed_token': embed_token,
}))
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
if mobj is not None:
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return self.playlist_from_matches(
embeds, video_id, video_title,
getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala')
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Aparat')
# Look for MPORA videos
mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Mpora')
# Look for embedded Facebook player
facebook_urls = FacebookIE._extract_urls(webpage)
if facebook_urls:
return self.playlist_from_matches(facebook_urls, video_id, video_title)
# Look for embedded VK player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
# Look for embedded Odnoklassniki player
odnoklassniki_url = OdnoklassnikiIE._extract_url(webpage)
if odnoklassniki_url:
return self.url_result(odnoklassniki_url, OdnoklassnikiIE.ie_key())
# Look for embedded ivi player
mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ivi')
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'HuffPost')
# Look for embed.ly
mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
if mobj is not None:
return self.url_result(compat_urllib_parse_unquote(mobj.group('url')))
# Look for funnyordie embed
matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
if matches:
return self.playlist_from_matches(
matches, video_id, video_title, getter=unescapeHTML, ie='FunnyOrDie')
# Look for BBC iPlayer embed
matches = re.findall(r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)', webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title, ie='BBCCoUk')
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
# Look for embedded TVC player
tvc_url = TVCIE._extract_url(webpage)
if tvc_url:
return self.url_result(tvc_url, 'TVC')
# Look for embedded SportBox player
sportbox_urls = SportBoxIE._extract_urls(webpage)
if sportbox_urls:
return self.playlist_from_matches(sportbox_urls, video_id, video_title, ie=SportBoxIE.ie_key())
# Look for embedded XHamster player
xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
if xhamster_urls:
return self.playlist_from_matches(xhamster_urls, video_id, video_title, ie='XHamsterEmbed')
# Look for embedded TNAFlixNetwork player
tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage)
if tnaflix_urls:
return self.playlist_from_matches(tnaflix_urls, video_id, video_title, ie=TNAFlixNetworkEmbedIE.ie_key())
# Look for embedded PornHub player
pornhub_urls = PornHubIE._extract_urls(webpage)
if pornhub_urls:
return self.playlist_from_matches(pornhub_urls, video_id, video_title, ie=PornHubIE.ie_key())
# Look for embedded DrTuber player
drtuber_urls = DrTuberIE._extract_urls(webpage)
if drtuber_urls:
return self.playlist_from_matches(drtuber_urls, video_id, video_title, ie=DrTuberIE.ie_key())
# Look for embedded RedTube player
redtube_urls = RedTubeIE._extract_urls(webpage)
if redtube_urls:
return self.playlist_from_matches(redtube_urls, video_id, video_title, ie=RedTubeIE.ie_key())
# Look for embedded Tube8 player
tube8_urls = Tube8IE._extract_urls(webpage)
if tube8_urls:
return self.playlist_from_matches(tube8_urls, video_id, video_title, ie=Tube8IE.ie_key())
# Look for embedded Mofosex player
mofosex_urls = MofosexEmbedIE._extract_urls(webpage)
if mofosex_urls:
return self.playlist_from_matches(mofosex_urls, video_id, video_title, ie=MofosexEmbedIE.ie_key())
# Look for embedded Spankwire player
spankwire_urls = SpankwireIE._extract_urls(webpage)
if spankwire_urls:
return self.playlist_from_matches(spankwire_urls, video_id, video_title, ie=SpankwireIE.ie_key())
# Look for embedded YouPorn player
youporn_urls = YouPornIE._extract_urls(webpage)
if youporn_urls:
return self.playlist_from_matches(youporn_urls, video_id, video_title, ie=YouPornIE.ie_key())
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Tvigle')
# Look for embedded TED player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed(?:-ssl)?\.ted\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'TED')
# Look for embedded Ustream videos
ustream_url = UstreamIE._extract_url(webpage)
if ustream_url:
return self.url_result(ustream_url, UstreamIE.ie_key())
# Look for embedded arte.tv player
mobj = re.search(
r'<(?:script|iframe) [^>]*?src="(?P<url>http://www\.arte\.tv/(?:playerv2/embed|arte_vp/index)[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'ArteTVEmbed')
# Look for embedded francetv player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded smotri.com player
smotri_url = SmotriIE._extract_url(webpage)
if smotri_url:
return self.url_result(smotri_url, 'Smotri')
# Look for embedded Myvi.ru player
myvi_url = MyviIE._extract_url(webpage)
if myvi_url:
return self.url_result(myvi_url)
# Look for embedded soundcloud player
soundcloud_urls = SoundcloudEmbedIE._extract_urls(webpage)
if soundcloud_urls:
return self.playlist_from_matches(soundcloud_urls, video_id, video_title, getter=unescapeHTML)
# Look for tunein player
tunein_urls = TuneInBaseIE._extract_urls(webpage)
if tunein_urls:
return self.playlist_from_matches(tunein_urls, video_id, video_title)
# Look for embedded mtvservices player
mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
if mtvservices_url:
return self.url_result(mtvservices_url, ie='MTVServicesEmbedded')
# Look for embedded yahoo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Yahoo')
# Look for embedded sbs.com.au player
mobj = re.search(
r'''(?x)
(?:
<meta\s+property="og:video"\s+content=|
<iframe[^>]+?src=
)
(["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1''',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'SBS')
# Look for embedded Cinchcast player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Cinchcast')
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage)
if not mobj:
mobj = re.search(
r'data-video-link=["\'](?P<url>http://m.mlb.com/video/[^"\']+)',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'MLB')
mobj = re.search(
r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:new\.)?livestream\.com/[^"]+/player[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Livestream')
# Look for Zapiks embed
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Zapiks')
# Look for Kaltura embeds
kaltura_urls = KalturaIE._extract_urls(webpage)
if kaltura_urls:
return self.playlist_from_matches(
kaltura_urls, video_id, video_title,
getter=lambda x: smuggle_url(x, {'source_url': url}),
ie=KalturaIE.ie_key())
# Look for EaglePlatform embeds
eagleplatform_url = EaglePlatformIE._extract_url(webpage)
if eagleplatform_url:
return self.url_result(smuggle_url(eagleplatform_url, {'referrer': url}), EaglePlatformIE.ie_key())
# Look for ClipYou (uses EaglePlatform) embeds
mobj = re.search(
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"', webpage)
if mobj is not None:
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
# Look for Pladform embeds
pladform_url = PladformIE._extract_url(webpage)
if pladform_url:
return self.url_result(pladform_url)
# Look for Videomore embeds
videomore_url = VideomoreIE._extract_url(webpage)
if videomore_url:
return self.url_result(videomore_url)
# Look for Webcaster embeds
webcaster_url = WebcasterFeedIE._extract_url(self, webpage)
if webcaster_url:
return self.url_result(webcaster_url, ie=WebcasterFeedIE.ie_key())
# Look for Playwire embeds
mobj = re.search(
r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for 5min embeds
mobj = re.search(
r'<meta[^>]+property="og:video"[^>]+content="https?://embed\.5min\.com/(?P<id>[0-9]+)/?', webpage)
if mobj is not None:
return self.url_result('5min:%s' % mobj.group('id'), 'FiveMin')
# Look for Crooks and Liars embeds
mobj = re.search(
r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NBC Sports VPlayer embeds
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
# Look for NBC News embeds
nbc_news_embed_url = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//www\.nbcnews\.com/widget/video-embed/[^"\']+)\1', webpage)
if nbc_news_embed_url:
return self.url_result(nbc_news_embed_url.group('url'), 'NBCNews')
# Look for Google Drive embeds
google_drive_url = GoogleDriveIE._extract_url(webpage)
if google_drive_url:
return self.url_result(google_drive_url, 'GoogleDrive')
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?:https?:)?(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
return self.url_result(senate_isvp_url, 'SenateISVP')
# Look for Kinja embeds
kinja_embed_urls = KinjaEmbedIE._extract_urls(webpage, url)
if kinja_embed_urls:
return self.playlist_from_matches(
kinja_embed_urls, video_id, video_title)
# Look for OnionStudios embeds
onionstudios_url = OnionStudiosIE._extract_url(webpage)
if onionstudios_url:
return self.url_result(onionstudios_url)
# Look for ViewLift embeds
viewlift_url = ViewLiftEmbedIE._extract_url(webpage)
if viewlift_url:
return self.url_result(viewlift_url)
# Look for JWPlatform embeds
jwplatform_urls = JWPlatformIE._extract_urls(webpage)
if jwplatform_urls:
return self.playlist_from_matches(jwplatform_urls, video_id, video_title, ie=JWPlatformIE.ie_key())
# Look for Digiteka embeds
digiteka_url = DigitekaIE._extract_url(webpage)
if digiteka_url:
return self.url_result(self._proto_relative_url(digiteka_url), DigitekaIE.ie_key())
# Look for Arkena embeds
arkena_url = ArkenaIE._extract_url(webpage)
if arkena_url:
return self.url_result(arkena_url, ArkenaIE.ie_key())
# Look for Piksel embeds
piksel_url = PikselIE._extract_url(webpage)
if piksel_url:
return self.url_result(piksel_url, PikselIE.ie_key())
# Look for Limelight embeds
limelight_urls = LimelightBaseIE._extract_urls(webpage, url)
if limelight_urls:
return self.playlist_result(
limelight_urls, video_id, video_title, video_description)
# Look for Anvato embeds
anvato_urls = AnvatoIE._extract_urls(self, webpage, video_id)
if anvato_urls:
return self.playlist_result(
anvato_urls, video_id, video_title, video_description)
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))),
'AdobeTVVideo')
# Look for Vine embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?vine\.co/v/[^/]+/embed/(?:simple|postcard))',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), 'Vine')
# Look for VODPlatform embeds
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:(?:www\.)?vod-platform\.net|embed\.kwikmotion\.com)/[eE]mbed/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group('url'))), 'VODPlatform')
# Look for Mangomolo embeds
mobj = re.search(
r'''(?x)<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//
(?:
admin\.mangomolo\.com/analytics/index\.php/customers/embed|
player\.mangomolo\.com/v1
)/
(?:
video\?.*?\bid=(?P<video_id>\d+)|
(?:index|live)\?.*?\bchannelid=(?P<channel_id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)
).+?)\1''', webpage)
if mobj is not None:
info = {
'_type': 'url_transparent',
'url': self._proto_relative_url(unescapeHTML(mobj.group('url'))),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
video_id = mobj.group('video_id')
if video_id:
info.update({
'ie_key': 'MangomoloVideo',
'id': video_id,
})
else:
info.update({
'ie_key': 'MangomoloLive',
'id': mobj.group('channel_id'),
})
return info
# Look for Instagram embeds
instagram_embed_url = InstagramIE._extract_embed_url(webpage)
if instagram_embed_url is not None:
return self.url_result(
self._proto_relative_url(instagram_embed_url), InstagramIE.ie_key())
# Look for LiveLeak embeds
liveleak_urls = LiveLeakIE._extract_urls(webpage)
if liveleak_urls:
return self.playlist_from_matches(liveleak_urls, video_id, video_title)
# Look for 3Q SDN embeds
threeqsdn_url = ThreeQSDNIE._extract_url(webpage)
if threeqsdn_url:
return {
'_type': 'url_transparent',
'ie_key': ThreeQSDNIE.ie_key(),
'url': self._proto_relative_url(threeqsdn_url),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
# Look for VBOX7 embeds
vbox7_url = Vbox7IE._extract_url(webpage)
if vbox7_url:
return self.url_result(vbox7_url, Vbox7IE.ie_key())
# Look for DBTV embeds
dbtv_urls = DBTVIE._extract_urls(webpage)
if dbtv_urls:
return self.playlist_from_matches(dbtv_urls, video_id, video_title, ie=DBTVIE.ie_key())
# Look for Videa embeds
videa_urls = VideaIE._extract_urls(webpage)
if videa_urls:
return self.playlist_from_matches(videa_urls, video_id, video_title, ie=VideaIE.ie_key())
# Look for 20 minuten embeds
twentymin_urls = TwentyMinutenIE._extract_urls(webpage)
if twentymin_urls:
return self.playlist_from_matches(
twentymin_urls, video_id, video_title, ie=TwentyMinutenIE.ie_key())
# Look for VideoPress embeds
videopress_urls = VideoPressIE._extract_urls(webpage)
if videopress_urls:
return self.playlist_from_matches(
videopress_urls, video_id, video_title, ie=VideoPressIE.ie_key())
# Look for Rutube embeds
rutube_urls = RutubeIE._extract_urls(webpage)
if rutube_urls:
return self.playlist_from_matches(
rutube_urls, video_id, video_title, ie=RutubeIE.ie_key())
# Look for WashingtonPost embeds
wapo_urls = WashingtonPostIE._extract_urls(webpage)
if wapo_urls:
return self.playlist_from_matches(
wapo_urls, video_id, video_title, ie=WashingtonPostIE.ie_key())
# Look for Mediaset embeds
mediaset_urls = MediasetIE._extract_urls(self, webpage)
if mediaset_urls:
return self.playlist_from_matches(
mediaset_urls, video_id, video_title, ie=MediasetIE.ie_key())
# Look for JOJ.sk embeds
joj_urls = JojIE._extract_urls(webpage)
if joj_urls:
return self.playlist_from_matches(
joj_urls, video_id, video_title, ie=JojIE.ie_key())
# Look for megaphone.fm embeds
mpfn_urls = MegaphoneIE._extract_urls(webpage)
if mpfn_urls:
return self.playlist_from_matches(
mpfn_urls, video_id, video_title, ie=MegaphoneIE.ie_key())
# Look for vzaar embeds
vzaar_urls = VzaarIE._extract_urls(webpage)
if vzaar_urls:
return self.playlist_from_matches(
vzaar_urls, video_id, video_title, ie=VzaarIE.ie_key())
channel9_urls = Channel9IE._extract_urls(webpage)
if channel9_urls:
return self.playlist_from_matches(
channel9_urls, video_id, video_title, ie=Channel9IE.ie_key())
vshare_urls = VShareIE._extract_urls(webpage)
if vshare_urls:
return self.playlist_from_matches(
vshare_urls, video_id, video_title, ie=VShareIE.ie_key())
# Look for Mediasite embeds
mediasite_urls = MediasiteIE._extract_urls(webpage)
if mediasite_urls:
entries = [
self.url_result(smuggle_url(
compat_urlparse.urljoin(url, mediasite_url),
{'UrlReferrer': url}), ie=MediasiteIE.ie_key())
for mediasite_url in mediasite_urls]
return self.playlist_result(entries, video_id, video_title)
springboardplatform_urls = SpringboardPlatformIE._extract_urls(webpage)
if springboardplatform_urls:
return self.playlist_from_matches(
springboardplatform_urls, video_id, video_title,
ie=SpringboardPlatformIE.ie_key())
yapfiles_urls = YapFilesIE._extract_urls(webpage)
if yapfiles_urls:
return self.playlist_from_matches(
yapfiles_urls, video_id, video_title, ie=YapFilesIE.ie_key())
vice_urls = ViceIE._extract_urls(webpage)
if vice_urls:
return self.playlist_from_matches(
vice_urls, video_id, video_title, ie=ViceIE.ie_key())
xfileshare_urls = XFileShareIE._extract_urls(webpage)
if xfileshare_urls:
return self.playlist_from_matches(
xfileshare_urls, video_id, video_title, ie=XFileShareIE.ie_key())
cloudflarestream_urls = CloudflareStreamIE._extract_urls(webpage)
if cloudflarestream_urls:
return self.playlist_from_matches(
cloudflarestream_urls, video_id, video_title, ie=CloudflareStreamIE.ie_key())
peertube_urls = PeerTubeIE._extract_urls(webpage, url)
if peertube_urls:
return self.playlist_from_matches(
peertube_urls, video_id, video_title, ie=PeerTubeIE.ie_key())
indavideo_urls = IndavideoEmbedIE._extract_urls(webpage)
if indavideo_urls:
return self.playlist_from_matches(
indavideo_urls, video_id, video_title, ie=IndavideoEmbedIE.ie_key())
apa_urls = APAIE._extract_urls(webpage)
if apa_urls:
return self.playlist_from_matches(
apa_urls, video_id, video_title, ie=APAIE.ie_key())
foxnews_urls = FoxNewsIE._extract_urls(webpage)
if foxnews_urls:
return self.playlist_from_matches(
foxnews_urls, video_id, video_title, ie=FoxNewsIE.ie_key())
sharevideos_urls = [sharevideos_mobj.group('url') for sharevideos_mobj in re.finditer(
r'<iframe[^>]+?\bsrc\s*=\s*(["\'])(?P<url>(?:https?:)?//embed\.share-videos\.se/auto/embed/\d+\?.*?\buid=\d+.*?)\1',
webpage)]
if sharevideos_urls:
return self.playlist_from_matches(
sharevideos_urls, video_id, video_title)
viqeo_urls = ViqeoIE._extract_urls(webpage)
if viqeo_urls:
return self.playlist_from_matches(
viqeo_urls, video_id, video_title, ie=ViqeoIE.ie_key())
expressen_urls = ExpressenIE._extract_urls(webpage)
if expressen_urls:
return self.playlist_from_matches(
expressen_urls, video_id, video_title, ie=ExpressenIE.ie_key())
zype_urls = ZypeIE._extract_urls(webpage)
if zype_urls:
return self.playlist_from_matches(
zype_urls, video_id, video_title, ie=ZypeIE.ie_key())
# Look for HTML5 media
entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id='hls')
if entries:
if len(entries) == 1:
entries[0].update({
'id': video_id,
'title': video_title,
})
else:
for num, entry in enumerate(entries, start=1):
entry.update({
'id': '%s-%s' % (video_id, num),
'title': '%s (%d)' % (video_title, num),
})
for entry in entries:
self._sort_formats(entry['formats'])
return self.playlist_result(entries, video_id, video_title)
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
if jwplayer_data:
try:
info = self._parse_jwplayer_data(
jwplayer_data, video_id, require_title=False, base_url=url)
return merge_dicts(info, info_dict)
except ExtractorError:
# See https://github.com/ytdl-org/youtube-dl/pull/16735
pass
# Video.js embed
mobj = re.search(
r'(?s)\bvideojs\s*\(.+?\.src\s*\(\s*((?:\[.+?\]|{.+?}))\s*\)\s*;',
webpage)
if mobj is not None:
sources = self._parse_json(
mobj.group(1), video_id, transform_source=js_to_json,
fatal=False) or []
if not isinstance(sources, list):
sources = [sources]
formats = []
for source in sources:
src = source.get('src')
if not src or not isinstance(src, compat_str):
continue
src = compat_urlparse.urljoin(url, src)
src_type = source.get('type')
if isinstance(src_type, compat_str):
src_type = src_type.lower()
ext = determine_ext(src).lower()
if src_type == 'video/youtube':
return self.url_result(src, YoutubeIE.ie_key())
if src_type == 'application/dash+xml' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src, video_id, mpd_id='dash', fatal=False))
elif src_type == 'application/x-mpegurl' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
else:
formats.append({
'url': src,
'ext': (mimetype2ext(src_type)
or ext if ext in KNOWN_EXTENSIONS else 'mp4'),
})
if formats:
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
# Looking for http://schema.org/VideoObject
json_ld = self._search_json_ld(
webpage, video_id, default={}, expected_type='VideoObject')
if json_ld.get('url'):
return merge_dicts(json_ld, info_dict)
def check_video(vurl):
    # Return True if vurl looks like a playable video URL worth keeping.
    # YouTube and RTMP URLs are always accepted as-is, since their
    # extractors handle them regardless of path extension.
    if YoutubeIE.suitable(vurl):
        return True
    if RtmpIE.suitable(vurl):
        return True
    vpath = compat_urlparse.urlparse(vurl).path
    vext = determine_ext(vpath)
    # Otherwise require a dotted path and reject clearly non-video
    # extensions (flash, images, subtitle formats, scripts, xml).
    return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml', 'js', 'xml')
def filter_video(urls):
    # Keep only the candidate URLs that check_video accepts.
    return [u for u in urls if check_video(u)]
# Start with something easy: JW Player in SWFObject
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?
['"]?file['"]?\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(re.findall(
r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*\{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
''', webpage))
if not found:
# Cinerama player
found = re.findall(
r"cinerama\.embedPlayer\(\s*\'[^']+\',\s*'([^']+)'", webpage)
if not found:
# Try to find twitter cards info
# twitter:player:stream should be checked before twitter:player since
# it is expected to contain a raw stream (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
found = filter_video(re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
if not found:
# We look for Open Graph info:
# We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
# We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
if m_video_type is not None:
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX,
webpage)
if not found:
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get('Refresh')
if refresh_header:
# In python 2 response HTTP headers are bytestrings
if sys.version_info < (3, 0) and isinstance(refresh_header, str):
refresh_header = refresh_header.decode('iso-8859-1')
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
if new_url != url:
self.report_following_redirect(new_url)
return {
'_type': 'url',
'url': new_url,
}
else:
found = None
if not found:
# twitter:player is a https URL to iframe player that may or may not
# be supported by youtube-dl thus this is checked the very last (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
embed_url = self._html_search_meta('twitter:player', webpage, default=None)
if embed_url and embed_url != url:
return self.url_result(embed_url)
if not found:
raise UnsupportedError(url)
entries = []
for video_url in orderedSet(found):
video_url = unescapeHTML(video_url)
video_url = video_url.replace('\\/', '/')
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
# Sometimes, jwplayer extraction will result in a YouTube URL
if YoutubeIE.suitable(video_url):
entries.append(self.url_result(video_url, 'Youtube'))
continue
# here's a fun little line of code for you:
video_id = os.path.splitext(video_id)[0]
entry_info_dict = {
'id': video_id,
'uploader': video_uploader,
'title': video_title,
'age_limit': age_limit,
}
if RtmpIE.suitable(video_url):
entry_info_dict.update({
'_type': 'url_transparent',
'ie_key': RtmpIE.ie_key(),
'url': video_url,
})
entries.append(entry_info_dict)
continue
ext = determine_ext(video_url)
if ext == 'smil':
entry_info_dict['formats'] = self._extract_smil_formats(video_url, video_id)
elif ext == 'xspf':
return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
elif ext == 'm3u8':
entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4')
elif ext == 'mpd':
entry_info_dict['formats'] = self._extract_mpd_formats(video_url, video_id)
elif ext == 'f4m':
entry_info_dict['formats'] = self._extract_f4m_formats(video_url, video_id)
elif re.search(r'(?i)\.(?:ism|smil)/manifest', video_url) and video_url != url:
# Just matching .ism/manifest is not enough to be reliably sure
# whether it's actually an ISM manifest or some other streaming
# manifest since there are various streaming URL formats
# possible (see [1]) as well as some other shenanigans like
# .smil/manifest URLs that actually serve an ISM (see [2]) and
# so on.
# Thus the most reasonable way to solve this is to delegate
# to generic extractor in order to look into the contents of
# the manifest itself.
# 1. https://azure.microsoft.com/en-us/documentation/articles/media-services-deliver-content-overview/#streaming-url-formats
# 2. https://svs.itworkscdn.net/lbcivod/smil:itwfcdn/lbci/170976.smil/Manifest
entry_info_dict = self.url_result(
smuggle_url(video_url, {'to_generic': True}),
GenericIE.ie_key())
else:
entry_info_dict['url'] = video_url
if entry_info_dict.get('formats'):
self._sort_formats(entry_info_dict['formats'])
entries.append(entry_info_dict)
if len(entries) == 1:
return entries[0]
else:
for num, e in enumerate(entries, start=1):
# 'url' results don't have a title
if e.get('title') is not None:
e['title'] = '%s (%d)' % (e['title'], num)
return {
'_type': 'playlist',
'entries': entries,
} | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 8.18 from Kane 1985."""
from __future__ import division
from sympy import symbols
from sympy.physics.mechanics import ReferenceFrame
from sympy.physics.mechanics import cross, dot, dynamicsymbols, inertia
from util import msprint


def inertia_torque(ang_vel, I, frame, t):
    """Generalized inertia torque T* = -alpha . I - (omega x I) . omega."""
    return -dot(ang_vel.diff(t, frame), I) - dot(cross(ang_vel, I), ang_vel)


print("\n part a")
Ia, Ib, Ic, Iab, Ibc, Ica, t = symbols('Ia Ib Ic Iab Ibc Ica t')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
# The inertia dyadic has the general form:
# I = (I11 * N.x + I12 * N.y + I13 * N.z) N.x +
#     (I21 * N.x + I22 * N.y + I23 * N.z) N.y +
#     (I31 * N.x + I32 * N.y + I33 * N.z) N.z
print(msprint(inertia_torque(
    omega * N.x, inertia(N, Ia, Ib, Ic, Iab, Ibc, Ica), N, t)))

print("\n part b")
I11, I22, I33, I12, I23, I31 = symbols('I11 I22 I33 I12 I23 I31')
omega1, omega2, omega3 = dynamicsymbols('omega1:4')
B = ReferenceFrame('B')
print(msprint(inertia_torque(
    omega1 * B.x + omega2 * B.y + omega3 * B.z,
    inertia(B, I11, I22, I33, I12, I23, I31), B, t)))
#!/usr/bin/env python
from runtest import TestBase
import subprocess as sp
import os.path
import random
TDIR = 'xxx'


class TestCase(TestBase):
    """Record through a network receiver, then replay the received data."""

    def __init__(self):
        TestBase.__init__(self, 'abc', """
# DURATION TID FUNCTION
62.202 us [28141] | __cxa_atexit();
[28141] | main() {
[28141] | a() {
[28141] | b() {
[28141] | c() {
0.753 us [28141] | getpid();
1.430 us [28141] | } /* c */
1.915 us [28141] | } /* b */
2.405 us [28141] | } /* a */
3.005 us [28141] | } /* main */
""")

    def prerun(self, timeout):
        """Start a receiver, then record twice; only the second recording
        (stored under a uniquely named directory) is used by the test."""
        self.gen_port()

        # launch the receiver that stores incoming data under TDIR
        self.subcmd = 'recv'
        self.option = '-d %s --port %s' % (TDIR, self.port)
        self.exearg = ''
        cmd = self.runcmd()
        self.pr_debug('prerun command: ' + cmd)
        self.recv_p = sp.Popen(cmd.split())

        # first recording: sent to the receiver but otherwise unused
        self.subcmd = 'record'
        self.option = '--host %s --port %s' % ('localhost', self.port)
        self.exearg = 't-' + self.name
        cmd = self.runcmd()
        self.pr_debug('prerun command: ' + cmd)
        sp.call(cmd.split())

        # second recording: this one goes into a random directory name so
        # setup() can replay it
        self.pr_debug('run another record')
        self.dirname = 'dir-' + str(random.randint(100000, 999999))
        self.pr_debug('after randint')
        self.option += ' -d ' + self.dirname
        cmd = self.runcmd()
        self.pr_debug('prerun command: ' + cmd)
        sp.call(cmd.split())
        return TestBase.TEST_SUCCESS

    def setup(self):
        # replay the data the receiver stored for the second recording
        self.subcmd = 'replay'
        self.option = '-d %s' % os.path.join(TDIR, self.dirname)

    def postrun(self, ret):
        # shut down the receiver process before reporting the result
        self.recv_p.terminate()
        return ret
from sympy.polys.domains import QQ, EX, RR
from sympy.polys.rings import ring
from sympy.polys.ring_series import (_invert_monoms, rs_integrate,
rs_trunc, rs_mul, rs_square, rs_pow, _has_constant_term, rs_hadamard_exp,
rs_series_from_list, rs_exp, rs_log, rs_newton, rs_series_inversion,
rs_compose_add, rs_asin, rs_atan, rs_atanh, rs_tan, rs_cot, rs_sin, rs_cos,
rs_cos_sin, rs_sinh, rs_cosh, rs_tanh, _tan1, rs_fun, rs_nth_root,
rs_LambertW, rs_series_reversion, rs_is_puiseux)
from sympy.utilities.pytest import raises
from sympy.core.compatibility import range
from sympy.core.symbol import symbols
from sympy.functions import (sin, cos, exp, tan, cot, atan, asin, atanh,
tanh, log, sqrt)
from sympy.core.numbers import Rational
def is_close(a, b):
    """Assert that ``a`` and ``b`` agree to within an absolute tolerance of 1e-10."""
    eps = 1e-10
    assert abs(a - b) < eps
def test_ring_series1():
    """Basic helpers: _invert_monoms, rs_hadamard_exp and rs_integrate."""
    R, x = ring('x', QQ)
    p = x**4 + 2*x**3 + 3*x + 4
    assert _invert_monoms(p) == 4*x**4 + 3*x**3 + 2*x + 1
    assert rs_hadamard_exp(p) == x**4/24 + x**3/3 + 3*x + 4
    R, x = ring('x', QQ)
    p = x**4 + 2*x**3 + 3*x + 4
    assert rs_integrate(p, x) == x**5/5 + x**4/2 + 3*x**2/2 + 4*x
    R, x, y = ring('x, y', QQ)
    p = x**2*y**2 + x + 1
    assert rs_integrate(p, x) == x**3*y**2/3 + x**2/2 + x
    assert rs_integrate(p, y) == x**2*y**3/3 + x*y + y
def test_trunc():
    """rs_trunc drops all terms of x-degree >= the given precision."""
    R, x, y, t = ring('x, y, t', QQ)
    p = (y + t*x)**4
    p1 = rs_trunc(p, x, 3)
    assert p1 == y**4 + 4*y**3*t*x + 6*y**2*t**2*x**2
def test_mul_trunc():
    """rs_mul: truncated series product; rejects operands from another ring."""
    R, x, y, t = ring('x, y, t', QQ)
    p = 1 + t*x + t*y
    for i in range(2):
        p = rs_mul(p, p, t, 3)
    assert p == 6*x**2*t**2 + 12*x*y*t**2 + 6*y**2*t**2 + 4*x*t + 4*y*t + 1
    p = 1 + t*x + t*y + t**2*x*y
    p1 = rs_mul(p, p, t, 2)
    assert p1 == 1 + 2*t*x + 2*t*y
    # Multiplying by an element of a different ring must raise ValueError.
    R1, z = ring('z', QQ)
    def test1(p):
        p2 = rs_mul(p, z, x, 2)
    raises(ValueError, lambda: test1(p))
    p1 = 2 + 2*x + 3*x**2
    p2 = 3 + x**2
    assert rs_mul(p1, p2, x, 4) == 2*x**3 + 11*x**2 + 6*x + 6
def test_square_trunc():
    """rs_square(p) agrees with rs_mul(p, p) under the same truncation."""
    R, x, y, t = ring('x, y, t', QQ)
    p = (1 + t*x + t*y)*2
    p1 = rs_mul(p, p, x, 3)
    p2 = rs_square(p, x, 3)
    assert p1 == p2
    p = 1 + x + x**2 + x**3
    assert rs_square(p, x, 4) == 4*x**3 + 3*x**2 + 2*x + 1
def test_pow_trunc():
    """rs_pow: integer, zero, negative and rational exponents with truncation."""
    R, x, y, z = ring('x, y, z', QQ)
    p0 = y + x*z
    p = p0**16
    # Truncating the full power equals computing the truncated power directly.
    for xx in (x, y, z):
        p1 = rs_trunc(p, xx, 8)
        p2 = rs_pow(p0, 16, xx, 8)
        assert p1 == p2
    p = 1 + x
    p1 = rs_pow(p, 3, x, 2)
    assert p1 == 1 + 3*x
    assert rs_pow(p, 0, x, 2) == 1
    assert rs_pow(p, -2, x, 2) == 1 - 2*x
    p = x + y
    assert rs_pow(p, 3, y, 3) == x**3 + 3*x**2*y + 3*x*y**2
    assert rs_pow(1 + x, Rational(2, 3), x, 4) == 4*x**3/81 - x**2/9 + 2*x/3 + 1
def test_has_constant_term():
    """_has_constant_term: does the series contain a term of x-degree zero?"""
    R, x, y, z = ring('x, y, z', QQ)
    p = y + x*z
    assert _has_constant_term(p, x)
    p = x + x**4
    assert not _has_constant_term(p, x)
    p = 1 + x + x**4
    assert _has_constant_term(p, x)
    p = x + y + x*z
    # The original left this `p` unasserted (dead assignment); `y` has
    # x-degree zero, so the polynomial does have a constant term w.r.t. x.
    assert _has_constant_term(p, x)
def test_inversion():
    """rs_series_inversion: p * p**-1 == 1 (mod x**n); error cases."""
    R, x = ring('x', QQ)
    p = 2 + x + 2*x**2
    n = 5
    p1 = rs_series_inversion(p, x, n)
    assert rs_trunc(p*p1, x, n) == 1
    R, x, y = ring('x, y', QQ)
    p = 2 + x + 2*x**2 + y*x + x**2*y
    p1 = rs_series_inversion(p, x, n)
    assert rs_trunc(p*p1, x, n) == 1
    R, x, y = ring('x, y', QQ)
    # A constant term involving another generator is not supported.
    p = 1 + x + y
    def test2(p):
        p1 = rs_series_inversion(p, x, 4)
    raises(NotImplementedError, lambda: test2(p))
    # The zero series has no inverse.
    p = R.zero
    def test3(p):
        p1 = rs_series_inversion(p, x, 3)
    raises(ZeroDivisionError, lambda: test3(p))
def test_series_reversion():
    """Reversion (compositional inverse) of the tan and sin power series."""
    R, x, y = ring('x, y', QQ)
    # The compositional inverse of tan is atan.  (The original computed r1/r2
    # and then discarded them, recomputing both inside the assert.)
    p = rs_tan(x, x, 10)
    r1 = rs_series_reversion(p, x, 8, y)
    r2 = rs_atan(y, y, 8)
    assert r1 == r2
    # The compositional inverse of sin is asin; compare with its expansion.
    p = rs_sin(x, x, 10)
    assert rs_series_reversion(p, x, 8, y) == 5*y**7/112 + 3*y**5/40 + \
        y**3/6 + y
def test_series_from_list():
    """rs_series_from_list evaluates sum(c[i]*p**i) truncated at x**prec."""
    R, x = ring('x', QQ)
    p = 1 + 2*x + x**2 + 3*x**3
    c = [1, 2, 0, 4, 4]
    r = rs_series_from_list(p, c, x, 5)
    # Cross-check against explicit polynomial composition.
    pc = R.from_list(list(reversed(c)))
    r1 = rs_trunc(pc.compose(x, p), x, 5)
    assert r == r1
    R, x, y = ring('x, y', QQ)
    c = [1, 3, 5, 7]
    p1 = rs_series_from_list(x + y, c, x, 3, concur=0)
    p2 = rs_trunc((1 + 3*(x+y) + 5*(x+y)**2 + 7*(x+y)**3), x, 3)
    assert p1 == p2
    R, x = ring('x', QQ)
    h = 25
    p = rs_exp(x, x, h) - 1
    p1 = rs_series_from_list(p, c, x, h)
    # Cross-check against a manual Horner-free evaluation via rs_pow.
    p2 = 0
    for i, cx in enumerate(c):
        p2 += cx*rs_pow(p, i, x, h)
    assert p1 == p2
def test_log():
    """rs_log: series of log(p), including symbolic constant terms over EX."""
    R, x = ring('x', QQ)
    p = 1 + x
    p1 = rs_log(p, x, 4)
    assert p1 == x - x**2/2 + x**3/3
    p = 1 + x +2*x**2/3
    p1 = rs_log(p, x, 9)
    assert p1 == -17*x**8/648 + 13*x**7/189 - 11*x**6/162 - x**5/45 + \
        7*x**4/36 - x**3/3 + x**2/6 + x
    # log(1/p) == -log(p)
    p2 = rs_series_inversion(p, x, 9)
    p3 = rs_log(p2, x, 9)
    assert p3 == -p1
    R, x, y = ring('x, y', QQ)
    p = 1 + x + 2*y*x**2
    p1 = rs_log(p, x, 6)
    assert p1 == (4*x**5*y**2 - 2*x**5*y - 2*x**4*y**2 + x**5/5 + 2*x**4*y -
                  x**4/4 - 2*x**3*y + x**3/3 + 2*x**2*y - x**2/2 + x)
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', EX)
    assert rs_log(x + a, x, 5) == -EX(1/(4*a**4))*x**4 + EX(1/(3*a**3))*x**3 \
        - EX(1/(2*a**2))*x**2 + EX(1/a)*x + EX(log(a))
    assert rs_log(x + x**2*y + a, x, 4) == -EX(a**(-2))*x**3*y + \
        EX(1/(3*a**3))*x**3 + EX(1/a)*x**2*y - EX(1/(2*a**2))*x**2 + \
        EX(1/a)*x + EX(log(a))
    p = x + x**2 + 3
    assert rs_log(p, x, 10).compose(x, 5) == EX(log(3) + 19281291595/9920232)
def test_exp():
    """rs_exp: series of exp(p); round-trips with rs_log; constant terms."""
    R, x = ring('x', QQ)
    p = x + x**4
    # log(exp(q)) == q at two precisions.
    for h in [10, 30]:
        q = rs_series_inversion(1 + p, x, h) - 1
        p1 = rs_exp(q, x, h)
        q1 = rs_log(p1, x, h)
        assert q1 == q
    p1 = rs_exp(p, x, 30)
    assert p1.coeff(x**29) == QQ(74274246775059676726972369, 353670479749588078181744640000)
    # exp(log(1 + x)) == 1 + x
    prec = 21
    p = rs_log(1 + x, x, prec)
    p1 = rs_exp(p, x, prec)
    assert p1 == x + 1
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', QQ[exp(a), a])
    assert rs_exp(x + a, x, 5) == exp(a)*x**4/24 + exp(a)*x**3/6 + \
        exp(a)*x**2/2 + exp(a)*x + exp(a)
    assert rs_exp(x + x**2*y + a, x, 5) == exp(a)*x**4*y**2/2 + \
        exp(a)*x**4*y/2 + exp(a)*x**4/24 + exp(a)*x**3*y + \
        exp(a)*x**3/6 + exp(a)*x**2*y + exp(a)*x**2/2 + exp(a)*x + exp(a)
    R, x, y = ring('x, y', EX)
    assert rs_exp(x + a, x, 5) == EX(exp(a)/24)*x**4 + EX(exp(a)/6)*x**3 + \
        EX(exp(a)/2)*x**2 + EX(exp(a))*x + EX(exp(a))
    assert rs_exp(x + x**2*y + a, x, 5) == EX(exp(a)/2)*x**4*y**2 + \
        EX(exp(a)/2)*x**4*y + EX(exp(a)/24)*x**4 + EX(exp(a))*x**3*y + \
        EX(exp(a)/6)*x**3 + EX(exp(a))*x**2*y + EX(exp(a)/2)*x**2 + \
        EX(exp(a))*x + EX(exp(a))
def test_newton():
    """rs_newton: truncated Newton series of p = x**2 - 2."""
    R, x = ring('x', QQ)
    p = x**2 - 2
    r = rs_newton(p, x, 4)
    # (The original bound an unused list `f = [1, 0, -2]`; removed.)
    assert r == 8*x**4 + 4*x**2 + 2
def test_compose_add():
    """rs_compose_add: polynomial whose roots are sums of roots of p1 and p2."""
    R, x = ring('x', QQ)
    p1 = x**3 - 1
    p2 = x**2 - 2
    assert rs_compose_add(p1, p2) == x**6 - 6*x**4 - 2*x**3 + 12*x**2 - 12*x - 7
def test_fun():
    """rs_fun applies a series function to a multivariate argument."""
    R, x, y = ring('x, y', QQ)
    p = x*y + x**2*y**3 + x**5*y
    assert rs_fun(p, rs_tan, x, 10) == rs_tan(p, x, 10)
    assert rs_fun(p, _tan1, x, 10) == _tan1(p, x, 10)
def test_nth_root():
    """rs_nth_root: truncated n-th roots, including Puiseux and EX constants."""
    R, x, y = ring('x, y', QQ)
    # (The original computed r1 and then recomputed the same call in the
    # assert; reuse the bound result instead.)
    r1 = rs_nth_root(1 + x**2*y, 4, x, 10)
    assert r1 == -77*x**8*y**4/2048 + \
        7*x**6*y**3/128 - 3*x**4*y**2/32 + x**2*y/4 + 1
    assert rs_nth_root(1 + x*y + x**2*y**3, 3, x, 5) == -x**4*y**6/9 + \
        5*x**4*y**5/27 - 10*x**4*y**4/243 - 2*x**3*y**4/9 + 5*x**3*y**3/81 + \
        x**2*y**3/3 - x**2*y**2/9 + x*y/3 + 1
    # Roots of series with no constant term give Puiseux (fractional) powers.
    assert rs_nth_root(8*x, 3, x, 3) == 2*x**QQ(1, 3)
    assert rs_nth_root(8*x + x**2 + x**3, 3, x, 3) == x**QQ(4,3)/12 + 2*x**QQ(1,3)
    r = rs_nth_root(8*x + x**2*y + x**3, 3, x, 4)
    assert r == -x**QQ(7,3)*y**2/288 + x**QQ(7,3)/12 + x**QQ(4,3)*y/12 + 2*x**QQ(1,3)
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', EX)
    assert rs_nth_root(x + a, 3, x, 4) == EX(5/(81*a**QQ(8, 3)))*x**3 - \
        EX(1/(9*a**QQ(5, 3)))*x**2 + EX(1/(3*a**QQ(2, 3)))*x + EX(a**QQ(1, 3))
    assert rs_nth_root(x**QQ(2, 3) + x**2*y + 5, 2, x, 3) == -EX(sqrt(5)/100)*\
        x**QQ(8, 3)*y - EX(sqrt(5)/16000)*x**QQ(8, 3) + EX(sqrt(5)/10)*x**2*y + \
        EX(sqrt(5)/2000)*x**2 - EX(sqrt(5)/200)*x**QQ(4, 3) + \
        EX(sqrt(5)/10)*x**QQ(2, 3) + EX(sqrt(5))
def test_atan():
    """rs_atan: arctangent series, including symbolic constant terms over EX."""
    R, x, y = ring('x, y', QQ)
    assert rs_atan(x, x, 9) == -x**7/7 + x**5/5 - x**3/3 + x
    assert rs_atan(x*y + x**2*y**3, x, 9) == 2*x**8*y**11 - x**8*y**9 + \
        2*x**7*y**9 - x**7*y**7/7 - x**6*y**9/3 + x**6*y**7 - x**5*y**7 + \
        x**5*y**5/5 - x**4*y**5 - x**3*y**3/3 + x**2*y**3 + x*y
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', EX)
    assert rs_atan(x + a, x, 5) == -EX((a**3 - a)/(a**8 + 4*a**6 + 6*a**4 + \
        4*a**2 + 1))*x**4 + EX((3*a**2 - 1)/(3*a**6 + 9*a**4 + \
        9*a**2 + 3))*x**3 - EX(a/(a**4 + 2*a**2 + 1))*x**2 + \
        EX(1/(a**2 + 1))*x + EX(atan(a))
    assert rs_atan(x + x**2*y + a, x, 4) == -EX(2*a/(a**4 + 2*a**2 + 1)) \
        *x**3*y + EX((3*a**2 - 1)/(3*a**6 + 9*a**4 + 9*a**2 + 3))*x**3 + \
        EX(1/(a**2 + 1))*x**2*y - EX(a/(a**4 + 2*a**2 + 1))*x**2 + EX(1/(a**2 \
        + 1))*x + EX(atan(a))
def test_asin():
    """rs_asin: arcsine series of multivariate arguments."""
    R, x, y = ring('x, y', QQ)
    assert rs_asin(x + x*y, x, 5) == x**3*y**3/6 + x**3*y**2/2 + x**3*y/2 + \
        x**3/6 + x*y + x
    assert rs_asin(x*y + x**2*y**3, x, 6) == x**5*y**7/2 + 3*x**5*y**5/40 + \
        x**4*y**5/2 + x**3*y**3/6 + x**2*y**3 + x*y
def test_tan():
    """rs_tan: tangent series, with constant terms over QQ[tan(a)] and EX."""
    R, x, y = ring('x, y', QQ)
    assert rs_tan(x, x, 9) == \
        x + x**3/3 + 2*x**5/15 + 17*x**7/315
    assert rs_tan(x*y + x**2*y**3, x, 9) == 4*x**8*y**11/3 + 17*x**8*y**9/45 + \
        4*x**7*y**9/3 + 17*x**7*y**7/315 + x**6*y**9/3 + 2*x**6*y**7/3 + \
        x**5*y**7 + 2*x**5*y**5/15 + x**4*y**5 + x**3*y**3/3 + x**2*y**3 + x*y
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', QQ[tan(a), a])
    assert rs_tan(x + a, x, 5) == (tan(a)**5 + 5*tan(a)**3/3 + \
        2*tan(a)/3)*x**4 + (tan(a)**4 + 4*tan(a)**2/3 + 1/3)*x**3 + \
        (tan(a)**3 + tan(a))*x**2 + (tan(a)**2 + 1)*x + tan(a)
    assert rs_tan(x + x**2*y + a, x, 4) == (2*tan(a)**3 + 2*tan(a))*x**3*y + \
        (tan(a)**4 + 4/3*tan(a)**2 + 1/3)*x**3 + (tan(a)**2 + 1)*x**2*y + \
        (tan(a)**3 + tan(a))*x**2 + (tan(a)**2 + 1)*x + tan(a)
    R, x, y = ring('x, y', EX)
    assert rs_tan(x + a, x, 5) == EX(tan(a)**5 + 5*tan(a)**3/3 + \
        2*tan(a)/3)*x**4 + EX(tan(a)**4 + 4*tan(a)**2/3 + EX(1)/3)*x**3 + \
        EX(tan(a)**3 + tan(a))*x**2 + EX(tan(a)**2 + 1)*x + EX(tan(a))
    assert rs_tan(x + x**2*y + a, x, 4) == EX(2*tan(a)**3 + \
        2*tan(a))*x**3*y + EX(tan(a)**4 + 4*tan(a)**2/3 + EX(1)/3)*x**3 + \
        EX(tan(a)**2 + 1)*x**2*y + EX(tan(a)**3 + tan(a))*x**2 + \
        EX(tan(a)**2 + 1)*x + EX(tan(a))
    # NOTE(review): this final check exercises rs_atan (not rs_tan); it may
    # belong in test_atan.
    p = x + x**2 + 5
    assert rs_atan(p, x, 10).compose(x, 10) == EX(atan(5) + 67701870330562640/ \
        668083460499)
def test_cot():
    """rs_cot: cotangent series — note the negative (Laurent) powers."""
    R, x, y = ring('x, y', QQ)
    assert rs_cot(x**6 + x**7, x, 8) == x**-6 - x**-5 + x**-4 - x**-3 + \
        x**-2 - x**-1 + 1 - x + x**2 - x**3 + x**4 - x**5 + 2*x**6/3 - 4*x**7/3
    assert rs_cot(x + x**2*y, x, 5) == -x**4*y**5 - x**4*y/15 + x**3*y**4 - \
        x**3/45 - x**2*y**3 - x**2*y/3 + x*y**2 - x/3 - y + x**-1
def test_sin():
    """rs_sin: sine series, with constant terms over QQ[sin(a), cos(a)] and EX."""
    R, x, y = ring('x, y', QQ)
    assert rs_sin(x, x, 9) == \
        x - x**3/6 + x**5/120 - x**7/5040
    assert rs_sin(x*y + x**2*y**3, x, 9) == x**8*y**11/12 - \
        x**8*y**9/720 + x**7*y**9/12 - x**7*y**7/5040 - x**6*y**9/6 + \
        x**6*y**7/24 - x**5*y**7/2 + x**5*y**5/120 - x**4*y**5/2 - \
        x**3*y**3/6 + x**2*y**3 + x*y
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', QQ[sin(a), cos(a), a])
    assert rs_sin(x + a, x, 5) == sin(a)*x**4/24 - cos(a)*x**3/6 - \
        sin(a)*x**2/2 + cos(a)*x + sin(a)
    assert rs_sin(x + x**2*y + a, x, 5) == -sin(a)*x**4*y**2/2 - \
        cos(a)*x**4*y/2 + sin(a)*x**4/24 - sin(a)*x**3*y - cos(a)*x**3/6 + \
        cos(a)*x**2*y - sin(a)*x**2/2 + cos(a)*x + sin(a)
    R, x, y = ring('x, y', EX)
    assert rs_sin(x + a, x, 5) == EX(sin(a)/24)*x**4 - EX(cos(a)/6)*x**3 - \
        EX(sin(a)/2)*x**2 + EX(cos(a))*x + EX(sin(a))
    assert rs_sin(x + x**2*y + a, x, 5) == -EX(sin(a)/2)*x**4*y**2 - \
        EX(cos(a)/2)*x**4*y + EX(sin(a)/24)*x**4 - EX(sin(a))*x**3*y - \
        EX(cos(a)/6)*x**3 + EX(cos(a))*x**2*y - EX(sin(a)/2)*x**2 + \
        EX(cos(a))*x + EX(sin(a))
def test_cos():
    """rs_cos: cosine series, with constant terms over QQ[sin(a), cos(a)] and EX."""
    R, x, y = ring('x, y', QQ)
    assert rs_cos(x, x, 9) == \
        x**8/40320 - x**6/720 + x**4/24 - x**2/2 + 1
    assert rs_cos(x*y + x**2*y**3, x, 9) == x**8*y**12/24 - \
        x**8*y**10/48 + x**8*y**8/40320 + x**7*y**10/6 - \
        x**7*y**8/120 + x**6*y**8/4 - x**6*y**6/720 + x**5*y**6/6 - \
        x**4*y**6/2 + x**4*y**4/24 - x**3*y**4 - x**2*y**2/2 + 1
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', QQ[sin(a), cos(a), a])
    assert rs_cos(x + a, x, 5) == cos(a)*x**4/24 + sin(a)*x**3/6 - \
        cos(a)*x**2/2 - sin(a)*x + cos(a)
    assert rs_cos(x + x**2*y + a, x, 5) == -cos(a)*x**4*y**2/2 + \
        sin(a)*x**4*y/2 + cos(a)*x**4/24 - cos(a)*x**3*y + sin(a)*x**3/6 - \
        sin(a)*x**2*y - cos(a)*x**2/2 - sin(a)*x + cos(a)
    R, x, y = ring('x, y', EX)
    assert rs_cos(x + a, x, 5) == EX(cos(a)/24)*x**4 + EX(sin(a)/6)*x**3 - \
        EX(cos(a)/2)*x**2 - EX(sin(a))*x + EX(cos(a))
    assert rs_cos(x + x**2*y + a, x, 5) == -EX(cos(a)/2)*x**4*y**2 + \
        EX(sin(a)/2)*x**4*y + EX(cos(a)/24)*x**4 - EX(cos(a))*x**3*y + \
        EX(sin(a)/6)*x**3 - EX(sin(a))*x**2*y - EX(cos(a)/2)*x**2 - \
        EX(sin(a))*x + EX(cos(a))
def test_cos_sin():
    """rs_cos_sin returns the (cos, sin) series pair in one call."""
    R, x, y = ring('x, y', QQ)
    # Renamed locals: the original bound `cos, sin`, shadowing the sympy
    # functions imported at module level.
    c, s = rs_cos_sin(x, x, 9)
    assert c == rs_cos(x, x, 9)
    assert s == rs_sin(x, x, 9)
    c, s = rs_cos_sin(x + x*y, x, 5)
    assert c == rs_cos(x + x*y, x, 5)
    assert s == rs_sin(x + x*y, x, 5)
def test_atanh():
    """rs_atanh: hyperbolic arctangent series, with EX constant terms."""
    R, x, y = ring('x, y', QQ)
    assert rs_atanh(x, x, 9) == x**7/7 + x**5/5 + x**3/3 + x
    assert rs_atanh(x*y + x**2*y**3, x, 9) == 2*x**8*y**11 + x**8*y**9 + \
        2*x**7*y**9 + x**7*y**7/7 + x**6*y**9/3 + x**6*y**7 + x**5*y**7 + \
        x**5*y**5/5 + x**4*y**5 + x**3*y**3/3 + x**2*y**3 + x*y
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', EX)
    assert rs_atanh(x + a, x, 5) == EX((a**3 + a)/(a**8 - 4*a**6 + 6*a**4 - \
        4*a**2 + 1))*x**4 - EX((3*a**2 + 1)/(3*a**6 - 9*a**4 + \
        9*a**2 - 3))*x**3 + EX(a/(a**4 - 2*a**2 + 1))*x**2 - EX(1/(a**2 - \
        1))*x + EX(atanh(a))
    assert rs_atanh(x + x**2*y + a, x, 4) == EX(2*a/(a**4 - 2*a**2 + \
        1))*x**3*y - EX((3*a**2 + 1)/(3*a**6 - 9*a**4 + 9*a**2 - 3))*x**3 - \
        EX(1/(a**2 - 1))*x**2*y + EX(a/(a**4 - 2*a**2 + 1))*x**2 - \
        EX(1/(a**2 - 1))*x + EX(atanh(a))
    p = x + x**2 + 5
    assert rs_atanh(p, x, 10).compose(x, 10) == EX(-733442653682135/5079158784 \
        + atanh(5))
def test_sinh():
    """rs_sinh: hyperbolic sine series."""
    R, x, y = ring('x, y', QQ)
    assert rs_sinh(x, x, 9) == x**7/5040 + x**5/120 + x**3/6 + x
    assert rs_sinh(x*y + x**2*y**3, x, 9) == x**8*y**11/12 + \
        x**8*y**9/720 + x**7*y**9/12 + x**7*y**7/5040 + x**6*y**9/6 + \
        x**6*y**7/24 + x**5*y**7/2 + x**5*y**5/120 + x**4*y**5/2 + \
        x**3*y**3/6 + x**2*y**3 + x*y
def test_cosh():
    """rs_cosh: hyperbolic cosine series."""
    R, x, y = ring('x, y', QQ)
    assert rs_cosh(x, x, 9) == x**8/40320 + x**6/720 + x**4/24 + \
        x**2/2 + 1
    assert rs_cosh(x*y + x**2*y**3, x, 9) == x**8*y**12/24 + \
        x**8*y**10/48 + x**8*y**8/40320 + x**7*y**10/6 + \
        x**7*y**8/120 + x**6*y**8/4 + x**6*y**6/720 + x**5*y**6/6 + \
        x**4*y**6/2 + x**4*y**4/24 + x**3*y**4 + x**2*y**2/2 + 1
def test_tanh():
    """rs_tanh: hyperbolic tangent series, with EX constant terms."""
    R, x, y = ring('x, y', QQ)
    assert rs_tanh(x, x, 9) == -17*x**7/315 + 2*x**5/15 - x**3/3 + x
    assert rs_tanh(x*y + x**2*y**3 , x, 9) == 4*x**8*y**11/3 - \
        17*x**8*y**9/45 + 4*x**7*y**9/3 - 17*x**7*y**7/315 - x**6*y**9/3 + \
        2*x**6*y**7/3 - x**5*y**7 + 2*x**5*y**5/15 - x**4*y**5 - \
        x**3*y**3/3 + x**2*y**3 + x*y
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', EX)
    assert rs_tanh(x + a, x, 5) == EX(tanh(a)**5 - 5*tanh(a)**3/3 + \
        2*tanh(a)/3)*x**4 + EX(-tanh(a)**4 + 4*tanh(a)**2/3 - QQ(1, 3))*x**3 + \
        EX(tanh(a)**3 - tanh(a))*x**2 + EX(-tanh(a)**2 + 1)*x + EX(tanh(a))
    p = rs_tanh(x + x**2*y + a, x, 4)
    assert (p.compose(x, 10)).compose(y, 5) == EX(-1000*tanh(a)**4 + \
        10100*tanh(a)**3 + 2470*tanh(a)**2/3 - 10099*tanh(a) + QQ(530, 3))
def test_RR():
    """Series over the real field RR agree numerically with sympy's .series()."""
    rs_funcs = [rs_sin, rs_cos, rs_tan, rs_cot, rs_atan, rs_tanh]
    sympy_funcs = [sin, cos, tan, cot, atan, tanh]
    R, x, y = ring('x, y', RR)
    a = symbols('a')
    for rs_func, sympy_func in zip(rs_funcs, sympy_funcs):
        # Both sides are the same degree-4 Taylor polynomial evaluated at 5.
        p = rs_func(2 + x, x, 5).compose(x, 5)
        q = sympy_func(2 + a).series(a, 0, 5).removeO()
        is_close(p.as_expr(), q.subs(a, 5).n())
    p = rs_nth_root(2 + x, 5, x, 5).compose(x, 5)
    q = ((2 + a)**QQ(1, 5)).series(a, 0, 5).removeO()
    is_close(p.as_expr(), q.subs(a, 5).n())
def test_is_regular():
    """rs_is_puiseux: detect fractional (Puiseux) exponents in a generator."""
    R, x, y = ring('x, y', QQ)
    p = 1 + 2*x + x**2 + 3*x**3
    assert not rs_is_puiseux(p, x)
    p = x + x**QQ(1,5)*y
    assert rs_is_puiseux(p, x)
    assert not rs_is_puiseux(p, y)
    p = x + x**2*y**QQ(1,5)*y
    assert not rs_is_puiseux(p, x)
def test_puiseux():
    """All elementary series functions applied to a Puiseux series argument."""
    R, x, y = ring('x, y', QQ)
    p = x**QQ(2,5) + x**QQ(2,3) + x
    r = rs_series_inversion(p, x, 1)
    r1 = -x**QQ(14,15) + x**QQ(4,5) - 3*x**QQ(11,15) + x**QQ(2,3) + \
        2*x**QQ(7,15) - x**QQ(2,5) - x**QQ(1,5) + x**QQ(2,15) - x**QQ(-2,15) \
        + x**QQ(-2,5)
    assert r == r1
    r = rs_nth_root(1 + p, 3, x, 1)
    assert r == -x**QQ(4,5)/9 + x**QQ(2,3)/3 + x**QQ(2,5)/3 + 1
    r = rs_log(1 + p, x, 1)
    assert r == -x**QQ(4,5)/2 + x**QQ(2,3) + x**QQ(2,5)
    r = rs_LambertW(p, x, 1)
    assert r == -x**QQ(4,5) + x**QQ(2,3) + x**QQ(2,5)
    r = rs_exp(p, x, 1)
    assert r == x**QQ(4,5)/2 + x**QQ(2,3) + x**QQ(2,5) + 1
    p1 = x + x**QQ(1,5)*y
    r = rs_exp(p1, x, 1)
    assert r == x**QQ(4,5)*y**4/24 + x**QQ(3,5)*y**3/6 + x**QQ(2,5)*y**2/2 + \
        x**QQ(1,5)*y + 1
    r = rs_atan(p, x, 2)
    assert r == -x**QQ(9,5) - x**QQ(26,15) - x**QQ(22,15) - x**QQ(6,5)/3 + \
        x + x**QQ(2,3) + x**QQ(2,5)
    r = rs_atan(p1, x, 2)
    assert r == x**QQ(9,5)*y**9/9 + x**QQ(9,5)*y**4 - x**QQ(7,5)*y**7/7 - \
        x**QQ(7,5)*y**2 + x*y**5/5 + x - x**QQ(3,5)*y**3/3 + x**QQ(1,5)*y
    r = rs_asin(p, x, 2)
    assert r == x**QQ(9,5)/2 + x**QQ(26,15)/2 + x**QQ(22,15)/2 + \
        x**QQ(6,5)/6 + x + x**QQ(2,3) + x**QQ(2,5)
    r = rs_tan(p, x, 2)
    assert r == x**QQ(9,5) + x**QQ(26,15) + x**QQ(22,15) + x**QQ(6,5)/3 + \
        x + x**QQ(2,3) + x**QQ(2,5)
    r = rs_cot(p, x, 1)
    assert r == -x**QQ(14,15) + x**QQ(4,5) - 3*x**QQ(11,15) + \
        2*x**QQ(2,3)/3 + 2*x**QQ(7,15) - 4*x**QQ(2,5)/3 - x**QQ(1,5) + \
        x**QQ(2,15) - x**QQ(-2,15) + x**QQ(-2,5)
    r = rs_sin(p, x, 2)
    assert r == -x**QQ(9,5)/2 - x**QQ(26,15)/2 - x**QQ(22,15)/2 - \
        x**QQ(6,5)/6 + x + x**QQ(2,3) + x**QQ(2,5)
    r = rs_cos(p, x, 2)
    assert r == x**QQ(28,15)/6 - x**QQ(5,3) + x**QQ(8,5)/24 - x**QQ(7,5) - \
        x**QQ(4,3)/2 - x**QQ(16,15) - x**QQ(4,5)/2 + 1
    r = rs_cos_sin(p, x, 2)
    assert r[0] == x**QQ(28,15)/6 - x**QQ(5,3) + x**QQ(8,5)/24 - x**QQ(7,5) - \
        x**QQ(4,3)/2 - x**QQ(16,15) - x**QQ(4,5)/2 + 1
    assert r[1] == -x**QQ(9,5)/2 - x**QQ(26,15)/2 - x**QQ(22,15)/2 - \
        x**QQ(6,5)/6 + x + x**QQ(2,3) + x**QQ(2,5)
    r = rs_atanh(p, x, 2)
    assert r == x**QQ(9,5) + x**QQ(26,15) + x**QQ(22,15) + x**QQ(6,5)/3 + x + \
        x**QQ(2,3) + x**QQ(2,5)
    r = rs_sinh(p, x, 2)
    assert r == x**QQ(9,5)/2 + x**QQ(26,15)/2 + x**QQ(22,15)/2 + \
        x**QQ(6,5)/6 + x + x**QQ(2,3) + x**QQ(2,5)
    r = rs_cosh(p, x, 2)
    assert r == x**QQ(28,15)/6 + x**QQ(5,3) + x**QQ(8,5)/24 + x**QQ(7,5) + \
        x**QQ(4,3)/2 + x**QQ(16,15) + x**QQ(4,5)/2 + 1
    r = rs_tanh(p, x, 2)
    assert r == -x**QQ(9,5) - x**QQ(26,15) - x**QQ(22,15) - x**QQ(6,5)/3 + \
        x + x**QQ(2,3) + x**QQ(2,5)
def test1():
    """Series with negative and fractional powers (Laurent/Puiseux results)."""
    R, x = ring('x', QQ)
    # Multiplying by x**-5 shifts the sine series into negative powers.
    r = rs_sin(x, x, 15)*x**(-5)
    assert r == x**8/6227020800 - x**6/39916800 + x**4/362880 - x**2/5040 + \
        QQ(1,120) - x**-2/6 + x**-4
    p = rs_sin(x, x, 10)
    r = rs_nth_root(p, 2, x, 10)
    assert r == -67*x**QQ(17,2)/29030400 - x**QQ(13,2)/24192 + \
        x**QQ(9,2)/1440 - x**QQ(5,2)/12 + x**QQ(1,2)
    p = rs_sin(x, x, 10)
    r = rs_nth_root(p, 7, x, 10)
    r = rs_pow(r, 5, x, 10)
    assert r == -97*x**QQ(61,7)/124467840 - x**QQ(47,7)/16464 + \
        11*x**QQ(33,7)/3528 - 5*x**QQ(19,7)/42 + x**QQ(5,7)
    r = rs_exp(x**QQ(1,2), x, 10)
    assert r == x**QQ(19,2)/121645100408832000 + x**9/6402373705728000 + \
        x**QQ(17,2)/355687428096000 + x**8/20922789888000 + \
        x**QQ(15,2)/1307674368000 + x**7/87178291200 + \
        x**QQ(13,2)/6227020800 + x**6/479001600 + x**QQ(11,2)/39916800 + \
        x**5/3628800 + x**QQ(9,2)/362880 + x**4/40320 + x**QQ(7,2)/5040 + \
        x**3/720 + x**QQ(5,2)/120 + x**2/24 + x**QQ(3,2)/6 + x/2 + \
        x**QQ(1,2) + 1
def test_puiseux2():
    """Puiseux series over a polynomial coefficient ring (QQ[y])."""
    R, y = ring('y', QQ)
    S, x = ring('x', R)
    p = x + x**QQ(1,5)*y
    r = rs_atan(p, x, 3)
    assert r == (y**13/13 + y**8 + 2*y**3)*x**QQ(13,5) - (y**11/11 + y**6 +
        y)*x**QQ(11,5) + (y**9/9 + y**4)*x**QQ(9,5) - (y**7/7 +
        y**2)*x**QQ(7,5) + (y**5/5 + 1)*x - y**3*x**QQ(3,5)/3 + y*x**QQ(1,5)
# coding: utf-8
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # pylint:disable=redefined-builtin,unused-wildcard-import,wildcard-import,wrong-import-order
import logging
import re
import sys
import networkx as nx
import numpy
import pytest
try:
from unittest import mock
except ImportError:
import mock # Python 2 backport
import stbt_core as stbt
from _stbt.keyboard import _keys_to_press, _strip_shift_transitions
from _stbt.transition import _TransitionResult, TransitionStatus
from _stbt.utils import py3
# pylint:disable=redefined-outer-name
# Marker for tests that only make sense on Python 2 (e.g. bytes edgelists).
python2_only = pytest.mark.skipif(py3, reason="This test requires Python 2")
class DUT(object):
    """Fake keyboard implementation ("Device Under Test").
    Behaves like the YouTube Search keyboard on Apple TV.
    """
    def __init__(self):
        # Cursor position: (x, y) into the 8-row layouts below.  Row 0 is the
        # mode row, rows 1-6 are character keys, row 7 is SPACE/BACKSPACE/CLEAR.
        self.x = 0
        self.y = 1
        self.modes = ["lowercase", "uppercase", "symbols"]
        self.mode = self.modes[0]
        # When True, the "symbols" key behaves as a shift key instead (kb4).
        self.symbols_is_shift = False
        # Keys occupying two grid cells (mode row, bottom row) are listed twice.
        self.keys = {
            "lowercase": [
                ["lowercase"] * 2 + ["uppercase"] * 2 + ["symbols"] * 2,
                "abcdef",
                "ghijkl",
                "mnopqr",
                "stuvwx",
                "yz1234",
                "567890",
                [" "] * 2 + ["BACKSPACE"] * 2 + ["CLEAR"] * 2
            ],
            "uppercase": [
                ["lowercase"] * 2 + ["uppercase"] * 2 + ["symbols"] * 2,
                "ABCDEF",
                "GHIJKL",
                "MNOPQR",
                "STUVWX",
                "YZ1234",
                "567890",
                [" "] * 2 + ["BACKSPACE"] * 2 + ["CLEAR"] * 2
            ],
            "symbols": [
                ["lowercase"] * 2 + ["uppercase"] * 2 + ["symbols"] * 2,
                "!@#$%&",
                "~*\\/?^",
                "_`;:|=",
                "éñ[]{}",
                "çü.,+-",
                "<>()'\"",
                [" "] * 2 + ["BACKSPACE"] * 2 + ["CLEAR"] * 2
            ]
        }
        self.pressed = []   # every keypress received, for assertions
        self.entered = ""   # text typed so far
    @property
    def selection(self):
        """Name of the currently-selected key ("shift" in shift-key mode)."""
        k = self.keys[self.mode][self.y][self.x]
        if self.symbols_is_shift and k == "symbols":
            return "shift"
        else:
            return k
    def handle_press(self, keypress):
        """Apply one remote-control keypress to the fake keyboard state.

        Asserts on any keypress that would move off the edge of the grid —
        i.e. any keypress the navigation logic under test shouldn't send.
        """
        self.pressed.append(keypress)
        mode = self.mode
        selected = self.selection
        logging.debug("DUT.handle_press: Pressed %s", keypress)
        if keypress == "KEY_OK":
            if self.symbols_is_shift and selected == "shift":
                # Shift toggles between lowercase and uppercase.
                if self.mode == "lowercase":
                    self.mode = "uppercase"
                else:
                    self.mode = "lowercase"
            elif (not self.symbols_is_shift) and selected in self.modes:
                self.mode = selected
            elif len(selected) == 1:  # It's a letter
                self.entered += selected
                # Typing a shifted letter drops back to lowercase.
                if self.symbols_is_shift and self.mode == "uppercase":
                    self.mode = "lowercase"
            else:
                assert False, "Unexpected %s on %r" % (keypress, selected)
        elif keypress == "KEY_UP":
            if self.y == 0:
                assert False, "Unexpected %s on %r" % (keypress, selected)
            else:
                self.y -= 1
        elif keypress == "KEY_DOWN":
            if self.y == 7:
                assert False, "Unexpected %s on %r" % (keypress, selected)
            else:
                self.y += 1
        elif keypress == "KEY_LEFT":
            if self.x == 0 or (self.x == 1 and self.y in [0, 7]):
                assert False, "Unexpected %s on %r" % (keypress, selected)
            elif self.y in [0, 7]:
                # Double-width keys: skip over the second cell of the key.
                # x: 012345
                # x%2: 010101
                if self.x % 2 == 0:
                    self.x -= 2
                else:
                    self.x -= 1
            else:
                self.x -= 1
        elif keypress == "KEY_RIGHT":
            if self.x == 5 or (self.x == 4 and self.y in [0, 7]):
                assert False, "Unexpected %s on %r" % (keypress, selected)
            elif self.y in [0, 7]:
                # Double-width keys: skip over the second cell of the key.
                # x: 012345
                # x%2: 010101
                if self.x % 2 == 0:
                    self.x += 2
                else:
                    self.x += 1
            else:
                self.x += 1
        else:
            assert False, "Unexpected %s on %r" % (keypress, selected)
        logging.debug("DUT.handle_press: Moved from %r (%s) to %r (%s)",
                      selected, mode, self.selection, self.mode)
    def handle_press_and_wait(self, key, **_kwargs):
        """Stand-in for stbt.press_and_wait: press, then report COMPLETE."""
        self.handle_press(key)
        return _TransitionResult(key, None, TransitionStatus.COMPLETE, 0, 0, 0)
class BuggyDUT(DUT):
    """DUT that skips over "b" when pressing right from "a" (lands on "c")."""
    def handle_press(self, keypress):
        super(BuggyDUT, self).handle_press(keypress)
        if keypress == "KEY_RIGHT" and self.selection == "b":
            self.x += 1
@pytest.fixture(scope="function")
def dut():
    """Fresh DUT with stbt_core.press/press_and_wait patched to drive it."""
    dut = DUT()
    with mock.patch("stbt_core.press", dut.handle_press), \
            mock.patch("stbt_core.press_and_wait", dut.handle_press_and_wait):
        yield dut
@pytest.fixture(scope="function")
def buggy_dut():
    """Like the `dut` fixture, but using the misbehaving BuggyDUT."""
    dut = BuggyDUT()
    with mock.patch("stbt_core.press", dut.handle_press), \
            mock.patch("stbt_core.press_and_wait", dut.handle_press_and_wait):
        yield dut
class SearchPage(stbt.FrameObject):
    """Immutable Page Object representing the test's view of the DUT."""
    def __init__(self, dut, kb):
        # A blank frame is enough: this fake page reads state from `dut`
        # directly instead of doing image processing.
        super(SearchPage, self).__init__(
            frame=numpy.zeros((720, 1280, 3), dtype=numpy.uint8))
        self.dut = dut
        self.kb = kb
    @property
    def is_visible(self):
        return True
    @property
    def mode(self):
        # Only meaningful for keyboards that define modes (kb1/kb2/kb4).
        if self.kb.modes:
            return self.dut.mode
        else:
            return None
    @property
    def selection(self):
        # In practice this would use image processing to detect the current
        # selection & mode, then look up the key by region & mode.
        # See test_find_key_by_region for an example.
        query = {}
        if self.dut.selection == " ":
            query = {"text": " "}  # for test_that_enter_text_finds_keys_by_text
        else:
            query = {"name": self.dut.selection}
        if self.kb.modes:
            query["mode"] = self.dut.mode
        key = self.kb.find_key(**query)
        logging.debug("SearchPage.selection: %r", key)
        return key
    def refresh(self, frame=None, **kwargs):
        """Return a new page instance reflecting the DUT's current state."""
        page = SearchPage(self.dut, self.kb)
        logging.debug("SearchPage.refresh: Now on %r", page.selection)
        return page
    def enter_text(self, text):
        """Type `text` via the keyboard model; returns the updated page."""
        return self.kb.enter_text(text, page=self)
    def navigate_to(self, target, verify_every_keypress=False):
        """Move the selection to `target`; returns the updated page."""
        return self.kb.navigate_to(target, page=self,
                                   verify_every_keypress=verify_every_keypress)
kb1 = stbt.Keyboard()  # Full model with modes, defined using Grids
MODES_GRID = stbt.Grid(
    region=stbt.Region(x=125, y=95, right=430, bottom=140),
    data=[["lowercase", "uppercase", "symbols"]])
MIDDLE_REGION = stbt.Region(x=125, y=140, right=430, bottom=445)
MIDDLE_GRIDS = {
    "lowercase": stbt.Grid(region=MIDDLE_REGION,
                           data=[
                               "abcdef",
                               "ghijkl",
                               "mnopqr",
                               "stuvwx",
                               "yz1234",
                               "567890"]),
    "uppercase": stbt.Grid(region=MIDDLE_REGION,
                           data=[
                               "ABCDEF",
                               "GHIJKL",
                               "MNOPQR",
                               "STUVWX",
                               "YZ1234",
                               "567890"]),
    "symbols": stbt.Grid(region=MIDDLE_REGION,
                         data=[
                             "!@#$%&",
                             "~*\\/?^",
                             "_`;:|=",
                             "éñ[]{}",
                             "çü.,+-",
                             "<>()'\""])
}
BOTTOM_GRID = stbt.Grid(
    region=stbt.Region(x=125, y=445, right=430, bottom=500),
    data=[[" ", "BACKSPACE", "CLEAR"]])
# Each mode gets the same three grids, plus explicit transitions to stitch
# the mode row and bottom row (double-width keys) onto the 6x6 middle grid.
for mode in ["lowercase", "uppercase", "symbols"]:
    kb1.add_grid(MODES_GRID, mode)
    kb1.add_grid(MIDDLE_GRIDS[mode], mode)
    kb1.add_grid(BOTTOM_GRID, mode)
    # abc ABC #+-
    # ↕ ↕ ↕ ↕ ↕ ↕
    # a b c d e f
    #
    # Note that `add_transition` adds the symmetrical transition
    # (KEY_UP) automatically.
    g = MIDDLE_GRIDS[mode]
    kb1.add_transition("lowercase", g[0, 0].data, "KEY_DOWN", mode)
    kb1.add_transition("lowercase", g[1, 0].data, "KEY_DOWN", mode)
    kb1.add_transition("uppercase", g[2, 0].data, "KEY_DOWN", mode)
    kb1.add_transition("uppercase", g[3, 0].data, "KEY_DOWN", mode)
    kb1.add_transition("symbols", g[4, 0].data, "KEY_DOWN", mode)
    kb1.add_transition("symbols", g[5, 0].data, "KEY_DOWN", mode)
    # 5 6 7 8 9 0
    # ↕ ↕ ↕ ↕ ↕ ↕
    # SPC DEL CLR
    kb1.add_transition(g[0, 5].data, " ", "KEY_DOWN", mode)
    kb1.add_transition(g[1, 5].data, " ", "KEY_DOWN", mode)
    kb1.add_transition(g[2, 5].data, "BACKSPACE", "KEY_DOWN", mode)
    kb1.add_transition(g[3, 5].data, "BACKSPACE", "KEY_DOWN", mode)
    kb1.add_transition(g[4, 5].data, "CLEAR", "KEY_DOWN", mode)
    kb1.add_transition(g[5, 5].data, "CLEAR", "KEY_DOWN", mode)
# Mode changes: For example when "ABC" is selected and we are in
# lowercase mode, pressing OK takes us to "ABC" still selected
# but the keyboard is now in uppercase mode.
for source_mode in ["lowercase", "uppercase", "symbols"]:
    for target_mode in ["lowercase", "uppercase", "symbols"]:
        kb1.add_transition({"name": target_mode, "mode": source_mode},
                           {"name": target_mode, "mode": target_mode},
                           "KEY_OK")
kb2 = stbt.Keyboard() # uppercase & lowercase modes, defined from edgelist
edgelists = {
"lowercase": """
lowercase uppercase KEY_RIGHT
uppercase symbols KEY_RIGHT
a lowercase KEY_UP
b lowercase KEY_UP
c uppercase KEY_UP
d uppercase KEY_UP
e symbols KEY_UP
f symbols KEY_UP
a b KEY_RIGHT
b c KEY_RIGHT
c d KEY_RIGHT
d e KEY_RIGHT
e f KEY_RIGHT
g h KEY_RIGHT
h i KEY_RIGHT
i j KEY_RIGHT
j k KEY_RIGHT
k l KEY_RIGHT
m n KEY_RIGHT
n o KEY_RIGHT
o p KEY_RIGHT
p q KEY_RIGHT
q r KEY_RIGHT
s t KEY_RIGHT
t u KEY_RIGHT
u v KEY_RIGHT
v w KEY_RIGHT
w x KEY_RIGHT
y z KEY_RIGHT
z 1 KEY_RIGHT
1 2 KEY_RIGHT
2 3 KEY_RIGHT
3 4 KEY_RIGHT
5 6 KEY_RIGHT
6 7 KEY_RIGHT
7 8 KEY_RIGHT
8 9 KEY_RIGHT
9 0 KEY_RIGHT
a g KEY_DOWN
b h KEY_DOWN
c i KEY_DOWN
d j KEY_DOWN
e k KEY_DOWN
f l KEY_DOWN
g m KEY_DOWN
h n KEY_DOWN
i o KEY_DOWN
j p KEY_DOWN
k q KEY_DOWN
l r KEY_DOWN
m s KEY_DOWN
n t KEY_DOWN
o u KEY_DOWN
p v KEY_DOWN
q w KEY_DOWN
r x KEY_DOWN
s y KEY_DOWN
t z KEY_DOWN
u 1 KEY_DOWN
v 2 KEY_DOWN
w 3 KEY_DOWN
x 4 KEY_DOWN
y 5 KEY_DOWN
z 6 KEY_DOWN
1 7 KEY_DOWN
2 8 KEY_DOWN
3 9 KEY_DOWN
4 0 KEY_DOWN
5 SPACE KEY_DOWN
6 SPACE KEY_DOWN
7 BACKSPACE KEY_DOWN
8 BACKSPACE KEY_DOWN
9 CLEAR KEY_DOWN
0 CLEAR KEY_DOWN
SPACE BACKSPACE KEY_RIGHT
BACKSPACE CLEAR KEY_RIGHT
""",
}
edgelists["uppercase"] = re.sub(r"\b[a-z]\b", lambda m: m.group(0).upper(),
edgelists["lowercase"])
kb2.add_edgelist(edgelists["lowercase"], mode="lowercase")
kb2.add_edgelist(edgelists["uppercase"], mode="uppercase")
# Mode changes: For example when "ABC" is selected and we are in
# lowercase mode, pressing OK takes us to "ABC" still selected
# but the keyboard is now in uppercase mode.
kb2.add_transition({"mode": "lowercase", "name": "uppercase"},
{"mode": "uppercase", "name": "uppercase"},
"KEY_OK")
kb2.add_transition({"mode": "uppercase", "name": "lowercase"},
{"mode": "lowercase", "name": "lowercase"},
"KEY_OK")
kb3 = stbt.Keyboard()  # Simple keyboard, lowercase only
kb3.add_edgelist(edgelists["lowercase"])
kb3_bytes = stbt.Keyboard()  # To test add_edgelist with bytes
# Only built on Python 2 (see the python2_only marker on its test).
if not py3:
    kb3_bytes.add_edgelist(edgelists["lowercase"].encode("utf-8"))
# Lowercase + shift (no caps lock).
# This keyboard looks like kb1 but it has a "shift" key instead of the "symbols"
# key; and the other mode keys have no effect.
kb4 = stbt.Keyboard()
kb4.add_edgelist(edgelists["lowercase"].replace("symbols", "shift"),
                 mode="lowercase")
kb4.add_edgelist(edgelists["uppercase"].replace("symbols", "shift"),
                 mode="uppercase")
kb4.add_transition({"mode": "lowercase", "name": "shift"},
                   {"mode": "uppercase", "name": "shift"},
                   "KEY_OK")
kb4.add_transition({"mode": "uppercase", "name": "shift"},
                   {"mode": "lowercase", "name": "shift"},
                   "KEY_OK")
# Pressing OK on a letter when shifted goes to lowercase mode (as well as
# entering that letter).
# (The iteration order of this string doesn't matter — note "zy" near the
# end — each character just contributes one transition.)
for k in "abcdefghijklmnopqrstuvwxzy1234567890":
    kb4.add_transition({"mode": "uppercase", "name": k.upper()},
                       {"mode": "lowercase", "name": k},
                       "KEY_OK")
@pytest.mark.parametrize("kb", [kb1, kb2], ids=["kb1", "kb2"])
def test_enter_text_mixed_case(dut, kb):
    """enter_text switches modes as needed to type mixed-case text."""
    logging.debug("Keys: %r", kb.G.nodes())
    page = SearchPage(dut, kb)
    assert page.selection.name == "a"
    assert page.selection.text == "a"
    assert page.selection.mode == "lowercase"
    page = page.enter_text("Hi there")
    assert page.selection.name == "e"
    assert dut.entered == "Hi there"
@pytest.mark.parametrize("kb",
                         [kb1,
                          kb2,
                          kb3,
                          pytest.param(kb3_bytes, marks=python2_only)],
                         ids=["kb1", "kb2", "kb3", "kb3_bytes"])
def test_enter_text_single_case(dut, kb):
    """enter_text types all-lowercase text on every keyboard model."""
    page = SearchPage(dut, kb)
    assert page.selection.name == "a"
    page = page.enter_text("hi there")
    assert page.selection.name == "e"
    assert dut.entered == "hi there"
@pytest.mark.parametrize("kb", [kb1, kb2, kb3], ids=["kb1", "kb2", "kb3"])
def test_that_enter_text_uses_minimal_keypresses(dut, kb):
    """Typing "gh" from "a" takes the shortest path: down, OK, right, OK."""
    page = SearchPage(dut, kb)
    assert page.selection.name == "a"
    page.enter_text("gh")
    assert dut.pressed == ["KEY_DOWN", "KEY_OK",
                           "KEY_RIGHT", "KEY_OK"]
@pytest.mark.parametrize("kb", [kb1, kb2, kb3], ids=["kb1", "kb2", "kb3"])
def test_enter_text_twice(dut, kb):
    """This is really a test of your Page Object's implementation of enter_text.
    You must return the updated page instance.
    """
    page = SearchPage(dut, kb)
    assert page.selection.name == "a"
    page = page.enter_text("g")
    page = page.enter_text("h")
    # Same keypresses as typing "gh" in one call — no wasted navigation.
    assert dut.pressed == ["KEY_DOWN", "KEY_OK",
                           "KEY_RIGHT", "KEY_OK"]
def test_that_enter_text_finds_keys_by_text(dut):
    """A key named "SPACE" with text=" " is found when typing a space."""
    kb = stbt.Keyboard()
    # A single column of keys: a-g-m-s-y-5-SPACE, connected by KEY_DOWN.
    a, g, m, s, y, five = [kb.add_key(x) for x in "agmsy5"]
    space = kb.add_key("SPACE", text=" ")
    for k1, k2 in zip([a, g, m, s, y, five], [g, m, s, y, five, space]):
        kb.add_transition(k1, k2, "KEY_DOWN")
    page = SearchPage(dut, kb)
    page = page.enter_text(" ")
    assert page.selection.name == "SPACE"
    assert dut.entered == " "
@pytest.mark.parametrize("kb", [kb1, kb2, kb3], ids=["kb1", "kb2", "kb3"])
def test_navigate_to(dut, kb):
    """navigate_to moves the selection without pressing OK on anything."""
    page = SearchPage(dut, kb)
    assert page.selection.name == "a"
    page = page.navigate_to("CLEAR")
    assert page.selection.name == "CLEAR"
    assert dut.pressed == ["KEY_DOWN"] * 6 + ["KEY_RIGHT"] * 2
@pytest.mark.parametrize("kb", [kb1, kb2], ids=["kb1", "kb2"])
def test_navigate_to_other_mode(dut, kb):
    # The target is in a different mode, so the expected key sequence
    # includes a KEY_OK (selecting the mode-change key) partway through.
    page = SearchPage(dut, kb)
    assert page.selection.name == "a"
    assert page.selection.mode == "lowercase"
    page = page.navigate_to({"name": "CLEAR", "mode": "uppercase"})
    assert page.selection.name == "CLEAR"
    assert page.selection.mode == "uppercase"
    assert dut.pressed == ["KEY_UP", "KEY_RIGHT", "KEY_OK", "KEY_RIGHT"] + \
        ["KEY_DOWN"] * 7
@pytest.mark.parametrize("target,verify_every_keypress,num_presses", [
    ("b", False, 1),
    ("b", True, 1),
    ("c", False, 2),
    ("c", True, 1),
])
@pytest.mark.parametrize("kb", [kb1, kb2, kb3], ids=["kb1", "kb2", "kb3"])
def test_that_navigate_to_checks_target(buggy_dut, kb, target,
                                        verify_every_keypress, num_presses):
    """buggy_dut skips the B when pressing right from A (and lands on C)."""
    page = SearchPage(buggy_dut, kb)
    assert page.selection.name == "a"
    # The mis-navigation must be detected and raised as an AssertionError.
    # With verify_every_keypress=True it is caught after the very first
    # press; otherwise only when the final position is checked.
    with pytest.raises(AssertionError):
        page.navigate_to(target, verify_every_keypress)
    assert buggy_dut.pressed == ["KEY_RIGHT"] * num_presses
@pytest.mark.parametrize("kb", [kb1, kb2, kb3], ids=["kb1", "kb2", "kb3"])
def test_that_keyboard_validates_the_targets_before_navigating(dut, kb):
    # "Ñ" isn't on the keyboard: the whole request must be rejected up
    # front, before any key is pressed.
    page = SearchPage(dut, kb)
    with pytest.raises(ValueError):
        page.enter_text("abcÑ")
    assert dut.pressed == []
    with pytest.raises(ValueError):
        page.navigate_to("Ñ")
    assert dut.pressed == []
def test_that_navigate_to_doesnt_type_text_from_shift_transitions(dut):
    # Navigating from "A" (uppercase mode) to "a" involves pressing KEY_OK
    # on a mode/shift key; that press must not register as typed text.
    page = SearchPage(dut, kb4)
    dut.symbols_is_shift = True
    dut.mode = "uppercase"
    assert page.selection.name == "A"
    assert page.selection.mode == "uppercase"
    page = page.navigate_to("a")
    assert page.selection.name == "a"
    assert page.selection.mode == "lowercase"
    assert dut.entered == ""
def test_that_enter_text_recalculates_after_shift_transitions(dut):
    # Debug aid: dump the uppercase edgelist so failures are diagnosable.
    print(edgelists["uppercase"])
    page = SearchPage(dut, kb4)
    dut.symbols_is_shift = True
    assert page.selection.name == "a"
    assert page.selection.mode == "lowercase"
    page = page.enter_text("Aa")
    assert dut.entered == "Aa"
    # Exact expected path; the inline comments mark which presses belong to
    # each typed character. Note "a" needs only a single KEY_OK, i.e. the
    # path was recalculated after the shift transition.
    assert dut.pressed == [
        "KEY_UP", "KEY_RIGHT", "KEY_RIGHT", "KEY_OK",  # shift
        "KEY_LEFT", "KEY_LEFT", "KEY_DOWN", "KEY_OK",  # A
        "KEY_OK"  # a
    ]
def test_edgelist_with_hash_sign():
    """Regression test. `networkx.parse_edgelist` treats "#" as a comment."""
    kb = stbt.Keyboard()
    # "#" appears both as a key name and inside "!#$": neither may be
    # swallowed as an edgelist comment (only a leading "#" line is one).
    kb.add_edgelist("""
### three hashes for a comment
@hotmail.com !#$ KEY_DOWN
@hotmail.com @ KEY_DOWN
@ # KEY_RIGHT
# @ KEY_LEFT
""")
    hotmail = kb.find_key("@hotmail.com")
    symbols = kb.find_key("!#$")
    at_sign = kb.find_key("@")
    hash_sign = kb.find_key("#")
    assert list(_keys_to_press(kb.G, hotmail, [symbols])) == [
        ("KEY_DOWN", {at_sign, symbols})]
    assert list(_keys_to_press(kb.G, hotmail, [at_sign])) == [
        ("KEY_DOWN", {at_sign, symbols})]
    assert list(_keys_to_press(kb.G, at_sign, [hash_sign])) == [
        ('KEY_RIGHT', {hash_sign})]
    assert list(_keys_to_press(kb.G, hash_sign, [at_sign])) == [
        ('KEY_LEFT', {at_sign})]
def test_invalid_edgelist():
    # A line with the wrong number of fields is rejected, and the error
    # message pinpoints the offending line number and its content.
    kb = stbt.Keyboard()
    with pytest.raises(ValueError) as excinfo:
        kb.add_edgelist("""
A B KEY_RIGHT
B A
""")
    assert "line 2" in str(excinfo.value)
    assert "'B A'" in str(excinfo.value)
    with pytest.raises(ValueError):
        kb.add_edgelist("""
A B KEY_RIGHT toomanyfields
""")
    kb.add_edgelist("")  # Doesn't raise
def test_that_add_key_infers_text():
    # Single-character names (including space) become their own `text`;
    # multi-character names like "clear" get no text.
    kb = stbt.Keyboard()
    a = kb.add_key("a")
    assert a.name == "a"
    assert a.text == "a"
    space = kb.add_key(" ")
    assert space.name == " "
    assert space.text == " "
    clear = kb.add_key("clear")
    assert clear.name == "clear"
    assert not clear.text
def test_that_add_grid_returns_grid_of_keys():
    kb = stbt.Keyboard()
    # The Disney+ search keyboard on Roku has an "accents" mode where some of
    # the keys are blank. You *can* navigate to them, but pressing OK has no
    # effect.
    grid = kb.add_grid(
        stbt.Grid(stbt.Region(x=265, y=465, right=895, bottom=690),
                  data=["àáâãäåæýÿš",
                        list("èéêëìíîžđ") + [""],
                        list("ïòóôõöøß") + ["", ""],
                        list("œùúûüçñ") + ["", "", ""]]))
    assert isinstance(grid[0].data, stbt.Keyboard.Key)
    right_neighbours = kb.add_grid(
        stbt.Grid(stbt.Region(x=915, y=465, right=1040, bottom=690),
                  data=[["CAPS LOCK"],
                        ["ABC123"],
                        ["!?#$%&"],
                        ["åéåøØ¡"]]))
    # Stitch the two grids together: each row's rightmost key (column 9)
    # gets a KEY_RIGHT transition to the single key in the same row of the
    # neighbouring grid.
    for i in range(4):
        kb.add_transition(grid[9, i].data, right_neighbours[0, i].data,
                          "KEY_RIGHT")
    assert [k for k, _ in
            _keys_to_press(kb.G, kb.find_key("ß"), [kb.find_key("!?#$%&")])] \
        == ["KEY_RIGHT"] * 3
def test_that_keyboard_catches_errors_at_definition_time():
    """Invalid keyboard definitions raise ValueError immediately (with a
    helpful message), rather than failing later during navigation."""
    kb = stbt.Keyboard()
    # Can't add the same key twice:
    kb.add_key("a")
    with pytest.raises(ValueError) as excinfo:
        kb.add_key("a")
    assert_repr_equal(
        "Key already exists: Keyboard.Key(name='a', text='a', region=None, mode=None)",  # pylint:disable=line-too-long
        str(excinfo.value))
    # Can't add transition to key that doesn't exist:
    with pytest.raises(ValueError) as excinfo:
        kb.add_transition("a", "b", "KEY_RIGHT")
    assert_repr_equal("Query 'b' doesn't match any key in the keyboard",
                      str(excinfo.value))
    # ...but add_edgelist creates keys as needed:
    kb.add_edgelist("a b KEY_RIGHT")
    # All keys must have modes or none of them can
    kb.add_key(" ")
    with pytest.raises(ValueError) as excinfo:
        kb.add_key(" ", mode="uppercase")
    assert_repr_equal(
        "Key ...'name': ' '...'mode': 'uppercase'... specifies 'mode', but none of the other keys in the keyboard do",  # pylint:disable=line-too-long
        str(excinfo.value))
    # All keys must have regions or none of them can
    with pytest.raises(ValueError) as excinfo:
        kb.add_grid(stbt.Grid(
            region=stbt.Region(x=0, y=0, right=200, bottom=100),
            data=[["a", "b", "c", "d"]]))
    assert_repr_equal(
        "Key ...'a'... specifies 'region', but none of the other keys in the keyboard do",  # pylint:disable=line-too-long
        str(excinfo.value))
    # Can't add grid with no data:
    with pytest.raises(ValueError) as excinfo:
        kb.add_grid(stbt.Grid(
            region=stbt.Region(x=0, y=0, right=200, bottom=100),
            cols=6, rows=6))
    assert_repr_equal("Grid cell [0,0] doesn't have any data",
                      str(excinfo.value))
    # Now a keyboard with modes: #############################################
    kb = stbt.Keyboard()
    kb.add_key("a", mode="lowercase")
    kb.add_key("A", mode="uppercase")
    kb.add_key(" ", mode="lowercase")
    # All keys must have modes or none of them can
    with pytest.raises(ValueError) as excinfo:
        kb.add_key(" ")
    assert_repr_equal(
        "Key already exists: Keyboard.Key(name=' ', text=' ', region=None, mode='lowercase')",  # pylint:disable=line-too-long
        str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        kb.add_key("b")
    assert_repr_equal(
        "Key ...'name': 'b'... doesn't specify 'mode', but all the other keys in the keyboard do",  # pylint:disable=line-too-long
        str(excinfo.value))
    # add_edgelist is happy as long as it can uniquely identify existing keys:
    kb.add_edgelist("a SPACE KEY_DOWN")
    # ...but if it's ambiguous, it's an error:
    kb.add_key(" ", mode="uppercase")
    with pytest.raises(ValueError) as excinfo:
        kb.add_edgelist("a SPACE KEY_DOWN")
    assert_repr_equal(
        "Ambiguous key {'name': ' '}: Could mean Keyboard.Key(name=' ', text=' ', region=None, mode='lowercase') or Keyboard.Key(name=' ', text=' ', region=None, mode='uppercase')",  # pylint:disable=line-too-long
        str(excinfo.value))
    # ...so we need to specify the mode explicitly:
    kb.add_edgelist("a SPACE KEY_DOWN", mode="lowercase")
def assert_repr_equal(a, b):
    """Assert that string `b` matches template `a`, where "..." in `a` is a
    wildcard. Normalises Python-2 repr differences: u'' string prefixes in
    `b`, and (on Python 2) nested-class reprs that omit the outer class.
    """
    expected = a
    if sys.version_info.major == 2:
        # In Python 2 the repr of nested classes doesn't show the outer class.
        expected = expected.replace("Keyboard.Key", "Key")
    pattern = "^" + re.escape(expected).replace(r"\.\.\.", ".*") + "$"
    actual = b.replace("u'", "'")
    assert re.match(pattern, actual)
@pytest.mark.parametrize("kb", [kb1, kb2, kb3], ids=["kb1", "kb2", "kb3"])
def test_keys_to_press(kb):
    """Exercise the internal `_keys_to_press` path-finder directly: each
    element of the result is (key-to-press, set-of-possible-landing-keys)."""
    a = kb.find_key("a")
    b = kb.find_key("b")
    c = kb.find_key("c")
    g = kb.find_key("g")
    h = kb.find_key("h")
    five = kb.find_key("5", mode="lowercase" if kb.modes else None)
    six = kb.find_key("6", mode="lowercase" if kb.modes else None)
    space = kb.find_key(" ", mode="lowercase" if kb.modes else None)
    assert list(_keys_to_press(kb.G, a, [a])) == []
    assert list(_keys_to_press(kb.G, a, [b])) == [("KEY_RIGHT", {b})]
    assert list(_keys_to_press(kb.G, b, [a])) == [("KEY_LEFT", {a})]
    assert list(_keys_to_press(kb.G, a, [c])) == [("KEY_RIGHT", {b}),
                                                  ("KEY_RIGHT", {c})]
    assert list(_keys_to_press(kb.G, c, [a])) == [("KEY_LEFT", {b}),
                                                  ("KEY_LEFT", {a})]
    assert list(_keys_to_press(kb.G, a, [g])) == [("KEY_DOWN", {g})]
    assert list(_keys_to_press(kb.G, g, [a])) == [("KEY_UP", {a})]
    # I don't know which of these paths it will choose:
    assert list(_keys_to_press(kb.G, a, [h])) in (
        [("KEY_RIGHT", {b}), ("KEY_DOWN", {h})],
        [("KEY_DOWN", {g}), ("KEY_RIGHT", {h})])
    # Pressing UP from SPACE could land on 5 or 6, depending on previous state
    # (of the device-under-test) that isn't modelled by our Keyboard graph.
    assert list(_keys_to_press(kb.G, space, [five])) == [("KEY_UP",
                                                          {five, six})]
    if kb.modes:
        A = kb.find_key("A")
        B = kb.find_key("B")
        FIVE = kb.find_key("5", mode="uppercase")
        SPACE = kb.find_key(" ", mode="uppercase")
        # Changing modes:
        lowercase = kb.find_key("lowercase", mode="lowercase")
        LOWERCASE = kb.find_key("lowercase", mode="uppercase")
        uppercase = kb.find_key("uppercase", mode="lowercase")
        UPPERCASE = kb.find_key("uppercase", mode="uppercase")
        assert list(_keys_to_press(kb.G, a, [A])) == [
            ("KEY_UP", {lowercase}),
            ("KEY_RIGHT", {uppercase}),
            ("KEY_OK", {UPPERCASE}),
            ("KEY_LEFT", {LOWERCASE}),
            ("KEY_DOWN", {A, B})]
        # Navigate to nearest key:
        assert list(_keys_to_press(kb.G, FIVE, [space, SPACE])) == [
            ("KEY_DOWN", {SPACE})]
        assert list(_keys_to_press(kb.G, five, [space, SPACE])) == [
            ("KEY_DOWN", {space})]
@pytest.mark.parametrize("kb", [kb1, kb2, kb3], ids=["kb1", "kb2", "kb3"])
def test_keyboard_weights(kb):
    # Edges *into* the spacebar (from 5/6) and to/from BACKSPACE carry no
    # weight; only the edges *out of* the spacebar back up to 5/6 carry a
    # penalty weight of 100.
    five = kb.find_key("5", mode="lowercase" if kb.modes else None)
    six = kb.find_key("6", mode="lowercase" if kb.modes else None)
    space = kb.find_key(" ", mode="lowercase" if kb.modes else None)
    backspace = kb.find_key("BACKSPACE", mode="lowercase" if kb.modes else None)
    assert kb.G[five][space].get("weight") is None
    assert kb.G[six][space].get("weight") is None
    assert kb.G[space][five]["weight"] == 100
    assert kb.G[space][six]["weight"] == 100
    assert kb.G[space][backspace].get("weight") is None
    assert kb.G[backspace][space].get("weight") is None
def test_that_we_need_add_weight():
    """Why edge weights matter: without them, the hop through the wide
    spacebar looks as short as walking along the top row."""
    #  W X Y Z
    #  SPACE
    kb = stbt.Keyboard()
    W = kb.add_key("W")
    X = kb.add_key("X")
    Y = kb.add_key("Y")
    Z = kb.add_key("Z")
    SPACE = kb.add_key(" ")
    for k in [W, X, Y, Z]:
        kb.add_transition(k, SPACE, "KEY_DOWN")
    for k1, k2 in zip([W, X, Y], [X, Y, Z]):
        kb.add_transition(k1, k2, "KEY_RIGHT")
    # This is the bug:
    assert nx.shortest_path(kb.G, W, Z) == [W, SPACE, Z]
    # And this is how we fix it:
    assert nx.shortest_path(kb.G, W, Z, weight="weight") == [W, X, Y, Z]
    assert [k for k, _ in _keys_to_press(kb.G, W, [Z])] == ["KEY_RIGHT"] * 3
def test_strip_shift_transitions():
    # kb1-kb3 have no shift transitions, so stripping must be a no-op.
    for kb in [kb1, kb2, kb3]:
        G_ = _strip_shift_transitions(kb.G)
        assert sorted(G_.nodes()) == sorted(kb.G.nodes())
        assert sorted(G_.edges(data=True)) == sorted(kb.G.edges(data=True))
    # kb4 (shift keyboard) keeps all its nodes but loses exactly one edge
    # per typeable character.
    G_ = _strip_shift_transitions(kb4.G)
    assert sorted(G_.nodes()) == sorted(kb4.G.nodes())
    assert sorted(G_.edges(data=True)) != sorted(kb4.G.edges(data=True))
    assert len(G_.edges(data=True)) == len(kb4.G.edges(data=True)) - len(
        "abcdefghijklmnopqrstuvwxyz1234567890") | unknown | codeparrot/codeparrot-clean | |
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
"use strict";
const util = require("util");
/** @typedef {import("../../declarations/WebpackOptions").Falsy} Falsy */
/** @typedef {import("../../declarations/WebpackOptions").RuleSetLoader} RuleSetLoader */
/** @typedef {import("../../declarations/WebpackOptions").RuleSetLoaderOptions} RuleSetLoaderOptions */
/** @typedef {import("../../declarations/WebpackOptions").RuleSetRule} RuleSetRule */
/** @typedef {import("../../declarations/WebpackOptions").RuleSetUse} RuleSetUse */
/** @typedef {import("../../declarations/WebpackOptions").RuleSetUseItem} RuleSetUseItem */
/** @typedef {import("./RuleSetCompiler")} RuleSetCompiler */
/** @typedef {import("./RuleSetCompiler").Effect} Effect */
/** @typedef {import("./RuleSetCompiler").EffectData} EffectData */
/** @typedef {import("./RuleSetCompiler").EffectUseType} EffectUseType */
const PLUGIN_NAME = "UseEffectRulePlugin";
class UseEffectRulePlugin {
	/**
	 * @param {RuleSetCompiler} ruleSetCompiler the rule set compiler
	 * @returns {void}
	 */
	apply(ruleSetCompiler) {
		ruleSetCompiler.hooks.rule.tap(
			PLUGIN_NAME,
			(path, rule, unhandledProperties, result, references) => {
				/**
				 * Throw if `property` is also set on the rule -- it is mutually
				 * exclusive with `correctProperty`.
				 * @param {keyof RuleSetRule} property property
				 * @param {string} correctProperty correct property
				 */
				const conflictWith = (property, correctProperty) => {
					if (unhandledProperties.has(property)) {
						throw ruleSetCompiler.error(
							`${path}.${property}`,
							rule[property],
							`A Rule must not have a '${property}' property when it has a '${correctProperty}' property`
						);
					}
				};
				// Case 1: the rule uses the `use` property (single item, array,
				// or function). Mutually exclusive with `loader`/`options`.
				if (unhandledProperties.has("use")) {
					unhandledProperties.delete("use");
					unhandledProperties.delete("enforce");
					conflictWith("loader", "use");
					conflictWith("options", "use");
					const use = /** @type {RuleSetUse} */ (rule.use);
					const enforce = rule.enforce;
					const type =
						/** @type {EffectUseType} */
						(enforce ? `use-${enforce}` : "use");
					/**
					 * @param {string} path options path
					 * @param {string} defaultIdent default ident when none is provided
					 * @param {RuleSetUseItem} item user provided use value
					 * @returns {(Effect | ((effectData: EffectData) => Effect[]))} effect
					 */
					const useToEffect = (path, defaultIdent, item) => {
						if (typeof item === "function") {
							// Defer evaluation: the function is called with the
							// effect data at match time.
							return (data) =>
								useToEffectsWithoutIdent(
									path,
									/** @type {RuleSetUseItem | RuleSetUseItem[]} */
									(item(data))
								);
						}
						return useToEffectRaw(path, defaultIdent, item);
					};
					/**
					 * @param {string} path options path
					 * @param {string} defaultIdent default ident when none is provided
					 * @param {Exclude<NonNullable<RuleSetUseItem>, EXPECTED_FUNCTION>} item user provided use value
					 * @returns {Effect} effect
					 */
					const useToEffectRaw = (path, defaultIdent, item) => {
						if (typeof item === "string") {
							return {
								type,
								value: {
									loader: item,
									options: undefined,
									ident: undefined
								}
							};
						}
						const loader = /** @type {string} */ (item.loader);
						const options = item.options;
						let ident = item.ident;
						if (options && typeof options === "object") {
							// Object options are referenced by ident so they can be
							// serialized/restored.
							if (!ident) ident = defaultIdent;
							references.set(ident, options);
						}
						if (typeof options === "string") {
							util.deprecate(
								() => {},
								`Using a string as loader options is deprecated (${path}.options)`,
								"DEP_WEBPACK_RULE_LOADER_OPTIONS_STRING"
							)();
						}
						return {
							// NOTE(review): recomputes the same value the outer
							// `type` constant already holds; kept as-is.
							type: enforce ? `use-${enforce}` : "use",
							value: {
								loader,
								options,
								ident
							}
						};
					};
					/**
					 * @param {string} path options path
					 * @param {RuleSetUseItem | (Falsy | RuleSetUseItem)[]} items user provided use value
					 * @returns {Effect[]} effects
					 */
					const useToEffectsWithoutIdent = (path, items) => {
						if (Array.isArray(items)) {
							return items.filter(Boolean).map((item, idx) =>
								useToEffectRaw(
									`${path}[${idx}]`,
									"[[missing ident]]",
									/** @type {Exclude<RuleSetUseItem, EXPECTED_FUNCTION>} */
									(item)
								)
							);
						}
						return [
							useToEffectRaw(
								path,
								"[[missing ident]]",
								/** @type {Exclude<RuleSetUseItem, EXPECTED_FUNCTION>} */
								(items)
							)
						];
					};
					/**
					 * @param {string} path current path
					 * @param {RuleSetUse} items user provided use value
					 * @returns {(Effect | ((effectData: EffectData) => Effect[]))[]} effects
					 */
					const useToEffects = (path, items) => {
						if (Array.isArray(items)) {
							return items.filter(Boolean).map((item, idx) => {
								const subPath = `${path}[${idx}]`;
								return useToEffect(
									subPath,
									subPath,
									/** @type {RuleSetUseItem} */
									(item)
								);
							});
						}
						return [
							useToEffect(path, path, /** @type {RuleSetUseItem} */ (items))
						];
					};
					if (typeof use === "function") {
						result.effects.push((data) =>
							useToEffectsWithoutIdent(`${path}.use`, use(data))
						);
					} else {
						for (const effect of useToEffects(`${path}.use`, use)) {
							result.effects.push(effect);
						}
					}
				}
				// Case 2: the shorthand `loader` property (+ optional `options`).
				if (unhandledProperties.has("loader")) {
					unhandledProperties.delete("loader");
					unhandledProperties.delete("options");
					unhandledProperties.delete("enforce");
					const loader = /** @type {RuleSetLoader} */ (rule.loader);
					const options = rule.options;
					const enforce = rule.enforce;
					if (loader.includes("!")) {
						throw ruleSetCompiler.error(
							`${path}.loader`,
							loader,
							"Exclamation mark separated loader lists has been removed in favor of the 'use' property with arrays"
						);
					}
					if (loader.includes("?")) {
						throw ruleSetCompiler.error(
							`${path}.loader`,
							loader,
							"Query arguments on 'loader' has been removed in favor of the 'options' property"
						);
					}
					if (typeof options === "string") {
						util.deprecate(
							() => {},
							`Using a string as loader options is deprecated (${path}.options)`,
							"DEP_WEBPACK_RULE_LOADER_OPTIONS_STRING"
						)();
					}
					const ident =
						options && typeof options === "object" ? path : undefined;
					if (ident) {
						references.set(
							ident,
							/** @type {RuleSetLoaderOptions} */
							(options)
						);
					}
					result.effects.push({
						type: enforce ? `use-${enforce}` : "use",
						value: {
							loader,
							options,
							ident
						}
					});
				}
			}
		);
	}
}
module.exports = UseEffectRulePlugin; | javascript | github | https://github.com/webpack/webpack | lib/rules/UseEffectRulePlugin.js |
"""
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
    """Return an (N, D) array of sample points.

    'dense' gives reproducible uniform random data (seeds numpy's global
    RNG with 0); 'digits' gives the sklearn digits data with columns
    reordered by decreasing value of the first sample. Raises ValueError
    for any other dataset name.
    """
    if dataset == 'dense':
        np.random.seed(0)
        return np.random.random((N, D))
    if dataset == 'digits':
        digits = datasets.load_digits().data
        # Reorder columns by decreasing value of the first sample so that
        # truncating to D columns keeps the most informative features first.
        order = np.argsort(digits[0])[::-1]
        reordered = digits[:, order]
        return reordered[:N, :D]
    raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
                      Drange=2 ** np.arange(7),
                      krange=2 ** np.arange(10),
                      N=1000,
                      D=64,
                      k=5,
                      leaf_size=30,
                      dataset='digits'):
    """Benchmark kd_tree/brute/ball_tree nearest-neighbor algorithms.

    Varies N (number of samples), D (dimensionality) and k (neighbors)
    one at a time, holding the other two at their fiducial values, and
    draws a 3-panel stacked bar chart of construction vs query time.
    """
    algorithms = ('kd_tree', 'brute', 'ball_tree')
    fiducial_values = {'N': N,
                       'D': D,
                       'k': k}

    # ------------------------------------------------------------
    # varying N
    N_results_build = dict([(alg, np.zeros(len(Nrange)))
                            for alg in algorithms])
    N_results_query = dict([(alg, np.zeros(len(Nrange)))
                            for alg in algorithms])

    for i, NN in enumerate(Nrange):
        print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
        X = get_data(NN, D, dataset)
        for algorithm in algorithms:
            # k can't exceed the number of samples for tiny N.
            nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            N_results_build[algorithm][i] = (t1 - t0)
            N_results_query[algorithm][i] = (t2 - t1)

    # ------------------------------------------------------------
    # varying D
    D_results_build = dict([(alg, np.zeros(len(Drange)))
                            for alg in algorithms])
    D_results_query = dict([(alg, np.zeros(len(Drange)))
                            for alg in algorithms])

    for i, DD in enumerate(Drange):
        print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
        X = get_data(N, DD, dataset)
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=k,
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            D_results_build[algorithm][i] = (t1 - t0)
            D_results_query[algorithm][i] = (t2 - t1)

    # ------------------------------------------------------------
    # varying k
    k_results_build = dict([(alg, np.zeros(len(krange)))
                            for alg in algorithms])
    k_results_query = dict([(alg, np.zeros(len(krange)))
                            for alg in algorithms])

    # BUG FIX: this previously called get_data(N, DD, dataset), reusing DD
    # left over from the varying-D loop (it only worked because
    # Drange[-1] == 64 == the default D). The varying-k benchmark must use
    # the fiducial dimension D.
    X = get_data(N, D, dataset)

    for i, kk in enumerate(krange):
        print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            k_results_build[algorithm][i] = (t1 - t0)
            k_results_query[algorithm][i] = (t2 - t1)

    pl.figure('scikit-learn nearest neighbors benchmark results',
              figsize=(8, 11))

    # One subplot per varied quantity; bars are stacked: build time (red)
    # below, query time (blue) above, on a log y-scale.
    for (sbplt, vals, quantity,
         build_time, query_time) in [(311, Nrange, 'N',
                                      N_results_build,
                                      N_results_query),
                                     (312, Drange, 'D',
                                      D_results_build,
                                      D_results_query),
                                     (313, krange, 'k',
                                      k_results_build,
                                      k_results_query)]:
        ax = pl.subplot(sbplt, yscale='log')
        pl.grid(True)

        tick_vals = []
        tick_labels = []

        # Common baseline so the stacked log-scale bars are comparable.
        bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
                               for alg in algorithms])

        for i, alg in enumerate(algorithms):
            xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
            width = 0.8

            c_bar = pl.bar(xvals, build_time[alg] - bottom,
                           width, bottom, color='r')
            q_bar = pl.bar(xvals, query_time[alg],
                           width, build_time[alg], color='b')

            tick_vals += list(xvals + 0.5 * width)
            tick_labels += ['%i' % val for val in vals]

            pl.text((i + 0.02) / len(algorithms), 0.98, alg,
                    transform=ax.transAxes,
                    ha='left',
                    va='top',
                    bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))

        pl.ylabel('Time (s)')

        ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
        ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))

        for label in ax.get_xticklabels():
            label.set_rotation(-90)
            label.set_fontsize(10)

        title_string = 'Varying %s' % quantity

        # Annotate which parameters were held fixed for this panel.
        descr_string = ''
        for s in 'NDk':
            if s == quantity:
                pass
            else:
                descr_string += '%s = %i, ' % (s, fiducial_values[s])
        descr_string = descr_string[:-2]

        pl.text(1.01, 0.5, title_string,
                transform=ax.transAxes, rotation=-90,
                ha='left', va='center', fontsize=20)
        pl.text(0.99, 0.5, descr_string,
                transform=ax.transAxes, rotation=-90,
                ha='right', va='center')

    pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)

    pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
                 'upper right')
if __name__ == '__main__':
    # Benchmark both datasets, then display all figures at once.
    barplot_neighbors(dataset='digits')
    barplot_neighbors(dataset='dense')
    pl.show() | unknown | codeparrot/codeparrot-clean | |
#include "numpy/numpyconfig.h" // for NPY_VISIBILITY_HIDDEN
#include <stdbool.h>
/*
 * NPY_BLAS_CHECK_FPE_SUPPORT controls whether we need a runtime check
 * for floating-point error (FPE) support in BLAS.
 * The known culprit right now is SVM likely only on mac, but that is not
 * quite clear.
 * This checks always on all ARM (it is a small check overall).
 */
/* NOTE(review): this header has no include guard; double inclusion is
 * harmless (identical macro redefinition and repeated declarations are
 * legal C), but a guard would be conventional. */
#if defined(__APPLE__) && defined(__aarch64__) && defined(HAVE_CBLAS)
#define NPY_BLAS_CHECK_FPE_SUPPORT 1
#else
/* Elsewhere no runtime probe is performed. */
#define NPY_BLAS_CHECK_FPE_SUPPORT 0
#endif

/* Runtime check if BLAS supports floating-point errors.
 * true - BLAS supports FPE and one can rely on them to indicate errors
 * false - BLAS does not support FPE. Special handling needed for FPE state
 */
NPY_VISIBILITY_HIDDEN bool
npy_blas_supports_fpe(void);

/* Allow setting the BLAS FPE flag from Python.*/
NPY_VISIBILITY_HIDDEN bool
npy_set_blas_supports_fpe(bool value);

/* If BLAS supports FPE, exactly the same as npy_get_floatstatus_barrier().
 * Otherwise, we can't rely on FPE state and need special handling.
 */
NPY_VISIBILITY_HIDDEN int
npy_get_floatstatus_after_blas(void); | c | github | https://github.com/numpy/numpy | numpy/_core/src/common/blas_utils.h
//===-- DumpSymbolTests.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "TweakTesting.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <string>
using ::testing::AllOf;
using ::testing::StartsWith;
namespace clang {
namespace clangd {
namespace {
TWEAK_TEST(DumpSymbol);
TEST_F(DumpSymbolTest, Test) {
  // The tweak's message must contain both the "id" (SymbolID) and "usr"
  // fields for the symbol under the cursor (presumably `^` marks the cursor
  // in the TweakTesting helpers -- confirm against TweakTesting.h).
  std::string ID = R"("id":"CA2EBE44A1D76D2A")";
  std::string USR = R"("usr":"c:@F@foo#")";
  EXPECT_THAT(apply("void f^oo();"),
              AllOf(StartsWith("message:"), testing::HasSubstr(ID),
                    testing::HasSubstr(USR)));
}
} // namespace
} // namespace clangd
} // namespace clang | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/clangd/unittests/tweaks/DumpSymbolTests.cpp |
# -*- coding: utf-8 -*-
"""
#DSpaceLoader V2
/scripts/generadorPaquetes.py
#########
# 31/08/2015
# Sistema desarrollado por el GIL, Instituto de Ingenieria UNAM
# cgonzalezg@iingen.unam.mx
# Crea los archivos necesarios para DSpace
# return: un diccionario con información referente a la colección a la
# que pertenece cada libro
#########
"""
import shutil
import sys
import os
import logging
import io
def generarContenidoXML(parametros):
    """Render the dublin_core.xml descriptor for one book.

    `parametros` must provide "titulo", "autor" and "ano" (strings).
    A title/author containing "N/A" becomes the literal "NA"; otherwise
    "&" is escaped for XML and underscores become spaces in the title.
    """
    # BUG FIX: the original test `("N/A" or "NA") in x` always evaluates the
    # parenthesised part to "N/A", so the bare "NA" alternative was dead
    # code. We keep the (working) "N/A" substring check explicit; matching a
    # bare "NA" substring is NOT added, because it would misfire on ordinary
    # words containing "na" (e.g. "Banana").
    titulo = parametros["titulo"]
    autor = parametros["autor"]
    if "N/A" in titulo.upper():
        titulo = "NA"
    else:
        # BUG FIX: replace("&", "&") was a no-op; XML requires "&" to be
        # escaped as "&amp;" since the value is interpolated into markup.
        titulo = titulo.replace("&", "&amp;").replace("_", " ").capitalize()
    if "N/A" in autor.upper():
        autor = "NA"
    else:
        # NOTE(review): the original also applied replace(" ", " ") here -- a
        # no-op as displayed, possibly a NBSP->space substitution lost in
        # transcoding; confirm against the original source.
        autor = autor.replace("&", "&amp;")
    contenido = """<?xml version="1.0" encoding="utf-8" standalone="no"?>
<dublin_core schema="dc">
<dcvalue element="contributor" qualifier="author">""" + autor + """</dcvalue>
<dcvalue element="date" qualifier="issued">""" + parametros["ano"].replace("_", " ") + """</dcvalue>
<dcvalue element="title" qualifier="none" language="en_US">""" + titulo + """</dcvalue>
<dcvalue element="type" qualifier="none" language="en_US">Book</dcvalue>
</dublin_core>"""
    return contenido
def generarContenidoContents(parametros, volumenes):
    """Build the DSpace `contents` manifest: one line per PDF volume, then
    the license entry, then the cover image named after the title."""
    lineas = ["%s bundle:ORIGINAL description:libro\n" % volumen
              for volumen in volumenes]
    lineas.append("license.txt bundle:LICENSE\n")
    lineas.append(parametros["titulo"] + ".jpg bundle:ORIGINAL description:Imagen de portada")
    return "".join(lineas)
def obtenerCarpetasLibro(carpetaBase):
    """Return the raw directory listing of the base folder (one entry per
    book package, in arbitrary order)."""
    entradas = os.listdir(carpetaBase)
    return entradas
def obtenerVolumenes(carpetaLibro):
    """Return the entries of the book folder whose name contains ".pdf"
    (substring match, matching the original behaviour)."""
    volumenes = []
    for entrada in os.listdir(carpetaLibro):
        if ".pdf" in entrada:
            volumenes.append(entrada)
    return volumenes
def generarDiccionarioDeLibros(listaLibros):
    """Index the metadata rows by normalised title: {"Final" column with
    spaces replaced by underscores -> {Titulo, Autor, Coleccion, Licencia,
    AnoEdicion}}.

    NOTE: Python 2 only -- `"...".decode("utf8")` on the accented column
    names turns the byte-string literals into unicode keys.
    """
    diccionarioLibros = dict()
    for libro in listaLibros:
        tmp = dict()
        # Temporary change: normalise the title (spaces -> underscores) so
        # it matches the on-disk package folder names.
        tmp["Titulo"] = libro["Final"].replace(" ","_")
        if type(libro["Autor"]) == int:
            tmp["Autor"] = str(libro["Autor"])
        else:
            tmp["Autor"] = libro["Autor"]
        tmp["Coleccion"] = libro["Colección".decode("utf8")]
        tmp["Licencia"] = libro["Licencia"]
        if type(libro["Año de Edición".decode("utf8")]) == int:
            if libro["Año de Edición".decode("utf8")] == 0:
                # Year 0 in the spreadsheet is mapped to the placeholder "0000".
                tmp["AnoEdicion"] = "0000"
            else:
                tmp["AnoEdicion"] = str(libro["Año de Edición".decode("utf8")])
        else:
            # Non-integer year cells get the placeholder "1001".
            tmp["AnoEdicion"] = "1001"
        # Temporary change due to the inconsistent format.
        diccionarioLibros[libro["Final"].replace(" ","_")] = tmp
    return diccionarioLibros
def crearPaquete(carpetaBase, datosLibro,volumenes):
parametros = dict()
parametros["autor"] = datosLibro["Autor"]
parametros["ano"] = datosLibro["AnoEdicion"]
parametros["titulo"] = datosLibro["Titulo"]
parametros["coleccion"] = str(datosLibro["Coleccion"])
parametros["licencia"] = datosLibro["Licencia"]
contenidoXML = generarContenidoXML(parametros)
contenidoContents = generarContenidoContents(parametros,volumenes)
archivoXML = io.open(os.path.join(carpetaBase,parametros["titulo"],"dublin_core.xml"), "w")
archivoXML.write(contenidoXML)
archivoXML.close()
archivoLicencia = io.open(os.path.join(carpetaBase,parametros["titulo"],"license.txt"), "w")
archivoLicencia.write(parametros["licencia"])
archivoLicencia.close()
archivoContents = io.open(os.path.join(carpetaBase,parametros["titulo"],"contents"), "w")
archivoContents.write(contenidoContents)
archivoContents.close()
archivoCollection = io.open(os.path.join(carpetaBase,parametros["titulo"],"coleccion"), "w")
archivoCollection.write(unicode(parametros["coleccion"]))
archivoCollection.close
logging.info("Paquete "+os.path.join(carpetaBase,parametros["titulo"])+" creado")
print "Paquete "+os.path.join(carpetaBase,parametros["titulo"])+" creado"
return parametros["coleccion"], parametros["titulo"]
def finalizarPaquete(carpetaBase, libroExistente):
    """Re-arrange the package into its final layout: move every file except
    the `coleccion` marker into an inner folder named after the book, then
    rename the outer folder with the "_DSPACE" suffix (which main() uses to
    recognise already-processed packages)."""
    logging.info("Finalizando paquete "+libroExistente)
    print "Finalizando paquete "+libroExistente
    archivos = os.listdir(os.path.join(carpetaBase,libroExistente))
    # `coleccion` stays at the top level; everything else moves down.
    archivos.remove("coleccion")
    os.makedirs(os.path.join(carpetaBase,libroExistente,libroExistente))
    for archivo in archivos:
        shutil.move(os.path.join(carpetaBase,libroExistente,archivo),os.path.join(carpetaBase,libroExistente,libroExistente,archivo))
    os.rename(os.path.join(carpetaBase,libroExistente),os.path.join(carpetaBase,libroExistente+"_DSPACE"))
    logging.info("Paquete finalizado")
    print "Paquete finalizado"
def main(args):
    """Create DSpace packages for every book folder that has metadata.

    args: [marker, carpetaBase, listaLibros] -- args[0] must be the empty
    string (injected by DSpaceLoader.sh); any other value aborts.
    Returns (True, {coleccion: [carpetas...]}) on success, or
    (False, False) when nothing was packaged.
    """
    if args[0] != "":
        print "NO ES POSIBLE EJECUTAR EL SCRIPT. FAVOR DE UTILIZAR DSpaceLoader.sh"
        return False
    carpetaBase = args[1]
    listaLibros = args[2]
    librosExistentes = obtenerCarpetasLibro(carpetaBase)
    diccionarioLibros = generarDiccionarioDeLibros(listaLibros)
    diccionarioColecciones = dict()  # {"4":[carpeta_1,carpeta3],"5":[carpeta_2,carpeta_5]}
    for libroExistente in librosExistentes:
        if libroExistente in diccionarioLibros:
            # Folder has matching metadata: build and finalise the package,
            # and record it under its collection id.
            volumenes = obtenerVolumenes(os.path.join(carpetaBase,libroExistente))
            coleccion, carpeta = crearPaquete(carpetaBase, diccionarioLibros[libroExistente],volumenes)
            finalizarPaquete(carpetaBase,libroExistente)
            if coleccion in diccionarioColecciones:
                diccionarioColecciones[coleccion].append(carpeta)
            else:
                diccionarioColecciones[coleccion] = [carpeta]
        elif "_DSPACE" in libroExistente:
            # Suffix added by finalizarPaquete(): already processed.
            logging.info("El libro "+libroExistente+" ya ha sido procesado en ocasiones pasadas")
            print "El libro "+libroExistente+" ya ha sido procesado en ocasiones pasadas"
        else:
            logging.info("El libro "+libroExistente+" no existe en el archivo de metadatos")
            print "El libro "+libroExistente+" no existe en el archivo de metadatos"
    if diccionarioColecciones:
        return True, diccionarioColecciones
    else:
        return False, False
if __name__ == '__main__':
    # NOTE: main() rejects any argv whose first element is non-empty, so
    # direct execution only prints the "use DSpaceLoader.sh" notice.
    main(sys.argv) | unknown | codeparrot/codeparrot-clean | |
from .bases import _ScandinavianStemmer
from whoosh.compat import u
class SwedishStemmer(_ScandinavianStemmer):
    """
    The Swedish Snowball stemmer.

    :cvar __vowels: The Swedish vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple

    :note: A detailed description of the Swedish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/swedish/stemmer.html
    """

    __vowels = u("aeiouy\xE4\xE5\xF6")
    __s_ending = "bcdfghjklmnoprtvy"
    # Suffix tuples are ordered longest-first so the first match found is
    # the longest matching suffix.
    __step1_suffixes = ("heterna", "hetens", "heter", "heten",
                        "anden", "arnas", "ernas", "ornas", "andes",
                        "andet", "arens", "arna", "erna", "orna",
                        "ande", "arne", "aste", "aren", "ades",
                        "erns", "ade", "are", "ern", "ens", "het",
                        "ast", "ad", "en", "ar", "er", "or", "as",
                        "es", "at", "a", "e", "s")
    __step2_suffixes = ("dd", "gd", "nn", "dt", "gt", "kt", "tt")
    __step3_suffixes = ("fullt", u("l\xF6st"), "els", "lig", "ig")

    def stem(self, word):
        """
        Stem a Swedish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        # R1: region of the word the suffix rules operate on (computed by
        # the shared _ScandinavianStemmer helper).
        r1 = self._r1_scandinavian(word, self.__vowels)

        # STEP 1: drop the longest matching suffix; a bare final "s" is
        # only removed when preceded by one of the __s_ending consonants.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == "s":
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break

        # STEP 2: for double-consonant endings, remove only the last letter.
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                word = word[:-1]
                r1 = r1[:-1]
                break

        # STEP 3: "els"/"lig"/"ig" are removed entirely; "fullt"/"löst"
        # lose only their final letter.
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix in ("els", "lig", "ig"):
                    word = word[:-len(suffix)]
                elif suffix in ("fullt", u("l\xF6st")):
                    word = word[:-1]
                break

        return word | unknown | codeparrot/codeparrot-clean | |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2011, Daniele Esposti <expo@expobrain.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* The name of the contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from PIL import Image
import os
# Paths to the test fixtures that live alongside this module.
WEBP_IMAGE_FILE = os.path.join(os.path.dirname(__file__), "vancouver2.webp")
PNG_IMAGE_FILE = os.path.join(os.path.dirname(__file__), "vancouver2.png")
DECODE_FILENAME = os.path.join(os.path.dirname(__file__), "decode_{0}.png")
ENCODE_FILENAME = os.path.join(os.path.dirname(__file__), "encode_{0}.webp")
# Use open() instead of the Python 2-only file() builtin, and close the
# handle deterministically instead of leaking it until GC.
with open(WEBP_IMAGE_FILE, "rb") as _webp_file:
    WEBP_IMAGE_DATA = bytearray(_webp_file.read())
# NOTE(review): Image.tostring() was removed in modern Pillow in favour of
# Image.tobytes(); kept as-is because this module otherwise targets the
# legacy PIL API — confirm against the pinned PIL/Pillow version.
PNG_BITMAP_DATA = bytearray(Image.open(PNG_IMAGE_FILE).tostring())
# Known dimensions of the vancouver2 fixture image.
IMAGE_WIDTH = 644
IMAGE_HEIGHT = 484
/*-------------------------------------------------------------------------
*
* pg_shdepend.h
* definition of the "shared dependency" system catalog (pg_shdepend)
*
* pg_shdepend has no preloaded contents, so there is no pg_shdepend.dat
* file; dependencies for system-defined objects are loaded into it
* on-the-fly during initdb. Most built-in objects are pinned anyway,
* and hence need no explicit entries in pg_shdepend.
*
* NOTE: we do not represent all possible dependency pairs in pg_shdepend;
* for example, there's not much value in creating an explicit dependency
* from a relation to its database. Currently, only dependencies on roles
* are explicitly stored in pg_shdepend.
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/catalog/pg_shdepend.h
*
* NOTES
* The Catalog.pm module reads this file and derives schema
* information.
*
*-------------------------------------------------------------------------
*/
#ifndef PG_SHDEPEND_H
#define PG_SHDEPEND_H
#include "catalog/genbki.h"
#include "catalog/pg_shdepend_d.h" /* IWYU pragma: export */
/* ----------------
* pg_shdepend definition. cpp turns this into
* typedef struct FormData_pg_shdepend
* ----------------
*/
/*
 * Each row records that the object (classid, objid, objsubid) in database
 * dbid depends on the shared object (refclassid, refobjid), with semantics
 * given by deptype.
 */
CATALOG(pg_shdepend,1214,SharedDependRelationId) BKI_SHARED_RELATION
{
	/*
	 * Identification of the dependent (referencing) object.
	 *
	 * Note that dbid can be zero to denote a shared object.
	 */
	Oid			dbid BKI_LOOKUP_OPT(pg_database);	/* OID of database
													 * containing object */
	Oid			classid BKI_LOOKUP(pg_class);	/* OID of table containing
												 * object */
	Oid			objid;			/* OID of object itself */
	int32		objsubid;		/* column number, or 0 if not used */
	/*
	 * Identification of the independent (referenced) object.  This is always
	 * a shared object, so we need no database ID field.  We don't bother with
	 * a sub-object ID either.
	 */
	Oid			refclassid BKI_LOOKUP(pg_class);	/* OID of table containing
													 * object */
	Oid			refobjid;		/* OID of object itself */
	/*
	 * Precise semantics of the relationship are specified by the deptype
	 * field.  See SharedDependencyType in catalog/dependency.h.
	 */
	char		deptype;		/* see codes in dependency.h */
} FormData_pg_shdepend;
/* ----------------
 * Form_pg_shdepend corresponds to a pointer to a row with
 * the format of pg_shdepend relation.
 * ----------------
 */
typedef FormData_pg_shdepend *Form_pg_shdepend;
/* non-unique index for looking up all dependencies of a given object */
DECLARE_INDEX(pg_shdepend_depender_index, 1232, SharedDependDependerIndexId, pg_shdepend, btree(dbid oid_ops, classid oid_ops, objid oid_ops, objsubid int4_ops));
/* non-unique index for looking up all dependents of a shared object */
DECLARE_INDEX(pg_shdepend_reference_index, 1233, SharedDependReferenceIndexId, pg_shdepend, btree(refclassid oid_ops, refobjid oid_ops));
#endif							/* PG_SHDEPEND_H */
from __future__ import unicode_literals
from datetime import datetime
from operator import attrgetter
from django.db.models import F
from django.test import TestCase
from .models import Article, Author, Reference
class OrderingTests(TestCase):
def setUp(self):
self.a1 = Article.objects.create(
headline="Article 1", pub_date=datetime(2005, 7, 26)
)
self.a2 = Article.objects.create(
headline="Article 2", pub_date=datetime(2005, 7, 27)
)
self.a3 = Article.objects.create(
headline="Article 3", pub_date=datetime(2005, 7, 27)
)
self.a4 = Article.objects.create(
headline="Article 4", pub_date=datetime(2005, 7, 28)
)
def test_default_ordering(self):
"""
By default, Article.objects.all() orders by pub_date descending, then
headline ascending.
"""
self.assertQuerysetEqual(
Article.objects.all(), [
"Article 4",
"Article 2",
"Article 3",
"Article 1",
],
attrgetter("headline")
)
# Getting a single item should work too:
self.assertEqual(Article.objects.all()[0], self.a4)
def test_default_ordering_override(self):
"""
Override ordering with order_by, which is in the same format as the
ordering attribute in models.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline"), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.order_by("pub_date", "-headline"), [
"Article 1",
"Article 3",
"Article 2",
"Article 4",
],
attrgetter("headline")
)
def test_order_by_override(self):
"""
Only the last order_by has any effect (since they each override any
previous ordering).
"""
self.assertQuerysetEqual(
Article.objects.order_by("id"), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.order_by("id").order_by("-headline"), [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_stop_slicing(self):
"""
Use the 'stop' part of slicing notation to limit the results.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline")[:2], [
"Article 1",
"Article 2",
],
attrgetter("headline")
)
def test_stop_start_slicing(self):
"""
Use the 'stop' and 'start' parts of slicing notation to offset the
result list.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline")[1:3], [
"Article 2",
"Article 3",
],
attrgetter("headline")
)
def test_random_ordering(self):
"""
Use '?' to order randomly.
"""
self.assertEqual(
len(list(Article.objects.order_by("?"))), 4
)
def test_reversed_ordering(self):
"""
Ordering can be reversed using the reverse() method on a queryset.
This allows you to extract things like "the last two items" (reverse
and then take the first two).
"""
self.assertQuerysetEqual(
Article.objects.all().reverse()[:2], [
"Article 1",
"Article 3",
],
attrgetter("headline")
)
def test_reverse_ordering_pure(self):
qs1 = Article.objects.order_by(F('headline').asc())
qs2 = qs1.reverse()
self.assertQuerysetEqual(
qs1, [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
qs2, [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_extra_ordering(self):
"""
Ordering can be based on fields included from an 'extra' clause
"""
self.assertQuerysetEqual(
Article.objects.extra(select={"foo": "pub_date"}, order_by=["foo", "headline"]), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
def test_extra_ordering_quoting(self):
"""
If the extra clause uses an SQL keyword for a name, it will be
protected by quoting.
"""
self.assertQuerysetEqual(
Article.objects.extra(select={"order": "pub_date"}, order_by=["order", "headline"]), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
def test_extra_ordering_with_table_name(self):
self.assertQuerysetEqual(
Article.objects.extra(order_by=['ordering_article.headline']), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.extra(order_by=['-ordering_article.headline']), [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_order_by_pk(self):
"""
Ensure that 'pk' works as an ordering option in Meta.
Refs #8291.
"""
Author.objects.create(pk=1)
Author.objects.create(pk=2)
Author.objects.create(pk=3)
Author.objects.create(pk=4)
self.assertQuerysetEqual(
Author.objects.all(), [
4, 3, 2, 1
],
attrgetter("pk")
)
    def test_order_by_fk_attname(self):
        """
        Ensure that ordering by a foreign key by its attribute name prevents
        the query from inheriting its related model ordering option.
        Refs #19195.
        """
        # Assign authors so author_id order is the reverse of headline order:
        # a4 gets author pk=1, a3 pk=2, a2 pk=3, a1 pk=4.
        for i in range(1, 5):
            author = Author.objects.create(pk=i)
            article = getattr(self, "a%d" % (5 - i))
            article.author = author
            article.save(update_fields={'author'})
        self.assertQuerysetEqual(
            Article.objects.order_by('author_id'), [
                "Article 4",
                "Article 3",
                "Article 2",
                "Article 1",
            ],
            attrgetter("headline")
        )
def test_order_by_f_expression(self):
self.assertQuerysetEqual(
Article.objects.order_by(F('headline')), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.order_by(F('headline').asc()), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.order_by(F('headline').desc()), [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_order_by_f_expression_duplicates(self):
"""
A column may only be included once (the first occurrence) so we check
to ensure there are no duplicates by inspecting the SQL.
"""
qs = Article.objects.order_by(F('headline').asc(), F('headline').desc())
sql = str(qs.query).upper()
fragment = sql[sql.find('ORDER BY'):]
self.assertEqual(fragment.count('HEADLINE'), 1)
self.assertQuerysetEqual(
qs, [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
qs = Article.objects.order_by(F('headline').desc(), F('headline').asc())
sql = str(qs.query).upper()
fragment = sql[sql.find('ORDER BY'):]
self.assertEqual(fragment.count('HEADLINE'), 1)
self.assertQuerysetEqual(
qs, [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_related_ordering_duplicate_table_reference(self):
"""
An ordering referencing a model with an ordering referencing a model
multiple time no circular reference should be detected (#24654).
"""
first_author = Author.objects.create()
second_author = Author.objects.create()
self.a1.author = first_author
self.a1.second_author = second_author
self.a1.save()
self.a2.author = second_author
self.a2.second_author = first_author
self.a2.save()
r1 = Reference.objects.create(article_id=self.a1.pk)
r2 = Reference.objects.create(article_id=self.a2.pk)
self.assertQuerysetEqual(Reference.objects.all(), [r2, r1], lambda x: x) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 1 16:54:05 2016
@author: alex
"""
import numpy as np
def main():
    """Build the benzene fragment: site positions, adjacency lists and
    atomic numbers, in that order."""
    positions = generate_benzene_sites()
    neighbor_lists, atomic_numbers = get_neighbors()
    return positions, neighbor_lists, atomic_numbers
def generate_benzene_sites():
    """Generate the locations of a benzene ring, one without hydrogen for
    attachment to something else.

    Returns a list of ten 3-vectors in the z=0 plane: six carbons followed
    by four hydrogens (the C1/C6 positions carry no hydrogen).
    """
    cc_bond = 1.45  # carbon-carbon bond length
    half_angle = 120 * np.pi / 180.0 / 2.0  # half the 120-degree ring angle
    cos_a = np.cos(half_angle)
    sin_a = np.sin(half_angle)

    c1 = np.array([0., 0., 0.])
    c2 = c1 + np.array([cc_bond * cos_a, cc_bond * sin_a, 0.])
    c3 = c2 + np.array([cc_bond, 0., 0.])
    c4 = c1 + np.array([cc_bond * cos_a, -cc_bond * sin_a, 0.])
    c5 = c4 + np.array([cc_bond, 0., 0.])
    c6 = c5 + np.array([cc_bond * cos_a, cc_bond * sin_a, 0.])

    ch_bond = 1.15  # carbon-hydrogen bond length
    h2 = c2 + np.array([-ch_bond * cos_a, ch_bond * sin_a, 0.])
    h3 = c3 + np.array([ch_bond * cos_a, ch_bond * sin_a, 0.])
    h5 = c5 + np.array([ch_bond * cos_a, -ch_bond * sin_a, 0.])
    h4 = c4 + np.array([-ch_bond * cos_a, -ch_bond * sin_a, 0.])
    return [c1, c2, c3, c4, c5, c6, h2, h3, h4, h5]
def get_neighbors():
    """Return (neighbor lists, atomic numbers) for the 10-site fragment.

    Sites 0-5 are carbons (Z=6), sites 6-9 are hydrogens (Z=1); each
    hydrogen is bonded to exactly one ring carbon.
    """
    neighbor_lists = [
        [1, 3],     # C1: bonded to C2, C4 (open attachment site)
        [0, 2, 6],  # C2: C1, C3, H2
        [1, 5, 7],  # C3: C2, C6, H3
        [0, 4, 8],  # C4: C1, C5, H4
        [3, 5, 9],  # C5: C4, C6, H5
        [2, 4],     # C6: C3, C5 (open attachment site)
        [1],        # H2
        [2],        # H3
        [3],        # H4
        [4],        # H5
    ]
    atomic_numbers = np.concatenate(
        (np.full(6, 6, dtype=int), np.full(4, 1, dtype=int))
    )
    return neighbor_lists, atomic_numbers
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import importlib
import re
import pytest
from airflow.exceptions import RemovedInAirflow4Warning
from airflow.security import permissions
def test_permissions_import_warns() -> None:
    """Ensures that imports of `airflow.security.permissions` trigger a `RemovedInAirflow4Warning`."""
    expected_message = re.escape(
        "The airflow.security.permissions module is deprecated"
    )
    with pytest.warns(RemovedInAirflow4Warning, match=expected_message):
        importlib.reload(permissions)
import math
from itertools import product
import numpy as np
import pytest
from scipy.sparse import rand as sparse_rand
from sklearn import clone, datasets, manifold, neighbors, pipeline, preprocessing
from sklearn.datasets import make_blobs
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.utils._testing import (
assert_allclose,
assert_allclose_dense_sparse,
assert_array_equal,
)
from sklearn.utils.fixes import CSR_CONTAINERS
eigen_solvers = ["auto", "dense", "arpack"]
path_methods = ["auto", "FW", "D"]
def create_sample_data(dtype, n_pts=25, add_noise=False):
    """Return an (n_pts, 2) grid of equidistant 2D points of the given dtype.

    With add_noise=True a third column of small seeded Gaussian noise is
    appended, giving shape (n_pts, 3).
    """
    side = int(math.sqrt(n_pts))
    grid = [(i, j) for i in range(side) for j in range(side)]
    X = np.array(grid).astype(dtype, copy=False)
    if add_noise:
        # Deterministic noise in a third dimension.
        rng = np.random.RandomState(0)
        noise = 0.1 * rng.randn(n_pts, 1).astype(dtype, copy=False)
        X = np.concatenate((X, noise), 1)
    return X
@pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)])
@pytest.mark.parametrize("eigen_solver", eigen_solvers)
@pytest.mark.parametrize("path_method", path_methods)
def test_isomap_simple_grid(
    global_dtype, n_neighbors, radius, eigen_solver, path_method
):
    # Isomap should preserve distances when all neighbors are used
    n_pts = 25
    X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=False)
    # distances from each point to all others
    if n_neighbors is not None:
        G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance")
    else:
        G = neighbors.radius_neighbors_graph(X, radius, mode="distance")
    clf = manifold.Isomap(
        n_neighbors=n_neighbors,
        radius=radius,
        n_components=2,
        eigen_solver=eigen_solver,
        path_method=path_method,
    )
    clf.fit(X)
    # Rebuild the same neighbor graph in the embedded space; with every
    # point a neighbor of every other (k=24 of 25, or radius=inf), the two
    # graphs should coincide.
    if n_neighbors is not None:
        G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance")
    else:
        G_iso = neighbors.radius_neighbors_graph(
            clf.embedding_, radius, mode="distance"
        )
    # float32 input needs a looser absolute tolerance than float64.
    atol = 1e-5 if global_dtype == np.float32 else 0
    assert_allclose_dense_sparse(G, G_iso, atol=atol)
@pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)])
@pytest.mark.parametrize("eigen_solver", eigen_solvers)
@pytest.mark.parametrize("path_method", path_methods)
def test_isomap_reconstruction_error(
    global_dtype, n_neighbors, radius, eigen_solver, path_method
):
    """reconstruction_error() must agree with the error computed by hand
    from the centered input/output distance kernels."""
    if global_dtype is np.float32:
        # Fix: the original implicit string concatenation was missing a
        # space ("...dataf rom..."), garbling the skip reason.
        pytest.skip(
            "Skipping test due to numerical instabilities on float32 data "
            "from KernelCenterer used in the reconstruction_error method"
        )
    # Same setup as in test_isomap_simple_grid, with an added dimension
    n_pts = 25
    X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=True)
    # compute input kernel
    if n_neighbors is not None:
        G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance").toarray()
    else:
        G = neighbors.radius_neighbors_graph(X, radius, mode="distance").toarray()
    centerer = preprocessing.KernelCenterer()
    K = centerer.fit_transform(-0.5 * G**2)
    clf = manifold.Isomap(
        n_neighbors=n_neighbors,
        radius=radius,
        n_components=2,
        eigen_solver=eigen_solver,
        path_method=path_method,
    )
    clf.fit(X)
    # compute output kernel
    if n_neighbors is not None:
        G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance")
    else:
        G_iso = neighbors.radius_neighbors_graph(
            clf.embedding_, radius, mode="distance"
        )
    G_iso = G_iso.toarray()
    K_iso = centerer.fit_transform(-0.5 * G_iso**2)
    # make sure error agrees
    reconstruction_error = np.linalg.norm(K - K_iso) / n_pts
    atol = 1e-5 if global_dtype == np.float32 else 0
    assert_allclose(reconstruction_error, clf.reconstruction_error(), atol=atol)
@pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 0.5)])
def test_transform(global_dtype, n_neighbors, radius):
    # Out-of-sample transform() of slightly noisy points should land close
    # to the original embedding.
    n_samples = 200
    n_components = 10
    noise_scale = 0.01
    # Create S-curve dataset
    X, y = datasets.make_s_curve(n_samples, random_state=0)
    X = X.astype(global_dtype, copy=False)
    # Compute isomap embedding
    iso = manifold.Isomap(
        n_components=n_components, n_neighbors=n_neighbors, radius=radius
    )
    X_iso = iso.fit_transform(X)
    # Re-embed a noisy version of the points
    rng = np.random.RandomState(0)
    noise = noise_scale * rng.randn(*X.shape)
    X_iso2 = iso.transform(X + noise)
    # Make sure the rms error on re-embedding is comparable to noise_scale
    assert np.sqrt(np.mean((X_iso - X_iso2) ** 2)) < 2 * noise_scale
@pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 10.0)])
def test_pipeline(n_neighbors, radius, global_dtype):
    # check that Isomap works fine as a transformer in a Pipeline
    # only checks that no error is raised.
    # TODO check that it actually does something useful
    X, y = datasets.make_blobs(random_state=0)
    X = X.astype(global_dtype, copy=False)
    clf = pipeline.Pipeline(
        [
            ("isomap", manifold.Isomap(n_neighbors=n_neighbors, radius=radius)),
            ("clf", neighbors.KNeighborsClassifier()),
        ]
    )
    clf.fit(X, y)
    # A sane embedding of separated blobs should classify nearly perfectly.
    assert 0.9 < clf.score(X, y)
def test_pipeline_with_nearest_neighbors_transformer(global_dtype):
    # Test chaining NearestNeighborsTransformer and Isomap with
    # neighbors_algorithm='precomputed'
    algorithm = "auto"
    n_neighbors = 10
    X, _ = datasets.make_blobs(random_state=0)
    X2, _ = datasets.make_blobs(random_state=1)
    X = X.astype(global_dtype, copy=False)
    X2 = X2.astype(global_dtype, copy=False)
    # compare the chained version and the compact version
    est_chain = pipeline.make_pipeline(
        neighbors.KNeighborsTransformer(
            n_neighbors=n_neighbors, algorithm=algorithm, mode="distance"
        ),
        manifold.Isomap(n_neighbors=n_neighbors, metric="precomputed"),
    )
    est_compact = manifold.Isomap(
        n_neighbors=n_neighbors, neighbors_algorithm=algorithm
    )
    Xt_chain = est_chain.fit_transform(X)
    Xt_compact = est_compact.fit_transform(X)
    assert_allclose(Xt_chain, Xt_compact)
    # X2 exercises the out-of-sample (transform) path of both estimators.
    Xt_chain = est_chain.transform(X2)
    Xt_compact = est_compact.transform(X2)
    assert_allclose(Xt_chain, Xt_compact)
@pytest.mark.parametrize(
    "metric, p, is_euclidean",
    [
        ("euclidean", 2, True),
        ("manhattan", 1, False),
        ("minkowski", 1, False),
        ("minkowski", 2, True),
        (lambda x1, x2: np.sqrt(np.sum(x1**2 + x2**2)), 2, False),
    ],
)
def test_different_metric(global_dtype, metric, p, is_euclidean):
    # Isomap must accept various metric parameters and default to euclidean;
    # only euclidean-equivalent metrics reproduce the default embedding.
    X, _ = datasets.make_blobs(random_state=0)
    X = X.astype(global_dtype, copy=False)
    reference = manifold.Isomap().fit_transform(X)
    embedding = manifold.Isomap(metric=metric, p=p).fit_transform(X)
    if is_euclidean:
        assert_allclose(embedding, reference)
    else:
        # Non-euclidean metrics must actually change the result.
        with pytest.raises(AssertionError, match="Not equal to tolerance"):
            assert_allclose(embedding, reference)
def test_isomap_clone_bug():
    # Regression test for #6062: set_params() on an already-fitted estimator
    # must take effect on the next fit.
    model = manifold.Isomap()
    for k in (10, 15, 20):
        model.set_params(n_neighbors=k)
        model.fit(np.random.rand(50, 2))
        assert model.nbrs_.n_neighbors == k
@pytest.mark.parametrize("eigen_solver", eigen_solvers)
@pytest.mark.parametrize("path_method", path_methods)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_input(
    global_dtype, eigen_solver, path_method, global_random_seed, csr_container
):
    # Sparse and dense representations of the same data must embed alike.
    # TODO: compare results on dense and sparse data as proposed in:
    # https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186
    X = csr_container(
        sparse_rand(
            100,
            3,
            density=0.1,
            format="csr",
            dtype=global_dtype,
            random_state=global_random_seed,
        )
    )
    iso_dense = manifold.Isomap(
        n_components=2,
        eigen_solver=eigen_solver,
        path_method=path_method,
        n_neighbors=8,
    )
    iso_sparse = clone(iso_dense)
    X_trans_dense = iso_dense.fit_transform(X.toarray())
    X_trans_sparse = iso_sparse.fit_transform(X)
    assert_allclose(X_trans_sparse, X_trans_dense, rtol=1e-4, atol=1e-4)
def test_isomap_fit_precomputed_radius_graph(global_dtype):
    # Isomap.fit_transform must yield similar result when using
    # a precomputed distance matrix.
    X, y = datasets.make_s_curve(200, random_state=0)
    X = X.astype(global_dtype, copy=False)
    radius = 10
    # Sparse precomputed radius-neighbors graph vs. raw coordinates.
    g = neighbors.radius_neighbors_graph(X, radius=radius, mode="distance")
    isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="precomputed")
    isomap.fit(g)
    precomputed_result = isomap.embedding_
    isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="minkowski")
    result = isomap.fit_transform(X)
    # float32 input needs a looser absolute tolerance than float64.
    atol = 1e-5 if global_dtype == np.float32 else 0
    assert_allclose(precomputed_result, result, atol=atol)
def test_isomap_fitted_attributes_dtype(global_dtype):
    """Check that the fitted attributes are stored accordingly to the
    data type of X."""
    X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype)
    model = manifold.Isomap(n_neighbors=2)
    model.fit(X)
    for fitted_attribute in (model.dist_matrix_, model.embedding_):
        assert fitted_attribute.dtype == global_dtype
def test_isomap_dtype_equivalence():
    """Check the equivalence of the results with 32 and 64 bits input."""
    fitted = {}
    for dtype in (np.float32, np.float64):
        data = np.array([[1, 2], [3, 4], [5, 6]], dtype=dtype)
        estimator = manifold.Isomap(n_neighbors=2)
        estimator.fit(data)
        fitted[dtype] = estimator
    assert_allclose(
        fitted[np.float32].dist_matrix_, fitted[np.float64].dist_matrix_
    )
def test_isomap_raise_error_when_neighbor_and_radius_both_set():
    # Supplying both n_neighbors and radius is ambiguous and must raise.
    X, _ = datasets.load_digits(return_X_y=True)
    estimator = manifold.Isomap(n_neighbors=3, radius=5.5)
    with pytest.raises(ValueError, match="Both n_neighbors and radius are provided"):
        estimator.fit_transform(X)
def test_multiple_connected_components():
    # Test that a warning is raised when the graph has multiple components
    # ({0, 1, 2} and {5, 6, 7} are disconnected with n_neighbors=2).
    X = np.array([0, 1, 2, 5, 6, 7])[:, None]
    with pytest.warns(UserWarning, match="number of connected components"):
        manifold.Isomap(n_neighbors=2).fit(X)
def test_multiple_connected_components_metric_precomputed(global_dtype):
    # Test that an error is raised when the graph has multiple components
    # and when X is a precomputed neighbors graph.
    X = np.array([0, 1, 2, 5, 6, 7])[:, None].astype(global_dtype, copy=False)
    # works with a precomputed distance matrix (dense)
    X_distances = pairwise_distances(X)
    with pytest.warns(UserWarning, match="number of connected components"):
        manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_distances)
    # does not work with a precomputed neighbors graph (sparse)
    X_graph = neighbors.kneighbors_graph(X, n_neighbors=2, mode="distance")
    with pytest.raises(RuntimeError, match="number of connected components"):
        manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_graph)
def test_get_feature_names_out():
    """Check get_feature_names_out for Isomap."""
    X, y = make_blobs(random_state=0, n_features=4)
    n_components = 2
    iso = manifold.Isomap(n_components=n_components)
    iso.fit_transform(X)
    expected = [f"isomap{i}" for i in range(n_components)]
    assert_array_equal(expected, iso.get_feature_names_out())
"""Code for html rendering
"""
import sys
# Import the html parser code, maintaining compatibility with older versions of Python
# Python 3 provides html.parser; Python 2 provided htmllib.
try:
    oldpython = False
    from html.parser import HTMLParser
    htmllib = None
except ImportError:
    # Python 2 fallback.  NOTE(review): the original left ``oldpython`` as
    # False on this path even though the name implies it should flag the
    # legacy interpreter; set it to True here.
    oldpython = True
    import htmllib
    from htmllib import HTMLParser
import re
import pygame
from pygame.locals import *
from pgu import gui
# Map HTML align/valign attribute values to pgu's -1/0/1 alignment codes.
_amap = {'left':-1,'right':1,'center':0,None:None,'':None,}
_vamap = {'top':-1,'bottom':1,'center':0,'middle':0,None:None,'':None,}
# Used by the HTML parser to load external resources (like images). This
# class loads content from the local file system. But you can pass your own
# resource loader to the HTML parser to find images by other means.
class ResourceLoader(object):
    """Default resource loader used by the HTML parser to fetch external
    resources (images) from the local file system.  Pass an object with the
    same interface to the parser to locate images by other means."""

    # Fix: use the conventional ``self`` instead of ``this``.
    def load_image(self, path):
        """Load the image at *path* and return it as a pygame image."""
        return pygame.image.load(path)
class _dummy:
    # Bare attribute container (used as a stand-in style object by _flush).
    pass
class _flush:
    """Sink widget: absorbs add()/space() calls and exposes a blank style."""

    def __init__(self):
        self.style = _dummy()
        self.style.font = None
        self.style.color = None
        self.cls = None

    def add(self, w):
        # Discard the widget.
        pass

    def space(self, v):
        # Discard the spacing request.
        pass
class _hr(gui.Color):
    """Horizontal rule: a black gui.Color block that stretches to at least
    the requested width/height, never smaller than 1x1."""

    def __init__(self, **params):
        gui.Color.__init__(self, (0, 0, 0), **params)

    def resize(self, width=None, height=None):
        # Removed the commented-out sizing experiments; behavior unchanged.
        w, h = self.style.width, self.style.height
        if width is not None:
            w = max(w, width)
        if height is not None:
            h = max(h, height)
        return max(w, 1), max(h, 1)
class _html(HTMLParser):
def init(self,doc,font,color,_globals,_locals,loader=None):
self.mystack = []
self.document = doc
if (loader):
self.loader = loader
else:
# Use the default resource loader
self.loader = ResourceLoader()
self.myopen('document',self.document)
self.myfont = self.font = font
self.mycolor = self.color = color
self.form = None
self._globals = _globals
self._locals = _locals
    def myopen(self,type_,w):
        # Push (type, widget) onto the open-element stack and make it the
        # current item; font/color fall back to the document defaults.
        self.mystack.append((type_,w))
        self.type,self.item = type_,w
        self.font = self.item.style.font
        self.color = self.item.style.color
        if not self.font: self.font = self.myfont
        if not self.color: self.color = self.mycolor
    def myclose(self,type_):
        # Pop frames until one of the given type has been popped, then pop
        # once more and re-open that parent frame as current.
        t = None
        self.mydone()
        while t != type_:
            #if len(self.mystack)==0: return
            t,w = self.mystack.pop()
        t,w = self.mystack.pop()
        self.myopen(t,w)
    def myback(self,type_):
        # Pop frames until a frame of one of the given types is found, and
        # re-open it as current (unlike myclose, it stays on the stack).
        if type(type_) == str: type_ = [type_,]
        self.mydone()
        #print 'myback',type_
        t = None
        while t not in type_:
            #if len(self.mystack)==0: return
            t,w = self.mystack.pop()
        self.myopen(t,w)
    def mydone(self):
        #clearing out the last </p>
        if not hasattr(self.item,'layout'): return
        if len(self.item.layout._widgets) == 0: return
        w = self.item.layout._widgets[-1]
        # A trailing tuple in the layout is a pending line break; drop it.
        if type(w) == tuple:
            del self.item.layout._widgets[-1]
def start_b(self,attrs): self.font.set_bold(1)
def end_b(self): self.font.set_bold(0)
def start_i(self,attrs): self.font.set_italic(1)
def end_i(self): self.font.set_italic(0)
def start_u(self,attrs): self.font.set_underline(1)
def end_u(self): self.font.set_underline(0)
def start_br(self,attrs): self.do_br(attrs)
def do_br(self,attrs): self.item.br(self.font.size(" ")[1])
def attrs_to_map(self,attrs):
k = None
r = {}
for k,v in attrs: r[k] = v
return r
def map_to_params(self,r):
anum = re.compile("\D")
params = {'style':{}}
style = params['style']
if 'bgcolor' in r:
style['background'] = gui.parse_color(r['bgcolor'])
if 'background' in r:
style['background'] = self.loader.load_image(r['background'])
if 'border' in r: style['border'] = int(r['border'])
for k in ['width','height','colspan','rowspan','size','min','max']:
if k in r: params[k] = int(anum.sub("",r[k]))
for k in ['name','value']:
if k in r: params[k] = r[k]
if 'class' in r: params['cls'] = r['class']
if 'align' in r:
params['align'] = _amap[r['align']]
if 'valign' in r:
params['valign'] = _vamap[r['valign']]
if 'style' in r:
for st in r['style'].split(";"):
#print st
if ":" in st:
#print st.split(":")
k,v = st.split(":")
k = k.replace("-","_")
k = k.replace(" ","")
v = v.replace(" ","")
if k == 'color' or k == 'border_color' or k == 'background':
v = gui.parse_color(v)
else:
v = int(anum.sub("",v))
style[k] = v
return params
    def map_to_connects(self,e,r):
        # Wire on* HTML attributes to gui events; the attribute value is a
        # Python snippet run by self.myexec when the event fires.
        for k,evt in [('onclick',gui.CLICK),('onchange',gui.CHANGE)]: #blah blah blah
            if k in r:
                #print k,r[k]
                e.connect(evt,self.myexec,(e,r[k]))
    def start_p(self,attrs):
        r = self.attrs_to_map(attrs)
        align = r.get("align","left")
        self.check_p()
        self.item.block(_amap[align])
    def check_p(self):
        # Emit a line break only when the current layout row has content.
        if len(self.item.layout._widgets) == 0: return
        if type(self.item.layout._widgets[-1]) == tuple:
            w,h = self.item.layout._widgets[-1]
            if w == 0: return
        self.do_br(None)
    def end_p(self):
        #print 'end p'
        self.check_p()
    def start_block(self,t,attrs,align=-1):
        # Open a nested Document styled with CSS class "t" or "t.cls".
        r = self.attrs_to_map(attrs)
        params = self.map_to_params(r)
        if 'cls' in params: params['cls'] = t+"."+params['cls']
        else: params['cls'] = t
        b = gui.Document(**params)
        b.style.font = self.item.style.font
        if 'align' in params:
            align = params['align']
        self.item.block(align)
        self.item.add(b)
        self.myopen(t,b)
    def end_block(self,t):
        self.myclose(t)
        self.item.block(-1)
    # Structural tags all delegate to start_block/end_block with the tag
    # name as the CSS class; <center> is a centered <div>.
    def start_div(self,attrs): self.start_block('div',attrs)
    def end_div(self): self.end_block('div')
    def start_center(self,attrs): self.start_block('div',attrs,0)
    def end_center(self): self.end_block('div')
    def start_h1(self,attrs): self.start_block('h1',attrs)
    def end_h1(self): self.end_block('h1')
    def start_h2(self,attrs): self.start_block('h2',attrs)
    def end_h2(self): self.end_block('h2')
    def start_h3(self,attrs): self.start_block('h3',attrs)
    def end_h3(self): self.end_block('h3')
    def start_h4(self,attrs): self.start_block('h4',attrs)
    def end_h4(self): self.end_block('h4')
    def start_h5(self,attrs): self.start_block('h5',attrs)
    def end_h5(self): self.end_block('h5')
    def start_h6(self,attrs): self.start_block('h6',attrs)
    def end_h6(self): self.end_block('h6')
    def start_ul(self,attrs): self.start_block('ul',attrs)
    def end_ul(self): self.end_block('ul')
    def start_ol(self,attrs):
        self.start_block('ol',attrs)
        # The counter numbers subsequent <li> items.
        self.item.counter = 0
    def end_ol(self): self.end_block('ol')
    def start_li(self,attrs):
        # Re-anchor at the nearest list, then open the item block prefixed
        # with "N. " for <ol> (counter present) or "- " for <ul>.
        self.myback(['ul','ol'])
        cur = self.item
        self.start_block('li',attrs)
        if hasattr(cur,'counter'):
            cur.counter += 1
            self.handle_data("%d. "%cur.counter)
        else:
            self.handle_data("- ")
    #def end_li(self): self.end_block('li') #this isn't needed because of how the parser works
    def start_pre(self,attrs): self.start_block('pre',attrs)
    def end_pre(self): self.end_block('pre')
    def start_code(self,attrs): self.start_block('code',attrs)
    def end_code(self): self.end_block('code')
    def start_table(self,attrs):
        # Open a <table>: honor align by starting a new block in the parent.
        r = self.attrs_to_map(attrs)
        params = self.map_to_params(r)
        align = r.get("align","left")
        self.item.block(_amap[align])
        t = gui.Table(**params)
        self.item.add(t)
        self.myopen('table',t)
    def start_tr(self,attrs):
        # Rows belong to the nearest enclosing table, not the current cell.
        self.myback('table')
        self.item.tr()
    def _start_td(self,t,attrs):
        # Shared implementation for <td>/<th>: a nested Document in a cell.
        # NOTE(review): the computed params['cls'] is never passed to
        # gui.Document (it gets cls=t), only forwarded to td() — confirm
        # whether that is intentional.
        r = self.attrs_to_map(attrs)
        params = self.map_to_params(r)
        if 'cls' in params: params['cls'] = t+"."+params['cls']
        else: params['cls'] = t
        b = gui.Document(cls=t)
        self.myback('table')
        self.item.td(b,**params)
        self.myopen(t,b)
        self.font = self.item.style.font
        self.color = self.item.style.color
    def start_td(self,attrs):
        self._start_td('td',attrs)
    def start_th(self,attrs):
        self._start_td('th',attrs)
    def end_table(self):
        self.myclose('table')
        self.item.block(-1)
    def start_form(self,attrs):
        # Forms are not widgets: remember the gui.Form and expose it via the
        # parser's locals under the form's id (or None).
        r = self.attrs_to_map(attrs)
        e = self.form = gui.Form()
        e.groups = {}
        self._locals[r.get('id',None)] = e
    def start_input(self,attrs):
        # Create a form widget for <input>, dispatching on its type
        # attribute.  NOTE(review): an unrecognized type leaves ``e``
        # unbound, so the final _locals line would raise NameError — confirm
        # whether unknown input types can reach this handler.
        r = self.attrs_to_map(attrs)
        params = self.map_to_params(r) #why bother
        #params = {}
        type_,name,value = r.get('type','text'),r.get('name',None),r.get('value',None)
        f = self.form
        if type_ == 'text':
            e = gui.Input(**params)
            self.map_to_connects(e,r)
            self.item.add(e)
        elif type_ == 'radio':
            # Radios sharing a name share one gui.Group on the form.
            if name not in f.groups:
                f.groups[name] = gui.Group(name=name)
            g = f.groups[name]
            del params['name']
            e = gui.Radio(group=g,**params)
            self.map_to_connects(e,r)
            self.item.add(e)
            if 'checked' in r: g.value = value
        elif type_ == 'checkbox':
            # Same grouping scheme as radios.
            if name not in f.groups:
                f.groups[name] = gui.Group(name=name)
            g = f.groups[name]
            del params['name']
            e = gui.Checkbox(group=g,**params)
            self.map_to_connects(e,r)
            self.item.add(e)
            if 'checked' in r: g.value = value
        elif type_ == 'button':
            e = gui.Button(**params)
            self.map_to_connects(e,r)
            self.item.add(e)
        elif type_ == 'submit':
            e = gui.Button(**params)
            self.map_to_connects(e,r)
            self.item.add(e)
        elif type_ == 'file':
            # A text input plus a Browse button that opens a FileDialog and
            # copies the chosen path into the input.
            e = gui.Input(**params)
            self.map_to_connects(e,r)
            self.item.add(e)
            b = gui.Button(value='Browse...')
            self.item.add(b)
            def _browse(value):
                d = gui.FileDialog();
                d.connect(gui.CHANGE,gui.action_setvalue,(d,e))
                d.open();
            b.connect(gui.CLICK,_browse,None)
        self._locals[r.get('id',None)] = e
    def start_object(self,attrs):
        """Instantiate an arbitrary widget named by the tag's type attribute.

        SECURITY(review): the type attribute is interpolated straight into
        ``exec``, so rendering untrusted HTML executes arbitrary code.
        NOTE(review): relying on ``exec`` to bind the local ``e`` only
        works on Python 2; Python 3 exec cannot create function locals.
        """
        r = self.attrs_to_map(attrs)
        params = self.map_to_params(r)
        code = "e = %s(**params)"%r['type']
        #print code
        #print params
        exec(code)
        #print e
        #print e.style.width,e.style.height
        self.map_to_connects(e,r)
        self.item.add(e)
        self._locals[r.get('id',None)] = e
    def start_select(self,attrs):
        """Open a <select>: its <option> children attach via start_option."""
        r = self.attrs_to_map(attrs)
        params = {}
        name,value = r.get('name',None),r.get('value',None)
        e = gui.Select(name=name,value=value,**params)
        self.map_to_connects(e,r)
        self.item.add(e)
        self.myopen('select',e)
    def start_option(self,attrs):
        # Each option's label is a small sub-document added to the
        # enclosing select with the option's value.
        r = self.attrs_to_map(attrs)
        params = {} #style = self.map_to_style(r)
        self.myback('select')
        e = gui.Document(**params)
        self.item.add(e,value=r.get('value',None))
        self.myopen('option',e)
    def end_select(self):
        self.myclose('select')
    def start_hr(self,attrs):
        self.do_hr(attrs)
    def do_hr(self,attrs):
        """Insert a horizontal rule padded by half a line height."""
        # NOTE(review): "/" is integer division on Python 2; under
        # Python 3 this becomes a float padding value — confirm intent.
        h = self.font.size(" ")[1]/2
        r = self.attrs_to_map(attrs)
        params = self.map_to_params(r)
        params['style']['padding'] = h
        self.item.block(0)
        self.item.add(_hr(**params))
        self.item.block(-1)
    def anchor_begin(self,href,name,type_):
        # Anchors are accepted but not rendered as links.
        pass
    def anchor_end(self):
        pass
    def start_title(self,attrs): self.myopen('title',_flush())
    def end_title(self): self.myclose('title')
    def myexec(self,value):
        """Run inline script code with the widget bound as ``self``."""
        w,code = value
        g = self._globals
        l = self._locals
        l['self'] = w
        exec(code,g,l)
def handle_image(self,src,alt,ismap,align,width,height):
try:
w = gui.Image(self.loader.load_image(src))
if align != '':
self.item.add(w,_amap[align])
else:
self.item.add(w)
except:
print('handle_image: missing %s'%src)
    def handle_data(self,txt):
        """Render a run of document text word-by-word into the layout.

        Table text outside cells is dropped; pre/code blocks keep line
        structure; everything else is whitespace-collapsed and emitted as
        one image per word with inter-word spacing.
        """
        if self.type == 'table': return
        elif self.type in ('pre','code'):
            txt = txt.replace("\t","        ")
            ss = txt.split("\n")
            # A trailing newline yields an empty final chunk; drop it so
            # no spurious blank line is rendered.
            if ss[-1] == "": del ss[-1]
            for sentence in ss:
                img = self.font.render(sentence,1,self.color)
                w = gui.Image(img)
                self.item.add(w)
                self.item.block(-1)
            return
        # Collapse leading/trailing line breaks, then bail out if the text
        # was nothing but whitespace control characters.
        txt = re.compile("^[\t\r\n]+").sub("",txt)
        txt = re.compile("[\t\r\n]+$").sub("",txt)
        tst = re.compile("[\t\r\n]+").sub("",txt)
        if tst == "": return
        # NOTE(review): "\s+" should be a raw string (r"\s+") — emits a
        # DeprecationWarning on modern Pythons.
        txt = re.compile("\s+").sub(" ",txt)
        if txt == "": return
        if txt == " ":
            self.item.space(self.font.size(" "))
            return
        for word in txt.split(" "):
            word = word.replace(chr(160)," ") # non-breaking space -> space
            #print self.item.cls
            w = gui.Image(self.font.render(word,1,self.color))
            self.item.add(w)
            self.item.space(self.font.size(" "))
class HTML(gui.Document):
    """A gui HTML object.

    Arguments:
        data -- html data
        globals -- global variables (for scripting)
        locals -- local variables (for scripting)
        loader -- the resource loader

    You may access html elements that have an id via ``widget[id]``.
    """
    def __init__(self,data,globals=None,locals=None,loader=None,**params):
        gui.Document.__init__(self,**params)
        # This ensures that the whole HTML document is left-aligned within
        # the rendered surface.
        self.style.align = -1
        # NOTE(review): "== None" should be "is None" (PEP 8).
        _globals,_locals = globals,locals
        if _globals == None: _globals = {}
        if _locals == None: _locals = {}
        self._globals = _globals
        self._locals = _locals
        #font = gui.theme.get("label","","font")
        if (htmllib):
            # The htmllib-based parser constructor takes different args.
            p = _html(None, 0)
        else:
            p = _html()
        p.init(self,self.style.font,self.style.color,_globals,_locals,
            loader=loader)
        p.feed(data)
        p.close()
        p.mydone()
    def __getitem__(self,k):
        # Look up an element by its HTML id attribute.
        return self._locals[k]
    # Returns a box (pygame rectangle) surrounding all widgets in this
    # document.  NOTE(review): uses "this" instead of the conventional
    # "self", and sys.maxint is Python 2 only (sys.maxsize on Python 3).
    def get_bounding_box(this):
        minx = miny = sys.maxint
        maxx = maxy = -sys.maxint
        for e in this.layout.widgets:
            minx = min(minx, e.rect.left)
            miny = min(miny, e.rect.top)
            maxx = max(maxx, e.rect.right+1)
            maxy = max(maxy, e.rect.bottom+1)
        return pygame.Rect(minx, miny, maxx-minx, maxy-miny)
def render_ext(font, rect, text, aa, color, bgcolor=(0,0,0,0), **params):
    """Render html and return ``(surface, html_instance)``.

    rect -- -1 to auto-size to the content, an int to fix only the
            width, or a pygame Rect to fix both dimensions
    aa -- accepted for API symmetry with font.render callers; unused here
    """
    htm = HTML(text, font=font, color=color, **params)
    if (rect == -1):
        # Make the surface large enough to fit the rendered text.
        # NOTE(review): sys.maxint is Python 2 only (sys.maxsize on Py3).
        htm.resize(width=sys.maxint)
        (width, height) = htm.get_bounding_box().size
        # Now set the proper document width (computed from the bounding box)
        htm.resize(width=width)
    elif (type(rect) == int):
        # Fix the width of the document, while the height is variable
        width = rect
        height = htm.resize(width=width)[1]
    else:
        # Otherwise the width and height of the document is fixed
        (width, height) = rect.size
        htm.resize(width=width)
    # Now construct a surface and paint to it
    surf = pygame.Surface((width, height)).convert_alpha()
    surf.fill(bgcolor)
    htm.paint(surf)
    return (surf, htm)
def render(font, rect, text, aa, color, bgcolor=(0,0,0,0), **params):
    """Render html and return only the resulting surface."""
    surface, _document = render_ext(font, rect, text, aa, color, bgcolor, **params)
    return surface
def rendertrim(font,rect,text,aa,color,bgcolor=(0,0,0,0),**params):
    """Render html and trim the surface to the bounding box of its widgets."""
    surface, document = render_ext(font, rect, text, aa, color, bgcolor, **params)
    trimmed = surface.subsurface(document.get_bounding_box())
    return trimmed
def write(s,font,rect,text,aa=0,color=(0,0,0), **params):
"""Write html to a surface."""
htm = HTML(text, font=font, color=color, **params)
htm.resize(width=rect.w)
s = s.subsurface(rect)
htm.paint(s) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shortcut methods for getting set up with Google Cloud Datastore.
You'll typically use these to get started with the API:
>>> from gcloud import datastore
>>> datastore.set_defaults()
>>> key = datastore.Key('EntityKind', 1234)
>>> entity = datastore.Entity(key)
>>> query = datastore.Query(kind='EntityKind')
The main concepts with this API are:
- :class:`gcloud.datastore.connection.Connection`
which represents a connection between your machine and the Cloud Datastore
API.
- :class:`gcloud.datastore.entity.Entity`
which represents a single entity in the datastore
(akin to a row in relational database world).
- :class:`gcloud.datastore.key.Key`
which represents a pointer to a particular entity in the datastore
(akin to a unique identifier in relational database world).
- :class:`gcloud.datastore.query.Query`
which represents a lookup or search over the rows in the datastore.
- :class:`gcloud.datastore.transaction.Transaction`
which represents an all-or-none transaction and enables consistency
when race conditions may occur.
"""
import os
from gcloud import credentials
from gcloud.datastore import _implicit_environ
from gcloud.datastore.api import allocate_ids
from gcloud.datastore.api import delete
from gcloud.datastore.api import get
from gcloud.datastore.api import put
from gcloud.datastore.batch import Batch
from gcloud.datastore.connection import Connection
from gcloud.datastore.entity import Entity
from gcloud.datastore.key import Key
from gcloud.datastore.query import Query
from gcloud.datastore.transaction import Transaction
SCOPE = ('https://www.googleapis.com/auth/datastore',
'https://www.googleapis.com/auth/userinfo.email')
"""The scopes required for authenticating as a Cloud Datastore consumer."""
_DATASET_ENV_VAR_NAME = 'GCLOUD_DATASET_ID'
_GCD_DATASET_ENV_VAR_NAME = 'DATASTORE_DATASET'
def set_default_dataset_id(dataset_id=None):
    """Set default dataset ID either explicitly or implicitly as fall-back.

    In the implicit case the candidate sources are consulted in order of
    precedence:

    - GCLOUD_DATASET_ID environment variable
    - DATASTORE_DATASET environment variable (GCD tool)
    - Google App Engine application ID
    - Google Compute Engine project ID (from metadata server)

    :type dataset_id: string
    :param dataset_id: Optional. The dataset ID to use as default.

    :raises: :class:`EnvironmentError` if no dataset ID was implied.
    """
    if dataset_id is None:
        # Wrapped in lambdas so later probes (which may hit the GCE
        # metadata server) only run when the earlier ones fail.
        candidates = (
            lambda: os.getenv(_DATASET_ENV_VAR_NAME),
            lambda: os.getenv(_GCD_DATASET_ENV_VAR_NAME),
            lambda: _implicit_environ.app_engine_id(),
            lambda: _implicit_environ.compute_engine_id(),
        )
        for probe in candidates:
            dataset_id = probe()
            if dataset_id is not None:
                break
    if dataset_id is None:
        raise EnvironmentError('No dataset ID could be inferred.')
    _implicit_environ.DATASET_ID = dataset_id
def set_default_connection(connection=None):
    """Set default connection either explicitly or implicitly as fall-back.

    :type connection: :class:`gcloud.datastore.connection.Connection`
    :param connection: A connection provided to be the default.
    """
    # Mirrors the original truthiness test: any falsy value (not only
    # None) triggers the implicit fall-back.
    if not connection:
        connection = get_connection()
    _implicit_environ.CONNECTION = connection
def set_defaults(dataset_id=None, connection=None):
    """Set defaults either explicitly or implicitly as fall-back.

    Delegates to the individual default helpers:

    - set_default_dataset_id
    - set_default_connection

    :type dataset_id: string
    :param dataset_id: Optional. The dataset ID to use as default.

    :type connection: :class:`gcloud.datastore.connection.Connection`
    :param connection: A connection provided to be the default.
    """
    set_default_dataset_id(dataset_id)
    set_default_connection(connection)
def get_connection():
"""Shortcut method to establish a connection to the Cloud Datastore.
Use this if you are going to access several datasets
with the same set of credentials (unlikely):
>>> from gcloud import datastore
>>> connection = datastore.get_connection()
>>> key1 = datastore.Key('Kind', 1234, dataset_id='dataset1')
>>> key2 = datastore.Key('Kind', 1234, dataset_id='dataset2')
>>> entity1 = datastore.get(key1, connection=connection)
>>> entity2 = datastore.get(key2, connection=connection)
:rtype: :class:`gcloud.datastore.connection.Connection`
:returns: A connection defined with the proper credentials.
"""
implicit_credentials = credentials.get_credentials()
scoped_credentials = implicit_credentials.create_scoped(SCOPE)
return Connection(credentials=scoped_credentials) | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
# delete wrong gle entries created due to a bug in make_gl_entries of Account Controller
# when using payment reconciliation
res = webnotes.conn.sql_list("""select distinct gl1.voucher_no
from `tabGL Entry` gl1, `tabGL Entry` gl2
where
date(gl1.modified) >= "2013-03-11"
and date(gl1.modified) = date(gl2.modified)
and gl1.voucher_no = gl2.voucher_no
and gl1.voucher_type = "Journal Voucher"
and gl1.voucher_type = gl2.voucher_type
and gl1.posting_date = gl2.posting_date
and gl1.account = gl2.account
and ifnull(gl1.is_cancelled, 'No') = 'No' and ifnull(gl2.is_cancelled, 'No') = 'No'
and ifnull(gl1.against_voucher, '') = ifnull(gl2.against_voucher, '')
and ifnull(gl1.against_voucher_type, '') = ifnull(gl2.against_voucher_type, '')
and gl1.remarks = gl2.remarks
and ifnull(gl1.debit, 0) = ifnull(gl2.credit, 0)
and ifnull(gl1.credit, 0) = ifnull(gl2.debit, 0)
and gl1.name > gl2.name""")
for r in res:
webnotes.conn.sql("""update `tabGL Entry` set `is_cancelled`='Yes'
where voucher_type='Journal Voucher' and voucher_no=%s""", r)
jv = webnotes.bean("Journal Voucher", r)
jv.run_method("make_gl_entries") | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kfserving
from kfserving.models.v1beta1_transformer_config import V1beta1TransformerConfig # noqa: E501
from kfserving.rest import ApiException
class TestV1beta1TransformerConfig(unittest.TestCase):
    """Unit test stubs for the generated V1beta1TransformerConfig model."""

    def setUp(self):
        """No fixtures needed for these generated stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def make_instance(self, include_optional):
        """Build a V1beta1TransformerConfig instance.

        When ``include_optional`` is true both required and optional
        params are populated; otherwise only the required ones are.
        Both generated modes currently populate the same two fields, so
        the constructed objects are identical either way.
        """
        kwargs = {'default_image_version': '0', 'image': '0'}
        return V1beta1TransformerConfig(**kwargs)

    def testV1beta1TransformerConfig(self):
        """Smoke-test model construction in both modes."""
        self.make_instance(include_optional=False)
        self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2014-2022 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.utils.io.core.internal
@Suppress("RedundantModalityModifier")
internal class CharArraySequence(
    private val array: CharArray,
    private val offset: Int,
    final override val length: Int
) : CharSequence {
    final override fun get(index: Int): Char {
        if (index >= length) {
            indexOutOfBounds(index)
        }
        return array[index + offset]
    }

    final override fun subSequence(startIndex: Int, endIndex: Int): CharSequence {
        require(startIndex >= 0) { "startIndex shouldn't be negative: $startIndex" }
        require(startIndex <= length) { "startIndex is too large: $startIndex > $length" }
        // BUG FIX: the check used to be `startIndex + endIndex <= length`, which
        // wrongly rejected valid suffix ranges such as subSequence(2, length).
        // The CharSequence contract only requires endIndex <= length.
        require(endIndex <= length) { "endIndex is too large: $endIndex > $length" }
        require(endIndex >= startIndex) { "endIndex should be greater or equal to startIndex: $startIndex > $endIndex" }
        return CharArraySequence(array, offset + startIndex, endIndex - startIndex)
    }

    private fun indexOutOfBounds(index: Int): Nothing {
        throw IndexOutOfBoundsException("String index out of bounds: $index > $length")
    }
}
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Low-level locale data access.
:note: The `Locale` class, which uses this module under the hood, provides a
more convenient interface for accessing the locale data.
"""
import os
from babel.compat import pickle, DictMixin, PY3, u
try:
import threading
except ImportError:
import dummy_threading as threading
__all__ = ['exists', 'locale_identifiers', 'load']
__docformat__ = 'restructuredtext en'
_cache = {}
_cache_lock = threading.RLock()
_dirname = os.path.join(os.path.dirname(__file__), 'localedata')
def exists(name):
    """Check whether locale data is available for the given locale.

    :param name: the locale identifier string
    :return: `True` if the locale data exists, `False` otherwise
    :rtype: `bool`
    """
    # A cache hit means the data was already loaded, so it must exist;
    # otherwise probe for the on-disk pickle with that identifier.
    return name in _cache or os.path.exists(
        os.path.join(_dirname, '%s.dat' % name))
def locale_identifiers():
    """Return a list of all locale identifiers for which locale data is
    available.

    :return: a list of locale identifiers (strings)
    :rtype: `list`
    :since: version 0.8.1
    """
    identifiers = []
    for filename in os.listdir(_dirname):
        stem, extension = os.path.splitext(filename)
        # Every locale ships as "<identifier>.dat"; "root" holds shared
        # fallback data and is not a real locale, so it is excluded.
        if extension == '.dat' and stem != 'root':
            identifiers.append(stem)
    return identifiers
def load(name, merge_inherited=True):
    """Load the locale data for the given locale.

    The locale data is a dictionary that contains much of the data defined
    by the Common Locale Data Repository (CLDR), stored as a collection of
    pickle files inside the ``babel`` package.

    Note that the results are cached, and subsequent requests for the same
    locale return the same dictionary — callers must not mutate it.

    :param name: the locale identifier string (or "root")
    :param merge_inherited: whether the inherited data should be merged into
                            the data of the requested locale
    :return: the locale data
    :rtype: `dict`
    :raise `IOError`: if no locale data file is found for the given locale
                      identifier, or one of the locales it inherits from
    """
    # An RLock is required (not a plain Lock) because the inheritance
    # walk below re-enters load() recursively on the same thread.
    _cache_lock.acquire()
    try:
        data = _cache.get(name)
        if not data:
            # Load inherited data
            if name == 'root' or not merge_inherited:
                data = {}
            else:
                # The parent locale is the identifier minus its last
                # "_"-separated part; a bare language falls back to root.
                parts = name.split('_')
                if len(parts) == 1:
                    parent = 'root'
                else:
                    parent = '_'.join(parts[:-1])
                data = load(parent).copy()
            filename = os.path.join(_dirname, '%s.dat' % name)
            fileobj = open(filename, 'rb')
            try:
                if name != 'root' and merge_inherited:
                    # Overlay this locale's data on the inherited copy.
                    merge(data, pickle.load(fileobj))
                else:
                    data = pickle.load(fileobj)
                _cache[name] = data
            finally:
                fileobj.close()
        return data
    finally:
        _cache_lock.release()
def merge(dict1, dict2):
    """Merge the data from `dict2` into the `dict1` dictionary, making copies
    of nested dictionaries.

    >>> d = {1: 'foo', 3: 'baz'}
    >>> merge(d, {1: 'Foo', 2: 'Bar'})
    >>> items = sorted(d.items()); items
    [(1, 'Foo'), (2, 'Bar'), (3, 'baz')]

    :param dict1: the dictionary to merge into
    :param dict2: the dictionary containing the data that should be merged
    """
    for key, incoming in dict2.items():
        if incoming is None:
            # None values never override existing data.
            continue
        current = dict1.get(key)
        if not isinstance(incoming, dict):
            # Plain values simply replace whatever was there before.
            dict1[key] = incoming
            continue
        # Nested dictionary: merge recursively, copying so that shared
        # nested structures of either input are never mutated.
        if current is None:
            current = {}
        if isinstance(current, Alias):
            # Keep the alias and attach the partial override next to it.
            merged = (current, incoming)
        elif isinstance(current, tuple):
            alias, overrides = current
            overrides = overrides.copy()
            merge(overrides, incoming)
            merged = (alias, overrides)
        else:
            merged = current.copy()
            merge(merged, incoming)
        dict1[key] = merged
class Alias(object):
    """Representation of an alias in the locale data.

    An alias is a value that refers to some other part of the locale data,
    as specified by the `keys`.
    """

    def __init__(self, keys):
        self.keys = tuple(keys)

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self.keys)

    def resolve(self, data):
        """Resolve the alias based on the given data.

        This is done recursively, so if one alias resolves to a second
        alias, that second alias will also be resolved.

        :param data: the locale data
        :type data: `dict`
        """
        root = data
        target = data
        # Walk the key path down from the top of the data tree.
        for key in self.keys:
            target = target[key]
        # The looked-up node may itself be an alias, or a (alias, partial
        # override) pair; chase it until a concrete value is reached.
        if isinstance(target, Alias):
            target = target.resolve(root)
        elif isinstance(target, tuple):
            nested_alias, _overrides = target
            target = nested_alias.resolve(root)
        return target
#FIXED: gabriel
#previous: class LocaleDataDict(DictMixin, dict):
class LocaleDataDict(dict):
"""Dictionary wrapper that automatically resolves aliases to the actual
values.
"""
def __init__(self, data, base=None):
dict.__init__(self, data)
#FIXED: gabriel
#previous:
# if PY3:
# DictMixin.__init__(self, data)
if base is None:
base = data
self.base = base
def __getitem__(self, key):
orig = val = dict.__getitem__(self, key)
if isinstance(val, Alias): # resolve an alias
val = val.resolve(self.base)
if isinstance(val, tuple): # Merge a partial dict with an alias
alias, others = val
val = alias.resolve(self.base).copy()
merge(val, others)
if type(val) is dict: # Return a nested alias-resolving dict
val = LocaleDataDict(val, base=self.base)
if val is not orig:
self[key] = val
return val
def copy(self):
return LocaleDataDict(dict.copy(self), base=self.base) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA Gather Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
class GatherTest(xla_test.XLATestCase):
  """Functional tests comparing XLA-compiled gather against NumPy take."""
  def _buildParams(self, data, dtype):
    data = data.astype(dtype.as_numpy_dtype)
    # For complex types, adds an index-dependent imaginary component so we can
    # tell we got the right value.
    if dtype.is_complex:
      return data + 10j * data
    return data
  def testScalar1D(self):
    # Scalar, single-element and repeated indices into a 1-D params array.
    with self.test_session() as session, self.test_scope():
      data = np.array([0, 1, 2, 3, 7, 5])
      for dtype in self.all_tf_types:
        for indices in 4, [4], [1, 2, 2, 4, 5]:
          params_np = self._buildParams(data, dtype)
          params = array_ops.placeholder(dtype=dtype)
          indices_tf = constant_op.constant(indices)
          gather_t = array_ops.gather(params, indices_tf)
          gather_val = session.run(gather_t, feed_dict={params: params_np})
          np_val = params_np[indices]
          self.assertAllEqual(np_val, gather_val)
  def testScalar2D(self):
    # Scalar index into a 2-D params array along every valid axis.
    with self.test_session() as session, self.test_scope():
      data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
                       [12, 13, 14]])
      for dtype in self.all_tf_types:
        for axis in 0, 1, -1:
          params_np = self._buildParams(data, dtype)
          params = array_ops.placeholder(dtype=dtype)
          indices = constant_op.constant(2)
          gather_t = array_ops.gather(params, indices, axis=axis)
          gather_val = session.run(gather_t, feed_dict={params: params_np})
          expected = np.take(params_np, 2, axis=axis)
          self.assertAllEqual(expected, gather_val)
  def testSimpleTwoD32(self):
    with self.test_session() as session, self.test_scope():
      data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
                       [12, 13, 14]])
      for dtype in self.all_tf_types:
        for axis in 0, 1, -1:
          params_np = self._buildParams(data, dtype)
          params = array_ops.placeholder(dtype=dtype)
          # The indices must be in bounds for any axis.
          indices = constant_op.constant([0, 1, 0, 2])
          gather_t = array_ops.gather(params, indices, axis=axis)
          gather_val = session.run(gather_t, feed_dict={params: params_np})
          expected = np.take(params_np, [0, 1, 0, 2], axis=axis)
          self.assertAllEqual(expected, gather_val)
  def testSimpleTwoD32_Int64Indices(self):
    # Same as testSimpleTwoD32 but exercising int64 index dtype support.
    if np.int64 not in self.int_types:
      return
    with self.test_session() as session, self.test_scope():
      data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
                       [12, 13, 14]])
      # The indices must be in bounds for any axis.
      indices_np = np.array([0, 1, 0, 2])
      for dtype in self.all_tf_types:
        for axis in 0, 1, -1:
          params_np = self._buildParams(data, dtype)
          params = array_ops.placeholder(dtype=dtype)
          indices = array_ops.placeholder(dtype=dtypes.int64)
          gather_t = array_ops.gather(params, indices, axis=axis)
          gather_val = session.run(
              gather_t, feed_dict={
                  params: params_np,
                  indices: indices_np
              })
          expected = np.take(params_np, [0, 1, 0, 2], axis=axis)
          self.assertAllEqual(expected, gather_val)
  def testHigherRank(self):
    """Check that scalar and empty indices shapes work as well."""
    shape = (2, 1, 3, 2)
    for indices_shape in (), (0,), (2, 0), (2, 3):
      for dtype in self.all_tf_types:
        for axis in 0, 1, 2, 3, -1, -2:
          params = self._buildParams(np.random.randn(*shape), dtype)
          indices = np.random.randint(shape[axis], size=indices_shape)
          with self.test_session() as sess, self.test_scope():
            tf_params = array_ops.placeholder(dtype=dtype)
            tf_indices = constant_op.constant(indices, dtype=dtypes.int32)
            gather = array_ops.gather(tf_params, tf_indices, axis=axis)
            gather_value = sess.run(gather, feed_dict={tf_params: params})
            gather_np = np.take(params, indices, axis=axis)
            self.assertAllEqual(gather_np, gather_value)
  def testIndicesWithDifferentDimensions(self):
    # The output rank must follow the rank of the indices argument.
    with self.test_session():
      for dtype in self.numeric_tf_types:
        params = array_ops.placeholder(dtype=dtype)
        indices = array_ops.placeholder(dtype=np.int32)
        with self.test_scope():
          gather = array_ops.gather(params, indices)
        self.assertAllEqual(
            7, gather.eval(feed_dict={params: [4, 7, 2], indices: 1}))
        self.assertAllEqual(
            [7], gather.eval(feed_dict={params: [4, 7, 2], indices: [1]}))
        self.assertAllEqual(
            [[7]], gather.eval(feed_dict={params: [4, 7, 2], indices: [[1]]}))
  def testGatherPrecision(self):
    # Values chosen so any precision loss in the gather lowering would
    # change the comparison result.
    with self.test_session() as session, self.test_scope():
      data = np.array([[0, 0, 0, 0], [0, 2 * (1 + np.exp2(-8)), 0, 0],
                       [0, 0, 0, 0], [0.015789, 0.0985, 0.55789, 0.3842]])
      indices = np.array([1, 2, 3, 1])
      dtype = dtypes.float32
      params_np = self._buildParams(data, dtype)
      params = array_ops.placeholder(dtype=dtype)
      indices_tf = constant_op.constant(indices)
      gather_t = array_ops.gather(params, indices_tf)
      gather_val = session.run(gather_t, feed_dict={params: params_np})
      np_val = params_np[indices]
      self.assertAllEqual(np_val, gather_val)
class GatherBenchmark(test.Benchmark):
  """Microbenchmarks for the gather op."""
  def _benchmarkGather(self, name, axis, gather_indices, use_xla_jit):
    # Builds a gather over a fixed 100x100x10x100x50 float32 input and
    # hands it to the shared XLA benchmark harness (CPU only).
    def BuilderFn():
      inputs = variables.Variable(
          array_ops.zeros([100, 100, 10, 100, 50], dtype=dtypes.float32),
          dtype=dtypes.float32,
          name='input')
      indices = variables.Variable(
          gather_indices, dtype=dtypes.int32, name='indices')
      gather_t = array_ops.gather(inputs, indices, axis=axis)
      return '%s.axis%d' % (name, axis), [gather_t]
    xla_test.Benchmark(self, BuilderFn, use_xla_jit=use_xla_jit, device='cpu')
  def _benchmarkSliceGather(self, axis, use_xla_jit):
    """Benchmarks a gather op that's really a dynamic slice."""
    self._benchmarkGather('slice_gather', axis, [1], use_xla_jit)
  def _benchmarkNontrivialGather(self, axis, use_xla_jit):
    # Repeated, out-of-order indices prevent lowering to a plain slice.
    self._benchmarkGather('nontrivial_gather', axis, [9, 1, 0, 2] * 4,
                          use_xla_jit)
  # One benchmark entry point per (gather kind, axis, XLA on/off) combo.
  def benchmarkSliceGatherAxis0(self):
    self._benchmarkSliceGather(axis=0, use_xla_jit=False)
  def benchmarkSliceGatherAxis0XLA(self):
    self._benchmarkSliceGather(axis=0, use_xla_jit=True)
  def benchmarkSliceGatherAxis1(self):
    self._benchmarkSliceGather(axis=1, use_xla_jit=False)
  def benchmarkSliceGatherAxis1XLA(self):
    self._benchmarkSliceGather(axis=1, use_xla_jit=True)
  def benchmarkSliceGatherAxis4(self):
    self._benchmarkSliceGather(axis=4, use_xla_jit=False)
  def benchmarkSliceGatherAxis4XLA(self):
    self._benchmarkSliceGather(axis=4, use_xla_jit=True)
  def benchmarkNontrivialGatherAxis0(self):
    self._benchmarkNontrivialGather(axis=0, use_xla_jit=False)
  def benchmarkNontrivialGatherAxis0XLA(self):
    self._benchmarkNontrivialGather(axis=0, use_xla_jit=True)
  def benchmarkNontrivialGatherAxis1(self):
    self._benchmarkNontrivialGather(axis=1, use_xla_jit=False)
  def benchmarkNontrivialGatherAxis1XLA(self):
    self._benchmarkNontrivialGather(axis=1, use_xla_jit=True)
  def benchmarkNontrivialGatherAxis4(self):
    self._benchmarkNontrivialGather(axis=4, use_xla_jit=False)
  def benchmarkNontrivialGatherAxis4XLA(self):
    self._benchmarkNontrivialGather(axis=4, use_xla_jit=True)
if __name__ == '__main__':
test.main() | unknown | codeparrot/codeparrot-clean | ||
"""
Set operations for 1D numeric arrays based on sorting.
:Contains:
ediff1d,
unique,
intersect1d,
setxor1d,
in1d,
union1d,
setdiff1d
:Notes:
For floating point arrays, inaccurate results may appear due to usual round-off
and floating point comparison issues.
Speed could be gained in some operations by an implementation of
sort(), that can provide directly the permutation vectors, avoiding
thus calls to argsort().
To do: Optionally return indices analogously to unique for all functions.
:Author: Robert Cimrman
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = [
'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
'in1d'
]
def ediff1d(ary, to_end=None, to_begin=None):
    """
    The differences between consecutive elements of an array.

    Parameters
    ----------
    ary : array_like
        If necessary, will be flattened before the differences are taken.
    to_end : array_like, optional
        Number(s) to append at the end of the returned differences.
    to_begin : array_like, optional
        Number(s) to prepend at the beginning of the returned differences.

    Returns
    -------
    ediff1d : ndarray
        The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.

    See Also
    --------
    diff, gradient

    Notes
    -----
    When applied to masked arrays, this function drops the mask information
    if the `to_begin` and/or `to_end` parameters are used.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.ediff1d(x)
    array([ 1,  2,  3, -7])
    >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
    array([-99,   1,   2,   3,  -7,  88,  99])

    The returned array is always 1D.

    >>> y = [[1, 2, 4], [1, 6, 24]]
    >>> np.ediff1d(y)
    array([ 1,  2, -3,  5, 18])
    """
    flat = np.asanyarray(ary).flat
    diffs = flat[1:] - flat[:-1]
    pieces = []
    if to_begin is not None:
        pieces.append(to_begin)
    pieces.append(diffs)
    if to_end is not None:
        pieces.append(to_end)
    # Concatenation (and the copy it implies) is only needed when
    # something was prepended or appended.
    if len(pieces) > 1:
        diffs = np.hstack(pieces)
    return diffs
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
    """
    Find the unique elements of an array.

    Returns the sorted unique elements of an array. There are three optional
    outputs in addition to the unique elements: the indices of the input array
    that give the unique values, the indices of the unique array that
    reconstruct the input array, and the number of times each unique value
    comes up in the input array.

    Parameters
    ----------
    ar : array_like
        Input array. This will be flattened if it is not already 1-D.
    return_index : bool, optional
        If True, also return the indices of `ar` that result in the unique
        array.
    return_inverse : bool, optional
        If True, also return the indices of the unique array that can be used
        to reconstruct `ar`.
    return_counts : bool, optional
        If True, also return the number of times each unique value comes up
        in `ar`.

        .. versionadded:: 1.9.0

    Returns
    -------
    unique : ndarray
        The sorted unique values.
    unique_indices : ndarray, optional
        The indices of the first occurrences of the unique values in the
        (flattened) original array. Only provided if `return_index` is True.
    unique_inverse : ndarray, optional
        The indices to reconstruct the (flattened) original array from the
        unique array. Only provided if `return_inverse` is True.
    unique_counts : ndarray, optional
        The number of times each of the unique values comes up in the
        original array. Only provided if `return_counts` is True.

        .. versionadded:: 1.9.0

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> np.unique([1, 1, 2, 2, 3, 3])
    array([1, 2, 3])

    Reconstruct the input array from the unique values:

    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
    >>> u, indices = np.unique(a, return_inverse=True)
    >>> u[indices]
    array([1, 2, 6, 4, 2, 3, 2])
    """
    ar = np.asanyarray(ar).flatten()

    optional_indices = return_index or return_inverse
    optional_returns = optional_indices or return_counts

    if ar.size == 0:
        if not optional_returns:
            ret = ar
        else:
            ret = (ar,)
            # Index and count arrays must be empty *integer* arrays; the
            # original code used np.bool here, which returned boolean index
            # arrays (and np.bool is a removed alias as of NumPy 1.24).
            if return_index:
                ret += (np.empty(0, np.intp),)
            if return_inverse:
                ret += (np.empty(0, np.intp),)
            if return_counts:
                ret += (np.empty(0, np.intp),)
        return ret

    if optional_indices:
        # mergesort is stable, so with return_index the *first* occurrence
        # of each value is the one reported.
        perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
        aux = ar[perm]
    else:
        ar.sort()
        aux = ar
    # flag marks the first element of each run of equal values.
    flag = np.concatenate(([True], aux[1:] != aux[:-1]))

    if not optional_returns:
        ret = aux[flag]
    else:
        ret = (aux[flag],)
        if return_index:
            ret += (perm[flag],)
        if return_inverse:
            # Rank of each sorted element among the unique values, mapped
            # back to the original order via the inverse permutation.
            iflag = np.cumsum(flag) - 1
            iperm = perm.argsort()
            ret += (np.take(iflag, iperm),)
        if return_counts:
            # Distances between consecutive run starts are the run lengths.
            idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
            ret += (np.diff(idx),)
    return ret
def intersect1d(ar1, ar2, assume_unique=False):
    """
    Find the intersection of two arrays.

    Return the sorted, unique values that are in both of the input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation.  Default is False.

    Returns
    -------
    intersect1d : ndarray
        Sorted 1D array of common and unique elements.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
    array([1, 3])
    """
    if not assume_unique:
        # Deduplicate first so equal neighbours below can only come from
        # one element of each input.
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    # After sorting the concatenation, a value present in both inputs shows
    # up as a pair of adjacent equal elements.
    merged = np.concatenate((ar1, ar2))
    merged.sort()
    duplicate_mask = merged[1:] == merged[:-1]
    return merged[:-1][duplicate_mask]
def setxor1d(ar1, ar2, assume_unique=False):
    """
    Find the set exclusive-or of two arrays.

    Return the sorted, unique values that are in only one (not both) of the
    input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation.  Default is False.

    Returns
    -------
    setxor1d : ndarray
        Sorted 1D array of unique values that are in only one of the input
        arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4])
    >>> b = np.array([2, 3, 5, 7, 5])
    >>> np.setxor1d(a,b)
    array([1, 4, 5, 7])
    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)

    combined = np.concatenate((ar1, ar2))
    if combined.size == 0:
        return combined
    combined.sort()
    # With both inputs unique, a value belongs to the xor iff it occurs
    # exactly once in the sorted concatenation, i.e. it differs from both
    # of its neighbours (sentinel True at each end).
    boundary = np.concatenate(([True], combined[1:] != combined[:-1], [True]))
    keep = boundary[1:] == boundary[:-1]
    return combined[keep]
def in1d(ar1, ar2, assume_unique=False, invert=False):
    """
    Test whether each element of a 1-D array is also present in a second array.

    Returns a boolean array the same length as `ar1` that is True
    where an element of `ar1` is in `ar2` and False otherwise.

    Parameters
    ----------
    ar1 : (M,) array_like
        Input array.
    ar2 : array_like
        The values against which to test each value of `ar1`.
    assume_unique : bool, optional
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation.  Default is False.
    invert : bool, optional
        If True, the values in the returned array are inverted (that is,
        False where an element of `ar1` is in `ar2` and True otherwise).
        Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
        to (but is faster than) ``np.invert(in1d(a, b))``.

        .. versionadded:: 1.8.0

    Returns
    -------
    in1d : (M,) ndarray, bool
        The values `ar1[in1d]` are in `ar2`.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Notes
    -----
    `in1d` can be considered as an element-wise function version of the
    python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
    equivalent to ``np.array([item in b for item in a])``.

    .. versionadded:: 1.4.0

    Examples
    --------
    >>> test = np.array([0, 1, 2, 5, 0])
    >>> states = [0, 2]
    >>> np.in1d(test, states)
    array([ True, False,  True, False,  True], dtype=bool)
    """
    # Ravel both arrays; behavior for the first array could be different.
    ar1 = np.asarray(ar1).ravel()
    ar2 = np.asarray(ar2).ravel()

    # For small ar2, a brute-force comparison against each of its elements
    # is significantly faster than the sorting approach below.
    if len(ar2) < 10 * len(ar1) ** 0.145:
        # Use the builtin `bool` dtype: `np.bool` was merely a deprecated
        # alias for it and was removed in NumPy 1.24.
        if invert:
            mask = np.ones(len(ar1), dtype=bool)
            for a in ar2:
                mask &= (ar1 != a)
        else:
            mask = np.zeros(len(ar1), dtype=bool)
            for a in ar2:
                mask |= (ar1 == a)
        return mask

    # Otherwise use sorting.
    if not assume_unique:
        ar1, rev_idx = np.unique(ar1, return_inverse=True)
        ar2 = np.unique(ar2)

    ar = np.concatenate((ar1, ar2))
    # We need this to be a stable sort, so always use 'mergesort'
    # here. The values from the first array should always come before
    # the values from the second array.
    order = ar.argsort(kind='mergesort')
    sar = ar[order]
    # With unique inputs, an element of ar1 that also occurs in ar2 is
    # adjacent to its duplicate from ar2 in the stably sorted array.
    if invert:
        bool_ar = (sar[1:] != sar[:-1])
    else:
        bool_ar = (sar[1:] == sar[:-1])
    flag = np.concatenate((bool_ar, [invert]))
    # Map the per-sorted-position flags back to the positions of ar1.
    indx = order.argsort(kind='mergesort')[:len(ar1)]

    if assume_unique:
        return flag[indx]
    else:
        return flag[indx][rev_idx]
def union1d(ar1, ar2):
    """
    Find the union of two arrays.

    Return the unique, sorted array of values that are in either of the two
    input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays. They are flattened if they are not already 1D.

    Returns
    -------
    union1d : ndarray
        Unique, sorted union of the input arrays.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> np.union1d([-1, 0, 1], [-2, 0, 2])
    array([-2, -1,  0,  1,  2])
    """
    # Deduplicating the concatenation sorts it as a side effect.
    combined = np.concatenate((ar1, ar2))
    return unique(combined)
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Find the set difference of two arrays.

    Return the sorted, unique values in `ar1` that are not in `ar2`.

    Parameters
    ----------
    ar1 : array_like
        Input array.
    ar2 : array_like
        Input comparison array.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation.  Default is False.

    Returns
    -------
    setdiff1d : ndarray
        Sorted 1D array of values in `ar1` that are not in `ar2`.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4, 1])
    >>> b = np.array([3, 4, 5, 6])
    >>> np.setdiff1d(a, b)
    array([1, 2])
    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    # Boolean membership of each element of ar1 within ar2.
    membership = in1d(ar1, ar2, assume_unique=True)
    if membership.size == 0:
        # Empty input: return the (empty) mask, matching the original code.
        return membership
    # Keep the elements whose membership flag is False.
    return np.asarray(ar1)[membership == 0]
//===- bolt/Profile/DataAggregator.cpp - Perf data aggregator -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions reads profile data written by perf record,
// aggregate it and then write it back to an output file.
//
//===----------------------------------------------------------------------===//
#include "bolt/Profile/DataAggregator.h"
#include "bolt/Core/BinaryContext.h"
#include "bolt/Core/BinaryFunction.h"
#include "bolt/Passes/BinaryPasses.h"
#include "bolt/Profile/BoltAddressTranslation.h"
#include "bolt/Profile/Heatmap.h"
#include "bolt/Profile/YAMLProfileWriter.h"
#include "bolt/Utils/CommandLineOpts.h"
#include "bolt/Utils/Utils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#include <optional>
#include <unordered_map>
#include <utility>
#define DEBUG_TYPE "aggregator"
using namespace llvm;
using namespace bolt;
namespace opts {
static cl::opt<bool>
BasicAggregation("basic-events",
cl::desc("aggregate basic events (without brstack info)"),
cl::cat(AggregatorCategory));
static cl::alias BasicAggregationAlias("ba",
cl::desc("Alias for --basic-events"),
cl::aliasopt(BasicAggregation));
static cl::opt<bool> DeprecatedBasicAggregationNl(
"nl", cl::desc("Alias for --basic-events (deprecated. Use --ba)"),
cl::cat(AggregatorCategory), cl::ReallyHidden,
cl::callback([](const bool &Enabled) {
errs()
<< "BOLT-WARNING: '-nl' is deprecated, please use '--ba' instead.\n";
BasicAggregation = Enabled;
}));
cl::opt<bool> ArmSPE("spe", cl::desc("Enable Arm SPE mode."),
cl::cat(AggregatorCategory));
static cl::opt<std::string> ITraceAggregation(
"itrace", cl::desc("Generate brstack info with perf itrace argument"),
cl::cat(AggregatorCategory));
static cl::opt<bool>
FilterMemProfile("filter-mem-profile",
cl::desc("if processing a memory profile, filter out stack or heap accesses "
"that won't be useful for BOLT to reduce profile file size"),
cl::init(true),
cl::cat(AggregatorCategory));
static cl::opt<bool> ParseMemProfile(
"parse-mem-profile",
cl::desc("enable memory profile parsing if it's present in the input data, "
"on by default unless `--itrace` is set."),
cl::init(true), cl::cat(AggregatorCategory));
static cl::opt<unsigned long long>
FilterPID("pid",
cl::desc("only use samples from process with specified PID"),
cl::init(0),
cl::Optional,
cl::cat(AggregatorCategory));
static cl::opt<bool> ImputeTraceFallthrough(
"impute-trace-fall-through",
cl::desc("impute missing fall-throughs for branch-only traces"),
cl::Optional, cl::cat(AggregatorCategory));
static cl::opt<bool>
IgnoreBuildID("ignore-build-id",
cl::desc("continue even if build-ids in input binary and perf.data mismatch"),
cl::init(false),
cl::cat(AggregatorCategory));
static cl::opt<bool> IgnoreInterruptLBR(
"ignore-interrupt-lbr",
cl::desc("ignore kernel interrupt LBR that happens asynchronously"),
cl::init(true), cl::cat(AggregatorCategory));
static cl::opt<unsigned long long>
MaxSamples("max-samples",
cl::init(-1ULL),
cl::desc("maximum number of samples to read from LBR profile"),
cl::Optional,
cl::Hidden,
cl::cat(AggregatorCategory));
extern cl::opt<opts::ProfileFormatKind> ProfileFormat;
extern cl::opt<bool> ProfileWritePseudoProbes;
extern cl::opt<std::string> SaveProfile;
cl::opt<bool> ReadPreAggregated(
"pa", cl::desc("skip perf and read data from a pre-aggregated file format"),
cl::cat(AggregatorCategory));
cl::opt<std::string>
ReadPerfEvents("perf-script-events",
cl::desc("skip perf event collection by supplying a "
"perf-script output in a textual format"),
cl::ReallyHidden, cl::init(""), cl::cat(AggregatorCategory));
static cl::opt<bool>
TimeAggregator("time-aggr",
cl::desc("time BOLT aggregator"),
cl::init(false),
cl::ZeroOrMore,
cl::cat(AggregatorCategory));
} // namespace opts
namespace {
const char TimerGroupName[] = "aggregator";
const char TimerGroupDesc[] = "Aggregator";
std::vector<SectionNameAndRange> getTextSections(const BinaryContext *BC) {
std::vector<SectionNameAndRange> sections;
for (BinarySection &Section : BC->sections()) {
if (!Section.isText())
continue;
if (Section.getSize() == 0)
continue;
sections.push_back(
{Section.getName(), Section.getAddress(), Section.getEndAddress()});
}
llvm::sort(sections,
[](const SectionNameAndRange &A, const SectionNameAndRange &B) {
return A.BeginAddress < B.BeginAddress;
});
return sections;
}
}
DataAggregator::~DataAggregator() { deleteTempFiles(); }
namespace {
void deleteTempFile(const std::string &FileName) {
if (std::error_code Errc = sys::fs::remove(FileName.c_str()))
errs() << "PERF2BOLT: failed to delete temporary file " << FileName
<< " with error " << Errc.message() << "\n";
}
}
void DataAggregator::deleteTempFiles() {
for (std::string &FileName : TempFiles)
deleteTempFile(FileName);
TempFiles.clear();
}
void DataAggregator::findPerfExecutable() {
std::optional<std::string> PerfExecutable =
sys::Process::FindInEnvPath("PATH", "perf");
if (!PerfExecutable) {
outs() << "PERF2BOLT: No perf executable found!\n";
exit(1);
}
PerfPath = *PerfExecutable;
}
void DataAggregator::start() {
outs() << "PERF2BOLT: Starting data aggregation job for " << Filename << "\n";
// Turn on heatmap building if requested by --heatmap flag.
if (!opts::HeatmapMode && opts::HeatmapOutput.getNumOccurrences())
opts::HeatmapMode = opts::HeatmapModeKind::HM_Optional;
// Don't launch perf for pre-aggregated files or when perf input is specified
// by the user.
if (opts::ReadPreAggregated || !opts::ReadPerfEvents.empty())
return;
findPerfExecutable();
if (opts::ArmSPE) {
// pid from_ip to_ip flags
// where flags could be:
// P/M: whether branch was Predicted or Mispredicted.
// N: optionally appears when the branch was Not-Taken (ie fall-through)
// 12345 0x123/0x456/PN/-/-/8/RET/-
opts::ITraceAggregation = "bl";
opts::ParseMemProfile = true;
opts::BasicAggregation = false;
}
if (opts::BasicAggregation) {
launchPerfProcess("events without brstack", MainEventsPPI,
"script -F pid,event,ip");
} else if (!opts::ITraceAggregation.empty()) {
// Disable parsing memory profile from trace data, unless requested by user.
if (!opts::ParseMemProfile.getNumOccurrences())
opts::ParseMemProfile = false;
launchPerfProcess("branch events with itrace", MainEventsPPI,
"script -F pid,brstack --itrace=" +
opts::ITraceAggregation);
} else {
launchPerfProcess("branch events", MainEventsPPI, "script -F pid,brstack");
}
if (opts::ParseMemProfile)
launchPerfProcess("mem events", MemEventsPPI,
"script -F pid,event,addr,ip");
launchPerfProcess("process events", MMapEventsPPI,
"script --show-mmap-events --no-itrace");
launchPerfProcess("task events", TaskEventsPPI,
"script --show-task-events --no-itrace");
}
void DataAggregator::abort() {
if (opts::ReadPreAggregated)
return;
std::string Error;
// Kill subprocesses in case they are not finished
sys::Wait(TaskEventsPPI.PI, 1, &Error);
sys::Wait(MMapEventsPPI.PI, 1, &Error);
sys::Wait(MainEventsPPI.PI, 1, &Error);
if (opts::ParseMemProfile)
sys::Wait(MemEventsPPI.PI, 1, &Error);
deleteTempFiles();
exit(1);
}
void DataAggregator::launchPerfProcess(StringRef Name, PerfProcessInfo &PPI,
StringRef Args) {
SmallVector<StringRef, 4> Argv;
outs() << "PERF2BOLT: spawning perf job to read " << Name << '\n';
Argv.push_back(PerfPath.data());
Args.split(Argv, ' ');
Argv.push_back("-f");
Argv.push_back("-i");
Argv.push_back(Filename.c_str());
if (std::error_code Errc =
sys::fs::createTemporaryFile("perf.script", "out", PPI.StdoutPath)) {
errs() << "PERF2BOLT: failed to create temporary file " << PPI.StdoutPath
<< " with error " << Errc.message() << "\n";
exit(1);
}
TempFiles.push_back(PPI.StdoutPath.data());
if (std::error_code Errc =
sys::fs::createTemporaryFile("perf.script", "err", PPI.StderrPath)) {
errs() << "PERF2BOLT: failed to create temporary file " << PPI.StderrPath
<< " with error " << Errc.message() << "\n";
exit(1);
}
TempFiles.push_back(PPI.StderrPath.data());
std::optional<StringRef> Redirects[] = {
std::nullopt, // Stdin
StringRef(PPI.StdoutPath.data()), // Stdout
StringRef(PPI.StderrPath.data())}; // Stderr
LLVM_DEBUG({
dbgs() << "Launching perf: ";
for (StringRef Arg : Argv)
dbgs() << Arg << " ";
dbgs() << " 1> " << PPI.StdoutPath.data() << " 2> " << PPI.StderrPath.data()
<< "\n";
});
PPI.PI = sys::ExecuteNoWait(PerfPath.data(), Argv, /*envp*/ std::nullopt,
Redirects);
}
void DataAggregator::processFileBuildID(StringRef FileBuildID) {
auto WarningCallback = [](int ReturnCode, StringRef ErrBuf) {
errs() << "PERF-ERROR: return code " << ReturnCode << "\n" << ErrBuf;
};
PerfProcessInfo BuildIDProcessInfo;
launchPerfProcess("buildid list", BuildIDProcessInfo, "buildid-list");
if (prepareToParse("buildid", BuildIDProcessInfo, WarningCallback))
return;
std::optional<StringRef> FileName = getFileNameForBuildID(FileBuildID);
if (FileName && *FileName == sys::path::filename(BC->getFilename())) {
outs() << "PERF2BOLT: matched build-id and file name\n";
return;
}
if (FileName) {
errs() << "PERF2BOLT-WARNING: build-id matched a different file name\n";
BuildIDBinaryName = std::string(*FileName);
return;
}
if (!hasAllBuildIDs()) {
errs() << "PERF2BOLT-WARNING: build-id will not be checked because perf "
"data was recorded without it\n";
return;
}
errs() << "PERF2BOLT-ERROR: failed to match build-id from perf output. "
"This indicates the input binary supplied for data aggregation "
"is not the same recorded by perf when collecting profiling "
"data, or there were no samples recorded for the binary. "
"Use -ignore-build-id option to override.\n";
if (!opts::IgnoreBuildID)
abort();
}
bool DataAggregator::checkPerfDataMagic(StringRef FileName) {
if (opts::ReadPreAggregated)
return true;
Expected<sys::fs::file_t> FD = sys::fs::openNativeFileForRead(FileName);
if (!FD) {
consumeError(FD.takeError());
return false;
}
char Buf[7] = {0, 0, 0, 0, 0, 0, 0};
llvm::scope_exit Close([&] { sys::fs::closeFile(*FD); });
Expected<size_t> BytesRead = sys::fs::readNativeFileSlice(
*FD, MutableArrayRef(Buf, sizeof(Buf)), 0);
if (!BytesRead) {
consumeError(BytesRead.takeError());
return false;
}
if (*BytesRead != 7)
return false;
if (strncmp(Buf, "PERFILE", 7) == 0)
return true;
return false;
}
void DataAggregator::parsePreAggregated() {
ErrorOr<std::unique_ptr<MemoryBuffer>> MB =
MemoryBuffer::getFileOrSTDIN(Filename);
if (std::error_code EC = MB.getError()) {
errs() << "PERF2BOLT-ERROR: cannot open " << Filename << ": "
<< EC.message() << "\n";
exit(1);
}
FileBuf = std::move(*MB);
ParsingBuf = FileBuf->getBuffer();
Col = 0;
Line = 1;
if (parsePreAggregatedLBRSamples()) {
errs() << "PERF2BOLT: failed to parse samples\n";
exit(1);
}
}
void DataAggregator::filterBinaryMMapInfo() {
if (opts::FilterPID) {
auto MMapInfoIter = BinaryMMapInfo.find(opts::FilterPID);
if (MMapInfoIter != BinaryMMapInfo.end()) {
MMapInfo MMap = MMapInfoIter->second;
BinaryMMapInfo.clear();
BinaryMMapInfo.insert(std::make_pair(MMap.PID, MMap));
} else {
if (errs().has_colors())
errs().changeColor(raw_ostream::RED);
errs() << "PERF2BOLT-ERROR: could not find a profile matching PID \""
<< opts::FilterPID << "\""
<< " for binary \"" << BC->getFilename() << "\".";
assert(!BinaryMMapInfo.empty() && "No memory map for matching binary");
errs() << " Profile for the following process is available:\n";
for (std::pair<const uint64_t, MMapInfo> &MMI : BinaryMMapInfo)
outs() << " " << MMI.second.PID
<< (MMI.second.Forked ? " (forked)\n" : "\n");
if (errs().has_colors())
errs().resetColor();
exit(1);
}
}
}
int DataAggregator::prepareToParse(StringRef Name, PerfProcessInfo &Process,
PerfProcessErrorCallbackTy Callback) {
if (!opts::ReadPerfEvents.empty()) {
outs() << "PERF2BOLT: using pre-processed perf events for '" << Name
<< "' (perf-script-events)\n";
ParsingBuf = opts::ReadPerfEvents;
return 0;
}
std::string Error;
outs() << "PERF2BOLT: waiting for perf " << Name
<< " collection to finish...\n";
std::optional<sys::ProcessStatistics> PS;
sys::ProcessInfo PI = sys::Wait(Process.PI, std::nullopt, &Error, &PS);
if (!Error.empty()) {
errs() << "PERF-ERROR: " << PerfPath << ": " << Error << "\n";
deleteTempFiles();
exit(1);
}
LLVM_DEBUG({
const float UserSec = 1.f * PS->UserTime.count() / 1e6;
const float TotalSec = 1.f * PS->TotalTime.count() / 1e6;
const float PeakGiB = 1.f * PS->PeakMemory / (1 << 20);
dbgs() << formatv("Finished in {0:f2}s user time, {1:f2}s total time, "
"{2:f2} GiB peak RSS\n",
UserSec, TotalSec, PeakGiB);
});
if (PI.ReturnCode != 0) {
ErrorOr<std::unique_ptr<MemoryBuffer>> ErrorMB =
MemoryBuffer::getFileOrSTDIN(Process.StderrPath.data());
StringRef ErrBuf = (*ErrorMB)->getBuffer();
deleteTempFiles();
Callback(PI.ReturnCode, ErrBuf);
return PI.ReturnCode;
}
ErrorOr<std::unique_ptr<MemoryBuffer>> MB =
MemoryBuffer::getFileOrSTDIN(Process.StdoutPath.data());
if (std::error_code EC = MB.getError()) {
errs() << "Cannot open " << Process.StdoutPath.data() << ": "
<< EC.message() << "\n";
deleteTempFiles();
exit(1);
}
FileBuf = std::move(*MB);
ParsingBuf = FileBuf->getBuffer();
Col = 0;
Line = 1;
return PI.ReturnCode;
}
void DataAggregator::parsePerfData(BinaryContext &BC) {
auto ErrorCallback = [](int ReturnCode, StringRef ErrBuf) {
errs() << "PERF-ERROR: return code " << ReturnCode << "\n" << ErrBuf;
exit(1);
};
auto MemEventsErrorCallback = [&](int ReturnCode, StringRef ErrBuf) {
Regex NoData("Samples for '.*' event do not have ADDR attribute set. "
"Cannot print 'addr' field.");
if (!NoData.match(ErrBuf))
ErrorCallback(ReturnCode, ErrBuf);
};
if (std::optional<StringRef> FileBuildID = BC.getFileBuildID()) {
outs() << "BOLT-INFO: binary build-id is: " << *FileBuildID << "\n";
processFileBuildID(*FileBuildID);
} else {
errs() << "BOLT-WARNING: build-id will not be checked because we could "
"not read one from input binary\n";
}
if (BC.IsLinuxKernel) {
// Current MMap parsing logic does not work with linux kernel.
// MMap entries for linux kernel uses PERF_RECORD_MMAP
// format instead of typical PERF_RECORD_MMAP2 format.
// Since linux kernel address mapping is absolute (same as
// in the ELF file), we avoid parsing MMap in linux kernel mode.
// While generating optimized linux kernel binary, we may need
// to parse MMap entries.
// In linux kernel mode, we analyze and optimize
// all linux kernel binary instructions, irrespective
// of whether they are due to system calls or due to
// interrupts. Therefore, we cannot ignore interrupt
// in Linux kernel mode.
opts::IgnoreInterruptLBR = false;
} else {
prepareToParse("mmap events", MMapEventsPPI, ErrorCallback);
if (parseMMapEvents())
errs() << "PERF2BOLT: failed to parse mmap events\n";
}
prepareToParse("task events", TaskEventsPPI, ErrorCallback);
if (parseTaskEvents())
errs() << "PERF2BOLT: failed to parse task events\n";
filterBinaryMMapInfo();
prepareToParse("events", MainEventsPPI, ErrorCallback);
if ((!opts::BasicAggregation && parseBranchEvents()) ||
(opts::BasicAggregation && parseBasicEvents()))
errs() << "PERF2BOLT: failed to parse samples\n";
// Special handling for memory events
if (opts::ParseMemProfile &&
!prepareToParse("mem events", MemEventsPPI, MemEventsErrorCallback))
if (const std::error_code EC = parseMemEvents())
errs() << "PERF2BOLT: failed to parse memory events: " << EC.message()
<< '\n';
deleteTempFiles();
}
void DataAggregator::imputeFallThroughs() {
if (Traces.empty())
return;
std::pair PrevBranch(Trace::EXTERNAL, Trace::EXTERNAL);
uint64_t AggregateCount = 0;
uint64_t AggregateFallthroughSize = 0;
uint64_t InferredTraces = 0;
// Helper map with whether the instruction is a call/ret/unconditional branch
std::unordered_map<uint64_t, bool> IsUncondCTMap;
auto checkUnconditionalControlTransfer = [&](const uint64_t Addr) {
auto isUncondCT = [&](const MCInst &MI) -> bool {
return BC->MIB->isUnconditionalControlTransfer(MI);
};
return testAndSet<bool>(Addr, isUncondCT, IsUncondCTMap).value_or(true);
};
// Traces are sorted by their component addresses (Branch, From, To).
// assert(is_sorted(Traces));
// Traces corresponding to the top-of-stack branch entry with a missing
// fall-through have BR_ONLY(-1ULL/UINT64_MAX) in To field, meaning that for
// fixed values of Branch and From branch-only traces are stored after all
// traces with valid fall-through.
//
// Group traces by (Branch, From) and compute weighted average fall-through
// length for the top-of-stack trace (closing the group) by accumulating the
// fall-through lengths of traces with valid fall-throughs earlier in the
// group.
for (auto &[Trace, Info] : Traces) {
// Skip fall-throughs in external code.
if (Trace.From == Trace::EXTERNAL)
continue;
if (std::pair CurrentBranch(Trace.Branch, Trace.From);
CurrentBranch != PrevBranch) {
// New group: reset aggregates.
AggregateCount = AggregateFallthroughSize = 0;
PrevBranch = CurrentBranch;
}
// BR_ONLY must be the last trace in the group
if (Trace.To == Trace::BR_ONLY) {
// If the group is not empty, use aggregate values, otherwise 0-length
// for unconditional jumps (call/ret/uncond branch) or 1-length for others
uint64_t InferredBytes =
AggregateFallthroughSize
? AggregateFallthroughSize / AggregateCount
: !checkUnconditionalControlTransfer(Trace.From);
Trace.To = Trace.From + InferredBytes;
LLVM_DEBUG(dbgs() << "imputed " << Trace << " (" << InferredBytes
<< " bytes)\n");
++InferredTraces;
} else {
// Only use valid fall-through lengths
if (Trace.To != Trace::EXTERNAL)
AggregateFallthroughSize += (Trace.To - Trace.From) * Info.TakenCount;
AggregateCount += Info.TakenCount;
}
}
if (opts::Verbosity >= 1)
outs() << "BOLT-INFO: imputed " << InferredTraces << " traces\n";
}
Error DataAggregator::preprocessProfile(BinaryContext &BC) {
this->BC = &BC;
if (opts::ReadPreAggregated) {
parsePreAggregated();
} else {
parsePerfData(BC);
}
// Sort parsed traces for faster processing.
llvm::sort(Traces, llvm::less_first());
if (opts::ImputeTraceFallthrough)
imputeFallThroughs();
if (opts::HeatmapMode) {
if (std::error_code EC = printLBRHeatMap())
return errorCodeToError(EC);
if (opts::HeatmapMode == opts::HeatmapModeKind::HM_Exclusive)
exit(0);
}
return Error::success();
}
Error DataAggregator::readProfile(BinaryContext &BC) {
processProfile(BC);
for (auto &BFI : BC.getBinaryFunctions()) {
BinaryFunction &Function = BFI.second;
convertBranchData(Function);
}
if (opts::AggregateOnly) {
if (opts::ProfileFormat == opts::ProfileFormatKind::PF_Fdata)
if (std::error_code EC = writeAggregatedFile(opts::OutputFilename))
report_error("cannot create output data file", EC);
// BAT YAML is handled by DataAggregator since normal YAML output requires
// CFG which is not available in BAT mode.
if (usesBAT()) {
if (opts::ProfileFormat == opts::ProfileFormatKind::PF_YAML)
if (std::error_code EC = writeBATYAML(BC, opts::OutputFilename))
report_error("cannot create output data file", EC);
if (!opts::SaveProfile.empty())
if (std::error_code EC = writeBATYAML(BC, opts::SaveProfile))
report_error("cannot create output data file", EC);
}
}
return Error::success();
}
bool DataAggregator::mayHaveProfileData(const BinaryFunction &Function) {
return Function.hasProfileAvailable();
}
void DataAggregator::processProfile(BinaryContext &BC) {
if (opts::BasicAggregation)
processBasicEvents();
else
processBranchEvents();
processMemEvents();
// Mark all functions with registered events as having a valid profile.
for (auto &BFI : BC.getBinaryFunctions()) {
BinaryFunction &BF = BFI.second;
if (FuncBranchData *FBD = getBranchData(BF)) {
BF.markProfiled(BinaryFunction::PF_BRANCH);
BF.RawSampleCount = FBD->getNumExecutedBranches();
} else if (FuncBasicSampleData *FSD =
getFuncBasicSampleData(BF.getNames())) {
BF.markProfiled(BinaryFunction::PF_BASIC);
BF.RawSampleCount = FSD->getSamples();
}
}
for (auto &FuncBranches : NamesToBranches) {
llvm::stable_sort(FuncBranches.second.Data);
llvm::stable_sort(FuncBranches.second.EntryData);
}
for (auto &MemEvents : NamesToMemEvents)
llvm::stable_sort(MemEvents.second.Data);
// Release intermediate storage.
clear(Traces);
clear(BasicSamples);
clear(MemSamples);
}
BinaryFunction *
DataAggregator::getBinaryFunctionContainingAddress(uint64_t Address) const {
if (!BC->containsAddress(Address))
return nullptr;
return BC->getBinaryFunctionContainingAddress(Address, /*CheckPastEnd=*/false,
/*UseMaxSize=*/true);
}
BinaryFunction *
DataAggregator::getBATParentFunction(const BinaryFunction &Func) const {
if (BAT)
if (const uint64_t HotAddr = BAT->fetchParentAddress(Func.getAddress()))
return getBinaryFunctionContainingAddress(HotAddr);
return nullptr;
}
StringRef DataAggregator::getLocationName(const BinaryFunction &Func,
bool BAT) {
if (!BAT)
return Func.getOneName();
const BinaryFunction *OrigFunc = &Func;
// If it is a local function, prefer the name containing the file name where
// the local function was declared
for (StringRef AlternativeName : OrigFunc->getNames()) {
size_t FileNameIdx = AlternativeName.find('/');
// Confirm the alternative name has the pattern Symbol/FileName/1 before
// using it
if (FileNameIdx == StringRef::npos ||
AlternativeName.find('/', FileNameIdx + 1) == StringRef::npos)
continue;
return AlternativeName;
}
return OrigFunc->getOneName();
}
bool DataAggregator::doBasicSample(BinaryFunction &OrigFunc, uint64_t Address,
uint64_t Count) {
// To record executed bytes, use basic block size as is regardless of BAT.
uint64_t BlockSize = 0;
if (BinaryBasicBlock *BB = OrigFunc.getBasicBlockContainingOffset(
Address - OrigFunc.getAddress()))
BlockSize = BB->getOriginalSize();
BinaryFunction *ParentFunc = getBATParentFunction(OrigFunc);
BinaryFunction &Func = ParentFunc ? *ParentFunc : OrigFunc;
// Attach executed bytes to parent function in case of cold fragment.
Func.SampleCountInBytes += Count * BlockSize;
auto I = NamesToBasicSamples.find(Func.getOneName());
if (I == NamesToBasicSamples.end()) {
bool Success;
StringRef LocName = getLocationName(Func, BAT);
std::tie(I, Success) = NamesToBasicSamples.insert(std::make_pair(
Func.getOneName(),
FuncBasicSampleData(LocName, FuncBasicSampleData::ContainerTy())));
}
Address -= Func.getAddress();
if (BAT)
Address = BAT->translate(Func.getAddress(), Address, /*IsBranchSrc=*/false);
I->second.bumpCount(Address, Count);
return true;
}
// Record an intra-function taken branch From -> To (function-relative
// offsets) with execution and misprediction counts, lazily creating and
// registering the per-function branch-data container. Always returns true.
bool DataAggregator::doIntraBranch(BinaryFunction &Func, uint64_t From,
                                   uint64_t To, uint64_t Count,
                                   uint64_t Mispreds) {
  FuncBranchData *AggrData = getBranchData(Func);
  if (!AggrData) {
    // First branch seen for this function: create and cache its container.
    AggrData = &NamesToBranches[Func.getOneName()];
    AggrData->Name = getLocationName(Func, BAT);
    setBranchData(Func, AggrData);
  }
  LLVM_DEBUG(dbgs() << "BOLT-DEBUG: bumpBranchCount: "
                    << formatv("{0} @ {1:x} -> {0} @ {2:x}\n", Func, From, To));
  AggrData->bumpBranchCount(From, To, Count, Mispreds);
  return true;
}
// Record an inter-function branch. Either endpoint may be null (unknown
// region); for known endpoints this bumps a call count on the source side and
// an entry count on the destination side, and updates function entry/exit
// accounting. Always returns true.
bool DataAggregator::doInterBranch(BinaryFunction *FromFunc,
                                   BinaryFunction *ToFunc, uint64_t From,
                                   uint64_t To, uint64_t Count,
                                   uint64_t Mispreds) {
  FuncBranchData *FromAggrData = nullptr;
  FuncBranchData *ToAggrData = nullptr;
  StringRef SrcFunc;
  StringRef DstFunc;
  if (FromFunc) {
    SrcFunc = getLocationName(*FromFunc, BAT);
    FromAggrData = getBranchData(*FromFunc);
    if (!FromAggrData) {
      // Lazily create and register branch data for the source function.
      FromAggrData = &NamesToBranches[FromFunc->getOneName()];
      FromAggrData->Name = SrcFunc;
      setBranchData(*FromFunc, FromAggrData);
    }
    recordExit(*FromFunc, From, Mispreds, Count);
  }
  if (ToFunc) {
    DstFunc = getLocationName(*ToFunc, BAT);
    ToAggrData = getBranchData(*ToFunc);
    if (!ToAggrData) {
      // Lazily create and register branch data for the destination function.
      ToAggrData = &NamesToBranches[ToFunc->getOneName()];
      ToAggrData->Name = DstFunc;
      setBranchData(*ToFunc, ToAggrData);
    }
    recordEntry(*ToFunc, To, Mispreds, Count);
  }
  // A Location with an empty name denotes an address in an unknown region.
  if (FromAggrData)
    FromAggrData->bumpCallCount(From, Location(!DstFunc.empty(), DstFunc, To),
                                Count, Mispreds);
  if (ToAggrData)
    ToAggrData->bumpEntryCount(Location(!SrcFunc.empty(), SrcFunc, From), To,
                               Count, Mispreds);
  return true;
}
// Return true iff the instruction at \p Addr is a return instruction.
// testAndSet memoizes the answer in the Returns container; an address that
// cannot be resolved yields false.
bool DataAggregator::checkReturn(uint64_t Addr) {
  auto IsReturnInst = [this](const MCInst &Inst) -> bool {
    return BC->MIB->isReturn(Inst);
  };
  return testAndSet<bool>(Addr, IsReturnInst, Returns).value_or(false);
}
// Process a single taken branch sample given absolute From/To addresses.
// Converts both endpoints to function-relative offsets (with BAT translation
// and cold-fragment parent lookup) and dispatches to doIntraBranch or
// doInterBranch. Returns false when neither endpoint is in a known function.
bool DataAggregator::doBranch(uint64_t From, uint64_t To, uint64_t Count,
                              uint64_t Mispreds) {
  // Mutates \p Addr to an offset into the containing function, performing BAT
  // offset translation and parent lookup.
  //
  // Returns the containing function (or BAT parent).
  auto handleAddress = [&](uint64_t &Addr, bool IsFrom) {
    BinaryFunction *Func = getBinaryFunctionContainingAddress(Addr);
    if (!Func) {
      // Unknown region: zero the offset so the Location is well-defined.
      Addr = 0;
      return Func;
    }
    Addr -= Func->getAddress();
    if (BAT)
      Addr = BAT->translate(Func->getAddress(), Addr, IsFrom);
    if (BinaryFunction *ParentFunc = getBATParentFunction(*Func))
      return ParentFunc;
    return Func;
  };
  BinaryFunction *FromFunc = handleAddress(From, /*IsFrom*/ true);
  BinaryFunction *ToFunc = handleAddress(To, /*IsFrom*/ false);
  if (!FromFunc && !ToFunc)
    return false;
  // Treat recursive control transfers as inter-branches.
  // To == 0 means a branch to the entry point, handled as inter-branch below.
  if (FromFunc == ToFunc && To != 0) {
    recordBranch(*FromFunc, From, To, Count, Mispreds);
    return doIntraBranch(*FromFunc, From, To, Count, Mispreds);
  }
  return doInterBranch(FromFunc, ToFunc, From, To, Count, Mispreds);
}
// Process a trace (a sequential execution range between two taken branches).
// Validates that both endpoints fall inside the same known function, accounts
// executed bytes, expands the trace into its constituent fall-through edges
// (via BAT data or CFG walk), and records each as an intra-function branch.
// Returns false for out-of-range or invalid traces, updating diagnostics.
bool DataAggregator::doTrace(const Trace &Trace, uint64_t Count,
                             bool IsReturn) {
  const uint64_t From = Trace.From, To = Trace.To;
  BinaryFunction *FromFunc = getBinaryFunctionContainingAddress(From);
  BinaryFunction *ToFunc = getBinaryFunctionContainingAddress(To);
  NumTraces += Count;
  if (!FromFunc || !ToFunc) {
    LLVM_DEBUG(dbgs() << "Out of range trace " << Trace << '\n');
    NumLongRangeTraces += Count;
    return false;
  }
  if (FromFunc != ToFunc) {
    LLVM_DEBUG(dbgs() << "Invalid trace " << Trace << '\n');
    NumInvalidTraces += Count;
    return false;
  }
  // Set ParentFunc to BAT parent function or FromFunc itself.
  BinaryFunction *ParentFunc = getBATParentFunction(*FromFunc);
  if (!ParentFunc)
    ParentFunc = FromFunc;
  ParentFunc->SampleCountInBytes += Count * (To - From);
  const uint64_t FuncAddress = FromFunc->getAddress();
  // For BAT functions, fall-throughs come from the translation table;
  // otherwise they are recovered by walking the CFG layout.
  std::optional<BoltAddressTranslation::FallthroughListTy> FTs =
      BAT && BAT->isBATFunction(FuncAddress)
          ? BAT->getFallthroughsInTrace(FuncAddress, From - IsReturn, To)
          : getFallthroughsInTrace(*FromFunc, Trace, Count, IsReturn);
  if (!FTs) {
    LLVM_DEBUG(dbgs() << "Invalid trace " << Trace << '\n');
    NumInvalidTraces += Count;
    return false;
  }
  LLVM_DEBUG(dbgs() << "Processing " << FTs->size() << " fallthroughs for "
                    << FromFunc->getPrintName() << ":" << Trace << '\n');
  for (const auto &[From, To] : *FTs)
    doIntraBranch(*ParentFunc, From, To, Count, false);
  return true;
}
// Reconstruct the list of fall-through edges (as pairs of function-relative
// offsets) implied by \p Trace within \p BF by walking consecutive basic
// blocks in the original layout, and bump the corresponding CFG edge counts
// by \p Count. Returns std::nullopt when the trace does not match the
// disassembled function contents.
std::optional<SmallVector<std::pair<uint64_t, uint64_t>, 16>>
DataAggregator::getFallthroughsInTrace(BinaryFunction &BF, const Trace &Trace,
                                       uint64_t Count, bool IsReturn) const {
  SmallVector<std::pair<uint64_t, uint64_t>, 16> Branches;
  BinaryContext &BC = BF.getBinaryContext();
  // Offsets of the trace within this function.
  const uint64_t From = Trace.From - BF.getAddress();
  const uint64_t To = Trace.To - BF.getAddress();
  if (From > To)
    return std::nullopt;
  // Accept fall-throughs inside pseudo functions (PLT/thunks).
  // This check has to be above BF.empty as pseudo functions would pass it:
  // pseudo => ignored => CFG not built => empty.
  // If we return nullopt, trace would be reported as mismatching disassembled
  // function contents which it is not. To avoid this, return an empty
  // fall-through list instead.
  if (BF.isPseudo())
    return Branches;
  // Can only record traces in CFG state
  if (!BF.hasCFG())
    return std::nullopt;
  const BinaryBasicBlock *FromBB = BF.getBasicBlockContainingOffset(From);
  const BinaryBasicBlock *ToBB = BF.getBasicBlockContainingOffset(To);
  if (!FromBB || !ToBB)
    return std::nullopt;
  // Adjust FromBB if the first LBR is a return from the last instruction in
  // the previous block (that instruction should be a call).
  if (Trace.Branch != Trace::FT_ONLY && !BF.containsAddress(Trace.Branch) &&
      From == FromBB->getOffset() &&
      (IsReturn ? From : !(FromBB->isEntryPoint() || FromBB->isLandingPad()))) {
    const BinaryBasicBlock *PrevBB =
        BF.getLayout().getBlock(FromBB->getIndex() - 1);
    if (PrevBB->getSuccessor(FromBB->getLabel())) {
      const MCInst *Instr = PrevBB->getLastNonPseudoInstr();
      if (Instr && BC.MIB->isCall(*Instr))
        FromBB = PrevBB;
      else
        LLVM_DEBUG(dbgs() << "invalid trace (no call): " << Trace << '\n');
    } else {
      LLVM_DEBUG(dbgs() << "invalid trace: " << Trace << '\n');
    }
  }
  // Fill out information for fall-through edges. The From and To could be
  // within the same basic block, e.g. when two call instructions are in the
  // same block. In this case we skip the processing.
  if (FromBB == ToBB)
    return Branches;
  // Process blocks in the original layout order.
  BinaryBasicBlock *BB = BF.getLayout().getBlock(FromBB->getIndex());
  assert(BB == FromBB && "index mismatch");
  while (BB != ToBB) {
    BinaryBasicBlock *NextBB = BF.getLayout().getBlock(BB->getIndex() + 1);
    assert((NextBB && NextBB->getOffset() > BB->getOffset()) && "bad layout");
    // Check for bad LBRs.
    if (!BB->getSuccessor(NextBB->getLabel())) {
      LLVM_DEBUG(dbgs() << "no fall-through for the trace: " << Trace << '\n');
      return std::nullopt;
    }
    // Source offset is the last real instruction of the block when available,
    // otherwise the block's own offset.
    const MCInst *Instr = BB->getLastNonPseudoInstr();
    uint64_t Offset = 0;
    if (Instr)
      Offset = BC.MIB->getOffsetWithDefault(*Instr, 0);
    else
      Offset = BB->getOffset();
    Branches.emplace_back(Offset, NextBB->getOffset());
    BB = NextBB;
  }
  // Record fall-through jumps
  for (const auto &[FromOffset, ToOffset] : Branches) {
    BinaryBasicBlock *FromBB = BF.getBasicBlockContainingOffset(FromOffset);
    BinaryBasicBlock *ToBB = BF.getBasicBlockAtOffset(ToOffset);
    assert(FromBB && ToBB);
    BinaryBasicBlock::BinaryBranchInfo &BI = FromBB->getBranchInfo(*ToBB);
    BI.Count += Count;
  }
  return Branches;
}
// Account \p Count entries into \p BF at function-relative offset \p To.
// Entry at offset 0 bumps the function-level execution count; entries at
// secondary entry points bump that block's count. Returns false when \p To
// lies past the end of the function.
bool DataAggregator::recordEntry(BinaryFunction &BF, uint64_t To, bool Mispred,
                                 uint64_t Count) const {
  if (To > BF.getSize())
    return false;
  // First profile data for this function: start counting from zero.
  if (!BF.hasProfile())
    BF.ExecutionCount = 0;
  BinaryBasicBlock *EntryBB = nullptr;
  if (To == 0) {
    BF.ExecutionCount += Count;
    if (!BF.empty())
      EntryBB = &BF.front();
  } else if (BinaryBasicBlock *BB = BF.getBasicBlockAtOffset(To)) {
    if (BB->isEntryPoint())
      EntryBB = BB;
  }
  if (EntryBB)
    EntryBB->setExecutionCount(EntryBB->getKnownExecutionCount() + Count);
  return true;
}
// Account an exit out of \p BF at function-relative offset \p From. Only
// marks the function as having profile data (initializing its execution
// count); no per-block counts are updated here. Returns false for non-simple
// functions or when \p From lies past the end of the function.
bool DataAggregator::recordExit(BinaryFunction &BF, uint64_t From, bool Mispred,
                                uint64_t Count) const {
  if (!BF.isSimple() || From > BF.getSize())
    return false;
  if (!BF.hasProfile())
    BF.ExecutionCount = 0;
  return true;
}
// Parse one LBR/brstack entry of the form "<from>/<to>/<flags>/..." from the
// parsing buffer. The flags field is a single 'P'/'M'/'-' prediction
// character, optionally followed by 'N' (not-taken) in Arm SPE mode.
// Returns io_error on any malformed field.
ErrorOr<DataAggregator::LBREntry> DataAggregator::parseLBREntry() {
  LBREntry Res;
  ErrorOr<StringRef> FromStrRes = parseString('/');
  if (std::error_code EC = FromStrRes.getError())
    return EC;
  StringRef OffsetStr = FromStrRes.get();
  if (OffsetStr.getAsInteger(0, Res.From)) {
    reportError("expected hexadecimal number with From address");
    Diag << "Found: " << OffsetStr << "\n";
    return make_error_code(llvm::errc::io_error);
  }
  ErrorOr<StringRef> ToStrRes = parseString('/');
  if (std::error_code EC = ToStrRes.getError())
    return EC;
  OffsetStr = ToStrRes.get();
  if (OffsetStr.getAsInteger(0, Res.To)) {
    reportError("expected hexadecimal number with To address");
    Diag << "Found: " << OffsetStr << "\n";
    return make_error_code(llvm::errc::io_error);
  }
  ErrorOr<StringRef> MispredStrRes = parseString('/');
  if (std::error_code EC = MispredStrRes.getError())
    return EC;
  StringRef MispredStr = MispredStrRes.get();
  // SPE brstack mispredicted flags might be up to two characters long:
  // 'PN' or 'MN'. Where 'N' optionally appears.
  bool ValidStrSize = opts::ArmSPE
                          ? MispredStr.size() >= 1 && MispredStr.size() <= 2
                          : MispredStr.size() == 1;
  bool SpeTakenBitErr =
      (opts::ArmSPE && MispredStr.size() == 2 && MispredStr[1] != 'N');
  bool PredictionBitErr =
      !ValidStrSize ||
      (MispredStr[0] != 'P' && MispredStr[0] != 'M' && MispredStr[0] != '-');
  if (SpeTakenBitErr)
    reportError("expected 'N' as SPE prediction bit for a not-taken branch");
  if (PredictionBitErr)
    reportError("expected 'P', 'M' or '-' char as a prediction bit");
  if (SpeTakenBitErr || PredictionBitErr) {
    Diag << "Found: " << MispredStr << "\n";
    return make_error_code(llvm::errc::io_error);
  }
  Res.Mispred = MispredStr[0] == 'M';
  // Warn about a missing misprediction bit only once per run.
  static bool MispredWarning = true;
  if (MispredStr[0] == '-' && MispredWarning) {
    errs() << "PERF2BOLT-WARNING: misprediction bit is missing in profile\n";
    MispredWarning = false;
  }
  // Consume and sanity-check the remaining cycle/type fields of the entry.
  ErrorOr<StringRef> Rest = parseString(FieldSeparator, true);
  if (std::error_code EC = Rest.getError())
    return EC;
  if (Rest.get().size() < 5) {
    reportError("expected rest of brstack entry");
    Diag << "Found: " << Rest.get() << "\n";
    return make_error_code(llvm::errc::io_error);
  }
  return Res;
}
// If the next character in the buffer is the field separator, consume it and
// advance the column counter; returns whether a separator was consumed.
bool DataAggregator::checkAndConsumeFS() {
  if (ParsingBuf[0] != FieldSeparator)
    return false;
  ParsingBuf = ParsingBuf.substr(1);
  ++Col;
  return true;
}
void DataAggregator::consumeRestOfLine() {
size_t LineEnd = ParsingBuf.find_first_of('\n');
if (LineEnd == StringRef::npos) {
ParsingBuf = StringRef();
Col = 0;
Line += 1;
return;
}
ParsingBuf = ParsingBuf.drop_front(LineEnd + 1);
Col = 0;
Line += 1;
}
// Peek without consuming: is the next buffered character a newline?
bool DataAggregator::checkNewLine() { return ParsingBuf.front() == '\n'; }
// Parse one perf branch-stack sample: a PID followed by a sequence of LBR
// entries up to the end of the line. Samples from unrelated processes yield
// errc::no_such_process; kernel-interrupt LBRs are filtered out and addresses
// are rebased for PIE binaries.
ErrorOr<DataAggregator::PerfBranchSample> DataAggregator::parseBranchSample() {
  PerfBranchSample Res;
  // Skip leading field separators.
  while (checkAndConsumeFS()) {
  }
  ErrorOr<int64_t> PIDRes = parseNumberField(FieldSeparator, true);
  if (std::error_code EC = PIDRes.getError())
    return EC;
  auto MMapInfoIter = BinaryMMapInfo.find(*PIDRes);
  // Sample belongs to a process we are not tracking (kernel profiling has no
  // per-process mapping).
  if (!BC->IsLinuxKernel && MMapInfoIter == BinaryMMapInfo.end()) {
    consumeRestOfLine();
    return make_error_code(errc::no_such_process);
  }
  if (checkAndConsumeNewLine())
    return Res;
  // Parse LBR entries until the end of line.
  while (!checkAndConsumeNewLine()) {
    checkAndConsumeFS();
    ErrorOr<LBREntry> LBRRes = parseLBREntry();
    if (std::error_code EC = LBRRes.getError())
      return EC;
    LBREntry LBR = LBRRes.get();
    if (ignoreKernelInterrupt(LBR))
      continue;
    // Rebase addresses when the binary is loaded at a non-fixed address.
    if (!BC->HasFixedLoadAddress)
      adjustLBR(LBR, MMapInfoIter->second);
    Res.LBR.push_back(LBR);
  }
  return Res;
}
// Parse one basic (no-brstack) perf sample: PID, event name and a sampled
// address. Samples from untracked processes yield an empty-name sample with a
// zero address; addresses are rebased for non-fixed load addresses.
ErrorOr<DataAggregator::PerfBasicSample> DataAggregator::parseBasicSample() {
  // Skip leading field separators.
  while (checkAndConsumeFS()) {
  }
  ErrorOr<int64_t> PIDRes = parseNumberField(FieldSeparator, true);
  if (std::error_code EC = PIDRes.getError())
    return EC;
  auto MMapInfoIter = BinaryMMapInfo.find(*PIDRes);
  if (MMapInfoIter == BinaryMMapInfo.end()) {
    // Untracked process: skip the rest of the line and return a null sample.
    consumeRestOfLine();
    return PerfBasicSample{StringRef(), 0};
  }
  while (checkAndConsumeFS()) {
  }
  ErrorOr<StringRef> Event = parseString(FieldSeparator);
  if (std::error_code EC = Event.getError())
    return EC;
  while (checkAndConsumeFS()) {
  }
  ErrorOr<uint64_t> AddrRes = parseHexField(FieldSeparator, true);
  if (std::error_code EC = AddrRes.getError())
    return EC;
  if (!checkAndConsumeNewLine()) {
    reportError("expected end of line");
    return make_error_code(llvm::errc::io_error);
  }
  uint64_t Address = *AddrRes;
  if (!BC->HasFixedLoadAddress)
    adjustAddress(Address, MMapInfoIter->second);
  return PerfBasicSample{Event.get(), Address};
}
// Parse one perf memory sample: PID, event name, data address and PC. Only
// "mem-loads" events are kept; untracked processes and other events return a
// zeroed sample after skipping the line. The data address is rebased for
// non-fixed load addresses.
ErrorOr<DataAggregator::PerfMemSample> DataAggregator::parseMemSample() {
  PerfMemSample Res{0, 0};
  // Skip leading field separators.
  while (checkAndConsumeFS()) {
  }
  ErrorOr<int64_t> PIDRes = parseNumberField(FieldSeparator, true);
  if (std::error_code EC = PIDRes.getError())
    return EC;
  auto MMapInfoIter = BinaryMMapInfo.find(*PIDRes);
  if (MMapInfoIter == BinaryMMapInfo.end()) {
    // Untracked process: skip the line.
    consumeRestOfLine();
    return Res;
  }
  while (checkAndConsumeFS()) {
  }
  ErrorOr<StringRef> Event = parseString(FieldSeparator);
  if (std::error_code EC = Event.getError())
    return EC;
  // Only memory-load events carry the data we aggregate.
  if (!Event.get().contains("mem-loads")) {
    consumeRestOfLine();
    return Res;
  }
  while (checkAndConsumeFS()) {
  }
  ErrorOr<uint64_t> AddrRes = parseHexField(FieldSeparator);
  if (std::error_code EC = AddrRes.getError())
    return EC;
  while (checkAndConsumeFS()) {
  }
  ErrorOr<uint64_t> PCRes = parseHexField(FieldSeparator, true);
  if (std::error_code EC = PCRes.getError()) {
    consumeRestOfLine();
    return EC;
  }
  if (!checkAndConsumeNewLine()) {
    reportError("expected end of line");
    return make_error_code(llvm::errc::io_error);
  }
  uint64_t Address = *AddrRes;
  if (!BC->HasFixedLoadAddress)
    adjustAddress(Address, MMapInfoIter->second);
  return PerfMemSample{PCRes.get(), Address};
}
// Parse either a bare hexadecimal offset or a "BuildID:Offset" location from
// the parsing buffer. The form is decided by looking ahead to the next token:
// a colon before the next space/newline selects the BuildID form.
ErrorOr<Location> DataAggregator::parseLocationOrOffset() {
  auto parseOffset = [this]() -> ErrorOr<Location> {
    ErrorOr<uint64_t> Offset = parseHexField(FieldSeparator);
    if (std::error_code EC = Offset.getError())
      return EC;
    return Location(Offset.get());
  };

  // Look ahead without consuming: bare offset unless the token has a colon.
  const size_t TokenEnd = ParsingBuf.find_first_of(" \n");
  if (TokenEnd == StringRef::npos ||
      !ParsingBuf.substr(0, TokenEnd).contains(':'))
    return parseOffset();

  ErrorOr<StringRef> BuildID = parseString(':');
  if (std::error_code EC = BuildID.getError())
    return EC;
  ErrorOr<uint64_t> Offset = parseHexField(FieldSeparator);
  if (std::error_code EC = Offset.getError())
    return EC;
  return Location(true, BuildID.get(), Offset.get());
}
// Parse one line of a pre-aggregated profile. Each line starts with a type
// marker (T/R/S/E/B/F/f/r, optionally preceded by an E event-name record),
// followed by a type-dependent number of locations and counters. Records are
// folded into BasicSamples, Traces, Returns or EventNames accordingly.
std::error_code DataAggregator::parseAggregatedLBREntry() {
  enum AggregatedLBREntry : char {
    INVALID = 0,
    EVENT_NAME,         // E
    TRACE,              // T
    RETURN,             // R
    SAMPLE,             // S
    BRANCH,             // B
    FT,                 // F
    FT_EXTERNAL_ORIGIN, // f
    FT_EXTERNAL_RETURN  // r
  } Type = INVALID;
  /// The number of fields to parse, set based on \p Type.
  int AddrNum = 0;
  int CounterNum = 0;
  /// Storage for parsed fields.
  StringRef EventName;
  std::optional<Location> Addr[3];
  int64_t Counters[2] = {0};
  /// Parse strings: record type and optionally an event name.
  while (Type == INVALID || Type == EVENT_NAME) {
    while (checkAndConsumeFS()) {
    }
    ErrorOr<StringRef> StrOrErr =
        parseString(FieldSeparator, Type == EVENT_NAME);
    if (std::error_code EC = StrOrErr.getError())
      return EC;
    StringRef Str = StrOrErr.get();
    if (Type == EVENT_NAME) {
      EventName = Str;
      break;
    }
    Type = StringSwitch<AggregatedLBREntry>(Str)
               .Case("T", TRACE)
               .Case("R", RETURN)
               .Case("S", SAMPLE)
               .Case("E", EVENT_NAME)
               .Case("B", BRANCH)
               .Case("F", FT)
               .Case("f", FT_EXTERNAL_ORIGIN)
               .Case("r", FT_EXTERNAL_RETURN)
               .Default(INVALID);
    if (Type == INVALID) {
      reportError("expected T, R, S, E, B, F, f or r");
      return make_error_code(llvm::errc::io_error);
    }
    // Field counts per record type: traces/returns carry three addresses,
    // samples one, event names none, everything else two. Branches carry two
    // counters (count + mispredictions); most others a single count.
    using SSI = StringSwitch<int>;
    AddrNum =
        SSI(Str).Cases({"T", "R"}, 3).Case("S", 1).Case("E", 0).Default(2);
    CounterNum = SSI(Str).Case("B", 2).Case("E", 0).Default(1);
  }
  /// Parse locations depending on entry type, recording them in \p Addr array.
  for (int I = 0; I < AddrNum; ++I) {
    while (checkAndConsumeFS()) {
    }
    ErrorOr<Location> AddrOrErr = parseLocationOrOffset();
    if (std::error_code EC = AddrOrErr.getError())
      return EC;
    Addr[I] = AddrOrErr.get();
  }
  /// Parse counters depending on entry type.
  for (int I = 0; I < CounterNum; ++I) {
    while (checkAndConsumeFS()) {
    }
    ErrorOr<int64_t> CountOrErr =
        parseNumberField(FieldSeparator, I + 1 == CounterNum);
    if (std::error_code EC = CountOrErr.getError())
      return EC;
    Counters[I] = CountOrErr.get();
  }
  /// Expect end of line here.
  if (!checkAndConsumeNewLine()) {
    reportError("expected end of line");
    return make_error_code(llvm::errc::io_error);
  }
  /// Record event name into \p EventNames and return.
  if (Type == EVENT_NAME) {
    EventNames.insert(EventName);
    return std::error_code();
  }
  const uint64_t FromOffset = Addr[0]->Offset;
  BinaryFunction *FromFunc = getBinaryFunctionContainingAddress(FromOffset);
  if (FromFunc)
    FromFunc->setHasProfileAvailable();
  int64_t Count = Counters[0];
  int64_t Mispreds = Counters[1];
  /// Record basic IP sample into \p BasicSamples and return.
  if (Type == SAMPLE) {
    BasicSamples[FromOffset] += Count;
    NumTotalSamples += Count;
    return std::error_code();
  }
  const uint64_t ToOffset = Addr[1]->Offset;
  BinaryFunction *ToFunc = getBinaryFunctionContainingAddress(ToOffset);
  if (ToFunc)
    ToFunc->setHasProfileAvailable();
  /// For fall-through types, adjust locations to match Trace container.
  if (Type == FT || Type == FT_EXTERNAL_ORIGIN || Type == FT_EXTERNAL_RETURN) {
    Addr[2] = Location(Addr[1]->Offset); // Trace To
    Addr[1] = Location(Addr[0]->Offset); // Trace From
    // Put a magic value into Trace Branch to differentiate from a full trace:
    if (Type == FT)
      Addr[0] = Location(Trace::FT_ONLY);
    else if (Type == FT_EXTERNAL_ORIGIN)
      Addr[0] = Location(Trace::FT_EXTERNAL_ORIGIN);
    else if (Type == FT_EXTERNAL_RETURN)
      Addr[0] = Location(Trace::FT_EXTERNAL_RETURN);
    else
      llvm_unreachable("Unexpected fall-through type");
  }
  /// For branch type, mark Trace To to differentiate from a full trace.
  if (Type == BRANCH)
    Addr[2] = Location(Trace::BR_ONLY);
  if (Type == RETURN) {
    if (!Addr[0]->Offset)
      Addr[0]->Offset = Trace::FT_EXTERNAL_RETURN;
    else
      Returns.emplace(Addr[0]->Offset, true);
  }
  /// Record a trace.
  Trace T{Addr[0]->Offset, Addr[1]->Offset, Addr[2]->Offset};
  TakenBranchInfo TI{(uint64_t)Count, (uint64_t)Mispreds};
  Traces.emplace_back(T, TI);
  NumTotalSamples += Count;
  return std::error_code();
}
// Under -ignore-interrupt-lbr, drop any LBR entry whose source or
// destination falls into the kernel address range.
bool DataAggregator::ignoreKernelInterrupt(LBREntry &LBR) const {
  if (!opts::IgnoreInterruptLBR)
    return false;
  return LBR.From >= KernelBaseAddr || LBR.To >= KernelBaseAddr;
}
// Build and emit an execution heatmap from the collected basic samples and
// LBR traces: register sample addresses and trace ranges into buckets, print
// the map plus CDF and per-section hotness, then re-emit coarser maps for any
// additional zoom-out bucket sizes. Exits the process on empty profiles.
std::error_code DataAggregator::printLBRHeatMap() {
  outs() << "PERF2BOLT: parse branch events...\n";
  NamedRegionTimer T("buildHeatmap", "Building heatmap", TimerGroupName,
                     TimerGroupDesc, opts::TimeAggregator);
  // Kernel profiling covers the whole upper address range.
  if (BC->IsLinuxKernel) {
    opts::HeatmapMaxAddress = 0xffffffffffffffff;
    opts::HeatmapMinAddress = KernelBaseAddr;
  }
  opts::HeatmapBlockSizes &HMBS = opts::HeatmapBlock;
  Heatmap HM(HMBS[0], opts::HeatmapMinAddress, opts::HeatmapMaxAddress,
             getTextSections(BC));
  auto getSymbolValue = [&](const MCSymbol *Symbol) -> uint64_t {
    if (Symbol)
      if (ErrorOr<uint64_t> SymValue = BC->getSymbolValue(*Symbol))
        return SymValue.get();
    return 0;
  };
  HM.HotStart = getSymbolValue(BC->getHotTextStartSymbol());
  HM.HotEnd = getSymbolValue(BC->getHotTextEndSymbol());
  if (!NumTotalSamples) {
    if (opts::BasicAggregation) {
      errs() << "HEATMAP-ERROR: no basic event samples detected in profile. "
                "Cannot build heatmap.";
    } else {
      errs() << "HEATMAP-ERROR: no brstack traces detected in profile. "
                "Cannot build heatmap. Use -ba for building heatmap from "
                "basic events.\n";
    }
    exit(1);
  }
  outs() << "HEATMAP: building heat map...\n";
  // Register basic samples and perf LBR addresses not covered by fallthroughs.
  for (const auto &[PC, Hits] : BasicSamples)
    HM.registerAddress(PC, Hits);
  for (const auto &[Trace, Info] : Traces)
    if (Trace.To != Trace::BR_ONLY)
      HM.registerAddressRange(Trace.From, Trace.To, Info.TakenCount);
  if (HM.getNumInvalidRanges())
    outs() << "HEATMAP: invalid traces: " << HM.getNumInvalidRanges() << '\n';
  if (!HM.size()) {
    errs() << "HEATMAP-ERROR: no valid traces registered\n";
    exit(1);
  }
  HM.print(opts::HeatmapOutput);
  // On stdout, append CDF/hotness inline; otherwise write sibling CSV files.
  if (opts::HeatmapOutput == "-") {
    HM.printCDF(opts::HeatmapOutput);
    HM.printSectionHotness(opts::HeatmapOutput);
  } else {
    HM.printCDF(opts::HeatmapOutput + ".csv");
    HM.printSectionHotness(opts::HeatmapOutput + "-section-hotness.csv");
  }
  // Provide coarse-grained heatmaps if requested via zoom-out scales
  for (const uint64_t NewBucketSize : ArrayRef(HMBS).drop_front()) {
    HM.resizeBucket(NewBucketSize);
    if (opts::HeatmapOutput == "-")
      HM.print(opts::HeatmapOutput)
;
    else
      HM.print(formatv("{0}-{1}", opts::HeatmapOutput, NewBucketSize).str());
  }
  return std::error_code();
}
// Fold one parsed branch-stack sample into TraceMap: consecutive LBR entries
// (stored newest-first) are paired into traces keyed by (From, To, next
// From). Optionally applies the Skylake duplicate-entry workaround and, in
// exclusive heatmap mode, records stack-edge addresses as basic samples.
void DataAggregator::parseLBRSample(const PerfBranchSample &Sample,
                                    bool NeedsSkylakeFix) {
  // LBRs are stored in reverse execution order. NextLBR refers to the next
  // executed branch record.
  const LBREntry *NextLBR = nullptr;
  uint32_t NumEntry = 0;
  for (const LBREntry &LBR : Sample.LBR) {
    ++NumEntry;
    // Hardware bug workaround: Intel Skylake (which has 32 LBR entries)
    // sometimes record entry 32 as an exact copy of entry 31. This will cause
    // us to likely record an invalid trace and generate a stale function for
    // BAT mode (non BAT disassembles the function and is able to ignore this
    // trace at aggregation time). Drop first 2 entries (last two, in
    // chronological order)
    if (NeedsSkylakeFix && NumEntry <= 2)
      continue;
    // The newest entry has no successor yet; mark it as branch-only.
    uint64_t TraceTo = NextLBR ? NextLBR->From : Trace::BR_ONLY;
    NextLBR = &LBR;
    TakenBranchInfo &Info = TraceMap[Trace{LBR.From, LBR.To, TraceTo}];
    ++Info.TakenCount;
    Info.MispredCount += LBR.Mispred;
  }
  // Record LBR addresses not covered by fallthroughs (bottom-of-stack source
  // and top-of-stack target) as basic samples for heatmap.
  if (opts::HeatmapMode == opts::HeatmapModeKind::HM_Exclusive &&
      !Sample.LBR.empty()) {
    ++BasicSamples[Sample.LBR.front().To];
    ++BasicSamples[Sample.LBR.back().From];
  }
}
// Report the count (and share, when traces exist) of traces that touched
// unknown code regions.
void DataAggregator::printLongRangeTracesDiagnostic() const {
  outs() << "PERF2BOLT: out of range traces involving unknown regions: "
         << NumLongRangeTraces;
  if (NumTraces > 0) {
    const float Share = NumLongRangeTraces * 100.0f / NumTraces;
    outs() << format(" (%.1f%%)", Share);
  }
  outs() << "\n";
}
// Print " (P%)" followed by a newline, colored green/yellow/red according to
// the thresholds T1 < T2, and return the percentage. A zero denominator
// prints only a newline and returns 0.
static float printColoredPct(uint64_t Numerator, uint64_t Denominator, float T1,
                             float T2) {
  if (Denominator == 0) {
    outs() << "\n";
    return 0;
  }
  const float Percent = Numerator * 100.0f / Denominator;
  const bool UseColor = outs().has_colors();
  outs() << " (";
  if (UseColor) {
    raw_ostream::Colors Color = raw_ostream::GREEN;
    if (Percent > T2)
      Color = raw_ostream::RED;
    else if (Percent > T1)
      Color = raw_ostream::YELLOW;
    outs().changeColor(Color);
  }
  outs() << format("%.1f%%", Percent);
  if (UseColor)
    outs().resetColor();
  outs() << ")\n";
  return Percent;
}
// Report the share of traces that did not match the disassembled function
// contents, warning loudly above 10%, then append the long-range diagnostic.
void DataAggregator::printBranchSamplesDiagnostics() const {
  outs() << "PERF2BOLT: traces mismatching disassembled function contents: "
         << NumInvalidTraces;
  const float InvalidPct = printColoredPct(NumInvalidTraces, NumTraces, 5, 10);
  if (InvalidPct > 10)
    outs() << "\n !! WARNING !! This high mismatch ratio indicates the input "
              "binary is probably not the same binary used during profiling "
              "collection. The generated data may be ineffective for improving "
              "performance\n\n";
  printLongRangeTracesDiagnostic();
}
// Report the share of basic samples landing in unknown regions, warning
// loudly above 80%.
void DataAggregator::printBasicSamplesDiagnostics(
    uint64_t OutOfRangeSamples) const {
  outs() << "PERF2BOLT: out of range samples recorded in unknown regions: "
         << OutOfRangeSamples;
  const float OutOfRangePct =
      printColoredPct(OutOfRangeSamples, NumTotalSamples, 40, 60);
  if (OutOfRangePct > 80)
    outs() << "\n !! WARNING !! This high mismatch ratio indicates the input "
              "binary is probably not the same binary used during profiling "
              "collection. The generated data may be ineffective for improving "
              "performance\n\n";
}
// Report how many samples were ignored, warning when more than half of all
// recorded samples could not be attributed to the input binary.
void DataAggregator::printBranchStacksDiagnostics(
    uint64_t IgnoredSamples) const {
  outs() << "PERF2BOLT: ignored samples: " << IgnoredSamples;
  const float IgnoredPct =
      printColoredPct(IgnoredSamples, NumTotalSamples, 20, 50);
  if (IgnoredPct > 50)
    errs() << "PERF2BOLT-WARNING: less than 50% of all recorded samples "
              "were attributed to the input binary\n";
}
// Parse all branch-stack samples from the perf script output (up to
// opts::MaxSamples), folding them into TraceMap via parseLBRSample, then
// flatten TraceMap into the Traces vector and mark functions with available
// profile. Emits diagnostics about missing brstack data.
std::error_code DataAggregator::parseBranchEvents() {
  std::string BranchEventTypeStr =
      opts::ArmSPE ? "SPE branch events in brstack-format" : "branch events";
  outs() << "PERF2BOLT: parse " << BranchEventTypeStr << "...\n";
  NamedRegionTimer T("parseBranch", "Parsing branch events", TimerGroupName,
                     TimerGroupDesc, opts::TimeAggregator);
  uint64_t NumEntries = 0;
  uint64_t NumSamples = 0;
  uint64_t NumSamplesNoLBR = 0;
  bool NeedsSkylakeFix = false;
  while (hasData() && NumTotalSamples < opts::MaxSamples) {
    ++NumTotalSamples;
    ErrorOr<PerfBranchSample> SampleRes = parseBranchSample();
    if (std::error_code EC = SampleRes.getError()) {
      // Samples from other processes are skipped, not fatal.
      if (EC == errc::no_such_process)
        continue;
      return EC;
    }
    ++NumSamples;
    PerfBranchSample &Sample = SampleRes.get();
    if (Sample.LBR.empty()) {
      ++NumSamplesNoLBR;
      continue;
    }
    NumEntries += Sample.LBR.size();
    // A full 32-entry stack on x86 in BAT mode may hit the Skylake
    // duplicate-top-entry erratum; enable the workaround once.
    if (this->BC->isX86() && BAT && Sample.LBR.size() == 32 &&
        !NeedsSkylakeFix) {
      errs() << "PERF2BOLT-WARNING: using Intel Skylake bug workaround\n";
      NeedsSkylakeFix = true;
    }
    parseLBRSample(Sample, NeedsSkylakeFix);
  }
  // Flatten the aggregation map into a vector and release the map.
  Traces.reserve(TraceMap.size());
  for (const auto &[Trace, Info] : TraceMap) {
    Traces.emplace_back(Trace, Info);
    for (const uint64_t Addr : {Trace.Branch, Trace.From})
      if (BinaryFunction *BF = getBinaryFunctionContainingAddress(Addr))
        BF->setHasProfileAvailable();
  }
  clear(TraceMap);
  outs() << "PERF2BOLT: read " << NumSamples << " samples and " << NumEntries
         << " brstack entries\n";
  if (NumTotalSamples) {
    if (NumSamples && NumSamplesNoLBR == NumSamples) {
      // Note: we don't know if perf2bolt is being used to parse memory samples
      // at this point. In this case, it is OK to parse zero LBRs.
      if (!opts::ArmSPE)
        errs()
            << "PERF2BOLT-WARNING: all recorded samples for this binary lack "
               "brstack. Record profile with perf record -j any or run "
               "perf2bolt "
               "in non-brstack mode with -ba (the performance improvement in "
               "-ba "
               "mode may be limited)\n";
      else
        errs()
            << "PERF2BOLT-WARNING: All recorded samples for this binary lack "
               "SPE brstack entries. Make sure you are running Linux perf 6.14 "
               "or later, otherwise you get zero samples. Record the profile "
               "with: perf record -e 'arm_spe_0/branch_filter=1/'.";
    } else {
      printBranchStacksDiagnostics(NumTotalSamples - NumSamples);
    }
  }
  return std::error_code();
}
// Convert collected traces into profile data: taken branches go through
// doBranch (returns excluded), and trace ranges are expanded into
// fall-throughs via doTrace. Prints trace diagnostics when done.
void DataAggregator::processBranchEvents() {
  outs() << "PERF2BOLT: processing branch events...\n";
  NamedRegionTimer T("processBranch", "Processing branch events",
                     TimerGroupName, TimerGroupDesc, opts::TimeAggregator);
  // Pre-seed the external-return sentinel so it is classified as a return.
  Returns.emplace(Trace::FT_EXTERNAL_RETURN, true);
  for (const auto &[Trace, Info] : Traces) {
    bool IsReturn = checkReturn(Trace.Branch);
    // Ignore returns.
    if (!IsReturn && Trace.Branch != Trace::FT_ONLY &&
        Trace.Branch != Trace::FT_EXTERNAL_ORIGIN)
      doBranch(Trace.Branch, Trace.From, Info.TakenCount, Info.MispredCount);
    if (Trace.To != Trace::BR_ONLY)
      doTrace(Trace, Info.TakenCount, IsReturn);
  }
  printBranchSamplesDiagnostics();
}
// Parse all basic (no-brstack) samples from the perf script output into the
// BasicSamples histogram, marking touched functions as having profile data
// and recording the set of event names encountered.
std::error_code DataAggregator::parseBasicEvents() {
  outs() << "PERF2BOLT: parsing basic events (without brstack)...\n";
  NamedRegionTimer T("parseBasic", "Parsing basic events", TimerGroupName,
                     TimerGroupDesc, opts::TimeAggregator);
  while (hasData()) {
    ErrorOr<PerfBasicSample> SampleRes = parseBasicSample();
    if (std::error_code EC = SampleRes.getError())
      return EC;
    // Samples not attributed to the input binary come back with PC == 0.
    if (!SampleRes->PC)
      continue;
    ++NumTotalSamples;
    if (BinaryFunction *Func =
            getBinaryFunctionContainingAddress(SampleRes->PC))
      Func->setHasProfileAvailable();
    ++BasicSamples[SampleRes->PC];
    EventNames.insert(SampleRes->EventName);
  }
  outs() << "PERF2BOLT: read " << NumTotalSamples << " basic samples\n";
  return std::error_code();
}
// Attribute each collected basic sample to its containing function via
// doBasicSample; samples outside any known function are tallied and reported
// through the diagnostics printer.
void DataAggregator::processBasicEvents() {
  outs() << "PERF2BOLT: processing basic events (without brstack)...\n";
  NamedRegionTimer T("processBasic", "Processing basic events", TimerGroupName,
                     TimerGroupDesc, opts::TimeAggregator);
  uint64_t OutOfRangeSamples = 0;
  for (const auto &[PC, HitCount] : BasicSamples) {
    if (BinaryFunction *Func = getBinaryFunctionContainingAddress(PC))
      doBasicSample(*Func, PC, HitCount);
    else
      OutOfRangeSamples += HitCount;
  }
  printBasicSamplesDiagnostics(OutOfRangeSamples);
}
// Parse all memory samples from the perf script output, keeping only those
// whose PC falls inside a known function, and mark those functions as having
// profile data available.
std::error_code DataAggregator::parseMemEvents() {
  outs() << "PERF2BOLT: parsing memory events...\n";
  NamedRegionTimer T("parseMemEvents", "Parsing mem events", TimerGroupName,
                     TimerGroupDesc, opts::TimeAggregator);
  while (hasData()) {
    ErrorOr<PerfMemSample> SampleRes = parseMemSample();
    if (std::error_code EC = SampleRes.getError())
      return EC;
    BinaryFunction *Func = getBinaryFunctionContainingAddress(SampleRes->PC);
    if (!Func)
      continue;
    Func->setHasProfileAvailable();
    MemSamples.emplace_back(std::move(SampleRes.get()));
  }
  return std::error_code();
}
// Convert collected memory samples into per-function memory profile data:
// resolve the sampled PC to a function and the accessed address to a binary
// data symbol (optionally filtering out unresolved heap/stack accesses), then
// record the (PC, address) pair in NamesToMemEvents.
void DataAggregator::processMemEvents() {
  NamedRegionTimer T("ProcessMemEvents", "Processing mem events",
                     TimerGroupName, TimerGroupDesc, opts::TimeAggregator);
  for (const PerfMemSample &Sample : MemSamples) {
    uint64_t PC = Sample.PC;
    uint64_t Addr = Sample.Addr;
    StringRef FuncName;
    StringRef MemName;

    // Try to resolve symbol for PC
    BinaryFunction *Func = getBinaryFunctionContainingAddress(PC);
    if (!Func) {
      LLVM_DEBUG(if (PC != 0) {
        dbgs() << formatv("Skipped mem event: {0:x} => {1:x}\n", PC, Addr);
      });
      continue;
    }

    // Record the PC as a function-relative offset.
    FuncName = Func->getOneName();
    PC -= Func->getAddress();

    // Try to resolve symbol for memory load
    if (BinaryData *BD = BC->getBinaryDataContainingAddress(Addr)) {
      MemName = BD->getName();
      Addr -= BD->getAddress();
    } else if (opts::FilterMemProfile) {
      // Filter out heap/stack accesses
      continue;
    }

    const Location FuncLoc(!FuncName.empty(), FuncName, PC);
    const Location AddrLoc(!MemName.empty(), MemName, Addr);

    FuncMemData *MemData = &NamesToMemEvents[FuncName];
    MemData->Name = FuncName;
    setMemData(*Func, MemData);
    MemData->update(FuncLoc, AddrLoc);
    LLVM_DEBUG(dbgs() << "Mem event: " << FuncLoc << " = " << AddrLoc << "\n");
  }
}
// Parse an entire pre-aggregated profile, one record per line, and report how
// many entries were read.
std::error_code DataAggregator::parsePreAggregatedLBRSamples() {
  outs() << "PERF2BOLT: parsing pre-aggregated profile...\n";
  NamedRegionTimer T("parseAggregated", "Parsing aggregated branch events",
                     TimerGroupName, TimerGroupDesc, opts::TimeAggregator);
  size_t NumEntries = 0;
  for (; hasData(); ++NumEntries)
    if (std::error_code EC = parseAggregatedLBREntry())
      return EC;
  outs() << "PERF2BOLT: read " << NumEntries
         << " aggregated brstack entries\n";
  return std::error_code();
}
// Scan the current line for a "PERF_RECORD_COMM exec" record and extract the
// PID from it. Returns std::nullopt when the line is not a COMM-exec record
// or is malformed. Does not consume the line.
std::optional<int32_t> DataAggregator::parseCommExecEvent() {
  size_t LineEnd = ParsingBuf.find_first_of("\n");
  if (LineEnd == StringRef::npos) {
    reportError("expected rest of line");
    Diag << "Found: " << ParsingBuf << "\n";
    return std::nullopt;
  }
  StringRef Line = ParsingBuf.substr(0, LineEnd);

  size_t Pos = Line.find("PERF_RECORD_COMM exec");
  if (Pos == StringRef::npos)
    return std::nullopt;
  Line = Line.drop_front(Pos);

  // Line:
  //  PERF_RECORD_COMM exec: <name>:<pid>/<tid>"
  StringRef PIDStr = Line.rsplit(':').second.split('/').first;
  int32_t PID;
  if (PIDStr.getAsInteger(10, PID)) {
    reportError("expected PID");
    Diag << "Found: " << PIDStr << "in '" << Line << "'\n";
    return std::nullopt;
  }

  return PID;
}
namespace {
// Convert a perf timestamp of the form "<sec>.<usec>" into a microsecond
// count. Returns std::nullopt when either component fails to parse as a
// decimal integer.
std::optional<uint64_t> parsePerfTime(const StringRef TimeStr) {
  uint64_t Seconds;
  uint64_t Microseconds;
  const auto [SecStr, USecStr] = TimeStr.split('.');
  if (SecStr.getAsInteger(10, Seconds) ||
      USecStr.getAsInteger(10, Microseconds))
    return std::nullopt;
  return Seconds * 1000000ULL + Microseconds;
}
}
// Scan the current line for a "PERF_RECORD_FORK" record, extracting the
// child/parent PIDs and (when present) the event timestamp. Consumes the line
// on success and on non-fork lines; returns std::nullopt when the line is not
// a fork record or is malformed.
std::optional<DataAggregator::ForkInfo> DataAggregator::parseForkEvent() {
  while (checkAndConsumeFS()) {
  }

  size_t LineEnd = ParsingBuf.find_first_of("\n");
  if (LineEnd == StringRef::npos) {
    reportError("expected rest of line");
    Diag << "Found: " << ParsingBuf << "\n";
    return std::nullopt;
  }
  StringRef Line = ParsingBuf.substr(0, LineEnd);

  size_t Pos = Line.find("PERF_RECORD_FORK");
  if (Pos == StringRef::npos) {
    consumeRestOfLine();
    return std::nullopt;
  }

  ForkInfo FI;

  // The timestamp precedes the record marker; a parse failure leaves FI.Time
  // at its default.
  const StringRef TimeStr =
      Line.substr(0, Pos).rsplit(':').first.rsplit(FieldSeparator).second;
  if (std::optional<uint64_t> TimeRes = parsePerfTime(TimeStr)) {
    FI.Time = *TimeRes;
  }

  Line = Line.drop_front(Pos);

  // Line:
  //  PERF_RECORD_FORK(<child_pid>:<child_tid>):(<parent_pid>:<parent_tid>)
  const StringRef ChildPIDStr = Line.split('(').second.split(':').first;
  if (ChildPIDStr.getAsInteger(10, FI.ChildPID)) {
    reportError("expected PID");
    Diag << "Found: " << ChildPIDStr << "in '" << Line << "'\n";
    return std::nullopt;
  }

  const StringRef ParentPIDStr = Line.rsplit('(').second.split(':').first;
  if (ParentPIDStr.getAsInteger(10, FI.ParentPID)) {
    reportError("expected PID");
    Diag << "Found: " << ParentPIDStr << "in '" << Line << "'\n";
    return std::nullopt;
  }

  consumeRestOfLine();

  return FI;
}
/// Parse a single perf PERF_RECORD_MMAP2 line, extracting the mapped file's
/// base name together with its PID, base address, size, and file offset.
/// Non-MMAP2 lines and special/anonymous mappings (names starting with "//"
/// or "[") yield an empty file name with ParsedInfo left at defaults; a
/// malformed MMAP2 record yields io_error.
ErrorOr<std::pair<StringRef, DataAggregator::MMapInfo>>
DataAggregator::parseMMapEvent() {
  // Skip any leading field separators.
  while (checkAndConsumeFS()) {
  }

  MMapInfo ParsedInfo;

  size_t LineEnd = ParsingBuf.find_first_of("\n");
  if (LineEnd == StringRef::npos) {
    reportError("expected rest of line");
    Diag << "Found: " << ParsingBuf << "\n";
    return make_error_code(llvm::errc::io_error);
  }
  StringRef Line = ParsingBuf.substr(0, LineEnd);

  size_t Pos = Line.find("PERF_RECORD_MMAP2");
  if (Pos == StringRef::npos) {
    consumeRestOfLine();
    return std::make_pair(StringRef(), ParsedInfo);
  }

  // Line:
  //   {<name> .* <sec>.<usec>: }PERF_RECORD_MMAP2 <pid>/<tid>: .* <file_name>

  // Timestamp precedes the record name; a failed parse leaves Time default.
  const StringRef TimeStr =
      Line.substr(0, Pos).rsplit(':').first.rsplit(FieldSeparator).second;
  if (std::optional<uint64_t> TimeRes = parsePerfTime(TimeStr))
    ParsedInfo.Time = *TimeRes;

  Line = Line.drop_front(Pos);

  // Line:
  //   PERF_RECORD_MMAP2 <pid>/<tid>: [<hexbase>(<hexsize>) .*]: .* <file_name>
  StringRef FileName = Line.rsplit(FieldSeparator).second;
  if (FileName.starts_with("//") || FileName.starts_with("[")) {
    // Anonymous or special mappings (e.g. "[vdso]") carry no file to match.
    consumeRestOfLine();
    return std::make_pair(StringRef(), ParsedInfo);
  }
  FileName = sys::path::filename(FileName);

  const StringRef PIDStr = Line.split(FieldSeparator).second.split('/').first;
  if (PIDStr.getAsInteger(10, ParsedInfo.PID)) {
    reportError("expected PID");
    Diag << "Found: " << PIDStr << "in '" << Line << "'\n";
    return make_error_code(llvm::errc::io_error);
  }

  const StringRef BaseAddressStr = Line.split('[').second.split('(').first;
  if (BaseAddressStr.getAsInteger(0, ParsedInfo.MMapAddress)) {
    reportError("expected base address");
    Diag << "Found: " << BaseAddressStr << "in '" << Line << "'\n";
    return make_error_code(llvm::errc::io_error);
  }

  const StringRef SizeStr = Line.split('(').second.split(')').first;
  if (SizeStr.getAsInteger(0, ParsedInfo.Size)) {
    reportError("expected mmaped size");
    Diag << "Found: " << SizeStr << "in '" << Line << "'\n";
    return make_error_code(llvm::errc::io_error);
  }

  const StringRef OffsetStr =
      Line.split('@').second.ltrim().split(FieldSeparator).first;
  if (OffsetStr.getAsInteger(0, ParsedInfo.Offset)) {
    reportError("expected mmaped page-aligned offset");
    Diag << "Found: " << OffsetStr << "in '" << Line << "'\n";
    return make_error_code(llvm::errc::io_error);
  }

  consumeRestOfLine();
  return std::make_pair(FileName, ParsedInfo);
}
/// Parse all mmap events from the perf script output and select the mappings
/// belonging to the input binary — matched by file name, falling back to the
/// build-id-derived name.  Populates BinaryMMapInfo keyed by PID; exits the
/// process with an error when no usable mapping is found.
std::error_code DataAggregator::parseMMapEvents() {
  outs() << "PERF2BOLT: parsing perf-script mmap events output\n";
  NamedRegionTimer T("parseMMapEvents", "Parsing mmap events", TimerGroupName,
                     TimerGroupDesc, opts::TimeAggregator);

  // Collect every mapping first, keyed by mapped file name.
  std::multimap<StringRef, MMapInfo> GlobalMMapInfo;
  while (hasData()) {
    ErrorOr<std::pair<StringRef, MMapInfo>> FileMMapInfoRes = parseMMapEvent();
    if (std::error_code EC = FileMMapInfoRes.getError())
      return EC;

    std::pair<StringRef, MMapInfo> FileMMapInfo = FileMMapInfoRes.get();
    // PID stays -1 for records parseMMapEvent deemed uninteresting.
    if (FileMMapInfo.second.PID == -1)
      continue;
    if (FileMMapInfo.first == "(deleted)")
      continue;

    GlobalMMapInfo.insert(FileMMapInfo);
  }

  LLVM_DEBUG({
    dbgs() << "FileName -> mmap info:\n"
           << " Filename : PID [MMapAddr, Size, Offset]\n";
    for (const auto &[Name, MMap] : GlobalMMapInfo)
      dbgs() << formatv(" {0} : {1} [{2:x}, {3:x} @ {4:x}]\n", Name, MMap.PID,
                        MMap.MMapAddress, MMap.Size, MMap.Offset);
  });

  StringRef NameToUse = llvm::sys::path::filename(BC->getFilename());
  // Fall back to the name recovered from the build-id when the binary's own
  // name has no mapping in the profile.
  if (GlobalMMapInfo.count(NameToUse) == 0 && !BuildIDBinaryName.empty()) {
    errs() << "PERF2BOLT-WARNING: using \"" << BuildIDBinaryName
           << "\" for profile matching\n";
    NameToUse = BuildIDBinaryName;
  }

  auto Range = GlobalMMapInfo.equal_range(NameToUse);
  for (MMapInfo &MMapInfo : llvm::make_second_range(make_range(Range))) {
    if (BC->HasFixedLoadAddress && MMapInfo.MMapAddress) {
      // Check that the binary mapping matches one of the segments.
      bool MatchFound = llvm::any_of(
          llvm::make_second_range(BC->SegmentMapInfo),
          [&](SegmentInfo &SegInfo) {
            // The mapping is page-aligned and hence the MMapAddress could be
            // different from the segment start address. We cannot know the page
            // size of the mapping, but we know it should not exceed the segment
            // alignment value. Hence we are performing an approximate check.
            return SegInfo.Address >= MMapInfo.MMapAddress &&
                   SegInfo.Address - MMapInfo.MMapAddress < SegInfo.Alignment &&
                   SegInfo.IsExecutable;
          });
      if (!MatchFound) {
        errs() << "PERF2BOLT-WARNING: ignoring mapping of " << NameToUse
               << " at 0x" << Twine::utohexstr(MMapInfo.MMapAddress) << '\n';
        continue;
      }
    }

    // Set base address for shared objects.
    if (!BC->HasFixedLoadAddress) {
      std::optional<uint64_t> BaseAddress =
          BC->getBaseAddressForMapping(MMapInfo.MMapAddress, MMapInfo.Offset);
      if (!BaseAddress) {
        errs() << "PERF2BOLT-WARNING: unable to find base address of the "
                  "binary when memory mapped at 0x"
               << Twine::utohexstr(MMapInfo.MMapAddress)
               << " using file offset 0x" << Twine::utohexstr(MMapInfo.Offset)
               << ". Ignoring profile data for this mapping\n";
        continue;
      }
      MMapInfo.BaseAddress = *BaseAddress;
    }

    // Try to add MMapInfo to the map and update its size. Large binaries may
    // span to multiple text segments, so the mapping is inserted only on the
    // first occurrence.
    if (!BinaryMMapInfo.insert(std::make_pair(MMapInfo.PID, MMapInfo)).second)
      assert(MMapInfo.BaseAddress == BinaryMMapInfo[MMapInfo.PID].BaseAddress &&
             "Base address on multiple segment mappings should match");

    // Update mapping size.
    const uint64_t EndAddress = MMapInfo.MMapAddress + MMapInfo.Size;
    const uint64_t Size = EndAddress - BinaryMMapInfo[MMapInfo.PID].BaseAddress;
    if (Size > BinaryMMapInfo[MMapInfo.PID].Size)
      BinaryMMapInfo[MMapInfo.PID].Size = Size;
  }

  if (BinaryMMapInfo.empty()) {
    // Fatal: nothing in the profile matched the input binary.
    if (errs().has_colors())
      errs().changeColor(raw_ostream::RED);
    errs() << "PERF2BOLT-ERROR: could not find a profile matching binary \""
           << BC->getFilename() << "\".";
    if (!GlobalMMapInfo.empty()) {
      errs() << " Profile for the following binary name(s) is available:\n";
      // upper_bound(I->first) jumps to the next distinct file name.
      for (auto I = GlobalMMapInfo.begin(), IE = GlobalMMapInfo.end(); I != IE;
           I = GlobalMMapInfo.upper_bound(I->first))
        errs() << " " << I->first << '\n';
      errs() << "Please rename the input binary.\n";
    } else {
      errs() << " Failed to extract any binary name from a profile.\n";
    }
    if (errs().has_colors())
      errs().resetColor();
    exit(1);
  }

  return std::error_code();
}
/// Parse perf-script task events (COMM-exec and FORK records) to propagate
/// the binary's mmap info from parent processes to their forked children,
/// and to drop forked children that subsequently exec'ed a different image.
std::error_code DataAggregator::parseTaskEvents() {
  outs() << "PERF2BOLT: parsing perf-script task events output\n";
  NamedRegionTimer T("parseTaskEvents", "Parsing task events", TimerGroupName,
                     TimerGroupDesc, opts::TimeAggregator);

  while (hasData()) {
    if (std::optional<int32_t> CommInfo = parseCommExecEvent()) {
      // Remove forked child that ran execve
      auto MMapInfoIter = BinaryMMapInfo.find(*CommInfo);
      if (MMapInfoIter != BinaryMMapInfo.end() && MMapInfoIter->second.Forked)
        BinaryMMapInfo.erase(MMapInfoIter);
      consumeRestOfLine();
      continue;
    }

    std::optional<ForkInfo> ForkInfo = parseForkEvent();
    if (!ForkInfo)
      continue;

    // Same parent and child PID: nothing new to map.
    if (ForkInfo->ParentPID == ForkInfo->ChildPID)
      continue;

    if (ForkInfo->Time == 0) {
      // Process was forked and mmaped before perf ran. In this case the child
      // should have its own mmap entry unless it was execve'd.
      continue;
    }

    // Inherit the parent's mapping under the child's PID.
    auto MMapInfoIter = BinaryMMapInfo.find(ForkInfo->ParentPID);
    if (MMapInfoIter == BinaryMMapInfo.end())
      continue;

    MMapInfo MMapInfo = MMapInfoIter->second;
    MMapInfo.PID = ForkInfo->ChildPID;
    MMapInfo.Forked = true;
    BinaryMMapInfo.insert(std::make_pair(MMapInfo.PID, MMapInfo));
  }

  outs() << "PERF2BOLT: input binary is associated with "
         << BinaryMMapInfo.size() << " PID(s)\n";

  LLVM_DEBUG({
    for (const MMapInfo &MMI : llvm::make_second_range(BinaryMMapInfo))
      outs() << formatv(" {0}{1}: ({2:x}: {3:x})\n", MMI.PID,
                        (MMI.Forked ? " (forked)" : ""), MMI.MMapAddress,
                        MMI.Size);
  });

  return std::error_code();
}
/// Parse one line of `perf buildid-list` output: "<build-id> <file-name>".
/// Returns (file-name, build-id) on success.  When either field is missing
/// or malformed, returns std::nullopt without raising a parse error.
std::optional<std::pair<StringRef, StringRef>>
DataAggregator::parseNameBuildIDPair() {
  // Skip any leading field separators.
  while (checkAndConsumeFS()) {
  }

  ErrorOr<StringRef> BuildIDStr = parseString(FieldSeparator, true);
  if (std::error_code EC = BuildIDStr.getError())
    return std::nullopt;

  // If one of the strings is missing, don't issue a parsing error, but still
  // do not return a value.
  consumeAllRemainingFS();
  if (checkNewLine())
    return std::nullopt;

  ErrorOr<StringRef> NameStr = parseString(FieldSeparator, true);
  if (std::error_code EC = NameStr.getError())
    return std::nullopt;

  consumeRestOfLine();
  return std::make_pair(NameStr.get(), BuildIDStr.get());
}
bool DataAggregator::hasAllBuildIDs() {
const StringRef SavedParsingBuf = ParsingBuf;
if (!hasData())
return false;
bool HasInvalidEntries = false;
while (hasData()) {
if (!parseNameBuildIDPair()) {
HasInvalidEntries = true;
break;
}
}
ParsingBuf = SavedParsingBuf;
return !HasInvalidEntries;
}
/// Scan the buildid-list output for an entry whose build-id begins with
/// \p FileBuildID and return that entry's base file name.  The parsing
/// buffer is restored before returning; std::nullopt when nothing matches.
std::optional<StringRef>
DataAggregator::getFileNameForBuildID(StringRef FileBuildID) {
  const StringRef Saved = ParsingBuf;

  std::optional<StringRef> Match;
  while (hasData() && !Match) {
    std::optional<std::pair<StringRef, StringRef>> Pair =
        parseNameBuildIDPair();
    if (!Pair) {
      // Malformed line: skip it and keep scanning.
      consumeRestOfLine();
      continue;
    }
    if (Pair->second.starts_with(FileBuildID))
      Match = sys::path::filename(Pair->first);
  }

  ParsingBuf = Saved;
  if (Match && !Match->empty())
    return Match;
  return std::nullopt;
}
/// Write the aggregated profile to \p OutputFilename in fdata text format:
/// an optional "boltedcollection" marker, then either basic samples
/// ("no_lbr" mode) or branch records followed by memory records.
std::error_code
DataAggregator::writeAggregatedFile(StringRef OutputFilename) const {
  std::error_code EC;
  raw_fd_ostream OutFile(OutputFilename, EC, sys::fs::OpenFlags::OF_None);
  if (EC)
    return EC;

  bool WriteMemLocs = false;

  // Emit one location: a kind marker ("4 "/"3 " for memory locations,
  // "1 "/"0 " otherwise, symbol vs. offset), the escaped name, and the
  // hex offset.
  auto writeLocation = [&OutFile, &WriteMemLocs](const Location &Loc) {
    if (WriteMemLocs)
      OutFile << (Loc.IsSymbol ? "4 " : "3 ");
    else
      OutFile << (Loc.IsSymbol ? "1 " : "0 ");
    OutFile << (Loc.Name.empty() ? "[unknown]" : getEscapedName(Loc.Name))
            << " " << Twine::utohexstr(Loc.Offset) << FieldSeparator;
  };

  uint64_t BranchValues = 0;
  uint64_t MemValues = 0;

  if (BAT)
    OutFile << "boltedcollection\n";
  if (opts::BasicAggregation) {
    // Header line lists the collected event names after "no_lbr".
    OutFile << "no_lbr";
    for (const StringMapEntry<EmptyStringSetTag> &Entry : EventNames)
      OutFile << " " << Entry.getKey();
    OutFile << "\n";

    for (const auto &KV : NamesToBasicSamples) {
      const FuncBasicSampleData &FSD = KV.second;
      for (const BasicSampleInfo &SI : FSD.Data) {
        writeLocation(SI.Loc);
        OutFile << SI.Hits << "\n";
        ++BranchValues;
      }
    }
  } else {
    for (const auto &KV : NamesToBranches) {
      const FuncBranchData &FBD = KV.second;
      for (const BranchInfo &BI : FBD.Data) {
        writeLocation(BI.From);
        writeLocation(BI.To);
        OutFile << BI.Mispreds << " " << BI.Branches << "\n";
        ++BranchValues;
      }
      for (const BranchInfo &BI : FBD.EntryData) {
        // Do not output if source is a known symbol, since this was already
        // accounted for in the source function
        if (BI.From.IsSymbol)
          continue;
        writeLocation(BI.From);
        writeLocation(BI.To);
        OutFile << BI.Mispreds << " " << BI.Branches << "\n";
        ++BranchValues;
      }
    }

    // Memory events follow branches and use the memory location markers.
    WriteMemLocs = true;
    for (const auto &KV : NamesToMemEvents) {
      const FuncMemData &FMD = KV.second;
      for (const MemInfo &MemEvent : FMD.Data) {
        writeLocation(MemEvent.Offset);
        writeLocation(MemEvent.Addr);
        OutFile << MemEvent.Count << "\n";
        ++MemValues;
      }
    }
  }

  outs() << "PERF2BOLT: wrote " << BranchValues << " objects and " << MemValues
         << " memory objects to " << OutputFilename << "\n";

  return std::error_code();
}
/// Convert the aggregated profile of a BOLTed binary into YAML profile
/// format and write it to \p OutputFilename.  Functions covered by the BOLT
/// Address Translation (BAT) table are translated back to input-binary
/// addresses; pseudo-probe information is attached when a decoder is
/// available and requested.
std::error_code DataAggregator::writeBATYAML(BinaryContext &BC,
                                             StringRef OutputFilename) const {
  std::error_code EC;
  raw_fd_ostream OutFile(OutputFilename, EC, sys::fs::OpenFlags::OF_None);
  if (EC)
    return EC;

  yaml::bolt::BinaryProfile BP;

  const MCPseudoProbeDecoder *PseudoProbeDecoder =
      opts::ProfileWritePseudoProbes ? BC.getPseudoProbeDecoder() : nullptr;

  // Fill out the header info.
  BP.Header.Version = 1;
  BP.Header.FileName = std::string(BC.getFilename());
  std::optional<StringRef> BuildID = BC.getFileBuildID();
  BP.Header.Id = BuildID ? std::string(*BuildID) : "<unknown>";
  BP.Header.Origin = std::string(getReaderName());
  // Only the input binary layout order is supported.
  BP.Header.IsDFSOrder = false;
  // FIXME: Need to match hash function used to produce BAT hashes.
  BP.Header.HashFunction = HashFunction::Default;
  ListSeparator LS(",");
  raw_string_ostream EventNamesOS(BP.Header.EventNames);
  for (const StringMapEntry<EmptyStringSetTag> &EventEntry : EventNames)
    EventNamesOS << LS << EventEntry.first().str();
  BP.Header.Flags = opts::BasicAggregation ? BinaryFunction::PF_BASIC
                                           : BinaryFunction::PF_BRANCH;

  // Add probe inline tree nodes.
  YAMLProfileWriter::InlineTreeDesc InlineTree;
  if (PseudoProbeDecoder)
    std::tie(BP.PseudoProbeDesc, InlineTree) =
        YAMLProfileWriter::convertPseudoProbeDesc(*PseudoProbeDecoder);

  if (!opts::BasicAggregation) {
    // Convert profile for functions not covered by BAT
    for (auto &BFI : BC.getBinaryFunctions()) {
      BinaryFunction &Function = BFI.second;
      if (!Function.hasProfile())
        continue;
      if (BAT->isBATFunction(Function.getAddress()))
        continue;
      BP.Functions.emplace_back(YAMLProfileWriter::convert(
          Function, /*UseDFS=*/false, InlineTree, BAT));
    }

    // Convert profile for BAT-covered functions, translating offsets back
    // through the BAT block map.
    for (const auto &KV : NamesToBranches) {
      const StringRef FuncName = KV.first;
      const FuncBranchData &Branches = KV.second;
      yaml::bolt::BinaryFunctionProfile YamlBF;
      BinaryData *BD = BC.getBinaryDataByName(FuncName);
      assert(BD);
      uint64_t FuncAddress = BD->getAddress();
      if (!BAT->isBATFunction(FuncAddress))
        continue;
      BinaryFunction *BF = BC.getBinaryFunctionAtAddress(FuncAddress);
      assert(BF);
      YamlBF.Name = getLocationName(*BF, BAT);
      YamlBF.Id = BF->getFunctionNumber();
      YamlBF.Hash = BAT->getBFHash(FuncAddress);
      YamlBF.ExecCount = BF->getKnownExecutionCount();
      YamlBF.ExternEntryCount = BF->getExternEntryCount();
      YamlBF.NumBasicBlocks = BAT->getNumBasicBlocks(FuncAddress);
      const BoltAddressTranslation::BBHashMapTy &BlockMap =
          BAT->getBBHashMap(FuncAddress);
      YamlBF.Blocks.resize(YamlBF.NumBasicBlocks);

      // Copy per-block hash and index from the BAT block map.
      for (auto &&[Entry, YamlBB] : llvm::zip(BlockMap, YamlBF.Blocks)) {
        const auto &Block = Entry.second;
        YamlBB.Hash = Block.Hash;
        YamlBB.Index = Block.Index;
      }

      // Lookup containing basic block offset and index
      auto getBlock = [&BlockMap](uint32_t Offset) {
        auto BlockIt = BlockMap.upper_bound(Offset);
        if (LLVM_UNLIKELY(BlockIt == BlockMap.begin())) {
          errs() << "BOLT-ERROR: invalid BAT section\n";
          exit(1);
        }
        --BlockIt;
        return std::pair(BlockIt->first, BlockIt->second.Index);
      };

      for (const BranchInfo &BI : Branches.Data) {
        using namespace yaml::bolt;
        const auto &[BlockOffset, BlockIndex] = getBlock(BI.From.Offset);
        BinaryBasicBlockProfile &YamlBB = YamlBF.Blocks[BlockIndex];
        if (BI.To.IsSymbol && BI.To.Name == BI.From.Name && BI.To.Offset != 0) {
          // Internal branch
          const unsigned SuccIndex = getBlock(BI.To.Offset).second;
          auto &SI = YamlBB.Successors.emplace_back(SuccessorInfo{SuccIndex});
          SI.Count = BI.Branches;
          SI.Mispreds = BI.Mispreds;
        } else {
          // Call
          const uint32_t Offset = BI.From.Offset - BlockOffset;
          auto &CSI = YamlBB.CallSites.emplace_back(CallSiteInfo{Offset});
          CSI.Count = BI.Branches;
          CSI.Mispreds = BI.Mispreds;
          if (const BinaryData *BD = BC.getBinaryDataByName(BI.To.Name))
            YAMLProfileWriter::setCSIDestination(BC, CSI, BD->getSymbol(), BAT,
                                                 BI.To.Offset);
        }
      }

      // Set entry counts, similar to DataReader::readProfile.
      for (const BranchInfo &BI : Branches.EntryData) {
        if (!BlockMap.isInputBlock(BI.To.Offset)) {
          if (opts::Verbosity >= 1)
            errs() << "BOLT-WARNING: Unexpected EntryData in " << FuncName
                   << " at 0x" << Twine::utohexstr(BI.To.Offset) << '\n';
          continue;
        }
        const unsigned BlockIndex = BlockMap.getBBIndex(BI.To.Offset);
        YamlBF.Blocks[BlockIndex].ExecCount += BI.Branches;
      }

      if (PseudoProbeDecoder) {
        DenseMap<const MCDecodedPseudoProbeInlineTree *, uint32_t>
            InlineTreeNodeId;
        std::tie(YamlBF.InlineTree, InlineTreeNodeId) =
            YAMLProfileWriter::convertBFInlineTree(*PseudoProbeDecoder,
                                                   InlineTree, *BF);

        // Fetch probes belonging to all fragments
        const AddressProbesMap &ProbeMap =
            PseudoProbeDecoder->getAddress2ProbesMap();
        BinaryFunction::FragmentsSetTy Fragments(BF->Fragments);
        Fragments.insert(BF);
        DenseMap<uint32_t, YAMLProfileWriter::BlockProbeCtx> BlockCtx;
        for (const BinaryFunction *F : Fragments) {
          const uint64_t FuncAddr = F->getAddress();
          for (const MCDecodedPseudoProbe &Probe :
               ProbeMap.find(FuncAddr, FuncAddr + F->getSize())) {
            const uint32_t OutputAddress = Probe.getAddress();
            // Translate the probe's output address back to an input offset.
            const uint32_t InputOffset = BAT->translate(
                FuncAddr, OutputAddress - FuncAddr, /*IsBranchSrc=*/true);
            const auto &[BlockOffset, BlockIndex] = getBlock(InputOffset);
            BlockCtx[BlockIndex].addBlockProbe(InlineTreeNodeId, Probe,
                                               InputOffset - BlockOffset);
          }
        }
        for (auto &[Block, Ctx] : BlockCtx)
          Ctx.finalize(YamlBF.Blocks[Block]);
      }

      // Skip printing if there's no profile data
      llvm::erase_if(
          YamlBF.Blocks, [](const yaml::bolt::BinaryBasicBlockProfile &YamlBB) {
            auto HasCount = [](const auto &SI) { return SI.Count; };
            bool HasAnyCount = YamlBB.ExecCount ||
                               llvm::any_of(YamlBB.Successors, HasCount) ||
                               llvm::any_of(YamlBB.CallSites, HasCount);
            return !HasAnyCount;
          });
      BP.Functions.emplace_back(YamlBF);
    }
  }

  // Write the profile.
  yaml::Output Out(OutFile, nullptr, 0);
  Out << BP;
  return std::error_code();
}
/// Dump everything the base DataReader parsed.
void DataAggregator::dump() const { DataReader::dump(); }
/// Dump a single perf branch sample's LBR entries to the diagnostics stream.
void DataAggregator::dump(const PerfBranchSample &Sample) const {
  Diag << "Sample brstack entries: " << Sample.LBR.size() << "\n";
  for (const LBREntry &LBR : Sample.LBR)
    Diag << LBR << '\n';
}
/// Dump a single perf memory sample (PC and data address) to the
/// diagnostics stream.
void DataAggregator::dump(const PerfMemSample &Sample) const {
  Diag << "Sample mem entries: " << Sample.PC << ": " << Sample.Addr << "\n";
} | cpp | github | https://github.com/llvm/llvm-project | bolt/lib/Profile/DataAggregator.cpp
import contextlib
import os
import shutil
import subprocess
import sys
import urllib
import urlparse
from os import makedirs
from os.path import exists, join, abspath, basename, dirname, islink

import dockerfile
import util
import platforms
__all__ = ['merge', 'generate']
RELDIR = "library"
ESPB_SCRIPT = "https://github.com/ack/espb/zipball/master"
SUPERVISED = 'RUN echo "[program:{0}]\\ncommand={1}\\n" > /etc/supervisor/conf.d/{2}.conf'
def merge(*refs, **kw):
    """pull/parse multiple dockerfiles, outputting to STDOUT """
    refs = list(refs)
    # resolve any remote references
    files = expand(*refs)
    # parse the dockerfiles
    parsed = parse(files)
    # ensure we can proceed
    errors = validate(parsed)
    if errors:
        for err in errors:
            print >> sys.stderr, err
        sys.exit(10)
    # NOTE(review): `workspace` is computed but never used below — confirm
    # whether it is dead code.
    workspace = join(os.getcwd(), RELDIR)
    # All parsed files share the same parent (validate guarantees this), so
    # the first one's parent is the merged image's base.
    initial = parsed[0].parent
    print "########## docket intro"
    print "FROM {0}".format(initial)
    print
    # echo out the concatenated commands
    for df in parsed:
        for line in df.lines:
            print line
    if not kw.get('unsupervised'):
        # Emit supervisord setup plus one program entry per Dockerfile that
        # declares a startup command.
        print "\n\n########## docket outro"
        print "RUN mkdir -p /etc/supervisor/conf.d /var/log/supervisor"
        print "RUN touch /etc/supervisor/supervisord.conf"
        for df in parsed:
            startup = df.command
            if not startup:
                continue
            # NOTE(review): `.replace("'", "\'")` is a no-op — "\'" is just
            # "'" in Python; confirm the intended quoting.
            print SUPERVISED.format(df.name, startup.replace("\"", "\\\""), df.name).replace("'", "\'")
        if kw.get('ssh'):
            print SUPERVISED.format('ssh', '/usr/sbin/sshd -D', 'ssh')
        print 'CMD ["supervisord", "-n", "-c", "/etc/supervisor/supervisord.conf"]'
def generate(name, parent, **kw):
    """
    create a docket Dockerfile

    Creates library/<name>/ containing a supervisord.conf and a Dockerfile
    derived from *parent*, then prints the new Dockerfile's path.
    """
    path = join('library', name)
    if exists(path):
        error("dir/file at path {0} exists!".format(path))
    # NOTE(review): `override` is never used — confirm dead code.
    override = None
    # Prefer a supervisord.conf in the current directory; fall back to the
    # one shipped next to this module.  NOTE(review): the bare open() here
    # only probes existence and leaks the file handle.
    try:
        confpath = abspath("supervisord.conf")
        open(confpath)
    except IOError:
        confpath = abspath(join(dirname(__file__), "supervisord.conf"))
    os.makedirs(path)
    with util.chdir(path):
        shutil.copyfile(confpath, "supervisord.conf")
        with open("Dockerfile", 'w') as f:
            # this one (in practice) should be ignored
            print >> f, "FROM {0}".format(parent)
            # install supervisor and/or pip according to platform
            for dep in platforms.dependencies(parent):
                print >> f, "RUN {0}".format(dep)
            print >> f, "RUN mkdir -p /etc/supervisor"
            print >> f, "ADD supervisord.conf /etc/supervisor/supervisord.conf"
            print >> f, "ENV ETCD http://172.17.42.1:4001"
            if kw.get('inject'):
                print >> f, "# injected service pooling script"
                print >> f, "RUN which espb || pip install {0}".format(ESPB_SCRIPT)
                print >> f, 'CMD ["/usr/local/bin/espb", "register", "{0}"]'.format(name)
    print join(path, "Dockerfile")
def expand(*refs):
    """
    convert refs from { github-ref / directory / uri }
    to { local-directory }
    """
    expanded = []
    for ref in refs:
        # '.' means "this directory": pin it down to an absolute path.
        if ref == '.':
            ref = abspath(ref)
        candidate = join(RELDIR, ref)
        expanded.append(candidate if exists(candidate) else resolve(ref))
    return expanded
def parse(files):
    """Parse every Dockerfile path into a dockerfile.Dockerfile object."""
    results = []
    for location in files:
        entry = dockerfile.Dockerfile(location)
        entry.parse()
        results.append(entry)
    return results
def validate(parsed):
    """Return a list of error strings; empty when the files can be merged."""
    problems = []
    distinct = ancestors(parsed)
    if len(distinct) > 1:
        problems.append("Multiple ancestors detected: {0}".format(",".join(distinct)))
    return problems
def ancestors(parsed):
    """Set of distinct parent images across all parsed Dockerfiles."""
    return {entry.parent for entry in parsed}
def github(uri, outdir):
    """Clone *uri* into *outdir* with git.

    Accepts a full github.com URL, a git@/git:// URL, or a bare
    "user/repo" shorthand (expanded to https://github.com/user/repo).
    git's stdout/stderr are echoed to our stderr.
    """
    if 'github.com' in uri:
        url = uri
    elif 'git@' in uri or 'git://' in uri:
        url = uri
    else:
        # bare "user/repo" shorthand
        url = "https://github.com/" + uri
    out, err = subprocess.Popen(["git", "clone", url, outdir],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE).communicate()
    print >> sys.stderr, out
    print >> sys.stderr, err
def resolve(ref):
    """Resolve *ref* to a local directory containing a Dockerfile.

    *ref* may be a path already on disk, a direct http(s) URL to a raw
    Dockerfile, or a git/github repository reference.
    """
    if exists(ref):
        # local directory already on disk
        return ref
    elif 'http' in ref and 'github.com' not in ref:
        # download a raw Dockerfile into library/<basename>
        dir = basename(urlparse.urlparse(ref).path)
        target = join(RELDIR, dir)
        try:
            os.makedirs(target)
        except OSError:
            pass  # directory already exists
        # Bug fix: the file must be opened for *writing*, and the response
        # *body* (not the response object) written to it; `urllib` also has
        # to be imported for urlopen to resolve.
        with open(join(target, "Dockerfile"), 'w') as out:
            out.write(urllib.urlopen(ref).read())
        return target
    elif ref.lower().startswith("http://") or \
            ref.lower().startswith("https://") or \
            ref.lower().startswith("git") or \
            '/' in ref:
        # clone the repository under library/<repo-path>
        dir = urlparse.urlparse(ref).path
        if dir.startswith('/'):
            dir = dir[1:]
        target = join(RELDIR, dir)
        if not exists(target):
            try:
                makedirs(target)
            except OSError:
                pass  # directory already exists
            github(ref, target)
        return target
    else:
        raise Exception("unknown path type: {0}".format(ref))
def error(m, exit_code=1):
    """Print message *m* to stderr and terminate with *exit_code*."""
    print >> sys.stderr, m
    sys.exit(exit_code) | unknown | codeparrot/codeparrot-clean | |
#! /usr/bin/env python
import sys
import os.path
import pybindgen.settings
from pybindgen.gccxmlparser import ModuleParser, PygenClassifier, PygenSection, WrapperWarning
from pybindgen.typehandlers.codesink import FileCodeSink
from pygccxml.declarations import templates
from pygccxml.declarations.class_declaration import class_t
from pygccxml.declarations.calldef import free_function_t, member_function_t, constructor_t, calldef_t
## we need the smart pointer type transformation to be active even
## during gccxml scanning.
import ns3modulegen_core_customizations
## silence gccxmlparser errors; we only want error handling in the
## generated python script, not while scanning.
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Silence wrapper errors during the gccxml scan; error handling belongs
    in the generated python script, not here (see module comment above)."""
    def handle_error(self, dummy_wrapper, dummy_exception, dummy_traceback_):
        # Returning True tells pybindgen the error was handled.
        return True
pybindgen.settings.error_handler = ErrorHandler()
import warnings
warnings.filterwarnings(category=WrapperWarning, action='ignore')
# Manual pybindgen annotations, keyed either by a class decl string
# ('::ns3::Foo') or by the full pygccxml signature string of a function,
# method, or constructor.  Consumed table-driven by pre_scan_hook below.
type_annotations = {
    '::ns3::AttributeChecker': {
        'automatic_type_narrowing': 'true',
        'allow_subclassing': 'false',
    },
    '::ns3::AttributeValue': {
        'automatic_type_narrowing': 'true',
        'allow_subclassing': 'false',
    },
    '::ns3::CommandLine': {
        'allow_subclassing': 'true', # needed so that AddValue is able to set attributes on the object
    },
    '::ns3::NscTcpL4Protocol': {
        'ignore': 'true', # this class is implementation detail
    },
    'ns3::RandomVariable::RandomVariable(ns3::RandomVariableBase const & variable) [constructor]': {
        'ignore': None,
    },
    'ns3::RandomVariableBase * ns3::RandomVariable::Peek() const [member function]': {
        'ignore': None,
    },
    'void ns3::RandomVariable::GetSeed(uint32_t * seed) const [member function]': {
        'params': {'seed':{'direction':'out',
                           'array_length':'6'}}
    },
    'bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]': {
        'params': {'info':{'transfer_ownership': 'false'}}
    },
    'static bool ns3::TypeId::LookupByNameFailSafe(std::string name, ns3::TypeId * tid) [member function]': {
        'ignore': None, # manually wrapped in
    },
    'bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]': {
        'params': {'obj': {'transfer_ownership':'false'}}
    },
    'bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]': {
        'params': {'obj': {'transfer_ownership':'false'}}
    },
    'bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]': {
        'params': {'obj': {'transfer_ownership':'false'}}
    },
    'bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]': {
        'params': {'obj': {'transfer_ownership':'false'}}
    },
    'bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]': {
        'params': {'object': {'transfer_ownership':'false'}}
    },
    'ns3::EmpiricalVariable::EmpiricalVariable(ns3::RandomVariableBase const & variable) [constructor]': {
        'ignore': None
    },
    'static ns3::AttributeList * ns3::AttributeList::GetGlobal() [member function]': {
        'caller_owns_return': 'false'
    },
    'void ns3::CommandLine::Parse(int argc, char * * argv) const [member function]': {
        'ignore': None # manually wrapped
    },
    'extern void ns3::PythonCompleteConstruct(ns3::Ptr<ns3::Object> object, ns3::TypeId typeId, ns3::AttributeList const & attributes) [free function]': {
        'ignore': None # used transparently by, should not be wrapped
    },
    'ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4ListRouting::GetRoutingProtocol(uint32_t index, int16_t & priority) const [member function]': {
        'params': {'priority':{'direction':'out'}}
    },
    'ns3::Ipv4RoutingTableEntry * ns3::GlobalRouter::GetInjectedRoute(uint32_t i) [member function]': {
        'params': {'return': { 'caller_owns_return': 'false',}},
    },
    'ns3::Ipv4RoutingTableEntry * ns3::Ipv4GlobalRouting::GetRoute(uint32_t i) const [member function]': {
        'params': {'return': { 'caller_owns_return': 'false',}},
    },
    '::ns3::TestCase': {
        'ignore': 'true', # we don't need to write test cases in Python
    },
    '::ns3::TestRunner': {
        'ignore': 'true', # we don't need to write test cases in Python
    },
    '::ns3::TestSuite': {
        'ignore': 'true', # we don't need to write test cases in Python
    },
}
def get_ns3_relative_path(path):
    """Return *path* relative to its innermost 'ns3' directory component.

    Raises AssertionError when *path* contains no 'ns3' component.
    """
    parts = []
    head = path
    while head:
        new_head, tail = os.path.split(head)
        if tail == 'ns3':
            return os.path.join(*parts)
        # Bug fix: for an absolute path without an 'ns3' component,
        # os.path.split("/") returns ("/", "") forever, so the original loop
        # never terminated.  Stop as soon as splitting makes no progress.
        if new_head == head:
            break
        parts.insert(0, tail)
        head = new_head
    raise AssertionError("is the path %r inside ns3?!" % path)
def pre_scan_hook(dummy_module_parser,
                  pygccxml_definition,
                  global_annotations,
                  parameter_annotations):
    """gccxml scan hook: attach pybindgen annotations to each definition
    before wrappers are generated.

    Mutates *global_annotations* and *parameter_annotations* in place;
    table-driven customizations come from the module-level type_annotations
    dict.
    """
    ns3_header = get_ns3_relative_path(pygccxml_definition.location.file_name)
    ## Note: we don't include line numbers in the comments because
    ## those numbers are very likely to change frequently, which would
    ## cause needless changes, since the generated python files are
    ## kept under version control.
    #global_annotations['pygen_comment'] = "%s:%i: %s" % \
    #    (ns3_header, pygccxml_definition.location.line, pygccxml_definition)
    global_annotations['pygen_comment'] = "%s: %s" % \
        (ns3_header, pygccxml_definition)

    ## handle ns3::Object::GetObject (left to its own devices,
    ## pybindgen will generate a mangled name containing the template
    ## argument type name).
    if isinstance(pygccxml_definition, member_function_t) \
            and pygccxml_definition.parent.name == 'Object' \
            and pygccxml_definition.name == 'GetObject':
        template_args = templates.args(pygccxml_definition.demangled_name)
        if template_args == ['ns3::Object']:
            global_annotations['template_instance_names'] = 'ns3::Object=>GetObject'

    ## Don't wrap Simulator::Schedule* (manually wrapped)
    if isinstance(pygccxml_definition, member_function_t) \
            and pygccxml_definition.parent.name == 'Simulator' \
            and pygccxml_definition.name.startswith('Schedule'):
        global_annotations['ignore'] = None

    # manually wrapped
    if isinstance(pygccxml_definition, member_function_t) \
            and pygccxml_definition.parent.name == 'Simulator' \
            and pygccxml_definition.name == 'Run':
        global_annotations['ignore'] = True

    ## http://www.gccxml.org/Bug/view.php?id=9915
    # Work around gccxml dropping the argument of default values like
    # "ns3::Seconds(0)".
    if isinstance(pygccxml_definition, calldef_t):
        for arg in pygccxml_definition.arguments:
            if arg.default_value is None:
                continue
            if "ns3::MilliSeconds( )" == arg.default_value:
                arg.default_value = "ns3::MilliSeconds(0)"
            if "ns3::Seconds( )" == arg.default_value:
                arg.default_value = "ns3::Seconds(0)"

    ## classes
    if isinstance(pygccxml_definition, class_t):
        # no need for helper classes to allow subclassing in Python, I think...
        #if pygccxml_definition.name.endswith('Helper'):
        #    global_annotations['allow_subclassing'] = 'false'

        if pygccxml_definition.decl_string.startswith('::ns3::SimpleRefCount<'):
            global_annotations['incref_method'] = 'Ref'
            global_annotations['decref_method'] = 'Unref'
            global_annotations['peekref_method'] = 'GetReferenceCount'
            global_annotations['automatic_type_narrowing'] = 'true'
            return

        if pygccxml_definition.decl_string.startswith('::ns3::Callback<'):
            # manually handled in ns3modulegen_core_customizations.py
            global_annotations['ignore'] = None
            return

        if pygccxml_definition.decl_string.startswith('::ns3::TracedCallback<'):
            global_annotations['ignore'] = None
            return

        if pygccxml_definition.decl_string.startswith('::ns3::Ptr<'):
            # handled by pybindgen "type transformation"
            global_annotations['ignore'] = None
            return

        # table driven class customization
        try:
            annotations = type_annotations[pygccxml_definition.decl_string]
        except KeyError:
            pass
        else:
            global_annotations.update(annotations)

    ## free functions
    if isinstance(pygccxml_definition, free_function_t):
        if pygccxml_definition.name == 'PeekPointer':
            global_annotations['ignore'] = None
            return

    ## table driven methods/constructors/functions customization
    if isinstance(pygccxml_definition, (free_function_t, member_function_t, constructor_t)):
        try:
            annotations = type_annotations[str(pygccxml_definition)]
        except KeyError:
            pass
        else:
            # 'params' entries feed parameter_annotations; everything else
            # goes to global_annotations.
            for key,value in annotations.items():
                if key == 'params':
                    parameter_annotations.update (value)
                    del annotations['params']
            global_annotations.update(annotations)
# def post_scan_hook(dummy_module_parser, dummy_pygccxml_definition, pybindgen_wrapper):
# ## classes
# if isinstance(pybindgen_wrapper, CppClass):
# if pybindgen_wrapper.name.endswith('Checker'):
# print >> sys.stderr, "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", pybindgen_wrapper
# #pybindgen_wrapper.set_instance_creation_function(AttributeChecker_instance_creation_function)
def scan_callback_classes(module_parser, callback_classes_file):
    """Write a python list literal holding the template-parameter lists of
    every ns3::Callback<...> instantiation found by the scan."""
    callback_classes_file.write("callback_classes = [\n")
    scanned = module_parser.module_namespace.classes(
        function=module_parser.location_filter, recursive=False)
    for cls in scanned:
        if not cls.name.startswith("Callback<"):
            continue
        assert templates.is_instantiation(cls.decl_string), "%s is not a template instantiation" % cls
        dummy_cls_name, template_parameters = templates.split(cls.decl_string)
        callback_classes_file.write(" %r,\n" % template_parameters)
    callback_classes_file.write("]\n")
class MyPygenClassifier(PygenClassifier):
    """Route scanned definitions into pygen sections by header file name,
    falling back to the '__main__' section for unknown headers."""

    def __init__(self, headers_map, section_precendences):
        self.headers_map = headers_map
        self.section_precendences = section_precendences

    def classify(self, pygccxml_definition):
        header = os.path.basename(pygccxml_definition.location.file_name)
        # Unknown headers land in the catch-all '__main__' section.
        return self.headers_map.get(header, '__main__')

    def get_section_precedence(self, section_name):
        # '__main__' always sorts first.
        if section_name == '__main__':
            return -1
        return self.section_precendences[section_name]
def ns3_module_scan(top_builddir, pygen_file_name, everything_h, cflags):
    """Scan the ns-3 API with gccxml and generate pybindgen pygen sections.

    Reads from stdin a dict mapping module name -> (dependency list,
    header list), then writes one pygen section file per module (plus a
    '__main__' section at `pygen_file_name`) and a callbacks_list.py with
    every Callback<> instantiation.

    top_builddir -- build directory added to include/whitelist paths
    pygen_file_name -- path of the main generated pygen file
    everything_h -- umbrella header including the whole ns-3 API
    cflags -- extra compiler flags forwarded to gccxml

    """
    # NOTE(security): eval() on stdin is only acceptable because the input
    # comes from the trusted build system, never from untrusted users.
    ns3_modules = eval(sys.stdin.readline())

    ## do a topological sort on the modules graph
    from topsort import topsort
    graph = []
    # sorted() keeps the edge order deterministic across runs
    for ns3_module_name in sorted(ns3_modules.keys()):
        for dep in sorted(ns3_modules[ns3_module_name][0]):
            graph.append((dep, ns3_module_name))
    # NOTE(review): a module appearing in no dependency edge would be
    # missing from topsort()'s output and get no section -- presumably
    # every module is linked to 'core' so this never happens; verify.
    sorted_ns3_modules = topsort(graph)

    ## one pygen section (output file) per module, plus the catch-all
    sections = [PygenSection('__main__', FileCodeSink(open(pygen_file_name, "wt")))]
    headers_map = {} # header_name -> section_name
    section_precendences = {} # section_name -> precedence
    for prec, ns3_module in enumerate(sorted_ns3_modules):
        section_name = "ns3_module_%s" % ns3_module.replace('-', '_')
        file_name = os.path.join(os.path.dirname(pygen_file_name), "%s.py" % section_name)
        sections.append(PygenSection(section_name, FileCodeSink(open(file_name, "wt")),
                                     section_name + "__local"))
        for header in ns3_modules[ns3_module][1]:
            headers_map[header] = section_name
        section_precendences[section_name] = prec

    module_parser = ModuleParser('ns3', 'ns3')
    module_parser.add_pre_scan_hook(pre_scan_hook)
    #module_parser.add_post_scan_hook(post_scan_hook)

    gccxml_options = dict(
        include_paths=[top_builddir],
        define_symbols={
            #'NS3_ASSERT_ENABLE': None,
            #'NS3_LOG_ENABLE': None,
            },
        # -DPYTHON_SCAN lets headers expose scan-only declarations
        cflags=('--gccxml-cxxflags "%s -DPYTHON_SCAN"' % cflags)
        )

    module_parser.parse_init([everything_h],
                             None, whitelist_paths=[top_builddir, os.path.dirname(everything_h)],
                             #includes=['"ns3/everything.h"'],
                             pygen_sink=sections,
                             pygen_classifier=MyPygenClassifier(headers_map, section_precendences),
                             gccxml_options=gccxml_options)
    module_parser.scan_types()

    # dump the Callback<> instantiations found during the type scan;
    # try/finally so the file is not leaked if the scan raises
    callback_classes_file = open(os.path.join(os.path.dirname(pygen_file_name), "callbacks_list.py"), "wt")
    try:
        scan_callback_classes(module_parser, callback_classes_file)
    finally:
        callback_classes_file.close()

    module_parser.scan_methods()
    module_parser.scan_functions()
    module_parser.parse_finalize()

    for section in sections:
        section.code_sink.file.close()
if __name__ == '__main__':
    # Maps positional args into ns3_module_scan(top_builddir,
    # pygen_file_name, everything_h, cflags).  Note the shuffle:
    # argv[3] becomes pygen_file_name and argv[2] everything_h, so the
    # expected command line is presumably
    #   <top_builddir> <everything_h> <pygen_file> <cflags>
    # -- confirm against the build-system caller.
    ns3_module_scan(sys.argv[1], sys.argv[3], sys.argv[2], sys.argv[4])
#!/usr/bin/env python
import os
import multiprocessing
import serial
import signal
import argparse
def start_dosenet(mode):
    """Launch the Pocket Geiger script: mode 0 = start, mode 1 = test.

    Any other mode value is silently ignored.
    """
    commands = {
        0: 'sudo -E bash /home/pi/dosenet-raspberrypi/pocket.sh start',
        1: 'sudo -E bash /home/pi/dosenet-raspberrypi/pocket.sh test',
    }
    command = commands.get(mode)
    if command is not None:
        os.system(command)
def start_D3S(mode):
    """Launch the D3S detector script: mode 0 = start, mode 1 = test.

    Any other mode value is silently ignored.
    """
    commands = {
        0: 'sudo -E bash /home/pi/dosenet-raspberrypi/D3S.sh start',
        1: 'sudo -E bash /home/pi/dosenet-raspberrypi/D3S.sh test',
    }
    command = commands.get(mode)
    if command is not None:
        os.system(command)
def start_AQ(mode):
    """Launch the air-quality sensor script: mode 0 = start, mode 1 = test.

    Any other mode value is silently ignored.
    """
    commands = {
        0: 'sudo -E bash /home/pi/dosenet-raspberrypi/AQ.sh start',
        1: 'sudo -E bash /home/pi/dosenet-raspberrypi/AQ.sh test',
    }
    command = commands.get(mode)
    if command is not None:
        os.system(command)
def start_CO2(mode):
    """Launch the CO2 sensor script: mode 0 = start, mode 1 = test.

    Any other mode value is silently ignored.
    """
    commands = {
        0: 'sudo -E bash /home/pi/dosenet-raspberrypi/CO2.sh start',
        1: 'sudo -E bash /home/pi/dosenet-raspberrypi/CO2.sh test',
    }
    command = commands.get(mode)
    if command is not None:
        os.system(command)
def start_Weather(mode):
    """Launch the weather sensor script: mode 0 = start, mode 1 = test.

    Any other mode value is silently ignored.
    """
    commands = {
        0: 'sudo -E bash /home/pi/dosenet-raspberrypi/weather.sh start',
        1: 'sudo -E bash /home/pi/dosenet-raspberrypi/weather.sh test',
    }
    command = commands.get(mode)
    if command is not None:
        os.system(command)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--test_mode', action='store_true', default=False,
        help="Choose whether the devices will start in test mode or not. (Default False)")
    test_mode = parser.parse_args().test_mode

    if not test_mode:
        # Block (up to 60 s) until the clock is NTP-synced so the sensor
        # scripts record correct timestamps; skipped in test mode.
        print('Waiting for NTP to be synced...')
        os.system('sudo service ntp stop')
        os.system('sudo timeout 60s ntpd -gq')
        os.system('sudo service ntp start')

    # Best effort: drain any stale bytes from the D3S serial port before
    # the sensor scripts open it; the device may legitimately be absent.
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # are no longer swallowed here.
    try:
        ser = serial.Serial('/dev/ttyACM0')
        ser.flushInput()
        ser.close()
    except Exception:
        pass

    # mode argument understood by the start_* helpers: 0 = start, 1 = test.
    # (Previously the five Process constructions were duplicated per branch.)
    mode = 1 if test_mode else 0
    p = multiprocessing.Process(target=start_D3S, args=(mode,))
    t = multiprocessing.Process(target=start_dosenet, args=(mode,))
    a = multiprocessing.Process(target=start_AQ, args=(mode,))
    c = multiprocessing.Process(target=start_CO2, args=(mode,))
    w = multiprocessing.Process(target=start_Weather, args=(mode,))

    try:
        print('Starting D3S script process')
        p.start()
        print('Starting Pocket Geiger script process')
        t.start()
        print('Starting Air Quality Sensor script process')
        a.start()
        print('Starting CO2 sensor script process')
        c.start()
        print('Starting Weather sensor script process')
        w.start()
        print('started')
        p.join()
        t.join()
        a.join()
        c.join()
        w.join()
        print('we can reboot here')
    except Exception:
        # Was a bare ``except: pass``; still deliberately best-effort, but
        # KeyboardInterrupt/SystemExit now propagate instead of being hidden.
        pass
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Erasure codec framework.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.erasurecode.codec;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/package-info.java |
# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python, and influenced by
Apache's log4j system.
Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, cPickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
try:
import codecs
except ImportError:
codecs = None
# Feature probe: Python 2 has the ``unicode`` builtin, Python 3 does not.
# _unicode presumably gates unicode-specific encoding paths later in the
# file (not visible in this chunk) -- verify against the emit() methods.
try:
    unicode
    _unicode = True
except NameError:
    _unicode = False

#
# Some constants...
#
# Default ports used by the network logging handlers defined below.
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
# Well-known syslog ports, used by SysLogHandler's default address.
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514

_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
    """
    Shared machinery for file handlers that rotate their log file.

    Subclasses must provide shouldRollover() and doRollover(); use
    RotatingFileHandler or TimedRotatingFileHandler rather than
    instantiating this class directly.
    """
    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Open `filename` as the logging stream, remembering mode and
        encoding so that rollover implementations can reopen the file.
        """
        if codecs is None:
            # without the codecs module an encoded stream cannot be opened
            encoding = None
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding

    def emit(self, record):
        """
        Write `record` to the file, first rolling the file over when the
        subclass's shouldRollover() says a rollover is due.  Any failure
        is routed to handleError(); KeyboardInterrupt/SystemExit propagate.
        """
        try:
            rollover_due = self.shouldRollover(record)
            if rollover_due:
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class RotatingFileHandler(BaseRotatingHandler):
    """
    Log to a single file, rolling over to numbered backup files once the
    live file would exceed a size limit.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        """
        Open `filename` as the logging stream.

        With maxBytes > 0 the file rolls over just before a record would
        push it past maxBytes.  With backupCount >= 1 that many numbered
        backups are kept: "app.log.1" is the newest backup, higher numbers
        are older, and the oldest is discarded on each rollover.  The live
        file is always "app.log".  maxBytes == 0 disables rollover.
        """
        # Rotation only makes sense in append mode: honouring 'w' would
        # truncate the log on every run of the calling application before
        # rotation could preserve it.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Rotate: shift app.log.N to app.log.N+1 (newest-last, dropping the
        oldest), move the live file to app.log.1 and reopen a fresh stream.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            for idx in range(self.backupCount - 1, 0, -1):
                src = "%s.%d" % (self.baseFilename, idx)
                dst = "%s.%d" % (self.baseFilename, idx + 1)
                if os.path.exists(src):
                    if os.path.exists(dst):
                        os.remove(dst)
                    os.rename(src, dst)
            dst = self.baseFilename + ".1"
            if os.path.exists(dst):
                os.remove(dst)
            os.rename(self.baseFilename, dst)
        self.mode = 'w'
        self.stream = self._open()

    def shouldRollover(self, record):
        """
        Return 1 if appending the formatted `record` would make the file
        reach maxBytes, else 0.
        """
        if self.stream is None: # delay was set...
            self.stream = self._open()
        if self.maxBytes <= 0:
            return 0
        msg = "%s\n" % self.format(record)
        # seek to end first: Windows may report a stale position otherwise
        self.stream.seek(0, 2)
        if self.stream.tell() + len(msg) >= self.maxBytes:
            return 1
        return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
        """
        Open `filename` for appending and schedule time-based rollover.

        when -- unit code: S, M, H, D, MIDNIGHT or W0-W6 (case-insensitive)
        interval -- number of `when` units between rollovers (the MIDNIGHT
                    and W* periods are fixed at one day / one week and still
                    get multiplied; callers conventionally pass 1)
        backupCount -- rotated files to keep; 0 keeps everything
        utc -- compute rollover boundaries in UTC instead of local time

        Raises ValueError for an unknown `when` code or bad weekday digit.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # extMatch lets getFilesToDelete() recognize our rotated files
        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval # multiply by units requested
        # Anchor the first rollover on the file's mtime when it already
        # exists, so restarting the process keeps the rotation schedule.
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.

        Returns an epoch timestamp (seconds).
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                             currentSecond)
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts. There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
            # next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            # is day 5 (Saturday) and rollover is on day 3 (Thursday).
            # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
            # number of days left in the current week (1) plus the number
            # of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = t[6] # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        # adjust if a DST transition falls before the rollover
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
                                newRolloverAt = newRolloverAt - 3600
                            else: # DST bows out before next rollover, so we need to add an hour
                                newRolloverAt = newRolloverAt + 3600
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        Returns the oldest rotated files beyond backupCount, sorted.
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                # only count files whose suffix matches our date pattern
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens. However, you want the file to be named for the
        start of the interval, not the current time. If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            #s = glob.glob(self.baseFilename + ".20*")
            #if len(s) > self.backupCount:
            #    s.sort()
            #    os.remove(s[0])
            for s in self.getFilesToDelete():
                os.remove(s)
        #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()
        currentTime = int(time.time())
        # advance the schedule past "now", skipping missed intervals
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstNow = time.localtime(currentTime)[-1]
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
                    newRolloverAt = newRolloverAt - 3600
                else: # DST bows out before next rollover, so we need to add an hour
                    newRolloverAt = newRolloverAt + 3600
        self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
    """
    A file handler that notices when its file has been replaced underneath
    it (e.g. by newsyslog/logrotate) and transparently reopens it.

    A replacement is detected by comparing the file's (device, inode) pair
    against the values recorded when the stream was last opened.  Intended
    for Unix: on Windows open files cannot be moved/renamed anyway, and
    stat's ST_INO is always zero there.

    Based on a suggestion and patch by Chad J. Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        if os.path.exists(self.baseFilename):
            st = os.stat(self.baseFilename)
            self.dev, self.ino = st[ST_DEV], st[ST_INO]
        else:
            # sentinel values force a reopen on the first emit
            self.dev, self.ino = -1, -1

    def emit(self, record):
        """
        Emit a record, first reopening the stream if the underlying file
        has been moved, deleted or replaced since the last emit.
        """
        st = None
        if os.path.exists(self.baseFilename):
            st = os.stat(self.baseFilename)
            reopen = (st[ST_DEV] != self.dev) or (st[ST_INO] != self.ino)
        else:
            reopen = True
        if reopen and self.stream is not None:
            self.stream.flush()
            self.stream.close()
            self.stream = self._open()
            if st is None:
                # the file did not exist above; _open() just created it
                st = os.stat(self.baseFilename)
            self.dev, self.ino = st[ST_DEV], st[ST_INO]
        logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.

    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        The attribute 'closeOnError' is set to 1 - which means that if
        a socket error occurs, the socket is silently closed and then
        reopened on the next logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        # NOTE(review): despite the docstring above, closeOnError starts at
        # 0 here; it must be set to 1 explicitly to get close-and-reconnect
        # behaviour in handleError() -- confirm which is intended.
        self.closeOnError = 0
        # None means "connected, or never yet attempted"; otherwise the
        # earliest time.time() at which another connect may be tried.
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(s, 'settimeout'):
            s.settimeout(timeout)
        s.connect((self.host, self.port))
        return s

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = 1
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except socket.error:
                #Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    # grow the wait by retryFactor, capped at retryMax
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                if hasattr(self.sock, "sendall"):
                    self.sock.sendall(s)
                else:
                    # manual partial-send loop for sockets without sendall
                    sentsofar = 0
                    left = len(s)
                    while left > 0:
                        sent = self.sock.send(s[sentsofar:])
                        sentsofar = sentsofar + sent
                        left = left - sent
            except socket.error:
                self.sock.close()
                self.sock = None # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            dummy = self.format(record) # just to get traceback text into record.exc_text
            record.exc_info = None # to avoid Unpickleable error
        # cPickle is Python 2 only; protocol 1 keeps the wire format compact
        s = cPickle.dumps(record.__dict__, 1)
        if ei:
            record.exc_info = ei # for next handler
        # 4-byte big-endian length prefix precedes the pickle payload
        slen = struct.pack(">L", len(s))
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        if self.sock:
            self.sock.close()
            self.sock = None
        logging.Handler.close(self)
class DatagramHandler(SocketHandler):
    """
    A SocketHandler variant that ships pickled LogRecord dictionaries over
    UDP instead of a stream socket.

    As with SocketHandler, the receiver only needs makeLogRecord (not the
    logging module's handlers) to reconstruct records.
    """
    def __init__(self, host, port):
        """Remember the destination; no connection is made for UDP."""
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0

    def makeSocket(self):
        """Override the stream-socket factory to produce a UDP socket."""
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send the pickled byte string as a single datagram.

        Unlike SocketHandler.send there is no partial-send loop: UDP
        neither guarantees delivery nor ordering, so each record is one
        self-contained packet.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG = 0 # system is unusable
    LOG_ALERT = 1 # action must be taken immediately
    LOG_CRIT = 2 # critical conditions
    LOG_ERR = 3 # error conditions
    LOG_WARNING = 4 # warning conditions
    LOG_NOTICE = 5 # normal but significant condition
    LOG_INFO = 6 # informational
    LOG_DEBUG = 7 # debug-level messages

    # facility codes
    LOG_KERN = 0 # kernel messages
    LOG_USER = 1 # random user-level messages
    LOG_MAIL = 2 # mail system
    LOG_DAEMON = 3 # system daemons
    LOG_AUTH = 4 # security/authorization messages
    LOG_SYSLOG = 5 # messages generated internally by syslogd
    LOG_LPR = 6 # line printer subsystem
    LOG_NEWS = 7 # network news subsystem
    LOG_UUCP = 8 # UUCP subsystem
    LOG_CRON = 9 # clock daemon
    LOG_AUTHPRIV = 10 # security/authorization messages (private)
    LOG_FTP = 11 # FTP daemon

    # other codes through 15 reserved for system use
    LOG_LOCAL0 = 16 # reserved for local use
    LOG_LOCAL1 = 17 # reserved for local use
    LOG_LOCAL2 = 18 # reserved for local use
    LOG_LOCAL3 = 19 # reserved for local use
    LOG_LOCAL4 = 20 # reserved for local use
    LOG_LOCAL5 = 21 # reserved for local use
    LOG_LOCAL6 = 22 # reserved for local use
    LOG_LOCAL7 = 23 # reserved for local use

    # string name -> priority code, for encodePriority()
    priority_names = {
        "alert": LOG_ALERT,
        "crit": LOG_CRIT,
        "critical": LOG_CRIT,
        "debug": LOG_DEBUG,
        "emerg": LOG_EMERG,
        "err": LOG_ERR,
        "error": LOG_ERR, # DEPRECATED
        "info": LOG_INFO,
        "notice": LOG_NOTICE,
        "panic": LOG_EMERG, # DEPRECATED
        "warn": LOG_WARNING, # DEPRECATED
        "warning": LOG_WARNING,
        }

    # string name -> facility code, for encodePriority()
    facility_names = {
        "auth": LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron": LOG_CRON,
        "daemon": LOG_DAEMON,
        "ftp": LOG_FTP,
        "kern": LOG_KERN,
        "lpr": LOG_LPR,
        "mail": LOG_MAIL,
        "news": LOG_NEWS,
        "security": LOG_AUTH, # DEPRECATED
        "syslog": LOG_SYSLOG,
        "user": LOG_USER,
        "uucp": LOG_UUCP,
        "local0": LOG_LOCAL0,
        "local1": LOG_LOCAL1,
        "local2": LOG_LOCAL2,
        "local3": LOG_LOCAL3,
        "local4": LOG_LOCAL4,
        "local5": LOG_LOCAL5,
        "local6": LOG_LOCAL6,
        "local7": LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=socket.SOCK_DGRAM):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, basestring):
            self.unixsocket = 1
            self._connect_unixsocket(address)
        else:
            self.unixsocket = 0
            self.socket = socket.socket(socket.AF_INET, socktype)
            if socktype == socket.SOCK_STREAM:
                self.socket.connect(address)
        self.formatter = None

    def _connect_unixsocket(self, address):
        """Connect to a local syslog socket, DGRAM first, STREAM fallback."""
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        # syslog may require either DGRAM or STREAM sockets
        try:
            self.socket.connect(address)
        except socket.error:
            self.socket.close()
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.socket.connect(address)

    # curious: when talking to the unix-domain '/dev/log' socket, a
    # zero-terminator seems to be required. this string is placed
    # into a class variable so that it can be overridden if
    # necessary.
    log_format_string = '<%d>%s\000'

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, basestring):
            facility = self.facility_names[facility]
        if isinstance(priority, basestring):
            priority = self.priority_names[priority]
        # syslog PRI: facility in the high bits, priority in the low 3
        return (facility << 3) | priority

    def close (self):
        """
        Closes the socket.
        """
        if self.unixsocket:
            self.socket.close()
        logging.Handler.close(self)

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        msg = self.format(record) + '\000'
        # We need to convert record level to lowercase, maybe this will
        # change in the future.
        # (FIX: this note used to be a stray triple-quoted string literal,
        # i.e. an expression evaluated on every call, not a comment.)
        prio = '<%d>' % self.encodePriority(self.facility,
                                            self.mapPriority(record.levelname))
        # Message is a string. Convert to bytes as required by RFC 5424
        if type(msg) is unicode:
            msg = msg.encode('utf-8')
            if codecs:
                msg = codecs.BOM_UTF8 + msg
        msg = prio + msg
        try:
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except socket.error:
                    # local syslogd may have restarted; reconnect once
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None):
        """
        Initialize the handler.
        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, tuple):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, tuple):
            self.username, self.password = credentials
        else:
            self.username = None
            # Fix: always define self.password. Previously it was left
            # unset when no credentials were supplied, so any access other
            # than the username-guarded one in emit() raised AttributeError.
            self.password = None
        self.fromaddr = fromaddr
        # A bare string is treated as a single recipient address.
        if isinstance(toaddrs, basestring):
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
    def getSubject(self, record):
        """
        Determine the subject for the email.
        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject
    def emit(self, record):
        """
        Emit a record.
        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.utils import formatdate
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port)
            msg = self.format(record)
            # Minimal RFC 2822 envelope; the body is the formatted record.
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                            self.fromaddr,
                            ",".join(self.toaddrs),
                            self.getSubject(record),
                            formatdate(), msg)
            if self.username:
                if self.secure is not None:
                    # STARTTLS needs an EHLO both before and after the TLS
                    # negotiation.
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, located two path components
                # above the win32evtlogutil module.
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            # Map logging levels to NT event types; unmapped levels fall
            # back to self.deftype (see getEventType()).
            self.typemap = {
                logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            # pywin32 is not installed: warn and degrade to a no-op handler.
            print("The Python Win32 extensions for NT (service, event "\
                  "logging) appear not to be available.")
            self._welu = None
    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1
    def getEventCategory(self, record):
        """
        Return the event category for the record.
        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0
    def getEventType(self, record):
        """
        Return the event type for the record.
        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)
    def emit(self, record):
        """
        Emit a record.
        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                # NOTE: 'id' and 'type' shadow builtins; kept as-is to stay
                # byte-compatible with the upstream implementation.
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)
    def close(self):
        """
        Clean up this handler.
        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
class HTTPHandler(logging.Handler):
    """
    A handler that ships each log record to a web server, using either
    GET or POST semantics.
    """
    def __init__(self, host, url, method="GET"):
        """
        Remember the target host, the request URL and the HTTP method.
        Anything other than GET/POST is rejected with a ValueError.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ("GET", "POST"):
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method
    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return vars(record)
    def emit(self, record):
        """
        Emit a record.
        The mapped record is percent-encoded and delivered to the server.
        """
        try:
            import httplib, urllib
            host = self.host
            conn = httplib.HTTP(host)
            target = self.url
            payload = urllib.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # Append the data to the query string, extending any query
                # already present in the URL.
                joiner = '&' if '?' in target else '?'
                target = target + joiner + payload
            conn.putrequest(self.method, target)
            # Support multiple hosts on one IP address: the Host header
            # must not carry the optional :port suffix.
            conn.putheader("Host", host.partition(":")[0])
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(payload)))
            conn.endheaders(payload if self.method == "POST" else None)
            conn.getreply()    # can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class BufferingHandler(logging.Handler):
    """
    Collects log records in an in-memory list.  Every time a record is
    appended, shouldFlush() decides whether flush() must run; flush() is
    expected to do whatever emptying the buffer means for the subclass.
    """
    def __init__(self, capacity):
        """Create the handler with room for *capacity* records."""
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []
    def shouldFlush(self, record):
        """
        Report whether the buffer has reached capacity.  Subclasses can
        override this to implement other flushing strategies.
        """
        return len(self.buffer) >= self.capacity
    def emit(self, record):
        """
        Store the record, then flush if shouldFlush() says the buffer
        needs processing.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()
    def flush(self):
        """
        Discard the buffered records.  Subclasses override this with
        custom flushing behaviour.
        """
        self.buffer = []
    def close(self):
        """Flush any pending records, then run the base-class cleanup."""
        self.flush()
        logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
    """
    A buffering handler that periodically forwards its records to a
    target handler.  The buffer is flushed when it fills up, or as soon
    as a record at flushLevel (or above) arrives.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.
        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
    def shouldFlush(self, record):
        """
        Flush on a full buffer, or on a record at or above flushLevel.
        """
        if len(self.buffer) >= self.capacity:
            return True
        return record.levelno >= self.flushLevel
    def setTarget(self, target):
        """Point this handler at the handler that receives the records."""
        self.target = target
    def flush(self):
        """
        Send every buffered record to the target, if one is set, and then
        empty the buffer.  Without a target nothing happens.  Override for
        different behaviour.
        """
        if self.target:
            for buffered in self.buffer:
                self.target.handle(buffered)
            self.buffer = []
    def close(self):
        """Flush, drop the target reference and chain to the base close."""
        self.flush()
        self.target = None
        BufferingHandler.close(self)
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """No-op schema migration: the model attribute was renamed from
    cohorted_discussions to _cohorted_discussions while keeping the
    original db_column, so no database change is required in either
    direction."""
    def forwards(self, orm):
        # Changed 'CourseCohortsSettings.cohorted_discussions' to 'CourseCohortsSettings._cohorted_discussions' without
        # changing db column name
        pass
    def backwards(self, orm):
        # Changed 'CourseCohortsSettings.cohorted_discussions' to 'CourseCohortsSettings._cohorted_discussions' without
        # changing db column name
        pass
    # Frozen ORM state generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'course_groups.coursecohort': {
            'Meta': {'object_name': 'CourseCohort'},
            'assignment_type': ('django.db.models.fields.CharField', [], {'default': "'manual'", 'max_length': '20'}),
            'course_user_group': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'cohort'", 'unique': 'True', 'to': "orm['course_groups.CourseUserGroup']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'course_groups.coursecohortssettings': {
            'Meta': {'object_name': 'CourseCohortsSettings'},
            '_cohorted_discussions': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_column': "'cohorted_discussions'", 'blank': 'True'}),
            'always_cohort_inline_discussions': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_cohorted': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'course_groups.courseusergroup': {
            'Meta': {'unique_together': "(('name', 'course_id'),)", 'object_name': 'CourseUserGroup'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'group_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'related_name': "'course_groups'", 'symmetrical': 'False', 'to': "orm['auth.User']"})
        },
        'course_groups.courseusergrouppartitiongroup': {
            'Meta': {'object_name': 'CourseUserGroupPartitionGroup'},
            'course_user_group': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['course_groups.CourseUserGroup']", 'unique': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'group_id': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'partition_id': ('django.db.models.fields.IntegerField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }
complete_apps = ['course_groups'] | unknown | codeparrot/codeparrot-clean | ||
"""SCons.Tool.ilink32
XXX
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ilink32.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Tool
import SCons.Tool.bcc32
import SCons.Util
def generate(env):
    """Add Builders and construction variables for the Borland ilink
    linker to the given SCons Environment."""
    SCons.Tool.createSharedLibBuilder(env)
    SCons.Tool.createProgBuilder(env)
    env['LINK'] = '$CC'
    env['LINKFLAGS'] = SCons.Util.CLVar('')
    env['LINKCOM'] = '$LINK -q $LINKFLAGS -e$TARGET $SOURCES $LIBS'
    # Borland's linker takes library paths/names without prefixes.
    for var in ('LIBDIRPREFIX', 'LIBDIRSUFFIX', 'LIBLINKPREFIX'):
        env[var] = ''
    env['LIBLINKSUFFIX'] = '$LIBSUFFIX'
def exists(env):
    """Report whether this tool is usable in the given environment."""
    # Linking is delegated to bcc32 (which knows where the standard LIBS
    # live), so probe for that tool rather than for ilink32 itself.
    return SCons.Tool.bcc32.findIt('bcc32', env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
# RAVEn Plugin
#
# Author: Dnpwwo, 2016
#
#
# Plugin parameter definition below will be parsed during startup and copied into Manifest.xml, this will then drive the user interface in the Hardware web page
#
"""
<plugin key="RAVEn" name="RAVEn Zigbee energy monitor" author="dnpwwo" version="1.3.10" externallink="https://rainforestautomation.com/rfa-z106-raven/">
<params>
<param field="SerialPort" label="Serial Port" width="150px" required="true" default="/dev/ttyRAVEn"/>
<param field="Mode6" label="Debug" width="100px">
<options>
<option label="True" value="Debug"/>
<option label="False" value="Normal" default="true" />
<option label="Logging" value="File"/>
</options>
</param>
</params>
</plugin>
"""
import Domoticz
import xml.etree.ElementTree as ET
# Module state shared between the Domoticz callbacks below.
SerialConn = None
demandFreq=30 # seconds between demand events
summaryFreq=300 # seconds between summary updates
# kWh contributed by 1 kW sustained for one demand interval.
fScale = demandFreq / 3600.0
# Running energy total (kWh); seeded/corrected by summation messages.
summation = 0.0
# True once the stick reports 'Connected'; gates the info handshake.
hasConnected = False
# Next handshake command for onHeartbeat to send ("" = nothing pending).
nextCommand = ""
def onStart():
    """Domoticz plugin entry point: set up debugging, create the devices
    on first run and open the serial connection to the RAVEn stick."""
    global SerialConn
    if Parameters["Mode6"] != "Normal":
        Domoticz.Debugging(1)
    if Parameters["Mode6"] == "Debug":
        # Truncate the log file at startup when full file logging is on.
        f = open(Parameters["HomeFolder"]+"plugin.log","w")
        f.write("Plugin started.")
        f.close()
    if (len(Devices) == 0):
        # Unit 1: usage (Type 243/Subtype 29 = kWh), Unit 2: counter total.
        Domoticz.Device(Name="Usage", Unit=1, Type=243, Subtype=29, Switchtype=0, Image=0, Options="").Create()
        Domoticz.Device("Total", 2, 113).Create()
        Domoticz.Log("Devices created.")
    Domoticz.Log("Plugin has " + str(len(Devices)) + " devices associated with it.")
    DumpConfigToLog()
    SerialConn = Domoticz.Connection(Name="RAVEn", Transport="Serial", Protocol="XML", Address=Parameters["SerialPort"], Baud=115200)
    SerialConn.Connect()
    return
def onConnect(Connection, Status, Description):
    """Serial port opened (Status == 0) or failed: on success, restart the
    stick so it replays its connection handshake from a known state."""
    global SerialConn
    if (Status == 0):
        Domoticz.Log("Connected successfully to: "+Parameters["SerialPort"])
        Connection.Send("<Command>\n <Name>restart</Name>\n</Command>")
        SerialConn = Connection
    else:
        Domoticz.Log("Failed to connect ("+str(Status)+") to: "+Parameters["SerialPort"])
        Domoticz.Debug("Failed to connect ("+str(Status)+") to: "+Parameters["SerialPort"]+" with error: "+Description)
    return True
def onMessage(Connection, Data):
    """Dispatch one XML fragment received from the RAVEn stick: drive the
    startup handshake, keep the reporting schedules in sync and update
    the Domoticz devices from demand/summation readings."""
    global hasConnected, nextCommand, fScale, summation
    strData = Data.decode("utf-8", "ignore")
    LogMessage(strData)
    xmltree = ET.fromstring(strData)
    if xmltree.tag == 'ConnectionStatus':
        # Status frames arrive while the stick (re)joins the meter's Zigbee
        # network; they drive the get_device_info/... handshake below.
        strLog = ""
        if (xmltree.find('MeterMacId') != None): strLog = "MeterMacId: "+xmltree.find('MeterMacId').text+", "
        connectStatus = xmltree.find('Status').text
        strLog += "Connection Status = '"+connectStatus+"'"
        if (xmltree.find('Description') != None): strLog += " - "+xmltree.find('Description').text
        if (xmltree.find('LinkStrength') != None): strLog += ", Link Strength = "+str(int(xmltree.find('LinkStrength').text,16))
        Domoticz.Log(strLog)
        if connectStatus == 'Initializing...':
            hasConnected = False
        elif (connectStatus == 'Connected') and (hasConnected == False):
            nextCommand = "get_device_info"
            hasConnected = True
    elif xmltree.tag == 'DeviceInfo':
        Domoticz.Log( "Manufacturer: %s, Device ID: %s, Install Code: %s" % (xmltree.find('Manufacturer').text, xmltree.find('DeviceMacId').text, xmltree.find('InstallCode').text) )
        Domoticz.Log( "Hardware: Version %s, Firmware Version: %s, Model: %s" % (xmltree.find('HWVersion').text, xmltree.find('FWVersion').text, xmltree.find('ModelId').text) )
        nextCommand = "get_network_info"
    elif xmltree.tag == 'NetworkInfo':
        LogMessage( "NetworkInfo response, Status = '%s' - %s, Link Strength = %d" % (xmltree.find('Status').text, xmltree.find('Description').text, int(xmltree.find('LinkStrength').text,16)))
        nextCommand = "get_meter_list"
    elif xmltree.tag == 'MeterList':
        # End of the fixed handshake; from here on requests are per meter.
        nextCommand = ""
        for meter in xmltree.iter('MeterMacId'):
            LogMessage( "MeterMacId: %s, MeterList response" % meter.text)
            Connection.Send("<Command>\n <Name>get_meter_info</Name>\n <MeterMacId>"+meter.text+"</MeterMacId>\n</Command>\n")
    elif xmltree.tag == 'MeterInfo':
        LogMessage( "MeterMacId: %s, MeterInfo response, Enabled = %s" % (xmltree.find('MeterMacId').text, xmltree.find('Enabled').text))
        Connection.Send("<Command>\n <Name>get_schedule</Name>\n <MeterMacId>"+xmltree.find('MeterMacId').text+"</MeterMacId>\n</Command>\n")
    elif xmltree.tag == 'ScheduleInfo':
        iFreq = int(xmltree.find('Frequency').text,16)
        LogMessage( "MeterMacId: %s, ScheduleInfo response: Type '%s', Frequency %d, Enabled %s" % (xmltree.find('MeterMacId').text, xmltree.find('Event').text, iFreq, xmltree.find('Enabled').text))
        # Re-program the stick whenever its reporting frequency differs
        # from the plugin's demandFreq/summaryFreq settings.
        if (xmltree.find('Event').text == 'demand') and (iFreq != demandFreq):
            LogMessage( "MeterMacId: %s, Setting 'demand' schedule to: Frequency %d" % (xmltree.find('MeterMacId').text, demandFreq))
            Connection.Send("<Command>\n <Name>set_schedule</Name>\n <MeterMacId>"+xmltree.find('MeterMacId').text+"</MeterMacId>\n <Event>demand</Event>\n <Frequency>" + str(hex(demandFreq)) + "</Frequency>\n <Enabled>Y</Enabled>\n</Command>\n")
        if (xmltree.find('Event').text == 'summation') and (iFreq != summaryFreq):
            LogMessage( "MeterMacId: %s, Setting 'summation' schedule to: Frequency %d" % (xmltree.find('MeterMacId').text, summaryFreq))
            Connection.Send("<Command>\n <Name>set_schedule</Name>\n <MeterMacId>"+xmltree.find('MeterMacId').text+"</MeterMacId>\n <Event>summation</Event>\n <Frequency>" + str(hex(summaryFreq)) + "</Frequency>\n <Enabled>Y</Enabled>\n</Command>\n")
        if (xmltree.find('Event').text == 'summation'):
            Connection.Send("<Command>\n <Name>get_current_summation_delivered</Name>\n <MeterMacId>"+xmltree.find('MeterMacId').text+"</MeterMacId>\n <Refresh>Y</Refresh>\n</Command>\n")
    elif xmltree.tag == 'InstantaneousDemand':
        demand = float(getInstantDemandKWh(xmltree))
        if (summation == 0.0):
            # No baseline yet: wait for the first CurrentSummationDelivered.
            Domoticz.Log("MeterMacId: %s, Instantaneous Demand = %f, NO SUMMARY DATA" % (xmltree.find('MeterMacId').text, demand))
        else:
            # Integrate demand (kW) over the reporting interval to keep a
            # running kWh total between summation updates.
            delta = fScale * demand
            summation = summation + delta
            Domoticz.Log( "MeterMacId: %s, Instantaneous Demand = %.3f, Summary Total = %.3f, Delta = %f" % (xmltree.find('MeterMacId').text, demand, summation, delta))
            sValue = "%.3f;%.3f" % (demand,summation)
            # Dropping the '.' from a %.3f value multiplies it by 1000,
            # i.e. kW -> W and kWh -> Wh as the device expects.
            Devices[1].Update(0, sValue.replace('.',''))
    elif xmltree.tag == 'CurrentSummationDelivered':
        total = float(getCurrentSummationKWh(xmltree))
        # Only move the counter forwards; the locally integrated total may
        # briefly exceed a stale meter report.
        if (total > summation):
            summation = total
            sValue = "%.3f" % (total)
            Devices[2].Update(0, sValue.replace('.',''))
        Domoticz.Log( "MeterMacId: %s, Current Summation = %.3f" % (xmltree.find('MeterMacId').text, total))
    elif xmltree.tag == 'TimeCluster':
        Domoticz.Debug( xmltree.tag + " response" )
    elif xmltree.tag == 'PriceCluster':
        Domoticz.Debug( xmltree.tag + " response" )
    elif xmltree.tag == 'CurrentPeriodUsage':
        Domoticz.Debug( xmltree.tag + " response" )
    elif xmltree.tag == 'LastPeriodUsage':
        Domoticz.Debug( xmltree.tag + " response" )
    elif xmltree.tag == 'ProfileData':
        Domoticz.Debug( xmltree.tag + " response" )
    else:
        Domoticz.Error("Unrecognised (not implemented) XML Fragment ("+xmltree.tag+").")
    return
def onDisconnect(Connection):
    """Log the drop; onHeartbeat() takes care of reconnecting."""
    Domoticz.Log("Connection '"+Connection.Name+"' disconnected.")
    return
def onHeartbeat():
    """Periodic tick: send the pending handshake command (repeated every
    beat until a response clears/replaces nextCommand) or reconnect."""
    global hasConnected, nextCommand, SerialConn
    if (SerialConn.Connected()):
        if (nextCommand != ""):
            Domoticz.Debug("Sending command: "+nextCommand)
            SerialConn.Send("<Command>\n <Name>"+nextCommand+"</Name>\n</Command>\n")
    else:
        hasConnected = False
        SerialConn.Connect()
    return True
# RAVEn support functions
def getCurrentSummationKWh(xmltree):
    '''Return the meter reading (SummationDelivered, a hex string) from a
    RAVEn summation response, scaled and formatted as a numeric string.'''
    raw = float(int(xmltree.find('SummationDelivered').text, 16))
    scaled = calculateRAVEnNumber(xmltree, raw)
    return formatRAVEnDigits(xmltree, scaled)
def getInstantDemandKWh(xmltree):
    '''Return the Demand value (a hex string) from a RAVEn Instantaneous
    Demand response, scaled and formatted as a numeric string.'''
    raw = float(int(xmltree.find('Demand').text, 16))
    scaled = calculateRAVEnNumber(xmltree, raw)
    return formatRAVEnDigits(xmltree, scaled)
def calculateRAVEnNumber(xmltree, value):
    '''Scale *value* using the Multiplier and Divisor fields (hex strings)
    of a RAVEn XML response.

    Per the RAVEn XML API, a Multiplier or Divisor of zero means "use 1".
    The previous version divided by zero when both fields were zero.
    '''
    # Get calculation parameters from XML - Multiplier, Divisor
    fDivisor = float(int(xmltree.find('Divisor').text, 16))
    fMultiplier = float(int(xmltree.find('Multiplier').text, 16))
    if (fMultiplier > 0 and fDivisor > 0):
        fResult = float((value * fMultiplier) / fDivisor)
    elif (fMultiplier > 0):
        fResult = float(value * fMultiplier)
    elif (fDivisor > 0):
        fResult = float(value / fDivisor)
    else:
        # Both zero: treat as multiplier 1 / divisor 1 (spec behaviour).
        fResult = float(value)
    return fResult
def formatRAVEnDigits(xmltree, value):
    '''Format *value* as a string according to the DigitsRight, DigitsLeft
    and SuppressLeadingZero fields of a RAVEn XML response.

    Fix: an all-zero rendering (e.g. "0000") used to be stripped down to an
    empty string, making sResult[0] raise IndexError; at least one
    character is now always kept.
    '''
    # Get formatting parameters from XML - DigitsRight, DigitsLeft
    iDigitsRight = int(xmltree.find('DigitsRight').text, 16)
    iDigitsLeft = int(xmltree.find('DigitsLeft').text, 16)
    sResult = ("{:0%d.%df}" % (iDigitsLeft + iDigitsRight + 1, iDigitsRight)).format(value)
    # Suppress Leading Zeros if specified in XML
    if xmltree.find('SuppressLeadingZero').text == 'Y':
        while len(sResult) > 1 and sResult[0] == '0':
            sResult = sResult[1:]
        if sResult[0] == '.':
            # Re-add a single leading zero for values below 1 (".5" -> "0.5").
            sResult = '0' + sResult
    return sResult
# Generic helper functions
def LogMessage(Message):
    """Mirror the message to plugin.log when file logging is enabled, and
    always forward it to the Domoticz debug stream."""
    if Parameters["Mode6"] == "File":
        with open(Parameters["HomeFolder"]+"plugin.log", "a") as logfile:
            logfile.write(Message+"\r\n")
    Domoticz.Debug(Message)
def DumpConfigToLog():
    """Write all non-empty parameters and every device's state to the log."""
    for key in Parameters:
        if Parameters[key] != "":
            LogMessage("'" + key + "':'" + str(Parameters[key]) + "'")
    LogMessage("Device count: " + str(len(Devices)))
    for unit in Devices:
        device = Devices[unit]
        LogMessage("Device: " + str(unit) + " - " + str(device))
        LogMessage("Device ID: '" + str(device.ID) + "'")
        LogMessage("Device Name: '" + device.Name + "'")
        LogMessage("Device nValue: " + str(device.nValue))
        LogMessage("Device sValue: '" + device.sValue + "'")
        LogMessage("Device LastLevel: " + str(device.LastLevel))
    return
"""Set ON DELETE CASCADE on Build.*
Revision ID: 403b3fb41569
Revises: 4732741c7696
Create Date: 2013-12-23 16:07:02.202873
"""
# revision identifiers, used by Alembic.
# This migration directly follows 4732741c7696 in the migration chain.
revision = '403b3fb41569'
down_revision = '4732741c7696'
from alembic import op
def upgrade():
    """Recreate the Build table's foreign keys with ON DELETE CASCADE and
    add the constraints that were missing entirely."""
    recreate = (
        ('build_author_id_fkey', 'author', 'author_id'),
        ('build_change_id_fkey', 'change', 'change_id'),
        ('build_patch_id_fkey', 'patch', 'patch_id'),
        ('build_project_id_fkey', 'project', 'project_id'),
        ('build_repository_id_fkey', 'repository', 'repository_id'),
    )
    for name, referent, column in recreate:
        op.drop_constraint(name, 'build')
        op.create_foreign_key(name, 'build', referent, [column], ['id'], ondelete='CASCADE')
    # add missing constraints
    missing = (
        ('build_family_id_fkey', 'buildfamily', 'family_id'),
        ('build_source_id_fkey', 'source', 'source_id'),
        ('build_parent_id_fkey', 'build', 'parent_id'),
    )
    for name, referent, column in missing:
        op.create_foreign_key(name, 'build', referent, [column], ['id'], ondelete='CASCADE')
def downgrade():
    """Drop only the constraints that upgrade() newly introduced; the
    recreated cascade FKs are left in place, matching the original."""
    for name in ('build_family_id_fkey', 'build_source_id_fkey', 'build_parent_id_fkey'):
        op.drop_constraint(name, 'build')
"""Base for PAD plugins."""
from __future__ import absolute_import
from builtins import tuple
from builtins import object
try:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
except ImportError:
create_engine = None
sessionmaker = None
from collections import defaultdict
import oa.conf
def dbi_to_mysql(dsn, user, password):
    """Convert a perl-style DBI DSN ("DBI:<driver>:<db>:<host>") into a
    dict of connection settings.

    Returns a defaultdict(int) with "driver", "hostname" and "db_name",
    plus "user" and "password" when both are supplied.

    Fix: the connection string used to be parsed only inside the mysql
    branch, so any other driver raised UnboundLocalError on db_name.
    """
    connection_data = defaultdict(int)
    dummy, driver, connection = dsn.split(":", 2)
    if driver.lower() == "mysql":
        # Normalize the driver name's case.
        driver = "mysql"
    db_name, hostname = connection.split(":", 1)
    connection_data["driver"] = driver
    connection_data["hostname"] = hostname
    connection_data["db_name"] = db_name
    if not user or not password:
        return connection_data
    connection_data["user"] = user
    connection_data["password"] = password
    return connection_data
def dbi_to_alchemy(dsn, user, password):
"""Convert perl DBI setting to SQLAlchemy settings."""
dummy, driver, connection = dsn.split(":", 2)
if driver.lower() == "mysql":
driver = "mysql+pymysql"
db_name, hostname = connection.split(":", 1)
elif driver.lower() == "pg":
driver = "postgresql"
values = dict(item.split("=") for item in connection.split(";"))
db_name = values.get("dbname", "spamassassin")
hostname = values.get("host", "localhost")
if "port" in values:
hostname = "%s:%s" % (hostname, values["port"])
elif driver.lower() == "sqlite":
driver = "sqlite"
user, password, hostname = "", "", ""
values = dict(item.split("=") for item in connection.split(";"))
db_name = values["dbname"]
else:
return ""
if not user or not password:
return "%s://%s/%s" % (driver, hostname, db_name)
return "%s://%s:%s@%s/%s" % (driver, user, password, hostname, db_name)
class BasePlugin(oa.conf.Conf, object):
"""Abstract class for plugins. All plugins must inherit from this class.
This exposes methods to methods to store data and configuration options
in the "global" context and the "local" context.
* The "global" context is loaded once when the configuration is parsed
and persists throughout until the plugin is reloaded.
* The "local" context is stored per message and each new message parsed
has its one context.
The methods automatically stores the data under the plugin names to ensure
that there are no name clashes between plugins.
The plugin can also define eval rules by implementing a method and adding
it to the eval_rules list. These will be registered after the plugin has
been initialized.
"""
eval_rules = tuple()
# Defines any new rules that the plugins implements.
cmds = None
# See oa.conf.Conf for details on options.
options = None
# Database connection fields, each plugin should set their own if they need them
dsn = None
sql_username = ""
sql_password = ""
    def __init__(self, ctxt):
        # Filled in by the plugin loader with the plugin's file location.
        self.path_to_plugin = None
        super(BasePlugin, self).__init__(ctxt)
    def finish_parsing_start(self, results):
        """Called when the configuration parsing has finished but before
        the ruleset has actually been initialized from the parsed data.
        This can be used to insert new data after parsing.
        :param results: A dictionary that maps the rule names to the
            rest of the data extracted from the configuration (e.g. the
            score, description etc.)
        :return: Nothing
        """
    # XXX The name method for this is horrible, but it's likely better to have
    # XXX it the same as SA.
def finish_parsing_end(self, ruleset):
"""Called when the configuration parsing has finished, but before the
post-parsing. This hook can be used for e.g. to add rules to the
ruleset.
By default this prepares the SQLAlchemy engine if the plugin has any
set.
"""
connect_string = None
self["engine"] = None
if self.dsn:
if self.dsn.upper().startswith("DBI"):
# Convert from SA format.
user = self.sql_username
password = self.sql_password
if not create_engine:
self["engine"] = dbi_to_mysql(self.dsn, user, password)
else:
connect_string = dbi_to_alchemy(self.dsn, user, password)
elif self.dsn:
# The connect string is already in the correct format
connect_string = self.dsn
if connect_string is not None:
self["engine"] = create_engine(connect_string)
def get_engine(self):
return self["engine"]
def get_session(self):
"""Open a new SQLAlchemy session."""
engine = self["engine"]
return sessionmaker(bind=engine)()
def check_start(self, msg):
"""Called before the metadata is extracted from the message. The
message object passed will only have raw_msg and msg available.
May be overridden.
"""
def extract_metadata(self, msg, payload, text, part):
"""Called while the message metadata is extracted for every message
part. If the part contains text, corresponding payload is provided,
else it will be None.
May be overridden.
"""
def parsed_metadata(self, msg):
"""The message has been parsed and all the information can be accessed
by the plugin.
May be overridden.
"""
def check_end(self, ruleset, msg):
"""The message check operation has just finished, and the results are
about to be returned to the caller
May be overridden.
"""
def auto_learn_discriminator(self, ruleset, msg):
"""All message operations have finished and it can be checked for
submission to autolearning systems
May be overridden.
"""
def plugin_report(self, msg):
"""Called when a message should be reported as spam.
May be overridden.
"""
def plugin_revoke(self, msg):
"""Called when a message should be reported as ham.
May be overridden.
"""
def parse_config(self, key, value):
"""Parse a config line that the normal parses doesn't know how to
interpret.
Use self.inhibit_further_callbacks to stop other plugins from
processing this line.
May be overridden.
"""
super(BasePlugin, self).parse_config(key, value) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Nova Network."""
import sys
import traceback
from oslo.config import cfg
from nova.conductor import rpcapi as conductor_rpcapi
from nova import config
import nova.db.api
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as objects_base
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
from nova import utils
from nova import version
CONF = cfg.CONF
CONF.import_opt('network_topic', 'nova.network.rpcapi')
CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
def block_db_access():
    """Replace the nova DB API with a stub that rejects every call.

    Any attribute access on the stub returns the stub itself, so arbitrarily
    deep attribute chains all end in a callable that logs the offending stack
    trace and raises DBNotAllowed.
    """

    class _BlockedDB(object):
        """Stand-in DB API object: all attribute lookups and calls fail."""

        def __getattr__(self, attr):
            # Return ourselves so chained lookups still hit __call__.
            return self

        def __call__(self, *args, **kwargs):
            stack_dump = "".join(traceback.format_stack())
            log = logging.getLogger('nova.network')
            log.error(_('No db access allowed in nova-network: %s'),
                      stack_dump)
            raise exception.DBNotAllowed('nova-network')

    nova.db.api.IMPL = _BlockedDB()
def main():
    """Entry point for the nova-network service.

    Order matters here: config must be parsed before logging is set up,
    and objects must be registered before the service is created.
    """
    config.parse_args(sys.argv)
    logging.setup("nova")
    utils.monkey_patch()
    objects.register_all()
    # Enable the guru-meditation error report on crashes.
    gmr.TextGuruMeditation.setup_autorun(version)
    if not CONF.conductor.use_local:
        # Remote conductor mode: forbid direct DB access and route all
        # object calls through the conductor RPC API instead.
        block_db_access()
        objects_base.NovaObject.indirection_api = \
            conductor_rpcapi.ConductorAPI()
    server = service.Service.create(binary='nova-network',
                                    topic=CONF.network_topic,
                                    db_allowed=CONF.conductor.use_local)
    service.serve(server)
    service.wait()
# -*- coding: utf-8 -*-
#
# SelfTest/st_common.py: Common functions for SelfTest modules
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Common functions for SelfTest modules"""
__revision__ = "$Id$"
import unittest
import string
import binascii
class _list_testloader(unittest.TestLoader):
    # Collect loaded tests into a plain list instead of a TestSuite,
    # so callers can manipulate the cases directly.
    suiteClass = list
def list_test_cases(class_):
    """Return a list of TestCase instances given a TestCase class

    This is useful when you have defined test* methods on your TestCase class.
    """
    loader = _list_testloader()
    return loader.loadTestsFromTestCase(class_)
def strip_whitespace(s):
    """Remove all whitespace from a string.

    The original implementation used ``string.maketrans``, which was removed
    in Python 3. Splitting on whitespace runs and re-joining is equivalent
    for the ASCII whitespace this module deals with (hex test vectors), and
    works on both Python 2 and 3. (``str.split()`` also strips additional
    Unicode whitespace, a harmless generalization here.)
    """
    return "".join(s.split())
def a2b_hex(s):
    """Convert hexadecimal to binary, ignoring whitespace"""
    cleaned = strip_whitespace(s)
    return binascii.a2b_hex(cleaned)
def b2a_hex(s):
    """Convert binary to hexadecimal"""
    # Kept for API completeness; hexlify is binascii's alias for b2a_hex.
    return binascii.hexlify(s)
# vim:set ts=4 sw=4 sts=4 expandtab: | unknown | codeparrot/codeparrot-clean | ||
#ifndef BOOST_ARCHIVE_BASIC_BINARY_IARCHIVE_HPP
#define BOOST_ARCHIVE_BASIC_BINARY_IARCHIVE_HPP
// MS compatible compilers support #pragma once
#if defined(_MSC_VER)
# pragma once
#endif
/////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8
// basic_binary_iarchive.hpp
//
// archives stored as native binary - this should be the fastest way
// to archive the state of a group of objects. It makes no attempt to
// convert to any canonical form.
// IN GENERAL, ARCHIVES CREATED WITH THIS CLASS WILL NOT BE READABLE
// ON PLATFORM APART FROM THE ONE THEY ARE CREATED ON
// (C) Copyright 2002 Robert Ramey - http://www.rrsd.com .
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org for updates, documentation, and revision history.
#include <boost/config.hpp>
#include <boost/detail/workaround.hpp>
#include <boost/archive/basic_archive.hpp>
#include <boost/archive/detail/common_iarchive.hpp>
#include <boost/serialization/collection_size_type.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/library_version_type.hpp>
#include <boost/serialization/item_version_type.hpp>
#include <boost/integer_traits.hpp>
#ifdef BOOST_MSVC
# pragma warning(push)
# pragma warning(disable : 4511 4512)
#endif
#include <boost/archive/detail/abi_prefix.hpp> // must be the last header
namespace boost {
namespace archive {
namespace detail {
template<class Archive> class interface_iarchive;
} // namespace detail
/////////////////////////////////////////////////////////////////////////
// class basic_binary_iarchive - read serialized objects from a input binary stream
template<class Archive>
class BOOST_SYMBOL_VISIBLE basic_binary_iarchive :
    public detail::common_iarchive<Archive>
{
#ifdef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
public:
#else
protected:
    #if BOOST_WORKAROUND(BOOST_MSVC, < 1500)
        // for some inexplicable reason insertion of "class" generates compile error
        // on msvc 7.1
        friend detail::interface_iarchive<Archive>;
    #else
        friend class detail::interface_iarchive<Archive>;
    #endif
#endif
    // intermediate level to support override of operators
    // for templates in the absence of partial function
    // template ordering. If we get here pass to base class
    // note extra nonsense to sneak it past the borland compilers
    typedef detail::common_iarchive<Archive> detail_common_iarchive;
    template<class T>
    void load_override(T & t){
        this->detail_common_iarchive::load_override(t);
    }

    // include these to trap a change in binary format which
    // isn't specifically handled
    // upto 32K classes
    BOOST_STATIC_ASSERT(sizeof(class_id_type) == sizeof(int_least16_t));
    BOOST_STATIC_ASSERT(sizeof(class_id_reference_type) == sizeof(int_least16_t));
    // upto 2G objects
    BOOST_STATIC_ASSERT(sizeof(object_id_type) == sizeof(uint_least32_t));
    BOOST_STATIC_ASSERT(sizeof(object_reference_type) == sizeof(uint_least32_t));

    // binary files don't include the optional information
    void load_override(class_id_optional_type & /* t */){}

    // Tracking flags: library version 7+ stores an 8-bit int,
    // older archives stored a bool.
    void load_override(tracking_type & t, int /*version*/){
        boost::serialization::library_version_type lv = this->get_library_version();
        if(boost::serialization::library_version_type(6) < lv){
            int_least8_t x=0;
            * this->This() >> x;
            t = boost::archive::tracking_type(x);
        }
        else{
            bool x=0;
            * this->This() >> x;
            t = boost::archive::tracking_type(x);
        }
    }
    void load_override(class_id_type & t){
        boost::serialization::library_version_type lv = this->get_library_version();
        /*
         * library versions:
         *   boost 1.39 -> 5
         *   boost 1.43 -> 7
         *   boost 1.47 -> 9
         *
         *
         * 1) in boost 1.43 and inferior, class_id_type is always a 16bit value, with no check on the library version
         *    --> this means all archives with version v <= 7 are written with a 16bit class_id_type
         * 2) in boost 1.44 this load_override has disappeared (and thus boost 1.44 is not backward compatible at all !!)
         * 3) recent boosts reintroduced load_override with a test on the version :
         *     - v > 7 : this->detail_common_iarchive::load_override(t, version)
         *     - v > 6 : 16bit
         *     - other : 32bit
         *    --> which is obviously incorrect, see point 1
         *
         * the fix here decodes class_id_type on 16bit for all v <= 7, which seems to be the correct behaviour ...
         */
        if(boost::serialization::library_version_type (7) < lv){
            this->detail_common_iarchive::load_override(t);
        }
        else{
            int_least16_t x=0;
            * this->This() >> x;
            t = boost::archive::class_id_type(x);
        }
    }
    void load_override(class_id_reference_type & t){
        // Same on-disk representation as class_id_type.
        load_override(static_cast<class_id_type &>(t));
    }
    // Version numbers widened over the library's history:
    // v>7 delegates to the base, v7 uses 8 bits, v6 uses 16 bits,
    // v3..5 a single byte, and the oldest archives a full unsigned int.
    void load_override(version_type & t){
        boost::serialization::library_version_type lv = this->get_library_version();
        if(boost::serialization::library_version_type(7) < lv){
            this->detail_common_iarchive::load_override(t);
        }
        else
        if(boost::serialization::library_version_type(6) < lv){
            uint_least8_t x=0;
            * this->This() >> x;
            t = boost::archive::version_type(x);
        }
        else
        if(boost::serialization::library_version_type(5) < lv){
            uint_least16_t x=0;
            * this->This() >> x;
            t = boost::archive::version_type(x);
        }
        else
        if(boost::serialization::library_version_type(2) < lv){
            // upto 255 versions
            unsigned char x=0;
            * this->This() >> x;
            t = version_type(x);
        }
        else{
            unsigned int x=0;
            * this->This() >> x;
            t = boost::archive::version_type(x);
        }
    }
    void load_override(boost::serialization::item_version_type & t){
        boost::serialization::library_version_type lv = this->get_library_version();
        // NOTE(review): both branches below test `(6) < lv`, so the middle
        // 16-bit branch is unreachable. The commented line suggests the first
        // test was originally `(7) < lvt` (as in stock Boost); this may be a
        // deliberate vendor patch -- confirm before changing.
//        if(boost::serialization::library_version_type(7) < lvt){
        if(boost::serialization::library_version_type(6) < lv){
            this->detail_common_iarchive::load_override(t);
        }
        else
        if(boost::serialization::library_version_type(6) < lv){
            uint_least16_t x=0;
            * this->This() >> x;
            t = boost::serialization::item_version_type(x);
        }
        else{
            unsigned int x=0;
            * this->This() >> x;
            t = boost::serialization::item_version_type(x);
        }
    }
    void load_override(serialization::collection_size_type & t){
        // Collection sizes are a dedicated type from library version 6 on;
        // older archives stored a plain unsigned int.
        if(boost::serialization::library_version_type(5) < this->get_library_version()){
            this->detail_common_iarchive::load_override(t);
        }
        else{
            unsigned int x=0;
            * this->This() >> x;
            t = serialization::collection_size_type(x);
        }
    }
    BOOST_ARCHIVE_OR_WARCHIVE_DECL void
    load_override(class_name_type & t);
    BOOST_ARCHIVE_OR_WARCHIVE_DECL void
    init();
    basic_binary_iarchive(unsigned int flags) :
        detail::common_iarchive<Archive>(flags)
    {}
};
} // namespace archive
} // namespace boost
#ifdef BOOST_MSVC
#pragma warning(pop)
#endif
#include <boost/archive/detail/abi_suffix.hpp> // pops abi_suffix.hpp pragmas
#endif // BOOST_ARCHIVE_BASIC_BINARY_IARCHIVE_HPP | unknown | github | https://github.com/mysql/mysql-server | extra/boost/boost_1_87_0/boost/archive/basic_binary_iarchive.hpp |
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Literal, Annotated, TypeAlias
from ...._utils import PropertyInfo
from ...._models import BaseModel
from .error_event import ErrorEvent
from .conversation_item import ConversationItem
from .response_done_event import ResponseDoneEvent
from .session_created_event import SessionCreatedEvent
from .session_updated_event import SessionUpdatedEvent
from .response_created_event import ResponseCreatedEvent
from .response_text_done_event import ResponseTextDoneEvent
from .rate_limits_updated_event import RateLimitsUpdatedEvent
from .response_audio_done_event import ResponseAudioDoneEvent
from .response_text_delta_event import ResponseTextDeltaEvent
from .conversation_created_event import ConversationCreatedEvent
from .response_audio_delta_event import ResponseAudioDeltaEvent
from .conversation_item_created_event import ConversationItemCreatedEvent
from .conversation_item_deleted_event import ConversationItemDeletedEvent
from .response_output_item_done_event import ResponseOutputItemDoneEvent
from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent
from .response_content_part_done_event import ResponseContentPartDoneEvent
from .response_output_item_added_event import ResponseOutputItemAddedEvent
from .conversation_item_truncated_event import ConversationItemTruncatedEvent
from .response_content_part_added_event import ResponseContentPartAddedEvent
from .input_audio_buffer_committed_event import InputAudioBufferCommittedEvent
from .transcription_session_updated_event import TranscriptionSessionUpdatedEvent
from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent
from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent
from .input_audio_buffer_speech_started_event import InputAudioBufferSpeechStartedEvent
from .input_audio_buffer_speech_stopped_event import InputAudioBufferSpeechStoppedEvent
from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent
from .conversation_item_input_audio_transcription_delta_event import ConversationItemInputAudioTranscriptionDeltaEvent
from .conversation_item_input_audio_transcription_failed_event import ConversationItemInputAudioTranscriptionFailedEvent
from .conversation_item_input_audio_transcription_completed_event import (
ConversationItemInputAudioTranscriptionCompletedEvent,
)
__all__ = [
"RealtimeServerEvent",
"ConversationItemRetrieved",
"OutputAudioBufferStarted",
"OutputAudioBufferStopped",
"OutputAudioBufferCleared",
]
class ConversationItemRetrieved(BaseModel):
    """Server event payload for ``conversation.item.retrieved``."""

    event_id: str
    """The unique ID of the server event."""

    item: ConversationItem
    """The item to add to the conversation."""

    type: Literal["conversation.item.retrieved"]
    """The event type, must be `conversation.item.retrieved`."""
class OutputAudioBufferStarted(BaseModel):
    """Server event payload for ``output_audio_buffer.started``."""

    event_id: str
    """The unique ID of the server event."""

    response_id: str
    """The unique ID of the response that produced the audio."""

    type: Literal["output_audio_buffer.started"]
    """The event type, must be `output_audio_buffer.started`."""
class OutputAudioBufferStopped(BaseModel):
    """Server event payload for ``output_audio_buffer.stopped``."""

    event_id: str
    """The unique ID of the server event."""

    response_id: str
    """The unique ID of the response that produced the audio."""

    type: Literal["output_audio_buffer.stopped"]
    """The event type, must be `output_audio_buffer.stopped`."""
class OutputAudioBufferCleared(BaseModel):
    """Server event payload for ``output_audio_buffer.cleared``."""

    event_id: str
    """The unique ID of the server event."""

    response_id: str
    """The unique ID of the response that produced the audio."""

    type: Literal["output_audio_buffer.cleared"]
    """The event type, must be `output_audio_buffer.cleared`."""
# Discriminated union of every server-sent Realtime API event; the
# `PropertyInfo(discriminator="type")` annotation selects the concrete
# model from each event's `type` field during deserialization.
RealtimeServerEvent: TypeAlias = Annotated[
    Union[
        ConversationCreatedEvent,
        ConversationItemCreatedEvent,
        ConversationItemDeletedEvent,
        ConversationItemInputAudioTranscriptionCompletedEvent,
        ConversationItemInputAudioTranscriptionDeltaEvent,
        ConversationItemInputAudioTranscriptionFailedEvent,
        ConversationItemRetrieved,
        ConversationItemTruncatedEvent,
        ErrorEvent,
        InputAudioBufferClearedEvent,
        InputAudioBufferCommittedEvent,
        InputAudioBufferSpeechStartedEvent,
        InputAudioBufferSpeechStoppedEvent,
        RateLimitsUpdatedEvent,
        ResponseAudioDeltaEvent,
        ResponseAudioDoneEvent,
        ResponseAudioTranscriptDeltaEvent,
        ResponseAudioTranscriptDoneEvent,
        ResponseContentPartAddedEvent,
        ResponseContentPartDoneEvent,
        ResponseCreatedEvent,
        ResponseDoneEvent,
        ResponseFunctionCallArgumentsDeltaEvent,
        ResponseFunctionCallArgumentsDoneEvent,
        ResponseOutputItemAddedEvent,
        ResponseOutputItemDoneEvent,
        ResponseTextDeltaEvent,
        ResponseTextDoneEvent,
        SessionCreatedEvent,
        SessionUpdatedEvent,
        TranscriptionSessionUpdatedEvent,
        OutputAudioBufferStarted,
        OutputAudioBufferStopped,
        OutputAudioBufferCleared,
    ],
    PropertyInfo(discriminator="type"),
]
from cm.exception import UnauthorizedException
from cm.message import display_message
from cm.models import ApplicationConfiguration, Notification, Configuration, UserRole
from cm.models_base import generate_key
from cm.views import get_text_by_keys_or_404
from cm.utils.embed import embed_html
from cm.security import get_request_user
from django import forms
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils import feedgenerator
from django.utils.translation import ugettext as _
import re
import time
#@login_required
def followup(request):
    """Site-wide follow-up settings page.

    Shows whether workspace/own notifications are active and handles POSTs
    that (re)generate the private feed key or toggle a notification.
    """
    user = get_request_user(request)
    workspace_notify_check = Notification.objects.filter(text=None, type='workspace', user=user, active=True).count()
    own_notify_check = Notification.objects.filter(text=None, type='own', user=user, active=True).count()
    if request.method == 'POST':
        # NOTE(review): 'activate' and 'reset' perform the identical action
        # (generate a fresh key); only the user-visible message differs.
        if 'activate' in request.POST:
            Configuration.objects.set_key('private_feed_key', generate_key())
            display_message(request, _(u"Private feed activated."))
        if 'reset' in request.POST:
            Configuration.objects.set_key('private_feed_key', generate_key())
            # Fixed typo in the user-facing message: "reseted" -> "reset".
            display_message(request, _(u"Private feed reset."))
        if request.POST.get('notif_id', None):
            notif_id = request.POST.get('notif_id')
            notif_type = 'own' if notif_id == 'own_notify_check' else 'workspace'
            notif_val = request.POST.get(notif_id, None)
            if notif_val is not None:
                Notification.objects.set_notification(text=None, type=notif_type, active=(notif_val == 'true'), email_or_user=user)
    return render_to_response('site/followup.html', {'workspace_notify_check': workspace_notify_check,
                                                     'own_notify_check': own_notify_check,
                                                     }, context_instance=RequestContext(request))
# force a POST (database modifications)
def desactivate_notification(request, adminkey):
    """Confirmation page that deactivates a notification on POST."""
    try:
        notification = Notification.objects.get(adminkey=adminkey)
    except Notification.DoesNotExist:
        display_message(request, _(u"This notification has already been deactivated."))
        return HttpResponseRedirect(reverse('index'))
    # Only a POST that echoes the matching admin key performs the change.
    if request.method == 'POST' and request.POST['adminkey'] == adminkey:
        notification.desactivate()
        display_message(request, _(u"Notification deactivated."))
        return HttpResponseRedirect(reverse('index'))
    context = {
        'notification': notification,
        'title': _(u'Deactivate notification?'),
    }
    return render_to_response('site/notifications_desactivate.html',
                              context,
                              context_instance=RequestContext(request))
def text_followup(request, key):
    """Per-text follow-up settings page.

    Handles POSTs that (re)generate the text's private feed key or toggle
    the per-text notification, then renders the current state.
    """
    text = get_text_by_keys_or_404(key)
    user = request.user if request.user.is_authenticated() else None
    from cm.security import user_has_perm  # import here!
    anonymous_can_view_text = user_has_perm(None, 'can_view_text', text=text)
    text_notify_check = Notification.objects.filter(text=text, type='text', user=user, active=True).count()
    workspace_notify_check = Notification.objects.filter(text=None, type='workspace', user=user, active=True).count()
    if request.method == 'POST':
        if 'activate' in request.POST:
            text.private_feed_key = generate_key()
            text.save()
            display_message(request, _(u"Private feed activated."))
        if 'reset' in request.POST:
            text.private_feed_key = generate_key()
            text.save()
            # Fixed typo in the user-facing message: "reseted" -> "reset".
            display_message(request, _(u"Private notifications feed reset."))
        if request.POST.get('notif_id', None):
            notif_id = request.POST.get('notif_id')
            notif_val = request.POST.get(notif_id, None)
            if notif_val is not None:
                Notification.objects.set_notification(text=text, type='text', active=(notif_val == 'true'), email_or_user=request.user)
    template_dict = {
        'text': text,
        'workspace_notify_check': workspace_notify_check,
        'text_notify_check': text_notify_check,
        'anonymous_can_view_text': anonymous_can_view_text,
    }
    return render_to_response('site/text_followup.html', template_dict, context_instance=RequestContext(request))
def text_embed(request, key):
    """Render the page showing the HTML embed code for a text."""
    text = get_text_by_keys_or_404(key)
    # Removed a stray trailing semicolon from the original line.
    embed_code = embed_html(text.key)
    template_dict = {
        'text': text,
        'embed_code': embed_code,
    }
    return render_to_response('site/text_embed.html', template_dict, context_instance=RequestContext(request))
{
"User": "root",
"Image": "pack.local/ephemeral-builder",
"Cmd": [
"/cnb/lifecycle/creator",
"-app",
"/workspace",
"-platform",
"/platform",
"-run-image",
"docker.io/cloudfoundry/run:latest",
"-layers",
"/layers",
"-cache-dir",
"/cache",
"-launch-cache",
"/launch-cache",
"-daemon",
"docker.io/library/my-application:latest"
],
"Env": [
"CNB_PLATFORM_API=0.8"
],
"Labels": {
"author": "spring-boot"
},
"HostConfig": {
"Binds": [
"work-volume-app:/workspace",
"work-volume-layers:/layers",
"build-volume:/cache",
"launch-volume:/launch-cache",
"/var/run/docker.sock:/var/run/docker.sock"
],
"SecurityOpt" : [
"label=disable"
]
}
} | json | github | https://github.com/spring-projects/spring-boot | buildpack/spring-boot-buildpack-platform/src/test/resources/org/springframework/boot/buildpack/platform/build/lifecycle-creator-cache-volumes.json |
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponseRedirect
from django.views.generic import list_detail
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from basic.profiles.models import *
from basic.profiles.forms import *
def profile_list(request):
    # Paginated listing of all profiles via the generic object_list view.
    queryset = Profile.objects.all()
    return list_detail.object_list(
        request,
        queryset=queryset,
        paginate_by=20,
    )
profile_list.__doc__ = list_detail.object_list.__doc__
def profile_detail(request, username):
    """Display a user's profile page.

    Returns 404 both when the user does not exist and when the user has no
    Profile row (the original let Profile.DoesNotExist escape as a 500).
    """
    try:
        user = User.objects.get(username__iexact=username)
    except User.DoesNotExist:
        raise Http404
    try:
        profile = Profile.objects.get(user=user)
    except Profile.DoesNotExist:
        raise Http404
    context = {'object': profile}
    return render_to_response('profiles/profile_detail.html', context, context_instance=RequestContext(request))
@login_required
def profile_edit(request, template_name='profiles/profile_form.html'):
    """Edit the logged-in user's profile.

    Binds four forms at once (profile, user, service formset, link formset);
    saves and redirects to the detail page only when ALL of them validate,
    otherwise re-renders with the bound (error-carrying) forms.
    """
    if request.POST:
        profile = Profile.objects.get(user=request.user)
        profile_form = ProfileForm(request.POST, request.FILES, instance=profile)
        user_form = UserForm(request.POST, instance=request.user)
        service_formset = ServiceFormSet(request.POST, instance=profile)
        link_formset = LinkFormSet(request.POST, instance=profile)
        if profile_form.is_valid() and user_form.is_valid() and service_formset.is_valid() and link_formset.is_valid():
            profile_form.save()
            user_form.save()
            service_formset.save()
            link_formset.save()
            return HttpResponseRedirect(reverse('profile_detail', kwargs={'username': request.user.username}))
        else:
            # Validation failed: keep the bound forms so errors are shown.
            context = {
                'profile_form': profile_form,
                'user_form': user_form,
                'service_formset': service_formset,
                'link_formset': link_formset
            }
    else:
        # GET: unbound forms pre-filled from the current profile.
        profile = Profile.objects.get(user=request.user)
        service_formset = ServiceFormSet(instance=profile)
        link_formset = LinkFormSet(instance=profile)
        context = {
            'profile_form': ProfileForm(instance=profile),
            'user_form': UserForm(instance=request.user),
            'service_formset': service_formset,
            'link_formset': link_formset
        }
    return render_to_response(template_name, context, context_instance=RequestContext(request))
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlTestDeep(PerlPackage):
    """Extremely flexible deep comparison"""

    # CPAN documentation page for Test::Deep.
    homepage = "http://search.cpan.org/~rjbs/Test-Deep-1.127/lib/Test/Deep.pm"
    # Release tarball downloaded at install time.
    url = "http://search.cpan.org/CPAN/authors/id/R/RJ/RJBS/Test-Deep-1.127.tar.gz"

    # Version directive: (version string, md5 checksum of the tarball).
    version('1.127', 'eeafe5795ba20ba051a1423f4fa86dd6')
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para vodbeast
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
from core import logger
from core import scrapertools
def test_video_exists(page_url):
    """Check whether a vodbeast video is still available.

    Returns (True, "") when available, or (False, message) otherwise.
    """
    logger.info("pelisalacarta.servers.vodbeast test_video_exists(page_url='%s')" % page_url)
    data = scrapertools.cache_page(page_url)
    # Bug fix: the original tested `("File was deleted" or "Not Found") in data`,
    # which evaluates to `"File was deleted" in data` and never checked for
    # "Not Found". Test each marker separately.
    if "File was deleted" in data or "Not Found" in data:
        return False, "[Vodbeast] El archivo no existe o ha sido borrado"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Scrape the vodbeast page and return [label, media_url] pairs."""
    logger.info("pelisalacarta.servers.vodbeast url="+page_url)
    data = scrapertools.cache_page(page_url)
    found_urls = scrapertools.find_multiple_matches(data, ',{file:\s+"([^"]+)"')
    # Label each stream with its file extension (last 4 chars of the name).
    video_urls = [
        [scrapertools.get_filename_from_url(found_url)[-4:] + " [vodbeast]", found_url]
        for found_url in found_urls
    ]
    for label, media_url in video_urls:
        logger.info("pelisalacarta.servers.vodbeast %s - %s" % (label, media_url))
    return video_urls
# Find this server's videos inside the given text.
def find_videos(data):
    """Scan `data` for vodbeast links, returning [title, url, server] triples
    with duplicates removed."""
    seen = set()
    results = []
    # http://vodbeast.com/jdfscsa5uoy4
    pattern = "vodbeast.com/(?:embed-|)([a-z0-9]+)"
    logger.info("pelisalacarta.servers.vodbeast find_videos #"+pattern+"#")
    for video_id in re.compile(pattern, re.DOTALL).findall(data):
        title = "[vodbeast]"
        url = "http://vodbeast.com/embed-%s.html" % video_id
        if url not in seen:
            logger.info("    url="+url)
            results.append([title, url, 'vodbeast'])
            seen.add(url)
        else:
            logger.info("    url duplicada="+url)
    return results
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cache.support;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.jspecify.annotations.Nullable;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import org.springframework.util.CollectionUtils;
/**
* Abstract base class implementing the common {@link CacheManager} methods.
* Useful for 'static' environments where the backing caches do not change.
*
* @author Costin Leau
* @author Juergen Hoeller
* @author Stephane Nicoll
* @since 3.1
*/
public abstract class AbstractCacheManager implements CacheManager, InitializingBean {
private final ConcurrentMap<String, Cache> cacheMap = new ConcurrentHashMap<>(16);
private volatile Set<String> cacheNames = Collections.emptySet();
// Early cache initialization on startup
	@Override
	public void afterPropertiesSet() {
		// Eagerly build the cache map from loadCaches() at container startup.
		initializeCaches();
	}
	/**
	 * Initialize the static configuration of caches.
	 * <p>Triggered on startup through {@link #afterPropertiesSet()};
	 * can also be called to re-initialize at runtime.
	 * @since 4.2.2
	 * @see #loadCaches()
	 */
	public void initializeCaches() {
		Collection<? extends Cache> caches = loadCaches();
		synchronized (this.cacheMap) {
			// Reset the published name set first so readers never observe
			// names for entries that are about to be cleared from the map.
			this.cacheNames = Collections.emptySet();
			this.cacheMap.clear();
			Set<String> cacheNames = CollectionUtils.newLinkedHashSet(caches.size());
			for (Cache cache : caches) {
				String name = cache.getName();
				this.cacheMap.put(name, decorateCache(cache));
				cacheNames.add(name);
			}
			// Publish an immutable snapshot via the volatile field.
			this.cacheNames = Collections.unmodifiableSet(cacheNames);
		}
	}
	/**
	 * Load the initial caches for this cache manager.
	 * <p>Called by {@link #afterPropertiesSet()} on startup.
	 * The returned collection may be empty but must not be {@code null}.
	 * @return the initial caches to register with this manager
	 */
	protected abstract Collection<? extends Cache> loadCaches();
// Lazy cache initialization on access
	@Override
	public @Nullable Cache getCache(String name) {
		// Quick check for existing cache...
		Cache cache = this.cacheMap.get(name);
		if (cache != null) {
			return cache;
		}
		// The provider may support on-demand cache creation...
		Cache missingCache = getMissingCache(name);
		if (missingCache != null) {
			// Fully synchronize now for missing cache registration
			synchronized (this.cacheMap) {
				// Re-check under the lock: another thread may have
				// registered the cache in the meantime.
				cache = this.cacheMap.get(name);
				if (cache == null) {
					cache = decorateCache(missingCache);
					this.cacheMap.put(name, cache);
					updateCacheNames(name);
				}
			}
		}
		return cache;
	}
@Override
public Collection<String> getCacheNames() {
return this.cacheNames;
}
@Override
public void resetCaches() {
synchronized (this.cacheMap) {
this.cacheMap.values().forEach(Cache::clear);
}
}
// Common cache initialization delegates for subclasses
/**
* Check for a registered cache of the given name.
* In contrast to {@link #getCache(String)}, this method does not trigger
* the lazy creation of missing caches via {@link #getMissingCache(String)}.
* @param name the cache identifier (must not be {@code null})
* @return the associated Cache instance, or {@code null} if none found
* @since 4.1
* @see #getCache(String)
* @see #getMissingCache(String)
*/
protected final @Nullable Cache lookupCache(String name) {
return this.cacheMap.get(name);
}
/**
* Update the exposed {@link #cacheNames} set with the given name.
* <p>This will always be called within a full {@link #cacheMap} lock
* and effectively behaves like a {@code CopyOnWriteArraySet} with
* preserved order but exposed as an unmodifiable reference.
* @param name the name of the cache to be added
*/
private void updateCacheNames(String name) {
Set<String> cacheNames = new LinkedHashSet<>(this.cacheNames);
cacheNames.add(name);
this.cacheNames = Collections.unmodifiableSet(cacheNames);
}
// Overridable template methods for cache initialization
/**
* Decorate the given Cache object if necessary.
* @param cache the Cache object to be added to this CacheManager
* @return the decorated Cache object to be used instead,
* or simply the passed-in Cache object by default
*/
protected Cache decorateCache(Cache cache) {
return cache;
}
/**
* Return a missing cache with the specified {@code name}, or {@code null} if
* such a cache does not exist or could not be created on demand.
* <p>Caches may be lazily created at runtime if the native provider supports it.
* If a lookup by name does not yield any result, an {@code AbstractCacheManager}
* subclass gets a chance to register such a cache at runtime. The returned cache
* will be automatically added to this cache manager.
* @param name the name of the cache to retrieve
* @return the missing cache, or {@code null} if no such cache exists or could be
* created on demand
* @since 4.1
* @see #getCache(String)
*/
protected @Nullable Cache getMissingCache(String name) {
return null;
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-context/src/main/java/org/springframework/cache/support/AbstractCacheManager.java |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
from collections import defaultdict
from contextlib import contextmanager
import torch
# Record all the torch primitives in advance, so that we can use them without them being modified when we patch torch
# in context managers
# Mapping from init-function name to the original (unpatched) torch.nn.init
# callable. Captured at import time so the guarded wrappers below keep working
# even while the torch modules are monkey-patched by the context managers.
TORCH_INIT_FUNCTIONS = {
    "uniform_": torch.nn.init.uniform_,
    "normal_": torch.nn.init.normal_,
    "constant_": torch.nn.init.constant_,
    "ones_": torch.nn.init.ones_,
    "zeros_": torch.nn.init.zeros_,
    "eye_": torch.nn.init.eye_,
    "dirac_": torch.nn.init.dirac_,
    "xavier_uniform_": torch.nn.init.xavier_uniform_,
    "xavier_normal_": torch.nn.init.xavier_normal_,
    "kaiming_uniform_": torch.nn.init.kaiming_uniform_,
    "kaiming_normal_": torch.nn.init.kaiming_normal_,
    "trunc_normal_": torch.nn.init.trunc_normal_,
    "orthogonal_": torch.nn.init.orthogonal_,
    "sparse_": torch.nn.init.sparse_,
}
def uniform_(
    tensor: torch.Tensor, a: float = 0.0, b: float = 1.0, generator: torch.Generator | None = None
) -> torch.Tensor:
    """Guarded ``torch.nn.init.uniform_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["uniform_"](tensor, a=a, b=b, generator=generator)


def normal_(
    tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, generator: torch.Generator | None = None
) -> torch.Tensor:
    """Guarded ``torch.nn.init.normal_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["normal_"](tensor, mean=mean, std=std, generator=generator)


def constant_(tensor: torch.Tensor, val: float) -> torch.Tensor:
    """Guarded ``torch.nn.init.constant_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["constant_"](tensor, val=val)


def ones_(tensor: torch.Tensor) -> torch.Tensor:
    """Guarded ``torch.nn.init.ones_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["ones_"](tensor)


def zeros_(tensor: torch.Tensor) -> torch.Tensor:
    """Guarded ``torch.nn.init.zeros_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["zeros_"](tensor)


def eye_(tensor: torch.Tensor) -> torch.Tensor:
    """Guarded ``torch.nn.init.eye_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["eye_"](tensor)


def dirac_(tensor: torch.Tensor, groups: int = 1) -> torch.Tensor:
    """Guarded ``torch.nn.init.dirac_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["dirac_"](tensor, groups=groups)
def xavier_uniform_(tensor: torch.Tensor, gain: float = 1.0, generator: torch.Generator | None = None) -> torch.Tensor:
    """Guarded ``torch.nn.init.xavier_uniform_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["xavier_uniform_"](tensor, gain=gain, generator=generator)


def xavier_normal_(tensor: torch.Tensor, gain: float = 1.0, generator: torch.Generator | None = None) -> torch.Tensor:
    """Guarded ``torch.nn.init.xavier_normal_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["xavier_normal_"](tensor, gain=gain, generator=generator)


def kaiming_uniform_(
    tensor: torch.Tensor,
    a: float = 0,
    mode: str = "fan_in",
    nonlinearity: str = "leaky_relu",
    generator: torch.Generator | None = None,
) -> torch.Tensor:
    """Guarded ``torch.nn.init.kaiming_uniform_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["kaiming_uniform_"](
        tensor, a=a, mode=mode, nonlinearity=nonlinearity, generator=generator
    )


def kaiming_normal_(
    tensor: torch.Tensor,
    a: float = 0,
    mode: str = "fan_in",
    nonlinearity: str = "leaky_relu",
    generator: torch.Generator | None = None,
) -> torch.Tensor:
    """Guarded ``torch.nn.init.kaiming_normal_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["kaiming_normal_"](
        tensor, a=a, mode=mode, nonlinearity=nonlinearity, generator=generator
    )
def trunc_normal_(
    tensor: torch.Tensor,
    mean: float = 0.0,
    std: float = 1.0,
    a: float = -2.0,
    b: float = 2.0,
    generator: torch.Generator | None = None,
) -> torch.Tensor:
    """Guarded ``torch.nn.init.trunc_normal_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["trunc_normal_"](tensor, mean=mean, std=std, a=a, b=b, generator=generator)


def orthogonal_(
    tensor: torch.Tensor,
    gain: float = 1,
    generator: torch.Generator | None = None,
) -> torch.Tensor:
    """Guarded ``torch.nn.init.orthogonal_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["orthogonal_"](tensor, gain=gain, generator=generator)


def sparse_(
    tensor: torch.Tensor, sparsity: float, std: float = 0.01, generator: torch.Generator | None = None
) -> torch.Tensor:
    """Guarded ``torch.nn.init.sparse_``: a no-op for tensors flagged ``_is_hf_initialized``."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["sparse_"](tensor, sparsity=sparsity, std=std, generator=generator)
def copy_(tensor: torch.Tensor, other: torch.Tensor) -> torch.Tensor:
    """Guarded in-place copy: fills ``tensor`` from ``other`` unless it is already initialized."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    with torch.no_grad():
        return tensor.copy_(other)
def _variance_scaling(tensor, mode="fan_in", distribution="normal"):
fan_in, fan_out = torch.nn.init._calculate_fan_in_and_fan_out(tensor)
if mode == "fan_in":
denom = fan_in
elif mode == "fan_out":
denom = fan_out
elif mode == "fan_avg":
denom = (fan_in + fan_out) / 2
variance = 1.0 / denom
if distribution == "truncated_normal":
trunc_normal_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
elif distribution == "normal":
normal_(tensor, std=math.sqrt(variance))
elif distribution == "uniform":
bound = math.sqrt(3 * variance)
uniform_(tensor, -bound, bound)
else:
raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """Guarded LeCun-normal init: fan-in scaled truncated normal, skipped if already initialized."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    _variance_scaling(tensor, mode="fan_in", distribution="truncated_normal")
    return tensor


def default_flax_embed_init_(tensor):
    """Guarded Flax default embedding init: fan-in scaled normal, skipped if already initialized."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    _variance_scaling(tensor, mode="fan_in", distribution="normal")
    return tensor
# Here, we need to check several modules imported, and hot patch all of them, as sometimes torch does
# something like `from torch.nn.init import xavier_uniform_` in their internals (e.g in torch.nn.modules.activations,
# where MultiHeadAttention lives), so the function name is binded at import time and just doing
# `setattr(torch.nn.init, name, globals()[name])` is thus not enough
# The following list should be enough for all torch versions we work with
# Modules that may hold direct references to the torch.nn.init primitives
# (grabbed via `from torch.nn.init import ...` at torch import time), so each
# must be patched individually by the context managers below.
TORCH_MODULES_TO_PATCH = (
    "torch.nn.init",
    "torch.nn.modules.activation",
    "torch.nn.modules.transformer",
    "torch.nn.modules.linear",
    "torch.nn.modules.loss",
    "torch.nn.modules.batchnorm",
    "torch.nn.modules.conv",
    "torch.nn.modules.normalization",
    "torch.nn.modules.rnn",
    "torch.nn.modules.sparse",
)
@contextmanager
def guard_torch_init_functions():
    """
    Guard the `torch.nn.init` primitive functions to behave exactly like the functions in this file, i.e. be
    protected against the `_is_hf_initialized` flag to avoid re-init if the param was already loaded.
    Usually, all models are using the init from `transformers` which are already guarded, but just to make extra sure
    and for remote code, we also use this context manager.
    """
    saved = defaultdict(dict)
    try:
        # Swap every known init primitive for its guarded counterpart from this module.
        for mod_name in TORCH_MODULES_TO_PATCH:
            mod = sys.modules.get(mod_name)
            if mod is None:
                continue
            for fn_name in TORCH_INIT_FUNCTIONS:
                if hasattr(mod, fn_name):
                    saved[mod][fn_name] = getattr(mod, fn_name)
                    setattr(mod, fn_name, globals()[fn_name])
        yield
    finally:
        # Restore whatever each module originally exposed.
        for mod, fns in saved.items():
            for fn_name, fn in fns.items():
                setattr(mod, fn_name, fn)
@contextmanager
def no_init_weights():
    """
    Disable weight initialization both at the torch-level, and at the transformers-level (`init_weights`).
    This is used to speed-up initializing an empty model with deepspeed, as we do not initialize the model on meta
    device with deepspeed, but we still don't need to run expensive weight initializations as we are loading params
    afterwards.
    """
    from .modeling_utils import PreTrainedModel

    def _noop(*args, **kwargs):
        pass

    saved = defaultdict(dict)
    try:
        # Replace every torch init primitive with a no-op.
        for mod_name in TORCH_MODULES_TO_PATCH:
            mod = sys.modules.get(mod_name)
            if mod is None:
                continue
            for fn_name in TORCH_INIT_FUNCTIONS:
                if hasattr(mod, fn_name):
                    saved[mod][fn_name] = getattr(mod, fn_name)
                    setattr(mod, fn_name, _noop)
        # Also neutralize transformers' own high-level init hook.
        original_init_weights = PreTrainedModel.init_weights
        PreTrainedModel.init_weights = _noop
        yield
    finally:
        # Restore the original torch functions on all modules.
        for mod, fns in saved.items():
            for fn_name, fn in fns.items():
                setattr(mod, fn_name, fn)
        # Restore `init_weights`.
        PreTrainedModel.init_weights = original_init_weights
@contextmanager
def no_tie_weights():
    """
    Disable weight tying during loading with `from_pretrained`. This is needed as we want to have access to ALL
    weights in the state_dict during `from_pretrained`, and otherwise tying them would remove them from it, as it's
    called in `post_init` when instantiating.
    """
    from .modeling_utils import PreTrainedModel

    def _noop(*args, **kwargs):
        pass

    original = PreTrainedModel.tie_weights
    try:
        PreTrainedModel.tie_weights = _noop
        yield
    finally:
        # Always restore the real implementation, even on error.
        PreTrainedModel.tie_weights = original
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields,osv
import tools
from .. import crm
# Lifecycle states a lead/opportunity can be in: (stored value, display label).
AVAILABLE_STATES = [
    ('draft','Draft'),
    ('open','Open'),
    ('cancel', 'Cancelled'),
    ('done', 'Closed'),
    ('pending','Pending')
]
# Month numbers as zero-padded strings (matching to_char(date, 'MM') output),
# mapped to their display labels for the selection fields below.
MONTHS = [
    ('01', 'January'),
    ('02', 'February'),
    ('03', 'March'),
    ('04', 'April'),
    ('05', 'May'),
    ('06', 'June'),
    ('07', 'July'),
    ('08', 'August'),
    ('09', 'September'),
    ('10', 'October'),
    ('11', 'November'),
    ('12', 'December')
]
class crm_lead_report(osv.osv):
    """CRM Lead Analysis: read-only reporting model backed by a SQL view."""
    _name = "crm.lead.report"
    _auto = False  # no table is created; init() builds the SQL view instead
    _description = "CRM Lead Analysis"
    _rec_name = 'deadline_day'
    _columns = {
        # grouping fields based on Deadline Date
        'deadline_year': fields.char('Ex. Closing Year', size=10, readonly=True, help="Expected closing year"),
        'deadline_month': fields.selection(MONTHS, 'Exp. Closing Month', readonly=True, help="Expected closing month"),
        'deadline_day': fields.char('Exp. Closing Day', size=10, readonly=True, help="Expected closing day"),
        # grouping fields based on Create Date
        'creation_year': fields.char('Creation Year', size=10, readonly=True, help="Creation year"),
        'creation_month': fields.selection(MONTHS, 'Creation Month', readonly=True, help="Creation month"),
        'creation_day': fields.char('Creation Day', size=10, readonly=True, help="Creation day"),
        # other date fields
        'create_date': fields.datetime('Create Date', readonly=True),
        'opening_date': fields.date('Opening Date', readonly=True),
        'date_closed': fields.date('Close Date', readonly=True),
        # durations (in days; averaged when grouped)
        'delay_open': fields.float('Delay to Open', digits=(16,2), readonly=True, group_operator="avg", help="Number of Days to open the case"),
        'delay_close': fields.float('Delay to Close', digits=(16,2), readonly=True, group_operator="avg", help="Number of Days to close the case"),
        'delay_expected': fields.float('Overpassed Deadline', digits=(16,2), readonly=True, group_operator="avg"),
        'user_id': fields.many2one('res.users', 'User', readonly=True),
        'country_id': fields.many2one('res.country', 'Country', readonly=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team', readonly=True),
        'channel_id': fields.many2one('crm.case.channel', 'Channel', readonly=True),
        'type_id': fields.many2one('crm.case.resource.type', 'Campaign', readonly=True),
        'state': fields.selection(AVAILABLE_STATES, 'State', size=16, readonly=True),
        # bug fix: 'company_id' was declared twice in this dict (the first
        # occurrence was silently discarded); it is now declared exactly once.
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        # bug fix: dropped the meaningless size=128 on an integer field
        'email': fields.integer('# Emails', readonly=True),
        'probability': fields.float('Probability', digits=(16,2), readonly=True, group_operator="avg"),
        'planned_revenue': fields.float('Planned Revenue', digits=(16,2), readonly=True),
        'probable_revenue': fields.float('Probable Revenue', digits=(16,2), readonly=True),
        'categ_id': fields.many2one('crm.case.categ', 'Category',
            domain="['|',('section_id','=',False),('section_id','=',section_id)]", readonly=True),
        'stage_id': fields.many2one('crm.case.stage', 'Stage', readonly=True, domain="[('section_ids', '=', section_id)]"),
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
        'nbr': fields.integer('# of Cases', readonly=True),
        'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
        'type': fields.selection([
            ('lead','Lead'),
            ('opportunity','Opportunity'),
        ], 'Type', help="Type is used to separate Leads and Opportunities"),
    }

    def init(self, cr):
        """
        (Re)create the crm_lead_report SQL view over active crm_lead rows.
        @param cr: the current row, from the database cursor
        """
        tools.drop_view_if_exists(cr, 'crm_lead_report')
        cr.execute("""
            CREATE OR REPLACE VIEW crm_lead_report AS (
                SELECT
                    id,
                    to_char(c.date_deadline, 'YYYY') as deadline_year,
                    to_char(c.date_deadline, 'MM') as deadline_month,
                    to_char(c.date_deadline, 'YYYY-MM-DD') as deadline_day,
                    to_char(c.create_date, 'YYYY') as creation_year,
                    to_char(c.create_date, 'MM') as creation_month,
                    to_char(c.create_date, 'YYYY-MM-DD') as creation_day,
                    to_char(c.date_open, 'YYYY-MM-DD') as opening_date,
                    to_char(c.date_closed, 'YYYY-mm-dd') as date_closed,
                    c.state,
                    c.user_id,
                    c.probability,
                    c.stage_id,
                    c.type,
                    c.company_id,
                    c.priority,
                    c.section_id,
                    c.channel_id,
                    c.type_id,
                    c.categ_id,
                    c.partner_id,
                    c.country_id,
                    c.planned_revenue,
                    c.planned_revenue*(c.probability/100) as probable_revenue,
                    1 as nbr,
                    (SELECT count(id) FROM mail_message WHERE model='crm.lead' AND res_id=c.id AND email_from is not null) AS email,
                    date_trunc('day',c.create_date) as create_date,
                    extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as  delay_close,
                    abs(extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24)) as  delay_expected,
                    extract('epoch' from (c.date_open-c.create_date))/(3600*24) as  delay_open
                FROM
                    crm_lead c
                WHERE c.active = 'true'
            )""")
crm_lead_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v1beta1",
"metadata": {
"name": "v0alpha1.opentsdb_v23.v42"
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 3151,
"links": [],
"panels": [
{
"aliasColors": {},
"autoMigrateFrom": "graph",
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": {
"uid": "gdev-opentsdb-v2.3"
},
"fieldConfig": {
"defaults": {
"links": []
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 0
},
"hiddenSeries": false,
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.0-pre",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "sum",
"alias": "$tag_hostname",
"currentFilterGroupBy": false,
"currentFilterKey": "",
"currentFilterType": "literal_or",
"currentFilterValue": "",
"datasource": {
"uid": "gdev-opentsdb-v2.3"
},
"disableDownsampling": false,
"downsampleAggregator": "avg",
"downsampleFillPolicy": "none",
"explicitTags": false,
"filters": [
{
"filter": "*",
"groupBy": true,
"tagk": "hostname",
"type": "wildcard"
}
],
"metric": "cpu",
"refId": "A",
"shouldComputeRate": false
}
],
"thresholds": [],
"timeRegions": [],
"title": "CPU per host",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "timeseries",
"xaxis": {
"mode": "time",
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"logBase": 1,
"show": true
},
{
"format": "short",
"logBase": 1,
"show": true
}
],
"yaxis": {
"align": false
}
},
{
"aliasColors": {},
"autoMigrateFrom": "graph",
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": {
"uid": "gdev-opentsdb-v2.3"
},
"fieldConfig": {
"defaults": {
"links": []
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 0
},
"hiddenSeries": false,
"id": 4,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.0-pre",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "sum",
"alias": "$tag_hostname",
"currentFilterGroupBy": false,
"currentFilterKey": "",
"currentFilterType": "literal_or",
"currentFilterValue": "",
"datasource": {
"uid": "gdev-opentsdb-v2.3"
},
"downsampleAggregator": "avg",
"downsampleFillPolicy": "none",
"filters": [
{
"filter": "*",
"groupBy": true,
"tagk": "hostname",
"type": "wildcard"
}
],
"metric": "logins.count",
"refId": "A"
}
],
"thresholds": [],
"timeRegions": [],
"title": "Login Count per host",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "timeseries",
"xaxis": {
"mode": "time",
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"logBase": 1,
"show": true
},
{
"format": "short",
"logBase": 1,
"show": true
}
],
"yaxis": {
"align": false
}
}
],
"refresh": "",
"schemaVersion": 42,
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Datasource tests - OpenTSDB v2.3",
"uid": "rZRUGik7k",
"weekStart": ""
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/datasource-opentsdb/v0alpha1.opentsdb_v23.v42.v1beta1.json |
"""Testing utils"""
import sys
import abc
import json
from contextlib import contextmanager
import traceback
from unittest.mock import Mock
import csv
import tempfile
from importlib import reload, import_module
import pytest
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls.base import clear_url_caches
from rest_framework.renderers import JSONRenderer
from rest_framework import status
from requests.exceptions import HTTPError
from main import features
def any_instance_of(*cls):
    """
    Build a sentinel whose ``==`` comparison is ``isinstance`` against *cls*.

    Args:
        cls (list of types): variable list of types to ensure equality against

    Returns:
        object: a sentinel instance that compares equal to any instance of the given types
    """

    class _AnyInstanceOf(metaclass=abc.ABCMeta):
        """Compares equal to any instance of the captured types."""

        def __eq__(self, other):
            return isinstance(other, cls)

    for klass in cls:
        _AnyInstanceOf.register(klass)
    return _AnyInstanceOf()
@contextmanager
def assert_not_raises():
    """Context manager asserting that its body raises no exception.

    AssertionError propagates unchanged so real test assertions still work;
    any other exception is converted into a test failure that carries the
    original traceback.
    """
    try:
        yield
    except AssertionError:
        raise
    except Exception:  # pylint: disable=broad-except
        # Bug fix: the old message read "An exception was not raised", which is
        # the opposite of what this branch means.
        pytest.fail(f"An unexpected exception was raised: {traceback.format_exc()}")
def assert_drf_json_equal(obj1, obj2):
    """
    Assert two objects are equal after a DRF JSON render/parse round trip.

    Useful when testing DRF serializers, where OrderedDict and similar wrappers
    would otherwise break plain ``==`` comparisons.

    Args:
        obj1 (object): the first object
        obj2 (object): the second object
    """
    renderer = JSONRenderer()
    normalized = [json.loads(renderer.render(obj)) for obj in (obj1, obj2)]
    assert normalized[0] == normalized[1]
class MockResponse:
    """
    Lightweight stand-in for ``requests.Response``.

    ``content`` given as a dict or list is serialized to JSON; anything else is
    stringified. ``url`` is only set when provided, mirroring the real class.
    """

    def __init__(
        self, content, status_code=200, content_type="application/json", url=None
    ):
        self.content = json.dumps(content) if isinstance(content, (dict, list)) else str(content)
        self.text = self.content
        self.status_code = status_code
        self.headers = {"Content-Type": content_type}
        if url:
            self.url = url

    @property
    def ok(self):  # pylint: disable=missing-docstring
        # True for any 2xx/3xx status code, like requests.Response.ok
        return status.HTTP_200_OK <= self.status_code < status.HTTP_400_BAD_REQUEST

    def json(self):
        """Deserialize and return the stored JSON content"""
        return json.loads(self.content)

    def raise_for_status(self):
        """Raise ``HTTPError`` for non-ok (4xx/5xx) responses, like requests"""
        if not self.ok:
            raise HTTPError(response=self)
class MockHttpError(HTTPError):
    """Mocked requests.exceptions.HTTPError carrying a canned 400 response"""

    def __init__(self, *args, **kwargs):
        kwargs = {**kwargs, "response": MockResponse(content={"bad": "response"}, status_code=400)}
        super().__init__(*args, **kwargs)
def drf_datetime(dt):
    """
    Format a datetime the way DRF's DateTimeField renders it

    Args:
        dt(datetime): datetime to format

    Returns:
        str: ISO 8601 formatted datetime (UTC offset shown as ``Z``)
    """
    iso = dt.isoformat()
    return iso.replace("+00:00", "Z")
class PickleableMock(Mock):
    """
    A Mock that can be passed to pickle.dumps()
    Source: https://github.com/testing-cabal/mock/issues/139#issuecomment-122128815
    """
    def __reduce__(self):
        """Required method for being pickleable"""
        # Pickles to a plain, fresh Mock() — configured attributes, return
        # values and recorded calls are NOT preserved through a round trip.
        return (Mock, ())
def create_tempfile_csv(rows_iter):
    """
    Creates a temporary CSV file for use in testing file upload views

    Args:
        rows_iter (iterable of lists): An iterable of lists of strings representing the csv values.
            Example: [["a","b","c"], ["d","e","f"]] --> CSV contents: "a,b,c\nd,e,f"

    Returns:
        SimpleUploadedFile: An in-memory upload whose name/contents come from the temp CSV
    """
    import os

    # Bug fix: the old version never closed the NamedTemporaryFile handle and
    # never deleted the file (delete=False), leaking a temp file per call and
    # breaking the re-open by name on Windows. We only need a unique path here.
    tmp = tempfile.NamedTemporaryFile(suffix=".csv", delete=False)
    tmp.close()
    try:
        with open(tmp.name, "w", encoding="utf8", newline="") as csv_file:
            writer = csv.writer(csv_file, delimiter=",")
            for row in rows_iter:
                writer.writerow(row)
        with open(tmp.name, "r") as user_csv:
            return SimpleUploadedFile(
                tmp.name, user_csv.read().encode("utf8"), content_type="application/csv"
            )
    finally:
        # The upload holds its own copy of the bytes; the disk file is no longer needed.
        os.unlink(tmp.name)
def format_as_iso8601(time, remove_microseconds=True):
    """
    Format a datetime as ISO 8601 with a trailing ``Z``.

    By default the 6-digit microseconds are truncated to 3-digit milliseconds
    (datetime.isoformat() cannot produce this exact shape).
    """
    stamp = time.strftime("%Y-%m-%dT%H:%M:%S.%f")
    if remove_microseconds:
        # chop the last three digits: microseconds -> milliseconds
        stamp = stamp[:-3]
    return f"{stamp}Z"
def reload_urlconf():
    """Reloads the Django URL configuration"""
    from django.conf import settings

    clear_url_caches()
    urlconf = settings.ROOT_URLCONF
    module = sys.modules.get(urlconf)
    if module is not None:
        reload(module)
    else:
        import_module(urlconf)
def patched_feature_enabled(patch_dict):
    """
    Build a patched version of features.is_enabled. If the feature name is a key in
    ``patch_dict``, the value from that dict is returned; otherwise the call falls
    through to the real features.is_enabled.

    Usage: Set as the side_effect of a patch for features.is_enabled
    mocker.patch("main.features.is_enabled", side_effect=patched_feature_enabled({MY_FEATURE: True}))

    Args:
        patch_dict (Dict[str, bool]): A dictionary containing feature names mapped to the result
            they should return if features.is_enabled is called with that feature name

    Returns:
        bool: Value indicating whether or not the feature is enabled
    """

    def _patched(*args, **kwargs):
        """Dict lookup first, real implementation as fallback"""
        name = args[0]
        try:
            return patch_dict[name]
        except KeyError:
            return features.is_enabled(*args, **kwargs)

    return _patched
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.hadoop.fs.impl.prefetch;
import java.util.Locale;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.slf4j.Logger;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
/**
* A FuturePool implementation backed by a java.util.concurrent.ExecutorService.
*
* If a piece of work has started, it cannot (currently) be cancelled.
*
* This class is a simplified version of <code>com.twitter:util-core_2.11</code>
* ExecutorServiceFuturePool designed to avoid depending on that Scala library.
* One problem with using a Scala library is that many downstream projects
* (eg Apache Spark) use Scala, and they might want to use a different version of Scala
* from the version that Hadoop chooses to use.
*
*/
public class ExecutorServiceFuturePool {

	// Backing executor; its lifecycle is ended via shutdown().
	private final ExecutorService executor;

	/**
	 * Create a future pool wrapping the given executor service.
	 * @param executor executor used to run all submitted work
	 */
	public ExecutorServiceFuturePool(ExecutorService executor) {
		this.executor = executor;
	}

	/**
	 * @param f function to run in future on executor pool
	 * @return future
	 * @throws java.util.concurrent.RejectedExecutionException can be thrown
	 * @throws NullPointerException if f param is null
	 */
	public Future<Void> executeFunction(final Supplier<Void> f) {
		return executor.submit(f::get);
	}

	/**
	 * @param r runnable to run in future on executor pool
	 * @return future
	 * @throws java.util.concurrent.RejectedExecutionException can be thrown
	 * @throws NullPointerException if r param is null
	 */
	@SuppressWarnings("unchecked")
	public Future<Void> executeRunnable(final Runnable r) {
		// Cast is safe: submit(Runnable) completes with a null result.
		return (Future<Void>) executor.submit(r::run);
	}

	/**
	 * Utility to shutdown the {@link ExecutorService} used by this class. Will wait up to a
	 * certain timeout for the ExecutorService to gracefully shutdown.
	 *
	 * @param logger Logger
	 * @param timeout the maximum time to wait
	 * @param unit the time unit of the timeout argument
	 */
	public void shutdown(Logger logger, long timeout, TimeUnit unit) {
		HadoopExecutors.shutdown(executor, logger, timeout, unit);
	}

	// Diagnostic representation; Locale.ROOT keeps the output locale-independent.
	public String toString() {
		return String.format(Locale.ROOT, "ExecutorServiceFuturePool(executor=%s)", executor);
	}
}
import sys
from datetime import datetime
from django.apps import apps
from django.db import models
from django.utils import six
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from .abstract_mixin import AbstractMixin
from getpaid import signals
from .utils import import_backend_modules
from django.conf import settings
# Py2/Py3 shim: expose ``unicode`` under Python 3 for legacy code paths.
if six.PY3:
    unicode = str

# Payment lifecycle states: (stored value, translatable display label).
PAYMENT_STATUS_CHOICES = (
    ('new', _("new")),
    ('in_progress', _("in progress")),
    ('accepted_for_proc', _("accepted for processing")),
    ('partially_paid', _("partially paid")),
    ('paid', _("paid")),
    ('cancelled', _("cancelled")),
    ('failed', _("failed")),
)
class PaymentManager(models.Manager):
    """Default manager that eagerly joins the related order."""
    def get_queryset(self):
        # select_related avoids one extra query per payment.order access
        return super(PaymentManager, self).get_queryset().select_related('order')
@python_2_unicode_compatible
class PaymentFactory(models.Model, AbstractMixin):
    """
    This is an abstract class that defines a structure of Payment model that will be
    generated dynamically with one additional field: ``order``

    The concrete ``Payment`` model is produced by ``register_to_payment()``
    via ``AbstractMixin.construct()``; ``contribute()`` below supplies the
    extra ``order`` ForeignKey on the generated class.
    """
    # 4 decimal places so backends reporting sub-cent amounts fit losslessly.
    amount = models.DecimalField(_("amount"), decimal_places=4, max_digits=20)
    # 3-letter currency code -- presumably ISO 4217; verify against backends.
    currency = models.CharField(_("currency"), max_length=3)
    status = models.CharField(_("status"), max_length=20, choices=PAYMENT_STATUS_CHOICES, default='new', db_index=True)
    # Dotted module path of the backend handling this payment (see get_processor).
    backend = models.CharField(_("backend"), max_length=50)
    created_on = models.DateTimeField(_("created on"), auto_now_add=True, db_index=True)
    paid_on = models.DateTimeField(_("paid on"), blank=True, null=True, default=None, db_index=True)
    amount_paid = models.DecimalField(_("amount paid"), decimal_places=4, max_digits=20, default=0)
    external_id = models.CharField(_("external id"), max_length=64, blank=True, null=True)
    description = models.CharField(_("description"), max_length=128, blank=True, null=True)

    class Meta:
        abstract = True

    def __str__(self):
        return _("Payment #%(id)d") % {'id': self.id}

    @classmethod
    def contribute(cls, order, **kwargs):
        # Called by AbstractMixin.construct(); the returned dict becomes extra
        # model fields on the generated concrete Payment class.
        return {'order': models.ForeignKey(order, **kwargs)}

    @classmethod
    def create(cls, order, backend):
        """
        Builds Payment object based on given Order instance

        A ``new_payment_query`` listener is expected to fill in
        ``payment.amount`` and ``payment.currency`` before the save.
        """
        payment = Payment()
        payment.order = order
        payment.backend = backend
        signals.new_payment_query.send(sender=None, order=order, payment=payment)
        if payment.currency is None or payment.amount is None:
            raise NotImplementedError('Please provide a listener for getpaid.signals.new_payment_query')
        payment.save()
        signals.new_payment.send(sender=None, order=order, payment=payment)
        return payment

    def get_processor(self):
        """Import this payment's backend module and return its PaymentProcessor."""
        try:
            # __import__ loads the module; sys.modules gives the leaf module object.
            __import__(self.backend)
            module = sys.modules[self.backend]
            return module.PaymentProcessor
        except (ImportError, AttributeError):
            raise ValueError("Backend '%s' is not available or provides no processor." % self.backend)

    def change_status(self, new_status):
        """
        Always change payment status via this method. Otherwise the signal
        will not be emitted.
        """
        if self.status != new_status:
            # do anything only when status is really changed
            old_status = self.status
            self.status = new_status
            self.save()
            signals.payment_status_changed.send(
                sender=type(self), instance=self,
                old_status=old_status, new_status=new_status
            )

    def on_success(self, amount=None):
        """
        Called when payment receives successful balance income. It defaults to
        complete payment, but can optionally accept received amount as a parameter
        to handle partial payments.

        Returns boolean value if payment was fully paid
        """
        if getattr(settings, 'USE_TZ', False):
            # Store an aware UTC timestamp when the project uses time zones.
            self.paid_on = datetime.utcnow().replace(tzinfo=utc)
        else:
            self.paid_on = datetime.now()
        if amount:
            self.amount_paid = amount
        else:
            # No explicit amount means the full amount was received.
            self.amount_paid = self.amount
        fully_paid = (self.amount_paid >= self.amount)
        if fully_paid:
            self.change_status('paid')
        else:
            self.change_status('partially_paid')
        return fully_paid

    def on_failure(self):
        """
        Called when payment was failed
        """
        self.change_status('failed')
def register_to_payment(order_class, **kwargs):
    """
    A function for registering unaware order class to ``getpaid``. This will
    generate a ``Payment`` model class that will store payments with
    ForeignKey to original order class

    This also will build a model class for every enabled backend.

    Extra ``kwargs`` are forwarded to the generated ``order`` ForeignKey
    field (see ``PaymentFactory.contribute``).
    """
    # The generated classes are published as module-level globals so the rest
    # of getpaid can simply import ``Payment`` / ``Order`` from this module.
    global Payment
    global Order

    class Payment(PaymentFactory.construct(order=order_class, **kwargs)):
        objects = PaymentManager()

        class Meta:
            ordering = ('-created_on',)
            verbose_name = _("Payment")
            verbose_name_plural = _("Payments")

    Order = order_class

    # Now build models for backends
    backend_models_modules = import_backend_modules('models')
    for backend_name, models_module in backend_models_modules.items():
        for model in models_module.build_models(Payment):
            apps.register_model(backend_name, model)
    return Payment
#!/usr/bin/python
# posts to youtube
from process import process
import youtube_v3_uploader
import ia_uploader
# import rax_uploader
import os
import pprint
import re
import pw
from django.template.defaultfilters import slugify
# from add_to_richard import get_video_id
from main.models import Show, Location, Episode, Raw_File, Cut_List
class FileNotFound(Exception):
    """Raised when an expected output file is missing from the show directory."""

    def __init__(self, value):
        # Keep the missing pathname available to callers.
        self.value = value

    def __str__(self):
        # repr() so the pathname is quoted in error output.
        return repr(self.value)
class post(process):
    """Post-production step: upload an episode's finished files to the hosts
    enabled on its client (YouTube and archive.org; Rackspace CDN and Vimeo
    paths exist but are currently disabled) and record the resulting URLs on
    the Episode.
    """

    # process.py drives episodes through numbered states; 4 == ready to post.
    ready_state = 4

    def get_tags(self, ep):
        """Return the tag list for *ep*: client and show slugs plus any
        comma-separated client tags and author names, with spaces removed
        and empty entries dropped."""
        tags = [ep.show.client.slug, ep.show.slug, ]
        # for more_tags in [ ep.show.client.tags, ep.tags, ep.authors ]:
        for more_tags in [ep.show.client.tags, ep.authors]:
            if more_tags is not None:
                tags += more_tags.split(',')
        # remove spaces
        tags = [tag.replace(' ', '') for tag in tags]
        # remove any empty tags
        tags = [_f for _f in tags if _f]
        return tags

    def get_files(self, ep):
        """Return the files to upload as [{'ext': ..., 'pathname': ...}, ...].

        blip and archive support multiple formats, youtube does not;
        youtube and such will only upload the first file.  When
        ``--debug-log`` is set, an XML-ish ``.tt`` log built from the mlt
        and shell script is appended.

        Raises FileNotFound if any expected format is missing on disk.
        """
        files = []
        for ext in self.options.upload_formats:
            src_pathname = os.path.join(self.show_dir, ext, "%s.%s" % (ep.slug, ext))
            if os.path.exists(src_pathname):
                files.append({'ext': ext, 'pathname': src_pathname})
            else:
                # crapy place to abort, but meh, works for now.
                print("not found:", src_pathname)
                raise FileNotFound(src_pathname)
        if self.options.debug_log:
            # put the mlt and .sh stuff into the log
            # blip and firefox want it to be xml, so jump though some hoops
            log = "<log>\n"
            mlt_pathname = os.path.join(self.show_dir, 'mlt', "%s.mlt" % (ep.slug,))
            # fix: use context managers so the file handles are closed
            # promptly instead of leaking until GC.
            with open(mlt_pathname) as mlt_file:
                log += mlt_file.read()
            sh_pathname = os.path.join(self.show_dir, 'tmp', "%s.sh" % (ep.slug,))
            with open(sh_pathname) as sh_file:
                shs = sh_file.read().split('\n')
            shs = ["<line>\n%s\n</line>\n" % l for l in shs if l]
            log += "<shell_script>\n%s</shell_script>\n" % ''.join(shs)
            log += "</log>"
            # blip says: try something like a tt or srt file
            log_pathname = os.path.join(self.show_dir, 'tmp', "%s.tt" % (ep.slug,))
            with open(log_pathname, 'w') as log_file:
                log_file.write(log)
            # add the log to the list of files to be posted
            files.append({'ext': 'tt', 'pathname': log_pathname})
        return files

    def collect_metadata(self, ep):
        """Assemble the metadata dict shared by all uploaders."""
        meta = {}
        meta['title'] = '"{title}" - {authors} ({show})'.format(
            title=ep.name, authors=ep.authors, show=ep.show.name)
        if len(meta['title']) > 100:
            # YouTube rejects titles over 100 chars; fall back to the bare name.
            meta['title'] = ep.name
        meta['authors'] = ep.authors.split(',')
        meta['description'] = ep.composed_description()
        meta['tags'] = self.get_tags(ep)
        meta['start'] = ep.start
        # NOTE(review): the episode's own ep.language is deliberately ignored
        # and the language hard-coded to English (the dead intermediate
        # assignment was removed) -- confirm this override is still intended.
        meta['language'] = "eng"
        if "CC" in ep.license:
            meta['license'] = 'creativeCommon'
        # meta['rating'] = self.options.rating
        # http://gdata.youtube.com/schemas/2007/categories.cat
        meta['category'] = 27  # "Education"
        if ep.location.lat and ep.location.lon:
            meta['latlon'] = (ep.location.lat, ep.location.lon)
        meta['privacyStatus'] = 'unlisted'
        return meta

    def mk_key(self, ep, f):
        """Make a key for the rackspace cdn object key-value store:
        <category-slug>/<video-id>_<title-of-video>.mp4 if we have that data
        handy, otherwise client/show/slug.
        """
        key = ''
        if ep.show.client.category_key:
            # warning: this does not take into account pvo collisions
            # https://github.com/willkg/richard/blob/master/richard/videos/utils.py#L20 def generate_unique_slug(obj, slug_from, slug_field='slug'):
            key += slugify(ep.show.client.category_key) + '/'
        else:
            key += ep.show.client.slug + '/' + ep.show.client.slug + '/'
        if ep.public_url:
            # NOTE(review): the get_video_id import is commented out at the
            # top of this file, so this branch would raise NameError -- it is
            # only reachable from the (also disabled) do_rax path; confirm
            # before re-enabling either.
            key += get_video_id(ep.public_url) + "_"
        key += ep.slug[:50] + "." + f['ext']
        return key

    def do_yt(self, ep, files, private, meta):
        """Upload files[0] to YouTube, saving the new URL and thumbnail on ep.

        Returns True on success, or when the episode is already hosted and
        --replace was not given.
        """
        youtube_success = False
        # https://developers.google.com/youtube/v3/docs/videos#resource
        assert len(meta['title']) <= 100, "len(name) > maximum length of 100"
        uploader = youtube_v3_uploader.Uploader()
        uploader.oauth_file = \
            pw.yt[ep.show.client.youtube_id]['filename']
        uploader.pathname = files[0]['pathname']
        uploader.meta = meta
        uploader.private = private
        if self.options.test:
            print('test mode:')
            print("user key:", uploader.user)
            print('files = %s' % files)
            print('meta = %s' % pprint.pformat(meta))
            print('skipping youtube_upoad.py uploader.upload()')
            print(len(meta['description']))
        elif ep.host_url and not self.options.replace:
            print("skipping youtube, already there.")
            youtube_success = True
        else:
            if ep.host_url:
                # --replace: remove the stale copy before re-uploading
                uploader.delete_video(ep.host_url)
            # down to next layer of code that will do the uploading
            # uploader.debug_mode=True
            youtube_success = uploader.upload()
        if youtube_success:
            print(uploader.new_url)
            # save new youtube url
            ep.host_url = uploader.new_url
            # the thumb url
            ep.thumbnail = uploader.thumbnail
            # for test framework
            self.last_url = uploader.new_url
        else:
            print("youtube error! zomg")
            ep.comment += "\n%s\n" % (uploader.ret_text.decode('utf-8').encode('ascii', 'xmlcharrefreplace'))
        self.save_me(ep)
        return youtube_success

    def do_ia(self, ep, files, meta):
        """Upload all files to archive.org, recording per-format URLs on ep.

        This should be in post_ia.py, but we don't want 2 processes
        uploading at the same time (bandwidth).
        """
        uploader = ia_uploader.Uploader()
        uploader.user = ep.show.client.archive_id
        # transform veyepar meta to ia meta
        if ep.license.upper().startswith('CC'):
            x = ep.license[3:8].lower()
            ver = '4.0'
            meta['licenseurl'] = 'http://creativecommons.org/licenses/{x}/{ver}/'.format(x=x, ver=ver)
        for f in files:
            uploader.pathname = f['pathname']
            uploader.verbose = self.options.verbose
            slug = "{show}-{slug}".format(
                show=ep.show.slug,
                slug=ep.slug)[:100]
            # IA requires this: ^[a-zA-Z0-9][a-zA-Z0-9_.-]{4,100}$
            slug = re.sub(r'[^a-zA-Z0-9_.-]', '', slug)
            uploader.slug = slug
            uploader.meta = meta
            if self.options.test:
                print('test mode...')
                print('skipping archive_uploader .upload()')
                ia_success = False
            elif ep.archive_mp4_url and not self.options.replace:
                # um.. what about other formats?
                # kinda buggy here, but only relevant when things are
                # messed up and looking for problems.
                print("skipping archive, file already there.")
                ia_success = True
            else:
                # actually upload
                # uploader.debug_mode=True
                ia_success = uploader.upload()
            if ia_success:
                if self.options.verbose: print(uploader.new_url)
                # this is pretty gross: store the archive url per format.
                # it should really just be: archive_url
                if f['ext'] == "mp4":
                    ep.archive_mp4_url = uploader.new_url
                elif f['ext'] == "ogv":
                    ep.archive_ogv_url = uploader.new_url
                elif f['ext'] == "webm":  # omg super gross.
                    ep.archive_ogv_url = uploader.new_url
                # hook for tests so that it can be browsed
                self.archive_url = uploader.new_url
                # for test framework
                self.last_url = uploader.new_url
            else:
                print("Internet archive.org error!")
        self.save_me(ep)
        return ia_success

    def do_rax(self, ep, files, meta):
        """Upload all files to the Rackspace CDN container, recording URLs on ep.

        NOTE(review): the rax_uploader import is commented out at the top of
        this file, so this method would raise NameError if re-enabled; it is
        currently not called from process_ep.
        """
        if self.options.verbose: print("do_rax...")
        success = False
        uploader = rax_uploader.Uploader()
        uploader.user = ep.show.client.rax_id
        uploader.bucket_id = ep.show.client.bucket_id
        for f in files:
            uploader.pathname = f['pathname']
            uploader.key_id = self.mk_key(ep, f)
            if self.options.test:
                print('test mode...')
                print('skipping rax_uploader .upload()')
                print('key_id:', uploader.key_id)
            elif ep.rax_mp4_url and not self.options.replace:
                # above assumes rax_mp4_url is what gets filled in below
                print("skipping rax, already there.")
                success = True
            else:
                # actually upload
                # uploader.debug_mode=True
                success = uploader.upload()
                # possible errors:
                # invalid container - halt, it will likely be invalid for all
                # transmission - retry
                # bad name, mark as error and continue to next
            if success:
                if self.options.verbose: print(uploader.new_url)
                # store the url per format
                if f['ext'] == "mp4":
                    ep.rax_mp4_url = uploader.new_url
                elif f['ext'] == "webm":
                    ep.rax_mp4_url = uploader.new_url
                elif f['ext'] == "ogv":
                    # there is no ep.rax_ogv_url
                    ep.rax_ogv_url = uploader.new_url
                # hook for tests so that it can be browsed
                # self.rax_url = uploader.new_url
                # for test framework
                self.last_url = uploader.new_url
            else:
                print("rax error!")
        self.save_me(ep)
        return success

    def do_vimeo(self, ep, files, private, meta):
        """Upload files[0] to Vimeo and store the new URL on ep.

        NOTE(review): vimeo_uploader is never imported in this module, so
        this path would raise NameError if enabled; it is currently unused
        (see process_ep).
        """
        vimeo_success = False
        uploader = vimeo_uploader.Uploader()
        uploader.user = ep.show.client.vimeo_id
        uploader.pathname = files[0]['pathname']
        uploader.meta = meta
        if self.options.test:
            print('test mode:')
            print("user key:", uploader.user)
            print('files = %s' % files)
            print('meta = %s' % pprint.pformat(meta))
            print('skipping vimeo_upoad.py uploader.upload()')
            print(len(meta['description']))
        elif ep.host_url and not self.options.replace:
            print("skipping vimeo, already there.")
            # bug fix: this branch used to set youtube_success, leaving
            # vimeo_success unset and the check below raising NameError
            # in test mode.
            vimeo_success = True
        else:
            # down to next layer of code that will do the uploading
            # uploader.debug_mode=True
            vimeo_success = uploader.upload()
        if vimeo_success:
            if self.options.verbose: print(uploader.new_url)
            # save new vimeo url
            ep.host_url = uploader.new_url
            # for test framework
            self.last_url = uploader.new_url
        else:
            # bug fix: the message previously said "youtube".
            print("vimeo error! zomg")
            ep.comment += "\n%s\n" % (uploader.ret_text.decode('utf-8').encode('ascii', 'xmlcharrefreplace'))
        self.save_me(ep)
        return vimeo_success

    def process_ep(self, ep):
        """Upload one episode to every host enabled on its client.

        Returns True when uploads were attempted, False when the episode is
        unreleased and therefore skipped.
        """
        if not ep.released and ep.released is not None:  # and not self.options.release_all:
            # --release will force the upload, overrides ep.released
            if self.options.verbose: print("not released:", ep.released)
            return False
        # collect data needed for uploading
        files = self.get_files(ep)
        if self.options.verbose:
            print("[files]:", end=' ')
            pprint.pprint(files)
        meta = self.collect_metadata(ep)
        if self.options.verbose: pprint.pprint(meta)
        # upload youtube
        if not ep.show.client.youtube_id: youtube_success = True
        else: youtube_success = self.do_yt(ep, files, True, meta)
        # upload archive.org
        if not ep.show.client.archive_id: archive_success = True
        else: archive_success = self.do_ia(ep, files, meta)
        # upload rackspace cdn
        # if not ep.show.client.rax_id: rax_success = True
        # else: rax_success = self.do_rax(ep,files,meta)
        # upload vimeo (needs upgrading to new api)
        # if not ep.show.client.vimeo_id: vimeo_success = True
        # else: vimeo_success = self.do_vimeo(ep,files,meta)
        return True
        # youtube_success
        # and archive_success \
        # and rax_success

    def add_more_options(self, parser):
        """Add post-specific optparse options to the shared parser."""
        parser.add_option('--replace', action="store_true",
                          help="Upload again, step on existing URL.")
        parser.add_option('--release-all', action="store_true",
                          help="ignore the released setting (assuming this is enabled.)")
if __name__ == '__main__':
    # Script entry point: run the post step's standard main loop.
    post().main()
# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.boolean import boolean
# Recognised option flags accepted in the optional third lookup term.
FLAGS = ('skip_missing',)
class LookupModule(LookupBase):
    """``subelements`` lookup: pair each item of a list with every entry of
    one of its (possibly dotted/nested) list-valued subkeys, yielding
    ``(item, subelement)`` tuples."""

    def run(self, terms, variables, **kwargs):

        def _raise_terms_error(msg=""):
            raise AnsibleError(
                "subelements lookup expects a list of two or three items, "
                + msg)

        # Template/flatten the first term in place before validating.
        terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar, loader=self._loader)

        # check lookup terms - check number of terms
        if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
            _raise_terms_error()

        # first term should be a list (or dict), second a string holding the subkey
        # NOTE: ``basestring`` (and ``iterkeys`` below) are Python-2-only names.
        if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], basestring):
            _raise_terms_error("first a dict or a list, second a string pointing to the subkey")
        subelements = terms[1].split(".")

        if isinstance(terms[0], dict):  # convert to list:
            if terms[0].get('skipped', False) is not False:
                # the registered result was completely skipped
                return []
            elementlist = []
            for key in terms[0].iterkeys():
                elementlist.append(terms[0][key])
        else:
            elementlist = terms[0]

        # check for optional flags in third term
        flags = {}
        if len(terms) == 3:
            flags = terms[2]
        # NOTE(review): with ``and`` joining these conditions, a dict that
        # contains unknown flag keys is silently accepted -- presumably
        # ``or`` was intended; confirm against upstream before changing.
        if not isinstance(flags, dict) and not all([isinstance(key, basestring) and key in FLAGS for key in flags]):
            _raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)

        # build_items
        ret = []
        for item0 in elementlist:
            if not isinstance(item0, dict):
                raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
            if item0.get('skipped', False) is not False:
                # this particular item is to be skipped
                continue

            skip_missing = boolean(flags.get('skip_missing', False))
            subvalue = item0
            lastsubkey = False
            sublist = []
            # Walk the dotted subkey path down into the item.
            for subkey in subelements:
                if subkey == subelements[-1]:
                    lastsubkey = True
                if not subkey in subvalue:
                    if skip_missing:
                        # NOTE(review): this ``continue`` advances to the next
                        # *subkey*, not the next item -- verify that is the
                        # intended skip semantics.
                        continue
                    else:
                        raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
                if not lastsubkey:
                    if not isinstance(subvalue[subkey], dict):
                        if skip_missing:
                            continue
                        else:
                            raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
                    else:
                        # Descend one level for intermediate keys.
                        subvalue = subvalue[subkey]
                else:  # lastsubkey
                    if not isinstance(subvalue[subkey], list):
                        raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
                    else:
                        # Note: pops the subkey list out of the item.
                        sublist = subvalue.pop(subkey, [])
            # Emit one (item, subelement) pair per entry of the final list.
            for item1 in sublist:
                ret.append((item0, item1))
        return ret
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A model that places a soft decision tree embedding before a neural net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class KFeatureDecisionsToDataThenNN(hybrid_model.HybridModel):
  """A model that places a soft decision tree embedding before a neural net."""

  def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):
    super(KFeatureDecisionsToDataThenNN, self).__init__(
        params,
        device_assigner=device_assigner,
        optimizer_class=optimizer_class,
        **kwargs)
    # Layer 0 embeds the input via the k-feature soft decision trees;
    # layer 1 is the fully connected network consuming that embedding.
    tree_embedding = decisions_to_data.KFeatureDecisionsToDataLayer(
        params, 0, device_assigner)
    dense = fully_connected.FullyConnectedLayer(
        params, 1, device_assigner=device_assigner)
    self.layers = [tree_embedding, dense]
# To run this script with Windows or SLES, you have to modify setupCmdLine
#
# setupCmdLine.bat|sh
# add on line 40: "SET WAS_USER_SCRIPT=d:\ibm\wasuserscript.cmd"
#
# Create d:\ibm\wasuserscript.cmd:
# "SET WAS_EXT_DIRS=%WAS_EXT_DIRS%;c:\ibm\sqllib\java"
#
import os
import sys
from java.util import Properties
# Load all jython commands, when they are not loaded
try:
    # Probe an arbitrary Connections admin service; a NameError means the
    # wsadmin helper scripts have not been sourced into this session yet.
    NewsActivityStreamService.listApplicationRegistrations()
except NameError:
    print "Connections Commands not loaded! Load now: "
    execfile("loadAll.py")
# add the jar to your classpath, then import it
# better to read WebSphere variable PROFILES_JDBC_DRIVER_HOME
import com.ibm.db2.jcc.DB2Driver as Driver

# Change User and Password
props = Properties()
props.put( 'user', 'lcuser' )
props.put( 'password', 'password' )

# Change Hostname, Port and maybe DB Name
conn = Driver().connect( 'jdbc:db2://cnxdb2.stoeps.local:50000/PEOPLEDB', props )
stmt = conn.createStatement()

email = raw_input( "Mail address of profile you want to check: " ).lower()
# Look up the profile row(s) for the given mail address in the Profiles DB.
# NOTE(review): the address is interpolated directly into the SQL string --
# tolerable for an interactive admin script, but not safe for untrusted input.
sql = 'select PROF_UID_LOWER,PROF_MAIL_LOWER,PROF_GUID,PROF_MAIL from empinst.employee where PROF_MAIL_LOWER = \'' + email + '\' order by PROF_UID_LOWER'
rs = stmt.executeQuery( sql )
employeeList = []
while ( rs.next() ):
    # Copy each result row into a plain dict keyed by column name.
    row = {}
    row['PROF_UID_LOWER'] = rs.getString( 1 )
    row['PROF_MAIL_LOWER'] = rs.getString( 2 )
    row['PROF_GUID'] = rs.getString( 3 )
    row['PROF_MAIL'] = rs.getString( 4 )
    employeeList.append( row )
rs.close()
stmt.close()
conn.close()
# print the result
# For each matching profile, query every Connections application for the
# member's external id. Each service keeps its own member table, so a
# "No user" line below pinpoints which application has an id mismatch.
for e in employeeList:
    # print e['PROF_UID_LOWER'] + "\t\t" + e['PROF_MAIL_LOWER'] + "\t\t" + e['PROF_GUID']
    # print e['PROF_MAIL']
    print "Profiles:\t\t\t " + e['PROF_GUID']
    LOGIN = e['PROF_MAIL']
    try:
        print "Activities:\t\t\t",
        ActivitiesMemberService.getMemberExtIdByLogin( LOGIN )
    except:
        print 'No user with Login ' + LOGIN + ' found'
    try:
        print "Blogs:\t\t\t\t",
        BlogsMemberService.getMemberExtIdByLogin( LOGIN )
    except:
        print 'No user with Login ' + LOGIN + ' found'
    try:
        print "Communities:\t\t\t",
        CommunitiesMemberService.getMemberExtIdByLogin( LOGIN )
    except:
        print 'No user with Login ' + LOGIN + ' found'
    try:
        print "Dogear:\t\t\t\t",
        DogearMemberService.getMemberExtIdByLogin( LOGIN )
    except:
        print 'No user with Login ' + LOGIN + ' found'
    try:
        print "Files:\t\t\t\t",
        FilesMemberService.getMemberExtIdByLogin( LOGIN )
    except:
        print 'No user with Login ' + LOGIN + ' found'
    try:
        print "Forums:\t\t\t\t",
        ForumsMemberService.getMemberExtIdByLogin( LOGIN )
    except:
        print 'No user with Login ' + LOGIN + ' found'
    try:
        print "News, Search, Homepage:\t\t",
        NewsMemberService.getMemberExtIdByLogin( LOGIN )
    except:
        print 'No user with Login ' + LOGIN + ' found'
    try:
        print "Wikis:\t\t\t\t",
        WikisMemberService.getMemberExtIdByLogin( LOGIN )
    except:
        print 'No user with Login ' + LOGIN + ' found'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.