hexsha
stringlengths 40
40
| size
int64 10
805k
| ext
stringclasses 6
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
176
| max_stars_repo_name
stringlengths 7
114
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
176
| max_issues_repo_name
stringlengths 7
114
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
48.5k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
176
| max_forks_repo_name
stringlengths 7
114
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 10
805k
| avg_line_length
float64 5.53
11k
| max_line_length
int64 10
129k
| alphanum_fraction
float64 0.13
0.93
| content_no_comment
stringlengths 0
449k
| is_comment_constant_removed
bool 2
classes | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71636a88b0788c3dada5a4ba2b8c2dd70710a74
| 334
|
py
|
Python
|
projects/forms.py
|
18F/projects
|
e8c6bef7f3a6308dbad8c772cc45ddb6d0f50dec
|
[
"CC0-1.0"
] | 9
|
2016-05-10T21:33:09.000Z
|
2019-12-07T05:49:08.000Z
|
projects/forms.py
|
18F/projects
|
e8c6bef7f3a6308dbad8c772cc45ddb6d0f50dec
|
[
"CC0-1.0"
] | 38
|
2016-05-10T19:15:36.000Z
|
2016-07-13T15:04:37.000Z
|
projects/forms.py
|
18F/projects
|
e8c6bef7f3a6308dbad8c772cc45ddb6d0f50dec
|
[
"CC0-1.0"
] | 4
|
2016-06-03T20:12:21.000Z
|
2021-02-15T10:19:36.000Z
|
from dal import autocomplete
from django import forms
from .models import Project
class ProjectForm(forms.ModelForm):
    """Model form for Project with an autocompleting client selector."""

    class Meta:
        model = Project
        # The sentinel must be the bare string '__all__'; the original
        # ('__all__') was still just a string, but read as if a one-item
        # tuple were intended, so drop the misleading parentheses.
        fields = '__all__'
        widgets = {
            'client': autocomplete.ModelSelect2(
                url='projects:client-autocomplete'
            )
        }
| 18.555556
| 48
| 0.595808
|
from dal import autocomplete
from django import forms
from .models import Project
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
fields = ('__all__')
widgets = {
'client': autocomplete.ModelSelect2(
url='projects:client-autocomplete'
)
}
| true
| true
|
f716376a1e682052bb9b284fd666fd0f58de3a38
| 1,994
|
py
|
Python
|
php/python/DatabaseManager.py
|
the16bitgamer/YourflixMkII
|
3be2407b214b8553e0a83af04b463cd99c04cf32
|
[
"MIT"
] | null | null | null |
php/python/DatabaseManager.py
|
the16bitgamer/YourflixMkII
|
3be2407b214b8553e0a83af04b463cd99c04cf32
|
[
"MIT"
] | null | null | null |
php/python/DatabaseManager.py
|
the16bitgamer/YourflixMkII
|
3be2407b214b8553e0a83af04b463cd99c04cf32
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb
_connectedDb = None
_dbCursor = None
def ConnectToDb(self, userID, userPassword):
    """Open a MySQL connection on localhost and cache it (plus a cursor) on *self*."""
    # Tear down any previous connection before opening a new one.
    if self._connectedDb is not None:
        DisconnectDb(self)
    connection = MySQLdb.connect(host="localhost", user=userID, passwd=userPassword)
    self._connectedDb = connection
    self._dbCursor = connection.cursor()
    return "Connected to Database!"
def DisconnectDb(self):
    """Close the cached cursor and connection opened by ConnectToDb."""
    # Cursor is closed before its parent connection; neither attribute is
    # guarded against None, so calling this before ConnectToDb will raise.
    self._dbCursor.close()
    self._connectedDb.close()
def InsertIntoDb(self, values, location, db, table):
    """Insert one row into `db`.`table`.

    values   -- iterable of column values, in the same order as *location*
    location -- iterable of column names

    Identifiers (db/table/column names) are backtick-quoted but cannot be
    bound by the driver, so they must come from trusted code only.
    """
    values = tuple(values)
    columns = ",".join("`" + str(col) + "`" for col in location)
    # One %s placeholder per value: the driver escapes the data itself,
    # closing the SQL-injection hole of the old string interpolation.
    placeholders = ",".join(["%s"] * len(values))
    self._dbCursor.execute(
        "INSERT INTO `" + db + "`.`" + table + "` (" + columns + ") VALUES (" + placeholders + ");",
        values,
    )
def UpdateDb(self, updateCol, updateValue, value, location, db, table):
    """Set columns *location* to the parallel entries of *value* where
    *updateCol* equals *updateValue*.

    Values (including *updateValue*) travel as bound parameters rather than
    being quoted into the SQL text, which closes the injection hole of the
    old string formatting. Column/table names still cannot be bound and
    must be trusted. (The old code also shadowed the builtin `set`.)
    """
    assignments = ",".join("`" + str(col) + "` = %s" for col in location)
    params = tuple(value[idx] for idx in range(len(location))) + (updateValue,)
    self._dbCursor.execute(
        "UPDATE `" + db + "`.`" + table + "` SET " + assignments
        + " WHERE `" + updateCol + "` = %s;",
        params,
    )
def RemoveFromDb(self, value, location, db, table):
    """Delete rows from `db`.`table` where column *location* equals *value*.

    *value* is passed as a bound parameter instead of being spliced into
    the SQL text, preventing injection through the value. *location*,
    *db* and *table* are identifiers and must come from trusted code.
    """
    self._dbCursor.execute(
        "DELETE FROM `" + db + "`.`" + table + "` WHERE (`" + location + "` = %s);",
        (value,),
    )
def SearchDb(self, range, db, table, field, search):
    """Return all rows whose *field* equals *search*.

    *range* is the SELECT column list (kept despite shadowing the builtin,
    for interface compatibility with existing callers). *search* is passed
    as a bound parameter, closing the old %-formatting injection hole.
    """
    self._dbCursor.execute(
        "SELECT " + range + " FROM `" + db + "`.`" + table + "` WHERE `" + field + "` = %s;",
        (search,),
    )
    return self._dbCursor.fetchall()
def SearchNullDb(self, range, db, table, field):
    """Return all rows of `db`.`table` where *field* is NULL.

    *range* is the SELECT column list; all arguments here are identifiers
    chosen by the caller, so there is no bindable value in this query.
    """
    query = "SELECT {} FROM `{}`.`{}` WHERE `{}` is NULL".format(range, db, table, field)
    self._dbCursor.execute(query)
    return self._dbCursor.fetchall()
def GetAllTableData(self, range, db, table):
    """Return every row of `db`.`table`; *range* is the SELECT column list."""
    query = "SELECT {} FROM `{}`.`{}`;".format(range, db, table)
    self._dbCursor.execute(query)
    return self._dbCursor.fetchall()
def CommitChangesToDb(self):
    """Commit the current transaction on the cached connection."""
    self._connectedDb.commit()
| 33.233333
| 114
| 0.604313
|
import MySQLdb
_connectedDb = None
_dbCursor = None
def ConnectToDb(self, userID, userPassword):
if self._connectedDb is not None:
DisconnectDb(self)
self._connectedDb = MySQLdb.connect(host="localhost", user = userID, passwd=userPassword)
self._dbCursor = self._connectedDb.cursor()
return "Connected to Database!"
def DisconnectDb(self):
self._dbCursor.close()
self._connectedDb.close()
def InsertIntoDb(self, values, location, db, table):
insertLoc = ""
for x in location:
if len(insertLoc) > 0:
insertLoc += ","
insertLoc += "`"+str(x)+"`"
value = ""
for x in values:
if len(value) > 0:
value += ","
value += "\""+str(x)+"\""
self._dbCursor.execute("INSERT INTO `"+db+"`.`"+table+"` ("+insertLoc+") VALUES (%s);" % value)
def UpdateDb(self, updateCol, updateValue, value, location, db, table):
set = ""
for idx, x in enumerate(location):
if len(set) > 0:
set += ","
set += "`"+str(x)+"` = \""+str(value[idx])+"\""
self._dbCursor.execute("UPDATE `"+db+"`.`"+table+"` SET "+set+" WHERE `"+updateCol+"` = \"%s\";"% updateValue)
def RemoveFromDb(self, value, location, db, table):
self._dbCursor.execute("DELETE FROM `"+db+"`.`"+table+"` WHERE (`"+location+"` = \""+str(value)+"\");")
def SearchDb (self, range, db, table, field, search):
self._dbCursor.execute("SELECT "+range+" FROM `"+db+"`.`"+table+"` WHERE `"+field+"` = \"%s\";" % search)
return self._dbCursor.fetchall()
def SearchNullDb (self, range, db, table, field):
self._dbCursor.execute("SELECT "+range+" FROM `"+db+"`.`"+table+"` WHERE `"+field+"` is NULL")
return self._dbCursor.fetchall()
def GetAllTableData(self, range, db, table):
self._dbCursor.execute("SELECT "+range+" FROM `"+db+"`.`"+table+"`;")
return self._dbCursor.fetchall()
def CommitChangesToDb(self):
self._connectedDb.commit()
| true
| true
|
f716384515c52a1f9ca6af89058c342de1fc5533
| 4,424
|
py
|
Python
|
symphony/cli/tests/pyinventory_tests/test_service_type.py
|
omnicate/magma
|
e1e6c244f9e8bd000587a3dad3c54f4e64ada222
|
[
"BSD-3-Clause"
] | null | null | null |
symphony/cli/tests/pyinventory_tests/test_service_type.py
|
omnicate/magma
|
e1e6c244f9e8bd000587a3dad3c54f4e64ada222
|
[
"BSD-3-Clause"
] | null | null | null |
symphony/cli/tests/pyinventory_tests/test_service_type.py
|
omnicate/magma
|
e1e6c244f9e8bd000587a3dad3c54f4e64ada222
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from pyinventory.api.equipment_type import add_equipment_type
from pyinventory.api.service import add_service
from pyinventory.api.service_type import (
_populate_service_types,
add_service_type,
delete_service_type,
delete_service_type_with_services,
edit_service_type,
get_service_type,
)
from pyinventory.common.cache import SERVICE_TYPES
from pyinventory.common.data_class import PropertyDefinition, ServiceEndpointDefinition
from pyinventory.graphql.enum.property_kind import PropertyKind
from pysymphony import SymphonyClient
from ..utils.base_test import BaseTest
from ..utils.grpc.rpc_pb2_grpc import TenantServiceStub
class TestServiceType(BaseTest):
    """Integration tests for the pyinventory service-type API.

    setUp creates one service type ("Internet Access") with a single string
    property plus one service of that type, so each test starts from a
    known one-entry SERVICE_TYPES cache.
    """

    def __init__(
        self, testName: str, client: SymphonyClient, stub: TenantServiceStub
    ) -> None:
        super().__init__(testName, client, stub)

    def setUp(self) -> None:
        super().setUp()
        self.service_type = add_service_type(
            client=self.client,
            name="Internet Access",
            has_customer=True,
            properties=[
                PropertyDefinition(
                    property_name="Service Package",
                    property_kind=PropertyKind.string,
                    default_value=None,
                    is_fixed=False,
                )
            ],
            endpoint_definitions=[],
        )
        self.service = add_service(
            client=self.client,
            name="Room 201 Internet Access",
            external_id="S3232",
            service_type=self.service_type.name,
            customer=None,
            properties_dict={"Service Package": "Public 5G"},
        )

    def test_service_type_populated(self) -> None:
        # Clearing and repopulating the cache should rediscover the one
        # type created in setUp.
        self.assertEqual(len(SERVICE_TYPES), 1)
        SERVICE_TYPES.clear()
        _populate_service_types(client=self.client)
        self.assertEqual(len(SERVICE_TYPES), 1)

    def test_service_type_created(self) -> None:
        # Fetching by id should round-trip the exact object from setUp.
        fetched_service_type = get_service_type(
            client=self.client, service_type_id=self.service_type.id
        )
        self.assertEqual(fetched_service_type, self.service_type)

    def test_service_type_edited(self) -> None:
        # An equipment type is needed as the target of the endpoint
        # definition added by the edit below.
        equipment_type = add_equipment_type(
            client=self.client,
            name="Tp-Link T1600G",
            category="Router",
            properties=[
                PropertyDefinition(
                    property_name="IP",
                    property_kind=PropertyKind.string,
                    default_value=None,
                    is_fixed=False,
                )
            ],
            ports_dict={},
            position_list=[],
        )
        new_name = "New Service Package"
        new_properties = {"Service Package": "Public 5G"}
        endpoint_definitions = SERVICE_TYPES[
            self.service_type.name
        ].endpoint_definitions
        self.assertFalse(endpoint_definitions)
        edited_service_type = edit_service_type(
            client=self.client,
            service_type=self.service_type,
            new_name=new_name,
            new_properties=new_properties,
            new_endpoints=[
                ServiceEndpointDefinition(
                    id=None,
                    name="EndpointDefinition",
                    role="CPE",
                    endpoint_definition_index=0,
                    equipment_type_id=equipment_type.id,
                )
            ],
        )
        # The edit must be reflected both in the cache (keyed by the NEW
        # name) and on the returned object.
        endpoint_definitions = SERVICE_TYPES[
            edited_service_type.name
        ].endpoint_definitions
        self.assertEqual(len(endpoint_definitions), 1)
        self.assertEqual(edited_service_type.name, new_name)
        self.assertEqual(len(edited_service_type.property_types), 1)
        self.assertEqual(edited_service_type.property_types[0].stringValue, "Public 5G")

    def test_service_type_delete(self) -> None:
        delete_service_type(client=self.client, service_type=self.service_type)
        self.assertEqual(len(SERVICE_TYPES), 0)

    def test_service_type_delete_with_services(self) -> None:
        # Unlike plain delete, this variant also removes the dependent
        # service created in setUp.
        delete_service_type_with_services(
            client=self.client, service_type=self.service_type
        )
        self.assertEqual(len(SERVICE_TYPES), 0)
| 36.262295
| 88
| 0.631103
|
from pyinventory.api.equipment_type import add_equipment_type
from pyinventory.api.service import add_service
from pyinventory.api.service_type import (
_populate_service_types,
add_service_type,
delete_service_type,
delete_service_type_with_services,
edit_service_type,
get_service_type,
)
from pyinventory.common.cache import SERVICE_TYPES
from pyinventory.common.data_class import PropertyDefinition, ServiceEndpointDefinition
from pyinventory.graphql.enum.property_kind import PropertyKind
from pysymphony import SymphonyClient
from ..utils.base_test import BaseTest
from ..utils.grpc.rpc_pb2_grpc import TenantServiceStub
class TestServiceType(BaseTest):
def __init__(
self, testName: str, client: SymphonyClient, stub: TenantServiceStub
) -> None:
super().__init__(testName, client, stub)
def setUp(self) -> None:
super().setUp()
self.service_type = add_service_type(
client=self.client,
name="Internet Access",
has_customer=True,
properties=[
PropertyDefinition(
property_name="Service Package",
property_kind=PropertyKind.string,
default_value=None,
is_fixed=False,
)
],
endpoint_definitions=[],
)
self.service = add_service(
client=self.client,
name="Room 201 Internet Access",
external_id="S3232",
service_type=self.service_type.name,
customer=None,
properties_dict={"Service Package": "Public 5G"},
)
def test_service_type_populated(self) -> None:
self.assertEqual(len(SERVICE_TYPES), 1)
SERVICE_TYPES.clear()
_populate_service_types(client=self.client)
self.assertEqual(len(SERVICE_TYPES), 1)
def test_service_type_created(self) -> None:
fetched_service_type = get_service_type(
client=self.client, service_type_id=self.service_type.id
)
self.assertEqual(fetched_service_type, self.service_type)
def test_service_type_edited(self) -> None:
equipment_type = add_equipment_type(
client=self.client,
name="Tp-Link T1600G",
category="Router",
properties=[
PropertyDefinition(
property_name="IP",
property_kind=PropertyKind.string,
default_value=None,
is_fixed=False,
)
],
ports_dict={},
position_list=[],
)
new_name = "New Service Package"
new_properties = {"Service Package": "Public 5G"}
endpoint_definitions = SERVICE_TYPES[
self.service_type.name
].endpoint_definitions
self.assertFalse(endpoint_definitions)
edited_service_type = edit_service_type(
client=self.client,
service_type=self.service_type,
new_name=new_name,
new_properties=new_properties,
new_endpoints=[
ServiceEndpointDefinition(
id=None,
name="EndpointDefinition",
role="CPE",
endpoint_definition_index=0,
equipment_type_id=equipment_type.id,
)
],
)
endpoint_definitions = SERVICE_TYPES[
edited_service_type.name
].endpoint_definitions
self.assertEqual(len(endpoint_definitions), 1)
self.assertEqual(edited_service_type.name, new_name)
self.assertEqual(len(edited_service_type.property_types), 1)
self.assertEqual(edited_service_type.property_types[0].stringValue, "Public 5G")
def test_service_type_delete(self) -> None:
delete_service_type(client=self.client, service_type=self.service_type)
self.assertEqual(len(SERVICE_TYPES), 0)
def test_service_type_delete_with_services(self) -> None:
delete_service_type_with_services(
client=self.client, service_type=self.service_type
)
self.assertEqual(len(SERVICE_TYPES), 0)
| true
| true
|
f71639222fd2734617fde428e4935406b4096eab
| 2,925
|
py
|
Python
|
photoslib/fields.py
|
ivan-sysoi/django-photoslib
|
ffab2a7c238bcfec709a2db31fdd3b40757cf730
|
[
"MIT"
] | null | null | null |
photoslib/fields.py
|
ivan-sysoi/django-photoslib
|
ffab2a7c238bcfec709a2db31fdd3b40757cf730
|
[
"MIT"
] | 11
|
2020-04-05T17:46:46.000Z
|
2022-02-12T05:11:38.000Z
|
photoslib/fields.py
|
ivan-sysoi/django-photoslib
|
ffab2a7c238bcfec709a2db31fdd3b40757cf730
|
[
"MIT"
] | null | null | null |
from io import BytesIO
from PIL import Image
from django.conf import settings
from django.db import models
from pilkit.processors import ProcessorPipeline
from pilkit.utils import save_image
from sortedm2m.fields import SortedManyToManyField
from .forms import PhotoFieldWidget
__all__ = ('PhotoField', 'ManyPhotosField', 'PhotoProcessorMixin', 'SortableManyPhotosField')
class PhotoProcessorMixin:
    """Mixin that stores pilkit processing options and converts uploads.

    Subclasses are model fields; the processing kwargs are captured at
    field-declaration time and applied per-file by process_file().
    """

    def __init__(self, processors=None, format=None, options=None, autoconvert=True, **kwargs):
        self.process_image_kwargs = dict(processors=processors, format=format, options=options, autoconvert=autoconvert)
        super().__init__(**kwargs)

    def process_file(self, file):
        """Run the processor pipeline over *file* and return (buffer, format)."""
        img = Image.open(file)
        img = ProcessorPipeline(self.process_image_kwargs['processors'] or []).process(img)
        options = self.process_image_kwargs['options'] or {
            'quality': settings.PHOTOSLIB_QUALITY,
            # Fix: the PIL save option is spelled 'optimize'; the previous
            # 'optimized' key was silently ignored, so optimization never ran.
            'optimize': True,
        }
        format = self.process_image_kwargs['format'] or img.format or 'JPEG'
        # JPEG has no alpha channel, so flatten RGBA before saving.
        if format.upper() == 'JPEG' and img.mode == 'RGBA':
            img = img.convert(mode='RGB')
        buff = save_image(img, BytesIO(), format, options=options, autoconvert=self.process_image_kwargs['autoconvert'])
        return buff, format
class PhotoField(PhotoProcessorMixin, models.ForeignKey):
    """ForeignKey to photoslib.Photo with image processing and a photo widget."""

    def __init__(self, processors=None, format=None, options=None, autoconvert=None, **kwargs):
        # Target model and PROTECT deletion are fixed; callers cannot override.
        # NOTE(review): autoconvert defaults to None here while the mixin's
        # own default is True — confirm whether that difference is intended.
        kwargs['to'] = 'photoslib.Photo'
        kwargs['on_delete'] = models.PROTECT
        super().__init__(processors=processors, format=format, options=options, autoconvert=autoconvert, **kwargs)

    def formfield(self, **kwargs):
        widget = PhotoFieldWidget(self.model._meta.model_name, self.name)
        kwargs.setdefault('widget', widget)
        return super().formfield(**kwargs)
class ManyPhotosField(PhotoProcessorMixin, models.ManyToManyField):
    """ManyToMany to photoslib.Photo with image processing and a multi-photo widget."""

    def __init__(self, processors=None, format=None, options=None, autoconvert=None, **kwargs):
        # Target model is fixed; callers cannot override it.
        kwargs['to'] = 'photoslib.Photo'
        super().__init__(processors=processors, format=format, options=options, autoconvert=autoconvert, **kwargs)

    def formfield(self, **kwargs):
        # `multiply` is the widget's own flag name for multi-photo mode.
        widget = PhotoFieldWidget(self.model._meta.model_name, self.name, multiply=True)
        kwargs.setdefault('widget', widget)
        return super().formfield(**kwargs)
class SortableManyPhotosField(PhotoProcessorMixin, SortedManyToManyField):
    """Ordered ManyToMany to photoslib.Photo with a sortable multi-photo widget."""

    def __init__(self, processors=None, format=None, options=None, autoconvert=None, **kwargs):
        # Target model is fixed; callers cannot override it.
        kwargs['to'] = 'photoslib.Photo'
        super().__init__(processors=processors, format=format, options=options, autoconvert=autoconvert, **kwargs)

    def formfield(self, **kwargs):
        widget = PhotoFieldWidget(
            self.model._meta.model_name, self.name, multiply=True, sortable=True
        )
        kwargs.setdefault('widget', widget)
        return super().formfield(**kwargs)
| 41.785714
| 120
| 0.704274
|
from io import BytesIO
from PIL import Image
from django.conf import settings
from django.db import models
from pilkit.processors import ProcessorPipeline
from pilkit.utils import save_image
from sortedm2m.fields import SortedManyToManyField
from .forms import PhotoFieldWidget
__all__ = ('PhotoField', 'ManyPhotosField', 'PhotoProcessorMixin', 'SortableManyPhotosField')
class PhotoProcessorMixin:
def __init__(self, processors=None, format=None, options=None, autoconvert=True, **kwargs):
self.process_image_kwargs = dict(processors=processors, format=format, options=options, autoconvert=autoconvert)
super().__init__(**kwargs)
def process_file(self, file):
img = Image.open(file)
img = ProcessorPipeline(self.process_image_kwargs['processors'] or []).process(img)
options = self.process_image_kwargs['options'] or {
'quality': settings.PHOTOSLIB_QUALITY,
'optimized': True,
}
format = self.process_image_kwargs['format'] or img.format or 'JPEG'
if format.upper() == 'JPEG' and img.mode == 'RGBA':
img = img.convert(mode='RGB')
buff = save_image(img, BytesIO(), format, options=options, autoconvert=self.process_image_kwargs['autoconvert'])
return buff, format
class PhotoField(PhotoProcessorMixin, models.ForeignKey):
def __init__(self, processors=None, format=None, options=None, autoconvert=None, **kwargs):
kwargs['to'] = 'photoslib.Photo'
kwargs['on_delete'] = models.PROTECT
super().__init__(processors=processors, format=format, options=options, autoconvert=autoconvert, **kwargs)
def formfield(self, **kwargs):
kwargs.setdefault('widget', PhotoFieldWidget(self.model._meta.model_name, self.name))
return super().formfield(**kwargs)
class ManyPhotosField(PhotoProcessorMixin, models.ManyToManyField):
def __init__(self, processors=None, format=None, options=None, autoconvert=None, **kwargs):
kwargs['to'] = 'photoslib.Photo'
super().__init__(processors=processors, format=format, options=options, autoconvert=autoconvert, **kwargs)
def formfield(self, **kwargs):
kwargs.setdefault('widget', PhotoFieldWidget(self.model._meta.model_name, self.name, multiply=True))
return super().formfield(**kwargs)
class SortableManyPhotosField(PhotoProcessorMixin, SortedManyToManyField):
def __init__(self, processors=None, format=None, options=None, autoconvert=None, **kwargs):
kwargs['to'] = 'photoslib.Photo'
super().__init__(processors=processors, format=format, options=options, autoconvert=autoconvert, **kwargs)
def formfield(self, **kwargs):
kwargs.setdefault('widget', PhotoFieldWidget(self.model._meta.model_name, self.name, multiply=True,
sortable=True))
return super().formfield(**kwargs)
| true
| true
|
f7163953b4e98f271c1d184f936ad3281be2de7f
| 13,990
|
py
|
Python
|
src/olympia/amo/sitemap.py
|
snifhex/addons-server
|
2b9dee65c10c0dca700ff2d25f3694c7cf769816
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/amo/sitemap.py
|
snifhex/addons-server
|
2b9dee65c10c0dca700ff2d25f3694c7cf769816
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/amo/sitemap.py
|
snifhex/addons-server
|
2b9dee65c10c0dca700ff2d25f3694c7cf769816
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import math
import os
from collections import namedtuple
from urllib.parse import urlparse
from django.conf import settings
from django.contrib.sitemaps import Sitemap as DjangoSitemap
from django.db.models import Count, Max, Q
from django.template import loader
from django.utils.functional import cached_property
from django.urls import reverse
from olympia import amo
from olympia.addons.models import Addon, AddonCategory
from olympia.amo.reverse import get_url_prefix, override_url_prefix
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.constants.categories import CATEGORIES
from olympia.constants.promoted import RECOMMENDED
from olympia.bandwagon.models import Collection
from olympia.promoted.models import PromotedAddon
from olympia.users.models import UserProfile
# These constants are from:
# https://github.com/mozilla/addons-frontend/blob/master/src/amo/reducers/addonsByAuthors.js
EXTENSIONS_BY_AUTHORS_PAGE_SIZE = 10
THEMES_BY_AUTHORS_PAGE_SIZE = 12
# top 10 locales by visitor from GA (as of May 2021)
FRONTEND_LANGUAGES = [
'de',
'en-GB',
'en-US',
'es',
'fr',
'ja',
'pl',
'pt-BR',
'ru',
'zh-CN',
]
class LazyTupleList:
    """Lazily emulates a generated list like:

    [
        (item_a, item_b)
        for item_b in list_b
        for item_a in list_a
    ]

    Items are computed on access, so the full cross product is never
    materialized in memory.
    """

    def __init__(self, list_a, list_b):
        self.list_a = list_a
        self.list_b = list_b

    def __len__(self):
        return len(self.list_a) * len(self.list_b)

    def __getitem__(self, key):
        a_len = len(self.list_a)

        def get(index):
            # list_a varies fastest, matching the docstring's nested loops.
            return (self.list_a[index % a_len], self.list_b[index // a_len])

        if isinstance(key, slice):
            # slice.indices normalizes None and negative bounds, so open
            # slices like [:5] or [-3:] work instead of raising TypeError
            # on range(None, ...) as the previous implementation did.
            return [get(idx) for idx in range(*key.indices(len(self)))]
        return get(key)
class Sitemap(DjangoSitemap):
    """Base sitemap with AMO-specific i18n, domain and rendering behaviour.

    Items are pre-computed into `_cached_items`; with i18n enabled they are
    crossed with FRONTEND_LANGUAGES lazily (LazyTupleList) so pagination
    already sees every (item, language) combination.
    """

    limit = 2000
    i18n = True
    languages = FRONTEND_LANGUAGES
    alternates = True
    # x_default = False # TODO: enable this when we can validate it works well
    _cached_items = []
    protocol = urlparse(settings.EXTERNAL_SITE_URL).scheme

    def _location(self, item, force_lang_code=None):
        # modified from Django implementation - we don't rely on locale for urls
        if self.i18n:
            obj, lang_code = item
            # Doing .replace is hacky, but `override_url_prefix` is slow at scale
            return self.location(obj).replace(
                settings.LANGUAGE_CODE, force_lang_code or lang_code, 1
            )
        return self.location(item)

    def _items(self):
        items = self.items()
        if self.i18n:
            # Create (item, lang_code) tuples for all items and languages.
            # This is necessary to paginate with all languages already considered.
            return LazyTupleList(items, self._languages())
        return items

    def items(self):
        return self._cached_items

    def get_domain(self, site):
        # With no Site given, fall back to the EXTERNAL_SITE_URL host,
        # caching the parsed value on the instance.
        if not site:
            if not hasattr(self, 'domain'):
                self.domain = urlparse(settings.EXTERNAL_SITE_URL).netloc
            return self.domain
        return super().get_domain(site=site)

    def get_urls(self, page=1, site=None, protocol=None, *, app_name=None):
        # Reverse all urls under the requested app's prefix.
        with override_url_prefix(app_name=app_name):
            return super().get_urls(page=page, site=site, protocol=protocol)

    @cached_property
    def template(self):
        return loader.get_template('sitemap.xml')

    def render(self, app_name, page):
        """Render one page of this sitemap section as XML."""
        context = {'urlset': self.get_urls(page=page, app_name=app_name)}
        return self.template.render(context)

    @property
    def _current_app(self):
        # The app implied by the currently-active url prefix.
        return amo.APPS[get_url_prefix().app]
def get_android_promoted_addons():
    """Return PromotedAddons recommended on Android (or promoted app-agnostically).

    Only rows whose addon's current version was actually approved as
    RECOMMENDED for Android are included.
    """
    return PromotedAddon.objects.filter(
        Q(application_id=amo.ANDROID.id) | Q(application_id__isnull=True),
        group_id=RECOMMENDED.id,
        addon___current_version__promoted_approvals__application_id=(amo.ANDROID.id),
        addon___current_version__promoted_approvals__group_id=RECOMMENDED.id,
    )
class AddonSitemap(Sitemap):
    """Sitemap of addon detail pages plus their paginated ratings pages."""

    # `page` defaults to 1 so detail-page items can omit it.
    item_tuple = namedtuple('Item', ['last_updated', 'url', 'page'], defaults=(1,))

    @cached_property
    def _cached_items(self):
        current_app = self._current_app
        addons_qs = Addon.objects.public().filter(
            _current_version__apps__application=current_app.id
        )
        # android is currently limited to a small number of recommended addons, so get
        # the list of those and filter further
        if current_app == amo.ANDROID:
            promoted_addon_ids = get_android_promoted_addons().values_list(
                'addon_id', flat=True
            )
            addons_qs = addons_qs.filter(id__in=promoted_addon_ids)
        addons = list(
            addons_qs.order_by('-last_updated')
            .values_list(
                'last_updated',
                'slug',
                'text_ratings_count',
                named=True,
            )
            .iterator()
        )
        items = [
            self.item_tuple(
                addon.last_updated,
                reverse('addons.detail', args=[addon.slug]),
            )
            for addon in addons
        ]
        # add pages for ratings - and extra pages when needed to paginate
        page_size = settings.REST_FRAMEWORK['PAGE_SIZE']
        for addon in addons:
            # `or 1` guarantees at least one ratings page even with 0 ratings.
            pages_needed = math.ceil((addon.text_ratings_count or 1) / page_size)
            items.extend(
                self.item_tuple(
                    addon.last_updated,
                    reverse('addons.ratings.list', args=[addon.slug]),
                    page,
                )
                for page in range(1, pages_needed + 1)
            )
        return items

    def lastmod(self, item):
        return item.last_updated

    def location(self, item):
        # Page 1 is the canonical, un-suffixed url.
        return item.url + (f'?page={item.page}' if item.page > 1 else '')
class AMOSitemap(Sitemap):
    """Static sitemap of fixed frontend and server pages."""

    lastmod = datetime.datetime.now()
    _cached_items = [
        # frontend pages
        ('home', amo.FIREFOX),
        ('home', amo.ANDROID),
        ('pages.about', None),
        ('pages.review_guide', None),
        ('browse.extensions', amo.FIREFOX),
        ('browse.themes', amo.FIREFOX),
        ('browse.language-tools', amo.FIREFOX),
        # server pages
        ('devhub.index', None),
        ('apps.appversions', amo.FIREFOX),
        ('apps.appversions', amo.ANDROID),
    ]

    def location(self, item):
        urlname, app = item
        # App-less urls reverse under the current prefix; app-specific
        # ones reverse under that app's own prefix.
        if not app:
            return reverse(urlname)
        with override_url_prefix(app_name=app.short):
            return reverse(urlname)
class CategoriesSitemap(Sitemap):
    """Sitemap of category browse pages, with extra ?page= entries per category."""

    lastmod = datetime.datetime.now()

    @cached_property
    def _cached_items(self):
        page_size = settings.REST_FRAMEWORK['PAGE_SIZE']
        # Search can't paginate past ES_MAX_RESULT_WINDOW results, so cap
        # the number of pages we advertise.
        page_count_max = settings.ES_MAX_RESULT_WINDOW // page_size

        def additems(type):
            # Build (category, page) pairs for every category of this addon
            # type; `current_app` and `addon_counts` are bound below before
            # this closure is called.
            items = []
            for category in CATEGORIES[current_app.id][type].values():
                items.append((category, 1))
                pages_needed = min(
                    math.ceil(addon_counts.get(category.id, 1) / page_size),
                    page_count_max,
                )
                for page in range(2, pages_needed + 1):
                    items.append((category, page))
            return items

        current_app = self._current_app
        counts_qs = (
            AddonCategory.objects.filter(
                addon___current_version__isnull=False,
                addon___current_version__apps__application=current_app.id,
                addon__disabled_by_user=False,
                addon__status__in=amo.REVIEWED_STATUSES,
            )
            .values('category_id')
            .annotate(count=Count('addon_id'))
        )
        addon_counts = {cat['category_id']: cat['count'] for cat in counts_qs}
        items = additems(amo.ADDON_EXTENSION)
        # Static theme categories only exist on Firefox.
        if current_app == amo.FIREFOX:
            items.extend(additems(amo.ADDON_STATICTHEME))
        return items

    def location(self, item):
        (category, page) = item
        # Page 1 is the canonical, un-suffixed url.
        return category.get_url_path() + (f'?page={page}' if page > 1 else '')
class CollectionSitemap(Sitemap):
    """Sitemap for curated collections (those owned by the task user)."""

    @cached_property
    def _cached_items(self):
        qs = (
            Collection.objects.filter(author_id=settings.TASK_USER_ID)
            .order_by('-modified')
            .values_list('modified', 'slug', 'author_id', named=True)
        )
        return list(qs.iterator())

    def lastmod(self, item):
        return item.modified

    def location(self, item):
        return Collection.get_url_path(item)
class AccountSitemap(Sitemap):
    """Sitemap of user profile pages, paginated separately by extensions and themes."""

    item_tuple = namedtuple(
        'AccountItem',
        ['addons_updated', 'url', 'extension_page', 'theme_page'],
        defaults=(1, 1),
    )

    @cached_property
    def _cached_items(self):
        current_app = self._current_app
        # Only count listed, reviewed addons the user develops or owns.
        addon_q = Q(
            addons___current_version__isnull=False,
            addons___current_version__apps__application=current_app.id,
            addons__disabled_by_user=False,
            addons__status__in=amo.REVIEWED_STATUSES,
            addonuser__listed=True,
            addonuser__role__in=(amo.AUTHOR_ROLE_DEV, amo.AUTHOR_ROLE_OWNER),
        )
        # android is currently limited to a small number of recommended addons, so get
        # the list of those and filter further
        if current_app == amo.ANDROID:
            promoted_addon_ids = get_android_promoted_addons().values_list(
                'addon_id', flat=True
            )
            addon_q = addon_q & Q(addons__id__in=promoted_addon_ids)
        users = (
            UserProfile.objects.filter(is_public=True, deleted=False)
            .annotate(
                theme_count=Count(
                    'addons', filter=Q(addon_q, addons__type=amo.ADDON_STATICTHEME)
                )
            )
            .annotate(
                extension_count=Count(
                    'addons', filter=Q(addon_q, addons__type=amo.ADDON_EXTENSION)
                )
            )
            .annotate(addons_updated=Max('addons__last_updated', filter=addon_q))
            .order_by('-addons_updated', '-modified')
            .values_list(
                'addons_updated', 'id', 'extension_count', 'theme_count', named=True
            )
            .iterator()
        )
        items = []
        for user in users:
            if not user.extension_count and not user.theme_count:
                # some users have an empty page for various reasons, no need to include
                continue
            # `or 1` keeps at least one page per kind for the math below.
            extension_pages_needed = math.ceil(
                (user.extension_count or 1) / EXTENSIONS_BY_AUTHORS_PAGE_SIZE
            )
            theme_pages_needed = math.ceil(
                (user.theme_count or 1) / THEMES_BY_AUTHORS_PAGE_SIZE
            )
            items.extend(
                self.item_tuple(
                    user.addons_updated,
                    reverse('users.profile', args=[user.id]),
                    ext_page,
                    1,
                )
                for ext_page in range(1, extension_pages_needed + 1)
            )
            # start themes at 2 because we don't want (1, 1) twice
            items.extend(
                self.item_tuple(
                    user.addons_updated,
                    reverse('users.profile', args=[user.id]),
                    1,
                    theme_page,
                )
                for theme_page in range(2, theme_pages_needed + 1)
            )
        return items

    def lastmod(self, item):
        return item.addons_updated

    def location(self, item):
        # page_e / page_t query params are only appended past page 1.
        urlargs = '&'.join(
            ([f'page_e={item.extension_page}'] if item.extension_page > 1 else [])
            + ([f'page_t={item.theme_page}'] if item.theme_page > 1 else [])
        )
        return item.url + (f'?{urlargs}' if urlargs else '')
def get_sitemaps():
    """Return the mapping of (section, app) -> Sitemap instance to render."""
    return {
        # because some urls are app-less, we specify per item, so don't specify an app
        ('amo', None): AMOSitemap(),
        ('addons', amo.FIREFOX): AddonSitemap(),
        ('addons', amo.ANDROID): AddonSitemap(),
        # category pages aren't supported on android, so firefox only
        ('categories', amo.FIREFOX): CategoriesSitemap(),
        # we don't expose collections on android, so firefox only
        ('collections', amo.FIREFOX): CollectionSitemap(),
        ('users', amo.FIREFOX): AccountSitemap(),
        ('users', amo.ANDROID): AccountSitemap(),
    }
OTHER_SITEMAPS = [
'/blog/sitemap.xml',
]
def get_sitemap_section_pages(sitemaps):
    """Return (section, app_name, page) triples for every page of every sitemap."""
    pages = []
    for (section, app), site in sitemaps.items():
        if app:
            # Page enumeration must happen under the app's url prefix.
            with override_url_prefix(app_name=app.short):
                for page in site.paginator.page_range:
                    pages.append((section, app.short, page))
        else:
            for page in site.paginator.page_range:
                pages.append((section, None, page))
    return pages
def render_index_xml(sitemaps):
    """Render the sitemap index XML listing every section page plus static entries."""
    sitemap_url = reverse('amo.sitemap')
    server_urls = []
    for section, app_name, page in get_sitemap_section_pages(sitemaps):
        url = f'{sitemap_url}?section={section}'
        if app_name:
            url += f'&app_name={app_name}'
        # Page 1 is implied and gets no &p= parameter.
        if page != 1:
            url += f'&p={page}'
        server_urls.append(url)
    urls = server_urls + OTHER_SITEMAPS
    return loader.render_to_string(
        'sitemap_index.xml',
        {'sitemaps': (absolutify(url) for url in urls)},
    )
def get_sitemap_path(section, app, page=1):
    """Build the on-disk path for a rendered sitemap file.

    The filename is 'sitemap' plus optional '-section', '-app' and '-page'
    parts (page 1 is implied and omitted), with an '.xml' extension.
    """
    name_parts = ['sitemap']
    if section:
        name_parts.append(str(section))
    if app:
        name_parts.append(str(app))
    if page != 1:
        name_parts.append(str(page))
    return os.path.join(settings.SITEMAP_STORAGE_PATH, '-'.join(name_parts) + '.xml')
| 32.917647
| 92
| 0.59757
|
import datetime
import math
import os
from collections import namedtuple
from urllib.parse import urlparse
from django.conf import settings
from django.contrib.sitemaps import Sitemap as DjangoSitemap
from django.db.models import Count, Max, Q
from django.template import loader
from django.utils.functional import cached_property
from django.urls import reverse
from olympia import amo
from olympia.addons.models import Addon, AddonCategory
from olympia.amo.reverse import get_url_prefix, override_url_prefix
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.constants.categories import CATEGORIES
from olympia.constants.promoted import RECOMMENDED
from olympia.bandwagon.models import Collection
from olympia.promoted.models import PromotedAddon
from olympia.users.models import UserProfile
EXTENSIONS_BY_AUTHORS_PAGE_SIZE = 10
THEMES_BY_AUTHORS_PAGE_SIZE = 12
FRONTEND_LANGUAGES = [
'de',
'en-GB',
'en-US',
'es',
'fr',
'ja',
'pl',
'pt-BR',
'ru',
'zh-CN',
]
class LazyTupleList:
    """Lazy cross product of two lists as ``(a, b)`` tuples.

    Equivalent to ``[(a, b) for b in list_b for a in list_a]`` but computes
    elements on demand, so huge products can be paginated without being
    materialized. Supports ``len()``, integer indexing and slicing.
    """

    def __init__(self, list_a, list_b):
        self.list_a = list_a
        self.list_b = list_b

    def __len__(self):
        return len(self.list_a) * len(self.list_b)

    def __getitem__(self, key):
        width = len(self.list_a)

        def pair_at(position):
            # list_a cycles fastest; list_b advances once per full cycle.
            return (self.list_a[position % width], self.list_b[position // width])

        if isinstance(key, slice):
            # NOTE(review): slices are assumed to carry explicit start/stop
            # values (as the paginator provides) — confirm before reuse.
            return [pair_at(pos) for pos in range(key.start, key.stop, key.step or 1)]
        return pair_at(key)
class Sitemap(DjangoSitemap):
    """Base class for AMO sitemap sections.

    Extends Django's Sitemap with the frontend language list (emitting one
    alternate per locale) and app-aware URL prefix handling.
    """

    limit = 2000
    i18n = True
    languages = FRONTEND_LANGUAGES
    alternates = True
    # NOTE(review): this assignment arrived truncated in the source
    # ("gs.EXTERNAL_SITE_URL).scheme"); restored as the scheme of the
    # external site URL, matching how get_domain() below derives the netloc
    # from the same setting — confirm against upstream.
    protocol = urlparse(settings.EXTERNAL_SITE_URL).scheme

    def _location(self, item, force_lang_code=None):
        if self.i18n:
            obj, lang_code = item
            # Doing .replace is hacky, but `override_url_prefix` is slow at scale
            return self.location(obj).replace(
                settings.LANGUAGE_CODE, force_lang_code or lang_code, 1
            )
        return self.location(item)

    def _items(self):
        items = self.items()
        if self.i18n:
            # Create (item, lang_code) tuples for all items and languages.
            # This is necessary to paginate with all languages already considered.
            return LazyTupleList(items, self._languages())
        return items

    def items(self):
        # Subclasses supply `_cached_items` (usually a cached_property).
        return self._cached_items

    def get_domain(self, site):
        if not site:
            if not hasattr(self, 'domain'):
                self.domain = urlparse(settings.EXTERNAL_SITE_URL).netloc
            return self.domain
        return super().get_domain(site=site)

    def get_urls(self, page=1, site=None, protocol=None, *, app_name=None):
        # Generate this page's URLs under the given app's URL prefix.
        with override_url_prefix(app_name=app_name):
            return super().get_urls(page=page, site=site, protocol=protocol)

    @cached_property
    def template(self):
        return loader.get_template('sitemap.xml')

    def render(self, app_name, page):
        """Render one page of this sitemap section to XML."""
        context = {'urlset': self.get_urls(page=page, app_name=app_name)}
        return self.template.render(context)

    @property
    def _current_app(self):
        # The app implied by the active URL prefix (firefox/android).
        return amo.APPS[get_url_prefix().app]
def get_android_promoted_addons():
    # Recommended promotions that either target Android explicitly or carry
    # no application restriction, and whose add-on's current version was
    # actually approved for Android in the RECOMMENDED group.
    return PromotedAddon.objects.filter(
        Q(application_id=amo.ANDROID.id) | Q(application_id__isnull=True),
        group_id=RECOMMENDED.id,
        addon___current_version__promoted_approvals__application_id=(amo.ANDROID.id),
        addon___current_version__promoted_approvals__group_id=RECOMMENDED.id,
    )
class AddonSitemap(Sitemap):
    """Sitemap section for add-on detail pages and their ratings lists."""

    # (last_updated, url, page); `page` defaults to 1 for detail pages.
    item_tuple = namedtuple('Item', ['last_updated', 'url', 'page'], defaults=(1,))

    @cached_property
    def _cached_items(self):
        current_app = self._current_app
        addons_qs = Addon.objects.public().filter(
            _current_version__apps__application=current_app.id
        )
        # android is currently limited to a small number of recommended addons, so get
        # the list of those and filter further
        if current_app == amo.ANDROID:
            promoted_addon_ids = get_android_promoted_addons().values_list(
                'addon_id', flat=True
            )
            addons_qs = addons_qs.filter(id__in=promoted_addon_ids)
        addons = list(
            addons_qs.order_by('-last_updated')
            .values_list(
                'last_updated',
                'slug',
                'text_ratings_count',
                named=True,
            )
            .iterator()
        )
        # One detail-page item per add-on.
        items = [
            self.item_tuple(
                addon.last_updated,
                reverse('addons.detail', args=[addon.slug]),
            )
            for addon in addons
        ]
        # add pages for ratings - and extra pages when needed to paginate
        page_size = settings.REST_FRAMEWORK['PAGE_SIZE']
        for addon in addons:
            # `or 1` guarantees at least one ratings page even with 0 ratings.
            pages_needed = math.ceil((addon.text_ratings_count or 1) / page_size)
            items.extend(
                self.item_tuple(
                    addon.last_updated,
                    reverse('addons.ratings.list', args=[addon.slug]),
                    page,
                )
                for page in range(1, pages_needed + 1)
            )
        return items

    def lastmod(self, item):
        return item.last_updated

    def location(self, item):
        # Page 1 is the canonical URL with no query string.
        return item.url + (f'?page={item.page}' if item.page > 1 else '')
class AMOSitemap(Sitemap):
    """Sitemap section for the fixed set of frontend and server pages."""

    # Static page list, so lastmod is simply process start time.
    lastmod = datetime.datetime.now()

    # (urlname, app) pairs; app=None means the URL is app-independent.
    _cached_items = [
        # frontend pages
        ('home', amo.FIREFOX),
        ('home', amo.ANDROID),
        ('pages.about', None),
        ('pages.review_guide', None),
        ('browse.extensions', amo.FIREFOX),
        ('browse.themes', amo.FIREFOX),
        ('browse.language-tools', amo.FIREFOX),
        # server pages
        ('devhub.index', None),
        ('apps.appversions', amo.FIREFOX),
        ('apps.appversions', amo.ANDROID),
    ]

    def location(self, item):
        urlname, app = item
        if app:
            # App-specific pages are reversed under that app's URL prefix.
            with override_url_prefix(app_name=app.short):
                return reverse(urlname)
        else:
            return reverse(urlname)
class CategoriesSitemap(Sitemap):
    """Sitemap section for category listing pages (with pagination)."""

    lastmod = datetime.datetime.now()

    @cached_property
    def _cached_items(self):
        page_size = settings.REST_FRAMEWORK['PAGE_SIZE']
        # ES can only paginate up to ES_MAX_RESULT_WINDOW results, so cap
        # the number of pages we advertise per category.
        page_count_max = settings.ES_MAX_RESULT_WINDOW // page_size

        def additems(type):
            # Emit (category, page) for every category of `type`, always
            # including page 1 even when the category has no add-ons.
            items = []
            for category in CATEGORIES[current_app.id][type].values():
                items.append((category, 1))
                pages_needed = min(
                    math.ceil(addon_counts.get(category.id, 1) / page_size),
                    page_count_max,
                )
                for page in range(2, pages_needed + 1):
                    items.append((category, page))
            return items

        current_app = self._current_app
        # Count reviewed, enabled add-ons with a current version per category.
        counts_qs = (
            AddonCategory.objects.filter(
                addon___current_version__isnull=False,
                addon___current_version__apps__application=current_app.id,
                addon__disabled_by_user=False,
                addon__status__in=amo.REVIEWED_STATUSES,
            )
            .values('category_id')
            .annotate(count=Count('addon_id'))
        )
        addon_counts = {cat['category_id']: cat['count'] for cat in counts_qs}

        items = additems(amo.ADDON_EXTENSION)
        if current_app == amo.FIREFOX:
            items.extend(additems(amo.ADDON_STATICTHEME))
        return items

    def location(self, item):
        (category, page) = item
        # Page 1 is the canonical URL with no query string.
        return category.get_url_path() + (f'?page={page}' if page > 1 else '')
class CollectionSitemap(Sitemap):
    """Sitemap section for collections owned by the configured task user."""

    @cached_property
    def _cached_items(self):
        collections = Collection.objects.filter(author_id=settings.TASK_USER_ID)
        rows = collections.order_by('-modified').values_list(
            'modified', 'slug', 'author_id', named=True
        )
        return list(rows.iterator())

    def lastmod(self, item):
        return item.modified

    def location(self, item):
        # The named-tuple row carries the fields get_url_path reads.
        return Collection.get_url_path(item)
class AccountSitemap(Sitemap):
    """Sitemap section for user profile pages, paginated by the user's
    extension and theme listings."""

    item_tuple = namedtuple(
        'AccountItem',
        ['addons_updated', 'url', 'extension_page', 'theme_page'],
        defaults=(1, 1),
    )

    @cached_property
    def _cached_items(self):
        current_app = self._current_app
        # Listed, reviewed add-ons with a current version on this app,
        # where the user is a listed developer/owner.
        addon_q = Q(
            addons___current_version__isnull=False,
            addons___current_version__apps__application=current_app.id,
            addons__disabled_by_user=False,
            addons__status__in=amo.REVIEWED_STATUSES,
            addonuser__listed=True,
            addonuser__role__in=(amo.AUTHOR_ROLE_DEV, amo.AUTHOR_ROLE_OWNER),
        )
        # android is currently limited to a small number of recommended addons, so get
        # the list of those and filter further
        if current_app == amo.ANDROID:
            promoted_addon_ids = get_android_promoted_addons().values_list(
                'addon_id', flat=True
            )
            addon_q = addon_q & Q(addons__id__in=promoted_addon_ids)
        users = (
            UserProfile.objects.filter(is_public=True, deleted=False)
            .annotate(
                theme_count=Count(
                    'addons', filter=Q(addon_q, addons__type=amo.ADDON_STATICTHEME)
                )
            )
            .annotate(
                extension_count=Count(
                    'addons', filter=Q(addon_q, addons__type=amo.ADDON_EXTENSION)
                )
            )
            .annotate(addons_updated=Max('addons__last_updated', filter=addon_q))
            .order_by('-addons_updated', '-modified')
            .values_list(
                'addons_updated', 'id', 'extension_count', 'theme_count', named=True
            )
            .iterator()
        )
        items = []
        for user in users:
            if not user.extension_count and not user.theme_count:
                # some users have an empty page for various reasons, no need to include
                continue
            # `or 1`: always at least one page of each listing.
            extension_pages_needed = math.ceil(
                (user.extension_count or 1) / EXTENSIONS_BY_AUTHORS_PAGE_SIZE
            )
            theme_pages_needed = math.ceil(
                (user.theme_count or 1) / THEMES_BY_AUTHORS_PAGE_SIZE
            )
            items.extend(
                self.item_tuple(
                    user.addons_updated,
                    reverse('users.profile', args=[user.id]),
                    ext_page,
                    1,
                )
                for ext_page in range(1, extension_pages_needed + 1)
            )
            # start themes at 2 because we don't want (1, 1) twice
            items.extend(
                self.item_tuple(
                    user.addons_updated,
                    reverse('users.profile', args=[user.id]),
                    1,
                    theme_page,
                )
                for theme_page in range(2, theme_pages_needed + 1)
            )
        return items

    def lastmod(self, item):
        return item.addons_updated

    def location(self, item):
        # Page 1 on either listing is omitted from the query string.
        urlargs = '&'.join(
            ([f'page_e={item.extension_page}'] if item.extension_page > 1 else [])
            + ([f'page_t={item.theme_page}'] if item.theme_page > 1 else [])
        )
        return item.url + (f'?{urlargs}' if urlargs else '')
def get_sitemaps():
    """Return the mapping of (section, app) -> Sitemap instance to render."""
    return {
        ('amo', None): AMOSitemap(),
        ('addons', amo.FIREFOX): AddonSitemap(),
        ('addons', amo.ANDROID): AddonSitemap(),
        # category pages aren't supported on android, so firefox only
        ('categories', amo.FIREFOX): CategoriesSitemap(),
        ('collections', amo.FIREFOX): CollectionSitemap(),
        ('users', amo.FIREFOX): AccountSitemap(),
        ('users', amo.ANDROID): AccountSitemap(),
    }
# Sitemaps generated outside this module that must still appear in the index.
OTHER_SITEMAPS = [
    '/blog/sitemap.xml',
]
def get_sitemap_section_pages(sitemaps):
    """Return (section, app_name, page) tuples for every page of every
    sitemap section in `sitemaps` (as produced by get_sitemaps)."""
    pages = []
    for (section, app), site in sitemaps.items():
        if not app:
            pages.extend((section, None, page) for page in site.paginator.page_range)
            continue
        # Paginate under the app's URL prefix so page counts match the URLs
        # that will later be generated for that app.
        with override_url_prefix(app_name=app.short):
            # Add all pages of the sitemap section.
            pages.extend(
                (section, app.short, page) for page in site.paginator.page_range
            )
    return pages
def render_index_xml(sitemaps):
    """Render the sitemap index XML listing one URL per section/app/page."""
    sitemap_url = reverse('amo.sitemap')
    # Query parameters are only emitted when they differ from the defaults
    # (no app, page 1).
    server_urls = (
        f'{sitemap_url}?section={section}'
        + (f'&app_name={app_name}' if app_name else '')
        + (f'&p={page}' if page != 1 else '')
        for section, app_name, page in get_sitemap_section_pages(sitemaps)
    )
    urls = list(server_urls) + OTHER_SITEMAPS
    return loader.render_to_string(
        'sitemap_index.xml',
        {'sitemaps': (absolutify(url) for url in urls)},
    )
def get_sitemap_path(section, app, page=1):
    """Build the on-disk path of a rendered sitemap file; omitted parts
    (no section, no app, page 1) leave no trace in the filename."""
    return os.path.join(
        settings.SITEMAP_STORAGE_PATH,
        'sitemap'
        + (f'-{section}' if section else '')
        + (f'-{app}' if app else '')
        + (f'-{page}' if page != 1 else '')
        + '.xml',
    )
| true
| true
|
f71639dc7301c19e856134c3182b0033b000fd73
| 9,078
|
py
|
Python
|
app.py
|
andrius-siup/recipe-book
|
e08b4bd00bf2e79d65623e6a62d865535695afb2
|
[
"W3C"
] | null | null | null |
app.py
|
andrius-siup/recipe-book
|
e08b4bd00bf2e79d65623e6a62d865535695afb2
|
[
"W3C"
] | null | null | null |
app.py
|
andrius-siup/recipe-book
|
e08b4bd00bf2e79d65623e6a62d865535695afb2
|
[
"W3C"
] | 1
|
2021-06-06T19:21:07.000Z
|
2021-06-06T19:21:07.000Z
|
import os
from flask import (
Flask, flash, render_template, redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
mongo = PyMongo(app)
# all recipes
@app.route("/")
@app.route("/get_recipes")
def get_recipes():
    """Landing page: list every recipe in the collection."""
    all_recipes = list(mongo.db.recipes.find())
    return render_template(
        "recipes.html", recipes=all_recipes, page_title="All Recipes")
@app.route("/view_recipe/<recipe_id>")
def view_recipe(recipe_id):
    """Display a single recipe; viewing requires a logged-in user."""
    # Check the session first: previously the DB was queried (and could 404)
    # before anonymous users were redirected to the login page.
    if 'user' not in session:
        return redirect(url_for("login"))
    the_recipe = mongo.db.recipes.find_one_or_404({"_id": ObjectId(recipe_id)})
    # the template expects the single document under the `recipes` key
    return render_template(
        "view_recipe.html", recipes=the_recipe, page_title="View Recipe")
@app.route("/search", methods=["GET", "POST"])
def search():
    """Full-text search over recipes using the MongoDB text index."""
    query = request.form.get("query")
    # A GET request (or empty submission) carries no form data, so `query`
    # is None and `$search: None` would raise — show no results instead.
    if query:
        recipes = list(mongo.db.recipes.find({"$text": {"$search": query}}))
    else:
        recipes = []
    return render_template(
        "recipes.html", recipes=recipes, page_title="Search Recipe")
# register
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create a new account (hashed password) and log the user straight in."""
    if request.method == "POST":
        # check if username already exists in db
        existing_user = mongo.db.users.find_one(
            {"username": request.form.get("username").lower()})

        if existing_user:
            flash("Username already exists")
            return redirect(url_for("register"))

        # renamed from `register`, which shadowed this view function's name
        new_user = {
            "username": request.form.get("username").lower(),
            "password": generate_password_hash(request.form.get("password"))
        }
        mongo.db.users.insert_one(new_user)

        # put the new user into 'session' cookie
        session["user"] = request.form.get("username").lower()
        flash("Registration Successful!")
        return redirect(url_for("profile", username=session["user"]))

    return render_template("register.html", page_title="Register")
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log an existing user in, storing the username in the session."""
    if request.method == "POST":
        username = request.form.get("username").lower()
        existing_user = mongo.db.users.find_one({"username": username})
        # Same message for unknown user and wrong password, so the form
        # doesn't reveal which usernames exist.
        if not existing_user or not check_password_hash(
                existing_user["password"], request.form.get("password")):
            flash("Incorrect Username and/or Password")
            return redirect(url_for("login"))
        session["user"] = username
        # greet with the username exactly as typed
        flash("Welcome, {}".format(request.form.get("username")))
        return redirect(url_for("profile", username=session["user"]))

    return render_template("login.html", page_title="Login")
@app.route("/profile/", methods=["GET", "POST"])
def profile():
    """Show the logged-in user's own recipes, newest first."""
    if "user" not in session:
        return redirect(url_for("login"))
    username = session["user"]
    own_recipes = mongo.db.recipes.find(
        {"created_by": username}).sort("_id", -1)
    return render_template(
        "profile.html", username=username,
        recipes=own_recipes, page_title="Profile")
@app.route("/logout")
def logout():
    """Remove the user from the session cookie and return to login."""
    flash("You have been logged out")
    # default avoids a KeyError when /logout is hit with no active session
    session.pop("user", None)
    return redirect(url_for("login"))
# "GET" (was "Get"): Flask uppercases method names internally, but spell it
# uppercase for consistency with every other route in this file.
@app.route("/add_recipe", methods=["GET", "POST"])
def add_recipe():
    """Create a new recipe owned by the logged-in user."""
    if "user" not in session:
        return redirect(url_for("login"))

    if request.method == "POST":
        submit = {
            "category_name": request.form.get("category_name"),
            "recipe_name": request.form.get("recipe_name"),
            "ingredients_list": request.form.get(
                "ingredients_list").splitlines(),
            "recipe_img": request.form.get("recipe_img"),
            "prep_time": request.form.get("prep_time"),
            "cook_time": request.form.get("cook_time"),
            "serves": request.form.get("serves"),
            "instructions": request.form.get("instructions").splitlines(),
            "created_by": session["user"]
        }
        recipe = mongo.db.recipes.insert_one(submit)
        # redirect straight to the newly inserted document
        recipe_id = recipe.inserted_id
        flash("Recipe Successfully Added")
        return redirect(url_for("view_recipe", recipe_id=recipe_id))

    categories = mongo.db.categories.find().sort("category_name")
    return render_template(
        "add_recipe.html", categories=categories, page_title="Insert Recipe")
@app.route("/edit_recipe/<recipe_id>", methods=["GET", "POST"])
def edit_recipe(recipe_id):
    """Replace an existing recipe's document; requires login."""
    if "user" not in session:
        return redirect(url_for("login"))

    if request.method == "POST":
        submit = {
            "category_name": request.form.get("category_name"),
            "recipe_name": request.form.get("recipe_name"),
            # strip whitespace from each ingredient line — the original loop
            # only rebound its loop variable and had no effect
            "ingredients_list": [
                line.strip()
                for line in request.form.get("ingredients_list").splitlines()
            ],
            "recipe_img": request.form.get("recipe_img"),
            "prep_time": request.form.get("prep_time"),
            "cook_time": request.form.get("cook_time"),
            "serves": request.form.get("serves"),
            "instructions": request.form.get("instructions").splitlines(),
            "created_by": session["user"]
        }
        mongo.db.recipes.update({"_id": ObjectId(recipe_id)}, submit)
        flash("Recipe Successfully Updated")
        # (removed dead `if submit:` guard and an unused find_one_or_404)
        return redirect(url_for('view_recipe', recipe_id=recipe_id))

    recipe = mongo.db.recipes.find_one_or_404({"_id": ObjectId(recipe_id)})
    categories = mongo.db.categories.find().sort("category_name")
    return render_template(
        "edit_recipe.html", recipe=recipe, categories=categories,
        page_title="Edit Recipe")
@app.route("/delete_recipe/<recipe_id>")
def delete_recipe(recipe_id):
    """Delete a recipe; requires login like the other mutating routes."""
    # Previously unauthenticated: anyone with the URL could delete recipes.
    if "user" not in session:
        return redirect(url_for("login"))
    mongo.db.recipes.remove({"_id": ObjectId(recipe_id)})
    flash("Recipe Successfully Deleted")
    return redirect(url_for("profile"))
# only admin has access to this page
@app.route("/get_categories")
def get_categories():
    """Admin-only listing of recipe categories."""
    if "user" not in session:
        return redirect(url_for("login"))
    if session['user'] != "admin":
        flash("You do not have permission")
        return redirect(url_for('login'))
    category_list = list(mongo.db.categories.find().sort("category_name", 1))
    return render_template(
        "categories.html", categories=category_list, page_title="Categories")
@app.route("/add_category", methods=["GET", "POST"])
def add_category():
    """Create a new recipe category (admin only)."""
    if "user" not in session:
        return redirect(url_for("login"))
    if session['user'] == "admin":
        if request.method == "POST":
            category = {
                "category_name": request.form.get("category_name")
            }
            mongo.db.categories.insert_one(category)
            flash("New Category Added")
            return redirect(url_for("get_categories"))
        return render_template(
            "add_category.html", page_title="Create Category")
    # logged-in non-admin users are bounced back to login
    flash("You do not have permission")
    return redirect(url_for('login'))
@app.route("/edit_category/<category_id>", methods=["GET", "POST"])
def edit_category(category_id):
    """Rename an existing category (admin only)."""
    if "user" not in session:
        return redirect(url_for("login"))
    if session['user'] == "admin":
        if request.method == "POST":
            submit = {
                "category_name": request.form.get("category_name")
            }
            # replaces the whole document with the new name
            mongo.db.categories.update({"_id": ObjectId(category_id)}, submit)
            flash("Category Successfully Updated")
            return redirect(url_for("get_categories"))
        category = mongo.db.categories.find_one({"_id": ObjectId(category_id)})
        return render_template(
            "edit_category.html", category=category,
            page_title="Edit Category")
    # logged-in non-admin users are bounced back to login
    flash("You do not have permission")
    return redirect(url_for('login'))
@app.route("/delete_category/<category_id>")
def delete_category(category_id):
    """Delete a category; admin-only, matching the other category routes."""
    # Previously unauthenticated, unlike add/edit_category which check admin.
    if "user" not in session:
        return redirect(url_for("login"))
    if session['user'] != "admin":
        flash("You do not have permission")
        return redirect(url_for('login'))
    mongo.db.categories.remove({"_id": ObjectId(category_id)})
    flash("Category Successfully Deleted")
    return redirect(url_for("get_categories"))
# Entry point for running the dev server directly; host and port come from
# the environment (IP/PORT must be set or int() below will fail).
if __name__ == "__main__":
    app.run(host=os.environ.get("IP"),
            port=int(os.environ.get("PORT")),
            debug=False)
| 33.498155
| 79
| 0.630205
|
import os
from flask import (
Flask, flash, render_template, redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
mongo = PyMongo(app)
@app.route("/")
@app.route("/get_recipes")
def get_recipes():
    """Landing page: list every recipe in the collection."""
    recipes = list(mongo.db.recipes.find())
    return render_template(
        "recipes.html", recipes=recipes, page_title="All Recipes")
@app.route("/view_recipe/<recipe_id>")
def view_recipe(recipe_id):
    """Display a single recipe; viewing requires a logged-in user."""
    # Session check first: previously the DB was queried (and could 404)
    # before anonymous users were redirected to login.
    if 'user' not in session:
        return redirect(url_for("login"))
    the_recipe = mongo.db.recipes.find_one_or_404({"_id": ObjectId(recipe_id)})
    return render_template(
        "view_recipe.html", recipes=the_recipe, page_title="View Recipe")
@app.route("/search", methods=["GET", "POST"])
def search():
    """Full-text search over recipes using the MongoDB text index."""
    query = request.form.get("query")
    # GET requests carry no form data; `$search: None` would raise.
    if query:
        recipes = list(mongo.db.recipes.find({"$text": {"$search": query}}))
    else:
        recipes = []
    return render_template(
        "recipes.html", recipes=recipes, page_title="Search Recipe")
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create an account (hashed password) and log the user straight in."""
    if request.method == "POST":
        existing_user = mongo.db.users.find_one(
            {"username": request.form.get("username").lower()})
        if existing_user:
            flash("Username already exists")
            return redirect(url_for("register"))
        # NOTE(review): this local shadows the view function name `register`.
        register = {
            "username": request.form.get("username").lower(),
            "password": generate_password_hash(request.form.get("password"))
        }
        mongo.db.users.insert_one(register)
        # put the new user into the session cookie
        session["user"] = request.form.get("username").lower()
        flash("Registration Successful!")
        return redirect(url_for("profile", username=session["user"]))
    return render_template("register.html", page_title="Register")
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log an existing user in, storing the username in the session."""
    if request.method == "POST":
        existing_user = mongo.db.users.find_one(
            {"username": request.form.get("username").lower()})
        if existing_user:
            if check_password_hash(
                    existing_user["password"], request.form.get("password")):
                session["user"] = request.form.get("username").lower()
                flash("Welcome, {}".format(request.form.get("username")))
                return redirect(url_for("profile", username=session["user"]))
            else:
                # same message as the unknown-user branch below, so the form
                # doesn't reveal which usernames exist
                flash("Incorrect Username and/or Password")
                return redirect(url_for("login"))
        else:
            flash("Incorrect Username and/or Password")
            return redirect(url_for("login"))
    return render_template("login.html", page_title="Login")
@app.route("/profile/", methods=["GET", "POST"])
def profile():
    """Show the logged-in user's own recipes, newest first."""
    if "user" not in session:
        return redirect(url_for("login"))
    recipes = mongo.db.recipes.find(
        {"created_by": session["user"]}).sort("_id", -1)
    return render_template(
        "profile.html", username=session["user"],
        recipes=recipes, page_title="Profile")
@app.route("/logout")
def logout():
    """Remove the user from the session cookie and return to login."""
    flash("You have been logged out")
    # default avoids a KeyError when /logout is hit with no active session
    session.pop("user", None)
    return redirect(url_for("login"))
# "GET" (was "Get"): Flask uppercases method names internally, but spell it
# uppercase for consistency with every other route in this file.
@app.route("/add_recipe", methods=["GET", "POST"])
def add_recipe():
    """Create a new recipe owned by the logged-in user."""
    if "user" not in session:
        return redirect(url_for("login"))
    if request.method == "POST":
        submit = {
            "category_name": request.form.get("category_name"),
            "recipe_name": request.form.get("recipe_name"),
            "ingredients_list": request.form.get(
                "ingredients_list").splitlines(),
            "recipe_img": request.form.get("recipe_img"),
            "prep_time": request.form.get("prep_time"),
            "cook_time": request.form.get("cook_time"),
            "serves": request.form.get("serves"),
            "instructions": request.form.get("instructions").splitlines(),
            "created_by": session["user"]
        }
        recipe = mongo.db.recipes.insert_one(submit)
        recipe_id = recipe.inserted_id
        flash("Recipe Successfully Added")
        return redirect(url_for("view_recipe", recipe_id=recipe_id))
    categories = mongo.db.categories.find().sort("category_name")
    return render_template(
        "add_recipe.html", categories=categories, page_title="Insert Recipe")
@app.route("/edit_recipe/<recipe_id>", methods=["GET", "POST"])
def edit_recipe(recipe_id):
    """Replace an existing recipe's document; requires login."""
    if "user" not in session:
        return redirect(url_for("login"))
    if request.method == "POST":
        submit = {
            "category_name": request.form.get("category_name"),
            "recipe_name": request.form.get("recipe_name"),
            # strip whitespace from each ingredient line — the original loop
            # only rebound its loop variable and had no effect
            "ingredients_list": [
                line.strip()
                for line in request.form.get("ingredients_list").splitlines()
            ],
            "recipe_img": request.form.get("recipe_img"),
            "prep_time": request.form.get("prep_time"),
            "cook_time": request.form.get("cook_time"),
            "serves": request.form.get("serves"),
            "instructions": request.form.get("instructions").splitlines(),
            "created_by": session["user"]
        }
        mongo.db.recipes.update({"_id": ObjectId(recipe_id)}, submit)
        flash("Recipe Successfully Updated")
        # (removed dead `if submit:` guard and an unused find_one_or_404)
        return redirect(url_for('view_recipe', recipe_id=recipe_id))
    recipe = mongo.db.recipes.find_one_or_404({"_id": ObjectId(recipe_id)})
    categories = mongo.db.categories.find().sort("category_name")
    return render_template(
        "edit_recipe.html", recipe=recipe, categories=categories,
        page_title="Edit Recipe")
@app.route("/delete_recipe/<recipe_id>")
def delete_recipe(recipe_id):
    """Delete a recipe; requires login like the other mutating routes."""
    # Previously unauthenticated: anyone with the URL could delete recipes.
    if "user" not in session:
        return redirect(url_for("login"))
    mongo.db.recipes.remove({"_id": ObjectId(recipe_id)})
    flash("Recipe Successfully Deleted")
    return redirect(url_for("profile"))
# only admin has access to this page
@app.route("/get_categories")
def get_categories():
    """Admin-only listing of recipe categories."""
    if "user" not in session:
        return redirect(url_for("login"))
    categories = list(mongo.db.categories.find().sort("category_name", 1))
    if session['user'] == "admin":
        return render_template(
            "categories.html", categories=categories, page_title="Categories")
    # logged-in non-admin users are bounced back to login
    flash("You do not have permission")
    return redirect(url_for('login'))
@app.route("/add_category", methods=["GET", "POST"])
def add_category():
    """Create a new recipe category (admin only)."""
    if "user" not in session:
        return redirect(url_for("login"))
    if session['user'] == "admin":
        if request.method == "POST":
            category = {
                "category_name": request.form.get("category_name")
            }
            mongo.db.categories.insert_one(category)
            flash("New Category Added")
            return redirect(url_for("get_categories"))
        return render_template(
            "add_category.html", page_title="Create Category")
    # logged-in non-admin users are bounced back to login
    flash("You do not have permission")
    return redirect(url_for('login'))
@app.route("/edit_category/<category_id>", methods=["GET", "POST"])
def edit_category(category_id):
    """Rename an existing category (admin only)."""
    if "user" not in session:
        return redirect(url_for("login"))
    if session['user'] == "admin":
        if request.method == "POST":
            submit = {
                "category_name": request.form.get("category_name")
            }
            # replaces the whole document with the new name
            mongo.db.categories.update({"_id": ObjectId(category_id)}, submit)
            flash("Category Successfully Updated")
            return redirect(url_for("get_categories"))
        category = mongo.db.categories.find_one({"_id": ObjectId(category_id)})
        return render_template(
            "edit_category.html", category=category,
            page_title="Edit Category")
    # logged-in non-admin users are bounced back to login
    flash("You do not have permission")
    return redirect(url_for('login'))
@app.route("/delete_category/<category_id>")
def delete_category(category_id):
    """Delete a category; admin-only, matching the other category routes."""
    # Previously unauthenticated, unlike add/edit_category which check admin.
    if "user" not in session:
        return redirect(url_for("login"))
    if session['user'] != "admin":
        flash("You do not have permission")
        return redirect(url_for('login'))
    mongo.db.categories.remove({"_id": ObjectId(category_id)})
    flash("Category Successfully Deleted")
    return redirect(url_for("get_categories"))
# Entry point for running the dev server directly; host and port come from
# the environment (IP/PORT must be set or int() below will fail).
if __name__ == "__main__":
    app.run(host=os.environ.get("IP"),
            port=int(os.environ.get("PORT")),
            debug=False)
| true
| true
|
f7163ae8d9b4b8ad1f3485dbb15b35d439655fb7
| 7,944
|
py
|
Python
|
Algo and DSA/LeetCode-Solutions-master/Python/maximum-students-taking-exam.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 3,269
|
2018-10-12T01:29:40.000Z
|
2022-03-31T17:58:41.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/maximum-students-taking-exam.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 53
|
2018-12-16T22:54:20.000Z
|
2022-02-25T08:31:20.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/maximum-students-taking-exam.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 1,236
|
2018-10-12T02:51:40.000Z
|
2022-03-30T13:30:37.000Z
|
# Time: O(m * n * sqrt(m * n))
# Space: O(m * n)
# the problem is the same as google codejam 2008 round 3 problem C
# https://github.com/kamyu104/GoogleCodeJam-2008/blob/master/Round%203/no_cheating.py
import collections
from functools import partial
# Time: O(E * sqrt(V))
# Space: O(V)
# Source code from http://code.activestate.com/recipes/123641-hopcroft-karp-bipartite-matching/
# Hopcroft-Karp bipartite max-cardinality matching and max independent set
# David Eppstein, UC Irvine, 27 Apr 2002
def bipartiteMatch(graph):
    '''Find maximum cardinality matching of a bipartite graph (U,V,E).
    The input format is a dictionary mapping members of U to a list
    of their neighbors in V.  The output is a triple (M,A,B) where M is a
    dictionary mapping members of V to their matches in U, A is the part
    of the maximum independent set in U, and B is the part of the MIS in V.
    The same object may occur in both U and V, and is treated as two
    distinct vertices if this happens.'''

    # initialize greedy matching (redundant, but faster than full search)
    matching = {}
    for u in graph:
        for v in graph[u]:
            if v not in matching:
                matching[v] = u
                break

    # Hopcroft-Karp phases: each iteration builds a BFS layering and then
    # augments along a maximal set of shortest vertex-disjoint paths.
    while 1:
        # structure residual graph into layers
        # pred[u] gives the neighbor in the previous layer for u in U
        # preds[v] gives a list of neighbors in the previous layer for v in V
        # unmatched gives a list of unmatched vertices in final layer of V,
        # and is also used as a flag value for pred[u] when u is in the first layer
        preds = {}
        unmatched = []
        pred = dict([(u,unmatched) for u in graph])
        for v in matching:
            del pred[matching[v]]
        layer = list(pred)

        # repeatedly extend layering structure by another pair of layers
        while layer and not unmatched:
            newLayer = {}
            for u in layer:
                for v in graph[u]:
                    if v not in preds:
                        newLayer.setdefault(v,[]).append(u)
            layer = []
            for v in newLayer:
                preds[v] = newLayer[v]
                if v in matching:
                    layer.append(matching[v])
                    pred[matching[v]] = v
                else:
                    unmatched.append(v)

        # did we finish layering without finding any alternating paths?
        if not unmatched:
            unlayered = {}
            for u in graph:
                for v in graph[u]:
                    if v not in preds:
                        unlayered[v] = None
            return (matching,list(pred),list(unlayered))

        # recursively search backward through layers to find alternating paths
        # recursion returns true if found path, false otherwise
        # NOTE(review): `recurse` is kept as the reference implementation but
        # is not invoked — the stack-based `recurse_iter` below is used
        # instead (presumably to avoid recursion-depth limits; confirm).
        def recurse(v):
            if v in preds:
                L = preds[v]
                del preds[v]
                for u in L:
                    if u in pred:
                        pu = pred[u]
                        del pred[u]
                        if pu is unmatched or recurse(pu):
                            matching[v] = u
                            return 1
            return 0

        def recurse_iter(v):
            # Explicit-stack reformulation of `recurse`: divide pushes the
            # work, conquer walks a vertex's candidate list, postprocess
            # commits the match once a path has been found.
            def divide(v):
                if v not in preds:
                    return
                L = preds[v]
                del preds[v]
                for u in L :
                    if u in pred and pred[u] is unmatched: # early return
                        del pred[u]
                        matching[v] = u
                        ret[0] = True
                        return
                stk.append(partial(conquer, v, iter(L)))

            def conquer(v, it):
                for u in it:
                    if u not in pred:
                        continue
                    pu = pred[u]
                    del pred[u]
                    stk.append(partial(postprocess, v, u, it))
                    stk.append(partial(divide, pu))
                    return

            def postprocess(v, u, it):
                if not ret[0]:
                    stk.append(partial(conquer, v, it))
                    return
                matching[v] = u

            ret, stk = [False], []
            stk.append(partial(divide, v))
            while stk:
                stk.pop()()
            return ret[0]

        for v in unmatched: recurse_iter(v)
# Hopcroft-Karp bipartite matching
class Solution(object):
    def maxStudents(self, seats):
        """
        :type seats: List[List[str]]
        :rtype: int

        Builds a bipartite graph between usable seats in even and odd
        columns (the six "can cheat" directions) and subtracts the maximum
        matching (Hopcroft-Karp via bipartiteMatch) from the seat count.
        """
        directions = [(-1, -1), (0, -1), (1, -1), (-1, 1), (0, 1), (1, 1)]
        E, count = collections.defaultdict(list), 0
        # range() instead of Python 2-only xrange() so this also runs on
        # Python 3; behaviour on Python 2 is unchanged.
        for i in range(len(seats)):
            for j in range(len(seats[0])):
                if seats[i][j] != '.':
                    continue
                count += 1
                # only even columns originate edges, keeping the graph bipartite
                if j%2:
                    continue
                for dx, dy in directions:
                    ni, nj = i+dx, j+dy
                    if 0 <= ni < len(seats) and \
                       0 <= nj < len(seats[0]) and \
                       seats[ni][nj] == '.':
                        E[i*len(seats[0])+j].append(ni*len(seats[0])+nj)
        return count-len(bipartiteMatch(E)[0])
# Time: O(|V| * |E|) = O(m^2 * n^2)
# Space: O(|V| + |E|) = O(m * n)
# Hungarian bipartite matching
class Solution2(object):
    def maxStudents(self, seats):
        """
        :type seats: List[List[str]]
        :rtype: int

        Hungarian-algorithm variant: augmenting-path DFS from every usable
        even-column seat; answer = usable seats - maximum matching.
        """
        directions = [(-1, -1), (0, -1), (1, -1), (-1, 1), (0, 1), (1, 1)]

        def dfs(seats, e, lookup, matching):
            # Try to match seat `e` to a neighbouring seat, re-matching
            # current occupants along an augmenting path.
            i, j = e
            for dx, dy in directions:
                ni, nj = i+dx, j+dy
                if 0 <= ni < len(seats) and 0 <= nj < len(seats[0]) and \
                   seats[ni][nj] == '.' and not lookup[ni][nj]:
                    lookup[ni][nj] = True
                    if matching[ni][nj] == -1 or dfs(seats, matching[ni][nj], lookup, matching):
                        matching[ni][nj] = e
                        return True
            return False

        def Hungarian(seats):
            result = 0
            # range() instead of Python 2-only xrange() so this also runs
            # on Python 3; behaviour on Python 2 is unchanged.
            matching = [[-1]*len(seats[0]) for _ in range(len(seats))]
            for i in range(len(seats)):
                for j in range(0, len(seats[0]), 2):
                    if seats[i][j] != '.':
                        continue
                    lookup = [[False]*len(seats[0]) for _ in range(len(seats))]
                    if dfs(seats, (i, j), lookup, matching):
                        result += 1
            return result

        count = 0
        for i in range(len(seats)):
            for j in range(len(seats[0])):
                if seats[i][j] == '.':
                    count += 1
        return count-Hungarian(seats)
# Time: O(m * 2^n * 2^n) = O(m * 4^n)
# Space: O(2^n)
# dp solution
class Solution3(object):
    def maxStudents(self, seats):
        """
        :type seats: List[List[str]]
        :rtype: int

        Row-by-row bitmask DP: dp maps a row's occupancy mask to the best
        student count achievable up to that row.
        """
        def popcount(n):
            # Kernighan's trick: clears the lowest set bit per iteration.
            result = 0
            while n:
                n &= n - 1
                result += 1
            return result

        # range()/items()/values() instead of the Python 2-only
        # xrange()/iteritems()/itervalues() so this also runs on Python 3;
        # all three behave identically on Python 2 here.
        dp = {0: 0}
        for row in seats:
            invalid_mask = sum(1 << c for c, v in enumerate(row) if v == '#')
            new_dp = {}
            for mask1, v1 in dp.items():
                for mask2 in range(1 << len(seats[0])):
                    # reject masks occupying broken seats, sitting diagonally
                    # behind the previous row, or side by side in this row
                    if (mask2 & invalid_mask) or \
                       (mask2 & (mask1 << 1)) or (mask2 & (mask1 >> 1)) or \
                       (mask2 & (mask2 << 1)) or (mask2 & (mask2 >> 1)):
                        continue
                    new_dp[mask2] = max(new_dp.get(mask2, 0), v1+popcount(mask2))
            dp = new_dp
        return max(dp.values()) if dp else 0
| 35.783784
| 96
| 0.463746
|
import collections
from functools import partial
def bipartiteMatch(graph):
    '''Hopcroft-Karp maximum-cardinality matching of a bipartite graph.

    `graph` maps members of U to lists of neighbors in V. Returns
    (matching, A, B) where `matching` maps members of V to their matches
    in U and A/B are the two parts of a maximum independent set.'''
    # greedy initial matching
    matching = {}
    for u in graph:
        for v in graph[u]:
            if v not in matching:
                matching[v] = u
                break
    # each phase: BFS layering, then augment along shortest disjoint paths
    while 1:
        preds = {}
        unmatched = []
        # `unmatched` doubles as the sentinel marking first-layer U vertices
        pred = dict([(u,unmatched) for u in graph])
        for v in matching:
            del pred[matching[v]]
        layer = list(pred)
        while layer and not unmatched:
            newLayer = {}
            for u in layer:
                for v in graph[u]:
                    if v not in preds:
                        newLayer.setdefault(v,[]).append(u)
            layer = []
            for v in newLayer:
                preds[v] = newLayer[v]
                if v in matching:
                    layer.append(matching[v])
                    pred[matching[v]] = v
                else:
                    unmatched.append(v)
        # no augmenting path found: matching is maximum, derive the MIS parts
        if not unmatched:
            unlayered = {}
            for u in graph:
                for v in graph[u]:
                    if v not in preds:
                        unlayered[v] = None
            return (matching,list(pred),list(unlayered))
        # NOTE(review): `recurse` is the reference recursion; the stack-based
        # `recurse_iter` below is what is actually invoked.
        def recurse(v):
            if v in preds:
                L = preds[v]
                del preds[v]
                for u in L:
                    if u in pred:
                        pu = pred[u]
                        del pred[u]
                        if pu is unmatched or recurse(pu):
                            matching[v] = u
                            return 1
            return 0
        def recurse_iter(v):
            def divide(v):
                if v not in preds:
                    return
                L = preds[v]
                del preds[v]
                for u in L :
                    if u in pred and pred[u] is unmatched:
                        del pred[u]
                        matching[v] = u
                        ret[0] = True
                        return
                stk.append(partial(conquer, v, iter(L)))
            def conquer(v, it):
                for u in it:
                    if u not in pred:
                        continue
                    pu = pred[u]
                    del pred[u]
                    stk.append(partial(postprocess, v, u, it))
                    stk.append(partial(divide, pu))
                    return
            def postprocess(v, u, it):
                if not ret[0]:
                    stk.append(partial(conquer, v, it))
                    return
                matching[v] = u
            ret, stk = [False], []
            stk.append(partial(divide, v))
            while stk:
                stk.pop()()
            return ret[0]
        for v in unmatched: recurse_iter(v)
class Solution(object):
    # Maximum students = free seats - maximum bipartite matching of the
    # "conflict" graph (Konig's theorem). Python 2 code (xrange).
    def maxStudents(self, seats):
        # Seats in even columns form the left side of the bipartite graph;
        # each direction points at a seat a student could cheat from/to.
        directions = [(-1, -1), (0, -1), (1, -1), (-1, 1), (0, 1), (1, 1)]
        E, count = collections.defaultdict(list), 0
        for i in xrange(len(seats)):
            for j in xrange(len(seats[0])):
                if seats[i][j] != '.':
                    continue
                count += 1
                # Only build edges from even columns to keep the graph bipartite.
                if j%2:
                    continue
                for dx, dy in directions:
                    ni, nj = i+dx, j+dy
                    if 0 <= ni < len(seats) and \
                       0 <= nj < len(seats[0]) and \
                       seats[ni][nj] == '.':
                        # Flatten (row, col) into a single integer vertex id.
                        E[i*len(seats[0])+j].append(ni*len(seats[0])+nj)
        # bipartiteMatch (module-level Hopcroft-Karp) returns the matching dict first.
        return count-len(bipartiteMatch(E)[0])
class Solution2(object):
    # Same reduction as Solution, but with the classic O(V*E) Hungarian
    # augmenting-path algorithm instead of Hopcroft-Karp. Python 2 (xrange).
    def maxStudents(self, seats):
        directions = [(-1, -1), (0, -1), (1, -1), (-1, 1), (0, 1), (1, 1)]
        def dfs(seats, e, lookup, matching):
            # Try to find an augmenting path starting at left-side seat e=(i, j).
            i, j = e
            for dx, dy in directions:
                ni, nj = i+dx, j+dy
                if 0 <= ni < len(seats) and 0 <= nj < len(seats[0]) and \
                   seats[ni][nj] == '.' and not lookup[ni][nj]:
                    lookup[ni][nj] = True
                    # Take the seat if free, or re-route its current partner.
                    if matching[ni][nj] == -1 or dfs(seats, matching[ni][nj], lookup, matching):
                        matching[ni][nj] = e
                        return True
            return False
        def Hungarian(seats):
            # matching[r][c] = left-side seat matched to (r, c), or -1 if free.
            result = 0
            matching = [[-1]*len(seats[0]) for _ in xrange(len(seats))]
            for i in xrange(len(seats)):
                # Even columns are the left partition of the bipartite graph.
                for j in xrange(0, len(seats[0]), 2):
                    if seats[i][j] != '.':
                        continue
                    # Fresh visited grid per augmenting attempt.
                    lookup = [[False]*len(seats[0]) for _ in xrange(len(seats))]
                    if dfs(seats, (i, j), lookup, matching):
                        result += 1
            return result
        count = 0
        for i in xrange(len(seats)):
            for j in xrange(len(seats[0])):
                if seats[i][j] == '.':
                    count += 1
        # Konig's theorem: max independent set = free seats - max matching.
        return count-Hungarian(seats)
class Solution3(object):
    # Row-by-row bitmask DP; dp maps previous row's seating mask -> best total.
    # Python 2 code (xrange/iteritems/itervalues).
    def maxStudents(self, seats):
        def popcount(n):
            # Kernighan bit-count: clears the lowest set bit each iteration.
            result = 0
            while n:
                n &= n - 1
                result += 1
            return result
        dp = {0: 0}
        for row in seats:
            # Bits of broken ('#') seats; mask2 must avoid them.
            invalid_mask = sum(1 << c for c, v in enumerate(row) if v == '#')
            new_dp = {}
            for mask1, v1 in dp.iteritems():
                for mask2 in xrange(1 << len(seats[0])):
                    # Reject broken seats, diagonal adjacency to the previous
                    # row (mask1), and horizontal adjacency within mask2.
                    if (mask2 & invalid_mask) or \
                       (mask2 & (mask1 << 1)) or (mask2 & (mask1 >> 1)) or \
                       (mask2 & (mask2 << 1)) or (mask2 & (mask2 >> 1)):
                        continue
                    new_dp[mask2] = max(new_dp.get(mask2, 0), v1+popcount(mask2))
            dp = new_dp
        return max(dp.itervalues()) if dp else 0
| true
| true
|
f7163b255379b5cf9193461da5b080f53d6d16ab
| 3,775
|
py
|
Python
|
tests/settings.py
|
ShreeshaRelysys/openwisp-utils
|
7c0b5f249b0e8e1f3af7bf1942b6543c9375dd75
|
[
"BSD-3-Clause"
] | null | null | null |
tests/settings.py
|
ShreeshaRelysys/openwisp-utils
|
7c0b5f249b0e8e1f3af7bf1942b6543c9375dd75
|
[
"BSD-3-Clause"
] | 1
|
2022-01-25T17:46:52.000Z
|
2022-01-25T17:46:52.000Z
|
tests/settings.py
|
ShreeshaRelysys/openwisp-utils
|
7c0b5f249b0e8e1f3af7bf1942b6543c9375dd75
|
[
"BSD-3-Clause"
] | null | null | null |
"""Django settings used by the openwisp-utils test suite (not for production)."""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Hard-coded key is acceptable here: this settings module only drives tests.
SECRET_KEY = '@s8$swhj9du^aglt5+@ut^)wepr+un1m7r*+ixcq(-5i^st=y^'
# Any non-empty value of the env var enables headless Selenium runs.
SELENIUM_HEADLESS = True if os.environ.get('SELENIUM_HEADLESS', False) else False
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # test project
    'test_project',
    'openwisp_utils.admin_theme',
    'django.contrib.sites',
    # admin
    'django.contrib.admin',
    # rest framework
    'rest_framework',
    'drf_yasg',
]
EXTENDED_APPS = ('openwisp_controller', 'django_loci')  # Just for testing purposes
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # openwisp finder that also serves statics of EXTENDED_APPS
    'openwisp_utils.staticfiles.DependencyFinder',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'OPTIONS': {
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
                # loads templates from EXTENDED_APPS as well
                'openwisp_utils.loaders.DependencyLoader',
            ],
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'openwisp_utils.admin_theme.context_processor.menu_groups',
                'openwisp_utils.admin_theme.context_processor.admin_theme_settings',
                'test_project.context_processors.test_theme_helper',
            ],
        },
    }
]
DATABASES = {
    'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'openwisp_utils.db'}
}
OPENWISP_ADMIN_SITE_CLASS = 'test_project.site.CustomAdminSite'
SITE_ID = 1
EMAIL_PORT = '1025'
LOGIN_REDIRECT_URL = 'admin:index'
ACCOUNT_LOGOUT_REDIRECT_URL = LOGIN_REDIRECT_URL
# during development only
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# only for automated test purposes
REST_FRAMEWORK = {
    'DEFAULT_THROTTLE_CLASSES': [
        'test_project.api.throttling.CustomScopedRateThrottle'
    ],
    'DEFAULT_THROTTLE_RATES': {'anon': '20/hour'},
}
CACHES = {'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}}
OPENWISP_TEST_ADMIN_MENU_ITEMS = [{'model': 'test_project.Project'}]
OPENWISP_ADMIN_THEME_LINKS = [
    {
        'type': 'text/css',
        'href': 'admin/css/openwisp.css',
        'rel': 'stylesheet',
        'media': 'all',
    },
    {
        'type': 'text/css',
        'href': 'menu-test.css',
        'rel': 'stylesheet',
        'media': 'all',
    },  # custom css for testing menu icons
    {
        'type': 'image/x-icon',
        'href': 'ui/openwisp/images/favicon.png',
        'rel': 'icon',
    },
]
OPENWISP_ADMIN_THEME_JS = ['dummy.js']
# local settings must be imported before test runner otherwise they'll be ignored
try:
    from local_settings import *
except ImportError:
    pass
| 28.816794
| 84
| 0.668344
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '@s8$swhj9du^aglt5+@ut^)wepr+un1m7r*+ixcq(-5i^st=y^'
SELENIUM_HEADLESS = True if os.environ.get('SELENIUM_HEADLESS', False) else False
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'test_project',
'openwisp_utils.admin_theme',
'django.contrib.sites',
'django.contrib.admin',
'rest_framework',
'drf_yasg',
]
EXTENDED_APPS = ('openwisp_controller', 'django_loci')
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'openwisp_utils.staticfiles.DependencyFinder',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'OPTIONS': {
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'openwisp_utils.loaders.DependencyLoader',
],
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'openwisp_utils.admin_theme.context_processor.menu_groups',
'openwisp_utils.admin_theme.context_processor.admin_theme_settings',
'test_project.context_processors.test_theme_helper',
],
},
}
]
DATABASES = {
'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'openwisp_utils.db'}
}
OPENWISP_ADMIN_SITE_CLASS = 'test_project.site.CustomAdminSite'
SITE_ID = 1
EMAIL_PORT = '1025'
LOGIN_REDIRECT_URL = 'admin:index'
ACCOUNT_LOGOUT_REDIRECT_URL = LOGIN_REDIRECT_URL
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
REST_FRAMEWORK = {
'DEFAULT_THROTTLE_CLASSES': [
'test_project.api.throttling.CustomScopedRateThrottle'
],
'DEFAULT_THROTTLE_RATES': {'anon': '20/hour'},
}
CACHES = {'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}}
OPENWISP_TEST_ADMIN_MENU_ITEMS = [{'model': 'test_project.Project'}]
OPENWISP_ADMIN_THEME_LINKS = [
{
'type': 'text/css',
'href': 'admin/css/openwisp.css',
'rel': 'stylesheet',
'media': 'all',
},
{
'type': 'text/css',
'href': 'menu-test.css',
'rel': 'stylesheet',
'media': 'all',
},
{
'type': 'image/x-icon',
'href': 'ui/openwisp/images/favicon.png',
'rel': 'icon',
},
]
OPENWISP_ADMIN_THEME_JS = ['dummy.js']
try:
from local_settings import *
except ImportError:
pass
| true
| true
|
f7163cfff64ff8625e0a556e900a1ef0e9f60f52
| 13,630
|
py
|
Python
|
mpe/environment.py
|
semitable/multiagent-particle-envs
|
2cef12f72a9192a819ef289646526801c39fb909
|
[
"MIT"
] | null | null | null |
mpe/environment.py
|
semitable/multiagent-particle-envs
|
2cef12f72a9192a819ef289646526801c39fb909
|
[
"MIT"
] | null | null | null |
mpe/environment.py
|
semitable/multiagent-particle-envs
|
2cef12f72a9192a819ef289646526801c39fb909
|
[
"MIT"
] | 2
|
2022-01-12T17:51:03.000Z
|
2022-03-31T07:31:31.000Z
|
import gym
from gym import spaces
from gym.envs.registration import EnvSpec
import numpy as np
from mpe.multi_discrete import MultiDiscrete
import copy
# environment for all agents in the multiagent world
# currently code assumes that no agents will be created/destroyed at runtime!
class MultiAgentEnv(gym.Env):
    """Gym environment wrapping a multi-agent particle `world`.

    Scenario behaviour (reset/reward/observation/info/done) is injected via
    callbacks; the env itself only orchestrates stepping and rendering.
    """
    metadata = {
        'render.modes' : ['human', 'rgb_array']
    }
    def __init__(self, world, reset_callback=None, reward_callback=None,
                 observation_callback=None, info_callback=None,
                 done_callback=None, shared_viewer=True):
        # Deep-copy so multiple envs built from the same world don't share state.
        world = copy.deepcopy(world)
        self.world = world
        self.agents = self.world.policy_agents
        # set required vectorized gym env property
        self.n = len(world.policy_agents)
        # scenario callbacks
        self.reset_callback = reset_callback
        self.reward_callback = reward_callback
        self.observation_callback = observation_callback
        self.info_callback = info_callback
        self.done_callback = done_callback
        # environment parameters
        self.discrete_action_space = True
        # if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector
        self.discrete_action_input = False
        # if true, even the action is continuous, action will be performed discretely
        self.force_discrete_action = world.discrete_action if hasattr(world, 'discrete_action') else False
        # if true, every agent has the same reward
        self.shared_reward = world.collaborative if hasattr(world, 'collaborative') else False
        self.time = 0
        # configure spaces
        self.action_space = []
        self.observation_space = []
        for agent in self.agents:
            total_action_space = []
            # physical action space
            if self.discrete_action_space:
                u_action_space = spaces.Discrete(world.dim_p * 2 + 1)
            else:
                u_action_space = spaces.Box(low=-agent.u_range, high=+agent.u_range, shape=(world.dim_p,), dtype=np.float32)
            if agent.movable:
                total_action_space.append(u_action_space)
            # communication action space
            if self.discrete_action_space:
                c_action_space = spaces.Discrete(world.dim_c)
            else:
                c_action_space = spaces.Box(low=0.0, high=1.0, shape=(world.dim_c,), dtype=np.float32)
            if not agent.silent:
                total_action_space.append(c_action_space)
            # total action space
            if len(total_action_space) > 1:
                # all action spaces are discrete, so simplify to MultiDiscrete action space
                if all([isinstance(act_space, spaces.Discrete) for act_space in total_action_space]):
                    act_space = MultiDiscrete([[0, act_space.n - 1] for act_space in total_action_space])
                else:
                    act_space = spaces.Tuple(total_action_space)
                self.action_space.append(act_space)
            else:
                self.action_space.append(total_action_space[0])
            # observation space
            obs_dim = len(observation_callback(agent, self.world))
            self.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32))
            agent.action.c = np.zeros(self.world.dim_c)
        # Expose gym-style Tuple spaces (one sub-space per agent).
        self.action_space = spaces.Tuple(tuple(self.action_space))
        self.observation_space = spaces.Tuple(tuple(self.observation_space))
        self.n_agents = self.n
        # rendering
        self.shared_viewer = shared_viewer
        if self.shared_viewer:
            self.viewers = [None]
        else:
            self.viewers = [None] * self.n
        self._reset_render()
    def seed(self, seed):
        # Delegates RNG seeding to the world object.
        self.world.seed(seed)
    def step(self, action_n):
        """Apply one integer action per agent, advance the world one tick.

        Returns (obs_n, reward_n, done_n, info_n), each indexed per agent.
        """
        # Convert integer actions to one-hot vectors.
        # NOTE(review): acsp.n assumes every per-agent space is Discrete —
        # confirm before enabling continuous/MultiDiscrete actions.
        one_hot_actions = []
        for act, acsp in zip(action_n, self.action_space):
            one_hot = np.zeros(acsp.n)
            one_hot[act] = 1.0
            one_hot_actions.append(one_hot)
        action_n = one_hot_actions
        obs_n = []
        reward_n = []
        done_n = []
        info_n = {'n': []}
        self.agents = self.world.policy_agents
        # set action for each agent
        for i, agent in enumerate(self.agents):
            self._set_action(action_n[i], agent, self.action_space[i])
        # advance world state
        self.world.step()
        # record observation for each agent
        for agent in self.agents:
            obs_n.append(self._get_obs(agent))
            reward_n.append(self._get_reward(agent))
            done_n.append(self._get_done(agent))
            info_n['n'].append(self._get_info(agent))
        # all agents get total reward in cooperative case
        reward = np.sum(reward_n)
        if self.shared_reward:
            reward_n = [reward] * self.n
        return tuple(obs_n), reward_n, done_n, info_n
    def reset(self):
        """Reset the world via the scenario callback and return initial observations."""
        # reset world
        self.reset_callback(self.world)
        # reset renderer
        self._reset_render()
        # record observations for each agent
        obs_n = []
        self.agents = self.world.policy_agents
        for agent in self.agents:
            obs_n.append(self._get_obs(agent))
        return tuple(obs_n)
    # get info used for benchmarking
    def _get_info(self, agent):
        if self.info_callback is None:
            return {}
        return self.info_callback(agent, self.world)
    # get observation for a particular agent
    def _get_obs(self, agent):
        if self.observation_callback is None:
            return np.zeros(0)
        return self.observation_callback(agent, self.world).astype(np.float32)
    # get dones for a particular agent
    # unused right now -- agents are allowed to go beyond the viewing screen
    def _get_done(self, agent):
        if self.done_callback is None:
            return False
        return self.done_callback(agent, self.world)
    # get reward for a particular agent
    def _get_reward(self, agent):
        if self.reward_callback is None:
            return 0.0
        return self.reward_callback(agent, self.world)
    # set env action for a particular agent
    def _set_action(self, action, agent, action_space, time=None):
        """Decode a (possibly one-hot) action vector into agent.action.u / .c."""
        agent.action.u = np.zeros(self.world.dim_p)
        agent.action.c = np.zeros(self.world.dim_c)
        # process action
        if isinstance(action_space, MultiDiscrete):
            # Split the flat vector into one slice per sub-action.
            act = []
            size = action_space.high - action_space.low + 1
            index = 0
            for s in size:
                act.append(action[index:(index+s)])
                index += s
            action = act
        else:
            action = [action]
        if agent.movable:
            # physical action
            if self.discrete_action_input:
                agent.action.u = np.zeros(self.world.dim_p)
                # process discrete action
                if action[0] == 1: agent.action.u[0] = -1.0
                if action[0] == 2: agent.action.u[0] = +1.0
                if action[0] == 3: agent.action.u[1] = -1.0
                if action[0] == 4: agent.action.u[1] = +1.0
            else:
                if self.force_discrete_action:
                    # Keep only the strongest component (argmax -> one-hot).
                    d = np.argmax(action[0])
                    action[0][:] = 0.0
                    action[0][d] = 1.0
                if self.discrete_action_space:
                    # One-hot layout: [noop, +x, -x, +y, -y].
                    agent.action.u[0] += action[0][1] - action[0][2]
                    agent.action.u[1] += action[0][3] - action[0][4]
                else:
                    agent.action.u = action[0]
            sensitivity = 5.0
            if agent.accel is not None:
                sensitivity = agent.accel
            agent.action.u *= sensitivity
            action = action[1:]
        if not agent.silent:
            # communication action
            if self.discrete_action_input:
                agent.action.c = np.zeros(self.world.dim_c)
                agent.action.c[action[0]] = 1.0
            else:
                agent.action.c = action[0]
            action = action[1:]
        # make sure we used all elements of action
        assert len(action) == 0
    # reset rendering assets
    def _reset_render(self):
        self.render_geoms = None
        self.render_geoms_xform = None
    # render environment
    def render(self, mode='human'):
        """Render all viewers; returns an RGB array (or list) in 'rgb_array' mode."""
        if mode == 'human':
            # Print each agent's broadcast communication as a letter.
            alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
            message = ''
            for agent in self.world.agents:
                comm = []  # NOTE(review): unused accumulator, kept as-is
                for other in self.world.agents:
                    if other is agent: continue
                    if np.all(other.state.c == 0):
                        word = '_'
                    else:
                        word = alphabet[np.argmax(other.state.c)]
                    message += (other.name + ' to ' + agent.name + ': ' + word + '   ')
            print(message)
        for i in range(len(self.viewers)):
            # create viewers (if necessary)
            if self.viewers[i] is None:
                # import rendering only if we need it (and don't import for headless machines)
                #from gym.envs.classic_control import rendering
                from mpe import rendering
                self.viewers[i] = rendering.Viewer(700,700)
        # create rendering geometry
        if self.render_geoms is None:
            # import rendering only if we need it (and don't import for headless machines)
            #from gym.envs.classic_control import rendering
            from mpe import rendering
            self.render_geoms = []
            self.render_geoms_xform = []
            for entity in self.world.entities:
                geom = rendering.make_circle(entity.size)
                xform = rendering.Transform()
                if 'agent' in entity.name:
                    geom.set_color(*entity.color, alpha=0.5)
                else:
                    geom.set_color(*entity.color)
                geom.add_attr(xform)
                self.render_geoms.append(geom)
                self.render_geoms_xform.append(xform)
            # add geoms to viewer
            for viewer in self.viewers:
                viewer.geoms = []
                for geom in self.render_geoms:
                    viewer.add_geom(geom)
        results = []
        for i in range(len(self.viewers)):
            from mpe import rendering
            # update bounds to center around agent
            cam_range = 1
            if self.shared_viewer:
                pos = np.zeros(self.world.dim_p)
            else:
                pos = self.agents[i].state.p_pos
            self.viewers[i].set_bounds(pos[0]-cam_range,pos[0]+cam_range,pos[1]-cam_range,pos[1]+cam_range)
            # update geometry positions
            for e, entity in enumerate(self.world.entities):
                self.render_geoms_xform[e].set_translation(*entity.state.p_pos)
            # render to display or array
            results.append(self.viewers[i].render(return_rgb_array = mode=='rgb_array'))
        if self.shared_viewer:
            assert len(results) == 1
            return results[0]
        return results
    # create receptor field locations in local coordinate frame
    def _make_receptor_locations(self, agent):
        receptor_type = 'polar'
        range_min = 0.05 * 2.0
        range_max = 1.00
        dx = []
        # circular receptive field
        if receptor_type == 'polar':
            for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):
                for distance in np.linspace(range_min, range_max, 3):
                    dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))
            # add origin
            dx.append(np.array([0.0, 0.0]))
        # grid receptive field
        if receptor_type == 'grid':
            for x in np.linspace(-range_max, +range_max, 5):
                for y in np.linspace(-range_max, +range_max, 5):
                    dx.append(np.array([x,y]))
        return dx
    def close(self):
        # Close any viewers that were actually created.
        for viewer in self.viewers:
            if viewer:
                viewer.close()
# vectorized wrapper for a batch of multi-agent environments
# assumes all environments have the same observation and action space
class BatchMultiAgentEnv(gym.Env):
    """Vectorized wrapper over a batch of multi-agent environments.

    Assumes every wrapped env exposes the same observation and action space;
    per-agent lists from all envs are concatenated into flat lists.
    """
    metadata = {
        'runtime.vectorized': True,
        'render.modes' : ['human', 'rgb_array']
    }
    def __init__(self, env_batch):
        self.env_batch = env_batch
    @property
    def n(self):
        # Total number of agents across the whole batch.
        return np.sum([env.n for env in self.env_batch])
    @property
    def action_space(self):
        return self.env_batch[0].action_space
    @property
    def observation_space(self):
        return self.env_batch[0].observation_space
    def step(self, action_n, time):
        # NOTE(review): passes `time` through to env.step(), but
        # MultiAgentEnv.step() above only takes action_n — confirm which
        # env type this wrapper is meant for before using it.
        obs_n = []
        reward_n = []
        done_n = []
        info_n = {'n': []}
        i = 0
        for env in self.env_batch:
            # Slice out this env's share of the flat action list.
            obs, reward, done, _ = env.step(action_n[i:(i+env.n)], time)
            i += env.n
            obs_n += obs
            # reward = [r / len(self.env_batch) for r in reward]
            reward_n += reward
            done_n += done
        return obs_n, reward_n, done_n, info_n
    def reset(self):
        obs_n = []
        for env in self.env_batch:
            obs_n += env.reset()
        return obs_n
    # render environment
    def render(self, mode='human', close=True):
        # NOTE(review): forwards `close` positionally, but MultiAgentEnv.render()
        # has signature (self, mode='human') — this would raise TypeError there.
        results_n = []
        for env in self.env_batch:
            results_n += env.render(mode, close)
        return results_n
| 37.651934
| 124
| 0.574101
|
import gym
from gym import spaces
from gym.envs.registration import EnvSpec
import numpy as np
from mpe.multi_discrete import MultiDiscrete
import copy
class MultiAgentEnv(gym.Env):
metadata = {
'render.modes' : ['human', 'rgb_array']
}
def __init__(self, world, reset_callback=None, reward_callback=None,
observation_callback=None, info_callback=None,
done_callback=None, shared_viewer=True):
world = copy.deepcopy(world)
self.world = world
self.agents = self.world.policy_agents
self.n = len(world.policy_agents)
self.reset_callback = reset_callback
self.reward_callback = reward_callback
self.observation_callback = observation_callback
self.info_callback = info_callback
self.done_callback = done_callback
self.discrete_action_space = True
self.discrete_action_input = False
self.force_discrete_action = world.discrete_action if hasattr(world, 'discrete_action') else False
self.shared_reward = world.collaborative if hasattr(world, 'collaborative') else False
self.time = 0
self.action_space = []
self.observation_space = []
for agent in self.agents:
total_action_space = []
if self.discrete_action_space:
u_action_space = spaces.Discrete(world.dim_p * 2 + 1)
else:
u_action_space = spaces.Box(low=-agent.u_range, high=+agent.u_range, shape=(world.dim_p,), dtype=np.float32)
if agent.movable:
total_action_space.append(u_action_space)
if self.discrete_action_space:
c_action_space = spaces.Discrete(world.dim_c)
else:
c_action_space = spaces.Box(low=0.0, high=1.0, shape=(world.dim_c,), dtype=np.float32)
if not agent.silent:
total_action_space.append(c_action_space)
if len(total_action_space) > 1:
if all([isinstance(act_space, spaces.Discrete) for act_space in total_action_space]):
act_space = MultiDiscrete([[0, act_space.n - 1] for act_space in total_action_space])
else:
act_space = spaces.Tuple(total_action_space)
self.action_space.append(act_space)
else:
self.action_space.append(total_action_space[0])
obs_dim = len(observation_callback(agent, self.world))
self.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32))
agent.action.c = np.zeros(self.world.dim_c)
self.action_space = spaces.Tuple(tuple(self.action_space))
self.observation_space = spaces.Tuple(tuple(self.observation_space))
self.n_agents = self.n
self.shared_viewer = shared_viewer
if self.shared_viewer:
self.viewers = [None]
else:
self.viewers = [None] * self.n
self._reset_render()
def seed(self, seed):
self.world.seed(seed)
def step(self, action_n):
one_hot_actions = []
for act, acsp in zip(action_n, self.action_space):
one_hot = np.zeros(acsp.n)
one_hot[act] = 1.0
one_hot_actions.append(one_hot)
action_n = one_hot_actions
obs_n = []
reward_n = []
done_n = []
info_n = {'n': []}
self.agents = self.world.policy_agents
for i, agent in enumerate(self.agents):
self._set_action(action_n[i], agent, self.action_space[i])
self.world.step()
for agent in self.agents:
obs_n.append(self._get_obs(agent))
reward_n.append(self._get_reward(agent))
done_n.append(self._get_done(agent))
info_n['n'].append(self._get_info(agent))
reward = np.sum(reward_n)
if self.shared_reward:
reward_n = [reward] * self.n
return tuple(obs_n), reward_n, done_n, info_n
def reset(self):
self.reset_callback(self.world)
self._reset_render()
obs_n = []
self.agents = self.world.policy_agents
for agent in self.agents:
obs_n.append(self._get_obs(agent))
return tuple(obs_n)
def _get_info(self, agent):
if self.info_callback is None:
return {}
return self.info_callback(agent, self.world)
def _get_obs(self, agent):
if self.observation_callback is None:
return np.zeros(0)
return self.observation_callback(agent, self.world).astype(np.float32)
def _get_done(self, agent):
if self.done_callback is None:
return False
return self.done_callback(agent, self.world)
def _get_reward(self, agent):
if self.reward_callback is None:
return 0.0
return self.reward_callback(agent, self.world)
def _set_action(self, action, agent, action_space, time=None):
agent.action.u = np.zeros(self.world.dim_p)
agent.action.c = np.zeros(self.world.dim_c)
if isinstance(action_space, MultiDiscrete):
act = []
size = action_space.high - action_space.low + 1
index = 0
for s in size:
act.append(action[index:(index+s)])
index += s
action = act
else:
action = [action]
if agent.movable:
if self.discrete_action_input:
agent.action.u = np.zeros(self.world.dim_p)
if action[0] == 1: agent.action.u[0] = -1.0
if action[0] == 2: agent.action.u[0] = +1.0
if action[0] == 3: agent.action.u[1] = -1.0
if action[0] == 4: agent.action.u[1] = +1.0
else:
if self.force_discrete_action:
d = np.argmax(action[0])
action[0][:] = 0.0
action[0][d] = 1.0
if self.discrete_action_space:
agent.action.u[0] += action[0][1] - action[0][2]
agent.action.u[1] += action[0][3] - action[0][4]
else:
agent.action.u = action[0]
sensitivity = 5.0
if agent.accel is not None:
sensitivity = agent.accel
agent.action.u *= sensitivity
action = action[1:]
if not agent.silent:
if self.discrete_action_input:
agent.action.c = np.zeros(self.world.dim_c)
agent.action.c[action[0]] = 1.0
else:
agent.action.c = action[0]
action = action[1:]
assert len(action) == 0
def _reset_render(self):
self.render_geoms = None
self.render_geoms_xform = None
def render(self, mode='human'):
if mode == 'human':
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
message = ''
for agent in self.world.agents:
comm = []
for other in self.world.agents:
if other is agent: continue
if np.all(other.state.c == 0):
word = '_'
else:
word = alphabet[np.argmax(other.state.c)]
message += (other.name + ' to ' + agent.name + ': ' + word + ' ')
print(message)
for i in range(len(self.viewers)):
if self.viewers[i] is None:
#from gym.envs.classic_control import rendering
from mpe import rendering
self.viewers[i] = rendering.Viewer(700,700)
# create rendering geometry
if self.render_geoms is None:
# import rendering only if we need it (and don't import for headless machines)
from mpe import rendering
self.render_geoms = []
self.render_geoms_xform = []
for entity in self.world.entities:
geom = rendering.make_circle(entity.size)
xform = rendering.Transform()
if 'agent' in entity.name:
geom.set_color(*entity.color, alpha=0.5)
else:
geom.set_color(*entity.color)
geom.add_attr(xform)
self.render_geoms.append(geom)
self.render_geoms_xform.append(xform)
for viewer in self.viewers:
viewer.geoms = []
for geom in self.render_geoms:
viewer.add_geom(geom)
results = []
for i in range(len(self.viewers)):
from mpe import rendering
cam_range = 1
if self.shared_viewer:
pos = np.zeros(self.world.dim_p)
else:
pos = self.agents[i].state.p_pos
self.viewers[i].set_bounds(pos[0]-cam_range,pos[0]+cam_range,pos[1]-cam_range,pos[1]+cam_range)
for e, entity in enumerate(self.world.entities):
self.render_geoms_xform[e].set_translation(*entity.state.p_pos)
results.append(self.viewers[i].render(return_rgb_array = mode=='rgb_array'))
if self.shared_viewer:
assert len(results) == 1
return results[0]
return results
def _make_receptor_locations(self, agent):
receptor_type = 'polar'
range_min = 0.05 * 2.0
range_max = 1.00
dx = []
if receptor_type == 'polar':
for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):
for distance in np.linspace(range_min, range_max, 3):
dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))
dx.append(np.array([0.0, 0.0]))
if receptor_type == 'grid':
for x in np.linspace(-range_max, +range_max, 5):
for y in np.linspace(-range_max, +range_max, 5):
dx.append(np.array([x,y]))
return dx
def close(self):
for viewer in self.viewers:
if viewer:
viewer.close()
class BatchMultiAgentEnv(gym.Env):
metadata = {
'runtime.vectorized': True,
'render.modes' : ['human', 'rgb_array']
}
def __init__(self, env_batch):
self.env_batch = env_batch
@property
def n(self):
return np.sum([env.n for env in self.env_batch])
@property
def action_space(self):
return self.env_batch[0].action_space
@property
def observation_space(self):
return self.env_batch[0].observation_space
def step(self, action_n, time):
obs_n = []
reward_n = []
done_n = []
info_n = {'n': []}
i = 0
for env in self.env_batch:
obs, reward, done, _ = env.step(action_n[i:(i+env.n)], time)
i += env.n
obs_n += obs
reward_n += reward
done_n += done
return obs_n, reward_n, done_n, info_n
def reset(self):
obs_n = []
for env in self.env_batch:
obs_n += env.reset()
return obs_n
def render(self, mode='human', close=True):
results_n = []
for env in self.env_batch:
results_n += env.render(mode, close)
return results_n
| true
| true
|
f7163d24a95dcc2a6bddc0e3154afb84cd313c70
| 53
|
py
|
Python
|
python/tevreden/__init__.py
|
lhengstmengel/tevreden-sdk
|
704b575b264f72954a7bb3afc57a9db94f1e273b
|
[
"MIT"
] | null | null | null |
python/tevreden/__init__.py
|
lhengstmengel/tevreden-sdk
|
704b575b264f72954a7bb3afc57a9db94f1e273b
|
[
"MIT"
] | null | null | null |
python/tevreden/__init__.py
|
lhengstmengel/tevreden-sdk
|
704b575b264f72954a7bb3afc57a9db94f1e273b
|
[
"MIT"
] | null | null | null |
from tevreden.apiclient import APIClient
| 8.833333
| 40
| 0.679245
|
from tevreden.apiclient import APIClient
| true
| true
|
f7163d273bf82ffe2219a42636d894ab274c7014
| 1,758
|
py
|
Python
|
code--Django/MulVAL_BAG/venv/lib/python3.7/site-packages/parse/parse.py
|
MekAkUActOR/BAG_MulVAL
|
e0ba159f2eba0a1aaaa3176363d88cadedd90222
|
[
"MIT"
] | 7
|
2020-10-10T03:25:20.000Z
|
2022-02-21T10:02:33.000Z
|
code--Django/MulVAL_BAG/venv/lib/python3.7/site-packages/parse/parse.py
|
bestwishCT/BAG_MulVAL
|
e0ba159f2eba0a1aaaa3176363d88cadedd90222
|
[
"MIT"
] | 2
|
2021-06-22T00:06:31.000Z
|
2022-03-29T06:24:50.000Z
|
code--Django/MulVAL_BAG/venv/lib/python3.7/site-packages/parse/parse.py
|
bestwishCT/BAG_MulVAL
|
e0ba159f2eba0a1aaaa3176363d88cadedd90222
|
[
"MIT"
] | 3
|
2021-07-25T11:05:07.000Z
|
2022-01-08T08:46:24.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 08 16:11:28 2013
@author: kshmirko
"""
import re
from ios.readMeteoBlock import readMeteoFile, readMeteoCtx
import StringIO
from datetime import datetime, timedelta
class ParserException(Exception):
    """Raised when a scraped sounding page does not match the expected layout."""

    def __init__(self, text):
        # Delegate to the base class so args/str() behave like a plain Exception.
        Exception.__init__(self, text)
regex = re.compile("(?P<stid>[0-9]+)([a-zA-Z\ \(\)]+)(?P<time>[0-9]+\w\ [0-9]+\ \w+\ [0-9]+)",re.IGNORECASE|re.UNICODE|re.DOTALL)
def parse_h2(line):
    # Extract the station id and observation time from the <h2> header text
    # using the module-level `regex` (Python 2: `print` statements).
    print line
    # NOTE(review): regex.match() returns None on non-matching input, which
    # makes .groupdict() raise AttributeError — confirm callers expect that.
    r = regex.match(line).groupdict()
    stid = int(r['stid'])
    # Header times look like "00Z 01 Jan 2013".
    date = datetime.strptime(r['time'],'%HZ %d %b %Y')
    print stid, date
    return stid, date
def parse_pre1(line):
    """Parse the first <pre> block (the sounding table) into a meteo structure."""
    # Wrap the raw text in a file-like object for the block reader.
    buf = StringIO.StringIO(line)
    return readMeteoFile(buf)
def parse_pre2(line):
    """Parse the second <pre> block (station indices) into a context structure."""
    return readMeteoCtx(StringIO.StringIO(line))
def parse_h3(line):
    # The <h3> title line carries no data of interest; intentionally a no-op,
    # kept so parse_observation can treat all four sections uniformly.
    pass
def parse_observation(tags):
    # Consume one observation's four sections (h2, pre, h3, pre) from `tags`
    # and return [station_id, datetime, meteo_table, indices_ctx].
    # NOTE(review): list.pop() removes the LAST element, so `tags` is treated
    # as a stack with the <h2> section on top — confirm callers build it so.
    tmp = tags.pop()
    if tmp.tag=='h2':
        print "Header OK"
        stid, date = parse_h2(tmp.text)
    else:
        raise ParserException("Can't parse string '%s'\n"%(tmp.text))
    tmp = tags.pop()
    if tmp.tag=='pre':
        print "data OK"
        meteo = parse_pre1(tmp.text)
    else:
        raise ParserException("Can't parse string '%s'\n"%(tmp.text))
    tmp = tags.pop()
    if tmp.tag=='h3':
        print "Indices title OK"
        parse_h3(tmp.text)
    else:
        raise ParserException("Can't parse string '%s'\n"%(tmp.text))
    tmp = tags.pop()
    if tmp.tag=='pre':
        print "Indices OK"
        ctx = parse_pre2(tmp.text)
    else:
        raise ParserException("Can't parse string '%s'\n"%(tmp.text))
    return [stid, date, meteo, ctx]
| 24.416667
| 129
| 0.597838
|
"""
Created on Wed May 08 16:11:28 2013
@author: kshmirko
"""
import re
from ios.readMeteoBlock import readMeteoFile, readMeteoCtx
import StringIO
from datetime import datetime, timedelta
class ParserException(Exception):
def __init__(self, text):
super(ParserException, self).__init__(text)
regex = re.compile("(?P<stid>[0-9]+)([a-zA-Z\ \(\)]+)(?P<time>[0-9]+\w\ [0-9]+\ \w+\ [0-9]+)",re.IGNORECASE|re.UNICODE|re.DOTALL)
def parse_h2(line):
print line
r = regex.match(line).groupdict()
stid = int(r['stid'])
date = datetime.strptime(r['time'],'%HZ %d %b %Y')
print stid, date
return stid, date
def parse_pre1(line):
sfile = StringIO.StringIO(line)
meteo = readMeteoFile(sfile)
return meteo
def parse_pre2(line):
    """Parse the indices <pre> block text via readMeteoCtx."""
    return readMeteoCtx(StringIO.StringIO(line))
def parse_h3(line):
    """Placeholder for the <h3> indices title; nothing is extracted yet."""
    return None
def parse_observation(tags):
tmp = tags.pop()
if tmp.tag=='h2':
print "Header OK"
stid, date = parse_h2(tmp.text)
else:
raise ParserException("Can't parse string '%s'\n"%(tmp.text))
tmp = tags.pop()
if tmp.tag=='pre':
print "data OK"
meteo = parse_pre1(tmp.text)
else:
raise ParserException("Can't parse string '%s'\n"%(tmp.text))
tmp = tags.pop()
if tmp.tag=='h3':
print "Indices title OK"
parse_h3(tmp.text)
else:
raise ParserException("Can't parse string '%s'\n"%(tmp.text))
tmp = tags.pop()
if tmp.tag=='pre':
print "Indices OK"
ctx = parse_pre2(tmp.text)
else:
raise ParserException("Can't parse string '%s'\n"%(tmp.text))
return [stid, date, meteo, ctx]
| false
| true
|
f7163d55c4d7ee055eddf4948ff15e01024bdcbf
| 2,574
|
py
|
Python
|
wstools/scan_download.py
|
inductiveload/wstools
|
b354a642b10a8d1bfa2a7683d2270c42512cb25d
|
[
"MIT"
] | null | null | null |
wstools/scan_download.py
|
inductiveload/wstools
|
b354a642b10a8d1bfa2a7683d2270c42512cb25d
|
[
"MIT"
] | null | null | null |
wstools/scan_download.py
|
inductiveload/wstools
|
b354a642b10a8d1bfa2a7683d2270c42512cb25d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
import argparse
import logging
from xlsx2csv import Xlsx2csv
from io import StringIO
import csv
import os
import subprocess
import utils.ht_source
def parse_header_row(hr):
    """Map each lower-cased header cell of *hr* to its column index."""
    return {cell.lower(): index for index, cell in enumerate(hr)}
def handle_row(r, args):
    """Dispatch one mapped spreadsheet row to the matching downloader.

    Only the HathiTrust source ("ht") is implemented; any other value of
    r['source'] raises ValueError.
    """
    print(r)
    if r['source'] != "ht":
        raise ValueError("Unknown source: {}".format(r['source']))
    # Download into a directory named after the output file, minus extension.
    o_dir = os.path.splitext(r['file'])[0]
    utils.ht_source.dl_to_directory(r['id'], o_dir,
                                    skip_existing=args.skip_existing,
                                    make_dirs=True)
def main():
    """Parse CLI options, read the .xlsx sheet and download selected rows."""
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='show debugging information')
    parser.add_argument('-f', '--data_file', required=True,
                        help='The data file')
    parser.add_argument('-r', '--rows', type=int, nargs="+",
                        help='Rows to process (1-indexed, same as in spreadsheet)')
    parser.add_argument('-s', '--skip_existing', action='store_true',
                        help='Skip files we already have')
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
    # These libraries are chatty at INFO/DEBUG; keep our own log readable.
    for noisy in ("requests", "oauthlib", "requests_oauthlib", "urllib3"):
        logging.getLogger(noisy).setLevel(logging.WARNING)

    # Convert the spreadsheet to CSV in memory and stream it row by row.
    csv_text = StringIO()
    Xlsx2csv(args.data_file, skip_trailing_columns=True,
             skip_empty_lines=True, outputencoding="utf-8").convert(csv_text)
    csv_text.seek(0)

    reader = csv.reader(csv_text, delimiter=',', quotechar='"')
    col_map = parse_header_row(next(reader))

    # Spreadsheet rows are 1-indexed and row 1 is the header row.
    for row_idx, row in enumerate(reader, start=2):
        if args.rows is not None and row_idx not in args.rows:
            logging.debug("Skip row {}".format(row_idx))
            continue

        mapped_row = {col.lower(): row[idx].strip()
                      for col, idx in col_map.items()}

        # A "dl" column of n/no opts the row out of downloading.
        if "dl" in mapped_row and mapped_row["dl"].lower() in ["n", "no"]:
            logging.debug("Skipping row DL: {}".format(row_idx))
            continue

        handle_row(mapped_row, args)
# Run as a script: parse CLI arguments and process the spreadsheet.
if __name__ == "__main__":
    main()
| 26
| 83
| 0.612665
|
import argparse
import logging
from xlsx2csv import Xlsx2csv
from io import StringIO
import csv
import os
import subprocess
import utils.ht_source
def parse_header_row(hr):
    """Map each lower-cased header cell of *hr* to its column index."""
    return {cell.lower(): index for index, cell in enumerate(hr)}
def handle_row(r, args):
    """Dispatch one mapped spreadsheet row to the matching downloader.

    Only the HathiTrust source ("ht") is implemented; any other value of
    r['source'] raises ValueError.
    """
    print(r)
    if r['source'] != "ht":
        raise ValueError("Unknown source: {}".format(r['source']))
    # Download into a directory named after the output file, minus extension.
    o_dir = os.path.splitext(r['file'])[0]
    utils.ht_source.dl_to_directory(r['id'], o_dir,
                                    skip_existing=args.skip_existing,
                                    make_dirs=True)
def main():
    """Parse CLI options, read the .xlsx sheet and download selected rows."""
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='show debugging information')
    parser.add_argument('-f', '--data_file', required=True,
                        help='The data file')
    parser.add_argument('-r', '--rows', type=int, nargs="+",
                        help='Rows to process (1-indexed, same as in spreadsheet)')
    parser.add_argument('-s', '--skip_existing', action='store_true',
                        help='Skip files we already have')
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
    # These libraries are chatty at INFO/DEBUG; keep our own log readable.
    for noisy in ("requests", "oauthlib", "requests_oauthlib", "urllib3"):
        logging.getLogger(noisy).setLevel(logging.WARNING)

    # Convert the spreadsheet to CSV in memory and stream it row by row.
    csv_text = StringIO()
    Xlsx2csv(args.data_file, skip_trailing_columns=True,
             skip_empty_lines=True, outputencoding="utf-8").convert(csv_text)
    csv_text.seek(0)

    reader = csv.reader(csv_text, delimiter=',', quotechar='"')
    col_map = parse_header_row(next(reader))

    # Spreadsheet rows are 1-indexed and row 1 is the header row.
    for row_idx, row in enumerate(reader, start=2):
        if args.rows is not None and row_idx not in args.rows:
            logging.debug("Skip row {}".format(row_idx))
            continue

        mapped_row = {col.lower(): row[idx].strip()
                      for col, idx in col_map.items()}

        # A "dl" column of n/no opts the row out of downloading.
        if "dl" in mapped_row and mapped_row["dl"].lower() in ["n", "no"]:
            logging.debug("Skipping row DL: {}".format(row_idx))
            continue

        handle_row(mapped_row, args)
# Run as a script: parse CLI arguments and process the spreadsheet.
if __name__ == "__main__":
    main()
| true
| true
|
f7163da8393455426f9f86eb28054d4db2ae3791
| 657
|
py
|
Python
|
tests/test_vt100_output.py
|
gousaiyang/python-prompt-toolkit
|
6237764658214af4c24633795d2571d2bd03375d
|
[
"BSD-3-Clause"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
tests/test_vt100_output.py
|
gousaiyang/python-prompt-toolkit
|
6237764658214af4c24633795d2571d2bd03375d
|
[
"BSD-3-Clause"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
tests/test_vt100_output.py
|
gousaiyang/python-prompt-toolkit
|
6237764658214af4c24633795d2571d2bd03375d
|
[
"BSD-3-Clause"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
from prompt_toolkit.output.vt100 import _get_closest_ansi_color
def test_get_closest_ansi_color():
    """Spot-check the RGB -> nearest-ANSI-color mapping at known points."""
    expectations = [
        # Pure and near-pure white.
        ((255, 255, 255), "ansiwhite"),
        ((250, 250, 250), "ansiwhite"),
        # Pure and near-pure black.
        ((0, 0, 0), "ansiblack"),
        ((5, 5, 5), "ansiblack"),
        # Bright green, with slight noise on either other channel.
        ((0, 255, 0), "ansibrightgreen"),
        ((10, 255, 0), "ansibrightgreen"),
        ((0, 255, 10), "ansibrightgreen"),
        # A desaturated yellow maps to the plain yellow.
        ((220, 220, 100), "ansiyellow"),
    ]
    for rgb, expected in expectations:
        assert _get_closest_ansi_color(*rgb) == expected
| 34.578947
| 67
| 0.724505
|
from prompt_toolkit.output.vt100 import _get_closest_ansi_color
def test_get_closest_ansi_color():
    """Spot-check the RGB -> nearest-ANSI-color mapping at known points."""
    expectations = [
        ((255, 255, 255), "ansiwhite"),
        ((250, 250, 250), "ansiwhite"),
        ((0, 0, 0), "ansiblack"),
        ((5, 5, 5), "ansiblack"),
        ((0, 255, 0), "ansibrightgreen"),
        ((10, 255, 0), "ansibrightgreen"),
        ((0, 255, 10), "ansibrightgreen"),
        ((220, 220, 100), "ansiyellow"),
    ]
    for rgb, expected in expectations:
        assert _get_closest_ansi_color(*rgb) == expected
| true
| true
|
f7163dec326b34497f296ba51ed9239979207054
| 27,795
|
py
|
Python
|
decompiler/magic.py
|
Gouvernathor/unrpyc
|
25f4470ea1612ecacec578efcecc3054a59098c8
|
[
"MIT"
] | 490
|
2015-01-02T19:37:41.000Z
|
2022-03-27T09:26:53.000Z
|
decompiler/magic.py
|
Gouvernathor/unrpyc
|
25f4470ea1612ecacec578efcecc3054a59098c8
|
[
"MIT"
] | 114
|
2015-01-02T06:14:15.000Z
|
2022-03-31T23:24:39.000Z
|
decompiler/magic.py
|
Gouvernathor/unrpyc
|
25f4470ea1612ecacec578efcecc3054a59098c8
|
[
"MIT"
] | 123
|
2015-01-02T18:17:53.000Z
|
2022-03-29T13:25:17.000Z
|
# Copyright (c) 2015 CensoredUsername
# This module provides tools for safely analyizing pickle files programmatically
import sys
PY3 = sys.version_info >= (3, 0)
PY2 = not PY3
import types
import pickle
import struct
if PY3:
from io import BytesIO as StringIO
else:
from cStringIO import StringIO
# Explicit public API: `from magic import *` exposes only these names.
__all__ = [
    "load", "loads", "safe_load", "safe_loads", "safe_dump", "safe_dumps",
    "fake_package", "remove_fake_package",
    "FakeModule", "FakePackage", "FakePackageLoader",
    "FakeClassType", "FakeClassFactory",
    "FakeClass", "FakeStrict", "FakeWarning", "FakeIgnore",
    "FakeUnpicklingError", "FakeUnpickler", "SafeUnpickler",
    "SafePickler"
]
# Fake class implementation
class FakeClassType(type):
    """
    Metaclass used to create fake classes.

    A fake class pretends to live at a specific spot in the module
    hierarchy, given either by an explicit *module* argument when the
    metaclass is called directly, or by a ``__module__`` class attribute
    in a class statement.

    Comparisons with other named objects (including :class:`FakeModule`
    instances) follow this logic: if the other object has no ``__name__``
    they are unequal; if it has no ``__module__`` they are equal when
    ``self.__module__ + "." + self.__name__ == other.__name__``; otherwise
    they are equal when both ``__module__`` and ``__name__`` match.
    ``==``, ``!=``, ``hash()``, ``isinstance()`` and ``issubclass()`` are
    all implemented on top of that rule.
    """

    def __new__(cls, name, bases, attributes, module=None):
        # A fake class has no truthful qualified name, so drop it.
        attributes.pop("__qualname__", None)

        # An explicit *module* argument wins; a class statement has already
        # provided __module__ implicitly (set to the defining module's name).
        if module is not None:
            attributes["__module__"] = module
        if "__module__" not in attributes:
            raise TypeError("No module has been specified for FakeClassType {0}".format(name))

        return type.__new__(cls, name, bases, attributes)

    def __init__(self, name, bases, attributes, module=None):
        type.__init__(self, name, bases, attributes)

    def __eq__(self, other):
        if not hasattr(other, "__name__"):
            return False
        if hasattr(other, "__module__"):
            return (self.__module__ == other.__module__
                    and self.__name__ == other.__name__)
        return self.__module__ + "." + self.__name__ == other.__name__

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.__module__ + "." + self.__name__)

    def __instancecheck__(self, instance):
        return self.__subclasscheck__(instance.__class__)

    def __subclasscheck__(self, subclass):
        if self == subclass:
            return True
        return bool(subclass.__bases__) and any(
            self.__subclasscheck__(parent) for parent in subclass.__bases__)
# PY2 doesn't like the PY3 way of metaclasses and PY3 doesn't support the PY2 way
# so we call the metaclass directly
# (equivalent to a class statement with FakeClassType as its metaclass, with
# __module__ pinned to this module's name so comparisons behave).
FakeClass = FakeClassType("FakeClass", (), {"__doc__": """
A barebones instance of :class:`FakeClassType`. Inherit from this to create fake classes.
"""}, module=__name__)
class FakeStrict(FakeClass, object):
    """Fake class that refuses any constructor arguments or non-dict state."""

    def __new__(cls, *args, **kwargs):
        instance = FakeClass.__new__(cls)
        if args or kwargs:
            raise FakeUnpicklingError("{0} was instantiated with unexpected arguments {1}, {2}".format(cls, args, kwargs))
        return instance

    def __setstate__(self, state):
        # A (dict-or-None, dict-or-None) pair means (state, slot state).
        slotstate = None
        if (isinstance(state, tuple) and len(state) == 2 and
                (state[0] is None or isinstance(state[0], dict)) and
                (state[1] is None or isinstance(state[1], dict))):
            state, slotstate = state

        if state:
            # slotstate needs no such check: it is either None or a dict here.
            if not isinstance(state, dict):
                raise FakeUnpicklingError("{0}.__setstate__() got unexpected arguments {1}".format(self.__class__, state))
            self.__dict__.update(state)

        if slotstate:
            self.__dict__.update(slotstate)
class FakeWarning(FakeClass, object):
    """Fake class that reports unexpected pickle data but keeps going."""

    def __new__(cls, *args, **kwargs):
        instance = FakeClass.__new__(cls)
        if args or kwargs:
            print("{0} was instantiated with unexpected arguments {1}, {2}".format(cls, args, kwargs))
            # Stash the positional args for later inspection.
            instance._new_args = args
        return instance

    def __setstate__(self, state):
        # A (dict-or-None, dict-or-None) pair means (state, slot state).
        slotstate = None
        if (isinstance(state, tuple) and len(state) == 2 and
                (state[0] is None or isinstance(state[0], dict)) and
                (state[1] is None or isinstance(state[1], dict))):
            state, slotstate = state

        if state:
            if not isinstance(state, dict):
                # Unexpected layout: report it and keep it for inspection.
                print("{0}.__setstate__() got unexpected arguments {1}".format(self.__class__, state))
                self._setstate_args = state
            else:
                self.__dict__.update(state)

        if slotstate:
            self.__dict__.update(slotstate)
class FakeIgnore(FakeClass, object):
    """Fake class that silently stashes any unexpected pickle data."""

    def __new__(cls, *args, **kwargs):
        instance = FakeClass.__new__(cls)
        if args:
            instance._new_args = args
        if kwargs:
            instance._new_kwargs = kwargs
        return instance

    def __setstate__(self, state):
        # A (dict-or-None, dict-or-None) pair means (state, slot state).
        slotstate = None
        if (isinstance(state, tuple) and len(state) == 2 and
                (state[0] is None or isinstance(state[0], dict)) and
                (state[1] is None or isinstance(state[1], dict))):
            state, slotstate = state

        if state:
            if isinstance(state, dict):
                self.__dict__.update(state)
            else:
                # Unexpected layout: keep it for inspection, silently.
                self._setstate_args = state

        if slotstate:
            self.__dict__.update(slotstate)
class FakeClassFactory(object):
    """
    Factory of fake classes, created on demand per (module, name) pair.

    *special_cases* is an iterable of fake classes to use verbatim when
    their module and name are requested, letting you attach custom methods
    or attributes for specific pickled types.  *default_class* (usually
    :class:`FakeStrict`, :class:`FakeWarning` or :class:`FakeIgnore`) is
    subclassed for every other request; these base classes provide
    ``__new__``/``__setstate__`` hooks that capture the pickle data.
    """

    def __init__(self, special_cases=(), default_class=FakeStrict):
        """Index the special cases by (module, name) and set up the cache."""
        self.special_cases = {(case.__module__, case.__name__): case
                              for case in special_cases}
        self.default = default_class
        self.class_cache = {}

    def __call__(self, name, module):
        """Return the fake class for *module*.*name*, creating and caching it."""
        cached = self.class_cache.get((module, name), None)
        if cached is not None:
            return cached

        klass = self.special_cases.get((module, name), None)
        if not klass:
            # No special case registered: subclass the default fake class
            # under the requested name and module.
            klass = type(name, (self.default,), {"__module__": module})

        self.class_cache[(module, name)] = klass
        return klass
# Fake module implementation
class FakeModule(types.ModuleType):
    """
    An object which pretends to be a module.

    *name* is the module's ``"."``-separated dotted name.  On construction
    the instance registers itself in :data:`sys.modules` and, for a
    submodule, attaches itself to its parent, creating parent
    :class:`FakeModule` instances recursively until an importable (or
    top-level) parent is reached.  Removing a fake submodule attribute
    automatically removes its :data:`sys.modules` entry as well.

    Instances compare equal to :class:`FakeClassType` instances and other
    named objects claiming the same spot in the module hierarchy: if the
    other object has no ``__name__`` they are unequal; if it has no
    ``__module__`` they are equal when ``self.__name__ == other.__name__``;
    otherwise when ``self.__name__ == other.__module__ + "." +
    other.__name__``.  ``==``, ``!=``, ``hash()``, ``isinstance()`` and
    ``issubclass()`` all follow this logic.

    Inherits from :class:`types.ModuleType`.
    """
    def __init__(self, name):
        super(FakeModule, self).__init__(name)
        sys.modules[name] = self

        # For "a.b.c": make sure parent "a.b" exists (really imported or
        # faked) and expose this module as attribute "c" on it.
        if "." in name:
            parent_name, child_name = name.rsplit(".", 1)
            # NOTE(review): the bare except appears deliberate — importing an
            # arbitrary parent can fail in arbitrary ways, and any failure
            # means the parent must be faked too.
            try:
                __import__(parent_name)
                parent = sys.modules[parent_name]
            except:
                parent = FakeModule(parent_name)
            setattr(parent, child_name, self)

    def __repr__(self):
        return "<module '{0}' (fake)>".format(self.__name__)

    def __str__(self):
        return self.__repr__()

    def __setattr__(self, name, value):
        # If a fakemodule is removed we need to remove its entry from sys.modules
        if (name in self.__dict__ and
            isinstance(self.__dict__[name], FakeModule) and not
            isinstance(value, FakeModule)):
            self.__dict__[name]._remove()
        self.__dict__[name] = value

    def __delattr__(self, name):
        # Deleting a fake submodule also unregisters it from sys.modules.
        if isinstance(self.__dict__[name], FakeModule):
            self.__dict__[name]._remove()
        del self.__dict__[name]

    def _remove(self):
        """
        Removes this module from :data:`sys.modules` and calls :meth:`_remove` on any
        sub-FakeModules.
        """
        # Iterate over a snapshot: __dict__ is mutated while walking it.
        for i in tuple(self.__dict__.keys()):
            if isinstance(self.__dict__[i], FakeModule):
                self.__dict__[i]._remove()
                del self.__dict__[i]
        del sys.modules[self.__name__]

    def __eq__(self, other):
        # Anything without a __name__ cannot pretend to be a module.
        if not hasattr(other, "__name__"):
            return False
        othername = other.__name__
        if hasattr(other, "__module__"):
            othername = other.__module__ + "." + other.__name__
        return self.__name__ == othername

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.__name__)

    def __instancecheck__(self, instance):
        return self.__subclasscheck__(instance.__class__)

    def __subclasscheck__(self, subclass):
        return (self == subclass or
                (bool(subclass.__bases__) and
                 any(self.__subclasscheck__(base) for base in subclass.__bases__)))
class FakePackage(FakeModule):
    """
    A :class:`FakeModule` that lazily materialises submodules on attribute
    access, so every dotted path below it resolves to a valid fake module
    that can be compared against fake classes.
    """
    __path__ = []

    def __call__(self, *args, **kwargs):
        # Friendlier diagnostics than the default "module is not callable".
        raise TypeError("'{0}' FakePackage object is not callable".format(self.__name__))

    def __getattr__(self, name):
        dotted = self.__name__ + "." + name
        module = sys.modules.get(dotted, None)
        if module is not None:
            return module
        # Prefer a real import; on any failure, fake the submodule too.
        try:
            __import__(dotted)
        except:
            return FakePackage(dotted)
        return sys.modules[dotted]
class FakePackageLoader(object):
    """
    Import hook (a :term:`loader`) that answers for module *root* and every
    submodule below it, producing :class:`FakePackage` instances instead of
    real modules once installed on :data:`sys.meta_path`.
    """

    def __init__(self, root):
        self.root = root

    def find_module(self, fullname, path=None):
        # Claim the root module itself and anything inside it.
        is_ours = fullname == self.root or fullname.startswith(self.root + ".")
        return self if is_ours else None

    def load_module(self, fullname):
        return FakePackage(fullname)
# Fake unpickler implementation
class FakeUnpicklingError(pickle.UnpicklingError):
    """
    Raised when the fake unpickling process lacks the information needed to
    finish.  Inherits from :exc:`pickle.UnpicklingError`.
    """
    pass
class FakeUnpickler(pickle.Unpickler if PY2 else pickle._Unpickler):
    """
    A forgiving unpickler: references to class definitions it cannot locate
    are satisfied by creating fake classes (and, if needed, fake modules to
    house them).  It still allows access to all modules and builtins, so it
    must only be used on trusted data.

    *file* is the :term:`binary file` to unserialize.  *class_factory*
    controls how missing class definitions are created; ``None`` means
    ``FakeClassFactory((), FakeStrict)``.  In Python 3, *encoding* and
    *errors* say how 8-bit string instances pickled by Python 2 should be
    handled: "bytes" loads them as bytes objects, anything else decodes
    them with the given codec settings.

    Inherits from :class:`pickle.Unpickler` (``pickle._Unpickler`` in
    Python 3).
    """
    # Python 2's pickle.Unpickler is an old-style class, so super() cannot
    # be used there; the right __init__ is selected at class-body execution.
    if PY2:
        def __init__(self, file, class_factory=None, encoding="bytes", errors="strict"):
            pickle.Unpickler.__init__(self, file,)
            self.class_factory = class_factory or FakeClassFactory()
    else:
        def __init__(self, file, class_factory=None, encoding="bytes", errors="strict"):
            super().__init__(file, fix_imports=False, encoding=encoding, errors=errors)
            self.class_factory = class_factory or FakeClassFactory()

    def find_class(self, module, name):
        # Prefer an already-imported module; otherwise attempt a real import
        # and fall back to a FakeModule on any failure.
        mod = sys.modules.get(module, None)
        if mod is None:
            try:
                __import__(module)
            except:
                mod = FakeModule(module)
            else:
                mod = sys.modules[module]

        # A missing attribute (or one shadowed by a fake module) becomes a
        # fake class, cached on the module for subsequent lookups.
        klass = getattr(mod, name, None)
        if klass is None or isinstance(klass, FakeModule):
            klass = self.class_factory(name, module)
            setattr(mod, name, klass)

        return klass
class SafeUnpickler(FakeUnpickler):
    """
    A safe unpickler: every class reference in the stream is replaced by a
    fake class, and access to the pickle extension registry can be blocked,
    making it safe for untrusted data (with the default *class_factory*,
    empty *safe_modules* and ``use_copyreg=False``).

    *file* is the :term:`binary file` to unserialize.  *class_factory*
    controls fake class creation (``None`` means ``FakeClassFactory((),
    FakeStrict)``).  *safe_modules* is a set of module names trusted enough
    to import objects from for real (submodules are not included); note
    that requesting a nonexistent attribute of a safe module raises
    :exc:`AttributeError`.  *use_copyreg* allows extensions from the pickle
    extension registry (:mod:`copyreg`).  In Python 3, *encoding* and
    *errors* control decoding of Python-2 8-bit strings; "bytes" keeps them
    as bytes objects.

    Inherits from :class:`FakeUnpickler`.
    """
    def __init__(self, file, class_factory=None, safe_modules=(),
                 use_copyreg=False, encoding="bytes", errors="strict"):
        FakeUnpickler.__init__(self, file, class_factory, encoding=encoding, errors=errors)
        # Only these module names may be imported for real.
        self.safe_modules = set(safe_modules)
        self.use_copyreg = use_copyreg

    def find_class(self, module, name):
        if module not in self.safe_modules:
            # Untrusted origin: always substitute a fake class.
            return self.class_factory(name, module)
        __import__(module)
        return getattr(sys.modules[module], name)

    def get_extension(self, code):
        if self.use_copyreg:
            return FakeUnpickler.get_extension(self, code)
        return self.class_factory("extension_code_{0}".format(code), "copyreg")
class SafePickler(pickle.Pickler if PY2 else pickle._Pickler):
    """
    A pickler which can repickle object hierarchies containing objects created
    by SafeUnpickler.  Python's pickle implementation verifies (by object
    identity, not equality) that a class really lives at its advertised
    __module__/__name__, which fake classes can never satisfy, so class saving
    is overridden here to emit the GLOBAL opcode directly.
    """
    def save_global(self, obj, name=None, pack=struct.pack):
        if isinstance(obj, FakeClassType):
            global_ref = obj.__module__ + '\n' + obj.__name__ + '\n'
            if not isinstance(pickle.GLOBAL, str):
                # Python 3: the opcode and the stream are bytes, not str;
                # the original str concatenation raised TypeError there.
                global_ref = global_ref.encode("utf-8")
            self.write(pickle.GLOBAL + global_ref)
            self.memoize(obj)
            return
        # Defer to the base class actually used.  Python 3's
        # _Pickler.save_global takes no *pack* argument, and calling the C
        # pickle.Pickler.save_global on a _Pickler instance is invalid.
        if PY2:
            pickle.Pickler.save_global(self, obj, name, pack)
        else:
            pickle._Pickler.save_global(self, obj, name)
# the main API
def load(file, class_factory=None, encoding="bytes", errors="strict"):
    """
    Read a pickled object representation from the open binary :term:`file object`
    *file* and return the reconstituted object hierarchy, generating any missing
    class definitions at runtime.  Equivalent to ``FakeUnpickler(file).load()``.

    *class_factory* controls how missing class definitions are created; ``None``
    means ``FakeClassFactory((), FakeStrict)``.

    In Python 3, *encoding* and *errors* say how 8-bit string instances pickled
    by Python 2 should be handled: "bytes" loads them as bytes objects, anything
    else decodes them with the given codec settings.  *errors* now defaults to
    the valid handler name "strict" (the previous default, "errors", is not a
    registered codec error handler), matching FakeUnpickler itself.

    This function should only be used to unpickle trusted data.
    """
    return FakeUnpickler(file, class_factory, encoding=encoding, errors=errors).load()
def loads(string, class_factory=None, encoding="bytes", errors="strict"):
    """
    Similar to :func:`load`, but takes an 8-bit string (bytes in Python 3,
    str in Python 2) as its first argument instead of a binary
    :term:`file object`.  *errors* now defaults to the valid handler name
    "strict" rather than the invalid "errors".
    """
    return FakeUnpickler(StringIO(string), class_factory,
                         encoding=encoding, errors=errors).load()
def safe_load(file, class_factory=None, safe_modules=(), use_copyreg=False,
              encoding="bytes", errors="strict"):
    """
    Read a pickled object representation from the open binary :term:`file object`
    *file* and return the reconstituted object hierarchy, substituting fake
    classes for all class definitions.  Equivalent to
    ``SafeUnpickler(file).load()``.

    *class_factory* controls how the fake class definitions are created;
    ``None`` means ``FakeClassFactory((), FakeStrict)``.  *safe_modules* is a
    set of module name strings trusted enough to import objects from for real
    (this does not extend to their submodules).  *use_copyreg* allows
    extensions from the pickle extension registry (:mod:`copyreg`).

    In Python 3, *encoding* and *errors* say how 8-bit string instances pickled
    by Python 2 should be handled: "bytes" loads them as bytes objects, anything
    else decodes them with the given codec settings.  *errors* now defaults to
    the valid handler name "strict" rather than the invalid "errors".

    With the default *class_factory*, an empty *safe_modules* and
    ``use_copyreg=False`` this function can safely unpickle untrusted data.
    """
    return SafeUnpickler(file, class_factory, safe_modules, use_copyreg,
                         encoding=encoding, errors=errors).load()
def safe_loads(string, class_factory=None, safe_modules=(), use_copyreg=False,
               encoding="bytes", errors="strict"):
    """
    Similar to :func:`safe_load`, but takes an 8-bit string (bytes in Python 3,
    str in Python 2) as its first argument instead of a binary
    :term:`file object`.  *errors* now defaults to the valid handler name
    "strict" rather than the invalid "errors".
    """
    return SafeUnpickler(StringIO(string), class_factory, safe_modules, use_copyreg,
                         encoding=encoding, errors=errors).load()
def safe_dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL):
    """Pickle *obj* to the open binary *file* via :class:`SafePickler`
    (convenience wrapper, analogous to :func:`pickle.dump`)."""
    SafePickler(file, protocol).dump(obj)
def safe_dumps(obj, protocol=pickle.HIGHEST_PROTOCOL):
    """Pickle *obj* via :class:`SafePickler` and return the byte string
    (convenience wrapper, analogous to :func:`pickle.dumps`)."""
    buffer = StringIO()
    SafePickler(buffer, protocol).dump(obj)
    return buffer.getvalue()
def fake_package(name):
    """
    Mount a fake package tree called *name* and return its root module.

    A :class:`FakePackageLoader` is installed at the front of
    :data:`sys.meta_path`, so importing *name* or anything below it yields
    :class:`FakePackage` instances whose attributes lazily extend the tree.
    These compare properly with :class:`FakeClassType` instances, so code can
    pretend the modules and their attributes really exist.

    Mounting is idempotent: if a fake package *name* is already registered,
    the existing one is returned and no second loader is installed.
    """
    existing = sys.modules.get(name)
    if isinstance(existing, FakePackage):
        return existing
    sys.meta_path.insert(0, FakePackageLoader(name))
    return __import__(name)
def remove_fake_package(name):
    """
    Unmount the fake package tree *name*.

    Any :class:`FakePackageLoader` rooted at *name* is removed from
    :data:`sys.meta_path`, then the package and all of its fake submodules
    are unregistered from :data:`sys.modules` and detached from each other.
    Objects still referenced from user code stay alive, but the import
    machinery forgets them entirely.

    Raises :exc:`ValueError` if no fake package *name* is mounted.
    """
    package = sys.modules.get(name, None)
    if package is None:
        raise ValueError("No fake package with the name {0} found".format(name))
    if not isinstance(package, FakePackage):
        raise ValueError("The module {0} is not a fake package".format(name))

    # Drop our import hook(s) for this root so future imports are real again.
    for loader in [hook for hook in sys.meta_path
                   if isinstance(hook, FakePackageLoader) and hook.root == name]:
        sys.meta_path.remove(loader)

    # Recursively unregister the package and its submodule tree.
    package._remove()
| 41.177778
| 122
| 0.673862
|
import sys
PY3 = sys.version_info >= (3, 0)
PY2 = not PY3
import types
import pickle
import struct
if PY3:
from io import BytesIO as StringIO
else:
from cStringIO import StringIO
__all__ = [
"load", "loads", "safe_load", "safe_loads", "safe_dump", "safe_dumps",
"fake_package", "remove_fake_package",
"FakeModule", "FakePackage", "FakePackageLoader",
"FakeClassType", "FakeClassFactory",
"FakeClass", "FakeStrict", "FakeWarning", "FakeIgnore",
"FakeUnpicklingError", "FakeUnpickler", "SafeUnpickler",
"SafePickler"
]
class FakeClassType(type):
    """
    Metaclass for fake classes. Instances compare equal (and hash equal)
    to any class-like object that has the same ``__module__`` and
    ``__name__``, and isinstance/issubclass checks walk the other class's
    bases using that same name-based equality, so a fake class matches the
    real class it stands in for.
    """
    def __new__(cls, name, bases, attributes, module=None):
        # Drop any __qualname__ picked up from a class statement so only
        # name and module identify the class.
        attributes.pop("__qualname__", None)
        # note that if no module is explicitly passed, the current module will be chosen
        # due to the class statement implicitly specifying __module__ as __name__
        if module is not None:
            attributes["__module__"] = module
        if "__module__" not in attributes:
            raise TypeError("No module has been specified for FakeClassType {0}".format(name))
        # assemble instance
        return type.__new__(cls, name, bases, attributes)
    def __init__(self, name, bases, attributes, module=None):
        # *module* was already consumed by __new__; delegate the rest.
        type.__init__(self, name, bases, attributes)
    # comparison logic: equality is purely by dotted name
    def __eq__(self, other):
        if not hasattr(other, "__name__"):
            return False
        if hasattr(other, "__module__"):
            return self.__module__ == other.__module__ and self.__name__ == other.__name__
        else:
            # Fall back to comparing against a fully qualified __name__.
            return self.__module__ + "." + self.__name__ == other.__name__
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        # Must stay consistent with __eq__: hash the dotted name.
        return hash(self.__module__ + "." + self.__name__)
    def __instancecheck__(self, instance):
        return self.__subclasscheck__(instance.__class__)
    def __subclasscheck__(self, subclass):
        # Recursive walk of the candidate's bases using name-based equality.
        return (self == subclass or
                (bool(subclass.__bases__) and
                 any(self.__subclasscheck__(base) for base in subclass.__bases__)))
# PY2 doesn't like the PY3 way of metaclasses and PY3 doesn't support the PY2 way
# so we call the metaclass directly
FakeClass = FakeClassType("FakeClass", (), {"__doc__": """
A barebones instance of :class:`FakeClassType`. Inherit from this to create fake classes.
"""}, module=__name__)
class FakeStrict(FakeClass, object):
    """
    Fake class that raises :exc:`FakeUnpicklingError` when it is
    constructed with any arguments or when ``__setstate__`` receives
    anything other than a plain state dict (or the standard
    ``(state, slotstate)`` pair of dict-or-None values).
    """
    def __new__(cls, *args, **kwargs):
        self = FakeClass.__new__(cls)
        if args or kwargs:
            raise FakeUnpicklingError("{0} was instantiated with unexpected arguments {1}, {2}".format(cls, args, kwargs))
        return self
    def __setstate__(self, state):
        # Detect the (state, slotstate) two-tuple form where both entries
        # are either None or dicts.
        slotstate = None
        if (isinstance(state, tuple) and len(state) == 2 and
            (state[0] is None or isinstance(state[0], dict)) and
            (state[1] is None or isinstance(state[1], dict))):
            state, slotstate = state
        if state:
            # Don't have to check for slotstate here since it's either None or a dict
            if not isinstance(state, dict):
                raise FakeUnpicklingError("{0}.__setstate__() got unexpected arguments {1}".format(self.__class__, state))
            else:
                self.__dict__.update(state)
        if slotstate:
            self.__dict__.update(slotstate)
class FakeWarning(FakeClass, object):
    """
    Fake class that prints a warning instead of raising when it sees
    unexpected input. Unexpected positional constructor arguments are kept
    in ``_new_args`` and an unexpected (non-dict) ``__setstate__`` payload
    in ``_setstate_args``.
    """
    def __new__(cls, *args, **kwargs):
        self = FakeClass.__new__(cls)
        if args or kwargs:
            print("{0} was instantiated with unexpected arguments {1}, {2}".format(cls, args, kwargs))
            # Only positional args are stored; kwargs are just reported.
            self._new_args = args
        return self
    def __setstate__(self, state):
        # Detect the (state, slotstate) two-tuple form where both entries
        # are either None or dicts.
        slotstate = None
        if (isinstance(state, tuple) and len(state) == 2 and
            (state[0] is None or isinstance(state[0], dict)) and
            (state[1] is None or isinstance(state[1], dict))):
            state, slotstate = state
        if state:
            # Don't have to check for slotstate here since it's either None or a dict
            if not isinstance(state, dict):
                print("{0}.__setstate__() got unexpected arguments {1}".format(self.__class__, state))
                self._setstate_args = state
            else:
                self.__dict__.update(state)
        if slotstate:
            self.__dict__.update(slotstate)
class FakeIgnore(FakeClass, object):
    """
    Fake class that silently accepts anything: unexpected positional
    arguments, keyword arguments and non-dict ``__setstate__`` payloads
    are stashed on the instance (``_new_args``, ``_new_kwargs``,
    ``_setstate_args``) instead of raising or warning.
    """
    def __new__(cls, *args, **kwargs):
        self = FakeClass.__new__(cls)
        if args:
            self._new_args = args
        if kwargs:
            self._new_kwargs = kwargs
        return self
    def __setstate__(self, state):
        # Detect the (state, slotstate) two-tuple form where both entries
        # are either None or dicts.
        slotstate = None
        if (isinstance(state, tuple) and len(state) == 2 and
            (state[0] is None or isinstance(state[0], dict)) and
            (state[1] is None or isinstance(state[1], dict))):
            state, slotstate = state
        if state:
            # Don't have to check for slotstate here since it's either None or a dict
            if not isinstance(state, dict):
                self._setstate_args = state
            else:
                self.__dict__.update(state)
        if slotstate:
            self.__dict__.update(slotstate)
class FakeClassFactory(object):
    """
    Creates and caches fake classes by ``(module, name)``.

    *special_cases* is an iterable of classes that should be returned
    as-is for their own module/name pair; anything else gets a freshly
    synthesized subclass of *default_class*. Each distinct pair is built
    at most once.
    """
    def __init__(self, special_cases=(), default_class=FakeStrict):
        # Index the special-case classes by their (module, name) pair.
        self.special_cases = dict(((case.__module__, case.__name__), case)
                                  for case in special_cases)
        self.default = default_class
        self.class_cache = {}
    def __call__(self, name, module):
        """Return the fake class for *module*.*name*, creating it on first use."""
        key = (module, name)
        try:
            # Fast path: this pair was already built.
            return self.class_cache[key]
        except KeyError:
            pass
        klass = self.special_cases.get(key, None)
        if not klass:
            # No special case registered; synthesize a default subclass.
            klass = type(name, (self.default,), {"__module__": module})
        self.class_cache[key] = klass
        return klass
class FakeModule(types.ModuleType):
    """
    A fake module that registers itself in :data:`sys.modules` when
    created and links itself as an attribute of its parent package,
    faking the parent too when it cannot be imported. Equality and
    hashing are by dotted name, so a fake module matches the real module
    it stands in for.
    """
    def __init__(self, name):
        super(FakeModule, self).__init__(name)
        sys.modules[name] = self
        if "." in name:
            # Ensure a parent exists (real or fake) and attach ourselves
            # to it under our last name component.
            parent_name, child_name = name.rsplit(".", 1)
            try:
                __import__(parent_name)
                parent = sys.modules[parent_name]
            except:
                parent = FakeModule(parent_name)
            setattr(parent, child_name, self)
    def __repr__(self):
        return "<module '{0}' (fake)>".format(self.__name__)
    def __str__(self):
        return self.__repr__()
    def __setattr__(self, name, value):
        # Overwriting a fake submodule with a non-module first unregisters
        # the fake submodule tree it roots.
        if (name in self.__dict__ and
            isinstance(self.__dict__[name], FakeModule) and not
            isinstance(value, FakeModule)):
            self.__dict__[name]._remove()
        self.__dict__[name] = value
    def __delattr__(self, name):
        # Deleting a fake submodule attribute unregisters its tree too.
        if isinstance(self.__dict__[name], FakeModule):
            self.__dict__[name]._remove()
        del self.__dict__[name]
    def _remove(self):
        # Recursively unregister this module and every fake submodule from
        # sys.modules, dropping the attribute links between them.
        for i in tuple(self.__dict__.keys()):
            if isinstance(self.__dict__[i], FakeModule):
                self.__dict__[i]._remove()
                del self.__dict__[i]
        del sys.modules[self.__name__]
    def __eq__(self, other):
        # Name-based equality, mirroring FakeClassType.__eq__.
        if not hasattr(other, "__name__"):
            return False
        othername = other.__name__
        if hasattr(other, "__module__"):
            othername = other.__module__ + "." + other.__name__
        return self.__name__ == othername
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        # Consistent with __eq__: hash the dotted name.
        return hash(self.__name__)
    def __instancecheck__(self, instance):
        return self.__subclasscheck__(instance.__class__)
    def __subclasscheck__(self, subclass):
        # Recursive walk of the candidate's bases using name-based equality.
        return (self == subclass or
                (bool(subclass.__bases__) and
                 any(self.__subclasscheck__(base) for base in subclass.__bases__)))
class FakePackage(FakeModule):
    """
    A fake module that also looks like a package (it carries a
    ``__path__``). Attribute access creates submodules on demand: a real
    module is imported when possible, otherwise a new :class:`FakePackage`
    is created and registered under the dotted name.
    """
    # Presence of __path__ is what marks this module as a package.
    __path__ = []
    def __call__(self, *args, **kwargs):
        # Calling a package makes no sense; fail with a clear message.
        raise TypeError("'{0}' FakePackage object is not callable".format(self.__name__))
    def __getattr__(self, name):
        modname = self.__name__ + "." + name
        mod = sys.modules.get(modname, None)
        if mod is None:
            # Prefer a real module when one can be imported; fall back to
            # generating a fake subpackage.
            try:
                __import__(modname)
            except:
                mod = FakePackage(modname)
            else:
                mod = sys.modules[modname]
        return mod
class FakePackageLoader(object):
    """
    Import hook that serves :class:`FakePackage` instances for *root* and
    every dotted name underneath it.
    """
    def __init__(self, root):
        # Top-level package name this loader is responsible for.
        self.root = root
    def find_module(self, fullname, path=None):
        """Claim *fullname* when it is *root* itself or any submodule of it."""
        in_tree = (fullname == self.root or
                   fullname.startswith(self.root + "."))
        return self if in_tree else None
    def load_module(self, fullname):
        """Create (and thereby register) a fresh fake package for *fullname*."""
        return FakePackage(fullname)
class FakeUnpicklingError(pickle.UnpicklingError):
    """
    Raised when a fake class receives constructor arguments or state it
    cannot accept; subclasses :exc:`pickle.UnpicklingError` so existing
    pickle error handling still catches it.
    """
class FakeUnpickler(pickle.Unpickler if PY2 else pickle._Unpickler):
    """
    Unpickler that never fails on a missing global: any module that cannot
    be imported becomes a :class:`FakeModule`, and any missing attribute
    on it becomes a fake class produced by *class_factory*.
    """
    if PY2:
        def __init__(self, file, class_factory=None, encoding="bytes", errors="strict"):
            # PY2's Unpickler takes no encoding/errors; they are accepted
            # here only for signature parity with the PY3 branch.
            pickle.Unpickler.__init__(self, file,)
            self.class_factory = class_factory or FakeClassFactory()
    else:
        def __init__(self, file, class_factory=None, encoding="bytes", errors="strict"):
            # Uses the pure-Python _Unpickler on PY3.
            super().__init__(file, fix_imports=False, encoding=encoding, errors=errors)
            self.class_factory = class_factory or FakeClassFactory()
    def find_class(self, module, name):
        """Resolve *module*.*name*, faking either part when it is missing."""
        mod = sys.modules.get(module, None)
        if mod is None:
            try:
                __import__(module)
            except:
                mod = FakeModule(module)
            else:
                mod = sys.modules[module]
        klass = getattr(mod, name, None)
        if klass is None or isinstance(klass, FakeModule):
            # Missing attribute (or a generated submodule standing where a
            # class should be): replace it with a fake class.
            klass = self.class_factory(name, module)
            setattr(mod, name, klass)
        return klass
class SafeUnpickler(FakeUnpickler):
    """
    Unpickler that only imports modules listed in *safe_modules*; every
    other global reference is replaced by a fake class from
    *class_factory*, which makes inspecting untrusted pickle streams
    safer than a plain Unpickler.
    """
    def __init__(self, file, class_factory=None, safe_modules=(),
                 use_copyreg=False, encoding="bytes", errors="strict"):
        FakeUnpickler.__init__(self, file, class_factory, encoding=encoding, errors=errors)
        # Set for fast, exact membership checks.
        self.safe_modules = set(safe_modules)
        self.use_copyreg = use_copyreg
    def find_class(self, module, name):
        """Import *module*.*name* only when the module is whitelisted."""
        if module in self.safe_modules:
            __import__(module)
            mod = sys.modules[module]
            klass = getattr(mod, name)
            return klass
        else:
            return self.class_factory(name, module)
    def get_extension(self, code):
        """Resolve copyreg extension codes; fake them unless *use_copyreg* is set."""
        if self.use_copyreg:
            return FakeUnpickler.get_extension(self, code)
        else:
            return self.class_factory("extension_code_{0}".format(code), "copyreg")
class SafePickler(pickle.Pickler if PY2 else pickle._Pickler):
    """
    Pickler that serializes :class:`FakeClassType` classes as GLOBAL
    opcodes referencing their original module and name, so fake classes
    round-trip back to the classes they stand in for.
    """
    def save_global(self, obj, name=None, pack=struct.pack):
        # NOTE(review): pickle.GLOBAL is a bytes object on Python 3, so the
        # str concatenation below looks PY2-oriented -- verify before
        # relying on this path under PY3.
        if isinstance(obj, FakeClassType):
            self.write(pickle.GLOBAL + obj.__module__ + '\n' + obj.__name__ + '\n')
            self.memoize(obj)
            return
        pickle.Pickler.save_global(self, obj, name, pack)
def load(file, class_factory=None, encoding="bytes", errors="strict"):
    """
    Unpickle the open file object *file* with a :class:`FakeUnpickler`
    (missing classes become fakes from *class_factory*) and return the
    result.

    BUGFIX: the *errors* default used to be the typo ``"errors"``, which
    is not a valid codecs error handler; ``"strict"`` matches the
    FakeUnpickler/SafeUnpickler constructor defaults.
    """
    return FakeUnpickler(file, class_factory, encoding=encoding, errors=errors).load()
def loads(string, class_factory=None, encoding="bytes", errors="strict"):
    """Like :func:`load` but reads the pickle data from *string*."""
    return FakeUnpickler(StringIO(string), class_factory,
                         encoding=encoding, errors=errors).load()
def safe_load(file, class_factory=None, safe_modules=(), use_copyreg=False,
              encoding="bytes", errors="strict"):
    """
    Unpickle *file* with a :class:`SafeUnpickler`: only modules listed in
    *safe_modules* are imported, everything else becomes a fake class
    from *class_factory*.
    """
    return SafeUnpickler(file, class_factory, safe_modules, use_copyreg,
                         encoding=encoding, errors=errors).load()
def safe_loads(string, class_factory=None, safe_modules=(), use_copyreg=False,
               encoding="bytes", errors="strict"):
    """Like :func:`safe_load` but reads the pickle data from *string*."""
    return SafeUnpickler(StringIO(string), class_factory, safe_modules, use_copyreg,
                         encoding=encoding, errors=errors).load()
def safe_dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL):
    """Pickle *obj* to the open file object *file* using a :class:`SafePickler`."""
    SafePickler(file, protocol).dump(obj)
def safe_dumps(obj, protocol=pickle.HIGHEST_PROTOCOL):
    """Pickle *obj* with a :class:`SafePickler` and return the pickle data."""
    buffer = StringIO()
    SafePickler(buffer, protocol).dump(obj)
    return buffer.getvalue()
def fake_package(name):
    """
    Mounts a fake package tree named *name* by inserting a
    :class:`FakePackageLoader` at the front of :data:`sys.meta_path`, so
    importing *name* or any dotted name under it succeeds and yields
    :class:`FakePackage` instances generated on demand.

    If a fake package tree with the same *name* is already registered, no
    new loader is mounted. Returns the :class:`FakePackage` instance
    *name*.
    """
    if name in sys.modules and isinstance(sys.modules[name], FakePackage):
        # Already mounted; reuse the existing tree.
        return sys.modules[name]
    else:
        loader = FakePackageLoader(name)
        sys.meta_path.insert(0, loader)
        return __import__(name)
def remove_fake_package(name):
    """
    Removes the fake package tree mounted at *name*: its loaders are
    dropped from :data:`sys.meta_path` and the :class:`FakePackage` tree
    is unregistered from :data:`sys.modules`.

    Raises :exc:`ValueError` if no fake package tree *name* exists.
    """
    # Look the package up via its sys.modules entry.
    package = sys.modules.get(name, None)
    if package is None:
        raise ValueError("No fake package with the name {0} found".format(name))
    if not isinstance(package, FakePackage):
        raise ValueError("The module {0} is not a fake package".format(name))
    # Remove every loader rooted at *name* from sys.meta_path.
    loaders = [i for i in sys.meta_path if isinstance(i, FakePackageLoader) and i.root == name]
    for loader in loaders:
        sys.meta_path.remove(loader)
    # Recursively unregister the package and its generated submodules.
    package._remove()
| true
| true
|
f7163f09627011e16f710ab47f0b28e594f76ff4
| 466
|
py
|
Python
|
livebot/migrations/0012_auto_20170731_2013.py
|
bsquidwrd/Live-Bot
|
f28f028ddc371b86e19df6f603aa3b14bab93533
|
[
"MIT"
] | 1
|
2019-02-27T10:38:46.000Z
|
2019-02-27T10:38:46.000Z
|
livebot/migrations/0012_auto_20170731_2013.py
|
bsquidwrd/Live-Bot
|
f28f028ddc371b86e19df6f603aa3b14bab93533
|
[
"MIT"
] | 21
|
2017-08-03T01:01:31.000Z
|
2020-06-05T18:02:20.000Z
|
livebot/migrations/0012_auto_20170731_2013.py
|
bsquidwrd/Live-Bot
|
f28f028ddc371b86e19df6f603aa3b14bab93533
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-01 03:13
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: set verbose_name/verbose_name_plural on the
    notification model's Meta options (no schema change)."""
    dependencies = [
        ('livebot', '0011_auto_20170731_2011'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='notification',
            options={'verbose_name': 'Notification', 'verbose_name_plural': 'Notifications'},
        ),
    ]
| 23.3
| 93
| 0.643777
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('livebot', '0011_auto_20170731_2011'),
]
operations = [
migrations.AlterModelOptions(
name='notification',
options={'verbose_name': 'Notification', 'verbose_name_plural': 'Notifications'},
),
]
| true
| true
|
f7163fa0dee4221d901279b0ac5dae6527bc823c
| 4,872
|
py
|
Python
|
model_zoo/official/nlp/lstm/eval.py
|
taroxd/mindspore
|
9bb620ff2caaac7f1c53c4b104935f22352cb88f
|
[
"Apache-2.0"
] | null | null | null |
model_zoo/official/nlp/lstm/eval.py
|
taroxd/mindspore
|
9bb620ff2caaac7f1c53c4b104935f22352cb88f
|
[
"Apache-2.0"
] | null | null | null |
model_zoo/official/nlp/lstm/eval.py
|
taroxd/mindspore
|
9bb620ff2caaac7f1c53c4b104935f22352cb88f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
#################train lstm example on aclImdb########################
"""
import argparse
import os
import numpy as np
from src.config import lstm_cfg as cfg, lstm_cfg_ascend
from src.dataset import lstm_create_dataset, convert_to_mindrecord
from src.lr_schedule import get_lr
from src.lstm import SentimentNet
from mindspore import Tensor, nn, Model, context
from mindspore.nn import Accuracy
from mindspore.train.callback import LossMonitor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
if __name__ == '__main__':
    # CLI entry point: load a SentimentNet checkpoint, run it over the
    # evaluation dataset and print the resulting accuracy dict.
    parser = argparse.ArgumentParser(description='MindSpore LSTM Example')
    parser.add_argument('--preprocess', type=str, default='false', choices=['true', 'false'],
                        help='whether to preprocess data.')
    parser.add_argument('--aclimdb_path', type=str, default="./aclImdb",
                        help='path where the dataset is stored.')
    parser.add_argument('--glove_path', type=str, default="./glove",
                        help='path where the GloVe is stored.')
    parser.add_argument('--preprocess_path', type=str, default="./preprocess",
                        help='path where the pre-process data is stored.')
    parser.add_argument('--ckpt_path', type=str, default=None,
                        help='the checkpoint file path used to evaluate model.')
    parser.add_argument('--device_target', type=str, default="Ascend", choices=['GPU', 'CPU', 'Ascend'],
                        help='the target device to run, support "GPU", "CPU". Default: "Ascend".')
    args = parser.parse_args()
    context.set_context(
        mode=context.GRAPH_MODE,
        save_graphs=False,
        device_target=args.device_target)
    # Ascend uses its own config; GPU/CPU share the default one.
    if args.device_target == 'Ascend':
        cfg = lstm_cfg_ascend
    else:
        cfg = lstm_cfg
    if args.preprocess == "true":
        print("============== Starting Data Pre-processing ==============")
        convert_to_mindrecord(cfg.embed_size, args.aclimdb_path, args.preprocess_path, args.glove_path)
    # Embedding matrix produced by the preprocessing step.
    embedding_table = np.loadtxt(os.path.join(args.preprocess_path, "weight.txt")).astype(np.float32)
    # DynamicRNN in this network on Ascend platform only support the condition that the shape of input_size
    # and hiddle_size is multiples of 16, this problem will be solved later.
    if args.device_target == 'Ascend':
        pad_num = int(np.ceil(cfg.embed_size / 16) * 16 - cfg.embed_size)
        if pad_num > 0:
            embedding_table = np.pad(embedding_table, [(0, 0), (0, pad_num)], 'constant')
        cfg.embed_size = int(np.ceil(cfg.embed_size / 16) * 16)
    network = SentimentNet(vocab_size=embedding_table.shape[0],
                           embed_size=cfg.embed_size,
                           num_hiddens=cfg.num_hiddens,
                           num_layers=cfg.num_layers,
                           bidirectional=cfg.bidirectional,
                           num_classes=cfg.num_classes,
                           weight=Tensor(embedding_table),
                           batch_size=cfg.batch_size)
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    ds_eval = lstm_create_dataset(args.preprocess_path, cfg.batch_size, training=False)
    # Learning rate: precomputed dynamic schedule or a constant value.
    # (opt/lr are required to build Model, although only eval() runs here.)
    if cfg.dynamic_lr:
        lr = Tensor(get_lr(global_step=cfg.global_step,
                           lr_init=cfg.lr_init, lr_end=cfg.lr_end, lr_max=cfg.lr_max,
                           warmup_epochs=cfg.warmup_epochs,
                           total_epochs=cfg.num_epochs,
                           steps_per_epoch=ds_eval.get_dataset_size(),
                           lr_adjust_epoch=cfg.lr_adjust_epoch))
    else:
        lr = cfg.learning_rate
    opt = nn.Momentum(network.trainable_params(), lr, cfg.momentum)
    loss_cb = LossMonitor()
    model = Model(network, loss, opt, {'acc': Accuracy()})
    print("============== Starting Testing ==============")
    # Restore the trained weights into the freshly built network.
    param_dict = load_checkpoint(args.ckpt_path)
    load_param_into_net(network, param_dict)
    # CPU does not use dataset sinking; other targets keep the default.
    if args.device_target == "CPU":
        acc = model.eval(ds_eval, dataset_sink_mode=False)
    else:
        acc = model.eval(ds_eval)
    print("============== {} ==============".format(acc))
| 46.4
| 107
| 0.631773
|
import argparse
import os
import numpy as np
from src.config import lstm_cfg as cfg, lstm_cfg_ascend
from src.dataset import lstm_create_dataset, convert_to_mindrecord
from src.lr_schedule import get_lr
from src.lstm import SentimentNet
from mindspore import Tensor, nn, Model, context
from mindspore.nn import Accuracy
from mindspore.train.callback import LossMonitor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='MindSpore LSTM Example')
parser.add_argument('--preprocess', type=str, default='false', choices=['true', 'false'],
help='whether to preprocess data.')
parser.add_argument('--aclimdb_path', type=str, default="./aclImdb",
help='path where the dataset is stored.')
parser.add_argument('--glove_path', type=str, default="./glove",
help='path where the GloVe is stored.')
parser.add_argument('--preprocess_path', type=str, default="./preprocess",
help='path where the pre-process data is stored.')
parser.add_argument('--ckpt_path', type=str, default=None,
help='the checkpoint file path used to evaluate model.')
parser.add_argument('--device_target', type=str, default="Ascend", choices=['GPU', 'CPU', 'Ascend'],
help='the target device to run, support "GPU", "CPU". Default: "Ascend".')
args = parser.parse_args()
context.set_context(
mode=context.GRAPH_MODE,
save_graphs=False,
device_target=args.device_target)
if args.device_target == 'Ascend':
cfg = lstm_cfg_ascend
else:
cfg = lstm_cfg
if args.preprocess == "true":
print("============== Starting Data Pre-processing ==============")
convert_to_mindrecord(cfg.embed_size, args.aclimdb_path, args.preprocess_path, args.glove_path)
embedding_table = np.loadtxt(os.path.join(args.preprocess_path, "weight.txt")).astype(np.float32)
if args.device_target == 'Ascend':
pad_num = int(np.ceil(cfg.embed_size / 16) * 16 - cfg.embed_size)
if pad_num > 0:
embedding_table = np.pad(embedding_table, [(0, 0), (0, pad_num)], 'constant')
cfg.embed_size = int(np.ceil(cfg.embed_size / 16) * 16)
network = SentimentNet(vocab_size=embedding_table.shape[0],
embed_size=cfg.embed_size,
num_hiddens=cfg.num_hiddens,
num_layers=cfg.num_layers,
bidirectional=cfg.bidirectional,
num_classes=cfg.num_classes,
weight=Tensor(embedding_table),
batch_size=cfg.batch_size)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
ds_eval = lstm_create_dataset(args.preprocess_path, cfg.batch_size, training=False)
if cfg.dynamic_lr:
lr = Tensor(get_lr(global_step=cfg.global_step,
lr_init=cfg.lr_init, lr_end=cfg.lr_end, lr_max=cfg.lr_max,
warmup_epochs=cfg.warmup_epochs,
total_epochs=cfg.num_epochs,
steps_per_epoch=ds_eval.get_dataset_size(),
lr_adjust_epoch=cfg.lr_adjust_epoch))
else:
lr = cfg.learning_rate
opt = nn.Momentum(network.trainable_params(), lr, cfg.momentum)
loss_cb = LossMonitor()
model = Model(network, loss, opt, {'acc': Accuracy()})
print("============== Starting Testing ==============")
param_dict = load_checkpoint(args.ckpt_path)
load_param_into_net(network, param_dict)
if args.device_target == "CPU":
acc = model.eval(ds_eval, dataset_sink_mode=False)
else:
acc = model.eval(ds_eval)
print("============== {} ==============".format(acc))
| true
| true
|
f7163ffb1d48a4c69218cceb0d24ae429e4459db
| 2,248
|
py
|
Python
|
specifications/auto_grader.py
|
fusiongyro/contest-automata
|
388a38baae98bbb8dc9f70e7a428f3b60e7380bb
|
[
"BSD-3-Clause"
] | null | null | null |
specifications/auto_grader.py
|
fusiongyro/contest-automata
|
388a38baae98bbb8dc9f70e7a428f3b60e7380bb
|
[
"BSD-3-Clause"
] | null | null | null |
specifications/auto_grader.py
|
fusiongyro/contest-automata
|
388a38baae98bbb8dc9f70e7a428f3b60e7380bb
|
[
"BSD-3-Clause"
] | null | null | null |
##
# This is the auto-grader for the project.
##
import sys, shutil, os, dfaParser
##
# Given the teamname and the input (DFA) file name, converts user input
# file into haskell file and execute the file. The output is piped
# to out.dat.
#
# NOTE(review): the shell commands are built by string concatenation from
# user-supplied names; a malicious teamname/filename could inject shell
# commands. Consider subprocess with an argument list instead.
##
def compileRun(teamname, dfa):
    os.system("python dfaParser.py " + dfa)
    os.system("ghc " + teamname + ".nfa.hs")
    os.system("./" + teamname + ".nfa > out.dat")
##
# Checks if the output from the users DFA is correct by comparing it
# with the solution file. (Script available but no way to know teamname
# unless with a 3rd command-line arg)
#
# The diff output lands in result.<teamname>.dat; an empty diff means the
# submission's output matches the solution exactly.
##
def checkAnswer(teamname):
    #Output the difference between the solution and the output from the produced NFA
    resultFile = "result." + teamname + ".dat"
    os.system("diff out.dat solution > " + resultFile)
    #If the result file is empty (its size is 0) the given DFA accepts the input string
    if os.stat(resultFile).st_size == 0:
        print "Correct!"
    else:
        print "Incorrect!"
##
# Parses the input file from the user and returns the teamname as a string.
# The name is taken to be the second dot-separated component of the file
# name (split on at most 3 dots).
##
def parseTeamName(filename):
    # Renamed the local so it no longer shadows the builtin "list".
    parts = filename.split(".", 3)
    return parts[1]
if __name__ == "__main__":
    ##
    # The script is called with 3 arguments. These args are as follows:
    # python auto_grader.py <submission dir> <problem number> <filename>
    # Example: python auto_grader.py 1 21 xor.nfa
    #
    submissionDir = sys.argv[1]
    problemNum = sys.argv[2]
    dfaCode = sys.argv[3]
    #Parse teamname from input file name
    teamname = parseTeamName(dfaCode)
    #Construct the path where solution resides
    solutionPath = os.getcwd() + "/" + problemNum + "/solution"
    #Construct the destination path for copy
    newSubDir = os.getcwd() + "/Submissions/" + submissionDir + "/"
    #Construct the input directory path
    inputDir = os.getcwd() + "/" + problemNum + "/" + "input"
    #Copy the submission, the input and the parser into the submission dir
    shutil.copy(dfaCode, newSubDir)
    shutil.copy(inputDir, newSubDir)
    shutil.copy("dfaParser.py", newSubDir)
    shutil.copy("dfaParser.pyc", newSubDir)
    #Change the current directory to the submission directory.
    #NOTE: this persists for the rest of the process; compileRun and
    #checkAnswer rely on running inside the submission directory.
    os.chdir(newSubDir)
    compileRun(teamname, dfaCode)
    #Copy over solution of the problem
    shutil.copy(solutionPath, newSubDir)
    checkAnswer(teamname)
| 27.084337
| 84
| 0.702847
|
import sys, shutil, os, dfaParser
def compileRun(teamname, dfa):
os.system("python dfaParser.py " + dfa)
os.system("ghc " + teamname + ".nfa.hs")
os.system("./" + teamname + ".nfa > out.dat")
def checkAnswer(teamname):
resultFile = "result." + teamname + ".dat"
os.system("diff out.dat solution > " + resultFile)
if os.stat(resultFile).st_size == 0:
print "Correct!"
else:
print "Incorrect!"
def parseTeamName(filename):
list = filename.split(".", 3)
return list[1]
if __name__ == "__main__":
submissionDir = sys.argv[1]
problemNum = sys.argv[2]
dfaCode = sys.argv[3]
teamname = parseTeamName(dfaCode)
solutionPath = os.getcwd() + "/" + problemNum + "/solution"
newSubDir = os.getcwd() + "/Submissions/" + submissionDir + "/"
inputDir = os.getcwd() + "/" + problemNum + "/" + "input"
shutil.copy(dfaCode, newSubDir)
shutil.copy(inputDir, newSubDir)
shutil.copy("dfaParser.py", newSubDir)
shutil.copy("dfaParser.pyc", newSubDir)
os.chdir(newSubDir)
compileRun(teamname, dfaCode)
shutil.copy(solutionPath, newSubDir)
checkAnswer(teamname)
| false
| true
|
f71640025ffae92102ad3de901450b81ff6e14af
| 2,741
|
py
|
Python
|
examples/charts/file/hover_span.py
|
pyjsdev/googlemap_flask
|
9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5
|
[
"BSD-3-Clause"
] | 2
|
2019-05-24T14:07:33.000Z
|
2019-05-24T14:36:19.000Z
|
examples/charts/file/hover_span.py
|
pyjsdev/googlemap_flask
|
9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5
|
[
"BSD-3-Clause"
] | null | null | null |
examples/charts/file/hover_span.py
|
pyjsdev/googlemap_flask
|
9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5
|
[
"BSD-3-Clause"
] | 1
|
2021-09-09T03:33:04.000Z
|
2021-09-09T03:33:04.000Z
|
import pandas as pd
from bokeh.charts import Line, Scatter, show, output_file, defaults
from bokeh.layouts import gridplot
from bokeh.models import HoverTool
from bokeh.sampledata.degrees import data
# Shared chart dimensions for every plot below.
defaults.width = 500
defaults.height = 300
# Toolbar shared by all charts; TOOLTIPS rows label the hovered values
# ("$~y"/"$~x" presumably reference the hovered point's coordinates --
# see the HoverTool field-name docs to confirm).
TOOLS='box_zoom,box_select,hover,crosshair,reset'
TOOLTIPS = [ ("y", "$~y"), ("x", "$~x") ]
# Reshape the degrees sample data to long form: one (Year, Degree, Count)
# row per observation, keeping three degree programs.
data = data[['Biology', 'Business', 'Computer Science', "Year"]]
data = pd.melt(data, id_vars=['Year'],
               value_vars=['Biology', 'Business', 'Computer Science'],
               value_name='Count', var_name='Degree')
# One chart per hover mode / line policy combination demonstrated below.
vline = Line(data, y='Count', color='Degree', title="Lines VLine", ylabel='measures',
             tools=TOOLS)
hline = Line(data, y='Count', color='Degree', title="Lines HLine",
             ylabel='measures', tools=TOOLS)
int_vline = Line(data, y='Count', color='Degree', title="Lines VLine Interp",
                 ylabel='measures', tools=TOOLS)
int_hline = Line(data, y='Count', color='Degree', title="Lines HLine Interp",
                 ylabel='measures', tools=TOOLS)
scatter_point = Scatter(data, x='Year', y='Count', color='Degree',
                        title="Scatter mouse", ylabel='measures', legend=True,
                        tools=TOOLS)
scatter = Scatter(data, x='Year', y='Count', color='Degree',
                  title="Scatter V Line", ylabel='measures', legend=True, tools=TOOLS)
int_point_line = Line(data, x='Year', y='Count', color='Degree',
                      title="Lines Mouse Interp.", ylabel='measures', tools=TOOLS)
point_line = Line(data, x='Year', y='Count', color='Degree',
                  title="Lines Mouse", ylabel='measures', tools=TOOLS)
# Configure each chart's HoverTool: mode picks hit-testing by horizontal
# line, vertical line or mouse position; line_policy picks which point of
# a line the tooltip anchors to.
hhover = hline.select(HoverTool)
hhover.mode = 'hline'
hhover.line_policy = 'next'
vhover = vline.select(HoverTool)
vhover.mode = 'vline'
vhover.line_policy = 'nearest'
int_hhover = int_hline.select(HoverTool)
int_hhover.mode = 'hline'
int_hhover.line_policy = 'interp'
int_vhover = int_vline.select(HoverTool)
int_vhover.mode = 'vline'
int_vhover.line_policy = 'interp'
iphover = int_point_line.select(HoverTool)
iphover.mode = 'mouse'
iphover.line_policy = 'interp'
tphover = point_line.select(HoverTool)
tphover.mode = 'mouse'
shover = scatter.select(HoverTool)
shover.mode = 'vline'
shoverp = scatter_point.select(HoverTool)
shoverp.mode = 'mouse'
# set up tooltips (same rows on every chart)
int_vhover.tooltips = int_hhover.tooltips = TOOLTIPS
tphover.tooltips = iphover.tooltips = TOOLTIPS
shover.tooltips = shoverp.tooltips = TOOLTIPS
vhover.tooltips = hhover.tooltips = TOOLTIPS
# Write the output page and lay the eight charts out in a 2-column grid.
output_file("hover_span.html", title="hover_span.py example")
show(gridplot(hline, vline, int_hline, int_vline,
              int_point_line, point_line, scatter_point, scatter,
              ncols=2))
| 31.872093
| 86
| 0.676395
|
import pandas as pd
from bokeh.charts import Line, Scatter, show, output_file, defaults
from bokeh.layouts import gridplot
from bokeh.models import HoverTool
from bokeh.sampledata.degrees import data
defaults.width = 500
defaults.height = 300
TOOLS='box_zoom,box_select,hover,crosshair,reset'
TOOLTIPS = [ ("y", "$~y"), ("x", "$~x") ]
data = data[['Biology', 'Business', 'Computer Science', "Year"]]
data = pd.melt(data, id_vars=['Year'],
value_vars=['Biology', 'Business', 'Computer Science'],
value_name='Count', var_name='Degree')
vline = Line(data, y='Count', color='Degree', title="Lines VLine", ylabel='measures',
tools=TOOLS)
hline = Line(data, y='Count', color='Degree', title="Lines HLine",
ylabel='measures', tools=TOOLS)
int_vline = Line(data, y='Count', color='Degree', title="Lines VLine Interp",
ylabel='measures', tools=TOOLS)
int_hline = Line(data, y='Count', color='Degree', title="Lines HLine Interp",
ylabel='measures', tools=TOOLS)
scatter_point = Scatter(data, x='Year', y='Count', color='Degree',
title="Scatter mouse", ylabel='measures', legend=True,
tools=TOOLS)
scatter = Scatter(data, x='Year', y='Count', color='Degree',
title="Scatter V Line", ylabel='measures', legend=True, tools=TOOLS)
int_point_line = Line(data, x='Year', y='Count', color='Degree',
title="Lines Mouse Interp.", ylabel='measures', tools=TOOLS)
point_line = Line(data, x='Year', y='Count', color='Degree',
title="Lines Mouse", ylabel='measures', tools=TOOLS)
hhover = hline.select(HoverTool)
hhover.mode = 'hline'
hhover.line_policy = 'next'
vhover = vline.select(HoverTool)
vhover.mode = 'vline'
vhover.line_policy = 'nearest'
int_hhover = int_hline.select(HoverTool)
int_hhover.mode = 'hline'
int_hhover.line_policy = 'interp'
int_vhover = int_vline.select(HoverTool)
int_vhover.mode = 'vline'
int_vhover.line_policy = 'interp'
iphover = int_point_line.select(HoverTool)
iphover.mode = 'mouse'
iphover.line_policy = 'interp'
tphover = point_line.select(HoverTool)
tphover.mode = 'mouse'
shover = scatter.select(HoverTool)
shover.mode = 'vline'
shoverp = scatter_point.select(HoverTool)
shoverp.mode = 'mouse'
int_vhover.tooltips = int_hhover.tooltips = TOOLTIPS
tphover.tooltips = iphover.tooltips = TOOLTIPS
shover.tooltips = shoverp.tooltips = TOOLTIPS
vhover.tooltips = hhover.tooltips = TOOLTIPS
output_file("hover_span.html", title="hover_span.py example")
show(gridplot(hline, vline, int_hline, int_vline,
int_point_line, point_line, scatter_point, scatter,
ncols=2))
| true
| true
|
f716411cdba7b9542f41d46f5963695af627aed2
| 732
|
py
|
Python
|
integrationtest/vm/vm_offering/test_hotplugin_memory_c7.py
|
bgerxx/woodpecker
|
fdc51245945cc9be4d1f028988079213eb99b2ad
|
[
"Apache-2.0"
] | null | null | null |
integrationtest/vm/vm_offering/test_hotplugin_memory_c7.py
|
bgerxx/woodpecker
|
fdc51245945cc9be4d1f028988079213eb99b2ad
|
[
"Apache-2.0"
] | null | null | null |
integrationtest/vm/vm_offering/test_hotplugin_memory_c7.py
|
bgerxx/woodpecker
|
fdc51245945cc9be4d1f028988079213eb99b2ad
|
[
"Apache-2.0"
] | null | null | null |
'''
@author: FangSun
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import functools
# Harness configuration: per-case timeout and no parallel execution.
_config_ = {
        'timeout' : 1000,
        'noparallel' : True
        }
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
'''
def test()
This document sting is a dirty solution to find test case
'''
# Test case built by partial application of the shared vm_offering_testcase:
# add_memory=True/add_cpu=False with need_online=True exercises memory
# hot-plug on a running VM (image "imageName_i_c7" -- presumably CentOS 7,
# per the file name; confirm against the image catalog).
test = functools.partial(test_stub.vm_offering_testcase,
                         tbj=test_obj_dict,
                         test_image_name="imageName_i_c7",
                         add_cpu=False,
                         add_memory=True,
                         need_online=True)
def error_cleanup():
    # Tear down any resources tracked in test_obj_dict after a failure.
    test_lib.lib_error_cleanup(test_obj_dict)
| 23.612903
| 58
| 0.625683
|
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import functools
_config_ = {
'timeout' : 1000,
'noparallel' : True
}
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
test = functools.partial(test_stub.vm_offering_testcase,
tbj=test_obj_dict,
test_image_name="imageName_i_c7",
add_cpu=False,
add_memory=True,
need_online=True)
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
| true
| true
|
f716454730d5c1602c8d8bf63c6dfeaee57d2e2c
| 5,725
|
py
|
Python
|
evepraisal/views.py
|
fucema/evepraisal
|
9e35803118928696e17bd82510a1331724b3a00e
|
[
"Unlicense"
] | null | null | null |
evepraisal/views.py
|
fucema/evepraisal
|
9e35803118928696e17bd82510a1331724b3a00e
|
[
"Unlicense"
] | null | null | null |
evepraisal/views.py
|
fucema/evepraisal
|
9e35803118928696e17bd82510a1331724b3a00e
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
An Eve Online Cargo Scanner
"""
import time
import json
from flask import (
g, flash, request, render_template, url_for, redirect, session,
send_from_directory, abort)
from sqlalchemy import desc
import evepaste
from helpers import login_required
from estimate import get_market_prices
from models import Appraisals, Users, appraisal_count
from parser import parse
from . import app, db, cache, oid
def estimate_cost():
    """Appraise pasted text from POST[raw_paste] and render the result.

    Parses the paste, prices every distinct item for the selected market,
    persists the appraisal, and renders it. Unparsable input renders an
    error page instead; an unknown market aborts with HTTP 400.
    """
    pasted = request.form.get('raw_paste', '')
    solar_system = request.form.get('market', '30000142')
    if solar_system not in app.config['VALID_SOLAR_SYSTEMS'].keys():
        abort(400)

    try:
        parsed = parse(pasted)
    except evepaste.Unparsable as err:
        # Only log when the user actually submitted something.
        if pasted:
            app.logger.warning("User input invalid data: %s", pasted)
        return render_template(
            'error.html', error='Error when parsing input: ' + str(err))

    # Price every distinct item at the chosen market hub.
    prices = get_market_prices(list(parsed['unique_items']),
                               options={'solarsystem_id': solar_system})

    appraisal = Appraisals(Created=int(time.time()),
                           RawInput=pasted,
                           Kind=parsed['representative_kind'],
                           Prices=prices,
                           Parsed=parsed['results'],
                           ParsedVersion=1,
                           BadLines=parsed['bad_lines'],
                           Market=solar_system,
                           Public=bool(session['options'].get('share')),
                           UserId=g.user.Id if g.user else None)
    db.session.add(appraisal)
    db.session.commit()

    app.logger.debug("New Appraisal [%s]: %s",
                     appraisal.Id,
                     parsed['representative_kind'])
    return render_template('results.html',
                           appraisal=appraisal)
def display_result(result_id):
    """Render a stored appraisal, serving a short-lived cached page when possible.

    Private appraisals are only visible to their owner; unknown or
    forbidden ids fall back to the index page with a 404 status.
    """
    cache_key = 'appraisal:%s' % result_id
    cached = cache.get(cache_key)
    if cached:
        return cached

    query = Appraisals.query.filter(Appraisals.Id == result_id)
    if g.user:
        query = query.filter((Appraisals.UserId == g.user.Id) |
                             (Appraisals.Public == True))  # noqa
    else:
        query = query.filter(Appraisals.Public == True)  # noqa

    appraisal = query.first()
    if not appraisal:
        flash('Resource Not Found', 'error')
        return index(), 404

    page = render_template('results.html',
                           appraisal=appraisal,
                           full_page=True)
    if appraisal.Public:
        try:
            cache.set(cache_key, page, timeout=30)
        except Exception:
            # Caching is best-effort; a cache failure must not break the response.
            pass
    return page
@login_required
def options():
    """Show the user-options form; persist submitted settings on POST."""
    if request.method == 'POST':
        # Checkboxes arrive as 'on' only when ticked.
        new_options = {
            'autosubmit': request.form.get('autosubmit') == 'on',
            'share': request.form.get('share') == 'on',
        }
        # Force the per-request option loader to re-read from the DB.
        session['loaded_options'] = False
        g.user.Options = json.dumps(new_options)
        db.session.add(g.user)
        db.session.commit()
        flash('Successfully saved options.')
        return redirect(url_for('options'))
    return render_template('options.html')
@login_required
def history():
    """List the current user's 100 most recent appraisals, newest first."""
    appraisals = (Appraisals.query
                  .filter(Appraisals.UserId == g.user.Id)
                  .order_by(desc(Appraisals.Created))
                  .limit(100)
                  .all())
    return render_template('history.html', appraisals=appraisals)
def latest():
    """Render up to 200 recent public appraisals (cached for 30 seconds).

    An optional ?kind= query argument narrows the listing to one kind.
    """
    kind = request.args.get('kind')
    # Cache per kind; absent parameter maps to the shared 'all' key.
    cache_key = "latest:%s" % request.args.get('kind', 'all')
    cached = cache.get(cache_key)
    if cached:
        return cached

    query = Appraisals.query.filter_by(Public=True)  # NOQA
    if kind:
        query = query.filter_by(Kind=kind)  # NOQA
    query = query.order_by(desc(Appraisals.Created)).limit(200)

    body = render_template('latest.html', appraisals=query.all())
    cache.set(cache_key, body, timeout=30)
    return body
def index():
    """Render the landing page with a cached total appraisal count."""
    count = cache.get("stats:appraisal_count")
    # BUGFIX: distinguish a cache miss (None) from a legitimately cached
    # count of 0 — `if not count:` re-queried the database on every request
    # while the count was zero.
    if count is None:
        count = appraisal_count()
        cache.set("stats:appraisal_count", count, timeout=60)
    return render_template('index.html', appraisal_count=count)
def legal():
    """Render the static legal/disclaimer page."""
    return render_template('legal.html')
def static_from_root():
    # Serve root-level files (e.g. /robots.txt) straight from the static
    # folder by stripping the leading slash from the request path.
    return send_from_directory(app.static_folder, request.path[1:])
@oid.loginhandler
def login():
    """Show the OpenID login form; start the OpenID handshake on POST."""
    # Users who are already signed in go straight back to the index.
    if g.user is not None:
        return redirect(url_for('index'))
    if request.method == 'POST':
        identity_url = request.form.get('openid')
        if identity_url:
            return oid.try_login(identity_url)
    return render_template('login.html',
                           next=oid.get_next_url(),
                           error=oid.fetch_error())
@oid.after_login
def create_or_login(resp):
    """OpenID callback: look up (or create) the user and sign them in."""
    session['openid'] = resp.identity_url
    user = Users.query.filter_by(OpenId=resp.identity_url).first()
    if user is None:
        # First login for this identity: create an account with default options.
        user = Users(OpenId=session['openid'],
                     Options=json.dumps(app.config['USER_DEFAULT_OPTIONS']))
        db.session.add(user)
        db.session.commit()
    flash(u'Successfully signed in')
    g.user = user
    return redirect(oid.get_next_url())
def logout():
    """Drop the OpenID session state and return to the index page."""
    for key in ('openid', 'options'):
        session.pop(key, None)
    flash(u'You have been signed out')
    return redirect(url_for('index'))
| 29.663212
| 78
| 0.608559
|
import time
import json
from flask import (
g, flash, request, render_template, url_for, redirect, session,
send_from_directory, abort)
from sqlalchemy import desc
import evepaste
from helpers import login_required
from estimate import get_market_prices
from models import Appraisals, Users, appraisal_count
from parser import parse
from . import app, db, cache, oid
def estimate_cost():
raw_paste = request.form.get('raw_paste', '')
solar_system = request.form.get('market', '30000142')
if solar_system not in app.config['VALID_SOLAR_SYSTEMS'].keys():
abort(400)
try:
parse_results = parse(raw_paste)
except evepaste.Unparsable as ex:
if raw_paste:
app.logger.warning("User input invalid data: %s", raw_paste)
return render_template(
'error.html', error='Error when parsing input: ' + str(ex))
prices = get_market_prices(list(parse_results['unique_items']),
options={'solarsystem_id': solar_system})
appraisal = Appraisals(Created=int(time.time()),
RawInput=raw_paste,
Kind=parse_results['representative_kind'],
Prices=prices,
Parsed=parse_results['results'],
ParsedVersion=1,
BadLines=parse_results['bad_lines'],
Market=solar_system,
Public=bool(session['options'].get('share')),
UserId=g.user.Id if g.user else None)
db.session.add(appraisal)
db.session.commit()
app.logger.debug("New Appraisal [%s]: %s",
appraisal.Id,
parse_results['representative_kind'])
return render_template('results.html',
appraisal=appraisal)
def display_result(result_id):
page = cache.get('appraisal:%s' % result_id)
if page:
return page
q = Appraisals.query.filter(Appraisals.Id == result_id)
if g.user:
q = q.filter((Appraisals.UserId == g.user.Id) |
(Appraisals.Public == True))
else:
q = q.filter(Appraisals.Public == True)
appraisal = q.first()
if not appraisal:
flash('Resource Not Found', 'error')
return index(), 404
page = render_template('results.html',
appraisal=appraisal,
full_page=True)
if appraisal.Public:
try:
cache.set('appraisal:%s' % result_id, page, timeout=30)
except Exception:
pass
return page
@login_required
def options():
if request.method == 'POST':
autosubmit = True if request.form.get('autosubmit') == 'on' else False
paste_share = True if request.form.get('share') == 'on' else False
new_options = {
'autosubmit': autosubmit,
'share': paste_share,
}
session['loaded_options'] = False
g.user.Options = json.dumps(new_options)
db.session.add(g.user)
db.session.commit()
flash('Successfully saved options.')
return redirect(url_for('options'))
return render_template('options.html')
@login_required
def history():
q = Appraisals.query
q = q.filter(Appraisals.UserId == g.user.Id)
q = q.order_by(desc(Appraisals.Created))
q = q.limit(100)
appraisals = q.all()
return render_template('history.html', appraisals=appraisals)
def latest():
cache_key = "latest:%s" % request.args.get('kind', 'all')
body = cache.get(cache_key)
if body:
return body
q = Appraisals.query
q = q.filter_by(Public=True)
if request.args.get('kind'):
q = q.filter_by(Kind=request.args.get('kind'))
q = q.order_by(desc(Appraisals.Created))
q = q.limit(200)
appraisals = q.all()
body = render_template('latest.html', appraisals=appraisals)
cache.set(cache_key, body, timeout=30)
return body
def index():
count = cache.get("stats:appraisal_count")
if not count:
count = appraisal_count()
cache.set("stats:appraisal_count", count, timeout=60)
return render_template('index.html', appraisal_count=count)
def legal():
return render_template('legal.html')
def static_from_root():
return send_from_directory(app.static_folder, request.path[1:])
@oid.loginhandler
def login():
if g.user is not None:
return redirect(url_for('index'))
if request.method == 'POST':
openid = request.form.get('openid')
if openid:
return oid.try_login(openid)
return render_template('login.html', next=oid.get_next_url(),
error=oid.fetch_error())
@oid.after_login
def create_or_login(resp):
session['openid'] = resp.identity_url
user = Users.query.filter_by(OpenId=resp.identity_url).first()
if user is None:
user = Users(
OpenId=session['openid'],
Options=json.dumps(app.config['USER_DEFAULT_OPTIONS']))
db.session.add(user)
db.session.commit()
flash(u'Successfully signed in')
g.user = user
return redirect(oid.get_next_url())
def logout():
session.pop('openid', None)
session.pop('options', None)
flash(u'You have been signed out')
return redirect(url_for('index'))
| true
| true
|
f71645634067780989b528d6dbd814017009be67
| 1,620
|
py
|
Python
|
Toolkits/Discovery/meta/searx/searx/engines/blekko_images.py
|
roscopecoltran/SniperKit-Core
|
4600dffe1cddff438b948b6c22f586d052971e04
|
[
"MIT"
] | null | null | null |
Toolkits/Discovery/meta/searx/searx/engines/blekko_images.py
|
roscopecoltran/SniperKit-Core
|
4600dffe1cddff438b948b6c22f586d052971e04
|
[
"MIT"
] | null | null | null |
Toolkits/Discovery/meta/searx/searx/engines/blekko_images.py
|
roscopecoltran/SniperKit-Core
|
4600dffe1cddff438b948b6c22f586d052971e04
|
[
"MIT"
] | null | null | null |
"""
Blekko (Images)
@website https://blekko.com
@provide-api yes (inofficial)
@using-api yes
@results JSON
@stable yes
@parse url, title, img_src
"""
from json import loads
from searx.url_utils import urlencode
# engine dependent config: category, pagination, and safesearch support
categories = ['images']
paging = True
safesearch = True
# search-url pieces: base host plus the image-API path template
base_url = 'https://blekko.com'
search_url = '/api/images?{query}&c={c}'
# safesearch definitions: map searx levels (2/1/0) to Blekko cookie values
safesearch_types = {2: '1',
                    1: '',
                    0: '0'}
# do search-request
def request(query, params):
    """Fill *params* with the Blekko image-search URL and cookies."""
    page = params['pageno']
    # Blekko pages by result offset, 48 results per page.
    offset = (page - 1) * 48
    url = base_url + search_url.format(query=urlencode({'q': query}),
                                       c=offset)
    if page != 1:
        url += '&page={pageno}'.format(pageno=page - 1)
    params['url'] = url
    # Opt out of Blekko's request profiling.
    params['cookies']['tag_lesslogging'] = '1'
    # Translate the searx safesearch level into Blekko's cookie value.
    params['cookies']['safesearch'] = safesearch_types.get(params['safesearch'], '')
    return params
# get response from search-request
def response(resp):
    """Turn a raw Blekko JSON reply into searx image-result dicts."""
    search_results = loads(resp.text)
    # An empty or null payload yields no results.
    if not search_results:
        return []
    return [{'url': item['page_url'],
             'title': item['title'],
             'content': '',
             'img_src': item['url'],
             'template': 'images.html'}
            for item in search_results]
| 22.816901
| 84
| 0.571605
|
from json import loads
from searx.url_utils import urlencode
categories = ['images']
paging = True
safesearch = True
base_url = 'https://blekko.com'
search_url = '/api/images?{query}&c={c}'
safesearch_types = {2: '1',
1: '',
0: '0'}
def request(query, params):
c = (params['pageno'] - 1) * 48
params['url'] = base_url +\
search_url.format(query=urlencode({'q': query}),
c=c)
if params['pageno'] != 1:
params['url'] += '&page={pageno}'.format(pageno=(params['pageno'] - 1))
params['cookies']['tag_lesslogging'] = '1'
# parse safesearch argument
params['cookies']['safesearch'] = safesearch_types.get(params['safesearch'], '')
return params
# get response from search-request
def response(resp):
results = []
search_results = loads(resp.text)
# return empty array if there are no results
if not search_results:
return []
for result in search_results:
# append result
results.append({'url': result['page_url'],
'title': result['title'],
'content': '',
'img_src': result['url'],
'template': 'images.html'})
# return results
return results
| true
| true
|
f71645c0c0fb267e3a9d8d27f97d0ec8b8f3d710
| 22,819
|
py
|
Python
|
py/observer/ObserverData.py
|
nwfsc-fram/pyFieldSoftware
|
477ba162b66ede2263693cda8c5a51d27eaa3b89
|
[
"MIT"
] | null | null | null |
py/observer/ObserverData.py
|
nwfsc-fram/pyFieldSoftware
|
477ba162b66ede2263693cda8c5a51d27eaa3b89
|
[
"MIT"
] | 176
|
2019-11-22T17:44:55.000Z
|
2021-10-20T23:40:03.000Z
|
py/observer/ObserverData.py
|
nwfsc-fram/pyFieldSoftware
|
477ba162b66ede2263693cda8c5a51d27eaa3b89
|
[
"MIT"
] | 1
|
2021-05-07T01:06:32.000Z
|
2021-05-07T01:06:32.000Z
|
# -----------------------------------------------------------------------------
# Name: ObserverData.py
# Purpose: Observer Database routines
#
# Author: Will Smith <will.smith@noaa.gov>
#
# Created: Jan - July, 2016
# License: MIT
# ------------------------------------------------------------------------------
# Python implementation of Observer data class
from operator import itemgetter
from PyQt5.QtCore import pyqtProperty, QObject, QVariant
from py.observer.ObserverDBUtil import ObserverDBUtil
from py.observer.ObserverDBModels import Lookups, Users, Vessels, \
Programs, Contacts, VesselContacts, Ports, CatchCategories, IfqDealers, Species
from py.observer.ObserverUsers import ObserverUsers
import logging
import unittest
class ObserverData(QObject):
    """
    Handles details of various Observer data
    (from database, etc)

    Loads lookup tables, observers, vessels, captains, ports, catch
    categories, first receivers, and species once at construction and
    exposes them via properties (pyqtProperty where QML needs access).
    """
    # A special MIX species for use within OPTECS.
    # Used to divert MIX baskets, which are unspeciated, to CATCH_ADDITIONAL_BASKETS rather than
    # SPECIES_COMPOSITION_BASKETS.
    MIX_PACFIN_CODE = 'MIX'
    MIX_SPECIES_CODE = 99999

    def __init__(self):
        super(ObserverData, self).__init__()
        self._logger = logging.getLogger(__name__)
        self._observers = None
        self._observers_keys = None
        self._vessels = None
        # BUGFIX: _programs was never initialized, so _get_programs_orm() and
        # the `fisheries` property raised AttributeError when first used.
        self._programs = None
        self._lookup_fisheries = None  # Fisheries in LOOKUPS table - purpose unclear
        self._captains = None  # Skippers
        self._captain_vessel_id = None
        self._ports = None
        self._catch_categories = None
        self._trawl_gear_types = None
        self._fg_gear_types = None
        self._first_receivers = None
        self._species = None
        self._lookups = None

        # Get data tables
        self._get_observers_orm()
        self._get_vessels_orm()
        self._get_captains_orm()
        self._get_ports_orm()
        self._get_catch_categories_orm()
        self._get_first_receivers_orm()
        self._get_species_orm()

        # Data from LOOKUPS table
        self._get_lookups_orm()
        self._weightmethods = self._build_lookup_data('WEIGHT_METHOD')
        self._sc_samplemethods = self._build_lookup_data('SC_SAMPLE_METHOD')
        self._discardreasons = self._build_lookup_data('DISCARD_REASON', values_in_text=False)
        self._vesseltypes = self._build_lookup_data('VESSEL_TYPE')
        self._beaufort = self._get_beaufort_dict()
        self._gearperf = self._get_gearperf_trawl_dict()
        self._gearperf_fg = self._get_gearperf_fg_dict()
        self._trawl_gear_types = self._get_trawl_gear_list()  # Single field, key + desc concatenated
        self._fg_gear_types = self._get_fg_gear_list()  # Single field, key + desc concatenated
        self._soaktimes = self._get_avg_soaktimes_list()
        self._bs_samplemethods = sorted(self._list_lookup_desc('BS_SAMPLE_METHOD', values_in_text=True))
        self._vessellogbooknames = self._list_lookup_desc('VESSEL_LOGBOOK_NAME')
        self._create_mix_species_if_not_present()

    def _create_mix_species_if_not_present(self):
        """
        Check SPECIES table for 'MIX' pacfin code, if not there, create it.
        Scientific name, commmon name, and PacFIN code are all 'MIX'.
        Species ID and species code are both 99999.
        """
        current_user_id = ObserverDBUtil.get_current_user_id()
        created_date = ObserverDBUtil.get_arrow_datestr(date_format=ObserverDBUtil.oracle_date_format)
        mix_species_info = {
            'species': ObserverData.MIX_SPECIES_CODE,
            'scientific_name': ObserverData.MIX_PACFIN_CODE,
            'common_name': ObserverData.MIX_PACFIN_CODE,
            'species_code': ObserverData.MIX_SPECIES_CODE,
            'pacfin_code': ObserverData.MIX_PACFIN_CODE,
            'created_by': current_user_id if current_user_id else 1,
            'created_date': created_date,
        }
        try:
            Species.get(Species.pacfin_code == 'MIX')
            self._logger.info('MIX exists in SPECIES table.')
        except Species.DoesNotExist:
            self._logger.info('Adding MIX to SPECIES table (one-time operation)')
            Species.create(**mix_species_info)

    @staticmethod
    def make_username(user_model):
        """Build the display name used throughout OPTECS: 'First Last'."""
        return user_model.first_name + ' ' + user_model.last_name

    def _get_observers_orm(self, rebuild=False):
        """
        Get observers from database via ORM, store DB keys
        """
        if self._observers is not None and not rebuild:  # only build this once
            return
        self._observers = list()
        self._observers_keys = dict()
        obs_q = Users.select()
        for obs in obs_q:
            username = self.make_username(obs)
            self._observers.append(username)
            self._observers_keys[username] = obs
        self._observers = sorted(self._observers)  # Sort Alphabetically - should we do this by last name instead?

    def _get_vessels_orm(self, rebuild=False):
        """
        Get vessels from database via ORM, store DB keys
        """
        if self._vessels is not None and not rebuild:  # only build this once
            return
        self._vessels = list()
        vess_q = Vessels.select().where(Vessels.vessel_status.not_in(['S', 'I', 'R']))  # Not sunk, inactive, retired
        for vessel in vess_q:
            # Prefer the Coast Guard number; fall back to state registration.
            vessel_number = vessel.coast_guard_number
            if not vessel_number or len(vessel_number) < 1:
                vessel_number = vessel.state_reg_number
            vessel_entry = '{} - {}'.format(vessel.vessel_name.upper(), vessel_number)
            self._vessels.append(vessel_entry)
        self._vessels = sorted(self._vessels)  # Don't Remove Duplicates + Sort Alphabetically

    def _get_programs_orm(self, rebuild=False):
        """
        Get programs from database via ORM, store DB keys
        """
        if self._programs is not None and not rebuild:  # only build this once
            return
        self._programs = list()
        fish_q = Programs.select()
        for fishery in fish_q:
            self._programs.append(fishery.program_name)
        self._programs = sorted(set(self._programs))  # Remove Duplicates + Sort Alphabetically

    def _get_captains_orm(self, rebuild=False):
        """
        Get skippers from database via ORM, store DB keys
        """
        if self._captains is not None and not rebuild:  # only build this once
            return
        self._captains = list()
        if self._captain_vessel_id:
            # Limit skippers to the currently-selected vessel.
            captains_q = Contacts.select(). \
                join(VesselContacts, on=(Contacts.contact == VesselContacts.contact)). \
                where(
                (Contacts.contact_category == 3) &  # Vessel category
                (VesselContacts.contact_status != 'NA') &
                (VesselContacts.vessel == self._captain_vessel_id) &  # Vessel ID
                ((VesselContacts.contact_type == 1) |  # Skipper
                 (VesselContacts.contact_type == 3)))  # Skipper/ Owner
        else:
            captains_q = Contacts.select(). \
                join(VesselContacts, on=(Contacts.contact == VesselContacts.contact)). \
                where(
                (VesselContacts.contact_status != 'NA') &
                (Contacts.contact_category == 3) &  # Vessel
                ((VesselContacts.contact_type == 1) |  # Skipper
                 (VesselContacts.contact_type == 3)))  # Skipper/ Owner
        for captain in captains_q:
            if len(captain.first_name) > 0:
                self._captains.append(captain.first_name + ' ' + captain.last_name)
        self._captains = sorted(set(self._captains))  # Remove Duplicates + Sort Alphabetically

    def _get_first_receivers_orm(self, rebuild=False):
        """
        Get first receivers from database via ORM, store DB keys
        @return: dict of values and PK
        """
        if self._first_receivers is not None and not rebuild:  # only build this once
            return
        self._first_receivers = list()
        fr_q = IfqDealers. \
            select(IfqDealers, Ports). \
            join(Ports, on=(IfqDealers.port_code == Ports.ifq_port_code).alias('port')). \
            where(IfqDealers.active == 1)
        for fr in fr_q:
            fr_line = '{} {}'.format(fr.dealer_name, fr.port.port_name)
            self._first_receivers.append(fr_line)
        self._first_receivers = sorted(self._first_receivers)

    def _get_catch_categories_orm(self, rebuild=False):
        """
        To support autocomplete, get catch categories from database via ORM, store DB keys
        """
        if self._catch_categories is not None and not rebuild:  # only build this once
            return
        self._catch_categories = list()
        catch_q = CatchCategories.select().where(CatchCategories.active.is_null(True))
        for cc in catch_q:
            self._catch_categories.append('{} {}'.format(cc.catch_category_code, cc.catch_category_name))
        self._catch_categories = sorted(self._catch_categories)  # Sort Alphabetically

    def _get_ports_orm(self, rebuild=False):
        """
        Get ports from database via ORM, store DB keys
        """
        if self._ports is not None and not rebuild:  # only build this once
            return
        self._ports = list()
        port_q = Ports.select()
        for port in port_q:
            self._ports.append(port.port_name.title())  # Title case
        self._ports = sorted(set(self._ports))  # Remove Duplicates + Sort Alphabetically

    def _get_species_orm(self, rebuild=False):
        """
        To support autocomplete, get species common names from database via ORM
        """
        if self._species is not None and not rebuild:  # only build this once
            return
        self._species = list()
        species_q = Species.select().where(Species.active.is_null(True))
        for s in species_q:
            self._species.append('{}'.format(s.common_name))
        self._species = sorted(self._species)  # Sort Alphabetically

    def get_observer_id(self, observer_name):
        """Map a 'First Last' display name to its USER_ID, or None if unknown."""
        if observer_name in self._observers_keys:
            return self._observers_keys[observer_name].user  # USER_ID
        else:
            return None

    def get_observer_name(self, observer_id):
        """Map a USER_ID back to its 'First Last' display name."""
        obs = Users.get(Users.user == observer_id)
        return self.make_username(obs)

    @pyqtProperty(QVariant)
    def catch_categories(self):  # for autocomplete
        # BUGFIX: this property previously returned the undefined attribute
        # `_catchcategories` and was shadowed by a duplicate plain @property
        # later in the class; consolidated here as a single pyqtProperty so
        # both Python callers and QML bindings see the real list.
        return self._catch_categories

    @pyqtProperty(QVariant)
    def observers(self):
        return self._observers

    @pyqtProperty(QVariant)
    def vessels(self):
        return self._vessels

    @pyqtProperty(QVariant)
    def vessel_logbook_names(self):
        return self._vessellogbooknames

    @property
    def weight_methods(self):
        return self._weightmethods

    @property
    def sc_sample_methods(self):
        return self._sc_samplemethods

    @property
    def species(self):
        return self._species

    @property
    def bs_sample_methods(self):
        return self._bs_samplemethods

    @property
    def vessel_types(self):
        return self._vesseltypes

    @property
    def discard_reasons(self):
        return self._discardreasons

    # NOTE: the former duplicate `catch_categories` @property that lived here
    # was merged into the pyqtProperty defined above.

    @property
    def trawl_gear_types(self):  # For AutoComplete
        return self._trawl_gear_types

    @property
    def fg_gear_types(self):  # For AutoComplete
        return self._fg_gear_types

    @property
    def beaufort(self):
        return self._beaufort

    @property
    def soaktimes(self):
        return self._soaktimes

    @property
    def gearperf(self):
        return self._gearperf

    @property
    def gearperf_fg(self):
        return self._gearperf_fg

    @property
    def first_receivers(self):
        return self._first_receivers

    @staticmethod
    def get_fisheries_by_program_id(program_id, is_fg):
        """Delegate fishery lookup to ObserverUsers."""
        return ObserverUsers.get_fisheries_by_program_id(program_id, is_fg)

    def _get_lookups_orm(self, rebuild=False):
        """
        Get lookups via peewee ORM
        :return:
        """
        if self._lookups is not None and not rebuild:  # only build this once unless rebuilt
            return
        self._lookups = dict()  # of lists
        # http://peewee.readthedocs.org/en/latest/peewee/querying.html#query-operators
        lookups_q = Lookups.select().where((Lookups.active >> None) | (Lookups.active == 1))
        for lu in lookups_q:
            key = lu.lookup_type
            if key in self._lookups:
                self._lookups[key].append({'desc': lu.description, 'value': lu.lookup_value})
            else:
                self._lookups[key] = [{'desc': lu.description, 'value': lu.lookup_value}]
        if len(self._lookups) == 0:
            raise ConnectionError('Unable to get LOOKUPS from database, check observer DB')

        # Build fisheries list
        self._lookup_fisheries = list()
        for fishery in self._lookups['FISHERY']:
            self._lookup_fisheries.append(fishery['desc'])
        self._lookup_fisheries = sorted(self._lookup_fisheries)

    @pyqtProperty(QVariant)
    def lookup_fisheries(self):
        return self._lookup_fisheries

    @pyqtProperty(QVariant)
    def fisheries(self):  # TODO do we want these or the lookup table entries?
        # NOTE(review): _programs is only populated by _get_programs_orm(),
        # which nothing in this class calls; this property returns None until
        # that happens — confirm intended usage.
        return self._programs

    @pyqtProperty(QVariant)
    def captains(self):
        return self._captains

    @pyqtProperty(QVariant)
    def captain_vessel_id(self):
        return self._captain_vessel_id

    @captain_vessel_id.setter
    def captain_vessel_id(self, vessel_id):
        """Setting a new vessel id rebuilds the captains list for that vessel."""
        self._logger.debug(f'Set Captain Vessel ID to {vessel_id}')
        if vessel_id != self._captain_vessel_id:
            self._captain_vessel_id = vessel_id
            self._get_captains_orm(rebuild=True)

    @pyqtProperty(QVariant)
    def ports(self):
        return self._ports

    def _build_lookup_data(self, lookup_type, include_empty=True, values_in_text=True):
        """
        Get values and descriptions from LOOKUPS
        :param lookup_type: primary name of lookup
        :param include_empty: include an extra empty item, useful for combo box with no default
        :param values_in_text: include the value in the text descriptions returned
        :return: list of dicts of the format [{'text': 'asf', 'value' 'somedata'}]
        """
        if self._lookups is None:
            self._get_lookups_orm()
        lookupdata = list()
        if include_empty:
            lookupdata.append({'text': '-', 'value': '-1'})  # Create empty selection option
        for data in self._lookups[lookup_type]:
            if values_in_text:
                lookupdata.append({'text': data['value'].zfill(2) + ' ' + data['desc'],  # zfill for 0-padding
                                   'value': data['value']})
            else:
                lookupdata.append({'text': data['desc'],
                                   'value': data['value']})
        lookupdata = sorted(lookupdata, key=itemgetter('text'))  # Sort Alphabetically
        return lookupdata

    def _get_beaufort_dict(self):
        """
        Build beaufort description dict
        @return: dict of format {'0':'Description', ...}
        """
        bvals = self._build_lookup_data('BEAUFORT_VALUE', include_empty=False, values_in_text=False)
        return {b['value']: b['text'] for b in bvals}

    def _get_soaktime_dict(self):
        """
        Build avg soak time range description dict
        @return: dict of format {'0':'Description', ...}
        """
        bvals = self._build_lookup_data('AVG_SOAK_TIME_RANGE', include_empty=False, values_in_text=False)
        return {b['value']: b['text'] for b in bvals}

    def _get_gearperf_trawl_dict(self):
        """
        Build gear performance description dict
        @return: dict of format {'1':'Description', ...}
        """
        gvals = self._build_lookup_data('GEAR_PERFORMANCE', include_empty=False, values_in_text=False)
        return {b['value']: b['text'] for b in gvals}

    def _get_gearperf_fg_dict(self):
        """
        Build gear performance description dict for FG
        NOTE The description for #5 is trawl based, so hardcoded the alternate text here
        @return: dict of format {'1':'Description', ...}
        """
        gvals = self._build_lookup_data('GEAR_PERFORMANCE', include_empty=False, values_in_text=False)
        # FG - manually change this one value
        for g in gvals:
            if g['value'] == '5':
                g['text'] = 'Problem - Pot(s) or other gear lost'
                break
        return {b['value']: b['text'] for b in gvals}

    def _list_lookup_desc(self, lookup_type, values_in_text=False):
        """
        Get simple list of values from LOOKUPS given a list created by _build_lookup_data
        """
        lookup_data = self._build_lookup_data(lookup_type, include_empty=False, values_in_text=values_in_text)
        lookuplistdata = list()
        for datum in lookup_data:
            lookuplistdata.append(datum['text'])
        return sorted(lookuplistdata)

    def _get_trawl_gear_list(self):
        """
        Get the list of trawl gear types and descriptions, concatenated.
        Sort by trawl type treated as integer.
        :return: a list of strings of gear type value, space, and gear type description.
        """
        gvals = self._build_lookup_data('TRAWL_GEAR_TYPE', include_empty=False, values_in_text=False)
        # Sort by gear type - numerically, not alphabetically.
        lookupdata = []
        for entry in gvals:
            entry['value_as_int'] = int(entry['value'])
            lookupdata.append(entry)
        lookupdata = sorted(lookupdata, key=itemgetter('value_as_int'))  # Sort numerically by gear number.
        return [str(entry['value_as_int']) + ' ' + entry['text'] for entry in lookupdata]

    def _get_fg_gear_list(self):
        """
        Get the list of fg gear types and descriptions, concatenated.
        Sort by trawl type treated as integer.
        :return: a list of strings of gear type value, space, and gear type description.
        """
        gvals = self._build_lookup_data('FG_GEAR_TYPE', include_empty=False, values_in_text=False)
        # Sort by gear type - numerically, not alphabetically.
        lookupdata = []
        for entry in gvals:
            entry['value_as_int'] = int(entry['value'])
            lookupdata.append(entry)
        lookupdata = sorted(lookupdata, key=itemgetter('value_as_int'))  # Sort numerically by gear number.
        return [str(entry['value_as_int']) + ' ' + entry['text'] for entry in lookupdata]

    def _get_avg_soaktimes_list(self):
        """
        Get the list of avg soak times descriptions, concatenated.
        Sort by # type treated as integer.
        :return: a list of strings of soak time value, space, and soak time description.
        """
        gvals = self._build_lookup_data('AVG_SOAK_TIME_RANGE', include_empty=False, values_in_text=False)
        # Sort by soak-time range - numerically, not alphabetically.
        lookupdata = []
        for entry in gvals:
            entry['value_as_int'] = int(entry['value'])
            lookupdata.append(entry)
        lookupdata = sorted(lookupdata, key=itemgetter('value_as_int'))  # Sort numerically.
        return [str(entry['value_as_int']) + ' ' + entry['text'] for entry in lookupdata]
class TestObserverData(unittest.TestCase):
    """
    Test basic SQLite connectivity
    """
    # NOTE(review): these tests require a populated observer database; they
    # construct a full ObserverData in setUp and query live tables.
    def setUp(self):
        logging.basicConfig(level=logging.DEBUG)
        self.testdata = ObserverData()
    def test_connection(self):
        # NOTE(review): ObserverData in this file defines no `db_connection`
        # attribute — this test looks stale; confirm before relying on it.
        self.assertIsNotNone(self.testdata.db_connection)
        cursor = self.testdata.db_connection.cursor()
        self.assertIsNotNone(cursor)
    def test_beaufort(self):
        beef = self.testdata._get_beaufort_dict()
        self.assertGreater(len(beef['0']), 5)
        self.assertGreater(len(beef['9']), 5)
    def test_gearperf(self):
        beef = self.testdata._get_gearperf_trawl_dict()
        self.assertGreater(len(beef['1']), 5)
        self.assertGreater(len(beef['7']), 5)
    def test_observers(self):
        logging.debug(self.testdata.observers)
        self.assertGreater(len(self.testdata.observers), 10)
    def test_vessels(self):
        logging.debug(self.testdata.vessels)
        self.assertGreater(len(self.testdata.vessels), 10)
    def test_weightmethods(self):
        logging.debug(self.testdata.weight_methods)
        self.assertGreater(len(self.testdata.weight_methods), 10)
    def test_vesseltypes(self):
        logging.debug(self.testdata.vessel_types)
        self.assertGreater(len(self.testdata.vessel_types), 5)
    def test_vessellogbooknames(self):
        logging.debug(self.testdata.vessel_logbook_names)
        self.assertGreater(len(self.testdata.vessel_logbook_names), 5)
    def test_discardreasons(self):
        logging.debug(self.testdata.discard_reasons)
        self.assertGreater(len(self.testdata.discard_reasons), 5)
    def test_catchcategories(self):
        logging.debug(self.testdata.catch_categories)
        self.assertGreater(len(self.testdata.catch_categories), 200)
    def test_trawlgeartypes(self):
        logging.debug(self.testdata.trawl_gear_types)
        self.assertEqual(len(self.testdata.trawl_gear_types), 12)
    def test_lookups_orm(self):
        """
        Compares old and new LOOKUPS select
        :return:
        """
        # NOTE(review): `_get_lookups` (the pre-ORM variant) is not defined on
        # ObserverData in this file — only `_get_lookups_orm` is; confirm.
        self.testdata._get_lookups()
        copylookups = dict(self.testdata._lookups)
        self.testdata._get_lookups_orm(rebuild=True)
        self.assertGreater(len(copylookups), 0)
        self.assertEqual(len(copylookups), len(self.testdata._lookups))
    @unittest.skip("ObserverData no longer has a 'programs' attribute.")
    def test_fisheries(self):
        self.assertGreater(len(self.testdata.programs), 10)
        logging.debug(self.testdata.programs)
    def test_observers_orm(self):
        """
        Compares old and new LOOKUPS select
        :return:
        """
        # NOTE(review): `_get_observers` (the pre-ORM variant) is not defined
        # on ObserverData in this file — only `_get_observers_orm`; confirm.
        self.testdata._get_observers()
        copyobs = list(self.testdata._observers)
        self.testdata._get_observers_orm(rebuild=True)
        self.assertGreater(len(copyobs), 0)
        self.assertEqual(len(copyobs), len(self.testdata._observers))
    def test_get_observer(self):
        # Depends on specific user rows existing in the test database.
        self.assertEqual(self.testdata.get_observer_id('Eric Brasseur'), 1484)
        self.assertEqual(self.testdata.get_observer_name(1471), 'Amos Cernohouz')
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 36.686495
| 117
| 0.640168
|
from operator import itemgetter
from PyQt5.QtCore import pyqtProperty, QObject, QVariant
from py.observer.ObserverDBUtil import ObserverDBUtil
from py.observer.ObserverDBModels import Lookups, Users, Vessels, \
Programs, Contacts, VesselContacts, Ports, CatchCategories, IfqDealers, Species
from py.observer.ObserverUsers import ObserverUsers
import logging
import unittest
class ObserverData(QObject):
MIX_PACFIN_CODE = 'MIX'
MIX_SPECIES_CODE = 99999
def __init__(self):
super(ObserverData, self).__init__()
self._logger = logging.getLogger(__name__)
self._observers = None
self._observers_keys = None
self._vessels = None
self._lookup_fisheries = None
self._captains = None
self._captain_vessel_id = None
self._ports = None
self._catch_categories = None
self._trawl_gear_types = None
self._fg_gear_types = None
self._first_receivers = None
self._species = None
self._lookups = None
self._get_observers_orm()
self._get_vessels_orm()
self._get_captains_orm()
self._get_ports_orm()
self._get_catch_categories_orm()
self._get_first_receivers_orm()
self._get_species_orm()
self._get_lookups_orm()
self._weightmethods = self._build_lookup_data('WEIGHT_METHOD')
self._sc_samplemethods = self._build_lookup_data('SC_SAMPLE_METHOD')
self._discardreasons = self._build_lookup_data('DISCARD_REASON', values_in_text=False)
self._vesseltypes = self._build_lookup_data('VESSEL_TYPE')
self._beaufort = self._get_beaufort_dict()
self._gearperf = self._get_gearperf_trawl_dict()
self._gearperf_fg = self._get_gearperf_fg_dict()
self._trawl_gear_types = self._get_trawl_gear_list()
self._fg_gear_types = self._get_fg_gear_list()
self._soaktimes = self._get_avg_soaktimes_list()
self._bs_samplemethods = sorted(self._list_lookup_desc('BS_SAMPLE_METHOD', values_in_text=True))
self._vessellogbooknames = self._list_lookup_desc('VESSEL_LOGBOOK_NAME')
self._create_mix_species_if_not_present()
def _create_mix_species_if_not_present(self):
current_user_id = ObserverDBUtil.get_current_user_id()
created_date = ObserverDBUtil.get_arrow_datestr(date_format=ObserverDBUtil.oracle_date_format)
mix_species_info = {
'species': ObserverData.MIX_SPECIES_CODE,
'scientific_name': ObserverData.MIX_PACFIN_CODE,
'common_name': ObserverData.MIX_PACFIN_CODE,
'species_code': ObserverData.MIX_SPECIES_CODE,
'pacfin_code': ObserverData.MIX_PACFIN_CODE,
'created_by': current_user_id if current_user_id else 1,
'created_date': created_date,
}
try:
Species.get(Species.pacfin_code == 'MIX')
self._logger.info('MIX exists in SPECIES table.')
except Species.DoesNotExist:
self._logger.info('Adding MIX to SPECIES table (one-time operation)')
Species.create(**mix_species_info)
@staticmethod
def make_username(user_model):
return user_model.first_name + ' ' + user_model.last_name
def _get_observers_orm(self, rebuild=False):
if self._observers is not None and not rebuild:
return
self._observers = list()
self._observers_keys = dict()
obs_q = Users.select()
for obs in obs_q:
username = self.make_username(obs)
self._observers.append(username)
self._observers_keys[username] = obs
self._observers = sorted(self._observers)
def _get_vessels_orm(self, rebuild=False):
if self._vessels is not None and not rebuild:
return
self._vessels = list()
vess_q = Vessels.select().where(Vessels.vessel_status.not_in(['S', 'I', 'R']))
for vessel in vess_q:
vessel_number = vessel.coast_guard_number
if not vessel_number or len(vessel_number) < 1:
vessel_number = vessel.state_reg_number
vessel_entry = '{} - {}'.format(vessel.vessel_name.upper(), vessel_number)
self._vessels.append(vessel_entry)
self._vessels = sorted(self._vessels)
def _get_programs_orm(self, rebuild=False):
if self._programs is not None and not rebuild: # only build this once
return
self._programs = list()
fish_q = Programs.select()
for fishery in fish_q:
self._programs.append(fishery.program_name)
self._programs = sorted(set(self._programs)) # Remove Duplicates + Sort Alphabetically
def _get_captains_orm(self, rebuild=False):
if self._captains is not None and not rebuild: # only build this once
return
self._captains = list()
if self._captain_vessel_id:
captains_q = Contacts.select(). \
join(VesselContacts, on=(Contacts.contact == VesselContacts.contact)). \
where(
(Contacts.contact_category == 3) & # Vessel category
(VesselContacts.contact_status != 'NA') &
(VesselContacts.vessel == self._captain_vessel_id) & # Vessel ID
((VesselContacts.contact_type == 1) | # Skipper
(VesselContacts.contact_type == 3))) # Skipper/ Owner
else:
captains_q = Contacts.select(). \
join(VesselContacts, on=(Contacts.contact == VesselContacts.contact)). \
where(
(VesselContacts.contact_status != 'NA') &
(Contacts.contact_category == 3) & # Vessel
((VesselContacts.contact_type == 1) | # Skipper
(VesselContacts.contact_type == 3))) # Skipper/ Owner
for captain in captains_q:
if len(captain.first_name) > 0:
self._captains.append(captain.first_name + ' ' + captain.last_name)
self._captains = sorted(set(self._captains)) # Remove Duplicates + Sort Alphabetically
def _get_first_receivers_orm(self, rebuild=False):
if self._first_receivers is not None and not rebuild: # only build this once
return
self._first_receivers = list()
fr_q = IfqDealers. \
select(IfqDealers, Ports). \
join(Ports, on=(IfqDealers.port_code == Ports.ifq_port_code).alias('port')). \
where(IfqDealers.active == 1)
for fr in fr_q:
fr_line = '{} {}'.format(fr.dealer_name, fr.port.port_name)
# self._logger.info(fr_line)
self._first_receivers.append(fr_line)
self._first_receivers = sorted(self._first_receivers)
def _get_catch_categories_orm(self, rebuild=False):
if self._catch_categories is not None and not rebuild: # only build this once
return
self._catch_categories = list()
catch_q = CatchCategories.select().where(CatchCategories.active.is_null(True))
for cc in catch_q:
self._catch_categories.append('{} {}'.format(cc.catch_category_code, cc.catch_category_name))
self._catch_categories = sorted(self._catch_categories) # Sort Alphabetically
def _get_ports_orm(self, rebuild=False):
if self._ports is not None and not rebuild: # only build this once
return
self._ports = list()
port_q = Ports.select()
for port in port_q:
self._ports.append(port.port_name.title()) # Title case
self._ports = sorted(set(self._ports)) # Remove Duplicates + Sort Alphabetically
def _get_species_orm(self, rebuild=False):
if self._species is not None and not rebuild: # only build this once
return
self._species = list()
species_q = Species.select().where(Species.active.is_null(True))
for s in species_q:
self._species.append('{}'.format(s.common_name))
self._species = sorted(self._species) # Sort Alphabetically
def get_observer_id(self, observer_name):
if observer_name in self._observers_keys:
return self._observers_keys[observer_name].user # USER_ID
else:
return None
def get_observer_name(self, observer_id):
obs = Users.get(Users.user == observer_id)
return self.make_username(obs)
@pyqtProperty(QVariant)
def catch_categories(self): # for autocomplete
return self._catchcategories
@pyqtProperty(QVariant)
def observers(self):
return self._observers
@pyqtProperty(QVariant)
def vessels(self):
return self._vessels
@pyqtProperty(QVariant)
def vessel_logbook_names(self):
return self._vessellogbooknames
@property
def weight_methods(self):
return self._weightmethods
@property
def sc_sample_methods(self):
return self._sc_samplemethods
@property
def species(self):
return self._species
@property
def bs_sample_methods(self):
return self._bs_samplemethods
@property
def vessel_types(self):
return self._vesseltypes
@property
def discard_reasons(self):
return self._discardreasons
@property
def catch_categories(self): # For AutoComplete
return self._catch_categories
@property
def trawl_gear_types(self): # For AutoComplete
return self._trawl_gear_types
@property
def fg_gear_types(self): # For AutoComplete
return self._fg_gear_types
@property
def beaufort(self):
return self._beaufort
@property
def soaktimes(self):
return self._soaktimes
@property
def gearperf(self):
return self._gearperf
@property
def gearperf_fg(self):
return self._gearperf_fg
@property
def first_receivers(self):
return self._first_receivers
@staticmethod
def get_fisheries_by_program_id(program_id, is_fg):
return ObserverUsers.get_fisheries_by_program_id(program_id, is_fg)
def _get_lookups_orm(self, rebuild=False):
if self._lookups is not None and not rebuild: # only build this once unless rebuilt
return
self._lookups = dict() # of lists
# http://peewee.readthedocs.org/en/latest/peewee/querying.html#query-operators
lookups_q = Lookups.select().where((Lookups.active >> None) | (Lookups.active == 1))
for lu in lookups_q:
key = lu.lookup_type
if key in self._lookups:
self._lookups[key].append({'desc': lu.description, 'value': lu.lookup_value})
else:
self._lookups[key] = [{'desc': lu.description, 'value': lu.lookup_value}]
if len(self._lookups) == 0:
raise ConnectionError('Unable to get LOOKUPS from database, check observer DB')
# Build fisheries list
self._lookup_fisheries = list()
for fishery in self._lookups['FISHERY']:
self._lookup_fisheries.append(fishery['desc'])
self._lookup_fisheries = sorted(self._lookup_fisheries)
@pyqtProperty(QVariant)
def lookup_fisheries(self):
return self._lookup_fisheries
@pyqtProperty(QVariant)
def fisheries(self): # TODO do we want these or the lookup table entries?
return self._programs
@pyqtProperty(QVariant)
def captains(self):
return self._captains
@pyqtProperty(QVariant)
def captain_vessel_id(self):
return self._captain_vessel_id
@captain_vessel_id.setter
def captain_vessel_id(self, vessel_id):
self._logger.debug(f'Set Captain Vessel ID to {vessel_id}')
if vessel_id != self._captain_vessel_id:
self._captain_vessel_id = vessel_id
self._get_captains_orm(rebuild=True)
@pyqtProperty(QVariant)
def ports(self):
return self._ports
def _build_lookup_data(self, lookup_type, include_empty=True, values_in_text=True):
if self._lookups is None:
self._get_lookups_orm()
lookupdata = list()
if include_empty:
lookupdata.append({'text': '-', 'value': '-1'}) # Create empty selection option
for data in self._lookups[lookup_type]:
if values_in_text:
lookupdata.append({'text': data['value'].zfill(2) + ' ' + data['desc'], # zfill for 0-padding
'value': data['value']})
else:
lookupdata.append({'text': data['desc'],
'value': data['value']})
lookupdata = sorted(lookupdata, key=itemgetter('text')) # Sort Alphabetically
return lookupdata
def _get_beaufort_dict(self):
bvals = self._build_lookup_data('BEAUFORT_VALUE', include_empty=False, values_in_text=False)
return {b['value']: b['text'] for b in bvals}
def _get_soaktime_dict(self):
bvals = self._build_lookup_data('AVG_SOAK_TIME_RANGE', include_empty=False, values_in_text=False)
return {b['value']: b['text'] for b in bvals}
def _get_gearperf_trawl_dict(self):
gvals = self._build_lookup_data('GEAR_PERFORMANCE', include_empty=False, values_in_text=False)
return {b['value']: b['text'] for b in gvals}
def _get_gearperf_fg_dict(self):
gvals = self._build_lookup_data('GEAR_PERFORMANCE', include_empty=False, values_in_text=False)
# FG - manually change this one value
for g in gvals:
if g['value'] == '5':
g['text'] = 'Problem - Pot(s) or other gear lost'
break
return {b['value']: b['text'] for b in gvals}
def _list_lookup_desc(self, lookup_type, values_in_text=False):
lookup_data = self._build_lookup_data(lookup_type, include_empty=False, values_in_text=values_in_text)
lookuplistdata = list()
for datum in lookup_data:
lookuplistdata.append(datum['text'])
return sorted(lookuplistdata)
def _get_trawl_gear_list(self):
gvals = self._build_lookup_data('TRAWL_GEAR_TYPE', include_empty=False, values_in_text=False)
# Sort by gear type - numerically, not alphabetically.
lookupdata = []
for entry in gvals:
entry['value_as_int'] = int(entry['value'])
lookupdata.append(entry)
lookupdata = sorted(lookupdata, key=itemgetter('value_as_int')) # Sort numerically by gear number.
return [str(entry['value_as_int']) + ' ' + entry['text'] for entry in lookupdata]
def _get_fg_gear_list(self):
gvals = self._build_lookup_data('FG_GEAR_TYPE', include_empty=False, values_in_text=False)
# Sort by gear type - numerically, not alphabetically.
lookupdata = []
for entry in gvals:
entry['value_as_int'] = int(entry['value'])
lookupdata.append(entry)
lookupdata = sorted(lookupdata, key=itemgetter('value_as_int')) # Sort numerically by gear number.
return [str(entry['value_as_int']) + ' ' + entry['text'] for entry in lookupdata]
def _get_avg_soaktimes_list(self):
gvals = self._build_lookup_data('AVG_SOAK_TIME_RANGE', include_empty=False, values_in_text=False)
# Sort by gear type - numerically, not alphabetically.
lookupdata = []
for entry in gvals:
entry['value_as_int'] = int(entry['value'])
lookupdata.append(entry)
lookupdata = sorted(lookupdata, key=itemgetter('value_as_int')) # Sort numerically by gear number.
return [str(entry['value_as_int']) + ' ' + entry['text'] for entry in lookupdata]
class TestObserverData(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.DEBUG)
self.testdata = ObserverData()
def test_connection(self):
self.assertIsNotNone(self.testdata.db_connection)
cursor = self.testdata.db_connection.cursor()
self.assertIsNotNone(cursor)
def test_beaufort(self):
beef = self.testdata._get_beaufort_dict()
self.assertGreater(len(beef['0']), 5)
self.assertGreater(len(beef['9']), 5)
def test_gearperf(self):
beef = self.testdata._get_gearperf_trawl_dict()
self.assertGreater(len(beef['1']), 5)
self.assertGreater(len(beef['7']), 5)
def test_observers(self):
logging.debug(self.testdata.observers)
self.assertGreater(len(self.testdata.observers), 10)
def test_vessels(self):
logging.debug(self.testdata.vessels)
self.assertGreater(len(self.testdata.vessels), 10)
def test_weightmethods(self):
logging.debug(self.testdata.weight_methods)
self.assertGreater(len(self.testdata.weight_methods), 10)
def test_vesseltypes(self):
logging.debug(self.testdata.vessel_types)
self.assertGreater(len(self.testdata.vessel_types), 5)
def test_vessellogbooknames(self):
logging.debug(self.testdata.vessel_logbook_names)
self.assertGreater(len(self.testdata.vessel_logbook_names), 5)
def test_discardreasons(self):
logging.debug(self.testdata.discard_reasons)
self.assertGreater(len(self.testdata.discard_reasons), 5)
def test_catchcategories(self):
logging.debug(self.testdata.catch_categories)
self.assertGreater(len(self.testdata.catch_categories), 200)
def test_trawlgeartypes(self):
logging.debug(self.testdata.trawl_gear_types)
self.assertEqual(len(self.testdata.trawl_gear_types), 12)
def test_lookups_orm(self):
self.testdata._get_lookups()
copylookups = dict(self.testdata._lookups)
self.testdata._get_lookups_orm(rebuild=True)
self.assertGreater(len(copylookups), 0)
self.assertEqual(len(copylookups), len(self.testdata._lookups))
@unittest.skip("ObserverData no longer has a 'programs' attribute.")
def test_fisheries(self):
self.assertGreater(len(self.testdata.programs), 10)
logging.debug(self.testdata.programs)
def test_observers_orm(self):
self.testdata._get_observers()
copyobs = list(self.testdata._observers)
self.testdata._get_observers_orm(rebuild=True)
self.assertGreater(len(copyobs), 0)
self.assertEqual(len(copyobs), len(self.testdata._observers))
def test_get_observer(self):
self.assertEqual(self.testdata.get_observer_id('Eric Brasseur'), 1484)
self.assertEqual(self.testdata.get_observer_name(1471), 'Amos Cernohouz')
if __name__ == '__main__':
unittest.main()
| true
| true
|
f716461f33bb89db290e03f930670a8c2c58abd5
| 4,917
|
py
|
Python
|
test/functional/p2p_node_network_limited.py
|
The-Bitcoin-Phantom/The-bitcoin-Phantom
|
c914b51924932f07026eb6ba057c6e375e4dcdac
|
[
"MIT"
] | null | null | null |
test/functional/p2p_node_network_limited.py
|
The-Bitcoin-Phantom/The-bitcoin-Phantom
|
c914b51924932f07026eb6ba057c6e375e4dcdac
|
[
"MIT"
] | null | null | null |
test/functional/p2p_node_network_limited.py
|
The-Bitcoin-Phantom/The-bitcoin-Phantom
|
c914b51924932f07026eb6ba057c6e375e4dcdac
|
[
"MIT"
] | 1
|
2020-11-04T06:59:19.000Z
|
2020-11-04T06:59:19.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The bitphantom Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata, msg_verack, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import bitphantomTestFramework
from test_framework.util import (
assert_equal,
disconnect_nodes,
connect_nodes,
wait_until,
)
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(bitphantomTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
expected_services = NODE_WITNESS | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes(self.nodes[0], 1)
blocks = self.nodes[1].generatetoaddress(292, self.nodes[1].get_deterministic_priv_key().address)
self.sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
assert_equal(node1.firstAddrnServices, expected_services)
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
connect_nodes(self.nodes[0], 2)
try:
self.sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
# node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
connect_nodes(self.nodes[1], 2)
# sync must be possible
self.sync_blocks()
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
# connect node1 (non pruned) with node0 (pruned) and check if the can sync
connect_nodes(self.nodes[0], 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
self.sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
| 40.636364
| 121
| 0.693512
|
from test_framework.messages import CInv, msg_getdata, msg_verack, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import bitphantomTestFramework
from test_framework.util import (
assert_equal,
disconnect_nodes,
connect_nodes,
wait_until,
)
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(bitphantomTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
expected_services = NODE_WITNESS | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes(self.nodes[0], 1)
blocks = self.nodes[1].generatetoaddress(292, self.nodes[1].get_deterministic_priv_key().address)
self.sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1])
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0])
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node1.send_message(msg_verack())
node1.wait_for_addr()
assert_equal(node1.firstAddrnServices, expected_services)
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
connect_nodes(self.nodes[0], 2)
try:
self.sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
connect_nodes(self.nodes[1], 2)
self.sync_blocks()
self.disconnect_all()
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
connect_nodes(self.nodes[0], 1)
self.sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
| true
| true
|
f7164659386641171d3bd75324ad46792423c6e2
| 818
|
py
|
Python
|
evidently/analyzers/test_utils.py
|
jim-fun/evidently
|
eb3479b8ce39e43601fb2d1ffbf61e0624541865
|
[
"Apache-2.0"
] | null | null | null |
evidently/analyzers/test_utils.py
|
jim-fun/evidently
|
eb3479b8ce39e43601fb2d1ffbf61e0624541865
|
[
"Apache-2.0"
] | null | null | null |
evidently/analyzers/test_utils.py
|
jim-fun/evidently
|
eb3479b8ce39e43601fb2d1ffbf61e0624541865
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import unittest
import pandas
from evidently.analyzers.utils import process_columns
from evidently.pipeline.column_mapping import ColumnMapping
class TestUtils(unittest.TestCase):
def test_process_columns(self):
dataset = pandas.DataFrame.from_dict([
dict(datetime=datetime.datetime.now(),
target=1,
prediction=1,
feature1=0,
feature2=1,
cat_feature1="o",
cat_feature2="b")])
columns = process_columns(dataset, ColumnMapping())
self.assertIsNone(columns.utility_columns.id_column)
self.assertCountEqual(['feature1', 'feature2'], columns.num_feature_names)
self.assertCountEqual(['cat_feature1', 'cat_feature2'], columns.cat_feature_names)
| 32.72
| 90
| 0.660147
|
import datetime
import unittest
import pandas
from evidently.analyzers.utils import process_columns
from evidently.pipeline.column_mapping import ColumnMapping
class TestUtils(unittest.TestCase):
def test_process_columns(self):
dataset = pandas.DataFrame.from_dict([
dict(datetime=datetime.datetime.now(),
target=1,
prediction=1,
feature1=0,
feature2=1,
cat_feature1="o",
cat_feature2="b")])
columns = process_columns(dataset, ColumnMapping())
self.assertIsNone(columns.utility_columns.id_column)
self.assertCountEqual(['feature1', 'feature2'], columns.num_feature_names)
self.assertCountEqual(['cat_feature1', 'cat_feature2'], columns.cat_feature_names)
| true
| true
|
f716469fc9aafa94a0b15694c9c6e42ee3698e48
| 1,311
|
py
|
Python
|
tests/components/yeelight/test_binary_sensor.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 11
|
2018-02-16T15:35:47.000Z
|
2020-01-14T15:20:00.000Z
|
tests/components/yeelight/test_binary_sensor.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 79
|
2020-07-23T07:13:37.000Z
|
2022-03-22T06:02:37.000Z
|
tests/components/yeelight/test_binary_sensor.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 14
|
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""Test the Yeelight binary sensor."""
from unittest.mock import patch
from homeassistant.components.yeelight import DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_component
from homeassistant.setup import async_setup_component
from . import MODULE, NAME, PROPERTIES, YAML_CONFIGURATION, _mocked_bulb
ENTITY_BINARY_SENSOR = f"binary_sensor.{NAME}_nightlight"
async def test_nightlight(hass: HomeAssistant):
"""Test nightlight sensor."""
mocked_bulb = _mocked_bulb()
with patch(f"{MODULE}.Bulb", return_value=mocked_bulb), patch(
f"{MODULE}.config_flow.yeelight.Bulb", return_value=mocked_bulb
):
await async_setup_component(hass, DOMAIN, YAML_CONFIGURATION)
await hass.async_block_till_done()
# active_mode
assert hass.states.get(ENTITY_BINARY_SENSOR).state == "off"
# nl_br
properties = {**PROPERTIES}
properties.pop("active_mode")
mocked_bulb.last_properties = properties
await entity_component.async_update_entity(hass, ENTITY_BINARY_SENSOR)
assert hass.states.get(ENTITY_BINARY_SENSOR).state == "on"
# default
properties.pop("nl_br")
await entity_component.async_update_entity(hass, ENTITY_BINARY_SENSOR)
assert hass.states.get(ENTITY_BINARY_SENSOR).state == "off"
| 35.432432
| 74
| 0.764302
|
from unittest.mock import patch
from homeassistant.components.yeelight import DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_component
from homeassistant.setup import async_setup_component
from . import MODULE, NAME, PROPERTIES, YAML_CONFIGURATION, _mocked_bulb
ENTITY_BINARY_SENSOR = f"binary_sensor.{NAME}_nightlight"
async def test_nightlight(hass: HomeAssistant):
mocked_bulb = _mocked_bulb()
with patch(f"{MODULE}.Bulb", return_value=mocked_bulb), patch(
f"{MODULE}.config_flow.yeelight.Bulb", return_value=mocked_bulb
):
await async_setup_component(hass, DOMAIN, YAML_CONFIGURATION)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_BINARY_SENSOR).state == "off"
properties = {**PROPERTIES}
properties.pop("active_mode")
mocked_bulb.last_properties = properties
await entity_component.async_update_entity(hass, ENTITY_BINARY_SENSOR)
assert hass.states.get(ENTITY_BINARY_SENSOR).state == "on"
properties.pop("nl_br")
await entity_component.async_update_entity(hass, ENTITY_BINARY_SENSOR)
assert hass.states.get(ENTITY_BINARY_SENSOR).state == "off"
| true
| true
|
f7164704b1c192777023ed6248ee9dad022d4284
| 26,727
|
py
|
Python
|
project4/util.py
|
Plastix/CSC-320
|
4c8802d0ceeffbea77bd1ef5f21d27d4de80dbb6
|
[
"MIT"
] | null | null | null |
project4/util.py
|
Plastix/CSC-320
|
4c8802d0ceeffbea77bd1ef5f21d27d4de80dbb6
|
[
"MIT"
] | null | null | null |
project4/util.py
|
Plastix/CSC-320
|
4c8802d0ceeffbea77bd1ef5f21d27d4de80dbb6
|
[
"MIT"
] | null | null | null |
# util.py
# -------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import heapq
import random
import sys
# import cStringIO
import types
import inspect
class FixedRandom:
def __init__(self):
fixedState = (3, (2147483648, 507801126, 683453281, 310439348, 2597246090, \
2209084787, 2267831527, 979920060, 3098657677, 37650879, 807947081, 3974896263, \
881243242, 3100634921, 1334775171, 3965168385, 746264660, 4074750168, 500078808, \
776561771, 702988163, 1636311725, 2559226045, 157578202, 2498342920, 2794591496, \
4130598723, 496985844, 2944563015, 3731321600, 3514814613, 3362575829, 3038768745, \
2206497038, 1108748846, 1317460727, 3134077628, 988312410, 1674063516, 746456451, \
3958482413, 1857117812, 708750586, 1583423339, 3466495450, 1536929345, 1137240525, \
3875025632, 2466137587, 1235845595, 4214575620, 3792516855, 657994358, 1241843248, \
1695651859, 3678946666, 1929922113, 2351044952, 2317810202, 2039319015, 460787996, \
3654096216, 4068721415, 1814163703, 2904112444, 1386111013, 574629867, 2654529343, \
3833135042, 2725328455, 552431551, 4006991378, 1331562057, 3710134542, 303171486, \
1203231078, 2670768975, 54570816, 2679609001, 578983064, 1271454725, 3230871056, \
2496832891, 2944938195, 1608828728, 367886575, 2544708204, 103775539, 1912402393, \
1098482180, 2738577070, 3091646463, 1505274463, 2079416566, 659100352, 839995305, \
1696257633, 274389836, 3973303017, 671127655, 1061109122, 517486945, 1379749962, \
3421383928, 3116950429, 2165882425, 2346928266, 2892678711, 2936066049, 1316407868, \
2873411858, 4279682888, 2744351923, 3290373816, 1014377279, 955200944, 4220990860, \
2386098930, 1772997650, 3757346974, 1621616438, 2877097197, 442116595, 2010480266, \
2867861469, 2955352695, 605335967, 2222936009, 2067554933, 4129906358, 1519608541, \
1195006590, 1942991038, 2736562236, 279162408, 1415982909, 4099901426, 1732201505, \
2934657937, 860563237, 2479235483, 3081651097, 2244720867, 3112631622, 1636991639, \
3860393305, 2312061927, 48780114, 1149090394, 2643246550, 1764050647, 3836789087, \
3474859076, 4237194338, 1735191073, 2150369208, 92164394, 756974036, 2314453957, \
323969533, 4267621035, 283649842, 810004843, 727855536, 1757827251, 3334960421, \
3261035106, 38417393, 2660980472, 1256633965, 2184045390, 811213141, 2857482069, \
2237770878, 3891003138, 2787806886, 2435192790, 2249324662, 3507764896, 995388363, \
856944153, 619213904, 3233967826, 3703465555, 3286531781, 3863193356, 2992340714, \
413696855, 3865185632, 1704163171, 3043634452, 2225424707, 2199018022, 3506117517, \
3311559776, 3374443561, 1207829628, 668793165, 1822020716, 2082656160, 1160606415, \
3034757648, 741703672, 3094328738, 459332691, 2702383376, 1610239915, 4162939394, \
557861574, 3805706338, 3832520705, 1248934879, 3250424034, 892335058, 74323433, \
3209751608, 3213220797, 3444035873, 3743886725, 1783837251, 610968664, 580745246, \
4041979504, 201684874, 2673219253, 1377283008, 3497299167, 2344209394, 2304982920, \
3081403782, 2599256854, 3184475235, 3373055826, 695186388, 2423332338, 222864327, \
1258227992, 3627871647, 3487724980, 4027953808, 3053320360, 533627073, 3026232514, \
2340271949, 867277230, 868513116, 2158535651, 2487822909, 3428235761, 3067196046, \
3435119657, 1908441839, 788668797, 3367703138, 3317763187, 908264443, 2252100381, \
764223334, 4127108988, 384641349, 3377374722, 1263833251, 1958694944, 3847832657, \
1253909612, 1096494446, 555725445, 2277045895, 3340096504, 1383318686, 4234428127, \
1072582179, 94169494, 1064509968, 2681151917, 2681864920, 734708852, 1338914021, \
1270409500, 1789469116, 4191988204, 1716329784, 2213764829, 3712538840, 919910444, \
1318414447, 3383806712, 3054941722, 3378649942, 1205735655, 1268136494, 2214009444, \
2532395133, 3232230447, 230294038, 342599089, 772808141, 4096882234, 3146662953, \
2784264306, 1860954704, 2675279609, 2984212876, 2466966981, 2627986059, 2985545332, \
2578042598, 1458940786, 2944243755, 3959506256, 1509151382, 325761900, 942251521, \
4184289782, 2756231555, 3297811774, 1169708099, 3280524138, 3805245319, 3227360276, \
3199632491, 2235795585, 2865407118, 36763651, 2441503575, 3314890374, 1755526087, \
17915536, 1196948233, 949343045, 3815841867, 489007833, 2654997597, 2834744136, \
417688687, 2843220846, 85621843, 747339336, 2043645709, 3520444394, 1825470818, \
647778910, 275904777, 1249389189, 3640887431, 4200779599, 323384601, 3446088641, \
4049835786, 1718989062, 3563787136, 44099190, 3281263107, 22910812, 1826109246, \
745118154, 3392171319, 1571490704, 354891067, 815955642, 1453450421, 940015623, \
796817754, 1260148619, 3898237757, 176670141, 1870249326, 3317738680, 448918002, \
4059166594, 2003827551, 987091377, 224855998, 3520570137, 789522610, 2604445123, \
454472869, 475688926, 2990723466, 523362238, 3897608102, 806637149, 2642229586, \
2928614432, 1564415411, 1691381054, 3816907227, 4082581003, 1895544448, 3728217394, \
3214813157, 4054301607, 1882632454, 2873728645, 3694943071, 1297991732, 2101682438, \
3952579552, 678650400, 1391722293, 478833748, 2976468591, 158586606, 2576499787, \
662690848, 3799889765, 3328894692, 2474578497, 2383901391, 1718193504, 3003184595, \
3630561213, 1929441113, 3848238627, 1594310094, 3040359840, 3051803867, 2462788790, \
954409915, 802581771, 681703307, 545982392, 2738993819, 8025358, 2827719383, \
770471093, 3484895980, 3111306320, 3900000891, 2116916652, 397746721, 2087689510, \
721433935, 1396088885, 2751612384, 1998988613, 2135074843, 2521131298, 707009172, \
2398321482, 688041159, 2264560137, 482388305, 207864885, 3735036991, 3490348331, \
1963642811, 3260224305, 3493564223, 1939428454, 1128799656, 1366012432, 2858822447, \
1428147157, 2261125391, 1611208390, 1134826333, 2374102525, 3833625209, 2266397263, \
3189115077, 770080230, 2674657172, 4280146640, 3604531615, 4235071805, 3436987249, \
509704467, 2582695198, 4256268040, 3391197562, 1460642842, 1617931012, 457825497, \
1031452907, 1330422862, 4125947620, 2280712485, 431892090, 2387410588, 2061126784, \
896457479, 3480499461, 2488196663, 4021103792, 1877063114, 2744470201, 1046140599, \
2129952955, 3583049218, 4217723693, 2720341743, 820661843, 1079873609, 3360954200, \
3652304997, 3335838575, 2178810636, 1908053374, 4026721976, 1793145418, 476541615, \
973420250, 515553040, 919292001, 2601786155, 1685119450, 3030170809, 1590676150, \
1665099167, 651151584, 2077190587, 957892642, 646336572, 2743719258, 866169074, \
851118829, 4225766285, 963748226, 799549420, 1955032629, 799460000, 2425744063, \
2441291571, 1928963772, 528930629, 2591962884, 3495142819, 1896021824, 901320159, \
3181820243, 843061941, 3338628510, 3782438992, 9515330, 1705797226, 953535929, \
764833876, 3202464965, 2970244591, 519154982, 3390617541, 566616744, 3438031503, \
1853838297, 170608755, 1393728434, 676900116, 3184965776, 1843100290, 78995357, \
2227939888, 3460264600, 1745705055, 1474086965, 572796246, 4081303004, 882828851, \
1295445825, 137639900, 3304579600, 2722437017, 4093422709, 273203373, 2666507854, \
3998836510, 493829981, 1623949669, 3482036755, 3390023939, 833233937, 1639668730, \
1499455075, 249728260, 1210694006, 3836497489, 1551488720, 3253074267, 3388238003, \
2372035079, 3945715164, 2029501215, 3362012634, 2007375355, 4074709820, 631485888, \
3135015769, 4273087084, 3648076204, 2739943601, 1374020358, 1760722448, 3773939706, \
1313027823, 1895251226, 4224465911, 421382535, 1141067370, 3660034846, 3393185650, \
1850995280, 1451917312, 3841455409, 3926840308, 1397397252, 2572864479, 2500171350, \
3119920613, 531400869, 1626487579, 1099320497, 407414753, 2438623324, 99073255, \
3175491512, 656431560, 1153671785, 236307875, 2824738046, 2320621382, 892174056, \
230984053, 719791226, 2718891946, 624), None)
self.random = random.Random()
self.random.setstate(fixedState)
"""
Data structures useful for implementing SearchAgents
"""
class Stack:
    """A last-in-first-out (LIFO) container backed by a Python list."""

    def __init__(self):
        # The newest element lives at the end of the list.
        self.list = []

    def push(self, item):
        """Place 'item' on top of the stack."""
        self.list.append(item)

    def pop(self):
        """Remove and return the most recently pushed item."""
        return self.list.pop()

    def isEmpty(self):
        """Return True when no items remain on the stack."""
        return not self.list
class Queue:
    """A first-in-first-out (FIFO) container backed by a Python list."""

    def __init__(self):
        # Newest element sits at index 0; the oldest is popped off the end.
        self.list = []

    def push(self, item):
        """Enqueue 'item' at the back of the queue."""
        self.list.insert(0, item)

    def pop(self):
        """
        Dequeue and return the earliest enqueued item still in the queue,
        removing it from the queue.
        """
        return self.list.pop()

    def isEmpty(self):
        """Return True when the queue holds no items."""
        return not self.list
class PriorityQueue:
    """
    Min-priority queue: each item carries a numeric priority and pop()
    retrieves the item whose priority is lowest, in O(log n).  Ties are
    broken by insertion order via a monotonically increasing counter.
    """

    def __init__(self):
        self.heap = []   # heapified list of (priority, insertion_index, item)
        self.count = 0   # total pushes so far; stable tie-breaker

    def push(self, item, priority):
        """Insert 'item' with the given priority."""
        heapq.heappush(self.heap, (priority, self.count, item))
        self.count += 1

    def pop(self):
        """Remove and return the lowest-priority item."""
        priority, order, item = heapq.heappop(self.heap)
        return item

    def isEmpty(self):
        """Return True when the queue holds no items."""
        return not self.heap

    def update(self, item, priority):
        """
        Lower an existing item's priority, or insert the item if absent.

        - present with a higher priority: replace its entry and re-heapify
        - present with an equal or lower priority: leave the queue unchanged
        - absent: behaves exactly like push
        """
        for position, (pri, order, existing) in enumerate(self.heap):
            if existing == item:
                if pri <= priority:
                    break
                del self.heap[position]
                self.heap.append((priority, order, item))
                heapq.heapify(self.heap)
                break
        else:
            self.push(item, priority)
class PriorityQueueWithFunction(PriorityQueue):
    """
    A priority queue exposing the push/pop signature of Queue and Stack so
    it can be used as a drop-in replacement for either.  Callers never pass
    priorities; a caller-supplied function derives one from each item.
    """

    def __init__(self, priorityFunction):
        """priorityFunction (item) -> priority"""
        self.priorityFunction = priorityFunction  # computes priorities on push
        PriorityQueue.__init__(self)

    def push(self, item):
        """Enqueue 'item' at the priority computed by the priority function."""
        PriorityQueue.push(self, item, self.priorityFunction(item))
def manhattanDistance(xy1, xy2):
    """Return the Manhattan (L1) distance between points xy1 and xy2."""
    (x1, y1), (x2, y2) = xy1, xy2
    return abs(x1 - x2) + abs(y1 - y2)
"""
Data structures and functions useful for various course projects
The search project should not need anything below this line.
"""
class Counter(dict):
"""
A counter keeps track of counts for a set of keys.
The counter class is an extension of the standard python
dictionary type. It is specialized to have number values
(integers or floats), and includes a handful of additional
functions to ease the task of counting data. In particular,
all keys are defaulted to have value 0. Using a dictionary:
a = {}
print(a['test'])
would give an error, while the Counter class analogue:
>>> a = Counter()
>>> print(a['test'])
0
returns the default 0 value. Note that to reference a key
that you know is contained in the counter,
you can still use the dictionary syntax:
>>> a = Counter()
>>> a['test'] = 2
>>> print(a['test'])
2
This is very useful for counting things without initializing their counts,
see for example:
>>> a['blah'] += 1
>>> print(a['blah'])
1
The counter also includes additional functionality useful in implementing
the classifiers for this assignment. Two counters can be added,
subtracted or multiplied together. See below for details. They can
also be normalized and their total count and arg max can be extracted.
"""
def __getitem__(self, idx):
self.setdefault(idx, 0)
return dict.__getitem__(self, idx)
def incrementAll(self, keys, count):
"""
Increments all elements of keys by the same count.
>>> a = Counter()
>>> a.incrementAll(['one','two', 'three'], 1)
>>> a['one']
1
>>> a['two']
1
"""
for key in keys:
self[key] += count
def argMax(self):
"""
Returns the key with the highest value.
"""
if len(self.keys()) == 0: return None
all = list(self.items())
values = [x[1] for x in all]
maxIndex = values.index(max(values))
return all[maxIndex][0]
def sortedKeys(self):
"""
Returns a list of keys sorted by their values. Keys
with the highest values will appear first.
>>> a = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> a['third'] = 1
>>> a.sortedKeys()
['second', 'third', 'first']
"""
sortedItems = self.items()
compare = lambda x, y: sign(y[1] - x[1])
sortedItems.sort(cmp=compare)
return [x[0] for x in sortedItems]
def totalCount(self):
"""
Returns the sum of counts for all keys.
"""
return sum(self.values())
def normalize(self):
"""
Edits the counter such that the total count of all
keys sums to 1. The ratio of counts for all keys
will remain the same. Note that normalizing an empty
Counter will result in an error.
"""
total = float(self.totalCount())
if total == 0: return
for key in self.keys():
self[key] = self[key] / total
def divideAll(self, divisor):
"""
Divides all counts by divisor
"""
divisor = float(divisor)
for key in self:
self[key] /= divisor
def copy(self):
"""
Returns a copy of the counter
"""
return Counter(dict.copy(self))
def __mul__(self, y):
"""
Multiplying two counters gives the dot product of their vectors where
each unique label is a vector element.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['second'] = 5
>>> a['third'] = 1.5
>>> a['fourth'] = 2.5
>>> a * b
14
"""
sum = 0
x = self
if len(x) > len(y):
x, y = y, x
for key in x:
if key not in y:
continue
sum += x[key] * y[key]
return sum
def __radd__(self, y):
"""
Adding another counter to a counter increments the current counter
by the values stored in the second counter.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> a += b
>>> a['first']
1
"""
for key, value in y.items():
self[key] += value
def __add__(self, y):
"""
Adding two counters gives a counter with the union of all keys and
counts of the second added to counts of the first.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> (a + b)['first']
1
"""
addend = Counter()
for key in self:
if key in y:
addend[key] = self[key] + y[key]
else:
addend[key] = self[key]
for key in y:
if key in self:
continue
addend[key] = y[key]
return addend
def __sub__(self, y):
"""
Subtracting a counter from another gives a counter with the union of all keys and
counts of the second subtracted from counts of the first.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> (a - b)['first']
-5
"""
addend = Counter()
for key in self:
if key in y:
addend[key] = self[key] - y[key]
else:
addend[key] = self[key]
for key in y:
if key in self:
continue
addend[key] = -1 * y[key]
return addend
def raiseNotDefined():
    """Report the calling method as unimplemented and abort the program."""
    caller = inspect.stack()[1]  # frame record of the unimplemented method
    print("*** Method not implemented: %s at line %s of %s" % (caller[3], caller[2], caller[1]))
    sys.exit(1)
def normalize(vectorOrCounter):
    """
    Normalize a vector or counter by dividing each value by the sum of
    all values.  All-zero inputs are returned unchanged.
    """
    if type(vectorOrCounter) == Counter:
        source = vectorOrCounter
        total = float(source.totalCount())
        if total == 0:
            return source
        scaled = Counter()
        for key in source.keys():
            scaled[key] = source[key] / total
        return scaled
    else:
        weights = vectorOrCounter
        weightSum = float(sum(weights))
        if weightSum == 0:
            return weights
        return [w / weightSum for w in weights]
def nSample(distribution, values, n):
    """
    Draw n independent samples from the discrete distribution over 'values'.
    The weights are normalized first when they do not sum to one.  Returns a
    list of n sampled values.
    """
    if sum(distribution) != 1:
        distribution = normalize(distribution)
    # Sorted uniform draws let us sweep the CDF once, left to right.
    draws = sorted(random.random() for _ in range(n))
    samples = []
    drawIndex, valueIndex = 0, 0
    cdf = distribution[0]
    while drawIndex < n:
        if draws[drawIndex] < cdf:
            drawIndex += 1
            samples.append(values[valueIndex])
        else:
            valueIndex += 1
            cdf += distribution[valueIndex]
    return samples
def sample(distribution, values=None):
    """
    Draw one sample from a discrete distribution.  Accepts either a Counter
    (whose keys become the values) or a weight list paired with 'values'.
    """
    if type(distribution) == Counter:
        items = sorted(distribution.items())
        distribution = [weight for _, weight in items]
        values = [key for key, _ in items]
    if sum(distribution) != 1:
        distribution = normalize(distribution)
    threshold = random.random()
    index, cumulative = 0, distribution[0]
    # Walk the CDF until it passes the uniform draw.
    while threshold > cumulative:
        index += 1
        cumulative += distribution[index]
    return values[index]
def sampleFromCounter(ctr):
    """Draw one sample from the distribution encoded by Counter 'ctr'."""
    pairs = sorted(ctr.items())
    return sample([weight for _, weight in pairs], [key for key, _ in pairs])
def getProbability(value, distribution, values):
    """
    Gives the probability of 'value' under the discrete distribution
    defined by the parallel sequences (distribution, values).  Mass from
    repeated occurrences of 'value' is accumulated.
    """
    return sum((prob for prob, val in zip(distribution, values) if val == value), 0.0)
def flipCoin(p):
    """Bernoulli trial: return True with probability p."""
    return random.random() < p
def chooseFromDistribution(distribution):
    """
    Sample from a distribution given either as a dict/Counter of
    key -> probability or as a sequence of (probability, key) pairs.
    """
    if type(distribution) == dict or type(distribution) == Counter:
        return sample(distribution)
    threshold = random.random()
    cumulative = 0.0
    for prob, element in distribution:
        cumulative += prob
        if threshold <= cumulative:
            return element
def nearestPoint(pos):
    """Round a continuous (row, col) position to the nearest grid point."""
    row, col = pos
    return (int(row + 0.5), int(col + 0.5))
def sign(x):
    """Return 1 for non-negative x, otherwise -1."""
    return 1 if x >= 0 else -1
def arrayInvert(array):
    """
    Inverts (transposes) a matrix stored as a list of lists.
    The output has as many rows as the input — the caller is expected to
    pass a square matrix.
    """
    inverted = [[] for _ in array]
    for row in array:
        for column, entry in enumerate(row):
            inverted[column].append(entry)
    return inverted
def matrixAsList(matrix, value=True):
    """
    Turns a matrix into the list of (row, col) coordinates whose entries
    equal 'value'.  Every row is assumed to be as long as the first.
    """
    return [(row, col)
            for row in range(len(matrix))
            for col in range(len(matrix[0]))
            if matrix[row][col] == value]
def lookup(name, namespace):
    """
    Get a method or class from any imported module from its name.
    Usage: lookup(functionName, globals())

    Dotted names ('module.attr') are resolved by importing the module;
    bare names are searched across every module object in 'namespace' and
    across 'namespace' itself.

    Raises Exception when the name is ambiguous or cannot be found.
    """
    if '.' in name:
        moduleName, objName = name.rsplit('.', 1)
        module = __import__(moduleName)
        return getattr(module, objName)
    modules = [obj for obj in namespace.values() if isinstance(obj, types.ModuleType)]
    options = [getattr(module, name) for module in modules if name in dir(module)]
    options += [obj for key, obj in namespace.items() if key == name]
    if len(options) == 1:
        return options[0]
    if len(options) > 1:
        # Bug fix: the original raised the bare format string without
        # interpolating the conflicting name.
        raise Exception('Name conflict for %s' % name)
    raise Exception('%s not found as a method or class' % name)
def pause():
    """
    Pauses the output stream awaiting user feedback.
    """
    print("<Press enter/return to continue>")
    input()  # bug fix: raw_input() was removed in Python 3
# code to handle timeouts
#
# FIXME
# NOTE: TimeoutFuncton is NOT reentrant. Later timeouts will silently
# disable earlier timeouts. Could be solved by maintaining a global list
# of active time outs. Currently, questions which have test cases calling
# this have all student code so wrapped.
#
import signal
import time
class TimeoutFunctionException(Exception):
    """Raised when a TimeoutFunction exceeds its allotted wall-clock time."""
    pass
class TimeoutFunction:
    """
    Wrap a callable so that invoking it raises TimeoutFunctionException when
    it runs longer than 'timeout' seconds.

    On platforms that provide SIGALRM the call is interrupted mid-run;
    elsewhere the elapsed time is only checked after the call returns.
    NOTE: not reentrant — a nested timeout silently disables an outer one.
    """

    def __init__(self, function, timeout):
        self.function = function  # the wrapped callable
        self.timeout = timeout    # whole seconds allowed per call

    def handle_timeout(self, signum, frame):
        """Signal handler: abort the wrapped call."""
        raise TimeoutFunctionException()

    def __call__(self, *args, **keyArgs):
        if not hasattr(signal, 'SIGALRM'):
            # No alarm signal (e.g. Windows): run to completion, then check.
            started = time.time()
            result = self.function(*args, **keyArgs)
            if time.time() - started >= self.timeout:
                self.handle_timeout(None, None)
            return result
        previous = signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.timeout)
        try:
            result = self.function(*args, **keyArgs)
        finally:
            # Always restore the previous handler and cancel the alarm.
            signal.signal(signal.SIGALRM, previous)
            signal.alarm(0)
        return result
_ORIGINAL_STDOUT = None
_ORIGINAL_STDERR = None
_MUTED = False
class WritableNull:
    """File-like sink that silently discards everything written to it."""

    def write(self, string):
        """Ignore 'string'; exists so the object quacks like a stream."""
        pass
def mutePrint():
    """Redirect stdout to a null sink until unmutePrint() is called."""
    global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
    if _MUTED:
        return  # already muted; keep the previously saved stream intact
    _MUTED = True
    _ORIGINAL_STDOUT = sys.stdout
    sys.stdout = WritableNull()
def unmutePrint():
    """Restore the stdout stream saved by mutePrint()."""
    global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
    if not _MUTED:
        return  # nothing to restore
    _MUTED = False
    sys.stdout = _ORIGINAL_STDOUT
| 38.5671
| 111
| 0.596176
|
import heapq
import random
import sys
import types
import inspect
class FixedRandom:
def __init__(self):
fixedState = (3, (2147483648, 507801126, 683453281, 310439348, 2597246090, \
2209084787, 2267831527, 979920060, 3098657677, 37650879, 807947081, 3974896263, \
881243242, 3100634921, 1334775171, 3965168385, 746264660, 4074750168, 500078808, \
776561771, 702988163, 1636311725, 2559226045, 157578202, 2498342920, 2794591496, \
4130598723, 496985844, 2944563015, 3731321600, 3514814613, 3362575829, 3038768745, \
2206497038, 1108748846, 1317460727, 3134077628, 988312410, 1674063516, 746456451, \
3958482413, 1857117812, 708750586, 1583423339, 3466495450, 1536929345, 1137240525, \
3875025632, 2466137587, 1235845595, 4214575620, 3792516855, 657994358, 1241843248, \
1695651859, 3678946666, 1929922113, 2351044952, 2317810202, 2039319015, 460787996, \
3654096216, 4068721415, 1814163703, 2904112444, 1386111013, 574629867, 2654529343, \
3833135042, 2725328455, 552431551, 4006991378, 1331562057, 3710134542, 303171486, \
1203231078, 2670768975, 54570816, 2679609001, 578983064, 1271454725, 3230871056, \
2496832891, 2944938195, 1608828728, 367886575, 2544708204, 103775539, 1912402393, \
1098482180, 2738577070, 3091646463, 1505274463, 2079416566, 659100352, 839995305, \
1696257633, 274389836, 3973303017, 671127655, 1061109122, 517486945, 1379749962, \
3421383928, 3116950429, 2165882425, 2346928266, 2892678711, 2936066049, 1316407868, \
2873411858, 4279682888, 2744351923, 3290373816, 1014377279, 955200944, 4220990860, \
2386098930, 1772997650, 3757346974, 1621616438, 2877097197, 442116595, 2010480266, \
2867861469, 2955352695, 605335967, 2222936009, 2067554933, 4129906358, 1519608541, \
1195006590, 1942991038, 2736562236, 279162408, 1415982909, 4099901426, 1732201505, \
2934657937, 860563237, 2479235483, 3081651097, 2244720867, 3112631622, 1636991639, \
3860393305, 2312061927, 48780114, 1149090394, 2643246550, 1764050647, 3836789087, \
3474859076, 4237194338, 1735191073, 2150369208, 92164394, 756974036, 2314453957, \
323969533, 4267621035, 283649842, 810004843, 727855536, 1757827251, 3334960421, \
3261035106, 38417393, 2660980472, 1256633965, 2184045390, 811213141, 2857482069, \
2237770878, 3891003138, 2787806886, 2435192790, 2249324662, 3507764896, 995388363, \
856944153, 619213904, 3233967826, 3703465555, 3286531781, 3863193356, 2992340714, \
413696855, 3865185632, 1704163171, 3043634452, 2225424707, 2199018022, 3506117517, \
3311559776, 3374443561, 1207829628, 668793165, 1822020716, 2082656160, 1160606415, \
3034757648, 741703672, 3094328738, 459332691, 2702383376, 1610239915, 4162939394, \
557861574, 3805706338, 3832520705, 1248934879, 3250424034, 892335058, 74323433, \
3209751608, 3213220797, 3444035873, 3743886725, 1783837251, 610968664, 580745246, \
4041979504, 201684874, 2673219253, 1377283008, 3497299167, 2344209394, 2304982920, \
3081403782, 2599256854, 3184475235, 3373055826, 695186388, 2423332338, 222864327, \
1258227992, 3627871647, 3487724980, 4027953808, 3053320360, 533627073, 3026232514, \
2340271949, 867277230, 868513116, 2158535651, 2487822909, 3428235761, 3067196046, \
3435119657, 1908441839, 788668797, 3367703138, 3317763187, 908264443, 2252100381, \
764223334, 4127108988, 384641349, 3377374722, 1263833251, 1958694944, 3847832657, \
1253909612, 1096494446, 555725445, 2277045895, 3340096504, 1383318686, 4234428127, \
1072582179, 94169494, 1064509968, 2681151917, 2681864920, 734708852, 1338914021, \
1270409500, 1789469116, 4191988204, 1716329784, 2213764829, 3712538840, 919910444, \
1318414447, 3383806712, 3054941722, 3378649942, 1205735655, 1268136494, 2214009444, \
2532395133, 3232230447, 230294038, 342599089, 772808141, 4096882234, 3146662953, \
2784264306, 1860954704, 2675279609, 2984212876, 2466966981, 2627986059, 2985545332, \
2578042598, 1458940786, 2944243755, 3959506256, 1509151382, 325761900, 942251521, \
4184289782, 2756231555, 3297811774, 1169708099, 3280524138, 3805245319, 3227360276, \
3199632491, 2235795585, 2865407118, 36763651, 2441503575, 3314890374, 1755526087, \
17915536, 1196948233, 949343045, 3815841867, 489007833, 2654997597, 2834744136, \
417688687, 2843220846, 85621843, 747339336, 2043645709, 3520444394, 1825470818, \
647778910, 275904777, 1249389189, 3640887431, 4200779599, 323384601, 3446088641, \
4049835786, 1718989062, 3563787136, 44099190, 3281263107, 22910812, 1826109246, \
745118154, 3392171319, 1571490704, 354891067, 815955642, 1453450421, 940015623, \
796817754, 1260148619, 3898237757, 176670141, 1870249326, 3317738680, 448918002, \
4059166594, 2003827551, 987091377, 224855998, 3520570137, 789522610, 2604445123, \
454472869, 475688926, 2990723466, 523362238, 3897608102, 806637149, 2642229586, \
2928614432, 1564415411, 1691381054, 3816907227, 4082581003, 1895544448, 3728217394, \
3214813157, 4054301607, 1882632454, 2873728645, 3694943071, 1297991732, 2101682438, \
3952579552, 678650400, 1391722293, 478833748, 2976468591, 158586606, 2576499787, \
662690848, 3799889765, 3328894692, 2474578497, 2383901391, 1718193504, 3003184595, \
3630561213, 1929441113, 3848238627, 1594310094, 3040359840, 3051803867, 2462788790, \
954409915, 802581771, 681703307, 545982392, 2738993819, 8025358, 2827719383, \
770471093, 3484895980, 3111306320, 3900000891, 2116916652, 397746721, 2087689510, \
721433935, 1396088885, 2751612384, 1998988613, 2135074843, 2521131298, 707009172, \
2398321482, 688041159, 2264560137, 482388305, 207864885, 3735036991, 3490348331, \
1963642811, 3260224305, 3493564223, 1939428454, 1128799656, 1366012432, 2858822447, \
1428147157, 2261125391, 1611208390, 1134826333, 2374102525, 3833625209, 2266397263, \
3189115077, 770080230, 2674657172, 4280146640, 3604531615, 4235071805, 3436987249, \
509704467, 2582695198, 4256268040, 3391197562, 1460642842, 1617931012, 457825497, \
1031452907, 1330422862, 4125947620, 2280712485, 431892090, 2387410588, 2061126784, \
896457479, 3480499461, 2488196663, 4021103792, 1877063114, 2744470201, 1046140599, \
2129952955, 3583049218, 4217723693, 2720341743, 820661843, 1079873609, 3360954200, \
3652304997, 3335838575, 2178810636, 1908053374, 4026721976, 1793145418, 476541615, \
973420250, 515553040, 919292001, 2601786155, 1685119450, 3030170809, 1590676150, \
1665099167, 651151584, 2077190587, 957892642, 646336572, 2743719258, 866169074, \
851118829, 4225766285, 963748226, 799549420, 1955032629, 799460000, 2425744063, \
2441291571, 1928963772, 528930629, 2591962884, 3495142819, 1896021824, 901320159, \
3181820243, 843061941, 3338628510, 3782438992, 9515330, 1705797226, 953535929, \
764833876, 3202464965, 2970244591, 519154982, 3390617541, 566616744, 3438031503, \
1853838297, 170608755, 1393728434, 676900116, 3184965776, 1843100290, 78995357, \
2227939888, 3460264600, 1745705055, 1474086965, 572796246, 4081303004, 882828851, \
1295445825, 137639900, 3304579600, 2722437017, 4093422709, 273203373, 2666507854, \
3998836510, 493829981, 1623949669, 3482036755, 3390023939, 833233937, 1639668730, \
1499455075, 249728260, 1210694006, 3836497489, 1551488720, 3253074267, 3388238003, \
2372035079, 3945715164, 2029501215, 3362012634, 2007375355, 4074709820, 631485888, \
3135015769, 4273087084, 3648076204, 2739943601, 1374020358, 1760722448, 3773939706, \
1313027823, 1895251226, 4224465911, 421382535, 1141067370, 3660034846, 3393185650, \
1850995280, 1451917312, 3841455409, 3926840308, 1397397252, 2572864479, 2500171350, \
3119920613, 531400869, 1626487579, 1099320497, 407414753, 2438623324, 99073255, \
3175491512, 656431560, 1153671785, 236307875, 2824738046, 2320621382, 892174056, \
230984053, 719791226, 2718891946, 624), None)
self.random = random.Random()
self.random.setstate(fixedState)
class Stack:
    """A container with a last-in-first-out (LIFO) queuing policy."""
    def __init__(self):
        self.list = []
    def push(self, item):
        """Push 'item' onto the stack."""
        self.list.append(item)
    def pop(self):
        """Pop the most recently pushed item from the stack."""
        return self.list.pop()
    def isEmpty(self):
        """Return True if the stack is empty."""
        return len(self.list) == 0
class Queue:
    """A container with a first-in-first-out (FIFO) queuing policy."""
    def __init__(self):
        self.list = []
    def push(self, item):
        """Enqueue 'item' into the queue."""
        self.list.insert(0, item)
    def pop(self):
        """Dequeue and return the earliest enqueued item still in the queue."""
        return self.list.pop()
    def isEmpty(self):
        """Return True if the queue is empty."""
        return len(self.list) == 0
class PriorityQueue:
    """
    Min-priority queue backed by a binary heap: pop() returns the item with
    the lowest priority.  Ties are broken by insertion order via 'count'.
    """
    def __init__(self):
        self.heap = []   # heapified list of (priority, insertion_index, item)
        self.count = 0   # total pushes; stable tie-breaker
    def push(self, item, priority):
        """Insert 'item' with the given priority."""
        entry = (priority, self.count, item)
        heapq.heappush(self.heap, entry)
        self.count += 1
    def pop(self):
        """Remove and return the lowest-priority item."""
        (_, _, item) = heapq.heappop(self.heap)
        return item
    def isEmpty(self):
        """Return True if the queue is empty."""
        return len(self.heap) == 0
    def update(self, item, priority):
        """Lower an existing item's priority, or push the item if absent."""
        # If item already in priority queue with higher priority, update its
        # priority and rebuild the heap; with equal or lower priority, do
        # nothing; if absent, behave exactly like push (the for/else below).
        for index, (p, c, i) in enumerate(self.heap):
            if i == item:
                if p <= priority:
                    break
                del self.heap[index]
                self.heap.append((priority, c, item))
                heapq.heapify(self.heap)
                break
        else:
            self.push(item, priority)
class PriorityQueueWithFunction(PriorityQueue):
    """
    Priority queue with the push/pop signature of Queue and Stack, designed
    as a drop-in replacement.  A caller-provided function derives each
    item's priority.
    """
    def __init__(self, priorityFunction):
        """priorityFunction (item) -> priority"""
        self.priorityFunction = priorityFunction
        PriorityQueue.__init__(self)
    def push(self, item):
        """Enqueue 'item' with the priority given by the priority function."""
        PriorityQueue.push(self, item, self.priorityFunction(item))
def manhattanDistance(xy1, xy2):
    """Return the Manhattan distance between points xy1 and xy2."""
    return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])
class Counter(dict):
    """
    A dict specialized to numeric values: missing keys default to 0, and
    counters support addition, subtraction, dot product (*), in-place
    normalization, sorted keys and arg max.
    """
    def __getitem__(self, idx):
        # Materialize missing keys with a default count of 0.
        self.setdefault(idx, 0)
        return dict.__getitem__(self, idx)
    def incrementAll(self, keys, count):
        """Add 'count' to every key in 'keys'."""
        for key in keys:
            self[key] += count
    def argMax(self):
        """Return the key with the highest value (None when empty)."""
        if len(self.keys()) == 0: return None
        entries = list(self.items())  # avoid shadowing the builtin 'all'
        values = [value for _, value in entries]
        return entries[values.index(max(values))][0]
    def sortedKeys(self):
        """Return the keys sorted by descending value."""
        # Fixed for Python 3: dict.items() has no .sort() and list.sort()
        # lost its 'cmp' argument; sort on the value with key= instead.
        return [key for key, _ in sorted(self.items(), key=lambda kv: kv[1], reverse=True)]
    def totalCount(self):
        """Return the sum of counts over all keys."""
        return sum(self.values())
    def normalize(self):
        """Scale counts in place so they sum to 1 (no-op when total is 0)."""
        total = float(self.totalCount())
        if total == 0: return
        for key in list(self.keys()):
            self[key] = self[key] / total
    def divideAll(self, divisor):
        """Divide every count by 'divisor' in place."""
        divisor = float(divisor)
        for key in self:
            self[key] /= divisor
    def copy(self):
        """Return a shallow copy of this counter."""
        return Counter(dict.copy(self))
    def __mul__(self, y):
        """Dot product of two counters over their shared keys."""
        shorter, longer = (self, y) if len(self) <= len(y) else (y, self)
        total = 0
        for key in shorter:
            if key in longer:
                total += shorter[key] * longer[key]
        return total
    def __radd__(self, y):
        """Increment this counter in place by mapping 'y' and return it."""
        for key, value in y.items():
            self[key] += value
        return self  # fixed: the original returned None
    def __add__(self, y):
        """Return a new counter with key-wise sums of self and y."""
        addend = Counter()
        for key in self:
            addend[key] = self[key] + y[key] if key in y else self[key]
        for key in y:
            if key not in self:
                addend[key] = y[key]
        return addend
    def __sub__(self, y):
        """Return a new counter with key-wise differences self - y."""
        addend = Counter()
        for key in self:
            addend[key] = self[key] - y[key] if key in y else self[key]
        for key in y:
            if key not in self:
                addend[key] = -1 * y[key]
        return addend
def raiseNotDefined():
    """Report the calling method as unimplemented and abort the program."""
    fileName = inspect.stack()[1][1]   # file of the caller's frame
    line = inspect.stack()[1][2]       # line number within that file
    method = inspect.stack()[1][3]     # name of the unimplemented method
    print("*** Method not implemented: %s at line %s of %s" % (method, line, fileName))
    sys.exit(1)
def normalize(vectorOrCounter):
    """
    Normalize a vector or counter by dividing each value by the sum of all
    values.  All-zero inputs are returned unchanged.
    """
    normalizedCounter = Counter()
    if type(vectorOrCounter) == type(normalizedCounter):
        counter = vectorOrCounter
        total = float(counter.totalCount())
        if total == 0: return counter
        for key in counter.keys():
            value = counter[key]
            normalizedCounter[key] = value / total
        return normalizedCounter
    else:
        vector = vectorOrCounter
        s = float(sum(vector))
        if s == 0: return vector
        return [el / s for el in vector]
def nSample(distribution, values, n):
    """
    Draw n independent samples from the discrete distribution over 'values',
    normalizing the weights first if they do not sum to one.
    """
    if sum(distribution) != 1:
        distribution = normalize(distribution)
    # Sorted uniform draws allow one left-to-right sweep of the CDF.
    rand = [random.random() for i in range(n)]
    rand.sort()
    samples = []
    samplePos, distPos, cdf = 0, 0, distribution[0]
    while samplePos < n:
        if rand[samplePos] < cdf:
            samplePos += 1
            samples.append(values[distPos])
        else:
            distPos += 1
            cdf += distribution[distPos]
    return samples
def sample(distribution, values=None):
    """
    Draw one sample from a discrete distribution.  Accepts either a Counter
    (keys become the values) or a weight list paired with 'values'.
    """
    if type(distribution) == Counter:
        items = sorted(distribution.items())
        distribution = [i[1] for i in items]
        values = [i[0] for i in items]
    if sum(distribution) != 1:
        distribution = normalize(distribution)
    choice = random.random()
    i, total = 0, distribution[0]
    # Walk the CDF until it passes the uniform draw.
    while choice > total:
        i += 1
        total += distribution[i]
    return values[i]
def sampleFromCounter(ctr):
    """Draw one sample from the distribution encoded by Counter 'ctr'."""
    items = sorted(ctr.items())
    return sample([v for k, v in items], [k for k, v in items])
def getProbability(value, distribution, values):
    """
    Give the probability of 'value' under the discrete distribution defined
    by the parallel sequences (distribution, values); repeated occurrences
    of 'value' have their mass accumulated.
    """
    total = 0.0
    for prob, val in zip(distribution, values):
        if val == value:
            total += prob
    return total
def flipCoin(p):
    """Bernoulli trial: return True with probability p."""
    r = random.random()
    return r < p
def chooseFromDistribution(distribution):
    """
    Take either a counter/dict of key -> prob or a sequence of (prob, key)
    pairs and sample one key.
    """
    if type(distribution) == dict or type(distribution) == Counter:
        return sample(distribution)
    r = random.random()
    base = 0.0
    for prob, element in distribution:
        base += prob
        if r <= base: return element
def nearestPoint(pos):
    """Find the nearest grid point to a position (discretizes)."""
    (current_row, current_col) = pos
    grid_row = int(current_row + 0.5)
    grid_col = int(current_col + 0.5)
    return (grid_row, grid_col)
def sign(x):
    """Return 1 for non-negative x, otherwise -1."""
    if (x >= 0):
        return 1
    else:
        return -1
def arrayInvert(array):
    """Invert (transpose) a matrix stored as a list of lists."""
    # One output row per input row: the code assumes a square matrix.
    result = [[] for i in array]
    for outer in array:
        for inner in range(len(outer)):
            result[inner].append(outer[inner])
    return result
def matrixAsList(matrix, value=True):
    """Turn a matrix into a list of (row, col) coordinates matching 'value'."""
    rows, cols = len(matrix), len(matrix[0])
    cells = []
    for row in range(rows):
        for col in range(cols):
            if matrix[row][col] == value:
                cells.append((row, col))
    return cells
def lookup(name, namespace):
    """
    Get a method or class from any imported module from its name.
    Usage: lookup(functionName, globals())

    Raises Exception when the name is ambiguous or cannot be found.
    """
    dots = name.count('.')
    if dots > 0:
        moduleName, objName = '.'.join(name.split('.')[:-1]), name.split('.')[-1]
        module = __import__(moduleName)
        return getattr(module, objName)
    else:
        modules = [obj for obj in namespace.values() if isinstance(obj, types.ModuleType)]
        options = [getattr(module, name) for module in modules if name in dir(module)]
        options += [obj[1] for obj in namespace.items() if obj[0] == name]
        if len(options) == 1: return options[0]
        # Bug fix: the original raised the bare format string without
        # interpolating the conflicting name.
        if len(options) > 1: raise Exception('Name conflict for %s' % name)
        raise Exception('%s not found as a method or class' % name)
def pause():
    """Pause the output stream awaiting user feedback."""
    print("<Press enter/return to continue>")
    input()  # bug fix: raw_input() was removed in Python 3
import signal
import time
class TimeoutFunctionException(Exception):
    """Raised when a TimeoutFunction exceeds its allotted wall-clock time."""
    pass
class TimeoutFunction:
    """
    Wrap a callable so invoking it raises TimeoutFunctionException when it
    runs longer than 'timeout' seconds.  Not reentrant: a nested timeout
    silently disables an outer one.
    """
    def __init__(self, function, timeout):
        self.timeout = timeout    # whole seconds allowed per call
        self.function = function  # the wrapped callable
    def handle_timeout(self, signum, frame):
        """Signal handler: abort the wrapped call."""
        raise TimeoutFunctionException()
    def __call__(self, *args, **keyArgs):
        # With SIGALRM available, interrupt the call mid-run; otherwise only
        # check the elapsed time after the call returns.
        if hasattr(signal, 'SIGALRM'):
            old = signal.signal(signal.SIGALRM, self.handle_timeout)
            signal.alarm(self.timeout)
            try:
                result = self.function(*args, **keyArgs)
            finally:
                # Always restore the previous handler and cancel the alarm.
                signal.signal(signal.SIGALRM, old)
                signal.alarm(0)
        else:
            startTime = time.time()
            result = self.function(*args, **keyArgs)
            timeElapsed = time.time() - startTime
            if timeElapsed >= self.timeout:
                self.handle_timeout(None, None)
        return result
_ORIGINAL_STDOUT = None
_ORIGINAL_STDERR = None
_MUTED = False
class WritableNull:
    """File-like sink that silently discards everything written to it."""
    def write(self, string):
        """Ignore 'string'; exists so the object quacks like a stream."""
        pass
def mutePrint():
    """Redirect stdout to a null sink until unmutePrint() is called."""
    global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
    if _MUTED:
        return  # already muted; keep the previously saved stream intact
    _MUTED = True
    _ORIGINAL_STDOUT = sys.stdout
    sys.stdout = WritableNull()
def unmutePrint():
    """Restore the stdout stream saved by mutePrint()."""
    global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
    if not _MUTED:
        return  # nothing to restore
    _MUTED = False
    sys.stdout = _ORIGINAL_STDOUT
| true
| true
|
f716476a8f9925dfffc1e7bbbe2678c6a6fa7d50
| 11,978
|
py
|
Python
|
tools/accuracy_checker/accuracy_checker/metrics/coco_orig_metrics.py
|
apankratovantonp/open_model_zoo
|
e372d4173e50741a6828cda415d55c37320f89cd
|
[
"Apache-2.0"
] | 5
|
2020-03-09T07:39:04.000Z
|
2021-08-16T07:17:28.000Z
|
tools/accuracy_checker/accuracy_checker/metrics/coco_orig_metrics.py
|
ananda89/open_model_zoo
|
e372d4173e50741a6828cda415d55c37320f89cd
|
[
"Apache-2.0"
] | 6
|
2020-09-26T01:24:39.000Z
|
2022-02-10T02:16:03.000Z
|
tools/accuracy_checker/accuracy_checker/metrics/coco_orig_metrics.py
|
ananda89/open_model_zoo
|
e372d4173e50741a6828cda415d55c37320f89cd
|
[
"Apache-2.0"
] | 3
|
2020-07-06T08:45:26.000Z
|
2020-11-12T10:14:45.000Z
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tempfile
import json
from ..representation import (
DetectionPrediction,
DetectionAnnotation,
CoCoInstanceSegmentationAnnotation,
CoCocInstanceSegmentationPrediction,
PoseEstimationAnnotation,
PoseEstimationPrediction
)
from ..logging import print_info
from ..config import BaseField
from ..utils import get_or_parse_value
from .metric import FullDatasetEvaluationMetric
from .coco_metrics import COCO_THRESHOLDS
SHOULD_SHOW_PREDICTIONS = False
SHOULD_DISPLAY_DEBUG_IMAGES = False
if SHOULD_DISPLAY_DEBUG_IMAGES:
import cv2
def box_to_coco(prediction_data_to_store, pred):
    """
    Attach a COCO-style 'bbox' ([x, y, width, height]) to each prediction
    record, built from the detection's corner coordinates.  The records are
    updated in place and the same list is returned.
    """
    corners = zip(
        prediction_data_to_store,
        pred.x_mins.tolist(), pred.y_mins.tolist(),
        pred.x_maxs.tolist(), pred.y_maxs.tolist()
    )
    for record, x_min, y_min, x_max, y_max in corners:
        # COCO stores top-left corner plus inclusive extents.
        record['bbox'] = [x_min, y_min, x_max - x_min + 1, y_max - y_min + 1]
    return prediction_data_to_store
def segm_to_coco(prediction_data_to_store, pred):
    """
    Attach each prediction's encoded segmentation mask to its record under
    the 'segmentation' key.  Records are updated in place and returned.
    """
    for record, encoded_mask in zip(prediction_data_to_store, pred.mask):
        record['segmentation'] = encoded_mask
    return prediction_data_to_store
def keypoints_to_coco(prediction_data_to_store, pred):
    """
    Attach flattened COCO keypoints ([x1, y1, v1, x2, y2, v2, ...]) to each
    prediction record; visibility flags are cast to int.  Records are
    updated in place and returned.
    """
    per_instance = zip(
        prediction_data_to_store, pred.x_values, pred.y_values, pred.visibility
    )
    for record, xs, ys, vis in per_instance:
        flattened = []
        for x, y, v in zip(xs, ys, vis):
            flattened += [x, y, int(v)]
        record['keypoints'] = flattened
    return prediction_data_to_store
# Dispatch table: maps an iou_type to the converter that writes the fields
# the original COCO evaluation tool expects into each prediction record.
iou_specific_processing = {
    'bbox': box_to_coco,
    'segm': segm_to_coco,
    'keypoints': keypoints_to_coco
}
class MSCOCOorigBaseMetric(FullDatasetEvaluationMetric):
annotation_types = (DetectionAnnotation, )
prediction_types = (DetectionPrediction, )
iou_type = 'bbox'
    @classmethod
    def parameters(cls):
        """Extend the base metric parameters with a COCO IoU threshold option."""
        parameters = super().parameters()
        parameters.update({
            'threshold': BaseField(optional=True, default='.50:.05:.95', description='threshold for metric calculation')
        })
        return parameters
    def configure(self):
        """Resolve the configured threshold string into numeric COCO thresholds."""
        self.threshold = get_or_parse_value(self.get_value_from_config('threshold'), COCO_THRESHOLDS)
    @staticmethod
    def generate_map_pred_label_id_to_coco_cat_id(has_background, use_full_label_map):
        """
        Build the identity-plus-shift mapping from predicted label ids to
        COCO category ids: ids are offset by 1 unless the model reserves a
        background label, and cover up to 90 categories for the full COCO
        label map (80 otherwise).
        """
        shift = 0 if has_background else 1
        max_cat = 90 if use_full_label_map else 80
        max_key = max_cat - shift
        res_map = {i: i + shift for i in range(0, max_key+1)}
        # Sanity check: the largest mapped id must be the last COCO category.
        assert max(res_map.values()) == max_cat
        return res_map
    def _prepare_coco_structures(self):
        """
        Load the COCO ground-truth annotation file and build the lookup
        structures needed for evaluation.

        Returns a tuple of:
          - the pycocotools COCO object,
          - a map from image file name (basename) to COCO image id,
          - a map from predicted label id to COCO category id.

        Raises ValueError when the dataset config lacks annotation
        conversion parameters or the annotation file does not exist.
        """
        from pycocotools.coco import COCO
        annotation_conversion_parameters = self.dataset.config.get('annotation_conversion')
        if not annotation_conversion_parameters:
            raise ValueError('annotation_conversion parameter is not pointed, '
                             'but it is required for coco original metrics')
        annotation_file = annotation_conversion_parameters.get('annotation_file')
        # NOTE(review): 'annotation_file' appears to be a pathlib.Path here
        # (it has .is_file()) — confirm against the converter config schema.
        if not annotation_file.is_file():
            raise ValueError("annotation file '{}' is not found".format(annotation_file))
        has_background = annotation_conversion_parameters.get('has_background', False)
        use_full_label_map = annotation_conversion_parameters.get('use_full_label_map', False)
        meta = self.dataset.metadata
        coco = COCO(str(annotation_file))
        # Category id 0 must stay free so it can be used for background.
        assert 0 not in coco.cats.keys()
        coco_cat_name_to_id = {v['name']: k for k, v in coco.cats.items()}
        if has_background:
            assert 'background_label' in meta
            bg_lbl = meta['background_label']
            bg_name = meta['label_map'][bg_lbl]
            assert bg_name not in coco_cat_name_to_id
            coco_cat_name_to_id[bg_name] = bg_lbl
        else:
            assert 'background_label' not in meta
        if not use_full_label_map:
            # Direct name-based mapping from the dataset label map.
            map_pred_label_id_to_coco_cat_id = {k: coco_cat_name_to_id[v] for k, v in meta['label_map'].items()}
        else:
            # Synthetic shifted mapping; verify it agrees with the name-based one.
            map_pred_label_id_to_coco_cat_id = self.generate_map_pred_label_id_to_coco_cat_id(has_background,
                                                                                              use_full_label_map)
            for k, v in meta['label_map'].items():
                assert map_pred_label_id_to_coco_cat_id[k] == coco_cat_name_to_id[v], (
                    "k = {}, v = {}, map_pred_label_id_to_coco_cat_id[k] = {}, coco_cat_name_to_id[v] = {}".format(
                        k, v, map_pred_label_id_to_coco_cat_id[k], coco_cat_name_to_id[v]))
            assert all(map_pred_label_id_to_coco_cat_id[k] == coco_cat_name_to_id[v]
                       for k, v in meta['label_map'].items())
        map_coco_img_file_name_to_img_id = {os.path.basename(v['file_name']): v['id'] for v in coco.dataset['images']}
        assert len(map_coco_img_file_name_to_img_id) == len(coco.dataset['images']), "Image name duplications"
        return coco, map_coco_img_file_name_to_img_id, map_pred_label_id_to_coco_cat_id
@staticmethod
def _convert_data_to_coco_format(
predictions, map_coco_img_file_name_to_img_id, map_pred_label_id_to_coco_cat_id, iou_type='bbox'
):
coco_data_to_store = []
for pred in predictions:
prediction_data_to_store = []
cur_name = pred.identifier
cur_name = os.path.basename(cur_name)
assert cur_name in map_coco_img_file_name_to_img_id
cur_img_id = map_coco_img_file_name_to_img_id[cur_name]
labels = pred.labels.tolist()
scores = pred.scores.tolist()
cur_num = len(labels)
assert len(scores) == cur_num
coco_cats = [map_pred_label_id_to_coco_cat_id[lbl] for lbl in labels]
for (s, cur_cat) in zip(scores, coco_cats):
prediction_data_to_store.append({
'image_id': cur_img_id,
'score': s,
'category_id': cur_cat,
'_image_name_from_dataset': cur_name,
})
iou_specific_converter = iou_specific_processing.get(iou_type)
if iou_specific_converter is None:
raise ValueError("unknown iou type: '{}'".format(iou_type))
prediction_data_to_store = iou_specific_converter(prediction_data_to_store, pred)
coco_data_to_store.extend(prediction_data_to_store)
return coco_data_to_store
@staticmethod
def _reload_results_to_coco_class(coco, coco_data_to_store):
with tempfile.NamedTemporaryFile() as ftmp:
json_file_to_store = ftmp.name + ".json"
with open(json_file_to_store, 'w') as f:
json.dump(coco_data_to_store, f, indent=4)
json_file_to_load = json_file_to_store
coco_res = coco.loadRes(json_file_to_load)
return coco_res
@staticmethod
def _debug_printing_and_displaying_predictions(coco, coco_res, data_source, should_display_debug_images):
for coco_data_el in coco_res.dataset['annotations']:
cur_name_from_dataset = coco_data_el.get('_image_name_from_dataset', None)
x1, y1, w, h = coco_data_el['bbox']
x2 = x1+w
y2 = y1+h
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
category_id = coco_data_el['category_id']
category_name = coco.cats[category_id]['name']
coco_image_id = coco_data_el['image_id']
cur_name = coco.imgs[coco_image_id]['file_name']
assert cur_name == cur_name_from_dataset or cur_name_from_dataset is None
s = coco_data_el['score']
print_info("cur_name =" + cur_name)
print_info(" {} {} {} {} {} % {}".format(
x1, y1, x2, y2, int(100*s), category_name))
if should_display_debug_images:
img_path = os.path.join(str(data_source), str(cur_name))
img = cv2.imread(img_path)
cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
cv2.imshow("img", img)
key = 0
while key not in (32, 27):
key = cv2.waitKey() & 0xff
should_display_debug_images = (key != 27)
@staticmethod
def _run_coco_evaluation(coco, coco_res, iou_type='bbox', threshold=None):
from pycocotools.cocoeval import COCOeval
cocoeval = COCOeval(coco, coco_res, iouType=iou_type)
if threshold is not None:
cocoeval.params.iouThrs = threshold
cocoeval.evaluate()
cocoeval.accumulate()
cocoeval.summarize()
res = cocoeval.stats.tolist()
res_len = len(res)
middle_index = res_len //2
assert res_len == 12 if iou_type != 'keypoints' else 10
res = [res[:middle_index], res[middle_index:]]
return res
def compute_precision_recall(self, predictions):
coco, map_coco_img_file_name_to_img_id, map_pred_label_id_to_coco_cat_id = self._prepare_coco_structures()
coco_data_to_store = self._convert_data_to_coco_format(
predictions, map_coco_img_file_name_to_img_id, map_pred_label_id_to_coco_cat_id, self.iou_type
)
coco_res = self._reload_results_to_coco_class(coco, coco_data_to_store)
if SHOULD_SHOW_PREDICTIONS:
data_source = self.dataset.config.get('data_source')
should_display_debug_images = SHOULD_DISPLAY_DEBUG_IMAGES
self._debug_printing_and_displaying_predictions(coco, coco_res, data_source, should_display_debug_images)
res = self._run_coco_evaluation(coco, coco_res, self.iou_type, self.threshold)
print_info("MSCOCOorigBaseMetric.compute_precision_recall: returning " + str(res))
return res
def evaluate(self, annotations, predictions):
pass
class MSCOCOorigAveragePrecision(MSCOCOorigBaseMetric):
    """Average precision for boxes, computed by the original COCO tools."""
    __provider__ = 'coco_orig_precision'

    def evaluate(self, annotations, predictions):
        # [0][0]: first entry of the precision half of the summary stats
        # (AP averaged over the configured IoU thresholds).
        return self.compute_precision_recall(predictions)[0][0]
class MSCOCOOrigSegmAveragePrecision(MSCOCOorigAveragePrecision):
    """Same AP metric, evaluated over instance segmentation masks."""
    __provider__ = 'coco_orig_segm_precision'

    annotation_types = (CoCoInstanceSegmentationAnnotation, )
    prediction_types = (CoCocInstanceSegmentationPrediction, )
    iou_type = 'segm'
class MSCOCOorigRecall(MSCOCOorigBaseMetric):
    """Average recall for boxes, computed by the original COCO tools."""
    __provider__ = 'coco_orig_recall'

    def evaluate(self, annotations, predictions):
        # [1][2]: third entry of the recall half of the summary stats
        # (AR at the largest maxDets setting in standard COCO summaries).
        return self.compute_precision_recall(predictions)[1][2]
class MSCOCOorigSegmRecall(MSCOCOorigRecall):
    """Same AR metric, evaluated over instance segmentation masks."""
    __provider__ = 'coco_orig_segm_recall'

    annotation_types = (CoCoInstanceSegmentationAnnotation, )
    prediction_types = (CoCocInstanceSegmentationPrediction, )
    iou_type = 'segm'
class MSCOCOOrigKeyPointsAveragePrecision(MSCOCOorigAveragePrecision):
    """AP over keypoints (OKS-based matching inside pycocotools)."""
    __provider__ = 'coco_orig_keypoints_precision'

    annotation_types = (PoseEstimationAnnotation, )
    prediction_types = (PoseEstimationPrediction, )
    iou_type = 'keypoints'
| 38.514469
| 120
| 0.671648
|
import os
import tempfile
import json
from ..representation import (
DetectionPrediction,
DetectionAnnotation,
CoCoInstanceSegmentationAnnotation,
CoCocInstanceSegmentationPrediction,
PoseEstimationAnnotation,
PoseEstimationPrediction
)
from ..logging import print_info
from ..config import BaseField
from ..utils import get_or_parse_value
from .metric import FullDatasetEvaluationMetric
from .coco_metrics import COCO_THRESHOLDS
SHOULD_SHOW_PREDICTIONS = False
SHOULD_DISPLAY_DEBUG_IMAGES = False
if SHOULD_DISPLAY_DEBUG_IMAGES:
import cv2
def box_to_coco(prediction_data_to_store, pred):
x_mins = pred.x_mins.tolist()
y_mins = pred.y_mins.tolist()
x_maxs = pred.x_maxs.tolist()
y_maxs = pred.y_maxs.tolist()
for data_record, x_min, y_min, x_max, y_max in zip(
prediction_data_to_store, x_mins, y_mins, x_maxs, y_maxs
):
width = x_max - x_min + 1
height = y_max - y_min + 1
data_record.update({'bbox': [x_min, y_min, width, height]})
return prediction_data_to_store
def segm_to_coco(prediction_data_to_store, pred):
encoded_masks = pred.mask
for data_record, segm_mask in zip(prediction_data_to_store, encoded_masks):
data_record.update({'segmentation': segm_mask})
return prediction_data_to_store
def keypoints_to_coco(prediction_data_to_store, pred):
for data_record, x_val, y_val, vis in zip(
prediction_data_to_store, pred.x_values, pred.y_values, pred.visibility
):
keypoints = []
for x, y, v in zip(x_val, y_val, vis):
keypoints.extend([x, y, int(v)])
data_record.update({
'keypoints': keypoints
})
return prediction_data_to_store
iou_specific_processing = {
'bbox': box_to_coco,
'segm': segm_to_coco,
'keypoints': keypoints_to_coco
}
class MSCOCOorigBaseMetric(FullDatasetEvaluationMetric):
annotation_types = (DetectionAnnotation, )
prediction_types = (DetectionPrediction, )
iou_type = 'bbox'
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'threshold': BaseField(optional=True, default='.50:.05:.95', description='threshold for metric calculation')
})
return parameters
def configure(self):
self.threshold = get_or_parse_value(self.get_value_from_config('threshold'), COCO_THRESHOLDS)
@staticmethod
def generate_map_pred_label_id_to_coco_cat_id(has_background, use_full_label_map):
shift = 0 if has_background else 1
max_cat = 90 if use_full_label_map else 80
max_key = max_cat - shift
res_map = {i: i + shift for i in range(0, max_key+1)}
assert max(res_map.values()) == max_cat
return res_map
def _prepare_coco_structures(self):
from pycocotools.coco import COCO
annotation_conversion_parameters = self.dataset.config.get('annotation_conversion')
if not annotation_conversion_parameters:
raise ValueError('annotation_conversion parameter is not pointed, '
'but it is required for coco original metrics')
annotation_file = annotation_conversion_parameters.get('annotation_file')
if not annotation_file.is_file():
raise ValueError("annotation file '{}' is not found".format(annotation_file))
has_background = annotation_conversion_parameters.get('has_background', False)
use_full_label_map = annotation_conversion_parameters.get('use_full_label_map', False)
meta = self.dataset.metadata
coco = COCO(str(annotation_file))
assert 0 not in coco.cats.keys()
coco_cat_name_to_id = {v['name']: k for k, v in coco.cats.items()}
if has_background:
assert 'background_label' in meta
bg_lbl = meta['background_label']
bg_name = meta['label_map'][bg_lbl]
assert bg_name not in coco_cat_name_to_id
coco_cat_name_to_id[bg_name] = bg_lbl
else:
assert 'background_label' not in meta
if not use_full_label_map:
map_pred_label_id_to_coco_cat_id = {k: coco_cat_name_to_id[v] for k, v in meta['label_map'].items()}
else:
map_pred_label_id_to_coco_cat_id = self.generate_map_pred_label_id_to_coco_cat_id(has_background,
use_full_label_map)
for k, v in meta['label_map'].items():
assert map_pred_label_id_to_coco_cat_id[k] == coco_cat_name_to_id[v], (
"k = {}, v = {}, map_pred_label_id_to_coco_cat_id[k] = {}, coco_cat_name_to_id[v] = {}".format(
k, v, map_pred_label_id_to_coco_cat_id[k], coco_cat_name_to_id[v]))
assert all(map_pred_label_id_to_coco_cat_id[k] == coco_cat_name_to_id[v]
for k, v in meta['label_map'].items())
map_coco_img_file_name_to_img_id = {os.path.basename(v['file_name']): v['id'] for v in coco.dataset['images']}
assert len(map_coco_img_file_name_to_img_id) == len(coco.dataset['images']), "Image name duplications"
return coco, map_coco_img_file_name_to_img_id, map_pred_label_id_to_coco_cat_id
@staticmethod
def _convert_data_to_coco_format(
predictions, map_coco_img_file_name_to_img_id, map_pred_label_id_to_coco_cat_id, iou_type='bbox'
):
coco_data_to_store = []
for pred in predictions:
prediction_data_to_store = []
cur_name = pred.identifier
cur_name = os.path.basename(cur_name)
assert cur_name in map_coco_img_file_name_to_img_id
cur_img_id = map_coco_img_file_name_to_img_id[cur_name]
labels = pred.labels.tolist()
scores = pred.scores.tolist()
cur_num = len(labels)
assert len(scores) == cur_num
coco_cats = [map_pred_label_id_to_coco_cat_id[lbl] for lbl in labels]
for (s, cur_cat) in zip(scores, coco_cats):
prediction_data_to_store.append({
'image_id': cur_img_id,
'score': s,
'category_id': cur_cat,
'_image_name_from_dataset': cur_name,
})
iou_specific_converter = iou_specific_processing.get(iou_type)
if iou_specific_converter is None:
raise ValueError("unknown iou type: '{}'".format(iou_type))
prediction_data_to_store = iou_specific_converter(prediction_data_to_store, pred)
coco_data_to_store.extend(prediction_data_to_store)
return coco_data_to_store
@staticmethod
def _reload_results_to_coco_class(coco, coco_data_to_store):
with tempfile.NamedTemporaryFile() as ftmp:
json_file_to_store = ftmp.name + ".json"
with open(json_file_to_store, 'w') as f:
json.dump(coco_data_to_store, f, indent=4)
json_file_to_load = json_file_to_store
coco_res = coco.loadRes(json_file_to_load)
return coco_res
@staticmethod
def _debug_printing_and_displaying_predictions(coco, coco_res, data_source, should_display_debug_images):
for coco_data_el in coco_res.dataset['annotations']:
cur_name_from_dataset = coco_data_el.get('_image_name_from_dataset', None)
x1, y1, w, h = coco_data_el['bbox']
x2 = x1+w
y2 = y1+h
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
category_id = coco_data_el['category_id']
category_name = coco.cats[category_id]['name']
coco_image_id = coco_data_el['image_id']
cur_name = coco.imgs[coco_image_id]['file_name']
assert cur_name == cur_name_from_dataset or cur_name_from_dataset is None
s = coco_data_el['score']
print_info("cur_name =" + cur_name)
print_info(" {} {} {} {} {} % {}".format(
x1, y1, x2, y2, int(100*s), category_name))
if should_display_debug_images:
img_path = os.path.join(str(data_source), str(cur_name))
img = cv2.imread(img_path)
cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
cv2.imshow("img", img)
key = 0
while key not in (32, 27):
key = cv2.waitKey() & 0xff
should_display_debug_images = (key != 27)
@staticmethod
def _run_coco_evaluation(coco, coco_res, iou_type='bbox', threshold=None):
from pycocotools.cocoeval import COCOeval
cocoeval = COCOeval(coco, coco_res, iouType=iou_type)
if threshold is not None:
cocoeval.params.iouThrs = threshold
cocoeval.evaluate()
cocoeval.accumulate()
cocoeval.summarize()
res = cocoeval.stats.tolist()
res_len = len(res)
middle_index = res_len //2
assert res_len == 12 if iou_type != 'keypoints' else 10
res = [res[:middle_index], res[middle_index:]]
return res
def compute_precision_recall(self, predictions):
coco, map_coco_img_file_name_to_img_id, map_pred_label_id_to_coco_cat_id = self._prepare_coco_structures()
coco_data_to_store = self._convert_data_to_coco_format(
predictions, map_coco_img_file_name_to_img_id, map_pred_label_id_to_coco_cat_id, self.iou_type
)
coco_res = self._reload_results_to_coco_class(coco, coco_data_to_store)
if SHOULD_SHOW_PREDICTIONS:
data_source = self.dataset.config.get('data_source')
should_display_debug_images = SHOULD_DISPLAY_DEBUG_IMAGES
self._debug_printing_and_displaying_predictions(coco, coco_res, data_source, should_display_debug_images)
res = self._run_coco_evaluation(coco, coco_res, self.iou_type, self.threshold)
print_info("MSCOCOorigBaseMetric.compute_precision_recall: returning " + str(res))
return res
def evaluate(self, annotations, predictions):
pass
class MSCOCOorigAveragePrecision(MSCOCOorigBaseMetric):
__provider__ = 'coco_orig_precision'
def evaluate(self, annotations, predictions):
return self.compute_precision_recall(predictions)[0][0]
class MSCOCOOrigSegmAveragePrecision(MSCOCOorigAveragePrecision):
__provider__ = 'coco_orig_segm_precision'
annotation_types = (CoCoInstanceSegmentationAnnotation, )
prediction_types = (CoCocInstanceSegmentationPrediction, )
iou_type = 'segm'
class MSCOCOorigRecall(MSCOCOorigBaseMetric):
__provider__ = 'coco_orig_recall'
def evaluate(self, annotations, predictions):
return self.compute_precision_recall(predictions)[1][2]
class MSCOCOorigSegmRecall(MSCOCOorigRecall):
__provider__ = 'coco_orig_segm_recall'
annotation_types = (CoCoInstanceSegmentationAnnotation, )
prediction_types = (CoCocInstanceSegmentationPrediction, )
iou_type = 'segm'
class MSCOCOOrigKeyPointsAveragePrecision(MSCOCOorigAveragePrecision):
__provider__ = 'coco_orig_keypoints_precision'
annotation_types = (PoseEstimationAnnotation, )
prediction_types = (PoseEstimationPrediction, )
iou_type = 'keypoints'
| true
| true
|
f71647c44c7230a055bc69532f2238510db71164
| 6,897
|
py
|
Python
|
torch_geometric/nn/conv/han_conv.py
|
itamblyn/pytorch_geometric
|
67ed16492863378b8434b03713a75924f0cc5df1
|
[
"MIT"
] | 2
|
2020-08-06T16:14:15.000Z
|
2021-11-08T07:33:21.000Z
|
torch_geometric/nn/conv/han_conv.py
|
itamblyn/pytorch_geometric
|
67ed16492863378b8434b03713a75924f0cc5df1
|
[
"MIT"
] | 1
|
2021-06-05T10:32:22.000Z
|
2021-06-05T10:32:22.000Z
|
torch_geometric/nn/conv/han_conv.py
|
itamblyn/pytorch_geometric
|
67ed16492863378b8434b03713a75924f0cc5df1
|
[
"MIT"
] | null | null | null |
from typing import Union, Dict, Optional, List
import torch
from torch import Tensor, nn
import torch.nn.functional as F
from torch_geometric.typing import NodeType, EdgeType, Metadata, Adj
from torch_geometric.nn.dense import Linear
from torch_geometric.utils import softmax
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.inits import glorot, reset
def group(xs: List[Tensor], q: nn.Parameter,
          k_lin: nn.Module) -> Optional[Tensor]:
    """Fuse per-edge-type embeddings with semantic-level attention.

    Each tensor in ``xs`` is scored via ``q`` and ``k_lin``, the scores are
    softmax-normalized across edge types, and the stacked embeddings are
    combined as a weighted sum.  Returns ``None`` when ``xs`` is empty.
    """
    if not xs:
        return None
    stacked = torch.stack(xs)
    scores = (q * torch.tanh(k_lin(stacked)).mean(1)).sum(-1)
    weights = F.softmax(scores, dim=0)
    weighted = weights.view(len(xs), 1, -1) * stacked
    return weighted.sum(dim=0)
class HANConv(MessagePassing):
    r"""
    The Heterogenous Graph Attention Operator from the
    `"Heterogenous Graph Attention Network"
    <https://arxiv.org/pdf/1903.07293.pdf>`_ paper.
    .. note::
        For an example of using HANConv, see `examples/hetero/han_imdb.py
        <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/
        hetero/han_imdb.py>`_.
    Args:
        in_channels (int or Dict[str, int]): Size of each input sample of every
            node type, or :obj:`-1` to derive the size from the first input(s)
            to the forward method.
        out_channels (int): Size of each output sample.
        metadata (Tuple[List[str], List[Tuple[str, str, str]]]): The metadata
            of the heterogeneous graph, *i.e.* its node and edge types given
            by a list of strings and a list of string triplets, respectively.
            See :meth:`torch_geometric.data.HeteroData.metadata` for more
            information.
        heads (int, optional): Number of multi-head-attentions.
            (default: :obj:`1`)
        negative_slope (float, optional): LeakyReLU angle of the negative
            slope. (default: :obj:`0.2`)
        dropout (float, optional): Dropout probability of the normalized
            attention coefficients which exposes each node to a stochastically
            sampled neighborhood during training. (default: :obj:`0`)
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.
    """
    def __init__(
        self,
        in_channels: Union[int, Dict[str, int]],
        out_channels: int,
        metadata: Metadata,
        heads: int = 1,
        negative_slope=0.2,
        dropout: float = 0.0,
        **kwargs,
    ):
        super().__init__(aggr='add', node_dim=0, **kwargs)
        # A single int applies uniformly to every node type in the metadata.
        if not isinstance(in_channels, dict):
            in_channels = {node_type: in_channels for node_type in metadata[0]}
        self.heads = heads
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.negative_slope = negative_slope
        self.metadata = metadata
        self.dropout = dropout
        # Semantic-level attention parameters consumed by `group`.
        self.k_lin = nn.Linear(out_channels, out_channels)
        self.q = nn.Parameter(torch.Tensor(1, out_channels))
        # One input projection per node type, into the shared hidden space.
        self.proj = nn.ModuleDict()
        for node_type, in_channels in self.in_channels.items():
            self.proj[node_type] = Linear(in_channels, out_channels)
        # Node-level attention vectors, a (source, target) pair per edge type.
        self.lin_src = nn.ParameterDict()
        self.lin_dst = nn.ParameterDict()
        dim = out_channels // heads
        for edge_type in metadata[1]:
            edge_type = '__'.join(edge_type)  # ParameterDict keys must be str
            self.lin_src[edge_type] = nn.Parameter(torch.Tensor(1, heads, dim))
            self.lin_dst[edge_type] = nn.Parameter(torch.Tensor(1, heads, dim))
        self.reset_parameters()
    def reset_parameters(self):
        # Glorot init for attention vectors; Linear layers reset themselves.
        reset(self.proj)
        glorot(self.lin_src)
        glorot(self.lin_dst)
        self.k_lin.reset_parameters()
        glorot(self.q)
    def forward(
        self, x_dict: Dict[NodeType, Tensor],
        edge_index_dict: Dict[EdgeType,
                              Adj]) -> Dict[NodeType, Optional[Tensor]]:
        r"""
        Args:
            x_dict (Dict[str, Tensor]): A dictionary holding input node
                features for each individual node type.
            edge_index_dict: (Dict[str, Union[Tensor, SparseTensor]]): A
                dictionary holding graph connectivity information for each
                individual edge type, either as a :obj:`torch.LongTensor` of
                shape :obj:`[2, num_edges]` or a
                :obj:`torch_sparse.SparseTensor`.
        :rtype: :obj:`Dict[str, Optional[Tensor]]` - The output node embeddings
            for each node type.
            In case a node type does not receive any message, its output will
            be set to :obj:`None`.
        """
        # Split out_channels into `heads` heads of size D each.
        H, D = self.heads, self.out_channels // self.heads
        x_node_dict, out_dict = {}, {}
        # Iterate over node types:
        for node_type, x_node in x_dict.items():
            x_node_dict[node_type] = self.proj[node_type](x_node).view(
                -1, H, D)
            out_dict[node_type] = []
        # Iterate over edge types:
        for edge_type, edge_index in edge_index_dict.items():
            src_type, _, dst_type = edge_type
            edge_type = '__'.join(edge_type)
            lin_src = self.lin_src[edge_type]
            lin_dst = self.lin_dst[edge_type]
            x_dst = x_node_dict[dst_type]
            # Per-head attention logits, reduced over the feature axis.
            alpha_src = (x_node_dict[src_type] * lin_src).sum(dim=-1)
            alpha_dst = (x_dst * lin_dst).sum(dim=-1)
            alpha = (alpha_src, alpha_dst)
            # propagate_type: (x_dst: Tensor, alpha: PairTensor)
            out = self.propagate(edge_index, x_dst=x_dst, alpha=alpha,
                                 size=None)
            out = F.relu(out)
            out_dict[dst_type].append(out)
        # iterate over node types:
        for node_type, outs in out_dict.items():
            # Semantic-level attention across edge types; None when the node
            # type received no messages at all.
            out = group(outs, self.q, self.k_lin)
            if out is None:
                out_dict[node_type] = None
                continue
            out_dict[node_type] = out
        return out_dict
    def message(self, x_dst_i: Tensor, alpha_i: Tensor, alpha_j: Tensor,
                index: Tensor, ptr: Optional[Tensor],
                size_i: Optional[int]) -> Tensor:
        # alpha_i / alpha_j carry the per-head logits computed in forward()
        # gathered for each edge's endpoints (MessagePassing `_i`/`_j` suffix).
        alpha = alpha_j + alpha_i
        alpha = F.leaky_relu(alpha, self.negative_slope)
        # Normalize over each target node's incoming edges.
        alpha = softmax(alpha, index, ptr, size_i)
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        out = x_dst_i * alpha.view(-1, self.heads, 1)
        # Merge the head dimension back into a flat feature vector.
        return out.view(-1, self.out_channels)
    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}({self.out_channels}, '
                f'heads={self.heads})')
| 39.637931
| 80
| 0.591562
|
from typing import Union, Dict, Optional, List
import torch
from torch import Tensor, nn
import torch.nn.functional as F
from torch_geometric.typing import NodeType, EdgeType, Metadata, Adj
from torch_geometric.nn.dense import Linear
from torch_geometric.utils import softmax
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.inits import glorot, reset
def group(xs: List[Tensor], q: nn.Parameter,
k_lin: nn.Module) -> Optional[Tensor]:
if len(xs) == 0:
return None
else:
num_edge_types = len(xs)
out = torch.stack(xs)
attn_score = (q * torch.tanh(k_lin(out)).mean(1)).sum(-1)
attn = F.softmax(attn_score, dim=0)
out = torch.sum(attn.view(num_edge_types, 1, -1) * out, dim=0)
return out
class HANConv(MessagePassing):
def __init__(
self,
in_channels: Union[int, Dict[str, int]],
out_channels: int,
metadata: Metadata,
heads: int = 1,
negative_slope=0.2,
dropout: float = 0.0,
**kwargs,
):
super().__init__(aggr='add', node_dim=0, **kwargs)
if not isinstance(in_channels, dict):
in_channels = {node_type: in_channels for node_type in metadata[0]}
self.heads = heads
self.in_channels = in_channels
self.out_channels = out_channels
self.negative_slope = negative_slope
self.metadata = metadata
self.dropout = dropout
self.k_lin = nn.Linear(out_channels, out_channels)
self.q = nn.Parameter(torch.Tensor(1, out_channels))
self.proj = nn.ModuleDict()
for node_type, in_channels in self.in_channels.items():
self.proj[node_type] = Linear(in_channels, out_channels)
self.lin_src = nn.ParameterDict()
self.lin_dst = nn.ParameterDict()
dim = out_channels // heads
for edge_type in metadata[1]:
edge_type = '__'.join(edge_type)
self.lin_src[edge_type] = nn.Parameter(torch.Tensor(1, heads, dim))
self.lin_dst[edge_type] = nn.Parameter(torch.Tensor(1, heads, dim))
self.reset_parameters()
def reset_parameters(self):
reset(self.proj)
glorot(self.lin_src)
glorot(self.lin_dst)
self.k_lin.reset_parameters()
glorot(self.q)
def forward(
self, x_dict: Dict[NodeType, Tensor],
edge_index_dict: Dict[EdgeType,
Adj]) -> Dict[NodeType, Optional[Tensor]]:
H, D = self.heads, self.out_channels // self.heads
x_node_dict, out_dict = {}, {}
for node_type, x_node in x_dict.items():
x_node_dict[node_type] = self.proj[node_type](x_node).view(
-1, H, D)
out_dict[node_type] = []
for edge_type, edge_index in edge_index_dict.items():
src_type, _, dst_type = edge_type
edge_type = '__'.join(edge_type)
lin_src = self.lin_src[edge_type]
lin_dst = self.lin_dst[edge_type]
x_dst = x_node_dict[dst_type]
alpha_src = (x_node_dict[src_type] * lin_src).sum(dim=-1)
alpha_dst = (x_dst * lin_dst).sum(dim=-1)
alpha = (alpha_src, alpha_dst)
out = self.propagate(edge_index, x_dst=x_dst, alpha=alpha,
size=None)
out = F.relu(out)
out_dict[dst_type].append(out)
for node_type, outs in out_dict.items():
out = group(outs, self.q, self.k_lin)
if out is None:
out_dict[node_type] = None
continue
out_dict[node_type] = out
return out_dict
def message(self, x_dst_i: Tensor, alpha_i: Tensor, alpha_j: Tensor,
index: Tensor, ptr: Optional[Tensor],
size_i: Optional[int]) -> Tensor:
alpha = alpha_j + alpha_i
alpha = F.leaky_relu(alpha, self.negative_slope)
alpha = softmax(alpha, index, ptr, size_i)
alpha = F.dropout(alpha, p=self.dropout, training=self.training)
out = x_dst_i * alpha.view(-1, self.heads, 1)
return out.view(-1, self.out_channels)
def __repr__(self) -> str:
return (f'{self.__class__.__name__}({self.out_channels}, '
f'heads={self.heads})')
| true
| true
|
f716483a992293a75bd70369f62a18d9e69ac6ee
| 522
|
py
|
Python
|
ex0076.py
|
GantzLorran/Python
|
ce6073754318443345973471589cceb4a24ed832
|
[
"Apache-2.0"
] | 1
|
2020-03-26T13:23:17.000Z
|
2020-03-26T13:23:17.000Z
|
ex0076.py
|
GantzLorran/Python
|
ce6073754318443345973471589cceb4a24ed832
|
[
"Apache-2.0"
] | null | null | null |
ex0076.py
|
GantzLorran/Python
|
ce6073754318443345973471589cceb4a24ed832
|
[
"Apache-2.0"
] | null | null | null |
'''Exercise: build a single tuple holding product names and their respective
prices in sequence, then print a price listing of the data.'''
produtos = ('Lápis', 0.50, 'Suco', 5.00, 'Playstation', 1500.00, 'TV-led', 1200.00, 'Xbox ONE', 1400.00, 'Forza Horizon 4', 200.00, 'The Last Of Us Part II', 250.00, 'Forza Horizon 3', 150.00)
print('====' * 10)
print('LOJAS RODRIGUES')
# Names sit at even indices, prices at the following odd indices.
for nome, preco in zip(produtos[::2], produtos[1::2]):
    print(nome, f'R$: {preco}')
| 58
| 193
| 0.666667
|
produtos = ('Lápis', 0.50, 'Suco', 5.00, 'Playstation', 1500.00, 'TV-led', 1200.00, 'Xbox ONE', 1400.00, 'Forza Horizon 4', 200.00, 'The Last Of Us Part II', 250.00, 'Forza Horizon 3', 150.00)
print('====' * 10)
print('LOJAS RODRIGUES')
for c in range(0,( len(produtos)), 2):
print(produtos[c],f'R$: {produtos[c+1]}')
| true
| true
|
f71648a0f5d74c82ae3c03976965b13fa5309d24
| 14,003
|
py
|
Python
|
website/canvas/util.py
|
bopopescu/canvas
|
2dfd6009eaecd8dac64ccc6125084e65305fb5d0
|
[
"BSD-3-Clause"
] | 61
|
2015-11-10T17:13:46.000Z
|
2021-08-06T17:58:30.000Z
|
website/canvas/util.py
|
bopopescu/canvas
|
2dfd6009eaecd8dac64ccc6125084e65305fb5d0
|
[
"BSD-3-Clause"
] | 13
|
2015-11-11T07:49:41.000Z
|
2021-06-09T03:45:31.000Z
|
website/canvas/util.py
|
bopopescu/canvas
|
2dfd6009eaecd8dac64ccc6125084e65305fb5d0
|
[
"BSD-3-Clause"
] | 18
|
2015-11-11T04:50:04.000Z
|
2021-08-20T00:57:11.000Z
|
import base64
import cProfile
import cStringIO
import collections
import gzip
import hmac
import inspect
import itertools
import logging
import math
import os
import socket
import struct
import time
from urlparse import urljoin
from django.conf import settings
from django.db.models import Model, FloatField
from django.db.models.query import QuerySet
from django.db.models.sql.compiler import SQLInsertCompiler
from django.http import Http404
from django.template import Context, Template
from django.utils.encoding import force_unicode
from django.utils.functional import Promise
from django.utils.html import escape, strip_tags
from django.utils.safestring import mark_safe
import facebook
from jinja2 import Markup
from canvas.exceptions import NotLoggedIntoFacebookError
from canvas.json import loads, dumps, client_dumps, backend_dumps, JSONDecodeError
from configuration import Config
from services import Services
logger = logging.getLogger()

# Drop duplicates from an iterable (order is NOT preserved; goes via a set).
unique = lambda iterable: list(set(iterable))
# Constrain `value` to the inclusive range [lower, upper].
clamp = lambda lower, value, upper: min(upper, max(lower, value))
#TODO this is deprecated because of functools.wraps, unless someone knows an advantage to this method. --alex
def simple_decorator(decorator):
    """
    Turn a simple function into a well-behaved decorator.

    Works for decorators that take a function and return a function (no
    descriptors) and do not touch function attributes themselves.  Both the
    produced decorator and the functions it wraps keep the ``__name__``,
    ``__doc__`` and ``__dict__`` of what they replace.
    """
    def _copy_metadata(source, target):
        # Preserve introspection info so wrapping stays transparent.
        target.__name__ = source.__name__
        target.__doc__ = source.__doc__
        target.__dict__.update(source.__dict__)
        return target

    def new_decorator(f):
        return _copy_metadata(f, decorator(f))

    # Make simple_decorator's output itself look like the input decorator.
    return _copy_metadata(decorator, new_decorator)
def iterlist(fun):
    """Decorator: materialize the wrapped callable's iterable result as a list."""
    def wrapper(*args, **kwargs):
        produced = fun(*args, **kwargs)
        return list(produced)
    return wrapper
def ip_to_int(ip):
    """Pack a dotted-quad IPv4 string into a native-endian unsigned int.

    Best-effort: any unparsable input (bad string, non-string, None)
    yields 0 instead of raising.
    """
    try:
        packed = socket.inet_aton(ip)
        return struct.unpack('I', packed)[0]
    except (socket.error, struct.error, TypeError):
        return 0
def int_to_ip(integer):
    """Inverse of ip_to_int: unpack a native-endian unsigned int into a
    dotted-quad IPv4 string."""
    packed = struct.pack('I', integer)
    return socket.inet_ntoa(packed)
def flatten(list_of_lists):
    """Flatten exactly one level of nesting, lazily.

    Returns an iterator over the elements of the inner iterables, in order.
    """
    flattened = itertools.chain.from_iterable(list_of_lists)
    return flattened
def js_safety(thing, django=True, escape_html=False):
    """Escape angle brackets as JS unicode escapes so the string can be
    embedded in inline JavaScript without closing a <script> tag.

    Returns a Django SafeString by default, a plain string when
    django=False and escape_html=True, or a Jinja2 Markup otherwise.
    """
    escaped = thing.replace('<', '\\u003c').replace('>', '\\u003e')
    if django:
        return mark_safe(escaped)
    if escape_html:
        return escaped
    return Markup(escaped)
def get_or_create(cls, **kwargs):
    """Fetch the `cls` instance matching `kwargs`; create and save one when
    no match exists.  Relies on the manager's get_or_none helper."""
    instance = cls.objects.get_or_none(**kwargs)
    if instance is None:
        instance = cls(**kwargs)
        instance.save()
    return instance
class GetSlice(object):
    """Echoes back whatever subscript it receives, so ``GetSlice()[1:2]``
    yields ``slice(1, 2)`` - a convenient way to build slice objects."""
    def __getitem__(self, item):
        return item

# Shared helper instance: write get_slice[a:b] to construct a slice object.
get_slice = GetSlice()
# Modified, originally from http://en.wikipedia.org/wiki/Base_36
def _raw_base36encode(number):
"""
Convert positive integer to a base36 string.
JS: canvas.base36encode
"""
if not isinstance(number, (int, long)):
raise TypeError('number must be an integer')
if number <= 0:
raise ValueError('number must be a positive integer')
alphabet='0123456789abcdefghijklmnopqrstuvwxyz'
checksum = 0
base36 = ''
while number != 0:
number, i = divmod(number, 36)
checksum += i * 19
base36 = alphabet[i] + base36
return base36, alphabet[checksum % 36]
def base36encode(number):
base36, check = _raw_base36encode(number)
return base36 + check
class Base36DecodeException(Exception): pass
def base36decode(string):
if not string:
raise Base36DecodeException("Empty string")
base36, check = string[:-1], string[-1]
try:
number = int(base36, 36)
except ValueError:
raise Base36DecodeException("Invalid base36 characters.")
try:
_, expected_check = _raw_base36encode(number)
except ValueError:
raise Base36DecodeException("Invalid base36 number.")
if expected_check != check:
raise Base36DecodeException("base36 check character does not match.")
return number
def base36decode_or_404(string):
    """Like base36decode, but translates any decode failure into an HTTP 404
    (for use in URL-parameter handling in views)."""
    try:
        return base36decode(string)
    except Base36DecodeException:
        raise Http404
def random_token(length=40):
    """Return `length` uppercase-hex characters of OS-level random data.

    `length` must be even (each random byte yields two hex characters).
    """
    assert length % 2 == 0
    raw = os.urandom(length // 2)
    return base64.b16encode(raw)
def placeholder(self, conn, field, value):
    """Placeholder hook used by the patched as_sql below: renders a Now()
    sentinel inline as a SQL expression instead of a bound parameter;
    everything else defers to Django's SQLInsertCompiler."""
    if isinstance(value, Now):
        return value.as_sql(None, conn)[0]
    else:
        return SQLInsertCompiler.placeholder(self, field, value)
# EVIL HAX
def as_sql(self):
    """Monkeypatched replacement for Django's SQLInsertCompiler.as_sql.

    Mirrors the stock implementation, except that Now() values are inlined
    via placeholder() above and excluded from the bound-parameter list.
    """
    # We don't need quote_name_unless_alias() here, since these are all
    # going to be column names (so we can avoid the extra overhead).
    qn = self.connection.ops.quote_name
    opts = self.query.model._meta
    result = ['INSERT INTO %s' % qn(opts.db_table)]
    result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
    values = [placeholder(self, self.connection, *v) for v in self.query.values]
    result.append('VALUES (%s)' % ', '.join(values))
    # Now() values were rendered inline, so they must not be bound as params.
    params = [param for param in self.query.params if not isinstance(param, Now)]
    if self.return_id and self.connection.features.can_return_id_from_insert:
        col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
        r_fmt, r_params = self.connection.ops.return_insert_id()
        result.append(r_fmt % col)
        params = params + r_params
    return ' '.join(result), params
# Install the patched compiler method globally.
SQLInsertCompiler.as_sql = as_sql
class UnixTimestampField(FloatField):
def get_prep_value(self, value):
if isinstance(value, Now):
return value
return FloatField.get_prep_value(self, value)
class Now(object):
def prepare_database_save(self, field):
return self
def _sql(self, executable_name):
return Services.time.sql_now(executable_name)
def as_sql(self, qn, conn):
return self._sql(conn.client.executable_name), []
def get_fb_api(request):
fb_user = facebook.get_user_from_cookie(request.COOKIES,
Config['facebook']['app_id'],
Config['facebook']['secret'])
access_token = fb_user and fb_user.get('access_token')
if not access_token:
raise NotLoggedIntoFacebookError()
return fb_user, facebook.GraphAPI(access_token)
class ArgSpec(object):
"""
Convenience wrapper around `inspect.ArgSpec`.
Properties:
`args`:
The list of arg names. Not the same as `inspect.ArgSpec#args`, however - this excludes the kwarg names.
`kwargs`:
A dictionary of kwarg names mapped to their default values.
Note that if the given function contains a member annotation named `_original_function`, it will use that
instead of the function.
"""
def __init__(self, func):
func = getattr(func, '_original_function', func)
spec = inspect.getargspec(func)
defaults = spec.defaults or []
self.args = spec.args[:len(spec.args) - len(defaults)]
self.kwargs = dict(zip(spec.args[-len(defaults):], defaults))
def page_divide(x, y):
return max(1, int(math.ceil(1.0 * x / y)))
def paginate(iterable, page=1, per_page=50):
count = len(iterable)
page_last = page_divide(count, per_page)
# Handle 'current'.
if page == 'current':
start, stop = max(0, count-per_page), count
page = page_last
else:
# Handle p=9999
page = min(int(page), page_last)
start, stop = per_page * (page-1), per_page * (page)
# page_next is None when there aren't any more pages.
page_next = page+1 if page < page_last else None
return iterable[start:stop], page, page_next, page_last
def profile(fun):
if settings.PROFILE:
def wrap(request, *args, **kwargs):
profiler = cProfile.Profile()
result = profiler.runcall(fun, request, *args, **kwargs)
profiler.dump_stats('/var/canvas/website/run/profile-%s-%s.pstats'
% (request.path.replace('/', '_'), int(time.time() * 1000)))
return result
return wrap
else:
return fun
def generate_email_links():
"""
Feel free to rewrite me, I'm just an example of the last use. Just change 'visitor' and 'data'.
"""
def visitor(item):
from canvas.models import User
username, groups = [x.strip() for x in item.split(':')]
user = User.objects.get(username=username)
subject = '%s, Canvas needs you!' % username
body = """Hey %s!\n\nWe've noticed you're one of the top posters in our Canvas-owned groups (%s), and would love to have you as a referee if you are interested. Referees are able to mark posts in appointed groups as off-topic, collapsing them and helping to keep discussion and posts relevant to the group."""
body += """\n\nIf you would be interested in helping us out, let us know, we'd greatly appreciate it!"""
body += """\n\nThanks for being awesome,\n- The Canvas Team"""
body %= (username, groups)
body = body.replace('\n', '%0A')
return {'to': user.email, 'subject': subject, 'body': body}
data = """blblnk: cute, pop_culture, canvas
nicepunk: cute, the_horror, stamps
powerfuldragon: cute, stamps, girls
cybertaco: games
tobacco: games
straitjacketfun: photography
slack_jack: photography
oliveoodle: pop_culture
ryoshi: pop_culture
oliveiralmeida: nerdy
AquilesBaeza: nerdy, the_horror
nebetsu: nerdy
Laban: food
ROPED: food
MuttonChops: canvas
Degu: stamps
sparknineone: girls"""
for item in data.split('\n'):
print """<a href="mailto:%(to)s?subject=%(subject)s&body=%(body)s">%(to)s</a><br/>""" % visitor(item)
def has_flagged_words(text):
"""
Returns True if @text has flagged words.
"""
return any((flag_word in text) for flag_word in Config.get('autoflag_words', []))
def make_absolute_url(relative_url, protocol=None):
"""
Takes a relative url and makes it absolute by prepending the Canvas absolute domain.
This refers not to relative as in "foo" resolving to "/bar/foo" when you're already on "/bar", but to an
absolute path sans the host portion of the URL.
`protocol` should be the name without the "://", e.g. "http" or "https"
"""
# Is it already absolute?
if relative_url.split('//')[-1].startswith(settings.DOMAIN) and relative_url.startswith(protocol or '//'):
return relative_url
if protocol:
protocol = protocol + '://'
else:
protocol = '//'
base = protocol + settings.DOMAIN
return urljoin(base, relative_url)
_template_tag_cache = {}
def render_template_tag(tag_name, args=None, module=None, context_instance=None):
"""
`args` may be either an list of tuples, or any other iterable. If it contains tuples,
it will create a context object out of it with the car as the key and the cdr as the value,
and the keys will be passed to the template tag. (This is to simulate an ordered dict.)
Otherwise, the items in `args` are given as strings to the template tag.
It caches templates, but only if `args` has tuples.
This renders to a string. To use it as a view response, wrap it in HttpResponse.
"""
def make_cache_key(module, tag_name, arg_cars):
return u'-'.join(e for e in [module, tag_name, arg_cars] if e is not None)
prefix, _args = '', ''
context = {}
cache_key = None # Doesn't cache if this doesn't get set.
if module:
prefix = u'{{% load {0} %}}'.format(module)
if args:
args = list(args)
if isinstance(args[0], tuple):
context.update(dict((arg[0], arg[1]) for arg in args))
_args = u' '.join(arg[0] for arg in args)
cache_key = make_cache_key(module, tag_name, _args)
else:
_args = u' '.join(u'"{0}"'.format(arg) for arg in args)
if cache_key and cache_key in _template_tag_cache:
template = _template_tag_cache[cache_key]
_template_tag_cache[cache_key] = template
else:
template = Template(u'{0}{{% {1} {2} %}}'.format(prefix, tag_name, _args))
if cache_key:
_template_tag_cache[cache_key] = template
if context_instance is None:
context_instance = Context(context)
else:
for key, val in context.iteritems():
context_instance[key] = val
return template.render(context_instance)
def get_arg_names(func):
""" Returns a list with function argument names. """
return inspect.getargspec(func)[0]
def token(msg):
""" Returns a Canvas "signed" hash of a token. This is used in unsubscribe links. """
return hmac.new(settings.SECRET_KEY, msg=str(msg)).hexdigest()
class paramaterized_defaultdict(collections.defaultdict):
""" Defaultdict where the default_factory takes key as an argument. """
def __missing__(self, key):
return self.default_factory(key)
def gzip_string(data):
str_file = cStringIO.StringIO()
gzip_file = gzip.GzipFile(fileobj=str_file, mode='wb')
gzip_file.write(data)
gzip_file.close()
return str_file.getvalue()
def strip_template_chars(text):
text = text.replace('{{', '{' * 2)
text = text.replace('}}', '}' * 2)
text = text.replace('{%', '{%')
text = text.replace('%}', '%}')
text = text.replace('{#', '{#')
text = text.replace('#}', '#}')
return text
| 33.261283
| 317
| 0.66193
|
import base64
import cProfile
import cStringIO
import collections
import gzip
import hmac
import inspect
import io
import itertools
import logging
import math
import os
import socket
import struct
import time
from urlparse import urljoin
from django.conf import settings
from django.db.models import Model, FloatField
from django.db.models.query import QuerySet
from django.db.models.sql.compiler import SQLInsertCompiler
from django.http import Http404
from django.template import Context, Template
from django.utils.encoding import force_unicode
from django.utils.functional import Promise
from django.utils.html import escape, strip_tags
from django.utils.safestring import mark_safe
import facebook
from jinja2 import Markup
from canvas.exceptions import NotLoggedIntoFacebookError
from canvas.json import loads, dumps, client_dumps, backend_dumps, JSONDecodeError
from configuration import Config
from services import Services
logger = logging.getLogger()
unique = lambda iterable: list(set(iterable))
clamp = lambda lower, value, upper: min(upper, max(lower, value))
def simple_decorator(decorator):
    """
    Turn a plain function-to-function transformer into a well-behaved
    decorator.

    Works for decorators that simply accept a function and return a
    function (no descriptors) and that do not themselves alter function
    attributes or docstrings.  Apply @simple_decorator to your decorator
    and anything it wraps keeps its __name__, __doc__ and __dict__.
    """
    def new_decorator(func):
        wrapped = decorator(func)
        # Copy identity metadata from the function being wrapped.
        wrapped.__name__ = func.__name__
        wrapped.__doc__ = func.__doc__
        wrapped.__dict__.update(func.__dict__)
        return wrapped
    # Make simple_decorator itself well-behaved by giving new_decorator
    # the metadata of the decorator it replaces.
    new_decorator.__name__ = decorator.__name__
    new_decorator.__doc__ = decorator.__doc__
    new_decorator.__dict__.update(decorator.__dict__)
    return new_decorator
def iterlist(fun):
    """Decorator: eagerly materialize the wrapped callable's iterable
    result into a list."""
    def as_list(*args, **kwargs):
        result = fun(*args, **kwargs)
        return list(result)
    return as_list
def ip_to_int(ip):
    """Convert a dotted-quad IPv4 string to an unsigned integer.

    Returns 0 for anything that does not parse as an IPv4 address.
    NOTE(review): the 'I' struct format is native-endian, so the numeric
    value differs between architectures -- assumed values are only ever
    compared on the same host type; confirm.
    """
    try:
        packed = socket.inet_aton(ip)
        return struct.unpack('I', packed)[0]
    except (socket.error, struct.error, TypeError):
        return 0
def int_to_ip(integer):
    """Inverse of ip_to_int(): native-endian unsigned int -> dotted quad."""
    packed = struct.pack('I', integer)
    return socket.inet_ntoa(packed)
def flatten(list_of_lists):
    """Flatten exactly one level of nesting into a lazy stream of items."""
    return (item for sublist in list_of_lists for item in sublist)
def js_safety(thing, django=True, escape_html=False):
    """Escape angle brackets for safe embedding inside inline JavaScript.

    '<' and '>' become their \\u escapes so the payload cannot terminate
    a <script> tag.  Returns a Django SafeString when `django` is true;
    otherwise a Jinja2 Markup, or the raw string when `escape_html` is
    set (so the template engine will still HTML-escape it).
    """
    escaped = thing.replace('<', '\\u003c')
    escaped = escaped.replace('>', '\\u003e')
    if django:
        return mark_safe(escaped)
    if escape_html:
        return escaped
    return Markup(escaped)
def get_or_create(cls, **kwargs):
    """Fetch the `cls` instance matching **kwargs, creating and saving
    one when no match exists."""
    instance = cls.objects.get_or_none(**kwargs)
    if instance is not None:
        return instance
    instance = cls(**kwargs)
    instance.save()
    return instance
class GetSlice(object):
    """Trick for writing slice literals: indexing returns the key itself,
    so get_slice[1:10:2] evaluates to slice(1, 10, 2)."""
    def __getitem__(self, item):
        return item
# Module-level singleton; use as get_slice[start:stop:step].
get_slice = GetSlice()
# Modified, originally from http://en.wikipedia.org/wiki/Base_36
def _raw_base36encode(number):
    """
    Convert positive integer to a base36 string.

    Returns a (base36_string, check_char) pair; check_char is a single
    base36 character derived from a weighted digit sum, used to detect
    mistyped/corrupted ids.  Mirrored client-side (JS: canvas.base36encode).

    Raises TypeError for non-integers, ValueError for numbers <= 0.
    """
    if not isinstance(number, (int, long)):  # `long`: Python 2 only
        raise TypeError('number must be an integer')
    if number <= 0:
        raise ValueError('number must be a positive integer')
    alphabet='0123456789abcdefghijklmnopqrstuvwxyz'
    checksum = 0
    base36 = ''
    while number != 0:
        # Peel off the least-significant base36 digit each pass.
        number, i = divmod(number, 36)
        # Weighted digit sum; 19 is coprime with 36, so a single-digit
        # change always changes the check character.
        checksum += i * 19
        base36 = alphabet[i] + base36
    return base36, alphabet[checksum % 36]
def base36encode(number):
    """Encode `number` as base36 with a trailing check character."""
    encoded, check_char = _raw_base36encode(number)
    return encoded + check_char
class Base36DecodeException(Exception): pass
def base36decode(string):
    """Decode a base36 string whose final character is a check character.

    Returns the decoded integer.  Raises Base36DecodeException for any
    malformed input: empty string, invalid characters, or a checksum
    mismatch.
    """
    if not string:
        raise Base36DecodeException("Empty string")
    payload, check = string[:-1], string[-1]
    try:
        number = int(payload, 36)
    except ValueError:
        raise Base36DecodeException("Invalid base36 characters.")
    try:
        _, expected_check = _raw_base36encode(number)
    except ValueError:
        raise Base36DecodeException("Invalid base36 number.")
    if expected_check != check:
        raise Base36DecodeException("base36 check character does not match.")
    return number
def base36decode_or_404(string):
    """Like base36decode(), but surface decode failures as an Http404."""
    try:
        return base36decode(string)
    except Base36DecodeException:
        raise Http404
def random_token(length=40):
    """Return `length` characters of cryptographically random uppercase hex.

    Backed by os.urandom, so the output is suitable for secrets such as
    session/unsubscribe tokens.  `length` must be even, because each
    random byte expands to two hex characters.

    Raises ValueError for odd lengths.  (Previously an `assert`, which
    silently disappears under `python -O`.)
    """
    if length % 2 != 0:
        raise ValueError('length must be even, got %r' % (length,))
    return base64.b16encode(os.urandom(length // 2))
def placeholder(self, conn, field, value):
    """Replacement for SQLInsertCompiler.placeholder that inlines Now()
    sentinels as server-side SQL rather than bind-parameter markers."""
    if not isinstance(value, Now):
        return SQLInsertCompiler.placeholder(self, field, value)
    return value.as_sql(None, conn)[0]
# EVIL HAX
def as_sql(self):
    """Monkeypatched SQLInsertCompiler.as_sql that understands Now().

    Identical to Django's implementation except that `placeholder` above
    inlines Now() values as server-side SQL, so Now() instances must also
    be dropped from the bound-parameter list to keep placeholders and
    params aligned.
    """
    # We don't need quote_name_unless_alias() here, since these are all
    # column names (plain quote_name is sufficient).
    qn = self.connection.ops.quote_name
    opts = self.query.model._meta
    result = ['INSERT INTO %s' % qn(opts.db_table)]
    result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
    # `placeholder` emits literal SQL for Now() values...
    values = [placeholder(self, self.connection, *v) for v in self.query.values]
    result.append('VALUES (%s)' % ', '.join(values))
    # ...so they must not additionally appear as bound parameters.
    params = [param for param in self.query.params if not isinstance(param, Now)]
    if self.return_id and self.connection.features.can_return_id_from_insert:
        # Ask the backend (e.g. PostgreSQL RETURNING) for the new pk.
        col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
        r_fmt, r_params = self.connection.ops.return_insert_id()
        result.append(r_fmt % col)
        params = params + r_params
    return ' '.join(result), params
SQLInsertCompiler.as_sql = as_sql
class UnixTimestampField(FloatField):
    """FloatField that lets the Now() sentinel pass through untouched so
    it can be rendered as server-side SQL at insert time."""
    def get_prep_value(self, value):
        # Now() must survive until SQL generation; everything else gets
        # the normal float coercion.
        if not isinstance(value, Now):
            return FloatField.get_prep_value(self, value)
        return value
class Now(object):
    """Sentinel meaning "the database server's current time".

    Stored in place of a float timestamp; the insert-compiler patches in
    this module render it as backend-specific SQL instead of binding it
    as a parameter.
    """
    def prepare_database_save(self, field):
        # Django save hook: keep the sentinel itself as the save value.
        return self
    def _sql(self, executable_name):
        # Backend-specific "current time" SQL, looked up by client binary.
        return Services.time.sql_now(executable_name)
    def as_sql(self, qn, conn):
        # No bind parameters; the timestamp is computed server-side.
        return self._sql(conn.client.executable_name), []
def get_fb_api(request):
    """Return (fb_user, GraphAPI) for the Facebook user on this request.

    Raises NotLoggedIntoFacebookError when the Facebook cookie is absent
    or carries no access token.
    """
    fb_user = facebook.get_user_from_cookie(request.COOKIES,
                                            Config['facebook']['app_id'],
                                            Config['facebook']['secret'])
    if not (fb_user and fb_user.get('access_token')):
        raise NotLoggedIntoFacebookError()
    return fb_user, facebook.GraphAPI(fb_user['access_token'])
class ArgSpec(object):
    """
    Convenience wrapper around `inspect.ArgSpec`.

    Properties:
        `args`:
            The list of arg names. Not the same as `inspect.ArgSpec#args`, however - this excludes the kwarg names.
        `kwargs`:
            A dictionary of kwarg names mapped to their default values.

    Note that if the given function contains a member annotation named `_original_function`, it will use that
    instead of the function.

    NOTE(review): relies on inspect.getargspec, which was removed in
    Python 3.11 -- fine for this Python 2 codebase, but a porting hazard.
    """
    def __init__(self, func):
        # Decorators in this codebase stash the undecorated callable here.
        func = getattr(func, '_original_function', func)
        spec = inspect.getargspec(func)
        defaults = spec.defaults or []
        # Plain positionals: everything before the defaulted tail.
        self.args = spec.args[:len(spec.args) - len(defaults)]
        # Defaults align with the *last* len(defaults) names.  When there
        # are none, args[-0:] is the whole list, but zip(..., []) is empty,
        # so kwargs correctly ends up {}.
        self.kwargs = dict(zip(spec.args[-len(defaults):], defaults))
def page_divide(x, y):
    """Number of pages needed to hold `x` items at `y` per page (min 1)."""
    pages = int(math.ceil(x / float(y)))
    return max(1, pages)
def paginate(iterable, page=1, per_page=50):
    """Slice `iterable` into pages.

    `page` is a 1-based page number (clamped into range, so ?page=9999 is
    safe) or the string 'current', meaning the final `per_page` items.

    Returns (items, page, page_next, page_last); page_next is None on the
    last page.
    """
    count = len(iterable)
    page_last = page_divide(count, per_page)
    if page == 'current':
        # The tail of the sequence, treated as the last page.
        page = page_last
        start, stop = max(0, count - per_page), count
    else:
        page = min(int(page), page_last)
        start = per_page * (page - 1)
        stop = start + per_page
    page_next = page + 1 if page < page_last else None
    return iterable[start:stop], page, page_next, page_last
def profile(fun):
    """Decorator: cProfile-wrap a Django view when settings.PROFILE is on.

    Dumps one .pstats file per request, named by the request path and a
    millisecond timestamp; a transparent passthrough when disabled.
    """
    if not settings.PROFILE:
        return fun
    def wrap(request, *args, **kwargs):
        profiler = cProfile.Profile()
        result = profiler.runcall(fun, request, *args, **kwargs)
        stamp = int(time.time() * 1000)
        profiler.dump_stats('/var/canvas/website/run/profile-%s-%s.pstats'
            % (request.path.replace('/', '_'), stamp))
        return result
    return wrap
def generate_email_links():
    """
    Feel free to rewrite me, I'm just an example of the last use. Just change 'visitor' and 'data'.

    Prints one mailto: <a> anchor per "username: groups" line in `data`,
    pre-filled with a referee-recruitment message.  (Python 2 only: uses
    the print statement.)
    """
    def visitor(item):
        # Imported locally rather than at module scope -- presumably to
        # avoid a circular import with canvas.models; TODO confirm.
        from canvas.models import User
        username, groups = [x.strip() for x in item.split(':')]
        user = User.objects.get(username=username)
        subject = '%s, Canvas needs you!' % username
        body = """Hey %s!\n\nWe've noticed you're one of the top posters in our Canvas-owned groups (%s), and would love to have you as a referee if you are interested. Referees are able to mark posts in appointed groups as off-topic, collapsing them and helping to keep discussion and posts relevant to the group."""
        body += """\n\nIf you would be interested in helping us out, let us know, we'd greatly appreciate it!"""
        body += """\n\nThanks for being awesome,\n- The Canvas Team"""
        # Fill the two %s slots (username, groups) in the template above.
        body %= (username, groups)
        # mailto: bodies need URL-encoded newlines.
        body = body.replace('\n', '%0A')
        return {'to': user.email, 'subject': subject, 'body': body}
    data = """blblnk: cute, pop_culture, canvas
nicepunk: cute, the_horror, stamps
powerfuldragon: cute, stamps, girls
cybertaco: games
tobacco: games
straitjacketfun: photography
slack_jack: photography
oliveoodle: pop_culture
ryoshi: pop_culture
oliveiralmeida: nerdy
AquilesBaeza: nerdy, the_horror
nebetsu: nerdy
Laban: food
ROPED: food
MuttonChops: canvas
Degu: stamps
sparknineone: girls"""
    for item in data.split('\n'):
        print """<a href="mailto:%(to)s?subject=%(subject)s&body=%(body)s">%(to)s</a><br/>""" % visitor(item)
def has_flagged_words(text):
    """
    Returns True if @text has flagged words.
    """
    flagged = Config.get('autoflag_words', [])
    return any(word in text for word in flagged)
def make_absolute_url(relative_url, protocol=None):
    """
    Takes a relative url and makes it absolute by prepending the Canvas absolute domain.

    This refers not to relative as in "foo" resolving to "/bar/foo" when
    you're already on "/bar", but to an absolute path sans the host
    portion of the URL.

    `protocol` should be the name without the "://", e.g. "http" or "https"
    """
    # Already absolute (our domain, acceptable scheme prefix)? Return as-is.
    hostpart = relative_url.split('//')[-1]
    if hostpart.startswith(settings.DOMAIN) and relative_url.startswith(protocol or '//'):
        return relative_url
    prefix = protocol + '://' if protocol else '//'
    return urljoin(prefix + settings.DOMAIN, relative_url)
_template_tag_cache = {}
def render_template_tag(tag_name, args=None, module=None, context_instance=None):
    """
    `args` may be either an list of tuples, or any other iterable. If it contains tuples,
    it will create a context object out of it with the car as the key and the cdr as the value,
    and the keys will be passed to the template tag. (This is to simulate an ordered dict.)

    Otherwise, the items in `args` are given as strings to the template tag.

    It caches templates, but only if `args` has tuples.

    This renders to a string. To use it as a view response, wrap it in HttpResponse.
    """
    def make_cache_key(module, tag_name, arg_cars):
        # Drop None parts so module-less keys stay well-formed.
        return u'-'.join(e for e in [module, tag_name, arg_cars] if e is not None)
    prefix, _args = '', ''
    context = {}
    cache_key = None  # Doesn't cache unless this gets set below.
    if module:
        # Doubled braces render as a literal {% load ... %} tag.
        prefix = u'{{% load {0} %}}'.format(module)
    if args:
        args = list(args)
        if isinstance(args[0], tuple):
            # (name, value) pairs: values populate the render context and
            # the bare names are passed to the tag by reference.
            context.update(dict((arg[0], arg[1]) for arg in args))
            _args = u' '.join(arg[0] for arg in args)
            cache_key = make_cache_key(module, tag_name, _args)
        else:
            # Plain values: inline them as quoted string literals.
            _args = u' '.join(u'"{0}"'.format(arg) for arg in args)
    if cache_key and cache_key in _template_tag_cache:
        template = _template_tag_cache[cache_key]
        # NOTE(review): this re-insert on a cache hit is a no-op with a
        # plain dict; looks like a leftover from an LRU experiment.
        _template_tag_cache[cache_key] = template
    else:
        template = Template(u'{0}{{% {1} {2} %}}'.format(prefix, tag_name, _args))
        if cache_key:
            _template_tag_cache[cache_key] = template
    if context_instance is None:
        context_instance = Context(context)
    else:
        # Fold tuple-arg values into the caller-provided context.
        for key, val in context.iteritems():
            context_instance[key] = val
    return template.render(context_instance)
def get_arg_names(func):
    """ Returns a list with function argument names. """
    spec = inspect.getargspec(func)
    return spec[0]
def token(msg):
    """ Returns a Canvas "signed" hash of a token. This is used in unsubscribe links. """
    digest = hmac.new(settings.SECRET_KEY, msg=str(msg))
    return digest.hexdigest()
class paramaterized_defaultdict(collections.defaultdict):
    """ Defaultdict where the default_factory takes key as an argument. """
    def __missing__(self, key):
        # Unlike a vanilla defaultdict, the computed value is returned
        # without being stored back into the mapping.
        factory = self.default_factory
        return factory(key)
def gzip_string(data):
    """Gzip-compress `data` (a byte string) and return the compressed bytes.

    Uses io.BytesIO instead of the Python 2-only cStringIO module, so this
    helper works on both Python 2.6+ and Python 3.
    """
    buf = io.BytesIO()
    gzip_file = gzip.GzipFile(fileobj=buf, mode='wb')
    try:
        gzip_file.write(data)
    finally:
        # Closing flushes the trailing gzip CRC/size footer; without it
        # the output is truncated.
        gzip_file.close()
    return buf.getvalue()
def strip_template_chars(text):
    """Neutralize Django template delimiters in user-supplied text.

    NOTE(review): every replacement below maps a delimiter to itself
    ('{' * 2 is just '{{'), so this function currently returns `text`
    unchanged.  Confirm whether escaping or zero-width separators were
    intended before "fixing" it -- callers may rely on the identity
    behavior.
    """
    text = text.replace('{{', '{' * 2)
    text = text.replace('}}', '}' * 2)
    text = text.replace('{%', '{%')
    text = text.replace('%}', '%}')
    text = text.replace('{#', '{#')
    text = text.replace('#}', '#}')
    return text
| false
| true
|
f71648cbb0f2b7ac15a1c95480a65cfcdb389ca6
| 237
|
py
|
Python
|
kapre/__init__.py
|
postpop/kapre
|
9cf7c3214aae87082f786b7b2d6e5aee64ce6d8f
|
[
"MIT"
] | 1
|
2019-04-07T00:19:19.000Z
|
2019-04-07T00:19:19.000Z
|
kapre/__init__.py
|
postpop/kapre
|
9cf7c3214aae87082f786b7b2d6e5aee64ce6d8f
|
[
"MIT"
] | null | null | null |
kapre/__init__.py
|
postpop/kapre
|
9cf7c3214aae87082f786b7b2d6e5aee64ce6d8f
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
__version__ = '0.1.4'
VERSION = __version__
from . import time_frequency
from . import backend
from . import backend_keras
from . import augmentation
from . import filterbank
from . import utils
| 18.230769
| 38
| 0.793249
|
from __future__ import absolute_import
__version__ = '0.1.4'
VERSION = __version__
from . import time_frequency
from . import backend
from . import backend_keras
from . import augmentation
from . import filterbank
from . import utils
| true
| true
|
f71648e767d8118878ec23a67b998d4c9d7a819c
| 1,105
|
py
|
Python
|
app.py
|
CA-CODE-Works/cert-issuer-dev
|
51f01d8b8e51f046898592c8e6afcfa9d942c08b
|
[
"MIT"
] | null | null | null |
app.py
|
CA-CODE-Works/cert-issuer-dev
|
51f01d8b8e51f046898592c8e6afcfa9d942c08b
|
[
"MIT"
] | null | null | null |
app.py
|
CA-CODE-Works/cert-issuer-dev
|
51f01d8b8e51f046898592c8e6afcfa9d942c08b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import json
from flask import Flask, jsonify, request, abort
from subprocess import call
#import cert_issuer.config
#from cert_issuer.blockchain_handlers import bitcoin
#import cert_issuer.issue_certificates
app = Flask(__name__)
config = None
# def get_config():
# global config
# if config == None:
# config = cert_issuer.config.get_config()
# return config
# @app.route('/cert_issuer/api/v1.0/issue', methods=['POST'])
# def issue():
# config = get_config()
# certificate_batch_handler, transaction_handler, connector = \
# bitcoin.instantiate_blockchain_handlers(config, False)
# certificate_batch_handler.set_certificates_in_batch(request.json)
# cert_issuer.issue_certificates.issue(config, certificate_batch_handler, transaction_handler)
# return json.dumps(certificate_batch_handler.proof)
@app.route('/')
def hello_world():
    # Synchronously runs test.sh via bash; blocks until the script exits.
    # The HTTP response body is always empty.
    call(["/bin/bash", "test.sh"])
    return ""
@app.route('/wallet')
def wallet():
    # Synchronously runs wallet.sh via bash; blocks until the script exits.
    # The HTTP response body is always empty.
    call(["/bin/bash", "wallet.sh"])
    return ""
if __name__ == "__main__":
app.run(host="0.0.0.0", port=9000)
| 26.309524
| 97
| 0.714027
|
import json
from flask import Flask, jsonify, request, abort
from subprocess import call
app = Flask(__name__)
config = None
@app.route('/')
def hello_world():
call(["/bin/bash", "test.sh"])
return ""
@app.route('/wallet')
def wallet():
call(["/bin/bash", "wallet.sh"])
return ""
if __name__ == "__main__":
app.run(host="0.0.0.0", port=9000)
| true
| true
|
f71649ac4568e4c1a38f99c707fd42b1af856b73
| 1,511
|
py
|
Python
|
src/tests/dataclass_bakery/generators/test_random_int_generator.py
|
miguelFLG13/dataclass-bakery
|
413b5b88ced200e4208e9a25edf520bfc7c31ca5
|
[
"Apache-2.0"
] | 1
|
2021-10-10T04:52:31.000Z
|
2021-10-10T04:52:31.000Z
|
src/tests/dataclass_bakery/generators/test_random_int_generator.py
|
miguelFLG13/dataclass-bakery
|
413b5b88ced200e4208e9a25edf520bfc7c31ca5
|
[
"Apache-2.0"
] | null | null | null |
src/tests/dataclass_bakery/generators/test_random_int_generator.py
|
miguelFLG13/dataclass-bakery
|
413b5b88ced200e4208e9a25edf520bfc7c31ca5
|
[
"Apache-2.0"
] | 2
|
2021-06-05T18:41:50.000Z
|
2022-03-28T02:05:11.000Z
|
from unittest import TestCase
from dataclass_bakery.generators import defaults
from dataclass_bakery.generators.random_int_generator import RandomIntGenerator
class TestRandomIntGenerator(TestCase):
    """Unit tests for RandomIntGenerator.generate()."""

    def setUp(self):
        self.random_int_generator = RandomIntGenerator()

    def test_generate_int_ok(self):
        value = self.random_int_generator.generate()
        self.assertIsInstance(value, int)

    def test_generate_int_correct_min_limit_ok(self):
        min_limit = defaults.NUMBER_MAX_LIMIT - 1
        kwargs = {defaults.NUMBER_MIN_LIMIT_ARG: min_limit}
        value = self.random_int_generator.generate(**kwargs)
        self.assertIsInstance(value, int)
        self.assertTrue(min_limit <= value <= defaults.NUMBER_MAX_LIMIT)

    def test_generate_int_correct_max_limit_ok(self):
        max_limit = defaults.NUMBER_MIN_LIMIT + 1
        kwargs = {defaults.NUMBER_MAX_LIMIT_ARG: max_limit}
        value = self.random_int_generator.generate(**kwargs)
        self.assertIsInstance(value, int)
        self.assertTrue(defaults.NUMBER_MIN_LIMIT <= value <= max_limit)

    def test_generate_int_incorrect_min_limit_ko(self):
        with self.assertRaises(ValueError):
            self.random_int_generator.generate(**{defaults.NUMBER_MIN_LIMIT_ARG: "asd"})

    def test_generate_int_incorrect_max_limit_ko(self):
        with self.assertRaises(TypeError):
            self.random_int_generator.generate(**{defaults.NUMBER_MAX_LIMIT_ARG: "asd"})
| 37.775
| 88
| 0.734613
|
from unittest import TestCase
from dataclass_bakery.generators import defaults
from dataclass_bakery.generators.random_int_generator import RandomIntGenerator
class TestRandomIntGenerator(TestCase):
def setUp(self):
self.random_int_generator = RandomIntGenerator()
def test_generate_int_ok(self):
random_int = self.random_int_generator.generate()
self.assertIsInstance(random_int, int)
def test_generate_int_correct_min_limit_ok(self):
min_limit = defaults.NUMBER_MAX_LIMIT - 1
random_int = self.random_int_generator.generate(
**{defaults.NUMBER_MIN_LIMIT_ARG: min_limit}
)
self.assertIsInstance(random_int, int)
self.assertTrue(min_limit <= random_int <= defaults.NUMBER_MAX_LIMIT)
def test_generate_int_correct_max_limit_ok(self):
max_limit = defaults.NUMBER_MIN_LIMIT + 1
random_int = self.random_int_generator.generate(
**{defaults.NUMBER_MAX_LIMIT_ARG: max_limit}
)
self.assertIsInstance(random_int, int)
self.assertTrue(defaults.NUMBER_MIN_LIMIT <= random_int <= max_limit)
def test_generate_int_incorrect_min_limit_ko(self):
with self.assertRaises(ValueError):
self.random_int_generator.generate(**{defaults.NUMBER_MIN_LIMIT_ARG: "asd"})
def test_generate_int_incorrect_max_limit_ko(self):
with self.assertRaises(TypeError):
self.random_int_generator.generate(**{defaults.NUMBER_MAX_LIMIT_ARG: "asd"})
| true
| true
|
f7164a2fdb4f8686098822d51365dede98c4aa16
| 2,547
|
py
|
Python
|
cli/polyaxon/schemas/polyflow/container/__init__.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
cli/polyaxon/schemas/polyflow/container/__init__.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
cli/polyaxon/schemas/polyflow/container/__init__.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import, division, print_function
from hestia.list_utils import to_list
from hestia.string_utils import strip_spaces
from marshmallow import ValidationError, fields, validates_schema
from polyaxon.schemas.base import BaseConfig, BaseSchema
from polyaxon.schemas.fields import ObjectOrListObject
from polyaxon.schemas.fields.docker_image import validate_image
def get_container_command_args(config):
    """Return (command, args) for `config`, each normalized to a list.

    Args entries are cleaned: whitespace stripped, line-continuation
    backslashes dropped, and multi-line strings collapsed into a single
    space-separated string.
    """
    def sanitize_str(value):
        if not value:
            return
        parts = strip_spaces(value=value, join=False)
        cleaned = [p.strip().strip("\\") for p in parts if (p and p != "\\")]
        cleaned = [p for p in cleaned if (p and p != "\\")]
        return " ".join(cleaned)

    def sanitize(value):
        if isinstance(value, list):
            return [sanitize_str(v) for v in value]
        return to_list(sanitize_str(value), check_none=True)

    return to_list(config.command, check_none=True), sanitize(config.args)
class ContainerSchema(BaseSchema):
    """Marshmallow schema for a container spec: required image, plus
    optional pull policy, command, and args."""
    image = fields.Str(required=True)
    image_pull_policy = fields.Str(allow_none=True)
    command = ObjectOrListObject(fields.Str, allow_none=True)
    args = ObjectOrListObject(fields.Str, allow_none=True)
    @staticmethod
    def schema_config():
        # The config class this schema deserializes into.
        return ContainerConfig
    @validates_schema
    def validate_container(self, values):
        # Reject malformed image references at load time.
        validate_image(values.get("image"))
class ContainerConfig(BaseConfig):
    """Deserialized container spec; counterpart of ContainerSchema."""
    SCHEMA = ContainerSchema
    IDENTIFIER = "container"
    # NOTE(review): presumably attributes omitted from serialized output
    # when unset -- confirm semantics in BaseConfig.
    REDUCED_ATTRIBUTES = ["image_pull_policy", "command", "args"]
    def __init__(self, image=None, image_pull_policy=None, command=None, args=None):
        # Fail fast on a malformed image reference.
        validate_image(image)
        self.image = image
        self.image_pull_policy = image_pull_policy
        self.command = command
        self.args = args
    def get_container_command_args(self):
        # Delegates to the module-level normalizer.
        return get_container_command_args(self)
| 33.077922
| 84
| 0.712603
|
from __future__ import absolute_import, division, print_function
from hestia.list_utils import to_list
from hestia.string_utils import strip_spaces
from marshmallow import ValidationError, fields, validates_schema
from polyaxon.schemas.base import BaseConfig, BaseSchema
from polyaxon.schemas.fields import ObjectOrListObject
from polyaxon.schemas.fields.docker_image import validate_image
def get_container_command_args(config):
def sanitize_str(value):
if not value:
return
value = strip_spaces(value=value, join=False)
value = [c.strip().strip("\\") for c in value if (c and c != "\\")]
value = [c for c in value if (c and c != "\\")]
return " ".join(value)
def sanitize(value):
return (
[sanitize_str(v) for v in value]
if isinstance(value, list)
else to_list(sanitize_str(value), check_none=True)
)
return to_list(config.command, check_none=True), sanitize(config.args)
class ContainerSchema(BaseSchema):
image = fields.Str(required=True)
image_pull_policy = fields.Str(allow_none=True)
command = ObjectOrListObject(fields.Str, allow_none=True)
args = ObjectOrListObject(fields.Str, allow_none=True)
@staticmethod
def schema_config():
return ContainerConfig
@validates_schema
def validate_container(self, values):
validate_image(values.get("image"))
class ContainerConfig(BaseConfig):
SCHEMA = ContainerSchema
IDENTIFIER = "container"
REDUCED_ATTRIBUTES = ["image_pull_policy", "command", "args"]
def __init__(self, image=None, image_pull_policy=None, command=None, args=None):
validate_image(image)
self.image = image
self.image_pull_policy = image_pull_policy
self.command = command
self.args = args
def get_container_command_args(self):
return get_container_command_args(self)
| true
| true
|
f7164a91212b4b092d71dea1f6b264a0d0d7d96d
| 4,288
|
py
|
Python
|
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_ofswitch.py
|
banhr/neutron
|
4b3e73648327ce9f4d3437986a8663372f577f1b
|
[
"Apache-2.0"
] | 1
|
2018-10-19T01:48:37.000Z
|
2018-10-19T01:48:37.000Z
|
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_ofswitch.py
|
weiqiLee/neutron
|
ddc72ebd41a0e7804b33a21583d3add008191229
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_ofswitch.py
|
weiqiLee/neutron
|
ddc72ebd41a0e7804b33a21583d3add008191229
|
[
"Apache-2.0"
] | 1
|
2018-08-28T17:13:16.000Z
|
2018-08-28T17:13:16.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import ofswitch
from neutron.tests import base
class FakeReply(object):
    """Minimal test double for an OpenFlow bundle-control reply: carries
    only the `type` field the tests inspect."""
    def __init__(self, type):
        self.type = type
class TestBundledOpenFlowBridge(base.BaseTestCase):
    """Tests for bundle lifecycle handling in ofswitch.BundledOpenFlowBridge."""

    def setUp(self):
        super(TestBundledOpenFlowBridge, self).setUp()
        # spec= restricts which attributes the bundled wrapper may proxy.
        br = mock.Mock(spec=['install_instructions', 'foo'])
        br._get_dp = lambda: (mock.Mock(), ofproto_v1_3, ofproto_v1_3_parser)
        br.active_bundles = set()
        self.br = ofswitch.BundledOpenFlowBridge(br, False, False)

    def test_method_calls(self):
        """install_* calls are forwarded to the wrapped bridge."""
        self.br.install_instructions(dummy_arg=1)
        self.br.br.install_instructions.assert_called_once_with(dummy_arg=1)

    def test_illegal_method_calls(self):
        """Attributes outside the proxied set raise AttributeError."""
        # assertRaises context managers replace the manual try/except
        # blocks noted as a python3 TODO in the original code.
        with self.assertRaises(AttributeError):
            self.br.uninstall_foo()
        with self.assertRaises(AttributeError):
            self.br.foo()

    def test_normal_bundle_context(self):
        """A clean context opens and commits a bundle, then clears it."""
        self.assertIsNone(self.br.active_bundle)
        self.br.br._send_msg = mock.Mock(side_effect=[
            FakeReply(ofproto_v1_3.ONF_BCT_OPEN_REPLY),
            FakeReply(ofproto_v1_3.ONF_BCT_COMMIT_REPLY)])
        with self.br:
            self.assertIsNotNone(self.br.active_bundle)
            # Do nothing inside the bundle.
        # The active bundle must be gone after a successful commit.
        self.assertIsNone(self.br.active_bundle)

    def test_aborted_bundle_context(self):
        """An exception inside the context discards the bundle and re-raises."""
        self.assertIsNone(self.br.active_bundle)
        self.br.br._send_msg = mock.Mock(side_effect=[
            FakeReply(ofproto_v1_3.ONF_BCT_OPEN_REPLY),
            FakeReply(ofproto_v1_3.ONF_BCT_DISCARD_REPLY)])
        with self.assertRaises(Exception):
            with self.br:
                self.assertIsNotNone(self.br.active_bundle)
                raise Exception()
        # Bundle is gone and exactly OPEN then DISCARD were sent.
        self.assertIsNone(self.br.active_bundle)
        self.assertEqual(2, len(self.br.br._send_msg.mock_calls))
        args, kwargs = self.br.br._send_msg.call_args_list[0]
        self.assertEqual(ofproto_v1_3.ONF_BCT_OPEN_REQUEST,
                         args[0].type)
        args, kwargs = self.br.br._send_msg.call_args_list[1]
        self.assertEqual(ofproto_v1_3.ONF_BCT_DISCARD_REQUEST,
                         args[0].type)

    def test_bundle_context_with_error(self):
        """A send failure at commit clears the bundle and records its id."""
        self.assertIsNone(self.br.active_bundle)
        self.br.br._send_msg = mock.Mock(side_effect=[
            FakeReply(ofproto_v1_3.ONF_BCT_OPEN_REPLY),
            RuntimeError])
        # Pre-initialize so a failure before assignment surfaces as a test
        # failure below rather than an unrelated NameError.
        saved_bundle_id = None
        with self.assertRaises(RuntimeError):
            with self.br:
                saved_bundle_id = self.br.active_bundle
                self.assertIsNotNone(self.br.active_bundle)
        self.assertIsNone(self.br.active_bundle)
        # The failed bundle id is remembered for later cleanup.
        self.assertIn(saved_bundle_id, self.br.br.active_bundles)
        self.assertEqual(2, len(self.br.br._send_msg.mock_calls))
        args, kwargs = self.br.br._send_msg.call_args_list[0]
        self.assertEqual(ofproto_v1_3.ONF_BCT_OPEN_REQUEST,
                         args[0].type)
        args, kwargs = self.br.br._send_msg.call_args_list[1]
        self.assertEqual(ofproto_v1_3.ONF_BCT_COMMIT_REQUEST,
                         args[0].type)
| 40.074766
| 78
| 0.660448
|
import mock
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import ofswitch
from neutron.tests import base
class FakeReply(object):
    """Minimal fake OpenFlow reply exposing only a type code."""

    def __init__(self, type):
        # `type` shadows the builtin; kept for keyword compatibility.
        self.type = type
class TestBundledOpenFlowBridge(base.BaseTestCase):
    """Tests for bundle lifecycle handling in ofswitch.BundledOpenFlowBridge."""

    def setUp(self):
        super(TestBundledOpenFlowBridge, self).setUp()
        # spec= restricts which attributes the bundled wrapper may proxy.
        br = mock.Mock(spec=['install_instructions', 'foo'])
        br._get_dp = lambda: (mock.Mock(), ofproto_v1_3, ofproto_v1_3_parser)
        br.active_bundles = set()
        self.br = ofswitch.BundledOpenFlowBridge(br, False, False)

    def test_method_calls(self):
        """install_* calls are forwarded to the wrapped bridge."""
        self.br.install_instructions(dummy_arg=1)
        self.br.br.install_instructions.assert_called_once_with(dummy_arg=1)

    def test_illegal_method_calls(self):
        """Attributes outside the proxied set raise AttributeError."""
        # assertRaises context managers replace the manual try/except blocks.
        with self.assertRaises(AttributeError):
            self.br.uninstall_foo()
        with self.assertRaises(AttributeError):
            self.br.foo()

    def test_normal_bundle_context(self):
        """A clean context opens and commits a bundle, then clears it."""
        self.assertIsNone(self.br.active_bundle)
        self.br.br._send_msg = mock.Mock(side_effect=[
            FakeReply(ofproto_v1_3.ONF_BCT_OPEN_REPLY),
            FakeReply(ofproto_v1_3.ONF_BCT_COMMIT_REPLY)])
        with self.br:
            self.assertIsNotNone(self.br.active_bundle)
            # Do nothing inside the bundle.
        # The active bundle must be gone after a successful commit.
        self.assertIsNone(self.br.active_bundle)

    def test_aborted_bundle_context(self):
        """An exception inside the context discards the bundle and re-raises."""
        self.assertIsNone(self.br.active_bundle)
        self.br.br._send_msg = mock.Mock(side_effect=[
            FakeReply(ofproto_v1_3.ONF_BCT_OPEN_REPLY),
            FakeReply(ofproto_v1_3.ONF_BCT_DISCARD_REPLY)])
        with self.assertRaises(Exception):
            with self.br:
                self.assertIsNotNone(self.br.active_bundle)
                raise Exception()
        # Bundle is gone and exactly OPEN then DISCARD were sent.
        self.assertIsNone(self.br.active_bundle)
        self.assertEqual(2, len(self.br.br._send_msg.mock_calls))
        args, kwargs = self.br.br._send_msg.call_args_list[0]
        self.assertEqual(ofproto_v1_3.ONF_BCT_OPEN_REQUEST,
                         args[0].type)
        args, kwargs = self.br.br._send_msg.call_args_list[1]
        self.assertEqual(ofproto_v1_3.ONF_BCT_DISCARD_REQUEST,
                         args[0].type)

    def test_bundle_context_with_error(self):
        """A send failure at commit clears the bundle and records its id."""
        self.assertIsNone(self.br.active_bundle)
        self.br.br._send_msg = mock.Mock(side_effect=[
            FakeReply(ofproto_v1_3.ONF_BCT_OPEN_REPLY),
            RuntimeError])
        # Pre-initialize so a failure before assignment surfaces as a test
        # failure below rather than an unrelated NameError.
        saved_bundle_id = None
        with self.assertRaises(RuntimeError):
            with self.br:
                saved_bundle_id = self.br.active_bundle
                self.assertIsNotNone(self.br.active_bundle)
        self.assertIsNone(self.br.active_bundle)
        # The failed bundle id is remembered for later cleanup.
        self.assertIn(saved_bundle_id, self.br.br.active_bundles)
        self.assertEqual(2, len(self.br.br._send_msg.mock_calls))
        args, kwargs = self.br.br._send_msg.call_args_list[0]
        self.assertEqual(ofproto_v1_3.ONF_BCT_OPEN_REQUEST,
                         args[0].type)
        args, kwargs = self.br.br._send_msg.call_args_list[1]
        self.assertEqual(ofproto_v1_3.ONF_BCT_COMMIT_REQUEST,
                         args[0].type)
| true
| true
|
f7164b30ca62a217c7eede4b93e61cd343280927
| 25,782
|
py
|
Python
|
Orio/orio/main/tuner/skeleton_code.py
|
HPCL/nametbd
|
1b588cd6ce94ab39a8ba6f89d9eb64e1d3726af5
|
[
"MIT"
] | null | null | null |
Orio/orio/main/tuner/skeleton_code.py
|
HPCL/nametbd
|
1b588cd6ce94ab39a8ba6f89d9eb64e1d3726af5
|
[
"MIT"
] | null | null | null |
Orio/orio/main/tuner/skeleton_code.py
|
HPCL/nametbd
|
1b588cd6ce94ab39a8ba6f89d9eb64e1d3726af5
|
[
"MIT"
] | null | null | null |
#
# The skeleton code used for performance testing
#
import re, sys
from orio.main.util.globals import *
#-----------------------------------------------------
SEQ_TIMER = '''
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <sys/time.h>
#ifdef BGP_COUNTER
#define SPRN_TBRL 0x10C // Time Base Read Lower Register (user & sup R/O)
#define SPRN_TBRU 0x10D // Time Base Read Upper Register (user & sup R/O)
#define _bgp_mfspr( SPRN )\
({\
unsigned int tmp;\
do {\
asm volatile ("mfspr %0,%1" : "=&r" (tmp) : "i" (SPRN) : "memory" );\
}\
while(0);\
tmp;\
})\
double getClock() {
union {
unsigned int ul[2];
unsigned long long ull;
}
hack;
unsigned int utmp;
do {
utmp = _bgp_mfspr( SPRN_TBRU );
hack.ul[1] = _bgp_mfspr( SPRN_TBRL );
hack.ul[0] = _bgp_mfspr( SPRN_TBRU );
}
while(utmp != hack.ul[0]);
return((double) hack.ull );
}
#else
#if !defined(__APPLE__) && !defined(_OPENMP)
double getClock() {
struct timespec ts;
if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) != 0) return -1;
return (double)ts.tv_sec + ((double)ts.tv_nsec)*1.0e-9;
}
#else
double getClock() {
struct timezone tzp;
struct timeval tp;
gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
#endif
#endif
'''
SEQ_DEFAULT = r'''
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <limits.h>
#include <time.h>
/*@ global @*/
/*@ external @*/
extern double getClock();
//int main(int argc, char *argv[]) { // part of declaration generation
/*@ declarations @*/
/*@ prologue @*/
int orio_i;
/*
Coordinate: /*@ coordinate @*/
*/
/*@ begin outer measurement @*/
for (orio_i=0; orio_i<ORIO_REPS; orio_i++) {
/*@ begin inner measurement @*/
/*@ tested code @*/
/*@ end inner measurement @*/
if (orio_i==0) {
/*@ validation code @*/
}
}
/*@ end outer measurement @*/
/*@ epilogue @*/
return 0;
}
'''
#-----------------------------------------------------
PAR_DEFAULT = r'''
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include "mpi.h"
/*@ global @*/
/*@ external @*/
#define BIG_NUMBER 147483647.0
#ifdef BGP_COUNTER
#define SPRN_TBRL 0x10C // Time Base Read Lower Register (user & sup R/O)
#define SPRN_TBRU 0x10D // Time Base Read Upper Register (user & sup R/O)
#define _bgp_mfspr( SPRN )\
({\
unsigned int tmp;\
do {\
asm volatile ("mfspr %0,%1" : "=&r" (tmp) : "i" (SPRN) : "memory" );\
}\
while(0);\
tmp;\
})\
double getClock()
{
union {
unsigned int ul[2];
unsigned long long ull;
}
hack;
unsigned int utmp;
do {
utmp = _bgp_mfspr( SPRN_TBRU );
hack.ul[1] = _bgp_mfspr( SPRN_TBRL );
hack.ul[0] = _bgp_mfspr( SPRN_TBRU );
}
while(utmp != hack.ul[0]);
return((double) hack.ull );
}
#else
double getClock()
{
struct timezone tzp;
struct timeval tp;
gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
#endif
typedef struct {
int testid;
char coord[1024];
double tm;
} TimingInfo;
//int main(int argc, char *argv[]) { // part of declaration generation
/*@ declarations @*/
int numprocs, myid, _i;
TimingInfo mytimeinfo;
TimingInfo *timevec;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
/* Construct the MPI type for the timing info (what a pain!) */
MPI_Datatype TimingInfoMPIType;
{
MPI_Datatype type[3] = {MPI_INT, MPI_CHAR, MPI_DOUBLE};
int blocklen[3] = {1,1024,1};
MPI_Aint disp[3], base;
MPI_Get_address( &mytimeinfo.testid, &disp[0]);
MPI_Get_address( &mytimeinfo.coord, &disp[1]);
MPI_Get_address( &mytimeinfo.tm, &disp[2]);
base = disp[0];
for (_i=0; _i <3; _i++) disp[_i] -= base;
MPI_Type_struct( 3, blocklen, disp, type, &TimingInfoMPIType);
MPI_Type_commit( &TimingInfoMPIType);
}
/* end of MPI type construction */
if (myid == 0) timevec = (TimingInfo*) malloc(numprocs * sizeof(TimingInfo));
/*@ prologue @*/
switch (myid)
{
/*@ begin switch body @*/
double orio_t_start, orio_t_end, orio_t, orio_t_total=0, orio_t_min=BIG_NUMBER;
int orio_i;
mytimeinfo.testid = myid;
strcpy(mytimeinfo.coord,"/*@ coordinate @*/");
for (orio_i=0; orio_i<ORIO_REPS; orio_i++)
{
orio_t_start = getClock();
/*@ tested code @*/
orio_t_end = getClock();
orio_t = orio_t_end - orio_t_start;
if (orio_t < orio_t_min) orio_t_min = orio_t;
}
/* Mean of all times -- not a good idea in the presence of noise, instead use min */
/* orio_t_total = orio_t_total / REPS; */
orio_t_total = orio_t_min;
mytimeinfo.tm = orio_t_total;
/*@ end switch body @*/
default:
mytimeinfo.testid = -1;
strcpy(mytimeinfo.coord,"");
mytimeinfo.tm = -1;
break;
}
MPI_Gather(&mytimeinfo, 1, TimingInfoMPIType, timevec, 1, TimingInfoMPIType, 0, MPI_COMM_WORLD);
if (myid==0) {
printf("{");
if (mytimeinfo.tm >= 0 && strcmp(mytimeinfo.coord, "") != 0)
printf(" '%s' : %g,", mytimeinfo.coord, mytimeinfo.tm);
for (_i=1; _i<numprocs; _i++) {
if (timevec[_i].tm >= 0 && strcmp(timevec[_i].coord, "") != 0)
printf(" '%s' : %g,", timevec[_i].coord, timevec[_i].tm);
}
printf("}\n");
}
MPI_Finalize();
/*@ epilogue @*/
return 0;
}
'''
SEQ_FORTRAN_DEFAULT = r'''
program main
implicit none
integer, parameter :: double = selected_real_kind(10,40)
integer, parameter :: single = selected_real_kind(5,20)
real(double) :: orio_t_start, orio_t_end, orio_min_time, orio_delta_time
integer :: orio_i
!@ declarations @!
!@ prologue @!
orio_min_time = X'7FF00000' ! large number
do orio_i = 1, ORIO_REPS
orio_t_start = getClock()
!@ tested code @!
orio_t_end = getClock()
orio_delta_time = orio_t_end - orio_t_start
if (orio_delta_time < orio_min_time) then
orio_min_time = orio_delta_time
end if
enddo
write(*,"(A,ES20.13,A)",advance="no") "{'!@ coordinate @!' : ", orio_delta_time, "}"
!@ epilogue @!
contains
real(double) function getClock()
implicit none
integer (kind = 8) clock_count, clock_max, clock_rate
integer ( kind = 8 ), parameter :: call_num = 100
call system_clock(clock_count, clock_rate, clock_max)
getClock = dble(clock_count) / dble(call_num * clock_rate)
end function
end program main
'''
#-----------------------------------------------------
PAR_FORTRAN_DEFAULT = r'''
program main
use mpi
implicit none
integer, parameter :: double = selected_real_kind(10,40)
integer, parameter :: single = selected_real_kind(5,20)
type TimingInfo
sequence
integer :: testid
character(len=1024) :: coord
real(double) :: tm
end type TimingInfo
integer :: numprocs, myid, i_, ierror
integer :: TimingInfoMPIType
integer :: blocklen(3) = (/ 1, 1024, 1/)
integer :: disp(3) = (/ 0, 4, 4+1024 /) ! assume four-byte integers
integer :: types(3) = (/ MPI_INTEGER, MPI_CHARACTER, &
MPI_DOUBLE_PRECISION /)
type(TimingInfo) :: mytimeinfo
type(TimingInfo), allocatable :: timevec(:)
real(double) :: orio_t_start, orio_t_end, orio_min_time, orio_delta_time
integer :: orio_i
!@ declarations @!
call mpi_init(ierror)
call mpi_comm_size(MPI_COMM_WORLD, numprocs)
call mpi_comm_rank(MPI_COMM_WORLD, myid)
! Construct the MPI type for the timing info (what a pain!)
call mpi_type_create_struct(3, blocklen, disp, types, TimingInfoMPIType, ierror)
call mpi_type_commit(TimingInfoMPIType, ierror)
if (myid == 0) allocate(timevec(0:numprocs-1))
orio_min_time = X'7FF00000' ! large number
!@ prologue @!
select case (myid)
!@ begin switch body @!
mytimeinfo%testid = myid
mytimeinfo%coord = "!@ coordinate @!"
do orio_i = 1, ORIO_REPS
orio_t_start = MPI_Wtime()
!@ tested code @!
orio_t_end = MPI_Wtime()
orio_min_time = min(orio_min_time, orio_t_end - orio_t_start)
enddo
mytimeinfo%tm = orio_min_time
!@ end switch body @!
case default
mytimeinfo%testid = -1
mytimeinfo%coord = ""
mytimeinfo%tm = -1
end select
call mpi_gather(mytimeinfo, 1, TimingInfoMPIType, &
timevec, 1, TimingInfoMPIType, &
0, MPI_COMM_WORLD, ierror)
if (myid == 0) then
write(*,"(A)",advance="no") "{"
if ((mytimeinfo%tm >= 0) .and. (mytimeinfo%coord /= "")) &
write(*,"(3A,ES20.13)",advance="no") " '", mytimeinfo%coord, "' : ", &
mytimeinfo%tm
do i_ = 1, numprocs-1
if ((timevec(i_)%tm >= 0) .and. (timevec(i_)%coord /= ""))
write (*,"(3A,ES20.13)",advance="no") &
" '", timevec(i_)%coord, "' : ", timevec(i_)%tm
enddo
write(*,"(A)",advance="yes") "}"
endif
call mpi_finalize(ierror)
!@ epilogue @!
end program main
'''
#----------------------------------------------------------------------------------------------------------------------
SEQ_DEFAULT_CUDA = r'''
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <limits.h>
#include <time.h>
#include <unistd.h>
#include <cuda.h>
/*@ global @*/
/*@ external @*/
int main(int argc, char *argv[]) {
/*@ declarations @*/
/*@ prologue @*/
cudaSetDeviceFlags(cudaDeviceBlockingSync);
float orcu_elapsed=0.0, orcu_transfer=0.0;
cudaEvent_t tstart, tstop, start, stop;
cudaEventCreate(&tstart); cudaEventCreate(&tstop);
cudaEventCreate(&start); cudaEventCreate(&stop);
/*@ begin outer measurement @*/
for (int orio_i=0; orio_i<ORIO_REPS; orio_i++) {
/*@ begin inner measurement @*/
/*@ tested code @*/
/*@ end inner measurement @*/
printf("{'/*@ coordinate @*/' : (%g,%g)}\n", orcu_elapsed, orcu_transfer);
}
/*@ end outer measurement @*/
cudaEventDestroy(tstart); cudaEventDestroy(tstop);
cudaEventDestroy(start); cudaEventDestroy(stop);
/*@ epilogue @*/
return 0;
}
'''
#----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------
class PerfTestSkeletonCode:
    '''The skeleton (driver) code used in the performance testing.

    The skeleton is a C/CUDA source template containing /*@ ... @*/ tags
    that insertCode() replaces with generated code fragments.
    '''

    # tags recognized in the skeleton template
    __GLOBAL_TAG = r'/\*@\s*global\s*@\*/'
    __EXTERNAL_TAG = r'/\*@\s*external\s*@\*/'
    __DECLARATIONS_TAG = r'/\*@\s*declarations\s*@\*/'
    __PROLOGUE_TAG = r'/\*@\s*prologue\s*@\*/'
    __EPILOGUE_TAG = r'/\*@\s*epilogue\s*@\*/'
    __TCODE_TAG = r'/\*@\s*tested\s+code\s*@\*/'
    __BEGIN_INNER_MEASURE_TAG = r'/\*@\s*begin\s+inner\s+measurement\s*@\*/'
    __END_INNER_MEASURE_TAG = r'/\*@\s*end\s+inner\s+measurement\s*@\*/'
    __BEGIN_OUTER_MEASURE_TAG = r'/\*@\s*begin\s+outer\s+measurement\s*@\*/'
    __END_OUTER_MEASURE_TAG = r'/\*@\s*end\s+outer\s+measurement\s*@\*/'
    __VALIDATION_TAG = r'/\*@\s*validation\s+code\s*@\*/'
    __COORD_TAG = r'/\*@\s*coordinate\s*@\*/'
    __BEGIN_SWITCHBODY_TAG = r'/\*@\s*begin\s+switch\s+body\s*@\*/'
    __END_SWITCHBODY_TAG = r'/\*@\s*end\s+switch\s+body\s*@\*/'
    __SWITCHBODY_TAG = __BEGIN_SWITCHBODY_TAG + r'((.|\n)*?)' + __END_SWITCHBODY_TAG

    #-----------------------------------------------------

    def __init__(self, code, use_parallel_search, language='c'):
        '''Instantiate the skeleton code for the performance testing.

        @param code: user-supplied skeleton source, or None to use a default
        @param use_parallel_search: select the MPI-parallel default skeleton
        @param language: 'c' or 'cuda'; picks the sequential default skeleton
        '''
        if code is None:
            if use_parallel_search:
                code = PAR_DEFAULT
            elif language == 'c':
                code = SEQ_DEFAULT
            else:
                code = SEQ_DEFAULT_CUDA
        self.code = code
        self.use_parallel_search = use_parallel_search
        self.language = language
        self.__checkSkeletonCode(self.code)

    #-----------------------------------------------------

    def __checkSkeletonCode(self, code):
        '''Check the validity of the skeleton code.

        Calls err() for each required tag that is missing and warn() for the
        optional "validation code" tag.
        '''
        # (tag regex, human-readable tag name) pairs that must all be present.
        required = (
            (self.__GLOBAL_TAG, 'global'),
            (self.__EXTERNAL_TAG, 'external'),
            (self.__DECLARATIONS_TAG, 'declarations'),
            (self.__PROLOGUE_TAG, 'prologue'),
            (self.__EPILOGUE_TAG, 'epilogue'),
            (self.__TCODE_TAG, 'tested code'),
            (self.__BEGIN_INNER_MEASURE_TAG, 'begin inner measurement'),
            (self.__END_INNER_MEASURE_TAG, 'end inner measurement'),
            (self.__BEGIN_OUTER_MEASURE_TAG, 'begin outer measurement'),
            (self.__END_OUTER_MEASURE_TAG, 'end outer measurement'),
        )
        for tag, name in required:
            if not re.search(tag, code):
                err('main.tuner.skeleton_code: missing "%s" tag in the skeleton code' % name)
        # validation code is optional: only warn
        if not re.search(self.__VALIDATION_TAG, code):
            warn('main.tuner.skeleton_code: missing "validation code" tag in the skeleton code')
        if not re.search(self.__COORD_TAG, code):
            err('main.tuner.skeleton_code: missing "coordinate" tag in the skeleton code')

        if self.use_parallel_search:
            for tag, name in ((self.__BEGIN_SWITCHBODY_TAG, 'begin switch body'),
                              (self.__END_SWITCHBODY_TAG, 'end switch body')):
                if not re.search(tag, code):
                    err('main.tuner.skeleton_code: missing "%s" tag in the skeleton code' % name)
            match_obj = re.search(self.__SWITCHBODY_TAG, code)
            if not match_obj:
                err('main.tuner.skeleton_code internal error: missing placement of switch body statement')
            switch_body_code = match_obj.group(1)
            if not re.search(self.__TCODE_TAG, switch_body_code):
                err('main.tuner.skeleton_code: missing "tested code" tag in the switch body statement')
            if not re.search(self.__VALIDATION_TAG, switch_body_code):
                warn('main.tuner.skeleton_code: missing "validation code" tag in the switch body statement')
            if not re.search(self.__COORD_TAG, switch_body_code):
                err('main.tuner.skeleton_code: missing "coordinate" tag in the switch body statement')

    #-----------------------------------------------------

    def insertCode(self, global_code, prologue_code, epilogue_code, validation_code,
                   begin_inner_measure_code, end_inner_measure_code,
                   begin_outer_measure_code, end_outer_measure_code,
                   tested_code_map):
        '''
        Insert code fragments into the skeleton driver code.
        @return: Complete specialized C source code string for the performance testing driver.
        @param global_code: file-scope definitions
        @param prologue_code: setup code run before the measured region
        @param epilogue_code: teardown code run after the measured region
        @param validation_code: result-checking code (run on the first rep)
        @param begin_inner_measure_code: start inner loop measurement, e.g., initialize time variable
        @param end_inner_measure_code: stop inner loop measurement, e.g., compute elapsed time
        @param begin_outer_measure_code: start measurement around repetitions loop
        @param end_outer_measure_code: stop measurement around repetitions loop
        @param tested_code_map: {coordinate string: (tested code, externals)}
        '''
        # check the given tested code mapping
        if len(tested_code_map) == 0:
            err('main.tuner.skeleton_code internal error: the number of tested codes cannot be zero')
        if not self.use_parallel_search and len(tested_code_map) != 1:
            err('main.tuner.skeleton_code internal error: the number of tested sequential codes must be exactly one')

        # initialize the performance-testing code
        code = self.code

        # add cuda kernel definitions, if any; Globals() is only consulted on
        # the CUDA path (it is unused and unneeded for plain C)
        if self.language == 'cuda':
            g = Globals()
            if len(g.cunit_declarations) > 0:
                # ''.join replaces the former reduce(+) fold: reduce is not a
                # builtin in Python 3, so the old code raised NameError here.
                global_code += ''.join(g.cunit_declarations)
                g.cunit_declarations = []

        # Declarations that must be in main() scope (not global)
        declarations_code = '\n#ifdef MAIN_DECLARATIONS\n  MAIN_DECLARATIONS()\n#endif'

        # insert global definitions, declarations, prologue, and epilogue codes
        code = re.sub(self.__GLOBAL_TAG, global_code, code)
        code = re.sub(self.__DECLARATIONS_TAG, declarations_code, code)
        code = re.sub(self.__PROLOGUE_TAG, prologue_code, code)
        code = re.sub(self.__EPILOGUE_TAG, epilogue_code, code)

        if self.use_parallel_search:
            # expand the switch body once per tested code variant
            switch_body_code = re.search(self.__SWITCHBODY_TAG, code).group(1)
            tcode = ''
            par_externals = ''
            for i, (code_key, (code_value, externals)) in enumerate(tested_code_map.items()):
                scode = switch_body_code
                scode = re.sub(self.__COORD_TAG, code_key, scode)
                scode = re.sub(self.__TCODE_TAG, code_value, scode)
                tcode += '\n'
                tcode += ' case %s:\n' % i
                tcode += ' {\n' + scode + '\n }\n'
                tcode += ' break;\n'
                par_externals += externals
            code = re.sub(self.__EXTERNAL_TAG, par_externals, code)
            code = re.sub(self.__SWITCHBODY_TAG, tcode, code)
        else:
            # exactly one tested code variant in the sequential case
            ((coord_key, (tcode, externals)),) = tested_code_map.items()
            code = re.sub(self.__BEGIN_INNER_MEASURE_TAG, begin_inner_measure_code, code)
            code = re.sub(self.__END_INNER_MEASURE_TAG, re.sub(self.__COORD_TAG, coord_key, end_inner_measure_code), code)
            code = re.sub(self.__BEGIN_OUTER_MEASURE_TAG, begin_outer_measure_code, code)
            code = re.sub(self.__END_OUTER_MEASURE_TAG, re.sub(self.__COORD_TAG, coord_key, end_outer_measure_code), code)
            code = re.sub(self.__EXTERNAL_TAG, externals, code)
            code = re.sub(self.__COORD_TAG, coord_key, code)
            code = re.sub(self.__TCODE_TAG, tcode, code)

        # insert the validation code
        code = re.sub(self.__VALIDATION_TAG, validation_code, code)

        # return the performance-testing code
        return code
class PerfTestSkeletonCodeFortran:
    '''The skeleton (driver) code used in the performance testing, Fortran flavor.

    The skeleton is a Fortran source template containing !@ ... @! tags that
    insertCode() replaces with generated code fragments.
    '''

    # tags recognized in the Fortran skeleton template
    __PROLOGUE_TAG = r'!@\s*prologue\s*@!'
    __DECLARATIONS_TAG = r'!@\s*declarations\s*@!'
    __ALLOCATIONS_TAG = r'!@\s*allocation\s*@!'
    __EPILOGUE_TAG = r'!@\s*epilogue\s*@!'
    __TCODE_TAG = r'!@\s*tested\s+code\s*@!'
    __COORD_TAG = r'!@\s*coordinate\s*@!'
    __BEGIN_SWITCHBODY_TAG = r'!@\s*begin\s+switch\s+body\s*@!'
    __END_SWITCHBODY_TAG = r'!@\s*end\s+switch\s+body\s*@!'
    __SWITCHBODY_TAG = __BEGIN_SWITCHBODY_TAG + r'((.|\n)*?)' + __END_SWITCHBODY_TAG

    #-----------------------------------------------------

    def __init__(self, code, use_parallel_search):
        '''Instantiate the skeleton code for the performance testing.

        @param code: user-supplied skeleton source, or None to use a default
        @param use_parallel_search: select the MPI-parallel default skeleton
        '''
        if code is None:
            if use_parallel_search:
                code = PAR_FORTRAN_DEFAULT
            else:
                code = SEQ_FORTRAN_DEFAULT
        self.code = code
        self.use_parallel_search = use_parallel_search
        self.__checkSkeletonCode(self.code)

    #-----------------------------------------------------

    def __checkSkeletonCode(self, code):
        '''Check the validity of the skeleton code; err() on each missing tag.'''
        # (tag regex, name, extra err() kwargs) — prologue keeps doexit=True
        # as in the original code.
        for tag, name, kwargs in (
                (self.__PROLOGUE_TAG, 'prologue', {'doexit': True}),
                (self.__EPILOGUE_TAG, 'epilogue', {}),
                (self.__TCODE_TAG, 'tested code', {}),
                (self.__COORD_TAG, 'coordinate', {})):
            if not re.search(tag, code):
                err('main.tuner.skeleton_code: missing "%s" tag in the skeleton code' % name,
                    **kwargs)

        if self.use_parallel_search:
            for tag, name in ((self.__BEGIN_SWITCHBODY_TAG, 'begin switch body'),
                              (self.__END_SWITCHBODY_TAG, 'end switch body')):
                if not re.search(tag, code):
                    err('main.tuner.skeleton_code: missing "%s" tag in the skeleton code' % name)
            match_obj = re.search(self.__SWITCHBODY_TAG, code)
            if not match_obj:
                err('main.tuner.skeleton_code: internal error: missing placement of switch body statement')
            switch_body_code = match_obj.group(1)
            if not re.search(self.__TCODE_TAG, switch_body_code):
                err('main.tuner.skeleton_code: missing "tested code" tag in the switch body statement')
            if not re.search(self.__COORD_TAG, switch_body_code):
                err('main.tuner.skeleton_code: missing "coordinate" tag in the switch body statement')

    #-----------------------------------------------------

    def insertCode(self, decl_code, prologue_code, epilogue_code,
                   begin_inner_measure_code, end_inner_measure_code,
                   begin_outer_measure_code, end_outer_measure_code,
                   tested_code_map):
        '''Insert code fragments into the skeleton code and return the result.

        @param decl_code: declaration code for the !@ declarations @! tag
        @param prologue_code: code for the !@ prologue @! tag
        @param epilogue_code: code for the !@ epilogue @! tag
        @param tested_code_map: {coordinate string: tested code}

        The four *_measure_code parameters are accepted for interface
        compatibility with the C skeleton class, but the Fortran templates
        define no measurement tags, so they are currently unused.
        '''
        # check the given tested code mapping
        if len(tested_code_map) == 0:
            err('main.tuner.skeleton_code: internal error: the number of tested codes cannot be zero')
        if not self.use_parallel_search and len(tested_code_map) != 1:
            err('main.tuner.skeleton_code: internal error: the number of tested sequential codes must be exactly one')

        # initialize the performance-testing code
        code = self.code

        # insert declarations, prologue, and epilogue codes.
        # BUG FIX: the prologue substitution was missing, leaving the literal
        # "!@ prologue @!" tag in the generated source and silently dropping
        # prologue_code.
        code = re.sub(self.__DECLARATIONS_TAG, decl_code, code)
        code = re.sub(self.__PROLOGUE_TAG, prologue_code, code)
        code = re.sub(self.__EPILOGUE_TAG, epilogue_code, code)

        # BUG FIX: removed four re.sub calls that referenced measurement tag
        # attributes this class never defined (AttributeError) and used
        # coord_key before assignment (NameError).

        if self.use_parallel_search:
            # expand the switch body once per tested code variant
            switch_body_code = re.search(self.__SWITCHBODY_TAG, code).group(1)
            tcode = ''
            for i, (code_key, code_value) in enumerate(tested_code_map.items()):
                scode = switch_body_code
                scode = re.sub(self.__COORD_TAG, code_key, scode)
                scode = re.sub(self.__TCODE_TAG, code_value, scode)
                tcode += '\n'
                tcode += ' case (%s)\n' % i
                tcode += ' \n' + scode + '\n\n'
            code = re.sub(self.__SWITCHBODY_TAG, tcode, code)
        else:
            # exactly one tested code variant in the sequential case
            ((coord_key, tcode),) = tested_code_map.items()
            code = re.sub(self.__COORD_TAG, coord_key, code)
            code = re.sub(self.__TCODE_TAG, tcode, code)

        # return the performance-testing code
        return code
| 33.614081
| 123
| 0.59526
|
import re, sys
from orio.main.util.globals import *
SEQ_TIMER = '''
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <sys/time.h>
#ifdef BGP_COUNTER
#define SPRN_TBRL 0x10C // Time Base Read Lower Register (user & sup R/O)
#define SPRN_TBRU 0x10D // Time Base Read Upper Register (user & sup R/O)
#define _bgp_mfspr( SPRN )\
({\
unsigned int tmp;\
do {\
asm volatile ("mfspr %0,%1" : "=&r" (tmp) : "i" (SPRN) : "memory" );\
}\
while(0);\
tmp;\
})\
double getClock() {
union {
unsigned int ul[2];
unsigned long long ull;
}
hack;
unsigned int utmp;
do {
utmp = _bgp_mfspr( SPRN_TBRU );
hack.ul[1] = _bgp_mfspr( SPRN_TBRL );
hack.ul[0] = _bgp_mfspr( SPRN_TBRU );
}
while(utmp != hack.ul[0]);
return((double) hack.ull );
}
#else
#if !defined(__APPLE__) && !defined(_OPENMP)
double getClock() {
struct timespec ts;
if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) != 0) return -1;
return (double)ts.tv_sec + ((double)ts.tv_nsec)*1.0e-9;
}
#else
double getClock() {
struct timezone tzp;
struct timeval tp;
gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
#endif
#endif
'''
SEQ_DEFAULT = r'''
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <limits.h>
#include <time.h>
/*@ global @*/
/*@ external @*/
extern double getClock();
//int main(int argc, char *argv[]) { // part of declaration generation
/*@ declarations @*/
/*@ prologue @*/
int orio_i;
/*
Coordinate: /*@ coordinate @*/
*/
/*@ begin outer measurement @*/
for (orio_i=0; orio_i<ORIO_REPS; orio_i++) {
/*@ begin inner measurement @*/
/*@ tested code @*/
/*@ end inner measurement @*/
if (orio_i==0) {
/*@ validation code @*/
}
}
/*@ end outer measurement @*/
/*@ epilogue @*/
return 0;
}
'''
PAR_DEFAULT = r'''
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include "mpi.h"
/*@ global @*/
/*@ external @*/
#define BIG_NUMBER 147483647.0
#ifdef BGP_COUNTER
#define SPRN_TBRL 0x10C // Time Base Read Lower Register (user & sup R/O)
#define SPRN_TBRU 0x10D // Time Base Read Upper Register (user & sup R/O)
#define _bgp_mfspr( SPRN )\
({\
unsigned int tmp;\
do {\
asm volatile ("mfspr %0,%1" : "=&r" (tmp) : "i" (SPRN) : "memory" );\
}\
while(0);\
tmp;\
})\
double getClock()
{
union {
unsigned int ul[2];
unsigned long long ull;
}
hack;
unsigned int utmp;
do {
utmp = _bgp_mfspr( SPRN_TBRU );
hack.ul[1] = _bgp_mfspr( SPRN_TBRL );
hack.ul[0] = _bgp_mfspr( SPRN_TBRU );
}
while(utmp != hack.ul[0]);
return((double) hack.ull );
}
#else
double getClock()
{
struct timezone tzp;
struct timeval tp;
gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
#endif
typedef struct {
int testid;
char coord[1024];
double tm;
} TimingInfo;
//int main(int argc, char *argv[]) { // part of declaration generation
/*@ declarations @*/
int numprocs, myid, _i;
TimingInfo mytimeinfo;
TimingInfo *timevec;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
/* Construct the MPI type for the timing info (what a pain!) */
MPI_Datatype TimingInfoMPIType;
{
MPI_Datatype type[3] = {MPI_INT, MPI_CHAR, MPI_DOUBLE};
int blocklen[3] = {1,1024,1};
MPI_Aint disp[3], base;
MPI_Get_address( &mytimeinfo.testid, &disp[0]);
MPI_Get_address( &mytimeinfo.coord, &disp[1]);
MPI_Get_address( &mytimeinfo.tm, &disp[2]);
base = disp[0];
for (_i=0; _i <3; _i++) disp[_i] -= base;
MPI_Type_struct( 3, blocklen, disp, type, &TimingInfoMPIType);
MPI_Type_commit( &TimingInfoMPIType);
}
/* end of MPI type construction */
if (myid == 0) timevec = (TimingInfo*) malloc(numprocs * sizeof(TimingInfo));
/*@ prologue @*/
switch (myid)
{
/*@ begin switch body @*/
double orio_t_start, orio_t_end, orio_t, orio_t_total=0, orio_t_min=BIG_NUMBER;
int orio_i;
mytimeinfo.testid = myid;
strcpy(mytimeinfo.coord,"/*@ coordinate @*/");
for (orio_i=0; orio_i<ORIO_REPS; orio_i++)
{
orio_t_start = getClock();
/*@ tested code @*/
orio_t_end = getClock();
orio_t = orio_t_end - orio_t_start;
if (orio_t < orio_t_min) orio_t_min = orio_t;
}
/* Mean of all times -- not a good idea in the presence of noise, instead use min */
/* orio_t_total = orio_t_total / REPS; */
orio_t_total = orio_t_min;
mytimeinfo.tm = orio_t_total;
/*@ end switch body @*/
default:
mytimeinfo.testid = -1;
strcpy(mytimeinfo.coord,"");
mytimeinfo.tm = -1;
break;
}
MPI_Gather(&mytimeinfo, 1, TimingInfoMPIType, timevec, 1, TimingInfoMPIType, 0, MPI_COMM_WORLD);
if (myid==0) {
printf("{");
if (mytimeinfo.tm >= 0 && strcmp(mytimeinfo.coord, "") != 0)
printf(" '%s' : %g,", mytimeinfo.coord, mytimeinfo.tm);
for (_i=1; _i<numprocs; _i++) {
if (timevec[_i].tm >= 0 && strcmp(timevec[_i].coord, "") != 0)
printf(" '%s' : %g,", timevec[_i].coord, timevec[_i].tm);
}
printf("}\n");
}
MPI_Finalize();
/*@ epilogue @*/
return 0;
}
'''
# Skeleton for sequential Fortran performance tests: times the tested-code
# region over ORIO_REPS repetitions and prints "{'<coordinate>' : <time>}"
# for the driver to parse.
# BUG FIX: the write statement reported orio_delta_time (the duration of the
# *last* repetition) instead of orio_min_time, which the loop computes for
# exactly this purpose; the C skeletons report the minimum.
SEQ_FORTRAN_DEFAULT = r'''
program main
implicit none
integer, parameter :: double = selected_real_kind(10,40)
integer, parameter :: single = selected_real_kind(5,20)
real(double) :: orio_t_start, orio_t_end, orio_min_time, orio_delta_time
integer :: orio_i
!@ declarations @!
!@ prologue @!
orio_min_time = X'7FF00000' ! large number
do orio_i = 1, ORIO_REPS
orio_t_start = getClock()
!@ tested code @!
orio_t_end = getClock()
orio_delta_time = orio_t_end - orio_t_start
if (orio_delta_time < orio_min_time) then
orio_min_time = orio_delta_time
end if
enddo
write(*,"(A,ES20.13,A)",advance="no") "{'!@ coordinate @!' : ", orio_min_time, "}"
!@ epilogue @!
contains
real(double) function getClock()
implicit none
integer (kind = 8) clock_count, clock_max, clock_rate
integer ( kind = 8 ), parameter :: call_num = 100
call system_clock(clock_count, clock_rate, clock_max)
getClock = dble(clock_count) / dble(call_num * clock_rate)
end function
end program main
'''
# Skeleton for parallel (MPI) Fortran performance tests: each rank times its
# assigned variant, and rank 0 gathers and prints all timings.
# BUG FIXES inside the template:
#  - mpi_comm_size/mpi_comm_rank were called without the ierror argument,
#    which is mandatory in the Fortran `mpi` module bindings;
#  - the `if` guarding the gathered-timings write lacked the trailing `&`
#    continuation (its serial twin a few lines above has one), which is a
#    Fortran free-form syntax error.
PAR_FORTRAN_DEFAULT = r'''
program main
use mpi
implicit none
integer, parameter :: double = selected_real_kind(10,40)
integer, parameter :: single = selected_real_kind(5,20)
type TimingInfo
sequence
integer :: testid
character(len=1024) :: coord
real(double) :: tm
end type TimingInfo
integer :: numprocs, myid, i_, ierror
integer :: TimingInfoMPIType
integer :: blocklen(3) = (/ 1, 1024, 1/)
integer :: disp(3) = (/ 0, 4, 4+1024 /) ! assume four-byte integers
integer :: types(3) = (/ MPI_INTEGER, MPI_CHARACTER, &
MPI_DOUBLE_PRECISION /)
type(TimingInfo) :: mytimeinfo
type(TimingInfo), allocatable :: timevec(:)
real(double) :: orio_t_start, orio_t_end, orio_min_time, orio_delta_time
integer :: orio_i
!@ declarations @!
call mpi_init(ierror)
call mpi_comm_size(MPI_COMM_WORLD, numprocs, ierror)
call mpi_comm_rank(MPI_COMM_WORLD, myid, ierror)
! Construct the MPI type for the timing info (what a pain!)
call mpi_type_create_struct(3, blocklen, disp, types, TimingInfoMPIType, ierror)
call mpi_type_commit(TimingInfoMPIType, ierror)
if (myid == 0) allocate(timevec(0:numprocs-1))
orio_min_time = X'7FF00000' ! large number
!@ prologue @!
select case (myid)
!@ begin switch body @!
mytimeinfo%testid = myid
mytimeinfo%coord = "!@ coordinate @!"
do orio_i = 1, ORIO_REPS
orio_t_start = MPI_Wtime()
!@ tested code @!
orio_t_end = MPI_Wtime()
orio_min_time = min(orio_min_time, orio_t_end - orio_t_start)
enddo
mytimeinfo%tm = orio_min_time
!@ end switch body @!
case default
mytimeinfo%testid = -1
mytimeinfo%coord = ""
mytimeinfo%tm = -1
end select
call mpi_gather(mytimeinfo, 1, TimingInfoMPIType, &
timevec, 1, TimingInfoMPIType, &
0, MPI_COMM_WORLD, ierror)
if (myid == 0) then
write(*,"(A)",advance="no") "{"
if ((mytimeinfo%tm >= 0) .and. (mytimeinfo%coord /= "")) &
write(*,"(3A,ES20.13)",advance="no") " '", mytimeinfo%coord, "' : ", &
mytimeinfo%tm
do i_ = 1, numprocs-1
if ((timevec(i_)%tm >= 0) .and. (timevec(i_)%coord /= "")) &
write (*,"(3A,ES20.13)",advance="no") &
" '", timevec(i_)%coord, "' : ", timevec(i_)%tm
enddo
write(*,"(A)",advance="yes") "}"
endif
call mpi_finalize(ierror)
!@ epilogue @!
end program main
'''
# Skeleton for sequential CUDA performance tests.  Each repetition prints
# "{'<coordinate>' : (<elapsed>,<transfer>)}"; orcu_elapsed/orcu_transfer are
# presumably filled (in milliseconds, via the cudaEvent_t pairs) by the
# inner-measurement snippet spliced in at the tags -- confirm against the
# measurement-code generator.
SEQ_DEFAULT_CUDA = r'''
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <limits.h>
#include <time.h>
#include <unistd.h>
#include <cuda.h>
/*@ global @*/
/*@ external @*/
int main(int argc, char *argv[]) {
/*@ declarations @*/
/*@ prologue @*/
cudaSetDeviceFlags(cudaDeviceBlockingSync);
float orcu_elapsed=0.0, orcu_transfer=0.0;
cudaEvent_t tstart, tstop, start, stop;
cudaEventCreate(&tstart); cudaEventCreate(&tstop);
cudaEventCreate(&start); cudaEventCreate(&stop);
/*@ begin outer measurement @*/
for (int orio_i=0; orio_i<ORIO_REPS; orio_i++) {
/*@ begin inner measurement @*/
/*@ tested code @*/
/*@ end inner measurement @*/
printf("{'/*@ coordinate @*/' : (%g,%g)}\n", orcu_elapsed, orcu_transfer);
}
/*@ end outer measurement @*/
cudaEventDestroy(tstart); cudaEventDestroy(tstop);
cudaEventDestroy(start); cudaEventDestroy(stop);
/*@ epilogue @*/
return 0;
}
'''
class PerfTestSkeletonCode:
    '''
    Wrapper around a C/CUDA skeleton template for the generated
    performance-testing driver.  The skeleton contains /*@ ... @*/
    placeholder tags; insertCode() replaces them with generated code
    fragments to produce the final compilable source.
    '''

    # Regular expressions matching each placeholder tag in the skeleton.
    __GLOBAL_TAG = r'/\*@\s*global\s*@\*/'
    __EXTERNAL_TAG = r'/\*@\s*external\s*@\*/'
    __DECLARATIONS_TAG = r'/\*@\s*declarations\s*@\*/'
    __PROLOGUE_TAG = r'/\*@\s*prologue\s*@\*/'
    __EPILOGUE_TAG = r'/\*@\s*epilogue\s*@\*/'
    __TCODE_TAG = r'/\*@\s*tested\s+code\s*@\*/'
    __BEGIN_INNER_MEASURE_TAG = r'/\*@\s*begin\s+inner\s+measurement\s*@\*/'
    __END_INNER_MEASURE_TAG = r'/\*@\s*end\s+inner\s+measurement\s*@\*/'
    __BEGIN_OUTER_MEASURE_TAG = r'/\*@\s*begin\s+outer\s+measurement\s*@\*/'
    __END_OUTER_MEASURE_TAG = r'/\*@\s*end\s+outer\s+measurement\s*@\*/'
    __VALIDATION_TAG = r'/\*@\s*validation\s+code\s*@\*/'
    __COORD_TAG = r'/\*@\s*coordinate\s*@\*/'
    __BEGIN_SWITCHBODY_TAG = r'/\*@\s*begin\s+switch\s+body\s*@\*/'
    __END_SWITCHBODY_TAG = r'/\*@\s*end\s+switch\s+body\s*@\*/'
    # Non-greedy span between the begin/end switch-body tags; group(1) of a
    # search captures the body itself.
    __SWITCHBODY_TAG = __BEGIN_SWITCHBODY_TAG + r'((.|\n)*?)' + __END_SWITCHBODY_TAG

    def __init__(self, code, use_parallel_search, language='c'):
        '''
        Store the skeleton code (falling back to a built-in default when
        code is None) and validate that all mandatory tags are present.
        NOTE(review): with use_parallel_search set, PAR_DEFAULT is chosen
        regardless of language -- presumably CUDA + parallel search is
        unsupported; confirm upstream.
        '''
        if code == None:
            if use_parallel_search:
                code = PAR_DEFAULT
            else:
                if language == 'c':
                    code = SEQ_DEFAULT
                else:
                    code = SEQ_DEFAULT_CUDA
        self.code = code
        self.use_parallel_search = use_parallel_search
        self.language = language
        self.__checkSkeletonCode(self.code)

    def __checkSkeletonCode(self, code):
        '''
        Abort via err() (or warn() for optional tags) if any expected tag is
        missing from the skeleton; for parallel search, additionally verify
        the switch-body region and the tags nested inside it.
        '''
        match_obj = re.search(self.__GLOBAL_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "global" tag in the skeleton code')
        match_obj = re.search(self.__EXTERNAL_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "external" tag in the skeleton code')
        match_obj = re.search(self.__DECLARATIONS_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "declarations" tag in the skeleton code')
        match_obj = re.search(self.__PROLOGUE_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "prologue" tag in the skeleton code')
        match_obj = re.search(self.__EPILOGUE_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "epilogue" tag in the skeleton code')
        match_obj = re.search(self.__TCODE_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "tested code" tag in the skeleton code')
        match_obj = re.search(self.__BEGIN_INNER_MEASURE_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "begin inner measurement" tag in the skeleton code')
        match_obj = re.search(self.__END_INNER_MEASURE_TAG,code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "end inner measurement" tag in the skeleton code')
        match_obj = re.search(self.__BEGIN_OUTER_MEASURE_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "begin outer measurement" tag in the skeleton code')
        match_obj = re.search(self.__END_OUTER_MEASURE_TAG,code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "end outer measurement" tag in the skeleton code')
        # The validation tag is optional: only a warning is raised.
        match_obj = re.search(self.__VALIDATION_TAG, code)
        if not match_obj:
            warn('main.tuner.skeleton_code: missing "validation code" tag in the skeleton code')
        match_obj = re.search(self.__COORD_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "coordinate" tag in the skeleton code')
        if self.use_parallel_search:
            match_obj = re.search(self.__BEGIN_SWITCHBODY_TAG, code)
            if not match_obj:
                err('main.tuner.skeleton_code: missing "begin switch body" tag in the skeleton code')
            match_obj = re.search(self.__END_SWITCHBODY_TAG, code)
            if not match_obj:
                err('main.tuner.skeleton_code: missing "end switch body" tag in the skeleton code')
            match_obj = re.search(self.__SWITCHBODY_TAG, code)
            if not match_obj:
                err('main.tuner.skeleton_code internal error: missing placement of switch body statement')
            # The tested-code/validation/coordinate tags must also appear
            # *inside* the switch body, since that region is replicated per
            # tested variant.
            switch_body_code = match_obj.group(1)
            match_obj = re.search(self.__TCODE_TAG, switch_body_code)
            if not match_obj:
                err('main.tuner.skeleton_code: missing "tested code" tag in the switch body statement')
            match_obj = re.search(self.__VALIDATION_TAG, switch_body_code)
            if not match_obj:
                warn('main.tuner.skeleton_code: missing "validation code" tag in the switch body statement')
            match_obj = re.search(self.__COORD_TAG, switch_body_code)
            if not match_obj:
                err('main.tuner.skeleton_code: missing "coordinate" tag in the switch body statement')

    def insertCode(self, global_code, prologue_code, epilogue_code, validation_code,
                   begin_inner_measure_code, end_inner_measure_code,
                   begin_outer_measure_code, end_outer_measure_code,
                   tested_code_map):
        '''
        Splice the generated code fragments into the skeleton and return the
        resulting source string.

        tested_code_map maps a coordinate string to a
        (tested_code, externals) pair.  For sequential search it must hold
        exactly one entry; for parallel search each entry becomes one case
        of the generated switch statement, dispatched on the MPI rank.
        NOTE(review): fragments are inserted with re.sub, whose replacement
        string treats backslashes specially -- generated code containing
        '\\' could be corrupted; confirm upstream.
        '''
        if len(tested_code_map) == 0:
            err('main.tuner.skeleton_code internal error: the number of tested codes cannot be zero')
        if not self.use_parallel_search and len(tested_code_map) != 1:
            err('main.tuner.skeleton_code internal error: the number of tested sequential codes must be exactly one')
        code = self.code
        g = Globals()
        if self.language == 'cuda' and len(g.cunit_declarations) > 0:
            # Consume (and clear) CUDA declarations accumulated globally;
            # reduce is the Python 2 builtin here.
            global_code += reduce(lambda x,y: x + y, g.cunit_declarations)
            g.cunit_declarations = []
        # The declarations tag expands to an optional user-supplied macro
        # hook rather than caller-provided code.
        declarations_code = '\n#ifdef MAIN_DECLARATIONS\n  MAIN_DECLARATIONS()\n#endif'
        code = re.sub(self.__GLOBAL_TAG, global_code, code)
        code = re.sub(self.__DECLARATIONS_TAG, declarations_code, code)
        code = re.sub(self.__PROLOGUE_TAG, prologue_code, code)
        code = re.sub(self.__EPILOGUE_TAG, epilogue_code, code)
        if self.use_parallel_search:
            # Replicate the switch-body template once per tested variant,
            # substituting that variant's coordinate and code into its copy.
            switch_body_code = re.search(self.__SWITCHBODY_TAG, code).group(1)
            tcode = ''
            par_externals = ''
            for i, (code_key, (code_value, externals)) in enumerate(tested_code_map.items()):
                scode = switch_body_code
                scode = re.sub(self.__COORD_TAG, code_key, scode)
                scode = re.sub(self.__TCODE_TAG, code_value, scode)
                tcode += '\n'
                tcode += '  case %s:\n' % i
                tcode += '    {\n' + scode + '\n    }\n'
                tcode += '    break;\n'
                par_externals += externals
            code = re.sub(self.__EXTERNAL_TAG, par_externals, code)
            code = re.sub(self.__SWITCHBODY_TAG, tcode, code)
        else:
            # Exactly one entry (checked above); unpack it directly.
            ((coord_key, (tcode, externals)),) = tested_code_map.items()
            code = re.sub(self.__BEGIN_INNER_MEASURE_TAG, begin_inner_measure_code, code)
            code = re.sub(self.__END_INNER_MEASURE_TAG, re.sub(self.__COORD_TAG, coord_key, end_inner_measure_code), code)
            code = re.sub(self.__BEGIN_OUTER_MEASURE_TAG, begin_outer_measure_code, code)
            code = re.sub(self.__END_OUTER_MEASURE_TAG, re.sub(self.__COORD_TAG, coord_key, end_outer_measure_code), code)
            code = re.sub(self.__EXTERNAL_TAG, externals, code)
            code = re.sub(self.__COORD_TAG, coord_key, code)
            code = re.sub(self.__TCODE_TAG, tcode, code)
        code = re.sub(self.__VALIDATION_TAG, validation_code, code)
        return code
class PerfTestSkeletonCodeFortran:
    '''
    Fortran counterpart of PerfTestSkeletonCode: wraps a skeleton template
    that uses !@ ... @! placeholder tags and splices generated code
    fragments into it.

    Changes from the original: insertCode() now substitutes the prologue
    tag (it never did), and no longer references measurement-tag attributes
    that exist only on PerfTestSkeletonCode (a guaranteed AttributeError,
    with coord_key also used before assignment).
    '''

    # Regular expressions matching each placeholder tag in the skeleton.
    __PROLOGUE_TAG = r'!@\s*prologue\s*@!'
    __DECLARATIONS_TAG = r'!@\s*declarations\s*@!'
    __ALLOCATIONS_TAG = r'!@\s*allocation\s*@!'  # defined but currently unused
    __EPILOGUE_TAG = r'!@\s*epilogue\s*@!'
    __TCODE_TAG = r'!@\s*tested\s+code\s*@!'
    __COORD_TAG = r'!@\s*coordinate\s*@!'
    __BEGIN_SWITCHBODY_TAG = r'!@\s*begin\s+switch\s+body\s*@!'
    __END_SWITCHBODY_TAG = r'!@\s*end\s+switch\s+body\s*@!'
    # Non-greedy span between the begin/end switch-body tags.
    __SWITCHBODY_TAG = __BEGIN_SWITCHBODY_TAG + r'((.|\n)*?)' + __END_SWITCHBODY_TAG

    def __init__(self, code, use_parallel_search):
        '''Store the skeleton (default template if code is None) and
        validate that all mandatory tags are present.'''
        if code == None:
            if use_parallel_search:
                code = PAR_FORTRAN_DEFAULT
            else:
                code = SEQ_FORTRAN_DEFAULT
        self.code = code
        self.use_parallel_search = use_parallel_search
        self.__checkSkeletonCode(self.code)

    def __checkSkeletonCode(self, code):
        '''Abort via err() if any mandatory tag is missing.'''
        match_obj = re.search(self.__PROLOGUE_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "prologue" tag in the skeleton code', doexit=True)
        match_obj = re.search(self.__EPILOGUE_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "epilogue" tag in the skeleton code')
        match_obj = re.search(self.__TCODE_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "tested code" tag in the skeleton code')
        match_obj = re.search(self.__COORD_TAG, code)
        if not match_obj:
            err('main.tuner.skeleton_code: missing "coordinate" tag in the skeleton code')
        if self.use_parallel_search:
            match_obj = re.search(self.__BEGIN_SWITCHBODY_TAG, code)
            if not match_obj:
                # (message typo "skeleton_code: :" fixed)
                err('main.tuner.skeleton_code: missing "begin switch body" tag in the skeleton code')
            match_obj = re.search(self.__END_SWITCHBODY_TAG, code)
            if not match_obj:
                err('main.tuner.skeleton_code: missing "end switch body" tag in the skeleton code')
            match_obj = re.search(self.__SWITCHBODY_TAG, code)
            if not match_obj:
                err('main.tuner.skeleton_code: internal error: missing placement of switch body statement')
            # The tested-code and coordinate tags must also appear inside
            # the replicated switch-body region.
            switch_body_code = match_obj.group(1)
            match_obj = re.search(self.__TCODE_TAG, switch_body_code)
            if not match_obj:
                err('main.tuner.skeleton_code: missing "tested code" tag in the switch body statement')
            match_obj = re.search(self.__COORD_TAG, switch_body_code)
            if not match_obj:
                err('main.tuner.skeleton_code: missing "coordinate" tag in the switch body statement')

    def insertCode(self, decl_code, prologue_code, epilogue_code,
                   begin_inner_measure_code, end_inner_measure_code,
                   begin_outer_measure_code, end_outer_measure_code,
                   tested_code_map):
        '''
        Splice the generated code fragments into the skeleton and return
        the resulting Fortran source string.

        tested_code_map maps a coordinate string to the tested-code string;
        it must contain exactly one entry unless parallel search is used.
        The four *_measure_code arguments are accepted for interface parity
        with PerfTestSkeletonCode but are ignored: the Fortran skeletons
        carry their timing code inline and define no measurement tags.
        '''
        if len(tested_code_map) == 0:
            err('main.tuner.skeleton_code: internal error: the number of tested codes cannot be zero')
        if not self.use_parallel_search and len(tested_code_map) != 1:
            err('main.tuner.skeleton_code: internal error: the number of tested sequential codes must be exactly one')
        code = self.code
        code = re.sub(self.__DECLARATIONS_TAG, decl_code, code)
        # BUG FIX: the prologue tag was never substituted, leaving a raw
        # "!@ prologue @!" marker in the generated source.
        code = re.sub(self.__PROLOGUE_TAG, prologue_code, code)
        code = re.sub(self.__EPILOGUE_TAG, epilogue_code, code)
        if self.use_parallel_search:
            # Replicate the switch-body template once per tested variant.
            switch_body_code = re.search(self.__SWITCHBODY_TAG, code).group(1)
            tcode = ''
            for i, (code_key, code_value) in enumerate(tested_code_map.items()):
                scode = switch_body_code
                scode = re.sub(self.__COORD_TAG, code_key, scode)
                scode = re.sub(self.__TCODE_TAG, code_value, scode)
                tcode += '\n'
                tcode += ' case (%s)\n' % i
                tcode += '   \n' + scode + '\n\n'
            code = re.sub(self.__SWITCHBODY_TAG, tcode, code)
        else:
            # Exactly one entry (checked above); unpack it directly.
            # BUG FIX: coord_key is bound *before* the substitutions that
            # use it (the original referenced it while still unbound).
            ((coord_key, tcode),) = tested_code_map.items()
            code = re.sub(self.__COORD_TAG, coord_key, code)
            code = re.sub(self.__TCODE_TAG, tcode, code)
        return code
| true
| true
|
f7164c3b3d1d6ac0d4796def751849537a7f15bf
| 2,893
|
py
|
Python
|
examples/asyncio/wamp/beginner/server.py
|
luhn/AutobahnPython
|
7d519052ab42dc029598ab9e2dbdd7af8e08341f
|
[
"Apache-2.0"
] | null | null | null |
examples/asyncio/wamp/beginner/server.py
|
luhn/AutobahnPython
|
7d519052ab42dc029598ab9e2dbdd7af8e08341f
|
[
"Apache-2.0"
] | null | null | null |
examples/asyncio/wamp/beginner/server.py
|
luhn/AutobahnPython
|
7d519052ab42dc029598ab9e2dbdd7af8e08341f
|
[
"Apache-2.0"
] | null | null | null |
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
import six
import datetime
try:
import asyncio
except ImportError:
## Trollius >= 0.3 was renamed
import trollius as asyncio
from autobahn.asyncio import wamp, websocket
class MyBackendComponent(wamp.ApplicationSession):
    """
    Example embedded application component.  It registers one remotely
    callable procedure that returns the current UTC time, and then
    publishes a monotonically increasing counter to a topic once a second.
    """

    def onConnect(self):
        # Attach to the realm as soon as the transport is up.
        self.join(u"realm1")

    @asyncio.coroutine
    def onJoin(self, details):
        def utcnow():
            print("Someone is calling me;)")
            stamp = datetime.datetime.utcnow()
            return six.u(stamp.strftime("%Y-%m-%dT%H:%M:%SZ"))

        # Expose the procedure for remote calling.
        registration = yield from self.register(utcnow, u'com.timeservice.now')
        print("Registered procedure with ID {}".format(registration.id))

        # Publish an ever-increasing counter, one event per second, forever.
        tick = 0
        while True:
            self.publish(u'com.myapp.topic1', tick)
            print("Published event.")
            tick += 1
            yield from asyncio.sleep(1)
if __name__ == '__main__':
    ## Wire up a WAMP router with one embedded application session and
    ## serve it over WebSocket on localhost:8080.
    router_factory = wamp.RouterFactory()
    session_factory = wamp.RouterSessionFactory(router_factory)
    session_factory.add(MyBackendComponent())

    transport_factory = websocket.WampWebSocketServerFactory(session_factory,
                                                             debug = False,
                                                             debug_wamp = False)

    loop = asyncio.get_event_loop()
    server = loop.run_until_complete(
        loop.create_server(transport_factory, '127.0.0.1', 8080))

    try:
        ## Run until interrupted from the keyboard.
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        server.close()
        loop.close()
| 31.107527
| 79
| 0.620809
| true
| true
|
|
f7164e06d9beb5789eb0b2833a37640e21b54097
| 1,057
|
py
|
Python
|
unit_tests/host/checks/test_securetty.py
|
ChrisMacNaughton/charms.hardening
|
0d98669d4be0a50c2027b0479217c288a61048dd
|
[
"Apache-2.0"
] | 1
|
2016-05-27T14:49:14.000Z
|
2016-05-27T14:49:14.000Z
|
unit_tests/host/checks/test_securetty.py
|
ChrisMacNaughton/charms.hardening
|
0d98669d4be0a50c2027b0479217c288a61048dd
|
[
"Apache-2.0"
] | null | null | null |
unit_tests/host/checks/test_securetty.py
|
ChrisMacNaughton/charms.hardening
|
0d98669d4be0a50c2027b0479217c288a61048dd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from unittest import TestCase
from charms_hardening.host.checks import securetty
class SecureTTYTestCase(TestCase):
    """Checks for the securetty hardening audits."""

    def test_securetty(self):
        """get_audits() yields one TemplatedFile audit of /etc/securetty."""
        audits = securetty.get_audits()
        self.assertEqual(1, len(audits))
        audit = audits[0]
        # assertIsInstance reports the actual type on failure, unlike the
        # original assertTrue(isinstance(...)) which only says "False".
        self.assertIsInstance(audit, securetty.TemplatedFile)
        self.assertEqual('/etc/securetty', audit.paths[0])
| 35.233333
| 74
| 0.747398
|
from unittest import TestCase
from charms_hardening.host.checks import securetty
class SecureTTYTestCase(TestCase):
def test_securetty(self):
audits = securetty.get_audits()
self.assertEqual(1, len(audits))
audit = audits[0]
self.assertTrue(isinstance(audit, securetty.TemplatedFile))
self.assertEqual('/etc/securetty', audit.paths[0])
| true
| true
|
f7164e23a262378cdac37fd86983d9cf906e662f
| 914
|
py
|
Python
|
python/Validate-Binary-Search-Tree/recursion.py
|
yutong-xie/Leetcode-with-python
|
6578f288a757bf76213030b73ec3319a7baa2661
|
[
"MIT"
] | null | null | null |
python/Validate-Binary-Search-Tree/recursion.py
|
yutong-xie/Leetcode-with-python
|
6578f288a757bf76213030b73ec3319a7baa2661
|
[
"MIT"
] | null | null | null |
python/Validate-Binary-Search-Tree/recursion.py
|
yutong-xie/Leetcode-with-python
|
6578f288a757bf76213030b73ec3319a7baa2661
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2020, Yutong Xie, UIUC.
Using recursion to validate BST
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def isValidBST(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        # Every node's value must lie strictly inside the open interval
        # imposed by its ancestors; recurse with tightened bounds.
        def in_range(node, lo, hi):
            if node is None:
                return True
            if not (lo < node.val < hi):
                return False
            return (in_range(node.left, lo, node.val)
                    and in_range(node.right, node.val, hi))

        return in_range(root, float('-inf'), float('inf'))
| 24.052632
| 70
| 0.507659
|
class Solution(object):
def isValidBST(self, root):
def helper(node, lower = float('-inf'), upper = float('inf')):
if not node:
return True
val = node.val
if val <= lower or val >= upper:
return False
if not helper(node.left, lower, val):
return False
if not helper(node.right, val, upper) :
return False
return True
return helper(root)
| true
| true
|
f7164ea55e84788c9420c4d16ae5720e198d41c9
| 540
|
py
|
Python
|
hackerearth/Algorithms/XOR subsequences/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4
|
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerearth/Algorithms/XOR subsequences/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerearth/Algorithms/XOR subsequences/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
    """Drives the solution script with canned stdin and checks its stdout."""

    @patch('builtins.input', side_effect=[
        '1',
        '5',
        '3 2 3 1 2',
    ])
    def test_case_0(self, input_mock=None):
        # Importing the module runs the script; capture whatever it prints.
        captured = io.StringIO()
        with redirect_stdout(captured):
            import solution
        self.assertEqual(captured.getvalue(),
                         '3\n' +
                         '1 2 4\n')


if __name__ == '__main__':
    unittest.main()
| 22.5
| 46
| 0.568519
|
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'1',
'5',
'3 2 3 1 2',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
'3\n' +
'1 2 4\n')
if __name__ == '__main__':
unittest.main()
| true
| true
|
f7164fa5560ca4b44abd3c1dee0041be6cea10e5
| 1,682
|
py
|
Python
|
venv/Lib/site-packages/plotnine/scales/scale_stroke.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/plotnine/scales/scale_stroke.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 1
|
2020-10-02T21:43:06.000Z
|
2020-10-15T22:52:39.000Z
|
venv/Lib/site-packages/plotnine/scales/scale_stroke.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | null | null | null |
from warnings import warn
import numpy as np
from mizani.palettes import rescale_pal
from ..doctools import document
from ..exceptions import PlotnineWarning
from ..utils import alias
from .scale import scale_discrete, scale_continuous
@document
class scale_stroke_continuous(scale_continuous):
    """
    Continuous Stroke Scale
    Parameters
    ----------
    range : array_like
        Range ([Minimum, Maximum]) of output stroke values.
        Should be between 0 and 1. Default is ``(1, 6)``
    {superclass_parameters}
    """
    # Aesthetic(s) this scale maps.
    _aesthetics = ['stroke']

    def __init__(self, range=(1, 6), **kwargs):
        # rescale_pal maps normalized data values onto [range[0], range[1]].
        # NOTE(review): the docstring says values "should be between 0 and 1"
        # yet the default is (1, 6) -- looks copied from a size scale; confirm.
        self.palette = rescale_pal(range)
        scale_continuous.__init__(self, **kwargs)
@document
class scale_stroke_ordinal(scale_discrete):
    """
    Discrete Stroke Scale
    Parameters
    ----------
    range : array_like
        Range ([Minimum, Maximum]) of output stroke values.
        Should be between 0 and 1. Default is ``(1, 6)``
    {superclass_parameters}
    """
    _aesthetics = ['stroke']

    def __init__(self, range=(1, 6), **kwargs):
        # Spread n discrete stroke values evenly across the output range.
        self.palette = lambda n: np.linspace(range[0], range[1], n)
        scale_discrete.__init__(self, **kwargs)
@document
class scale_stroke_discrete(scale_stroke_ordinal):
    """
    Discrete Stroke Scale
    Parameters
    ----------
    {superclass_parameters}
    """
    _aesthetics = ['stroke']

    def __init__(self, **kwargs):
        warn(
            "Using stroke for a ordinal variable is not advised.",
            PlotnineWarning
        )
        # BUG FIX: super().__init__ already binds self; the original passed
        # self again, which made the instance itself the `range` argument
        # of scale_stroke_ordinal.__init__.
        super().__init__(**kwargs)


alias('scale_stroke', scale_stroke_continuous)
| 23.041096
| 66
| 0.639715
|
from warnings import warn
import numpy as np
from mizani.palettes import rescale_pal
from ..doctools import document
from ..exceptions import PlotnineWarning
from ..utils import alias
from .scale import scale_discrete, scale_continuous
@document
class scale_stroke_continuous(scale_continuous):
_aesthetics = ['stroke']
def __init__(self, range=(1, 6), **kwargs):
self.palette = rescale_pal(range)
scale_continuous.__init__(self, **kwargs)
@document
class scale_stroke_ordinal(scale_discrete):
_aesthetics = ['stroke']
def __init__(self, range=(1, 6), **kwargs):
def palette(n):
return np.linspace(range[0], range[1], n)
self.palette = palette
scale_discrete.__init__(self, **kwargs)
@document
class scale_stroke_discrete(scale_stroke_ordinal):
_aesthetics = ['stroke']
def __init__(self, **kwargs):
warn(
"Using stroke for a ordinal variable is not advised.",
PlotnineWarning
)
super().__init__(self, **kwargs)
alias('scale_stroke', scale_stroke_continuous)
| true
| true
|
f7164fed64353c46191cfb979c30db130aa2644f
| 624
|
py
|
Python
|
data/external/repositories/141822/AXA_Telematics-master/Features/modules_janto/contrast.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories/141822/AXA_Telematics-master/Features/modules_janto/contrast.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories/141822/AXA_Telematics-master/Features/modules_janto/contrast.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1
|
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
# -*- coding: utf-8 -*-
"""
(c) 2015
@author: Janto Oellrich
email: joellrich@uos.de
CONTENT
Function for contrast driver sampling
"""
from modules import *
def sampleContrast(trips,n_ref=1000):
    """
    Given the featmatrix samples n_ref contrast trips.
    """
    # Draws n_ref distinct row indices (without replacement) from the
    # feature matrix and returns those rows as the contrast/reference set.
    # NOTE(review): assumes n_ref <= trips.shape[0]; np.random.choice with
    # replace=False raises otherwise -- confirm with callers.
    # (Python 2 source: print statements.)
    print 'Sampling contrast trips...'
    # random sampling of trips from different drivers
    ref_trips = np.random.choice(trips.shape[0],size=(n_ref,1),replace=False)
    # Column 0 holds the sampled indices (choice returned an (n_ref, 1) array).
    ref = trips[ref_trips[:,0],:]
    print '\t\t{0} contrast trips, {1} features'.format(ref.shape[0],ref.shape[1])
    return ref
| 23.111111
| 82
| 0.621795
|
"""
(c) 2015
@author: Janto Oellrich
email: joellrich@uos.de
CONTENT
Function for contrast driver sampling
"""
from modules import *
def sampleContrast(trips,n_ref=1000):
"""
Given the featmatrix samples n_ref contrast trips.
"""
print 'Sampling contrast trips...'
ref_trips = np.random.choice(trips.shape[0],size=(n_ref,1),replace=False)
ref = trips[ref_trips[:,0],:]
print '\t\t{0} contrast trips, {1} features'.format(ref.shape[0],ref.shape[1])
return ref
| false
| true
|
f7164ff9b4c3431a3e2f15fb85cf546bac7adf85
| 154
|
py
|
Python
|
bot.py
|
radimbig/Rouneford
|
9803ffdbc406d122a02db00d7dd73e70c7c1b4aa
|
[
"MIT"
] | 10
|
2019-02-21T20:02:18.000Z
|
2022-03-21T13:45:59.000Z
|
bot.py
|
radimbig/Rouneford
|
9803ffdbc406d122a02db00d7dd73e70c7c1b4aa
|
[
"MIT"
] | null | null | null |
bot.py
|
radimbig/Rouneford
|
9803ffdbc406d122a02db00d7dd73e70c7c1b4aa
|
[
"MIT"
] | 4
|
2021-04-05T14:55:15.000Z
|
2022-03-21T12:54:57.000Z
|
# | Created by Ar4ikov
# | Time: 04.02.2019 - 20:19  (translated from Russian)
from core.core import RounefordBot
# "Ваш-Access-Token" means "Your-Access-Token": a placeholder the user must
# replace with a real access token before running the bot.
bot = RounefordBot(access_token="Ваш-Access-Token")
bot.run()
| 15.4
| 51
| 0.714286
|
from core.core import RounefordBot
bot = RounefordBot(access_token="Ваш-Access-Token")
bot.run()
| true
| true
|
f716502c208f4354a8f11badd10567d680d691b0
| 1,626
|
py
|
Python
|
populate_projects.py
|
anibalsolon/brainhack-donostia.github.io
|
ad4f30f938923af7ff85fed542972f94f2032d13
|
[
"MIT"
] | null | null | null |
populate_projects.py
|
anibalsolon/brainhack-donostia.github.io
|
ad4f30f938923af7ff85fed542972f94f2032d13
|
[
"MIT"
] | 4
|
2020-10-08T13:55:23.000Z
|
2020-10-28T13:03:49.000Z
|
populate_projects.py
|
anibalsolon/brainhack-donostia.github.io
|
ad4f30f938923af7ff85fed542972f94f2032d13
|
[
"MIT"
] | 9
|
2020-10-08T14:02:55.000Z
|
2021-12-02T19:00:36.000Z
|
import os
import pandas as pd
from string import Template
import wget
# Google Sheets CSV-export URL for the projects spreadsheet.
csv_file_path = "https://docs.google.com/spreadsheets/d/1AlflVlTg1KmajQrWBOUBT2XeoAUqfjB9SCQfDIPvSXo/export?format=csv&gid=565678921"
# Local template fragments used to render the projects page.
project_card_path = "assets/templates/project_card.html"
projects_page_path = "assets/templates/template_projects.md"
def populate_project_card(title, description, leader):
    """Fill the HTML card template with one project's title, description
    and leader, returning the rendered HTML fragment as a string."""
    # The with-statement closes the file; the original's explicit close()
    # inside the block (and the str() around a str constant) were redundant.
    with open(project_card_path, 'r') as card:
        card_tpl = Template(card.read())
    return card_tpl.substitute(projectTitle=title,
                               projectDescription=description,
                               projectLeader=leader)
def populate_projects_page(html):
    """Embed the concatenated project-card HTML into the projects page
    template and return the rendered page as a string."""
    # The with-statement closes the file; the original's explicit close()
    # inside the block (and the str() around a str constant) were redundant.
    with open(projects_page_path, 'r') as prj:
        prj_tpl = Template(prj.read())
    return prj_tpl.substitute(projectCards=html,
                              link="/projects/")
def main():
    """Download the projects spreadsheet, render one card per project that
    has a leader, and write the assembled projects.md page."""
    # Fetch the sheet as a CSV file and keep only rows with a leader.
    filename = wget.download(csv_file_path)
    table = pd.read_csv(filename)
    table = table[table["Leader:"].notna()]

    cards_html = ""
    for _, row in table.iterrows():
        cards_html += populate_project_card(row["Project title:"],
                                            row["Project description:"],
                                            row["Leader:"])

    page = populate_projects_page(cards_html)
    with open("projects.md", "wb") as f:
        f.write(page.encode("utf-8"))
    # Clean up the downloaded CSV.
    os.remove(filename)


if __name__ == "__main__":
    main()
| 29.563636
| 133
| 0.652522
|
import os
import pandas as pd
from string import Template
import wget
csv_file_path = "https://docs.google.com/spreadsheets/d/1AlflVlTg1KmajQrWBOUBT2XeoAUqfjB9SCQfDIPvSXo/export?format=csv&gid=565678921"
project_card_path = "assets/templates/project_card.html"
projects_page_path = "assets/templates/template_projects.md"
def populate_project_card(title, description, leader):
with open(str(project_card_path), 'r') as card:
card_tpl = Template(card.read())
card_html = card_tpl.substitute(projectTitle=title,
projectDescription=description,
projectLeader=leader)
card.close()
return card_html
def populate_projects_page(html):
with open(str(projects_page_path), 'r') as prj:
prj_tpl = Template(prj.read())
prj_html = prj_tpl.substitute(projectCards=html,
link="/projects/")
prj.close()
return prj_html
def main():
filename = wget.download(csv_file_path)
df = pd.read_csv(filename)
df = df[df["Leader:"].notna()]
prj_card = ""
for pj_index, prj_row in df.iterrows():
prj_title = prj_row["Project title:"]
prj_descr = prj_row["Project description:"]
prj_leader = prj_row["Leader:"]
prj_card += populate_project_card(prj_title, prj_descr, prj_leader)
prj_page = populate_projects_page(prj_card)
with open("projects.md", "wb") as f:
f.write(prj_page.encode("utf-8"))
os.remove(filename)
if __name__ == "__main__":
main()
| true
| true
|
f7165078f05b4c42f6ab63c51dd0975bdc58bb20
| 1,905
|
py
|
Python
|
tests/test_allocator.py
|
genged/serverallocator
|
39c58ab96d451fc237e055d7b9c6a446d0074877
|
[
"Apache-2.0"
] | null | null | null |
tests/test_allocator.py
|
genged/serverallocator
|
39c58ab96d451fc237e055d7b9c6a446d0074877
|
[
"Apache-2.0"
] | null | null | null |
tests/test_allocator.py
|
genged/serverallocator
|
39c58ab96d451fc237e055d7b9c6a446d0074877
|
[
"Apache-2.0"
] | 1
|
2019-06-26T13:50:04.000Z
|
2019-06-26T13:50:04.000Z
|
from allocation.allocator import Server, App, Allocator
s1 = Server(32, 16, 1000, name="s1")
s2 = Server(32, 16, 1000, name="s2")
def test_allocate_tasks_servers_single_server_task():
_a = App(12, 12, 500)
alloc = Allocator([s1], [_a])
res = alloc.allocate()
expected = [
{
"node": s1,
"apps": [_a]
}
]
assert expected == res
def test_allocate_tasks_servers_more_servers():
a1 = App(12, 12, 500, name="a1")
alloc = Allocator([s1, s2], [a1])
expected1 = [{
"node": s1,
"apps": [a1]
}]
expected2 = [{
"node": s2,
"apps": [a1]
}]
assert alloc.allocate() in [expected1, expected2]
def test_allocate_tasks_servers_more_tasks():
a1 = App(4, 4, 500, name="a1")
a2 = App(4, 4, 100, name="a2")
a3 = App(4, 4, 100, name="a3")
alloc = Allocator([s1], [a1, a2, a3])
expected = [
{
"node": s1,
"apps": [a1, a2, a3]
}
]
res = alloc.allocate()
assert expected == res
def test_allocate_tasks_servers_not_enough_cpu():
server = Server(8, 16, 1000)
a1 = App(4, 4, 100)
a2 = App(4, 4, 100)
a3 = App(4, 4, 100)
alloc = Allocator([server],
[a1, a2, a3])
result = alloc.allocate()
assert [] == result
def test_allocate_tasks_servers_not_enough_mem():
alloc = Allocator([Server(8, 16, 1000)],
[App(4, 12, 100), App(4, 12, 100), App(4, 4, 100)])
result = alloc.allocate()
assert [] == result
def test_anti_affinity_cannot_allocate_on_same_server():
alloc = Allocator([Server(128, 24, 1000)],
[App(2, 4, 10, antiAffinityLabels=["label-1"]), App(4, 8, 10, antiAffinityLabels=["label-1"])])
result = alloc.allocate()
assert [] == result
if __name__ == "__main__":
import pytest
pytest.main()
| 23.8125
| 117
| 0.549606
|
from allocation.allocator import Server, App, Allocator
s1 = Server(32, 16, 1000, name="s1")
s2 = Server(32, 16, 1000, name="s2")
def test_allocate_tasks_servers_single_server_task():
_a = App(12, 12, 500)
alloc = Allocator([s1], [_a])
res = alloc.allocate()
expected = [
{
"node": s1,
"apps": [_a]
}
]
assert expected == res
def test_allocate_tasks_servers_more_servers():
a1 = App(12, 12, 500, name="a1")
alloc = Allocator([s1, s2], [a1])
expected1 = [{
"node": s1,
"apps": [a1]
}]
expected2 = [{
"node": s2,
"apps": [a1]
}]
assert alloc.allocate() in [expected1, expected2]
def test_allocate_tasks_servers_more_tasks():
a1 = App(4, 4, 500, name="a1")
a2 = App(4, 4, 100, name="a2")
a3 = App(4, 4, 100, name="a3")
alloc = Allocator([s1], [a1, a2, a3])
expected = [
{
"node": s1,
"apps": [a1, a2, a3]
}
]
res = alloc.allocate()
assert expected == res
def test_allocate_tasks_servers_not_enough_cpu():
server = Server(8, 16, 1000)
a1 = App(4, 4, 100)
a2 = App(4, 4, 100)
a3 = App(4, 4, 100)
alloc = Allocator([server],
[a1, a2, a3])
result = alloc.allocate()
assert [] == result
def test_allocate_tasks_servers_not_enough_mem():
alloc = Allocator([Server(8, 16, 1000)],
[App(4, 12, 100), App(4, 12, 100), App(4, 4, 100)])
result = alloc.allocate()
assert [] == result
def test_anti_affinity_cannot_allocate_on_same_server():
alloc = Allocator([Server(128, 24, 1000)],
[App(2, 4, 10, antiAffinityLabels=["label-1"]), App(4, 8, 10, antiAffinityLabels=["label-1"])])
result = alloc.allocate()
assert [] == result
if __name__ == "__main__":
import pytest
pytest.main()
| true
| true
|
f71650e79d12d7042562e07291e570fa83922710
| 2,081
|
py
|
Python
|
heart_app/views.py
|
kylepgr/heart-disease-pred
|
d128cc815dde4839ba18e887113bb47387499ce1
|
[
"MIT"
] | null | null | null |
heart_app/views.py
|
kylepgr/heart-disease-pred
|
d128cc815dde4839ba18e887113bb47387499ce1
|
[
"MIT"
] | null | null | null |
heart_app/views.py
|
kylepgr/heart-disease-pred
|
d128cc815dde4839ba18e887113bb47387499ce1
|
[
"MIT"
] | null | null | null |
from typing_extensions import SupportsIndex
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from .forms import InputForm
import pandas as pd
import numpy as np
import pickle
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client['PatientDB']
loaded_model = pickle.load(open("C:/Users/Kyle/Untitled Folder/finalized_model.pkl", 'rb'))
def index(request):
if request.method == "POST":
myform = InputForm(request.POST)
if myform.is_valid():
age = myform.cleaned_data['age_v']
sex = myform.cleaned_data['sex_v']
cp = myform.cleaned_data['cp_v']
thalach = myform.cleaned_data['thalach_v']
exang = myform.cleaned_data['exang_v']
oldpeak = myform.cleaned_data['oldpeak_v']
slope = myform.cleaned_data['slope_v']
ca = myform.cleaned_data['ca_v']
m_inputs = [[age, sex, cp, thalach, exang, oldpeak, slope, ca]]
y_pred = [np.exp(point)/np.sum(np.exp(point), axis=0)
for point in m_inputs]
return render(request, 'index.html', {'prediction': round(y_pred.mean())})
else:
myform = InputForm()
return render(request, 'index.html', {'form': myform})
def updateDataBase(request):
temp={}
temp['age']= myform.cleaned_data['age_v']
temp['sex']= myform.cleaned_data['sex_v']
temp['cp']= myform.cleaned_data['cp_v']
temp['thalach']= myform.cleaned_data['thalach_v']
temp['exang']= myform.cleaned_data['exang_v']
temp['oldpeak']= myform.cleaned_data['oldpeak_v']
temp['slope']= myform.cleaned_data['slope_v']
temp['ca']= myform.cleaned_data['ca_v']
collectionD.insert_one(temp)
countOfrow = collectionD.find().count()
context = {"Row Count": countOfrow}
return render(request,'viewDB.html',context)
| 28.902778
| 92
| 0.600192
|
from typing_extensions import SupportsIndex
from django.shortcuts import render
from django.http import HttpResponse
from .forms import InputForm
import pandas as pd
import numpy as np
import pickle
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client['PatientDB']
loaded_model = pickle.load(open("C:/Users/Kyle/Untitled Folder/finalized_model.pkl", 'rb'))
def index(request):
if request.method == "POST":
myform = InputForm(request.POST)
if myform.is_valid():
age = myform.cleaned_data['age_v']
sex = myform.cleaned_data['sex_v']
cp = myform.cleaned_data['cp_v']
thalach = myform.cleaned_data['thalach_v']
exang = myform.cleaned_data['exang_v']
oldpeak = myform.cleaned_data['oldpeak_v']
slope = myform.cleaned_data['slope_v']
ca = myform.cleaned_data['ca_v']
m_inputs = [[age, sex, cp, thalach, exang, oldpeak, slope, ca]]
y_pred = [np.exp(point)/np.sum(np.exp(point), axis=0)
for point in m_inputs]
return render(request, 'index.html', {'prediction': round(y_pred.mean())})
else:
myform = InputForm()
return render(request, 'index.html', {'form': myform})
def updateDataBase(request):
temp={}
temp['age']= myform.cleaned_data['age_v']
temp['sex']= myform.cleaned_data['sex_v']
temp['cp']= myform.cleaned_data['cp_v']
temp['thalach']= myform.cleaned_data['thalach_v']
temp['exang']= myform.cleaned_data['exang_v']
temp['oldpeak']= myform.cleaned_data['oldpeak_v']
temp['slope']= myform.cleaned_data['slope_v']
temp['ca']= myform.cleaned_data['ca_v']
collectionD.insert_one(temp)
countOfrow = collectionD.find().count()
context = {"Row Count": countOfrow}
return render(request,'viewDB.html',context)
| true
| true
|
f71652cd4034f334dbe9ad941342313344d3ebb5
| 338
|
py
|
Python
|
src/logger.py
|
matthiasBT/task_queue
|
a117c28da0c0150d6f9d8e3e56222e39e9020294
|
[
"MIT"
] | null | null | null |
src/logger.py
|
matthiasBT/task_queue
|
a117c28da0c0150d6f9d8e3e56222e39e9020294
|
[
"MIT"
] | null | null | null |
src/logger.py
|
matthiasBT/task_queue
|
a117c28da0c0150d6f9d8e3e56222e39e9020294
|
[
"MIT"
] | null | null | null |
import logging
FORMATTER = logging.Formatter('%(asctime)-15s %(name)-12s: %(levelname)-8s %(message)s')
def get_logger(logger_name):
logger = logging.getLogger(logger_name)
handler = logging.StreamHandler()
handler.setFormatter(FORMATTER)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
| 26
| 88
| 0.730769
|
import logging
FORMATTER = logging.Formatter('%(asctime)-15s %(name)-12s: %(levelname)-8s %(message)s')
def get_logger(logger_name):
logger = logging.getLogger(logger_name)
handler = logging.StreamHandler()
handler.setFormatter(FORMATTER)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
| true
| true
|
f7165385a558d6bcdd2f949ec3ae80308ee14e61
| 1,382
|
py
|
Python
|
app/schemas/users_schema.py
|
nubilfi/fastapi-starter-kit
|
99a326099c3446584dd0ef18b123c42ba360d364
|
[
"MIT"
] | null | null | null |
app/schemas/users_schema.py
|
nubilfi/fastapi-starter-kit
|
99a326099c3446584dd0ef18b123c42ba360d364
|
[
"MIT"
] | 1
|
2021-03-17T08:24:12.000Z
|
2021-03-17T08:24:12.000Z
|
app/schemas/users_schema.py
|
nubilfi/fastapi-starter-kit
|
99a326099c3446584dd0ef18b123c42ba360d364
|
[
"MIT"
] | null | null | null |
"""
It is a Pydantic model for Users
"""
from typing import Optional
from pydantic import BaseModel, EmailStr
class UsersBase(BaseModel):
"""
A schema class used to represent Users table column values
"""
Username: Optional[str] = None
Fullname: Optional[str] = None
Email: Optional[EmailStr] = None
Status: bool = None
class Config:
"""
Instead of using username = data["Username"]
replace it with username = data.Username
"""
orm_mode = True
class UsersCreate(UsersBase):
"""
A schema class used to represent column to create a new user
"""
Username: str
Password: str
class Config:
"""enable orm mode"""
orm_mode = True
class UsersUpdate(UsersBase):
"""
A schema class used to update user password
"""
Password: Optional[str] = None
class Config:
"""enable orm mode"""
orm_mode = True
class UsersInDBBase(UsersBase):
"""
A schema class used to represent user data based on its ID
"""
UserId: Optional[int] = None
class Config:
"""enable orm mode"""
orm_mode = True
class User(UsersInDBBase):
"""
Provide a user data
"""
pass #pylint: disable=unnecessary-pass
class UsersInDB(UsersInDBBase):
"""Store hashed password through this property"""
Password: str
| 20.028986
| 64
| 0.62301
|
from typing import Optional
from pydantic import BaseModel, EmailStr
class UsersBase(BaseModel):
Username: Optional[str] = None
Fullname: Optional[str] = None
Email: Optional[EmailStr] = None
Status: bool = None
class Config:
orm_mode = True
class UsersCreate(UsersBase):
Username: str
Password: str
class Config:
orm_mode = True
class UsersUpdate(UsersBase):
Password: Optional[str] = None
class Config:
orm_mode = True
class UsersInDBBase(UsersBase):
UserId: Optional[int] = None
class Config:
orm_mode = True
class User(UsersInDBBase):
pass
class UsersInDB(UsersInDBBase):
Password: str
| true
| true
|
f71654c0cce099e4833874597c1371fa21b320d7
| 4,188
|
py
|
Python
|
kubernetes_asyncio/client/api/apiregistration_api.py
|
dineshsonachalam/kubernetes_asyncio
|
d57e9e9be11f6789e1ce8d5b161acb64d29acf35
|
[
"Apache-2.0"
] | 1
|
2021-02-25T04:36:18.000Z
|
2021-02-25T04:36:18.000Z
|
kubernetes_asyncio/client/api/apiregistration_api.py
|
hubo1016/kubernetes_asyncio
|
d57e9e9be11f6789e1ce8d5b161acb64d29acf35
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/api/apiregistration_api.py
|
hubo1016/kubernetes_asyncio
|
d57e9e9be11f6789e1ce8d5b161acb64d29acf35
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes_asyncio.client.api_client import ApiClient
class ApiregistrationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_api_group_with_http_info(**kwargs) # noqa: E501
return data
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 33.238095
| 119
| 0.620105
|
from __future__ import absolute_import
import re
import six
from kubernetes_asyncio.client.api_client import ApiClient
class ApiregistrationApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_group_with_http_info(**kwargs)
else:
(data) = self.get_api_group_with_http_info(**kwargs)
return data
def get_api_group_with_http_info(self, **kwargs):
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| true
| true
|
f7165577ff3dd7cd5788b42cf832946dcd47c7cc
| 512
|
py
|
Python
|
mainapp/migrations/0056_auto_20180819_1420.py
|
reyasmohammed/rescuekerala
|
68ee6cd4ea7b94e04fd32c4d488bcd7a8f2d371c
|
[
"MIT"
] | 1
|
2021-12-09T17:59:01.000Z
|
2021-12-09T17:59:01.000Z
|
mainapp/migrations/0056_auto_20180819_1420.py
|
reyasmohammed/rescuekerala
|
68ee6cd4ea7b94e04fd32c4d488bcd7a8f2d371c
|
[
"MIT"
] | 1
|
2018-08-18T12:00:29.000Z
|
2018-08-18T12:00:29.000Z
|
mainapp/migrations/0056_auto_20180819_1420.py
|
reyasmohammed/rescuekerala
|
68ee6cd4ea7b94e04fd32c4d488bcd7a8f2d371c
|
[
"MIT"
] | 5
|
2019-11-07T11:34:56.000Z
|
2019-11-07T11:36:00.000Z
|
# Generated by Django 2.1 on 2018-08-19 08:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0055_rescuecamp_facilities_available'),
]
operations = [
migrations.AlterField(
model_name='rescuecamp',
name='facilities_available',
field=models.TextField(blank=True, null=True, verbose_name='Facilities Available (light, kitchen, toilets etc.) - ലഭ്യമായ സൗകര്യങ്ങൾ'),
),
]
| 26.947368
| 147
| 0.646484
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0055_rescuecamp_facilities_available'),
]
operations = [
migrations.AlterField(
model_name='rescuecamp',
name='facilities_available',
field=models.TextField(blank=True, null=True, verbose_name='Facilities Available (light, kitchen, toilets etc.) - ലഭ്യമായ സൗകര്യങ്ങൾ'),
),
]
| true
| true
|
f71655cb97a9c639ac16beca73e658af16c9bb94
| 1,003
|
py
|
Python
|
__init__.py
|
Pratik2587/Medical-Cost-Predictor-
|
69e23300e24edd121c6c26e1f3ba71adf8c779ad
|
[
"MIT"
] | 1
|
2020-07-17T23:16:21.000Z
|
2020-07-17T23:16:21.000Z
|
__init__.py
|
Pratik2587/Medical-Cost-Predictor-
|
69e23300e24edd121c6c26e1f3ba71adf8c779ad
|
[
"MIT"
] | null | null | null |
__init__.py
|
Pratik2587/Medical-Cost-Predictor-
|
69e23300e24edd121c6c26e1f3ba71adf8c779ad
|
[
"MIT"
] | null | null | null |
# init.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
# init SQLAlchemy so we can use it later in our models
db = SQLAlchemy()
def create_app():
app = Flask(__name__)
app.config['SECRET_KEY'] = '9OLWxND4o83j4K4iuopO'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
db.init_app(app)
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
login_manager.init_app(app)
from .models import User
@login_manager.user_loader
def load_user(user_id):
# since the user_id is just the primary key of our user table, use it in the query for the user
return User.query.get(int(user_id))
# blueprint for auth routes in our app
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint)
# blueprint for non-auth parts of app
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
| 27.108108
| 103
| 0.724826
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
db = SQLAlchemy()
def create_app():
app = Flask(__name__)
app.config['SECRET_KEY'] = '9OLWxND4o83j4K4iuopO'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
db.init_app(app)
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
login_manager.init_app(app)
from .models import User
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
| true
| true
|
f71655d23318261e91879bbd7165b323acf8dd52
| 51,690
|
py
|
Python
|
src/scseirx/model_SEIRX.py
|
JanaLasser/agent_based_COVID_SEIRX
|
c4e28d472a0484fe1a125ba6974683973141c09e
|
[
"MIT"
] | 7
|
2020-11-16T12:34:18.000Z
|
2022-01-28T15:09:09.000Z
|
src/scseirx/model_SEIRX.py
|
JanaLasser/agent_based_COVID_SEIRX
|
c4e28d472a0484fe1a125ba6974683973141c09e
|
[
"MIT"
] | 62
|
2020-11-23T07:51:44.000Z
|
2022-03-18T12:56:37.000Z
|
src/scseirx/model_SEIRX.py
|
JanaLasser/agent_based_COVID_SEIRX
|
c4e28d472a0484fe1a125ba6974683973141c09e
|
[
"MIT"
] | 8
|
2020-11-30T09:45:55.000Z
|
2022-03-18T11:20:23.000Z
|
import numpy as np
import networkx as nx
from math import gamma
from scipy.optimize import root_scalar
from mesa import Model
from mesa.time import RandomActivation, SimultaneousActivation
from mesa.datacollection import DataCollector
from scseirx.testing_strategy import Testing
## data collection functions ##
def get_N_diagnostic_tests(model):
return model.number_of_diagnostic_tests
def get_N_preventive_screening_tests(model):
return model.number_of_preventive_screening_tests
def get_infection_state(agent):
if agent.exposed == True: return 'exposed'
elif agent.infectious == True: return 'infectious'
elif agent.recovered == True: return 'recovered'
else: return 'susceptible'
def get_quarantine_state(agent):
if agent.quarantined == True: return True
else: return False
def get_undetected_infections(model):
return model.undetected_infections
def get_predetected_infections(model):
return model.predetected_infections
def get_pending_test_infections(model):
return model.pending_test_infections
def get_diagnostic_test_detected_infections_student(model):
return model.positive_tests[model.Testing.diagnostic_test_type]['student']
def get_diagnostic_test_detected_infections_teacher(model):
return model.positive_tests[model.Testing.diagnostic_test_type]['teacher']
def get_diagnostic_test_detected_infections_family_member(model):
return model.positive_tests[model.Testing.diagnostic_test_type]['family_member']
def get_diagnostic_test_detected_infections_resident(model):
return model.positive_tests[model.Testing.diagnostic_test_type]['resident']
def get_diagnostic_test_detected_infections_employee(model):
return model.positive_tests[model.Testing.diagnostic_test_type]['employee']
def get_diagnostic_test_detected_infections_unistudent(model):
return model.positive_tests[model.Testing.diagnostic_test_type]['unistudent']
def get_diagnostic_test_detected_infections_lecturer(model):
return model.positive_tests[model.Testing.diagnostic_test_type]['lecturer']
diagnostic_test_detected_infections_funcs = {
'student':get_diagnostic_test_detected_infections_student,
'teacher':get_diagnostic_test_detected_infections_teacher,
'family_member':get_diagnostic_test_detected_infections_family_member,
'resident':get_diagnostic_test_detected_infections_resident,
'employee':get_diagnostic_test_detected_infections_employee,
'unistudent':get_diagnostic_test_detected_infections_unistudent,
'lecturer':get_diagnostic_test_detected_infections_lecturer
}
def get_preventive_test_detected_infections_student(model):
return model.positive_tests[model.Testing.preventive_screening_test_type]['student']
def get_preventive_test_detected_infections_teacher(model):
return model.positive_tests[model.Testing.preventive_screening_test_type]['teacher']
def get_preventive_test_detected_infections_family_member(model):
return model.positive_tests[model.Testing.preventive_screening_test_type]['family_member']
def get_preventive_test_detected_infections_resident(model):
return model.positive_tests[model.Testing.preventive_screening_test_type]['resident']
def get_preventive_test_detected_infections_employee(model):
return model.positive_tests[model.Testing.preventive_screening_test_type]['employee']
def get_preventive_test_detected_infections_unistudent(model):
return model.positive_tests[model.Testing.preventive_screening_test_type]['unistudent']
def get_preventive_test_detected_infections_lecturer(model):
return model.positive_tests[model.Testing.preventive_screening_test_type]['lecturer']
preventive_test_detected_infections_funcs = {
'student':get_preventive_test_detected_infections_student,
'teacher':get_preventive_test_detected_infections_teacher,
'family_member':get_preventive_test_detected_infections_family_member,
'resident':get_preventive_test_detected_infections_resident,
'employee':get_preventive_test_detected_infections_employee,
'unistudent':get_preventive_test_detected_infections_unistudent,
'lecturer':get_preventive_test_detected_infections_lecturer
}
# parameter sanity check functions
def check_positive(var):
assert var >= 0, 'negative number'
return var
def check_bool(var):
assert type(var) == bool, 'not a bool'
return var
def check_positive_int(var):
if var == None:
return var
assert type(var) == int, 'not an integer'
assert var >= 0, 'negative number'
return var
def check_contact_type_dict(var):
assert type(var) == dict, 'not a dictionary'
assert set(var.keys()).issubset({'very_far', 'far', 'intermediate', 'close'}), \
'does not contain the correct contact types (has to be very_far, far, intermediate or close)'
assert all((isinstance(i, int) or isinstance(i, float)) for i in var.values()), \
'contact type weights are not numeric'
return var
def check_K1_contact_types(var):
for area in var:
assert area in ['very_far', 'far', 'intermediate',
'close'], 'K1 contact type not recognised'
return var
def check_testing(var):
assert var in ['diagnostic', 'background', 'preventive',
'background+preventive', False], \
'unknown testing mode: {}'.format(var)
return var
def check_probability(var):
assert (type(var) == float) or (var == 0) or (var == 1), \
'{} not a float'.format(var)
assert var >= 0, 'probability negative'
assert var <= 1, 'probability larger than 1'
return var
def check_graph(var):
assert type(var) in [nx.Graph, nx.MultiGraph], 'not a networkx graph'
assert len(var.nodes) > 0, 'graph has no nodes'
assert len(var.edges) > 0, 'graph has no edges'
areas = [e[2]['contact_type'] for e in var.edges(data=True)]
areas = set(areas)
for a in areas:
assert a in {'very_far', 'far', 'intermediate',
'close'}, 'contact type {} not recognised'.format(a)
return var
def check_index_case(var, agent_types):
allowed_strings = agent_types[:]
allowed_strings.extend(['continuous'])
assert var in allowed_strings, 'unknown index case mode'
return var
def check_discount(var):
if var['slope'] != None:
assert var['slope'] <= 0, 'slope needs to be <= 0 or None'
assert np.abs(var['slope']) <= 1, 'absolute value of slope needs to be <= 1'
assert var['intercept'], 'intercept needs to be positive'
assert var['intercept'], 'intercept needs to be <= 1'
return var
def get_weibull_shape(k, mu, var):
'''
Calculates the shape parameter of a Weibull distribution, given its mean
mu and its variance var
'''
return var / mu**2 - gamma(1 + 2/k) / gamma(1+1/k)**2 + 1
def get_weibull_scale(mu, k):
'''
Calculates the scale parameter of a Weibull distribution, given its mean
mu and its shape parameter k
'''
return mu / gamma(1 + 1/k)
def weibull_two_param(shape, scale):
'''
A two-parameter Weibull distribution, based on numpy ramdon's single
parameter distribution. We use this distribution in the simulation to draw
random epidemiological parameters for agents from the given distribution
See https://numpy.org/doc/stable/reference/random/generated/numpy.random.weibull.html
'''
return scale * np.random.weibull(shape)
class SEIRX(Model):
'''
A model with a number of different agents that reproduces
the SEIRX dynamics of pandemic spread in a facility. Note:
all times are set to correspond to days
G: networkx undirected graph, interaction graph between agents. Edges have
to have edge the edge attribute 'contact_type' specifying the closeness of
contacts, which can be ['very far', 'far', 'intermediate' and 'close'].
Nodes have to have the node attribute 'type' which specifies the agent type
of the given node (for example 'student' or 'teacher' in a school scenario).
In addition, nodes can have the attribute 'unit', which assigns them to a
unit in space (for example a 'class' in a school scenario).
verbosity: integer in [0, 1, 2], controls text output to std out to track
simulation progress and transmission dynamics. Default = 0.
testing, default = 'diagnostic'
'diagnostic': only diagnostic tests for symptomatic agents
'background': adds background screens of all agents after a positive
diagnostic test
'preventive': adds preventive screens of agent groups to diagnostic
testing. Screens happen in time intervals specified
separately for each agent group in the variable
'screening_interval'.
'background+preventive': preventive screens AND background screens on
top of diagnostic testing.
infection_duration, default = 11 NOTE: includes the time an agent is exposed
but not yet infectious at the beginning of an infection
positive integer: mean or median of the infection duration in days
list of two floats: mean and standard deviation of a distribution
specifying the infection duration in days. These
numbers will be used to construct a Weibull
distribution from which the infection duration will
be drawn for every agent individually
exposure_duration, default = 4. Sets the time from transmission to becoming
infectious
positive integer: mean or median of the exposure duration in days
list of two floats: mean and standard deviation of a distribution
specifying the exposure duration in days. These
numbers will be used to construct a Weibull
distributoin from which the exposure duration will
be drawn for every agent individually.
time_until_symptoms, default = 6. Sets the time from transmission to
(potentially) developing symptoms. Symptom probability has to be set for
each agent group individually using the parameter 'symptom_probability'
positive integer: mean or median of the time until symptoms in days
list of two floats: mean and standard deviation of a distribution
specifying the time until symptoms in days. These
numbers will be used to construct a Weibull
distribution from which the time until symptoms will
be drawn for every agent individually.
quarantine_duration, default = 14. Positive integer, sets the time a
positively tested agent is quarantined in days
infection_risk_contact_type_weights: dictionary of the form
{'very_far':float, 'far':float, 'intermediate':float, 'close':float}
that sets transmission risk multipliers for different contact types of
agents specified in the contact network G. Default: {'very_far': 0.1,
'far': 0.5, 'intermediate': 1, 'close': 3}
subclinical_modifier: default = 1.0. Float, modifies the infectiousness of
asymptomatic cases. Example: if subclinical_modifier = 0.5, the
infectiousness of an asymptomatic case will be reduced to 50%.
K1_contact_types: list of strings from ['very_far', 'far', 'intermediate',
'close']. Definition of contact types for which agents are considered
"K1 contact persons" if they had contact to a positively tested person wtith
a specified contact intensity. Default = ['close'].
diagnostic_test_type, default = 'one_day_PCR'. String, specifies the test
technology and test result turnover time used for diagnostic testing. For
example 'same_day_antigen' or 'two_day_PCR'. See module "Testing" for
different implemented testing techologies.
preventive_screening_test_type:, default = 'one_day_PCR', String, specifies
the test technology and test result turnover time used for preventive
sreening. For example 'same_day_antigen' or 'two_day_PCR'. See module
"Testing" for different implemented testing techologies.
follow_up_testing_interval, default = None. Positive integer, sets the time
a follow-up screen (background screen) is initiated after an initial screen
triggered by a positive test result. Only applies if the testing strategy is
'background' or preventive.
liberating_testing, default = False. Boolean, flag that specifies, whether
or not an agent is released from quarantine after returning a negative test
result.
index_case, default = 'employee' (nursing home scenario) or 'teacher'
(school scenario). Specifies how infections are introduced into the facility.
agent_type: If an agent type (for example 'student' or 'teacher' in
the school scenario) is specified, a single randomly
chosen agent from this agent group will become the index
case and no further index cases will be introduced into
the scenario.
'continuous': In this case, agents have a continuous risk to become
index cases in every simulation step. The risk has to
be specified for every agent group individually, using
the 'index_probability' parameter. If only a single
agent group has a non-zero index probability, then only
agents from this group can become index cases.
agent_types: dictionary of the structure
{
agent type:
{
screening interval : integer, number of days between each preventive
screen in this agent group
index probability : float in the range [0, 1], sets the probability
to become an index case in each time step
mask : bool
whether or not the agent type is wearing a mask
}
}
The dictionary's keys are the names of the agent types which have to
correspond to the node attributes in the contact graph. The screening
interval sets the time-delay between preventive screens of this agent group,
the index probability sets the probability of a member of this agent group
becoming an index case in every time step
seed: positive integer, fixes the seed of the simulation to enable
repeatable simulation runs. If seed = None, the simulation will be
initialized at random.
'''
def __init__(self, G,
verbosity = 0,
base_transmission_risk = 0.05,
testing='diagnostic',
exposure_duration = [5.0, 1.9],
time_until_symptoms = [6.4, 0.8],
infection_duration = [10.91, 3.95],
quarantine_duration = 10,
subclinical_modifier = 0.6,
infection_risk_contact_type_weights = {
'very_far': 0.1,
'far': 0.25,
'intermediate': 0.5,
'close': 1},
K1_contact_types = ['close'],
diagnostic_test_type = 'one_day_PCR',
preventive_screening_test_type = 'same_day_antigen',
follow_up_testing_interval = None,
liberating_testing = False,
index_case = 'teacher',
agent_types = {
'teacher': {'screening_interval': None,
'index_probability': 0,
'mask':False,
'vaccination_ratio': 0},
'student': {'screening_interval': None,
'index_probability': 0,
'mask':False,
'vaccination_ratio': 0},
'family_member':{'screening_interval': None,
'index_probability': 0,
'mask':False,
'vaccination_ratio': 0}},
age_transmission_risk_discount = \
{'slope':-0.02,
'intercept':1},
age_symptom_modification = \
{'slope':-0.02545,
'intercept':0.854545},
mask_filter_efficiency = {'exhale':0, 'inhale':0},
transmission_risk_ventilation_modifier = 0,
transmission_risk_vaccination_modifier = {
'reception':1,
'transmission':0},
seed = None):
# mesa models already implement fixed seeds through their own random
# number generations. Sadly, we need to use the Weibull distribution
# here, which is not implemented in mesa's random number generation
# module. Therefore, we need to initialize the numpy random number
# generator with the given seed as well
if seed != None:
np.random.seed(seed)
# sets the (daily) transmission risk for a household contact without
# any precautions. Target infection ratios are taken from literature
# and the value of the base_transmission_risk is calibrated such that
# the simulation produces the correct infection ratios in a household
# setting with the given distributions for epidemiological parameters
# of agents
self.base_transmission_risk = base_transmission_risk
# sets the level of detail of text output to stdout (0 = no output)
self.verbosity = check_positive_int(verbosity)
# flag to turn off the testing & tracing strategy
self.testing = check_testing(testing)
self.running = True # needed for the batch runner implemented by mesa
# set the interaction mode to simultaneous activation
self.schedule = SimultaneousActivation(self)
# internal step counter used to launch screening tests
self.Nstep = 0
# since we may have weekday-specific contact networks, we need
# to keep track of the day of the week. Since the index case
# per default is introduced at step 0 in index case mode, we
# need to offset the starting weekday by a random number of weekdays
# to prevent artifacts from always starting on the same day of the week
self.weekday_offset = self.random.randint(1, 8)
self.weekday = self.Nstep + self.weekday_offset
## epidemiological parameters: can be either a single integer or the
# mean and standard deviation of a distribution
self.epi_params = {}
# counter to track the number of pathological parameter combinations
# that had to be re-rolled (only here for debugging and control reasons)
self.param_rerolls = 0
for param, param_name in zip([exposure_duration, time_until_symptoms,
infection_duration],['exposure_duration', 'time_until_symptoms',
'infection_duration']):
if isinstance(param, int):
self.epi_params[param_name] = check_positive_int(param)
elif isinstance(param, list) and len(param) == 2:
mu = check_positive(param[0])
var = check_positive(param[1]**2)
shape = root_scalar(get_weibull_shape, args=(mu, var),
method='toms748', bracket=[0.2, 500]).root
scale = get_weibull_scale(mu, shape)
self.epi_params[param_name] = [shape, scale]
else:
print('{} format not recognized, should be either a single '+\
'int or a tuple of two positive numbers'.format(param_name))
# duration of quarantine
self.quarantine_duration = check_positive_int(quarantine_duration)
self.infection_risk_area_weights = check_contact_type_dict(
infection_risk_contact_type_weights)
# modifier for infectiosness for asymptomatic cases
self.subclinical_modifier = check_positive(subclinical_modifier)
# modifiers for the infection risk, depending on contact type
self.infection_risk_contact_type_weights = infection_risk_contact_type_weights
# modifications for age-dependent transmission and reception risks and
# symptom probabilities
self.age_transmission_risk_discount = \
check_discount(age_transmission_risk_discount)
self.age_symptom_modification = age_symptom_modification
#check_discount(age_symptom_modification)
self.mask_filter_efficiency = mask_filter_efficiency
self.transmission_risk_ventilation_modifier = \
transmission_risk_ventilation_modifier
self.transmission_risk_vaccination_modifier = \
transmission_risk_vaccination_modifier
## agents and their interactions
# interaction graph of agents
self.G = check_graph(G)
# add weights as edge attributes so they can be visualised easily
if type(self.G) == nx.MultiGraph:
for (u, v, key, contact_type) in self.G.edges(keys=True,
data='contact_type'):
self.G[u][v][key]['weight'] = \
self.infection_risk_contact_type_weights[contact_type]
else:
for e in G.edges(data=True):
G[e[0]][e[1]]['weight'] = self.infection_risk_contact_type_weights\
[G[e[0]][e[1]]['contact_type']]
# extract the different agent types from the contact graph
self.agent_types = list(agent_types.keys())
# dictionary of available agent classes with agent types and classes
self.agent_classes = {}
if 'resident' in agent_types:
from scseirx.agent_resident import resident
self.agent_classes['resident'] = resident
if 'employee' in agent_types:
from scseirx.agent_employee import employee
self.agent_classes['employee'] = employee
if 'student' in agent_types:
from scseirx.agent_student import student
self.agent_classes['student'] = student
if 'teacher' in agent_types:
from scseirx.agent_teacher import teacher
self.agent_classes['teacher'] = teacher
if 'family_member' in agent_types:
from scseirx.agent_family_member import family_member
self.agent_classes['family_member'] = family_member
if 'lecturer' in agent_types:
from scseirx.agent_lecturer import lecturer
self.agent_classes['lecturer'] = lecturer
if 'unistudent' in agent_types:
from scseirx.agent_unistudent import unistudent
self.agent_classes['unistudent'] = unistudent
## set agent characteristics for all agent groups
# list of agent characteristics
params = ['screening_interval','index_probability', 'mask' ,'vaccination_ratio',
'voluntary_testing_rate']
# default values that are used in case a characteristic is not specified
# for an agent group
defaults = {'screening_interval':None,
'index_probability':0,
'mask':False,
'vaccination_ratio':0,
'voluntary_testing_rate':1
}
# sanity checks that are applied to parameters passed to the class
# constructor to make sure they conform to model expectations
check_funcs = [check_positive_int, check_probability, check_bool,
check_probability, check_probability]
# member dicts that store the parameter values for each agent group
self.screening_intervals = {}
self.index_probabilities = {}
self.masks = {}
self.vaccination_probabilities = {}
self.voluntary_testing_rates = {}
param_dicts = [self.screening_intervals, self.index_probabilities,
self.masks, self.vaccination_probabilities, self.voluntary_testing_rates]
# iterate over all possible agent parameters and agent groups: set the
# respective value to the value passed through the constructor or to
# the default value if no value has been passed
for param,param_dict,check_func in zip(params,param_dicts,check_funcs):
for at in self.agent_types:
try:
param_dict.update({at:check_func(agent_types[at][param])})
except KeyError:
param_dict.update({at:defaults[param]})
# pass all parameters relevant for the testing strategy to the testing
# class. NOTE: this separation is not a strictly necessary design
# decision but I like to keep the parameters related to testing and
# tracing in a separate place
self.Testing = Testing(self, diagnostic_test_type,
preventive_screening_test_type,
check_positive_int(follow_up_testing_interval),
self.screening_intervals,
check_bool(liberating_testing),
check_K1_contact_types(K1_contact_types),
verbosity)
# specifies either continuous probability for index cases in agent
# groups based on the 'index_probability' for each agent group, or a
# single (randomly chosen) index case in the passed agent group
self.index_case = check_index_case(index_case, self.agent_types)
self.num_agents = {}
## add agents
# extract the agent nodes from the graph and add them to the scheduler
for agent_type in self.agent_types:
IDs = [x for x,y in G.nodes(data=True) if y['type'] == agent_type]
self.num_agents.update({agent_type:len(IDs)})
# get the agent locations (units) from the graph node attributes
units = [self.G.nodes[ID]['unit'] for ID in IDs]
# determine the agents that will be vaccinated, given the
# vaccination ratio of the respective agent group
vaccination_status = np.asarray([False] * len(IDs))
if self.vaccination_probabilities[agent_type] > 0:
n = round(self.vaccination_probabilities[agent_type] * len(IDs))
idx = list(range(len(IDs)))
rnd_idx = np.asarray(self.random.sample(idx, n))
vaccination_status[rnd_idx] = True
for ID, unit, vaccinated in zip(IDs, units, vaccination_status):
tmp_epi_params = {}
# for each of the three epidemiological parameters, check if
# the parameter is an integer (if yes, pass it directly to the
# agent constructor), or if it is specified by the shape and
# scale parameters of a Weibull distribution. In the latter
# case, draw a new number for every agent from the distribution
# NOTE: parameters drawn from the distribution are rounded to
# the nearest integer
while True:
for param_name, param in self.epi_params.items():
if isinstance(param, int):
tmp_epi_params[param_name] = param
else:
tmp_epi_params[param_name] = \
round(weibull_two_param(param[0], param[1]))
if tmp_epi_params['exposure_duration'] > 0 and \
tmp_epi_params['time_until_symptoms'] >= \
tmp_epi_params['exposure_duration'] and\
tmp_epi_params['infection_duration'] > \
tmp_epi_params['exposure_duration']:
break
else:
self.param_rerolls += 1
if verbosity > 1:
print('pathological epi-param case found!')
print(tmp_epi_params)
# check if the agent participates in voluntary testing
p = self.voluntary_testing_rates[agent_type]
voluntary_testing = np.random.choice([True, False],
p=[p, 1-p])
# construct the agent object
a = self.agent_classes[agent_type](ID, unit, self,
tmp_epi_params['exposure_duration'],
tmp_epi_params['time_until_symptoms'],
tmp_epi_params['infection_duration'],
vaccinated,
voluntary_testing,
verbosity)
self.schedule.add(a)
# infect the first agent in single index case mode
if self.index_case != 'continuous':
infection_targets = [
a for a in self.schedule.agents if a.type == index_case]
# pick a random agent to infect in the selected agent group
target = self.random.randint(0, len(infection_targets) - 1)
infection_targets[target].exposed = True
if self.verbosity > 0:
print('{} exposed: {}'.format(index_case,
infection_targets[target].ID))
# list of agents that were tested positive this turn
self.newly_positive_agents = []
# flag that indicates if there were new positive tests this turn
self.new_positive_tests = False
# dictionary of flags that indicate whether a given agent group has
# been creened this turn
self.screened_agents= {
'reactive':{agent_type: False for agent_type in self.agent_types},
'follow_up':{agent_type: False for agent_type in self.agent_types},
'preventive':{agent_type: False for agent_type in self.agent_types}}
# dictionary of counters that count the days since a given agent group
# was screened. Initialized differently for different index case modes
if (self.index_case == 'continuous') or \
(not np.any(list(self.Testing.screening_intervals.values()))):
self.days_since_last_agent_screen = {agent_type: 0 for agent_type in
self.agent_types}
# NOTE: if we initialize these variables with 0 in the case of a single
# index case, we introduce a bias since in 'single index case mode' the
# first index case will always become exposed in step 0. To realize
# random states of the preventive sceening procedure with respect to the
# incidence of the index case, we have to randomly pick the days since
# the last screen for the agent group from which the index case is
else:
self.days_since_last_agent_screen = {}
for agent_type in self.agent_types:
if self.Testing.screening_intervals[agent_type] != None:
self.days_since_last_agent_screen.update({
agent_type: self.random.choice(range(0,
self.Testing.screening_intervals[agent_type] + 1))})
else:
self.days_since_last_agent_screen.update({agent_type: 0})
# dictionary of flags that indicates whether a follow-up screen for a
# given agent group is scheduled
self.scheduled_follow_up_screen = {agent_type: False for agent_type in
self.agent_types}
# counters
self.number_of_diagnostic_tests = 0
self.number_of_preventive_screening_tests = 0
self.positive_tests = {self.Testing.preventive_screening_test_type:
{agent_type:0 for agent_type in self.agent_types},
self.Testing.diagnostic_test_type:
{agent_type:0 for agent_type in self.agent_types}}
self.undetected_infections = 0
self.predetected_infections = 0
self.pending_test_infections = 0
self.quarantine_counters = {agent_type:0 for agent_type in agent_types.keys()}
self.false_negative = 0
# data collectors to save population counts and agent states every
# time step
model_reporters = {
'N_diagnostic_tests':get_N_diagnostic_tests,
'N_preventive_screening_tests':get_N_preventive_screening_tests,
'undetected_infections':get_undetected_infections,
'predetected_infections':get_predetected_infections,
'pending_test_infections':get_pending_test_infections
}
for agent_type in self.agent_types:
model_reporters.update({
'diagnostic_test_detected_infections_{}'.format(agent_type):\
diagnostic_test_detected_infections_funcs[agent_type]
})
model_reporters.update({
'preventive_test_detected_infections_{}'.format(agent_type):\
preventive_test_detected_infections_funcs[agent_type]
})
self.datacollector = DataCollector(
model_reporters=model_reporters,
agent_reporters=
{
'infection_state': get_infection_state,
'quarantine_state': get_quarantine_state
})
## transmission risk modifiers
def get_transmission_risk_contact_type_modifier(self, source, target):
# construct the edge key as combination between agent IDs and weekday
n1 = source.ID
n2 = target.ID
tmp = [n1, n2]
tmp.sort()
n1, n2 = tmp
key = '{}{}d{}'.format(n1, n2, self.weekday)
contact_weight = self.G.get_edge_data(n1, n2, key)['weight']
# the link weight is a multiplicative modifier of the link strength.
# contacts of type "close" have, by definition, a weight of 1. Contacts
# of type intermediate, far or very far have a weight < 1 and therefore
# are less likely to transmit an infection. For example, if the contact
# type far has a weight of 0.2, a contact of type far has only a 20%
# chance of transmitting an infection, when compared to a contact of
# type close. To calculate the probability of success p in the Bernoulli
# trial, we need to reduce the base risk (or base probability of success)
# by the modifications introduced by preventive measures. These
# modifications are formulated in terms of "probability of failure", or
# "q". A low contact weight has a high probability of failure, therefore
# we return q = 1 - contact_weight here.
q1 = 1 - contact_weight
return q1
def get_transmission_risk_age_modifier_transmission(self, source):
'''linear function such that at age 18 the risk is that of an adult (=1).
The slope of the line needs to be calibrated.
'''
age = source.age
max_age = 18
if age <= max_age:
age_weight = self.age_transmission_risk_discount['slope'] * \
np.abs(age - max_age) + self.age_transmission_risk_discount['intercept']
# The age weight can be interpreted as multiplicative factor that
# reduces the chance for transmission with decreasing age. The slope
# of the age_transmission_discount function is the decrease (in % of
# the transmission risk for an 18 year old or above) of transmission
# risk with every year a person is younger than 18 (the intercept is
# 1 by definition).
# To calculate the probability of success p in the Bernoulli
# trial, we need to reduce the base risk (or base probability of
# success) by the modifications introduced by preventive measures.
# These modifications are formulated in terms of "probability of
# failure", or "q". A low age weight has a high probability of
# failure, therefore we return q = 1 - age_weight here.
q2 = 1 - age_weight
else:
q2 = 0
return q2
def get_transmission_risk_age_modifier_reception(self, target):
'''linear function such that at age 18 the risk is that of an adult (=1).
The slope of the line needs to be calibrated.
'''
age = target.age
max_age = 18
if age <= max_age:
age_weight = self.age_transmission_risk_discount['slope'] * \
np.abs(age - max_age) + self.age_transmission_risk_discount['intercept']
# see description in get_transmission_risk_age_modifier_transmission
q3 = 1 - age_weight
else:
q3 = 0
return q3
# infectiousness is constant and high until symptom onset and then
# decreases monotonically until agents are not infectious anymore
# at the end of the infection_duration
def get_transmission_risk_progression_modifier(self, source):
if source.days_since_exposure < source.exposure_duration:
progression_weight = 0
elif source.days_since_exposure <= source.time_until_symptoms:
progression_weight = 1
elif source.days_since_exposure > source.time_until_symptoms and \
source.days_since_exposure <= source.infection_duration:
# we add 1 in the denominator, such that the source is also
# (slightly) infectious on the last day of the infection_duration
progression_weight = \
(source.days_since_exposure - source.time_until_symptoms) / \
(source.infection_duration - source.time_until_symptoms + 1)
else:
progression_weight = 0
# see description in get_transmission_risk_age_modifier_transmission
q4 = 1 - progression_weight
return q4
def get_transmission_risk_subclinical_modifier(self, source):
if source.symptomatic_course == False:
subclinical_weight = self.subclinical_modifier
else:
subclinical_weight = 1
# see description in get_transmission_risk_age_modifier_transmission
q5 = 1 - subclinical_weight
return q5
def get_transmission_risk_exhale_modifier(self, source):
if source.mask:
exhale_weight = self.mask_filter_efficiency['exhale']
else:
exhale_weight = 1
# see description in get_transmission_risk_age_modifier_transmission
q6 = 1 - exhale_weight
return q6
def get_transmission_risk_inhale_modifier(self, target):
if target.mask:
inhale_weight = self.mask_filter_efficiency['inhale']
else:
inhale_weight = 1
# see description in get_transmission_risk_age_modifier_transmission
q7 = 1 - inhale_weight
return q7
def get_transmission_risk_ventilation_modifier(self):
ventilation_weight = self.transmission_risk_ventilation_modifier
# see description in get_transmission_risk_age_modifier_transmission
q8 = 1 - ventilation_weight
return q8
def get_transmission_risk_vaccination_modifier_reception(self, a):
if a.vaccinated:
q9 = self.transmission_risk_vaccination_modifier['reception']
else:
q9 = 0
return q9
def get_transmission_risk_vaccination_modifier_transmission(self, a):
if a.vaccinated:
q10 = self.transmission_risk_vaccination_modifier['transmission']
else:
q10 = 0
return q10
def test_agent(self, a, test_type):
a.tested = True
a.pending_test = test_type
if test_type == self.Testing.diagnostic_test_type:
self.number_of_diagnostic_tests += 1
else:
self.number_of_preventive_screening_tests += 1
if a.exposed:
# tests that happen in the period of time in which the agent is
# exposed but not yet infectious.
# Note: tests[test_type]['time_until_testable'] is negative for
# tests that can detect an infection before agents become infectious
if a.days_since_exposure >= a.exposure_duration + \
self.Testing.tests[test_type]['time_until_testable']:
if self.verbosity > 1:
print('{} {} sent positive sample (even though not infectious yet)'
.format(a.type, a.ID))
a.sample = 'positive'
self.predetected_infections += 1
self.positive_tests[test_type][a.type] += 1
else:
if self.verbosity > 1: print('{} {} sent negative sample'
.format(a.type, a.ID))
a.sample = 'negative'
elif a.infectious:
# tests that happen in the period of time in which the agent is
# infectious and the infection is detectable by a given test
# Note: tests[test_type]['time_until_testable'] is negative for
# tests that can detect an infection before agents become
# infectious. tests[test_type]['time_testable'] is negative for
# tests that cease to detect an infection before agents stop being
# infectious
if a.days_since_exposure >= a.exposure_duration + \
self.Testing.tests[test_type]['time_until_testable'] and \
a.days_since_exposure <= a.infection_duration + \
self.Testing.tests[test_type]['time_testable']:
if self.verbosity > 1:
print('{} {} sent positive sample'.format(a.type, a.ID))
a.sample = 'positive'
self.positive_tests[test_type][a.type] += 1
# track the undetected infections to assess how important they are
# for infection spread
else:
if self.verbosity > 1:
print('{} {} sent negative sample (even though infectious)'
.format(a.type, a.ID))
a.sample = 'negative'
self.undetected_infections += 1
else:
if self.verbosity > 1: print('{} {} sent negative sample'
.format(a.type, a.ID))
a.sample = 'negative'
# for same-day testing, immediately act on the results of the test
if a.days_since_tested >= self.Testing.tests[test_type]['time_until_test_result']:
a.act_on_test_result()
def screen_agents(self, agent_group, test_type, screen_type):
# only test agents that have not been tested already in this simulation
# step and that are not already known positive cases
if self.verbosity > 0:
print('initiating {} {} screen'\
.format(screen_type, agent_group))
untested_agents = [a for a in self.schedule.agents if
(a.tested == False and a.known_positive == False
and a.type == agent_group)]
if len(untested_agents) > 0:
self.screened_agents[screen_type][agent_group] = True
self.days_since_last_agent_screen[agent_group] = 0
# only test agents if they participate in voluntary testing
if screen_type == 'preventive':
for a in untested_agents:
if a.voluntary_testing:
self.test_agent(a, test_type)
else:
if self.verbosity > 1:
print('not testing {} {}, not participating in voluntary testing'\
.format(agent_group, a.ID))
else:
for a in untested_agents:
self.test_agent(a, test_type)
if self.verbosity > 0:
print()
else:
if self.verbosity > 0:
print('no agents tested because all agents have already been tested')
# the type of the test used in the pending test result is stored in the
# variable pending_test
def collect_test_results(self):
agents_with_test_results = [a for a in self.schedule.agents if
(a.pending_test and
a.days_since_tested >= self.Testing.tests[a.pending_test]['time_until_test_result'])]
return agents_with_test_results
def trace_contacts(self, a):
if a.quarantined == False:
a.quarantined = True
a.quarantine_start = self.Nstep
if self.verbosity > 0:
print('qurantined {} {}'.format(a.type, a.ID))
# find all agents that share edges with the agent
# that are classified as K1 contact types in the testing
# strategy
if a in self.G.nodes():
K1_contacts = [e[1] for e in self.G.edges(a.ID, data=True) if
e[2]['contact_type'] in self.Testing.K1_contact_types]
K1_contacts = [a for a in self.schedule.agents if a.ID in K1_contacts]
for K1_contact in K1_contacts:
if self.verbosity > 0:
print('quarantined {} {} (K1 contact of {} {})'
.format(K1_contact.type, K1_contact.ID, a.type, a.ID))
K1_contact.quarantined = True
K1_contact.quarantine_start = self.Nstep
def test_symptomatic_agents(self):
# find symptomatic agents that have not been tested yet and are not
# in quarantine and test them
newly_symptomatic_agents = np.asarray([a for a in self.schedule.agents
if (a.symptoms == True and a.tested == False and a.quarantined == False)])
for a in newly_symptomatic_agents:
# all symptomatic agents are quarantined by default
if self.verbosity > 0:
print('quarantined: {} {}'.format(a.type, a.ID))
a.quarantined = True
a.quarantine_start = self.Nstep
self.test_agent(a, self.Testing.diagnostic_test_type)
def quarantine_contacts(self):
# trace and quarantine contacts of newly positive agents
if len(self.newly_positive_agents) > 0:
if self.verbosity > 0: print('new positive test(s) from {}'
.format([a.ID for a in self.newly_positive_agents]))
# send all K1 contacts of positive agents into quarantine
for a in self.newly_positive_agents:
self.trace_contacts(a)
# indicate that a screen should happen because there are new
# positive test results
self.new_positive_tests = True
self.newly_positive_agents = []
else:
self.new_positive_tests = False
def step(self):
self.weekday = (self.Nstep + self.weekday_offset) % 7 + 1
# if the connection graph is time-resloved, set the graph that is
# used to determine connections in this step to the sub-graph corres-
# ponding to the current day of the week
if self.dynamic_connections:
self.G = self.weekday_connections[self.weekday]
if self.verbosity > 0:
print('weekday {}'.format(self.weekday))
if self.testing:
for agent_type in self.agent_types:
for screen_type in ['reactive', 'follow_up', 'preventive']:
self.screened_agents[screen_type][agent_type] = False
if self.verbosity > 0:
print('* testing and tracing *')
self.test_symptomatic_agents()
# collect and act on new test results
agents_with_test_results = self.collect_test_results()
for a in agents_with_test_results:
a.act_on_test_result()
self.quarantine_contacts()
# screening:
# a screen should take place if
# (a) there are new positive test results
# (b) as a follow-up screen for a screen that was initiated because
# of new positive cases
# (c) if there is a preventive screening policy and it is time for
# a preventive screen in a given agent group
# (a)
if (self.testing == 'background' or self.testing == 'background+preventive')\
and self.new_positive_tests == True:
for agent_type in self.screening_agents:
self.screen_agents(
agent_type, self.Testing.diagnostic_test_type, 'reactive')
self.scheduled_follow_up_screen[agent_type] = True
# (b)
elif (self.testing == 'background' or self.testing == 'background+preventive') and \
self.Testing.follow_up_testing_interval != None and \
sum(list(self.scheduled_follow_up_screen.values())) > 0:
for agent_type in self.screening_agents:
if self.scheduled_follow_up_screen[agent_type] and\
self.days_since_last_agent_screen[agent_type] >=\
self.Testing.follow_up_testing_interval:
self.screen_agents(
agent_type, self.Testing.diagnostic_test_type, 'follow_up')
else:
if self.verbosity > 0:
print('not initiating {} follow-up screen (last screen too close)'\
.format(agent_type))
# (c)
elif (self.testing == 'preventive' or self.testing == 'background+preventive')and \
np.any(list(self.Testing.screening_intervals.values())):
for agent_type in self.screening_agents:
interval = self.Testing.screening_intervals[agent_type]
assert interval in [7, 3, 2, None], \
'testing interval {} for agent type {} not supported!'\
.format(interval, agent_type)
# (c.1) testing every 7 days = testing on Mondays
if interval == 7 and self.weekday == 1:
self.screen_agents(agent_type,
self.Testing.preventive_screening_test_type,\
'preventive')
# (c.2) testing every 3 days = testing on Mo & Turs
elif interval == 3 and self.weekday in [1, 4]:
self.screen_agents(agent_type,
self.Testing.preventive_screening_test_type,\
'preventive')
# (c.3) testing every 2 days = testing on Mo, Wed & Fri
elif interval == 2 and self.weekday in [1, 3, 5]:
self.screen_agents(agent_type,
self.Testing.preventive_screening_test_type,\
'preventive')
# No interval specified = no testing, even if testing
# mode == preventive
elif interval == None:
pass
else:
if self.verbosity > 0:
print('not initiating {} preventive screen (wrong weekday)'\
.format(agent_type))
else:
# do nothing
pass
for agent_type in self.agent_types:
if not (self.screened_agents['reactive'][agent_type] or \
self.screened_agents['follow_up'][agent_type] or \
self.screened_agents['preventive'][agent_type]):
self.days_since_last_agent_screen[agent_type] += 1
if self.verbosity > 0: print('* agent interaction *')
self.datacollector.collect(self)
self.schedule.step()
self.Nstep += 1
| 44.869792
| 98
| 0.634862
|
import numpy as np
import networkx as nx
from math import gamma
from scipy.optimize import root_scalar
from mesa import Model
from mesa.time import RandomActivation, SimultaneousActivation
from mesa.datacollection import DataCollector
from scseirx.testing_strategy import Testing
):
return model.number_of_diagnostic_tests
def get_N_preventive_screening_tests(model):
    # data collector callback: cumulative number of preventive screening tests
    return model.number_of_preventive_screening_tests
def get_infection_state(agent):
    """Data collector callback: return an agent's state as a string."""
    if agent.exposed == True:
        return 'exposed'
    if agent.infectious == True:
        return 'infectious'
    if agent.recovered == True:
        return 'recovered'
    return 'susceptible'
def get_quarantine_state(agent):
    """Data collector callback: True when the agent is quarantined."""
    return True if agent.quarantined == True else False
def get_undetected_infections(model):
    # data collector callback: infectious agents that returned a false
    # negative sample (see test_agent)
    return model.undetected_infections
def get_predetected_infections(model):
    # data collector callback: infections detected while the agent was
    # exposed but not yet infectious (see test_agent)
    return model.predetected_infections
def get_pending_test_infections(model):
    # data collector callback: presumably transmissions that occurred while
    # a test result was pending -- the counter is incremented elsewhere;
    # confirm against the agent implementation
    return model.pending_test_infections
# data collector callbacks: per-agent-type counts of infections detected by
# the diagnostic (symptom-triggered) test technology
def get_diagnostic_test_detected_infections_student(model):
    return model.positive_tests[model.Testing.diagnostic_test_type]['student']
def get_diagnostic_test_detected_infections_teacher(model):
    return model.positive_tests[model.Testing.diagnostic_test_type]['teacher']
def get_diagnostic_test_detected_infections_family_member(model):
    return model.positive_tests[model.Testing.diagnostic_test_type]['family_member']
def get_diagnostic_test_detected_infections_resident(model):
    return model.positive_tests[model.Testing.diagnostic_test_type]['resident']
def get_diagnostic_test_detected_infections_employee(model):
    return model.positive_tests[model.Testing.diagnostic_test_type]['employee']
def get_diagnostic_test_detected_infections_unistudent(model):
    return model.positive_tests[model.Testing.diagnostic_test_type]['unistudent']
def get_diagnostic_test_detected_infections_lecturer(model):
    return model.positive_tests[model.Testing.diagnostic_test_type]['lecturer']
# dispatch table mapping agent types to their data collector callback; used
# to assemble the model_reporters dict in the model constructor
diagnostic_test_detected_infections_funcs = {
    'student':get_diagnostic_test_detected_infections_student,
    'teacher':get_diagnostic_test_detected_infections_teacher,
    'family_member':get_diagnostic_test_detected_infections_family_member,
    'resident':get_diagnostic_test_detected_infections_resident,
    'employee':get_diagnostic_test_detected_infections_employee,
    'unistudent':get_diagnostic_test_detected_infections_unistudent,
    'lecturer':get_diagnostic_test_detected_infections_lecturer
}
# data collector callbacks: per-agent-type counts of infections detected by
# the preventive screening test technology
def get_preventive_test_detected_infections_student(model):
    return model.positive_tests[model.Testing.preventive_screening_test_type]['student']
def get_preventive_test_detected_infections_teacher(model):
    return model.positive_tests[model.Testing.preventive_screening_test_type]['teacher']
def get_preventive_test_detected_infections_family_member(model):
    return model.positive_tests[model.Testing.preventive_screening_test_type]['family_member']
def get_preventive_test_detected_infections_resident(model):
    return model.positive_tests[model.Testing.preventive_screening_test_type]['resident']
def get_preventive_test_detected_infections_employee(model):
    return model.positive_tests[model.Testing.preventive_screening_test_type]['employee']
def get_preventive_test_detected_infections_unistudent(model):
    return model.positive_tests[model.Testing.preventive_screening_test_type]['unistudent']
def get_preventive_test_detected_infections_lecturer(model):
    return model.positive_tests[model.Testing.preventive_screening_test_type]['lecturer']
# dispatch table mapping agent types to their data collector callback; used
# to assemble the model_reporters dict in the model constructor
preventive_test_detected_infections_funcs = {
    'student':get_preventive_test_detected_infections_student,
    'teacher':get_preventive_test_detected_infections_teacher,
    'family_member':get_preventive_test_detected_infections_family_member,
    'resident':get_preventive_test_detected_infections_resident,
    'employee':get_preventive_test_detected_infections_employee,
    'unistudent':get_preventive_test_detected_infections_unistudent,
    'lecturer':get_preventive_test_detected_infections_lecturer
}
def check_positive(var):
    """Validate that *var* is non-negative and pass it through unchanged."""
    if not var >= 0:
        raise AssertionError('negative number')
    return var
def check_bool(var):
    """Validate that *var* is exactly of type bool; pass it through."""
    if type(var) is not bool:
        raise AssertionError('not a bool')
    return var
def check_positive_int(var):
    """Validate that *var* is a non-negative int (or None) and return it.

    None is passed through unchanged so that "unset" parameters (e.g.
    disabled screening intervals) remain allowed.
    """
    if var is None:  # fix: identity comparison instead of '== None'
        return var
    # NOTE: an exact type check is kept on purpose so that bools (which are
    # ints in Python) and floats are rejected, matching prior behavior.
    assert type(var) is int, 'not an integer'
    assert var >= 0, 'negative number'
    return var
def check_contact_type_dict(var):
    """Validate a {contact type: numeric weight} dictionary; pass it through."""
    allowed = {'very_far', 'far', 'intermediate', 'close'}
    if type(var) is not dict:
        raise AssertionError('not a dictionary')
    if not set(var).issubset(allowed):
        raise AssertionError(
            'does not contain the correct contact types (has to be very_far, far, intermediate or close)')
    if not all(isinstance(weight, (int, float)) for weight in var.values()):
        raise AssertionError('contact type weights are not numeric')
    return var
def check_K1_contact_types(var):
    """Validate that every entry is a known contact type; pass through."""
    allowed = ('very_far', 'far', 'intermediate', 'close')
    for contact_type in var:
        if contact_type not in allowed:
            raise AssertionError('K1 contact type not recognised')
    return var
def check_testing(var):
    """Validate the testing strategy (a known mode string, or False to
    disable testing entirely); pass it through."""
    allowed = ['diagnostic', 'background', 'preventive',
               'background+preventive', False]
    if var not in allowed:
        raise AssertionError('unknown testing mode: {}'.format(var))
    return var
def check_probability(var):
    """Validate that *var* is a probability in [0, 1]; pass it through."""
    # exact float type check kept on purpose; the integers 0 and 1 are
    # accepted as the degenerate cases
    if not (type(var) is float or var == 0 or var == 1):
        raise AssertionError('{} not a float'.format(var))
    if var < 0:
        raise AssertionError('probability negative')
    if var > 1:
        raise AssertionError('probability larger than 1')
    return var
def check_graph(var):
    """Validate the contact network: a non-empty networkx (Multi)Graph whose
    edges all carry a recognised 'contact_type' attribute; pass it through."""
    if type(var) not in (nx.Graph, nx.MultiGraph):
        raise AssertionError('not a networkx graph')
    if len(var.nodes) == 0:
        raise AssertionError('graph has no nodes')
    if len(var.edges) == 0:
        raise AssertionError('graph has no edges')
    # collect the distinct contact types present on the edges and make sure
    # each one is known
    contact_types = {data['contact_type'] for *_, data in var.edges(data=True)}
    for contact_type in contact_types:
        if contact_type not in {'very_far', 'far', 'intermediate', 'close'}:
            raise AssertionError(
                'contact type {} not recognised'.format(contact_type))
    return var
def check_index_case(var, agent_types):
    """Validate the index case mode: either one of the agent types (single
    index case) or 'continuous'; pass it through. *agent_types* is copied,
    not mutated."""
    allowed = list(agent_types) + ['continuous']
    if var not in allowed:
        raise AssertionError('unknown index case mode')
    return var
def check_discount(var):
    """Validate an age-discount specification {'slope': s, 'intercept': i}.

    The slope must be None (no discount) or in [-1, 0]; the intercept must
    lie in (0, 1]. Returns the validated dictionary.
    """
    if var['slope'] is not None:  # fix: identity comparison for None
        assert var['slope'] <= 0, 'slope needs to be <= 0 or None'
        assert np.abs(var['slope']) <= 1, \
            'absolute value of slope needs to be <= 1'
    # fix: the original asserted only truthiness of the intercept twice,
    # so the bounds stated in the messages were never enforced
    assert var['intercept'] > 0, 'intercept needs to be positive'
    assert var['intercept'] <= 1, 'intercept needs to be <= 1'
    return var
def get_weibull_shape(k, mu, var):
    """Objective function whose root (in k) gives the Weibull shape
    parameter matching mean *mu* and variance *var*; used with a scalar
    root finder."""
    mean_square_ratio = gamma(1 + 2/k) / gamma(1 + 1/k)**2
    return var / mu**2 - mean_square_ratio + 1
def get_weibull_scale(mu, k):
    """Return the Weibull scale parameter for mean *mu* and shape *k*."""
    unit_weibull_mean = gamma(1 + 1/k)
    return mu / unit_weibull_mean
def weibull_two_param(shape, scale):
    """Draw a single sample from a two-parameter Weibull distribution
    (numpy only exposes the one-parameter form, so scale by hand)."""
    return np.random.weibull(shape) * scale
class SEIRX(Model):
    '''
    Agent-based SEIR(X) model of infection spread in a contact network,
    including testing & tracing, quarantine, masks, ventilation and
    vaccination as intervention measures. Built on mesa's Model class;
    agents interact along the edges of a networkx (Multi)Graph.
    '''
    # NOTE(review): several defaults below are mutable (lists / dicts).
    # They are shared across all instances — safe only as long as they are
    # never mutated in place; confirm no caller modifies them.
    def __init__(self, G,
        verbosity = 0,
        base_transmission_risk = 0.05,
        testing='diagnostic',
        exposure_duration = [5.0, 1.9],
        time_until_symptoms = [6.4, 0.8],
        infection_duration = [10.91, 3.95],
        quarantine_duration = 10,
        subclinical_modifier = 0.6,
        infection_risk_contact_type_weights = {
            'very_far': 0.1,
            'far': 0.25,
            'intermediate': 0.5,
            'close': 1},
        K1_contact_types = ['close'],
        diagnostic_test_type = 'one_day_PCR',
        preventive_screening_test_type = 'same_day_antigen',
        follow_up_testing_interval = None,
        liberating_testing = False,
        index_case = 'teacher',
        agent_types = {
            'teacher':      {'screening_interval': None,
                             'index_probability': 0,
                             'mask':False,
                             'vaccination_ratio': 0},
            'student':      {'screening_interval': None,
                             'index_probability': 0,
                             'mask':False,
                             'vaccination_ratio': 0},
            'family_member':{'screening_interval': None,
                             'index_probability': 0,
                             'mask':False,
                             'vaccination_ratio': 0}},
        age_transmission_risk_discount = \
             {'slope':-0.02,
              'intercept':1},
        age_symptom_modification = \
             {'slope':-0.02545,
              'intercept':0.854545},
        mask_filter_efficiency = {'exhale':0, 'inhale':0},
        transmission_risk_ventilation_modifier = 0,
        transmission_risk_vaccination_modifier = {
            'reception':1,
            'transmission':0},
        seed = None):
        '''
        Set up the model: validate all parameters, attach edge weights to
        the contact graph G, instantiate one agent per graph node (drawing
        per-agent epidemiological parameters from Weibull distributions),
        configure the testing strategy, seed the index case (in single
        index case mode) and register the mesa DataCollector.

        Parameters mirror the attribute names they are stored under; the
        three epi parameters (exposure_duration, time_until_symptoms,
        infection_duration) are each either a fixed int or a [mean, std]
        pair of a distribution.
        '''
        # mesa's model RNG (self.random) is distinct from numpy's random
        # module. Therefore, we need to initialize the numpy random number
        # generator with the given seed as well
        if seed != None:
            np.random.seed(seed)

        # sets the (daily) transmission risk for a household contact without
        # any precautions. Target infection ratios are taken from literature
        # and the value of the base_transmission_risk is calibrated such that
        # the simulation produces the correct infection ratios in a household
        # setting with the given distributions for epidemiological parameters
        # of agents
        self.base_transmission_risk = base_transmission_risk

        # sets the level of detail of text output to stdout (0 = no output)
        self.verbosity = check_positive_int(verbosity)

        # flag to turn off the testing & tracing strategy
        self.testing = check_testing(testing)
        self.running = True  # needed for the batch runner implemented by mesa

        # set the interaction mode to simultaneous activation
        self.schedule = SimultaneousActivation(self)

        # internal step counter used to launch screening tests
        self.Nstep = 0

        # since we may have weekday-specific contact networks, we need
        # to keep track of the day of the week. Since the index case
        # per default is introduced at step 0 in index case mode, we
        # need to offset the starting weekday by a random number of weekdays
        # to prevent artifacts from always starting on the same day of the week
        self.weekday_offset = self.random.randint(1, 8)
        # NOTE(review): at initialization this can be 8 (no modulo applied
        # here); step() recomputes weekday as (Nstep + offset) % 7 + 1 —
        # confirm the un-wrapped initial value is intended
        self.weekday = self.Nstep + self.weekday_offset

        ## epidemiological parameters: can be either a single integer or the
        # mean and standard deviation of a distribution
        self.epi_params = {}
        # counter to track the number of pathological parameter combinations
        # that had to be re-rolled (only here for debugging and control reasons)
        self.param_rerolls = 0

        # convert each [mean, std] epi parameter into the [shape, scale]
        # parameters of the matching Weibull distribution (via root finding);
        # fixed integer parameters are stored as-is
        for param, param_name in zip([exposure_duration, time_until_symptoms,
            infection_duration],['exposure_duration', 'time_until_symptoms',
            'infection_duration']):

            if isinstance(param, int):
                self.epi_params[param_name] = check_positive_int(param)

            elif isinstance(param, list) and len(param) == 2:
                mu = check_positive(param[0])
                var = check_positive(param[1]**2)
                shape = root_scalar(get_weibull_shape, args=(mu, var),
                        method='toms748', bracket=[0.2, 500]).root
                scale = get_weibull_scale(mu, shape)
                self.epi_params[param_name] = [shape, scale]
            else:
                # NOTE(review): '+' binds after .format() here, so the
                # placeholder is never filled — the message prints a literal
                # '{}'; consider a single f-string / format over the whole text
                print('{} format not recognized, should be either a single '+\
                    'int or a tuple of two positive numbers'.format(param_name))

        # duration of quarantine
        self.quarantine_duration = check_positive_int(quarantine_duration)

        self.infection_risk_area_weights = check_contact_type_dict(
            infection_risk_contact_type_weights)

        # modifier for infectiosness for asymptomatic cases
        self.subclinical_modifier = check_positive(subclinical_modifier)
        # modifiers for the infection risk, depending on contact type
        self.infection_risk_contact_type_weights = infection_risk_contact_type_weights

        # modifications for age-dependent transmission and reception risks and
        # symptom probabilities
        self.age_transmission_risk_discount = \
            check_discount(age_transmission_risk_discount)
        self.age_symptom_modification = age_symptom_modification
        #check_discount(age_symptom_modification)
        self.mask_filter_efficiency = mask_filter_efficiency
        self.transmission_risk_ventilation_modifier = \
            transmission_risk_ventilation_modifier
        self.transmission_risk_vaccination_modifier = \
            transmission_risk_vaccination_modifier

        ## agents and their interactions
        # interaction graph of agents
        self.G = check_graph(G)
        # add weights as edge attributes so they can be visualised easily
        if type(self.G) == nx.MultiGraph:
            for (u, v, key, contact_type) in self.G.edges(keys=True,
                    data='contact_type'):
                self.G[u][v][key]['weight'] = \
                    self.infection_risk_contact_type_weights[contact_type]
        else:
            for e in G.edges(data=True):
                G[e[0]][e[1]]['weight'] = self.infection_risk_contact_type_weights\
                    [G[e[0]][e[1]]['contact_type']]

        # extract the different agent types from the contact graph
        self.agent_types = list(agent_types.keys())
        # dictionary of available agent classes with agent types and classes
        # (imported lazily so only the classes that are actually used load)
        self.agent_classes = {}
        if 'resident' in agent_types:
            from scseirx.agent_resident import resident
            self.agent_classes['resident'] = resident
        if 'employee' in agent_types:
            from scseirx.agent_employee import employee
            self.agent_classes['employee'] = employee
        if 'student' in agent_types:
            from scseirx.agent_student import student
            self.agent_classes['student'] = student
        if 'teacher' in agent_types:
            from scseirx.agent_teacher import teacher
            self.agent_classes['teacher'] = teacher
        if 'family_member' in agent_types:
            from scseirx.agent_family_member import family_member
            self.agent_classes['family_member'] = family_member
        if 'lecturer' in agent_types:
            from scseirx.agent_lecturer import lecturer
            self.agent_classes['lecturer'] = lecturer
        if 'unistudent' in agent_types:
            from scseirx.agent_unistudent import unistudent
            self.agent_classes['unistudent'] = unistudent

        ## set agent characteristics for all agent groups
        # list of agent characteristics
        params = ['screening_interval','index_probability', 'mask' ,'vaccination_ratio',
                  'voluntary_testing_rate']

        # default values that are used in case a characteristic is not specified
        # for an agent group
        defaults = {'screening_interval':None,
                    'index_probability':0,
                    'mask':False,
                    'vaccination_ratio':0,
                    'voluntary_testing_rate':1
                    }

        # sanity checks that are applied to parameters passed to the class
        # constructor to make sure they conform to model expectations
        check_funcs = [check_positive_int, check_probability, check_bool,
                       check_probability, check_probability]

        # member dicts that store the parameter values for each agent group
        self.screening_intervals = {}
        self.index_probabilities = {}
        self.masks = {}
        self.vaccination_probabilities = {}
        self.voluntary_testing_rates = {}

        param_dicts = [self.screening_intervals, self.index_probabilities,
            self.masks, self.vaccination_probabilities, self.voluntary_testing_rates]

        # iterate over all possible agent parameters and agent groups: set the
        # respective value to the value passed through the constructor or to
        # the default value if no value has been passed
        for param,param_dict,check_func in zip(params,param_dicts,check_funcs):
            for at in self.agent_types:
                try:
                    param_dict.update({at:check_func(agent_types[at][param])})
                except KeyError:
                    param_dict.update({at:defaults[param]})

        # pass all parameters relevant for the testing strategy to the testing
        # class. NOTE: this separation is not a strictly necessary design
        # decision but I like to keep the parameters related to testing and
        # tracing in a separate place
        self.Testing = Testing(self, diagnostic_test_type,
             preventive_screening_test_type,
             check_positive_int(follow_up_testing_interval),
             self.screening_intervals,
             check_bool(liberating_testing),
             check_K1_contact_types(K1_contact_types),
             verbosity)

        # specifies either continuous probability for index cases in agent
        # groups based on the 'index_probability' for each agent group, or a
        # single (randomly chosen) index case in the passed agent group
        self.index_case = check_index_case(index_case, self.agent_types)

        self.num_agents = {}

        ## add agents
        # extract the agent nodes from the graph and add them to the scheduler
        for agent_type in self.agent_types:
            IDs = [x for x,y in G.nodes(data=True) if y['type'] == agent_type]
            self.num_agents.update({agent_type:len(IDs)})

            # get the agent locations (units) from the graph node attributes
            units = [self.G.nodes[ID]['unit'] for ID in IDs]

            # determine the agents that will be vaccinated, given the
            # vaccination ratio of the respective agent group
            vaccination_status = np.asarray([False] * len(IDs))
            if self.vaccination_probabilities[agent_type] > 0:
                n = round(self.vaccination_probabilities[agent_type] * len(IDs))
                idx = list(range(len(IDs)))
                rnd_idx = np.asarray(self.random.sample(idx, n))
                vaccination_status[rnd_idx] = True

            for ID, unit, vaccinated in zip(IDs, units, vaccination_status):

                tmp_epi_params = {}
                # for each of the three epidemiological parameters, check if
                # the parameter is an integer (if yes, pass it directly to the
                # agent constructor), or if it is specified by the shape and
                # scale parameters of a Weibull distribution. In the latter
                # case, draw a new number for every agent from the distribution
                # NOTE: parameters drawn from the distribution are rounded to
                # the nearest integer
                while True:
                    for param_name, param in self.epi_params.items():
                        if isinstance(param, int):
                            tmp_epi_params[param_name] = param

                        else:
                            tmp_epi_params[param_name] = \
                                round(weibull_two_param(param[0], param[1]))

                    # re-roll until the drawn durations are mutually
                    # consistent (exposure > 0, symptoms no earlier than end
                    # of exposure, infection outlasts exposure)
                    if tmp_epi_params['exposure_duration'] > 0 and \
                       tmp_epi_params['time_until_symptoms'] >= \
                       tmp_epi_params['exposure_duration'] and\
                       tmp_epi_params['infection_duration'] > \
                       tmp_epi_params['exposure_duration']:
                       break
                    else:
                        self.param_rerolls += 1
                        if verbosity > 1:
                            print('pathological epi-param case found!')
                            print(tmp_epi_params)

                # check if the agent participates in voluntary testing
                p = self.voluntary_testing_rates[agent_type]
                voluntary_testing = np.random.choice([True, False],
                                                     p=[p, 1-p])

                # construct the agent object
                a = self.agent_classes[agent_type](ID, unit, self,
                                  tmp_epi_params['exposure_duration'],
                                  tmp_epi_params['time_until_symptoms'],
                                  tmp_epi_params['infection_duration'],
                                  vaccinated,
                                  voluntary_testing,
                                  verbosity)
                self.schedule.add(a)

        # infect the first agent in single index case mode
        if self.index_case != 'continuous':
            infection_targets = [
                a for a in self.schedule.agents if a.type == index_case]
            # pick a random agent to infect in the selected agent group
            target = self.random.randint(0, len(infection_targets) - 1)
            infection_targets[target].exposed = True
            if self.verbosity > 0:
                print('{} exposed: {}'.format(index_case,
                    infection_targets[target].ID))

        # list of agents that were tested positive this turn
        self.newly_positive_agents = []
        # flag that indicates if there were new positive tests this turn
        self.new_positive_tests = False
        # dictionary of flags that indicate whether a given agent group has
        # been screened this turn
        self.screened_agents= {
            'reactive':{agent_type: False for agent_type in self.agent_types},
            'follow_up':{agent_type: False for agent_type in self.agent_types},
            'preventive':{agent_type: False for agent_type in self.agent_types}}

        # dictionary of counters that count the days since a given agent group
        # was screened. Initialized differently for different index case modes
        if (self.index_case == 'continuous') or \
           (not np.any(list(self.Testing.screening_intervals.values()))):
            self.days_since_last_agent_screen = {agent_type: 0 for agent_type in
                self.agent_types}
        # NOTE: if we initialize these variables with 0 in the case of a single
        # index case, we introduce a bias since in 'single index case mode' the
        # first index case will always become exposed in step 0. To realize
        # random states of the preventive sceening procedure with respect to the
        # incidence of the index case, we have to randomly pick the days since
        # the last screen for the agent group from which the index case is
        else:
            self.days_since_last_agent_screen = {}
            for agent_type in self.agent_types:
                if self.Testing.screening_intervals[agent_type] != None:
                    self.days_since_last_agent_screen.update({
                        agent_type: self.random.choice(range(0,
                            self.Testing.screening_intervals[agent_type] + 1))})
                else:
                    self.days_since_last_agent_screen.update({agent_type: 0})

        # dictionary of flags that indicates whether a follow-up screen for a
        # given agent group is scheduled
        self.scheduled_follow_up_screen = {agent_type: False for agent_type in
            self.agent_types}

        # counters
        self.number_of_diagnostic_tests = 0
        self.number_of_preventive_screening_tests = 0
        # positive test counts keyed by test type, then agent type
        self.positive_tests = {self.Testing.preventive_screening_test_type:
            {agent_type:0 for agent_type in self.agent_types},
                               self.Testing.diagnostic_test_type:
            {agent_type:0 for agent_type in self.agent_types}}

        self.undetected_infections = 0
        self.predetected_infections = 0
        self.pending_test_infections = 0
        self.quarantine_counters = {agent_type:0 for agent_type in agent_types.keys()}
        self.false_negative = 0

        # data collectors to save population counts and agent states every
        # time step
        model_reporters = {
            'N_diagnostic_tests':get_N_diagnostic_tests,
            'N_preventive_screening_tests':get_N_preventive_screening_tests,
            'undetected_infections':get_undetected_infections,
            'predetected_infections':get_predetected_infections,
            'pending_test_infections':get_pending_test_infections
            }

        # per-agent-group reporters for infections detected by diagnostic
        # and preventive tests
        for agent_type in self.agent_types:
            model_reporters.update({
                'diagnostic_test_detected_infections_{}'.format(agent_type):\
                    diagnostic_test_detected_infections_funcs[agent_type]
            })
            model_reporters.update({
                'preventive_test_detected_infections_{}'.format(agent_type):\
                    preventive_test_detected_infections_funcs[agent_type]
            })

        self.datacollector = DataCollector(
            model_reporters=model_reporters,
            agent_reporters=
                {
                'infection_state': get_infection_state,
                'quarantine_state': get_quarantine_state
                })
## transmission risk modifiers
def get_transmission_risk_contact_type_modifier(self, source, target):
# construct the edge key as combination between agent IDs and weekday
n1 = source.ID
n2 = target.ID
tmp = [n1, n2]
tmp.sort()
n1, n2 = tmp
key = '{}{}d{}'.format(n1, n2, self.weekday)
contact_weight = self.G.get_edge_data(n1, n2, key)['weight']
# the link weight is a multiplicative modifier of the link strength.
# contacts of type "close" have, by definition, a weight of 1. Contacts
# of type intermediate, far or very far have a weight < 1 and therefore
# are less likely to transmit an infection. For example, if the contact
# type far has a weight of 0.2, a contact of type far has only a 20%
# chance of transmitting an infection, when compared to a contact of
# type close. To calculate the probability of success p in the Bernoulli
# trial, we need to reduce the base risk (or base probability of success)
# by the modifications introduced by preventive measures. These
# modifications are formulated in terms of "probability of failure", or
# "q". A low contact weight has a high probability of failure, therefore
# we return q = 1 - contact_weight here.
q1 = 1 - contact_weight
return q1
def get_transmission_risk_age_modifier_transmission(self, source):
age = source.age
max_age = 18
if age <= max_age:
age_weight = self.age_transmission_risk_discount['slope'] * \
np.abs(age - max_age) + self.age_transmission_risk_discount['intercept']
# The age weight can be interpreted as multiplicative factor that
# reduces the chance for transmission with decreasing age. The slope
# of the age_transmission_discount function is the decrease (in % of
# the transmission risk for an 18 year old or above) of transmission
# risk with every year a person is younger than 18 (the intercept is
# 1 by definition).
# To calculate the probability of success p in the Bernoulli
# trial, we need to reduce the base risk (or base probability of
# success) by the modifications introduced by preventive measures.
# These modifications are formulated in terms of "probability of
# failure", or "q". A low age weight has a high probability of
# failure, therefore we return q = 1 - age_weight here.
q2 = 1 - age_weight
else:
q2 = 0
return q2
def get_transmission_risk_age_modifier_reception(self, target):
age = target.age
max_age = 18
if age <= max_age:
age_weight = self.age_transmission_risk_discount['slope'] * \
np.abs(age - max_age) + self.age_transmission_risk_discount['intercept']
# see description in get_transmission_risk_age_modifier_transmission
q3 = 1 - age_weight
else:
q3 = 0
return q3
# infectiousness is constant and high until symptom onset and then
# decreases monotonically until agents are not infectious anymore
# at the end of the infection_duration
def get_transmission_risk_progression_modifier(self, source):
if source.days_since_exposure < source.exposure_duration:
progression_weight = 0
elif source.days_since_exposure <= source.time_until_symptoms:
progression_weight = 1
elif source.days_since_exposure > source.time_until_symptoms and \
source.days_since_exposure <= source.infection_duration:
# we add 1 in the denominator, such that the source is also
# (slightly) infectious on the last day of the infection_duration
progression_weight = \
(source.days_since_exposure - source.time_until_symptoms) / \
(source.infection_duration - source.time_until_symptoms + 1)
else:
progression_weight = 0
# see description in get_transmission_risk_age_modifier_transmission
q4 = 1 - progression_weight
return q4
def get_transmission_risk_subclinical_modifier(self, source):
if source.symptomatic_course == False:
subclinical_weight = self.subclinical_modifier
else:
subclinical_weight = 1
# see description in get_transmission_risk_age_modifier_transmission
q5 = 1 - subclinical_weight
return q5
def get_transmission_risk_exhale_modifier(self, source):
if source.mask:
exhale_weight = self.mask_filter_efficiency['exhale']
else:
exhale_weight = 1
# see description in get_transmission_risk_age_modifier_transmission
q6 = 1 - exhale_weight
return q6
def get_transmission_risk_inhale_modifier(self, target):
if target.mask:
inhale_weight = self.mask_filter_efficiency['inhale']
else:
inhale_weight = 1
# see description in get_transmission_risk_age_modifier_transmission
q7 = 1 - inhale_weight
return q7
def get_transmission_risk_ventilation_modifier(self):
ventilation_weight = self.transmission_risk_ventilation_modifier
# see description in get_transmission_risk_age_modifier_transmission
q8 = 1 - ventilation_weight
return q8
def get_transmission_risk_vaccination_modifier_reception(self, a):
if a.vaccinated:
q9 = self.transmission_risk_vaccination_modifier['reception']
else:
q9 = 0
return q9
def get_transmission_risk_vaccination_modifier_transmission(self, a):
if a.vaccinated:
q10 = self.transmission_risk_vaccination_modifier['transmission']
else:
q10 = 0
return q10
    def test_agent(self, a, test_type):
        '''
        Take a sample from agent *a* with the given test type and decide
        whether the sample will come back positive or negative, depending on
        the agent's infection stage and the detection window of the test.
        Updates the test counters and, for same-day tests, immediately acts
        on the result.
        '''
        a.tested = True
        a.pending_test = test_type
        # bookkeeping: diagnostic vs. preventive test counts
        if test_type == self.Testing.diagnostic_test_type:
            self.number_of_diagnostic_tests += 1
        else:
            self.number_of_preventive_screening_tests += 1

        if a.exposed:
            # tests that happen in the period of time in which the agent is
            # exposed but not yet infectious.
            # Note: tests[test_type]['time_until_testable'] is negative for
            # tests that can detect an infection before agents become infectious
            if a.days_since_exposure >= a.exposure_duration + \
                self.Testing.tests[test_type]['time_until_testable']:
                if self.verbosity > 1:
                    print('{} {} sent positive sample (even though not infectious yet)'
                        .format(a.type, a.ID))
                a.sample = 'positive'
                self.predetected_infections += 1
                self.positive_tests[test_type][a.type] += 1
            else:
                if self.verbosity > 1: print('{} {} sent negative sample'
                    .format(a.type, a.ID))
                a.sample = 'negative'

        elif a.infectious:
            # tests that happen in the period of time in which the agent is
            # infectious and the infection is detectable by a given test
            # Note: tests[test_type]['time_until_testable'] is negative for
            # tests that can detect an infection before agents become
            # infectious. tests[test_type]['time_testable'] is negative for
            # tests that cease to detect an infection before agents stop being
            # infectious
            if a.days_since_exposure >= a.exposure_duration + \
                self.Testing.tests[test_type]['time_until_testable'] and \
               a.days_since_exposure <= a.infection_duration + \
                self.Testing.tests[test_type]['time_testable']:
                if self.verbosity > 1:
                    print('{} {} sent positive sample'.format(a.type, a.ID))
                a.sample = 'positive'
                self.positive_tests[test_type][a.type] += 1

            # track the undetected infections to assess how important they are
            # for infection spread
            else:
                if self.verbosity > 1:
                    print('{} {} sent negative sample (even though infectious)'
                        .format(a.type, a.ID))
                a.sample = 'negative'
                self.undetected_infections += 1

        else:
            # agent is neither exposed nor infectious -> true negative
            if self.verbosity > 1: print('{} {} sent negative sample'
                .format(a.type, a.ID))
            a.sample = 'negative'

        # for same-day testing, immediately act on the results of the test
        if a.days_since_tested >= self.Testing.tests[test_type]['time_until_test_result']:
            a.act_on_test_result()
    def screen_agents(self, agent_group, test_type, screen_type):
        '''
        Screen a whole agent group with the given test type.

        Only agents that have not been tested already in this simulation
        step and that are not already known positive cases are tested. For
        preventive screens ('preventive'), only agents that participate in
        voluntary testing are tested; reactive and follow-up screens test
        everyone eligible. Sets the screened_agents flag and resets the
        days-since-last-screen counter for the group.
        '''
        # only test agents that have not been tested already in this simulation
        # step and that are not already known positive cases
        if self.verbosity > 0:
            print('initiating {} {} screen'\
                .format(screen_type, agent_group))

        untested_agents = [a for a in self.schedule.agents if
            (a.tested == False and a.known_positive == False
                and a.type == agent_group)]

        if len(untested_agents) > 0:
            self.screened_agents[screen_type][agent_group] = True
            self.days_since_last_agent_screen[agent_group] = 0

            # only test agents if they participate in voluntary testing
            if screen_type == 'preventive':
                for a in untested_agents:
                    if a.voluntary_testing:
                        self.test_agent(a, test_type)
                    else:
                        if self.verbosity > 1:
                            print('not testing {} {}, not participating in voluntary testing'\
                                .format(agent_group, a.ID))
            else:
                for a in untested_agents:
                    self.test_agent(a, test_type)

            if self.verbosity > 0:
                print()
        else:
            if self.verbosity > 0:
                print('no agents tested because all agents have already been tested')
# the type of the test used in the pending test result is stored in the
# variable pending_test
def collect_test_results(self):
agents_with_test_results = [a for a in self.schedule.agents if
(a.pending_test and
a.days_since_tested >= self.Testing.tests[a.pending_test]['time_until_test_result'])]
return agents_with_test_results
def trace_contacts(self, a):
if a.quarantined == False:
a.quarantined = True
a.quarantine_start = self.Nstep
if self.verbosity > 0:
print('qurantined {} {}'.format(a.type, a.ID))
# find all agents that share edges with the agent
# that are classified as K1 contact types in the testing
# strategy
if a in self.G.nodes():
K1_contacts = [e[1] for e in self.G.edges(a.ID, data=True) if
e[2]['contact_type'] in self.Testing.K1_contact_types]
K1_contacts = [a for a in self.schedule.agents if a.ID in K1_contacts]
for K1_contact in K1_contacts:
if self.verbosity > 0:
print('quarantined {} {} (K1 contact of {} {})'
.format(K1_contact.type, K1_contact.ID, a.type, a.ID))
K1_contact.quarantined = True
K1_contact.quarantine_start = self.Nstep
def test_symptomatic_agents(self):
# find symptomatic agents that have not been tested yet and are not
# in quarantine and test them
newly_symptomatic_agents = np.asarray([a for a in self.schedule.agents
if (a.symptoms == True and a.tested == False and a.quarantined == False)])
for a in newly_symptomatic_agents:
# all symptomatic agents are quarantined by default
if self.verbosity > 0:
print('quarantined: {} {}'.format(a.type, a.ID))
a.quarantined = True
a.quarantine_start = self.Nstep
self.test_agent(a, self.Testing.diagnostic_test_type)
def quarantine_contacts(self):
# trace and quarantine contacts of newly positive agents
if len(self.newly_positive_agents) > 0:
if self.verbosity > 0: print('new positive test(s) from {}'
.format([a.ID for a in self.newly_positive_agents]))
# send all K1 contacts of positive agents into quarantine
for a in self.newly_positive_agents:
self.trace_contacts(a)
# indicate that a screen should happen because there are new
# positive test results
self.new_positive_tests = True
self.newly_positive_agents = []
else:
self.new_positive_tests = False
def step(self):
self.weekday = (self.Nstep + self.weekday_offset) % 7 + 1
# if the connection graph is time-resloved, set the graph that is
# used to determine connections in this step to the sub-graph corres-
# ponding to the current day of the week
if self.dynamic_connections:
self.G = self.weekday_connections[self.weekday]
if self.verbosity > 0:
print('weekday {}'.format(self.weekday))
if self.testing:
for agent_type in self.agent_types:
for screen_type in ['reactive', 'follow_up', 'preventive']:
self.screened_agents[screen_type][agent_type] = False
if self.verbosity > 0:
print('* testing and tracing *')
self.test_symptomatic_agents()
# collect and act on new test results
agents_with_test_results = self.collect_test_results()
for a in agents_with_test_results:
a.act_on_test_result()
self.quarantine_contacts()
# screening:
# a screen should take place if
# (a) there are new positive test results
# (b) as a follow-up screen for a screen that was initiated because
# of new positive cases
# (c) if there is a preventive screening policy and it is time for
# a preventive screen in a given agent group
# (a)
if (self.testing == 'background' or self.testing == 'background+preventive')\
and self.new_positive_tests == True:
for agent_type in self.screening_agents:
self.screen_agents(
agent_type, self.Testing.diagnostic_test_type, 'reactive')
self.scheduled_follow_up_screen[agent_type] = True
# (b)
elif (self.testing == 'background' or self.testing == 'background+preventive') and \
self.Testing.follow_up_testing_interval != None and \
sum(list(self.scheduled_follow_up_screen.values())) > 0:
for agent_type in self.screening_agents:
if self.scheduled_follow_up_screen[agent_type] and\
self.days_since_last_agent_screen[agent_type] >=\
self.Testing.follow_up_testing_interval:
self.screen_agents(
agent_type, self.Testing.diagnostic_test_type, 'follow_up')
else:
if self.verbosity > 0:
print('not initiating {} follow-up screen (last screen too close)'\
.format(agent_type))
# (c)
elif (self.testing == 'preventive' or self.testing == 'background+preventive')and \
np.any(list(self.Testing.screening_intervals.values())):
for agent_type in self.screening_agents:
interval = self.Testing.screening_intervals[agent_type]
assert interval in [7, 3, 2, None], \
'testing interval {} for agent type {} not supported!'\
.format(interval, agent_type)
# (c.1) testing every 7 days = testing on Mondays
if interval == 7 and self.weekday == 1:
self.screen_agents(agent_type,
self.Testing.preventive_screening_test_type,\
'preventive')
# (c.2) testing every 3 days = testing on Mo & Turs
elif interval == 3 and self.weekday in [1, 4]:
self.screen_agents(agent_type,
self.Testing.preventive_screening_test_type,\
'preventive')
# (c.3) testing every 2 days = testing on Mo, Wed & Fri
elif interval == 2 and self.weekday in [1, 3, 5]:
self.screen_agents(agent_type,
self.Testing.preventive_screening_test_type,\
'preventive')
# No interval specified = no testing, even if testing
# mode == preventive
elif interval == None:
pass
else:
if self.verbosity > 0:
print('not initiating {} preventive screen (wrong weekday)'\
.format(agent_type))
else:
# do nothing
pass
for agent_type in self.agent_types:
if not (self.screened_agents['reactive'][agent_type] or \
self.screened_agents['follow_up'][agent_type] or \
self.screened_agents['preventive'][agent_type]):
self.days_since_last_agent_screen[agent_type] += 1
if self.verbosity > 0: print('* agent interaction *')
self.datacollector.collect(self)
self.schedule.step()
self.Nstep += 1
| true
| true
|
f7165682eb5afce41035f2bdfef80e240f373985
| 4,901
|
py
|
Python
|
configs/gdrn/lmoPbrSO/resnest50d_online_AugCosyAAEGray_mlBCE_DoubleMask_lmo_pbr_100e_bop_test/resnest50d_online_AugCosyAAEGray_mlBCE_DoubleMask_lmo_pbr_100e_driller_bop_test.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 33
|
2021-12-15T07:11:47.000Z
|
2022-03-29T08:58:32.000Z
|
configs/gdrn/lmoPbrSO/resnest50d_online_AugCosyAAEGray_mlBCE_DoubleMask_lmo_pbr_100e_bop_test/resnest50d_online_AugCosyAAEGray_mlBCE_DoubleMask_lmo_pbr_100e_driller_bop_test.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 3
|
2021-12-15T11:39:54.000Z
|
2022-03-29T07:24:23.000Z
|
configs/gdrn/lmoPbrSO/resnest50d_online_AugCosyAAEGray_mlBCE_DoubleMask_lmo_pbr_100e_bop_test/resnest50d_online_AugCosyAAEGray_mlBCE_DoubleMask_lmo_pbr_100e_driller_bop_test.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | null | null | null |
# Inherit the common GDRN settings; this file only overrides
# experiment-specific values for one object.
_base_ = ["../../../_base_/gdrn_base.py"]
# All checkpoints / logs for the LM-O "driller" object are written here.
OUTPUT_DIR = "output/gdrn/lmoPbrSO/resnest50d_online_AugCosyAAEGray_mlBCE_DoubleMask_lmo_pbr_100e/driller"
# Input pipeline: dynamic-zoom-in cropping, background replacement and
# color augmentation (the augmentation pipeline is given as a code string
# that the framework evaluates).
INPUT = dict(
    DZI_PAD_SCALE=1.5,
    TRUNCATE_FG=False,
    CHANGE_BG_PROB=0.5,
    COLOR_AUG_PROB=0.8,
    COLOR_AUG_TYPE="code",
    COLOR_AUG_CODE=(
        "Sequential(["
        # Sometimes(0.5, PerspectiveTransform(0.05)),
        # Sometimes(0.5, CropAndPad(percent=(-0.05, 0.1))),
        # Sometimes(0.5, Affine(scale=(1.0, 1.2))),
        "Sometimes(0.5, CoarseDropout( p=0.2, size_percent=0.05) ),"
        "Sometimes(0.4, GaussianBlur((0., 3.))),"
        "Sometimes(0.3, pillike.EnhanceSharpness(factor=(0., 50.))),"
        "Sometimes(0.3, pillike.EnhanceContrast(factor=(0.2, 50.))),"
        "Sometimes(0.5, pillike.EnhanceBrightness(factor=(0.1, 6.))),"
        "Sometimes(0.3, pillike.EnhanceColor(factor=(0., 20.))),"
        "Sometimes(0.5, Add((-25, 25), per_channel=0.3)),"
        "Sometimes(0.3, Invert(0.2, per_channel=True)),"
        "Sometimes(0.5, Multiply((0.6, 1.4), per_channel=0.5)),"
        "Sometimes(0.5, Multiply((0.6, 1.4))),"
        "Sometimes(0.1, AdditiveGaussianNoise(scale=10, per_channel=True)),"
        "Sometimes(0.5, iaa.contrast.LinearContrast((0.5, 2.2), per_channel=0.3)),"
        "Sometimes(0.5, Grayscale(alpha=(0.0, 1.0)))," # maybe remove for det
        "], random_order=True)"
        # cosy+aae
    ),
)
# Optimization: Ranger optimizer with a flat-then-cosine-anneal LR schedule.
SOLVER = dict(
    IMS_PER_BATCH=24,
    TOTAL_EPOCHS=100,
    LR_SCHEDULER_NAME="flat_and_anneal",
    ANNEAL_METHOD="cosine",  # "cosine"
    ANNEAL_POINT=0.72,  # fraction of training after which annealing starts
    # REL_STEPS=(0.3125, 0.625, 0.9375),
    OPTIMIZER_CFG=dict(_delete_=True, type="Ranger", lr=1e-4, weight_decay=0),
    WEIGHT_DECAY=0.0,
    WARMUP_FACTOR=0.001,
    WARMUP_ITERS=1000,
)
# Datasets: train on PBR-rendered "driller" images, test on the LM-O BOP
# test split using external detections from a YOLOv4 detector.
DATASETS = dict(
    TRAIN=("lmo_pbr_driller_train",),
    TEST=("lmo_bop_test",),
    # detector performance of the detection file below:
    # AP    AP50    AP75    AR    inf.time
    # 66.15 91.742  77.288  74.1  24.6ms
    DET_FILES_TEST=(
        "datasets/BOP_DATASETS/lmo/test/test_bboxes/yolov4x_640_test672_augCosyAAEGray_ranger_lmo_pbr_lmo_bop_test_16e.json",
    ),
)
# Network: GDRN pose estimator with a ResNeSt-50d backbone (from timm),
# a double-mask geometric head and a PnP regression head.
MODEL = dict(
    LOAD_DETS_TEST=True,
    PIXEL_MEAN=[0.0, 0.0, 0.0],
    PIXEL_STD=[255.0, 255.0, 255.0],
    POSE_NET=dict(
        NAME="GDRN_double_mask",
        XYZ_ONLINE=True,  # render XYZ coordinate targets on the fly
        BACKBONE=dict(
            FREEZE=False,
            PRETRAINED="timm",
            INIT_CFG=dict(
                type="timm/resnest50d",
                pretrained=True,
                in_chans=3,
                features_only=True,
                out_indices=(4,),
            ),
        ),
        ## geo head: Mask, XYZ, Region
        GEO_HEAD=dict(
            FREEZE=False,
            INIT_CFG=dict(
                type="TopDownDoubleMaskXyzRegionHead",
                in_dim=2048,  # this is num out channels of backbone conv feature
            ),
            NUM_REGIONS=64,
        ),
        PNP_NET=dict(
            INIT_CFG=dict(norm="GN", act="gelu"),
            REGION_ATTENTION=True,
            WITH_2D_COORD=True,
            ROT_TYPE="allo_rot6d",  # allocentric 6D rotation representation
            TRANS_TYPE="centroid_z",
        ),
        LOSS_CFG=dict(
            # xyz loss ----------------------------
            XYZ_LOSS_TYPE="L1",  # L1 | CE_coor
            XYZ_LOSS_MASK_GT="visib",  # trunc | visib | obj
            XYZ_LW=1.0,
            # mask loss ---------------------------
            MASK_LOSS_TYPE="BCE",  # L1 | BCE | CE
            MASK_LOSS_GT="trunc",  # trunc | visib | gt
            MASK_LW=1.0,
            # full mask loss ---------------------------
            FULL_MASK_LOSS_TYPE="BCE",  # L1 | BCE | CE
            FULL_MASK_LW=1.0,
            # region loss -------------------------
            REGION_LOSS_TYPE="CE",  # CE
            REGION_LOSS_MASK_GT="visib",  # trunc | visib | obj
            REGION_LW=1.0,
            # pm loss --------------
            PM_LOSS_SYM=True,  # NOTE: sym loss
            PM_R_ONLY=True,  # only do R loss in PM
            PM_LW=1.0,
            # centroid loss -------
            CENTROID_LOSS_TYPE="L1",
            CENTROID_LW=1.0,
            # z loss -----------
            Z_LOSS_TYPE="L1",
            Z_LW=1.0,
        ),
    ),
)
# Evaluation: BOP-toolkit evaluation on the LM-O test targets.
VAL = dict(
    DATASET_NAME="lmo",
    SCRIPT_PATH="lib/pysixd/scripts/eval_pose_results_more.py",
    TARGETS_FILENAME="test_targets_bop19.json",
    ERROR_TYPES="vsd,mspd,mssd,ad,reS,teS",
    RENDERER_TYPE="cpp",  # cpp, python, egl
    SPLIT="test",
    SPLIT_TYPE="",
    N_TOP=1,  # SISO: 1, VIVO: -1 (for LINEMOD, 1/-1 are the same)
    EVAL_CACHED=False,  # if the predicted poses have been saved
    SCORE_ONLY=False,  # if the errors have been calculated
    EVAL_PRINT_ONLY=False,  # if the scores/recalls have been saved
    EVAL_PRECISION=False,  # use precision or recall
    USE_BOP=True,  # whether to use bop toolkit
)
# No periodic evaluation during training; test with estimated (detector) boxes.
TEST = dict(EVAL_PERIOD=0, VIS=False, TEST_BBOX_TYPE="est")  # gt | est
| 35.773723
| 125
| 0.556417
|
_base_ = ["../../../_base_/gdrn_base.py"]
OUTPUT_DIR = "output/gdrn/lmoPbrSO/resnest50d_online_AugCosyAAEGray_mlBCE_DoubleMask_lmo_pbr_100e/driller"
INPUT = dict(
DZI_PAD_SCALE=1.5,
TRUNCATE_FG=False,
CHANGE_BG_PROB=0.5,
COLOR_AUG_PROB=0.8,
COLOR_AUG_TYPE="code",
COLOR_AUG_CODE=(
"Sequential(["
"Sometimes(0.5, CoarseDropout( p=0.2, size_percent=0.05) ),"
"Sometimes(0.4, GaussianBlur((0., 3.))),"
"Sometimes(0.3, pillike.EnhanceSharpness(factor=(0., 50.))),"
"Sometimes(0.3, pillike.EnhanceContrast(factor=(0.2, 50.))),"
"Sometimes(0.5, pillike.EnhanceBrightness(factor=(0.1, 6.))),"
"Sometimes(0.3, pillike.EnhanceColor(factor=(0., 20.))),"
"Sometimes(0.5, Add((-25, 25), per_channel=0.3)),"
"Sometimes(0.3, Invert(0.2, per_channel=True)),"
"Sometimes(0.5, Multiply((0.6, 1.4), per_channel=0.5)),"
"Sometimes(0.5, Multiply((0.6, 1.4))),"
"Sometimes(0.1, AdditiveGaussianNoise(scale=10, per_channel=True)),"
"Sometimes(0.5, iaa.contrast.LinearContrast((0.5, 2.2), per_channel=0.3)),"
"Sometimes(0.5, Grayscale(alpha=(0.0, 1.0))),"
"], random_order=True)"
),
)
SOLVER = dict(
IMS_PER_BATCH=24,
TOTAL_EPOCHS=100,
LR_SCHEDULER_NAME="flat_and_anneal",
ANNEAL_METHOD="cosine",
ANNEAL_POINT=0.72,
OPTIMIZER_CFG=dict(_delete_=True, type="Ranger", lr=1e-4, weight_decay=0),
WEIGHT_DECAY=0.0,
WARMUP_FACTOR=0.001,
WARMUP_ITERS=1000,
)
DATASETS = dict(
TRAIN=("lmo_pbr_driller_train",),
TEST=("lmo_bop_test",),
DET_FILES_TEST=(
"datasets/BOP_DATASETS/lmo/test/test_bboxes/yolov4x_640_test672_augCosyAAEGray_ranger_lmo_pbr_lmo_bop_test_16e.json",
),
)
MODEL = dict(
LOAD_DETS_TEST=True,
PIXEL_MEAN=[0.0, 0.0, 0.0],
PIXEL_STD=[255.0, 255.0, 255.0],
POSE_NET=dict(
NAME="GDRN_double_mask",
XYZ_ONLINE=True,
BACKBONE=dict(
FREEZE=False,
PRETRAINED="timm",
INIT_CFG=dict(
type="timm/resnest50d",
pretrained=True,
in_chans=3,
features_only=True,
out_indices=(4,),
),
),
FREEZE=False,
INIT_CFG=dict(
type="TopDownDoubleMaskXyzRegionHead",
in_dim=2048,
),
NUM_REGIONS=64,
),
PNP_NET=dict(
INIT_CFG=dict(norm="GN", act="gelu"),
REGION_ATTENTION=True,
WITH_2D_COORD=True,
ROT_TYPE="allo_rot6d",
TRANS_TYPE="centroid_z",
),
LOSS_CFG=dict(
XYZ_LOSS_TYPE="L1",
XYZ_LOSS_MASK_GT="visib",
XYZ_LW=1.0,
MASK_LOSS_TYPE="BCE",
MASK_LOSS_GT="trunc",
MASK_LW=1.0,
FULL_MASK_LOSS_TYPE="BCE",
FULL_MASK_LW=1.0,
REGION_LOSS_TYPE="CE",
REGION_LOSS_MASK_GT="visib",
REGION_LW=1.0,
PM_LOSS_SYM=True,
PM_R_ONLY=True,
PM_LW=1.0,
CENTROID_LOSS_TYPE="L1",
CENTROID_LW=1.0,
Z_LOSS_TYPE="L1",
Z_LW=1.0,
),
),
)
VAL = dict(
DATASET_NAME="lmo",
SCRIPT_PATH="lib/pysixd/scripts/eval_pose_results_more.py",
TARGETS_FILENAME="test_targets_bop19.json",
ERROR_TYPES="vsd,mspd,mssd,ad,reS,teS",
RENDERER_TYPE="cpp",
SPLIT="test",
SPLIT_TYPE="",
N_TOP=1,
EVAL_CACHED=False,
SCORE_ONLY=False,
EVAL_PRINT_ONLY=False,
EVAL_PRECISION=False,
USE_BOP=True,
)
TEST = dict(EVAL_PERIOD=0, VIS=False, TEST_BBOX_TYPE="est")
| true
| true
|
f71656b1d13b7744997e72f449279218a8aca12b
| 2,466
|
py
|
Python
|
L1Trigger/L1CaloTrigger/python/Phase1L1TJets_sincosLUT_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
L1Trigger/L1CaloTrigger/python/Phase1L1TJets_sincosLUT_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
L1Trigger/L1CaloTrigger/python/Phase1L1TJets_sincosLUT_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms

# Pre-tabulated sin(phi) / cos(phi) look-up tables for the Phase-1 L1
# calorimeter-trigger jet algorithm (one entry per phi bin; sin^2 + cos^2
# equals 1 index-by-index, so both vectors sample the same set of angles).
# NOTE(review): the exact bin-centre convention (offset relative to -pi)
# is not evident here -- confirm against the producer that consumes these
# vectors before changing either table.
sinPhi = cms.vdouble(
    -0.0353352962792, -0.122533930843, -0.208795013406, -0.293458528818, -0.375876685504, -0.455418871948, -0.531476481737, -0.603467570232, -0.670841307236, -0.733082191603, -0.789713995522, -0.840303408309, -0.884463351833, -0.921855942186, -0.952195074957, -0.975248614326, -0.990840169216, -0.998850442928, -0.999218145922, -0.99194046477, -0.977073083675, -0.954729758418, -0.925081445966, -0.888354996422, -0.844831417308, -0.794843723474, -0.738774389082, -0.677052421152, -0.610150077076, -0.538579251202, -0.462887558141, -0.383654142772, -0.301485248985, -0.217009581095, -0.130873493387, -0.0437360446299, 0.0437360446299, 0.130873493387, 0.217009581095, 0.301485248985, 0.383654142772, 0.462887558141, 0.538579251202, 0.610150077076, 0.677052421152, 0.738774389082, 0.794843723474, 0.844831417308, 0.888354996422, 0.925081445966, 0.954729758418, 0.977073083675, 0.99194046477, 0.999218145922, 0.998850442928, 0.990840169216, 0.975248614326, 0.952195074957, 0.921855942186, 0.884463351833, 0.840303408309, 0.789713995522, 0.733082191603, 0.670841307236, 0.603467570232, 0.531476481737, 0.455418871948, 0.375876685504, 0.293458528818, 0.208795013406, 0.122533930843, 0.0353352962792 )
cosPhi = cms.vdouble( -0.999375513427, -0.992464324695, -0.977959427777, -0.955971804952, -0.926669691581, -0.890277288868, -0.847073048421, -0.797387541713, -0.741600930761, -0.680140059366, -0.613475187173, -0.542116391547, -0.466609664777, -0.387532736497, -0.305490653258, -0.2211111491, -0.135039842524, -0.0479352966351, 0.039536019772, 0.126704831606, 0.212904178348, 0.297474517214, 0.379768769555, 0.459157271892, 0.535032593708, 0.606814185113, 0.673952818851, 0.735934792636, 0.792285859677, 0.842574857312, 0.886417005995, 0.923476853383, 0.953470841004, 0.976169473869, 0.991399076421, 0.999043121392, 0.999043121392, 0.991399076421, 0.976169473869, 0.953470841004, 0.923476853383, 0.886417005995, 0.842574857312, 0.792285859677, 0.735934792636, 0.673952818851, 0.606814185113, 0.535032593708, 0.459157271892, 0.379768769555, 0.297474517214, 0.212904178348, 0.126704831606, 0.039536019772, -0.0479352966351, -0.135039842524, -0.2211111491, -0.305490653258, -0.387532736497, -0.466609664777, -0.542116391547, -0.613475187173, -0.680140059366, -0.741600930761, -0.797387541713, -0.847073048421, -0.890277288868, -0.926669691581, -0.955971804952, -0.977959427777, -0.992464324695, -0.999375513427 )
| 411
| 1,208
| 0.786294
|
import FWCore.ParameterSet.Config as cms
sinPhi = cms.vdouble(
-0.0353352962792, -0.122533930843, -0.208795013406, -0.293458528818, -0.375876685504, -0.455418871948, -0.531476481737, -0.603467570232, -0.670841307236, -0.733082191603, -0.789713995522, -0.840303408309, -0.884463351833, -0.921855942186, -0.952195074957, -0.975248614326, -0.990840169216, -0.998850442928, -0.999218145922, -0.99194046477, -0.977073083675, -0.954729758418, -0.925081445966, -0.888354996422, -0.844831417308, -0.794843723474, -0.738774389082, -0.677052421152, -0.610150077076, -0.538579251202, -0.462887558141, -0.383654142772, -0.301485248985, -0.217009581095, -0.130873493387, -0.0437360446299, 0.0437360446299, 0.130873493387, 0.217009581095, 0.301485248985, 0.383654142772, 0.462887558141, 0.538579251202, 0.610150077076, 0.677052421152, 0.738774389082, 0.794843723474, 0.844831417308, 0.888354996422, 0.925081445966, 0.954729758418, 0.977073083675, 0.99194046477, 0.999218145922, 0.998850442928, 0.990840169216, 0.975248614326, 0.952195074957, 0.921855942186, 0.884463351833, 0.840303408309, 0.789713995522, 0.733082191603, 0.670841307236, 0.603467570232, 0.531476481737, 0.455418871948, 0.375876685504, 0.293458528818, 0.208795013406, 0.122533930843, 0.0353352962792 )
cosPhi = cms.vdouble( -0.999375513427, -0.992464324695, -0.977959427777, -0.955971804952, -0.926669691581, -0.890277288868, -0.847073048421, -0.797387541713, -0.741600930761, -0.680140059366, -0.613475187173, -0.542116391547, -0.466609664777, -0.387532736497, -0.305490653258, -0.2211111491, -0.135039842524, -0.0479352966351, 0.039536019772, 0.126704831606, 0.212904178348, 0.297474517214, 0.379768769555, 0.459157271892, 0.535032593708, 0.606814185113, 0.673952818851, 0.735934792636, 0.792285859677, 0.842574857312, 0.886417005995, 0.923476853383, 0.953470841004, 0.976169473869, 0.991399076421, 0.999043121392, 0.999043121392, 0.991399076421, 0.976169473869, 0.953470841004, 0.923476853383, 0.886417005995, 0.842574857312, 0.792285859677, 0.735934792636, 0.673952818851, 0.606814185113, 0.535032593708, 0.459157271892, 0.379768769555, 0.297474517214, 0.212904178348, 0.126704831606, 0.039536019772, -0.0479352966351, -0.135039842524, -0.2211111491, -0.305490653258, -0.387532736497, -0.466609664777, -0.542116391547, -0.613475187173, -0.680140059366, -0.741600930761, -0.797387541713, -0.847073048421, -0.890277288868, -0.926669691581, -0.955971804952, -0.977959427777, -0.992464324695, -0.999375513427 )
| true
| true
|
f71658adc475a0a5c279f34a5b5c93dc79f0e389
| 4,279
|
py
|
Python
|
examples/models/file/calendars.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | 1
|
2020-05-26T15:21:22.000Z
|
2020-05-26T15:21:22.000Z
|
examples/models/file/calendars.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | 1
|
2021-12-15T17:32:31.000Z
|
2021-12-21T18:11:05.000Z
|
examples/models/file/calendars.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | 1
|
2021-12-20T05:50:00.000Z
|
2021-12-20T05:50:00.000Z
|
''' A rendering of the 2014 monthly calendar.
This example demonstrates the usage of plotting several
plots together using ``gridplot``.
A hover tooltip displays the US holidays on the significant dates.
.. bokeh-example-metadata::
:sampledata: us_holidays
:apis: bokeh.layouts.gridplot, bokeh.models.tools.HoverTool, bokeh.models.plots.Plot, bokeh.models.glyphs.Rect, bokeh.models.glyphs.Text, bokeh.document.document.Document # noqa: E501
:refs: :ref:`userguide_layout` > :ref:`userguide_layout_gridplot`, :ref:`userguide_tools` > :ref:`userguide_tools_hover_tool`
:keywords: gridplot, hover, tooltip
'''
from calendar import Calendar, day_abbr as day_abbrs, month_name as month_names
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.layouts import gridplot
from bokeh.models import (CategoricalAxis, CategoricalScale, ColumnDataSource,
FactorRange, HoverTool, Plot, Rect, Text)
from bokeh.resources import INLINE
from bokeh.sampledata.us_holidays import us_holidays
from bokeh.util.browser import view
def make_calendar(year, month, firstweekday="Mon"):
    """Create one month's calendar as a Bokeh Plot.

    Days are drawn as a grid of rectangles (weekends shaded), US-OPM
    holidays are highlighted in pink, and a hover tool shows the holiday
    name when the mouse is over a highlighted cell.

    Args:
        year: Calendar year, e.g. 2014.
        month: Month number, 1-12.
        firstweekday: Abbreviated name of the weekday in the first column.

    Returns:
        The configured bokeh ``Plot`` for the given month.
    """
    first_idx = list(day_abbrs).index(firstweekday)
    cal = Calendar(firstweekday=first_idx)

    day_labels = [str(d) if d else None for d in cal.itermonthdays(year, month)]
    n_weeks = len(day_labels) // 7

    workday_color = "linen"
    weekend_color = "lightsteelblue"

    def weekday(date):
        # Column index of `date` relative to the chosen first weekday.
        return (date.weekday() - first_idx) % 7

    def pick_weekdays(seq):
        # Rotate a 7-element sequence so it starts on `firstweekday`.
        return [seq[i % 7] for i in range(first_idx, first_idx + 7)]

    day_names = pick_weekdays(day_abbrs)
    week_colors = pick_weekdays([workday_color] * 5 + [weekend_color] * 2)

    source = ColumnDataSource(data=dict(
        days=list(day_names) * n_weeks,
        weeks=sum([[str(w)] * 7 for w in range(n_weeks)], []),
        month_days=day_labels,
        day_backgrounds=sum([week_colors] * n_weeks, []),
    ))

    # US-OPM holidays falling in this month, with the marker stripped
    # from the displayed summary.
    holidays = []
    for date, summary in us_holidays:
        if date.year == year and date.month == month and "(US-OPM)" in summary:
            holidays.append((date, summary.replace("(US-OPM)", "").strip()))

    holidays_source = ColumnDataSource(data=dict(
        holidays_days=[day_names[weekday(date)] for date, _ in holidays],
        holidays_weeks=[str((weekday(date.replace(day=1)) + date.day) // 7)
                        for date, _ in holidays],
        month_holidays=[summary for _, summary in holidays],
    ))

    xdr = FactorRange(factors=list(day_names))
    ydr = FactorRange(factors=[str(w) for w in range(n_weeks - 1, -1, -1)])

    plot = Plot(x_range=xdr, y_range=ydr,
                x_scale=CategoricalScale(), y_scale=CategoricalScale(),
                width=300, height=300, outline_line_color=None)
    plot.title.text = month_names[month]
    plot.title.text_font_size = "16px"
    plot.title.text_color = "darkolivegreen"
    plot.title.offset = 25
    plot.min_border_left = 0
    plot.min_border_bottom = 5

    # Background cells (workday / weekend shading).
    day_rect = Rect(x="days", y="weeks", width=0.9, height=0.9,
                    fill_color="day_backgrounds", line_color="silver")
    plot.add_glyph(source, day_rect)

    # Holiday cells drawn on top; keep the renderer for the hover tool.
    holiday_rect = Rect(x="holidays_days", y="holidays_weeks", width=0.9, height=0.9,
                        fill_color="pink", line_color="indianred")
    holiday_renderer = plot.add_glyph(holidays_source, holiday_rect)

    day_text = Text(x="days", y="weeks", text="month_days",
                    text_align="center", text_baseline="middle")
    plot.add_glyph(source, day_text)

    xaxis = CategoricalAxis()
    xaxis.major_label_text_font_size = "11px"
    xaxis.major_label_standoff = 0
    xaxis.major_tick_line_color = None
    xaxis.axis_line_color = None
    plot.add_layout(xaxis, 'above')

    hover_tool = HoverTool(renderers=[holiday_renderer],
                           tooltips=[("Holiday", "@month_holidays")])
    plot.tools.append(hover_tool)

    return plot
# Lay the twelve 2014 months out as a 4x3 grid of calendar plots.
months = [[make_calendar(2014, 3 * row + col + 1) for col in range(3)]
          for row in range(4)]

grid = gridplot(toolbar_location=None, children=months)

doc = Document()
doc.add_root(grid)

if __name__ == "__main__":
    doc.validate()
    filename = "calendars.html"
    html = file_html(doc, INLINE, "Calendar 2014")
    with open(filename, "w") as f:
        f.write(html)
    print("Wrote %s" % filename)
    view(filename)
| 39.256881
| 187
| 0.694087
|
from calendar import Calendar, day_abbr as day_abbrs, month_name as month_names
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.layouts import gridplot
from bokeh.models import (CategoricalAxis, CategoricalScale, ColumnDataSource,
FactorRange, HoverTool, Plot, Rect, Text)
from bokeh.resources import INLINE
from bokeh.sampledata.us_holidays import us_holidays
from bokeh.util.browser import view
def make_calendar(year, month, firstweekday="Mon"):
firstweekday = list(day_abbrs).index(firstweekday)
calendar = Calendar(firstweekday=firstweekday)
month_days = [ None if not day else str(day) for day in calendar.itermonthdays(year, month) ]
month_weeks = len(month_days)//7
workday = "linen"
weekend = "lightsteelblue"
def weekday(date):
return (date.weekday() - firstweekday) % 7
def pick_weekdays(days):
return [ days[i % 7] for i in range(firstweekday, firstweekday+7) ]
day_names = pick_weekdays(day_abbrs)
week_days = pick_weekdays([workday]*5 + [weekend]*2)
source = ColumnDataSource(data=dict(
days = list(day_names)*month_weeks,
weeks = sum([ [str(week)]*7 for week in range(month_weeks) ], []),
month_days = month_days,
day_backgrounds = sum([week_days]*month_weeks, []),
))
holidays = [ (date, summary.replace("(US-OPM)", "").strip()) for (date, summary) in us_holidays
if date.year == year and date.month == month and "(US-OPM)" in summary ]
holidays_source = ColumnDataSource(data=dict(
holidays_days = [ day_names[weekday(date)] for date, _ in holidays ],
holidays_weeks = [ str((weekday(date.replace(day=1)) + date.day) // 7) for date, _ in holidays ],
month_holidays = [ summary for _, summary in holidays ],
))
xdr = FactorRange(factors=list(day_names))
ydr = FactorRange(factors=list(reversed([ str(week) for week in range(month_weeks) ])))
x_scale, y_scale = CategoricalScale(), CategoricalScale()
plot = Plot(x_range=xdr, y_range=ydr, x_scale=x_scale, y_scale=y_scale,
width=300, height=300, outline_line_color=None)
plot.title.text = month_names[month]
plot.title.text_font_size = "16px"
plot.title.text_color = "darkolivegreen"
plot.title.offset = 25
plot.min_border_left = 0
plot.min_border_bottom = 5
rect = Rect(x="days", y="weeks", width=0.9, height=0.9, fill_color="day_backgrounds", line_color="silver")
plot.add_glyph(source, rect)
rect = Rect(x="holidays_days", y="holidays_weeks", width=0.9, height=0.9, fill_color="pink", line_color="indianred")
rect_renderer = plot.add_glyph(holidays_source, rect)
text = Text(x="days", y="weeks", text="month_days", text_align="center", text_baseline="middle")
plot.add_glyph(source, text)
xaxis = CategoricalAxis()
xaxis.major_label_text_font_size = "11px"
xaxis.major_label_standoff = 0
xaxis.major_tick_line_color = None
xaxis.axis_line_color = None
plot.add_layout(xaxis, 'above')
hover_tool = HoverTool(renderers=[rect_renderer], tooltips=[("Holiday", "@month_holidays")])
plot.tools.append(hover_tool)
return plot
months = [ [ make_calendar(2014, 3*i + j + 1) for j in range(3) ] for i in range(4) ]
grid = gridplot(toolbar_location=None, children=months)
doc = Document()
doc.add_root(grid)
if __name__ == "__main__":
doc.validate()
filename = "calendars.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Calendar 2014"))
print("Wrote %s" % filename)
view(filename)
| true
| true
|
f7165983d502ba88a113831abb4c27b5654b1d3e
| 34,532
|
py
|
Python
|
release/stubs.min/System/ComponentModel/__init___parts/MaskedTextProvider.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/ComponentModel/__init___parts/MaskedTextProvider.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/ComponentModel/__init___parts/MaskedTextProvider.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
# NOTE(review): auto-generated IronPython stub for the .NET type
# System.ComponentModel.MaskedTextProvider; all method bodies below are
# placeholders (`pass`) -- the real implementation lives in the CLR.
class MaskedTextProvider(object, ICloneable):
    """Represents a mask-parsing service that can be used by any number of
    controls that support masking, such as the
    System.Windows.Forms.MaskedTextBox control.

    Constructor overloads:
    MaskedTextProvider(mask: str)
    MaskedTextProvider(mask: str,restrictToAscii: bool)
    MaskedTextProvider(mask: str,culture: CultureInfo)
    MaskedTextProvider(mask: str,culture: CultureInfo,restrictToAscii: bool)
    MaskedTextProvider(mask: str,passwordChar: Char,allowPromptAsInput: bool)
    MaskedTextProvider(mask: str,culture: CultureInfo,passwordChar: Char,allowPromptAsInput: bool)
    MaskedTextProvider(mask: str,culture: CultureInfo,allowPromptAsInput: bool,promptChar: Char,passwordChar: Char,restrictToAscii: bool)
    """
    def Add(self, input, testPosition=None, resultHint=None):
        """Append ``input`` to the end of the formatted string.

        .NET overloads:
          Add(input: str) -> bool
          Add(input: str) -> (bool, int, MaskedTextResultHint)
          Add(input: Char) -> bool
          Add(input: Char) -> (bool, int, MaskedTextResultHint)

        input: a Char or a str whose characters are appended to the
            formatted string.
        Returns True if every character was added successfully; otherwise
        False (and no characters are added). The tuple overloads also
        output the test position and a descriptive result hint.
        """
        # Stub only: implemented by the .NET runtime.
        pass
    def Clear(self, resultHint=None):
        """Replace all editable input characters with prompt characters.

        .NET overloads:
          Clear() -> None
          Clear() -> MaskedTextResultHint  (also outputs descriptive info)
        """
        # Stub only: implemented by the .NET runtime.
        pass
    def Clone(self):
        """Create a copy of the current MaskedTextProvider.

        Returns: the new System.ComponentModel.MaskedTextProvider,
        cast as an object.
        """
        # Stub only: implemented by the .NET runtime.
        pass
    def FindAssignedEditPositionFrom(self, position, direction):
        """Find the first assigned editable position from ``position``.

        position: zero-based position in the formatted string where the
            search starts.
        direction: True to search forward, False to search backward.
        Returns: the zero-based position of the first assigned editable
        position found, or MaskedTextProvider.InvalidIndex if none.
        """
        # Stub only: implemented by the .NET runtime.
        pass
    def FindAssignedEditPositionInRange(self, startPosition, endPosition, direction):
        """Find the first assigned editable position in a range.

        startPosition: zero-based position where the search starts.
        endPosition: zero-based position where the search ends.
        direction: True to search forward, False to search backward.
        Returns: the zero-based position of the first assigned editable
        position found, or MaskedTextProvider.InvalidIndex if none.
        """
        # Stub only: implemented by the .NET runtime.
        pass
    def FindEditPositionFrom(self, position, direction):
        """Find the first editable position from ``position``.

        position: zero-based position in the formatted string where the
            search starts.
        direction: True to search forward, False to search backward.
        Returns: the zero-based position of the first editable position
        found, or MaskedTextProvider.InvalidIndex if none.
        """
        # Stub only: implemented by the .NET runtime.
        pass
    def FindEditPositionInRange(self, startPosition, endPosition, direction):
        """Find the first editable position in a range.

        startPosition: zero-based position where the search starts.
        endPosition: zero-based position where the search ends.
        direction: True to search forward, False to search backward.
        Returns: the zero-based position of the first editable position
        found, or MaskedTextProvider.InvalidIndex if none.
        """
        # Stub only: implemented by the .NET runtime.
        pass
    def FindNonEditPositionFrom(self, position, direction):
        """Find the first non-editable (literal) position from ``position``.

        position: zero-based position in the formatted string where the
            search starts.
        direction: True to search forward, False to search backward.
        Returns: the zero-based position of the first literal position
        found, or MaskedTextProvider.InvalidIndex if none.
        """
        # Stub only: implemented by the .NET runtime.
        pass
    def FindNonEditPositionInRange(self, startPosition: int, endPosition: int, direction: bool) -> int:
        """Return the zero-based position of the first non-editable (literal)
        position between startPosition and endPosition, searching in the
        given direction (true = forward, false = backward).

        Returns MaskedTextProvider.InvalidIndex if no literal position
        is found in the range.
        """
        pass
    def FindUnassignedEditPositionFrom(self, position: int, direction: bool) -> int:
        """Return the zero-based position of the first unassigned editable
        position at or after the given position, searching in the given
        direction (true = forward, false = backward).

        Returns MaskedTextProvider.InvalidIndex if no unassigned editable
        position is found.
        """
        pass
    def FindUnassignedEditPositionInRange(self, startPosition: int, endPosition: int, direction: bool) -> int:
        """Return the zero-based position of the first unassigned editable
        position between startPosition and endPosition, searching in the
        given direction (true = forward, false = backward).

        Returns MaskedTextProvider.InvalidIndex if no unassigned editable
        position is found in the range.
        """
        pass
    @staticmethod
    def GetOperationResultFromHint(hint) -> bool:
        """Return True if the given MaskedTextResultHint value denotes a
        successful operation, False if it denotes failure.

        hint is typically obtained as an output value from a previous
        Insert/Remove/Replace/Set/Verify operation.
        """
        pass
    def InsertAt(self, input, position, testPosition=None, resultHint=None):
        """Insert a character or string at the given zero-based position in
        the formatted string.

        .NET overloads wrapped by this stub:
          InsertAt(input: str,  position: int) -> bool
          InsertAt(input: str,  position: int) -> (bool, int, MaskedTextResultHint)
          InsertAt(input: Char, position: int) -> bool
          InsertAt(input: Char, position: int) -> (bool, int, MaskedTextResultHint)

        Returns True on success; the tuple forms additionally return the
        last insertion position and a result hint.
        # NOTE(review): testPosition/resultHint presumably mirror the .NET
        # `out` parameters surfaced in the tuple returns — confirm against
        # the IronPython binding.
        """
        pass
    def IsAvailablePosition(self, position: int) -> bool:
        """Return True if the specified zero-based mask position is editable
        and has not yet been assigned an input value; otherwise False.
        """
        pass
    def IsEditPosition(self, position: int) -> bool:
        """Return True if the specified zero-based position in the formatted
        string is editable; otherwise False.
        """
        pass
    @staticmethod
    def IsValidInputChar(c) -> bool:
        """Return True if character c is a valid input character for a
        masked-text provider; otherwise False.
        """
        pass
    @staticmethod
    def IsValidMaskChar(c) -> bool:
        """Return True if character c is a valid mask character; otherwise
        False.
        """
        pass
    @staticmethod
    def IsValidPasswordChar(c) -> bool:
        """Return True if character c is a valid password character;
        otherwise False.
        """
        pass
    def Remove(self, testPosition=None, resultHint=None):
        """Remove the last assigned character from the formatted string.

        .NET overloads wrapped by this stub:
          Remove() -> bool
          Remove() -> (bool, int, MaskedTextResultHint)  # also reports the
                                                         # removal position

        Returns True if the character was successfully removed.
        """
        pass
    def RemoveAt(self, *__args):
        """Remove assigned characters at a position, or between two
        positions, from the formatted string.

        .NET overloads wrapped by this stub:
          RemoveAt(position: int) -> bool
          RemoveAt(startPosition: int, endPosition: int) -> bool
          RemoveAt(startPosition: int, endPosition: int)
              -> (bool, int, MaskedTextResultHint)

        Positions are zero-based; returns True on successful removal.
        """
        pass
    def Replace(self, input, *__args):
        """Replace editable characters in the formatted string with the
        given character or string.

        .NET overloads wrapped by this stub (input may be a str or Char):
          Replace(input, position: int) -> bool
          Replace(input, position: int) -> (bool, int, MaskedTextResultHint)
          Replace(input, startPosition: int, endPosition: int)
              -> (bool, int, MaskedTextResultHint)

        The single-position forms search at or beyond `position` for the
        first editable character to replace; the range forms replace between
        startPosition and endPosition. All positions are zero-based.
        Returns True if the replacement succeeded; the tuple forms also
        report the affected position and a result hint.
        """
        pass
    def Set(self, input, testPosition=None, resultHint=None):
        """Set the formatted string to the specified input string.

        .NET overloads wrapped by this stub:
          Set(input: str) -> bool
          Set(input: str) -> (bool, int, MaskedTextResultHint)

        Returns True if all the characters were successfully set.
        """
        pass
    def ToDisplayString(self) -> str:
        """Return the formatted string in displayable form, including
        prompt characters and mask literals.
        """
        pass
    def ToString(self, *__args) -> str:
        """Return the formatted string, or a substring of it, with optional
        inclusion of prompt, literal and password characters.

        .NET overloads wrapped by this stub:
          ToString() -> str
          ToString(ignorePasswordChar: bool) -> str
          ToString(startPosition: int, length: int) -> str
          ToString(ignorePasswordChar: bool, startPosition: int,
                   length: int) -> str
          ToString(includePrompt: bool, includeLiterals: bool) -> str
          ToString(includePrompt: bool, includeLiterals: bool,
                   startPosition: int, length: int) -> str
          ToString(ignorePasswordChar: bool, includePrompt: bool,
                   includeLiterals: bool, startPosition: int,
                   length: int) -> str

        Flag semantics:
          includePrompt     -- include prompt characters in the output.
          includeLiterals   -- include literal (mask) characters.
          ignorePasswordChar -- True returns the actual editable characters;
                               False honors the PasswordChar property.
        startPosition is the zero-based start of the output; length is the
        number of characters to return. The substring forms return
        System.String.Empty on failure.
        """
        pass
    def VerifyChar(self, input, position, hint):
        """Test whether character `input` could be set successfully at the
        given zero-based mask position.

        .NET signature: VerifyChar(input: Char, position: int)
            -> (bool, MaskedTextResultHint)

        Returns True if the character is valid for the position.
        """
        pass
    def VerifyEscapeChar(self, input, position: int) -> bool:
        """Return True if character `input` would be escaped at the given
        zero-based mask position; otherwise False.
        """
        pass
    def VerifyString(self, input, testPosition=None, resultHint=None):
        """Test whether the specified string could be set successfully.

        .NET overloads wrapped by this stub:
          VerifyString(input: str) -> bool
          VerifyString(input: str) -> (bool, int, MaskedTextResultHint)

        Returns True if the string represents valid input; the tuple form
        also reports position and descriptive information.
        """
        pass
    def __add__(self, *args):
        """x.__add__(y) <==> x + y"""
        pass
    def __getitem__(self, *args):
        """x.__getitem__(y) <==> x[y]"""
        pass
    def __init__(self, *args):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for the
        supported signatures (also listed on __new__)."""
        pass
    @staticmethod
    def __new__(self, mask, *__args):
        """Construct a MaskedTextProvider for the given mask string.

        .NET constructor overloads:
          __new__(cls, mask: str)
          __new__(cls, mask: str, restrictToAscii: bool)
          __new__(cls, mask: str, culture: CultureInfo)
          __new__(cls, mask: str, culture: CultureInfo, restrictToAscii: bool)
          __new__(cls, mask: str, passwordChar: Char, allowPromptAsInput: bool)
          __new__(cls, mask: str, culture: CultureInfo, passwordChar: Char,
                  allowPromptAsInput: bool)
          __new__(cls, mask: str, culture: CultureInfo,
                  allowPromptAsInput: bool, promptChar: Char,
                  passwordChar: Char, restrictToAscii: bool)
        """
        pass
    def __repr__(self, *args):
        """__repr__(self: object) -> str"""
        pass
    def __str__(self, *args):
        """x.__str__() <==> str(x)"""
        pass
AllowPromptAsInput = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether the prompt character should be treated as a valid input character or not.
Get: AllowPromptAsInput(self: MaskedTextProvider) -> bool
"""
AsciiOnly = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a value indicating whether the mask accepts characters outside of the ASCII character set.
Get: AsciiOnly(self: MaskedTextProvider) -> bool
"""
AssignedEditPositionCount = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the number of editable character positions that have already been successfully assigned an input value.
Get: AssignedEditPositionCount(self: MaskedTextProvider) -> int
"""
AvailableEditPositionCount = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the number of editable character positions in the input mask that have not yet been assigned an input value.
Get: AvailableEditPositionCount(self: MaskedTextProvider) -> int
"""
Culture = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the culture that determines the value of the localizable separators and placeholders in the input mask.
Get: Culture(self: MaskedTextProvider) -> CultureInfo
"""
EditPositionCount = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the number of editable positions in the formatted string.
Get: EditPositionCount(self: MaskedTextProvider) -> int
"""
EditPositions = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a newly created enumerator for the editable positions in the formatted string.
Get: EditPositions(self: MaskedTextProvider) -> IEnumerator
"""
IncludeLiterals = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a value that indicates whether literal characters in the input mask should be included in the formatted string.
Get: IncludeLiterals(self: MaskedTextProvider) -> bool
Set: IncludeLiterals(self: MaskedTextProvider)=value
"""
IncludePrompt = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a value indicating whether System.Windows.Forms.MaskedTextBox.PromptChar is used to represent the absence of user input when displaying the formatted string.
Get: IncludePrompt(self: MaskedTextProvider) -> bool
Set: IncludePrompt(self: MaskedTextProvider)=value
"""
IsPassword = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a value that determines whether password protection should be applied to the formatted string.
Get: IsPassword(self: MaskedTextProvider) -> bool
Set: IsPassword(self: MaskedTextProvider)=value
"""
LastAssignedPosition = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the index in the mask of the rightmost input character that has been assigned to the mask.
Get: LastAssignedPosition(self: MaskedTextProvider) -> int
"""
Length = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the length of the mask,absent any mask modifier characters.
Get: Length(self: MaskedTextProvider) -> int
"""
Mask = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the input mask.
Get: Mask(self: MaskedTextProvider) -> str
"""
MaskCompleted = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether all required inputs have been entered into the formatted string.
Get: MaskCompleted(self: MaskedTextProvider) -> bool
"""
MaskFull = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a value indicating whether all required and optional inputs have been entered into the formatted string.
Get: MaskFull(self: MaskedTextProvider) -> bool
"""
PasswordChar = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the character to be substituted for the actual input characters.
Get: PasswordChar(self: MaskedTextProvider) -> Char
Set: PasswordChar(self: MaskedTextProvider)=value
"""
PromptChar = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the character used to represent the absence of user input for all available edit positions.
Get: PromptChar(self: MaskedTextProvider) -> Char
Set: PromptChar(self: MaskedTextProvider)=value
"""
ResetOnPrompt = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a value that determines how an input character that matches the prompt character should be handled.
Get: ResetOnPrompt(self: MaskedTextProvider) -> bool
Set: ResetOnPrompt(self: MaskedTextProvider)=value
"""
ResetOnSpace = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a value that determines how a space input character should be handled.
Get: ResetOnSpace(self: MaskedTextProvider) -> bool
Set: ResetOnSpace(self: MaskedTextProvider)=value
"""
SkipLiterals = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a value indicating whether literal character positions in the mask can be overwritten by their same values.
Get: SkipLiterals(self: MaskedTextProvider) -> bool
Set: SkipLiterals(self: MaskedTextProvider)=value
"""
DefaultPasswordChar = None
InvalidIndex = -1
| 26.894081
| 221
| 0.707923
|
class MaskedTextProvider(object, ICloneable):
    """Signature-only stub of the .NET System.ComponentModel.MaskedTextProvider
    class (mask-based text parsing and formatting services).

    Every method body is a placeholder (`pass`) and every property getter,
    setter and deleter is a no-op lambda; the real behavior lives in the
    .NET runtime. Positions throughout the API are zero-based, `direction`
    arguments are booleans (true = forward search), and the Find* methods
    return MaskedTextProvider.InvalidIndex (-1) on failure.
    """
    # -- Mutation / query methods --------------------------------------------
    def Add(self, input, testPosition=None, resultHint=None):
        pass
    def Clear(self, resultHint=None):
        pass
    def Clone(self):
        pass
    # -- Position-search methods ---------------------------------------------
    def FindAssignedEditPositionFrom(self, position, direction):
        pass
    def FindAssignedEditPositionInRange(self, startPosition, endPosition, direction):
        pass
    def FindEditPositionFrom(self, position, direction):
        pass
    def FindEditPositionInRange(self, startPosition, endPosition, direction):
        pass
    def FindNonEditPositionFrom(self, position, direction):
        pass
    def FindNonEditPositionInRange(self, startPosition, endPosition, direction):
        pass
    def FindUnassignedEditPositionFrom(self, position, direction):
        pass
    def FindUnassignedEditPositionInRange(self, startPosition, endPosition, direction):
        pass
    @staticmethod
    def GetOperationResultFromHint(hint):
        pass
    def InsertAt(self, input, position, testPosition=None, resultHint=None):
        pass
    # -- Validation predicates -----------------------------------------------
    def IsAvailablePosition(self, position):
        pass
    def IsEditPosition(self, position):
        pass
    @staticmethod
    def IsValidInputChar(c):
        pass
    @staticmethod
    def IsValidMaskChar(c):
        pass
    @staticmethod
    def IsValidPasswordChar(c):
        pass
    # -- Editing operations ----------------------------------------------------
    def Remove(self, testPosition=None, resultHint=None):
        pass
    def RemoveAt(self, *__args):
        pass
    def Replace(self, input, *__args):
        pass
    def Set(self, input, testPosition=None, resultHint=None):
        pass
    # -- Formatting / verification ---------------------------------------------
    def ToDisplayString(self):
        pass
    def ToString(self, *__args):
        pass
    def VerifyChar(self, input, position, hint):
        pass
    def VerifyEscapeChar(self, input, position):
        pass
    def VerifyString(self, input, testPosition=None, resultHint=None):
        pass
    # -- Dunder protocol -------------------------------------------------------
    def __add__(self, *args):
        pass
    def __getitem__(self, *args):
        pass
    def __init__(self, *args):
        pass
    @staticmethod
    def __new__(self, mask, *__args):
        pass
    def __repr__(self, *args):
        pass
    def __str__(self, *args):
        pass
    # -- Property stubs (getter/setter/deleter are placeholder lambdas) --------
    AllowPromptAsInput = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    AsciiOnly = property(lambda self: object(), lambda self, v: None, lambda self: None)
    AssignedEditPositionCount = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    AvailableEditPositionCount = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    Culture = property(lambda self: object(), lambda self, v: None, lambda self: None)
    EditPositionCount = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    EditPositions = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    IncludeLiterals = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    IncludePrompt = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    IsPassword = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    LastAssignedPosition = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    Length = property(lambda self: object(), lambda self, v: None, lambda self: None)
    Mask = property(lambda self: object(), lambda self, v: None, lambda self: None)
    MaskCompleted = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    MaskFull = property(lambda self: object(), lambda self, v: None, lambda self: None)
    PasswordChar = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    PromptChar = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    ResetOnPrompt = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    ResetOnSpace = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    SkipLiterals = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    # Failure sentinel returned by the Find* methods.
    DefaultPasswordChar = None
    InvalidIndex = -1
| true
| true
|
f7165ac139d7e4ef2705bd723ef20fccdeaff95e
| 39,822
|
py
|
Python
|
task4_crnn.py
|
sankar-mukherjee/DCASE-2018---Task-4-
|
f8034641efef6e60ea721abc5569d9c1aa8ee56d
|
[
"Apache-2.0"
] | null | null | null |
task4_crnn.py
|
sankar-mukherjee/DCASE-2018---Task-4-
|
f8034641efef6e60ea721abc5569d9c1aa8ee56d
|
[
"Apache-2.0"
] | null | null | null |
task4_crnn.py
|
sankar-mukherjee/DCASE-2018---Task-4-
|
f8034641efef6e60ea721abc5569d9c1aa8ee56d
|
[
"Apache-2.0"
] | null | null | null |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################################
# This code is an adaptation from Toni Heittola's code [task1 baseline dcase 2018](https://github.com/DCASE-REPO/dcase2018_baseline/tree/master/task1/)
# Copyright Nicolas Turpault, Romain Serizel, Hamid Eghbal-zadeh, Ankit Parag Shah, 2018, v1.0
# This software is distributed under the terms of the License MIT
#########################################################################
import dcase_util
import sys
import numpy
import os
import random
import pickle
import tensorflow as tf
from keras import backend as K
import keras
#from evaluation_measures import get_f_measure_by_class, event_based_evaluation, segment_based_evaluation
from evaluation_measures import get_f_measure_by_class, event_based_evaluation
from Dataset_dcase2018 import DCASE2018_Task4_DevelopmentSet
# Module-level side effects: set up file logging, then report the Keras
# version in use.
dcase_util.utils.setup_logging(logging_file='task4.log')
print(keras.__version__)
# Pin the Python, NumPy and TensorFlow RNG seeds so experiment runs are
# reproducible.
random.seed(10)
numpy.random.seed(42)
tf.set_random_seed(1234)
# TF1-style session management: create a session on the default graph and
# register it as the Keras backend session.
sess = tf.Session(graph=tf.get_default_graph())
K.set_session(sess)
def main(parameters):
    """Run the two-pass DCASE2018 Task 4 system.

    Stages, each gated by a ``flow.*`` switch in the parameter file:

    1. Feature extraction    - log-mel features for every audio file.
    2. Feature normalization - accumulate mean/std over the training folds.
    3. Learning              - first pass trained on weakly labelled data,
                               then used to tag ``unlabel_in_domain`` files;
                               second pass trained on the enlarged set.
    4. Testing               - strong (frame-level) predictions on test data.
    5. Evaluation            - event-based metrics against the reference.

    Parameters
    ----------
    parameters : dict
        Raw application parameters (typically loaded from task4_crnn.yaml).

    Returns
    -------
    None
    """
    log = dcase_util.ui.ui.FancyLogger()
    log.title('DCASE2018 / Task4')

    # Per-stage overwrite switches (typo 'overwirte_preprocessing' fixed).
    overwrite_preprocessing = False
    overwrite_learning = False
    overwrite_testing = True

    # =====================================================================
    # Parameters
    # =====================================================================
    # Process parameters
    param = dcase_util.containers.DCASEAppParameterContainer(
        parameters,
        path_structure={
            'FEATURE_EXTRACTOR': [
                'DATASET',
                'FEATURE_EXTRACTOR'
            ],
            'FEATURE_NORMALIZER': [
                'DATASET',
                'FEATURE_EXTRACTOR'
            ],
            'LEARNER': [
                'DATASET',
                'FEATURE_EXTRACTOR',
                'FEATURE_NORMALIZER',
                'FEATURE_SEQUENCER',
                'LEARNER'
            ],
            'RECOGNIZER': [
                'DATASET',
                'FEATURE_EXTRACTOR',
                'FEATURE_NORMALIZER',
                'FEATURE_SEQUENCER',
                'LEARNER',
                'RECOGNIZER'
            ],
        }
    ).process()

    # Make sure all system paths exists
    dcase_util.utils.Path().create(
        paths=list(param['path'].values())
    )

    # Initialize: models are loaded lazily from disk when a stage is skipped.
    keras_model_first_pass = None
    keras_model_second_pass = None

    # =====================================================================
    # Dataset
    # =====================================================================
    # Get dataset and initialize it
    db = DCASE2018_Task4_DevelopmentSet(included_content_types=['all'],
                                        local_path="",
                                        data_path=param.get_path('path.dataset'),
                                        audio_paths=[
                                            os.path.join("dataset", "audio", "train", "weak"),
                                            os.path.join("dataset", "audio", "train", "unlabel_in_domain"),
                                            os.path.join("dataset", "audio", "train", "unlabel_out_of_domain"),
                                            os.path.join("dataset", "audio", "test")
                                        ]
                                        ).initialize()

    # Active folds
    folds = db.folds(
        mode=param.get_path('dataset.parameters.evaluation_mode')
    )
    active_fold_list = param.get_path('dataset.parameters.fold_list')
    if active_fold_list:
        folds = list(set(folds).intersection(active_fold_list))

    # =====================================================================
    # Feature extraction stage
    # =====================================================================
    if param.get_path('flow.feature_extraction'):
        log.section_header('Feature Extraction / Train material')

        # Prepare feature extractor
        mel_extractor = dcase_util.features.MelExtractor(
            **param.get_path('feature_extractor.parameters.mel')
        )

        # Loop over all audio files in the dataset and extract features for them.
        for audio_filename in db.audio_files:
            # Get filename for feature data from audio filename
            feature_filename = dcase_util.utils.Path(
                path=audio_filename
            ).modify(
                path_base=param.get_path('path.application.feature_extractor'),
                filename_extension='.cpickle'
            )

            if not os.path.isfile(feature_filename) or overwrite_preprocessing:
                log.line(
                    data=os.path.split(audio_filename)[1],
                    indent=2
                )

                # Load audio data
                audio = dcase_util.containers.AudioContainer().load(
                    filename=audio_filename,
                    mono=True,
                    fs=param.get_path('feature_extractor.fs')
                )

                # Extract features and store them into FeatureContainer, and save it to the disk
                dcase_util.containers.FeatureContainer(
                    data=mel_extractor.extract(audio.data),
                    time_resolution=param.get_path('feature_extractor.hop_length_seconds')
                ).save(
                    filename=feature_filename
                )

        log.foot()

    # =====================================================================
    # Feature normalization stage
    # =====================================================================
    # Filename of the normalization factors. Defined unconditionally because
    # the feature processing chain below needs it even when the
    # 'flow.feature_normalization' stage itself is skipped (previously this
    # raised NameError in that case).
    features_norm_filename = os.path.join(
        param.get_path('path.application.feature_normalizer'),
        'normalize_values.cpickle'
    )

    if param.get_path('flow.feature_normalization'):
        log.section_header('Feature Normalization')

        if not os.path.isfile(features_norm_filename) or overwrite_preprocessing:
            normalizer = dcase_util.data.Normalizer(
                filename=features_norm_filename
            )

            # Loop through all training data, two train folds
            for fold in folds:
                for filename in db.train(fold=fold).unique_files:
                    # Get feature filename
                    feature_filename = dcase_util.utils.Path(
                        path=filename
                    ).modify(
                        path_base=param.get_path('path.application.feature_extractor'),
                        filename_extension='.cpickle',
                    )

                    # Load feature matrix
                    features = dcase_util.containers.FeatureContainer().load(
                        filename=feature_filename
                    )

                    # Accumulate statistics
                    normalizer.accumulate(
                        data=features.data
                    )

            # Finalize and save
            normalizer.finalize().save()

        log.foot()

    # Create processing chain for features
    feature_processing_chain = dcase_util.processors.ProcessingChain()
    for chain in param.get_path('feature_processing_chain'):
        processor_name = chain.get('processor_name')
        init_parameters = chain.get('init_parameters', {})

        # Inject the normalization-factor file into the normalization processor
        if processor_name == 'dcase_util.processors.NormalizationProcessor':
            init_parameters['filename'] = features_norm_filename

        if init_parameters.get('enable') is None or init_parameters.get('enable') is True:
            feature_processing_chain.push_processor(
                processor_name=processor_name,
                init_parameters=init_parameters,
            )

    # =====================================================================
    # Learning stage
    # =====================================================================
    if param.get_path('flow.learning'):
        log.section_header('Learning')

        # setup keras parameters
        dcase_util.keras.setup_keras(
            seed=param.get_path('learner.parameters.random_seed'),
            profile=param.get_path('learner.parameters.keras_profile'),
            backend=param.get_path('learner.parameters.backend'),
            device=param.get_path('learner.parameters.device'),
            verbose=False
        )

        # encoder used to convert text labels into vector
        many_hot_encoder = dcase_util.data.ManyHotEncoder(
            label_list=db.tags(),
            time_resolution=1
        )

        # =====================================================================
        # Training first pass
        # =====================================================================
        fold = 1

        # Get model filename
        fold1_model_filename = os.path.join(
            param.get_path('path.application.learner'),
            'model_fold_{fold}.h5'.format(fold=fold)
        )

        if not os.path.isfile(fold1_model_filename) or overwrite_learning:
            # Split the dataset into training and validation files
            training_files, validation_files = db.validation_split(
                fold=fold,
                split_type='random',
                validation_amount=param.get_path('learner.parameters.model.first_pass.validation_amount'),
                verbose=True
            )

            batch_size = param.get_path('learner.parameters.model.first_pass.fit.batch_size')
            shuffle = param.get_path('learner.parameters.model.first_pass.fit.shuffle')

            # Get items (with labels) associated with training files
            training_items = db.train(fold=fold).filter(file_list=training_files)

            # Create the generator, which convert filename and item into arrays batch_X, batch_y in right formats
            training_generator = data_generator(training_items, param.get_path('path.application.feature_extractor'),
                                                many_hot_encoder, feature_processing_chain,
                                                batch_size=batch_size, shuffle=shuffle)

            validation_items = db.train(fold=fold).filter(file_list=validation_files)
            validation_generator = data_generator(validation_items, param.get_path('path.application.feature_extractor'),
                                                  many_hot_encoder, feature_processing_chain,
                                                  batch_size=batch_size, shuffle=False)

            # Update constants with useful information to setup the model
            model_parameter_constants = {
                'NB_CLASSES': db.tag_count(),
                'INPUT_FREQUENCIES': param.get_path('feature_extractor.parameters.mel.n_mels'),
                'INPUT_SEQUENCE_LENGTH': param.get_path('feature_sequencer.sequence_length'),
            }
            model_parameter_constants.update(param.get_path('learner.parameters.model.constants', {}))

            # Load the sequential keras model defined in the YAML.
            keras_model_first_pass = dcase_util.keras.create_sequential_model(
                model_parameter_list=param.get_path('learner.parameters.model.first_pass.config'),
                constants=model_parameter_constants
            )

            # Print the model configuration
            keras_model_first_pass.summary(print_fn=log.line)

            # Create optimizer object from info given in YAML
            param.set_path(
                path='learner.parameters.compile.optimizer',
                new_value=dcase_util.keras.create_optimizer(
                    class_name=param.get_path('learner.parameters.optimizer.class_name'),
                    config=param.get_path('learner.parameters.optimizer.config')
                )
            )

            # Compile model
            keras_model_first_pass.compile(
                **param.get_path('learner.parameters.compile')
            )

            epochs = param.get_path('learner.parameters.model.first_pass.fit.epochs')

            # Setup callbacks used during training
            callback_list = [
                dcase_util.keras.ProgressLoggerCallback(
                    epochs=epochs,
                    metric=param.get_path('learner.parameters.compile.metrics')[0],
                    loss=param.get_path('learner.parameters.compile.loss'),
                    output_type='logging',
                    **param.get_path('learner.parameters.callbacks.ProgressLoggerCallback')
                )
            ]
            if param.get_path('learner.parameters.callbacks.StopperCallback'):
                callback_list.append(
                    dcase_util.keras.StopperCallback(
                        epochs=epochs,
                        **param.get_path('learner.parameters.callbacks.StopperCallback')
                    )
                )
            if param.get_path('learner.parameters.callbacks.StasherCallback'):
                callback_list.append(
                    dcase_util.keras.StasherCallback(
                        epochs=epochs,
                        **param.get_path('learner.parameters.callbacks.StasherCallback')
                    )
                )

            processing_interval = param.get_path(
                'learner.parameters.callbacks.ProgressLoggerCallback.processing_interval'
            )

            # Iterate through epochs manually so external metrics can be
            # injected into the callbacks between chunks of epochs.
            for epoch_start in range(0, epochs, processing_interval):
                epoch_end = epoch_start + processing_interval
                # Make sure we have only specified amount of epochs
                if epoch_end > epochs:
                    epoch_end = epochs

                # Train keras_model_first_pass
                keras_model_first_pass.fit_generator(
                    generator=training_generator,
                    steps_per_epoch=len(training_files) // batch_size,
                    validation_data=validation_generator,
                    validation_steps=len(validation_files) // batch_size,
                    callbacks=callback_list,
                    verbose=0,
                    initial_epoch=epoch_start,
                    epochs=epoch_end
                )

                # Get f_measures of the current epoch
                val_macro_f_measure = get_f_measure_by_class(keras_model_first_pass, db.tag_count(),
                                                             validation_generator,
                                                             len(validation_files) // batch_size)
                val_macro_f_measure = val_macro_f_measure.mean()

                tra_macro_f_measure = get_f_measure_by_class(keras_model_first_pass, db.tag_count(),
                                                             training_generator,
                                                             len(training_files) // batch_size,
                                                             )
                tra_macro_f_measure = tra_macro_f_measure.mean()

                # Inject external metric values to the callbacks
                for callback in callback_list:
                    if hasattr(callback, 'set_external_metric_value'):
                        callback.set_external_metric_value(
                            metric_label='val_macro_f_measure',
                            metric_value=val_macro_f_measure
                        )
                        callback.set_external_metric_value(
                            metric_label='tra_macro_f_measure',
                            metric_value=tra_macro_f_measure
                        )

                # Manually update callbacks
                for callback in callback_list:
                    if hasattr(callback, 'update'):
                        callback.update()

                # Check we need to stop training
                stop_training = False
                for callback in callback_list:
                    if hasattr(callback, 'stop'):
                        if callback.stop():
                            log.line("Early stopping")
                            stop_training = True

                if stop_training:
                    # Stop the training loop
                    break

            # Fetch best model
            for callback in callback_list:
                if isinstance(callback, dcase_util.keras.StasherCallback):
                    callback.log()
                    best_weights = callback.get_best()['weights']
                    if best_weights:
                        keras_model_first_pass.set_weights(best_weights)
                    break

            # Save trained model
            keras_model_first_pass.save(fold1_model_filename)

            log.foot()

        # =======
        # Calculate best thresholds
        # =======
        thresholds_filename = os.path.join(
            param.get_path('path.application.learner'),
            'thresholds_{fold}.p'.format(fold=fold)
        )
        if not os.path.isfile(thresholds_filename) or overwrite_learning:
            training_files, validation_files = db.validation_split(
                fold=fold,
                split_type='random',
                validation_amount=param.get_path('learner.parameters.model.first_pass.validation_amount'),
                verbose=True
            )
            batch_size = param.get_path('learner.parameters.model.first_pass.fit.batch_size')
            validation_items = db.train(fold=fold).filter(file_list=validation_files)
            validation_generator = data_generator(validation_items, param.get_path('path.application.feature_extractor'),
                                                  many_hot_encoder, feature_processing_chain,
                                                  batch_size=batch_size, shuffle=False)

            # Load model if not trained during this run
            if not keras_model_first_pass:
                keras_model_first_pass = keras.models.load_model(fold1_model_filename)

            thresholds = [0] * db.tag_count()
            max_f_measure = [-numpy.inf] * db.tag_count()
            for threshold in numpy.arange(0., 1 + 1e-6, 0.1):
                # Assign current threshold to each class
                current_thresholds = [threshold] * db.tag_count()

                # Calculate f_measures with the current thresholds
                macro_f_measure = get_f_measure_by_class(keras_model_first_pass, db.tag_count(), validation_generator,
                                                         len(validation_files) // batch_size,
                                                         current_thresholds)

                # Update thresholds for class with better f_measures
                for i, label in enumerate(db.tags()):
                    f_measure = macro_f_measure[i]
                    if f_measure > max_f_measure[i]:
                        max_f_measure[i] = f_measure
                        thresholds[i] = threshold

            for i, label in enumerate(db.tags()):
                log.line("{:30}, threshold: {}".format(label, thresholds[i]))

            # Save to the same 'thresholds_{fold}.p' path that the load branch
            # below uses. Previously the filename was re-built here as
            # 'thresholds.p', so saved thresholds were never found again.
            with open(thresholds_filename, "wb") as threshold_file:
                pickle.dump(thresholds, threshold_file)
        else:
            with open(thresholds_filename, "rb") as threshold_file:
                thresholds = pickle.load(threshold_file)

        # =====================================================================
        # Predict stage from weak to predict unlabel_in_domain tags
        # =====================================================================
        log.section_header('Predict 1st pass, add labels to unlabel_in_domain data')

        # Get results filename
        fold_results_filename = os.path.join(
            param.get_path('path.application.recognizer'),
            'pred_weak_fold_{fold}.txt'.format(fold=fold)
        )

        if not os.path.isfile(fold_results_filename) or overwrite_testing:
            # Initialize results container
            res = dcase_util.containers.MetaDataContainer(
                filename=fold_results_filename
            )

            # Load model if not yet loaded
            if not keras_model_first_pass:
                keras_model_first_pass = keras.models.load_model(fold1_model_filename)

            # Loop through all test files from the current cross-validation fold
            for item in db.test(fold=fold):
                # Get feature filename
                feature_filename = dcase_util.utils.Path(
                    path=item.filename
                ).modify(
                    path_base=param.get_path('path.application.feature_extractor'),
                    filename_extension='.cpickle'
                )

                features = feature_processing_chain.process(
                    filename=feature_filename
                )
                input_data = features.data.reshape(features.shape[:-1]).T  # (500, 64)
                input_data = input_data.reshape((1,) + input_data.shape)  # (1, 500, 64)

                # Get network output
                probabilities = keras_model_first_pass.predict(x=input_data)

                # Binarization of the network output using the per-class
                # thresholds tuned above
                frame_decisions = dcase_util.data.ProbabilityEncoder().binarization(
                    probabilities=probabilities,
                    binarization_type='class_threshold',
                    threshold=thresholds,
                    time_axis=0
                )

                estimated_tags = dcase_util.data.DecisionEncoder(
                    label_list=db.tags()
                ).many_hot(
                    frame_decisions=frame_decisions,
                    time_axis=0
                )

                # Store result into results container
                res.append(
                    {
                        'filename': item.filename,
                        'tags': estimated_tags[0]
                    }
                )

            # Save results container
            res.save()
        log.foot()

        # =====================================================================
        # Learning stage 2nd pass, learn from weak and unlabel_in_domain annotated data
        # =====================================================================
        fold = 2
        log.line(data='Fold [{fold}]'.format(fold=fold), indent=2)

        # Get model filename
        fold2_model_filename = os.path.join(
            param.get_path('path.application.learner'),
            'model_fold_{fold}.h5'.format(fold=fold)
        )

        if not os.path.isfile(fold2_model_filename) or overwrite_learning:
            model_parameter_constants = {
                'NB_CLASSES': db.tag_count(),
                'INPUT_FREQUENCIES': param.get_path('feature_extractor.parameters.mel.n_mels'),
                'INPUT_SEQUENCE_LENGTH': param.get_path('feature_sequencer.sequence_length'),
            }
            model_parameter_constants.update(param.get_path('learner.parameters.model.constants', {}))

            keras_model_second_pass = dcase_util.keras.create_sequential_model(
                model_parameter_list=param.get_path('learner.parameters.model.second_pass.config'),
                constants=model_parameter_constants
            )
            keras_model_second_pass.summary(print_fn=log.line)

            # Create optimizer object
            param.set_path(
                path='learner.parameters.compile.optimizer',
                new_value=dcase_util.keras.create_optimizer(
                    class_name=param.get_path('learner.parameters.optimizer.class_name'),
                    config=param.get_path('learner.parameters.optimizer.config')
                )
            )

            # Compile model
            keras_model_second_pass.compile(
                **param.get_path('learner.parameters.compile')
            )

            # Get annotations from the 1st pass model
            fold1_results_filename = os.path.join(
                param.get_path('path.application.recognizer'),
                'pred_weak_fold_{fold}.txt'.format(fold=1)
            )

            # Load annotations
            predictions_first_pass = dcase_util.containers.MetaDataContainer(
                filename=fold1_results_filename
            ).load()

            # Split the dataset into train and validation. If "weak" is provided, files from weak.csv are used to
            # validate the model. Else, give a percentage which will be used
            if param.get_path('learner.parameters.model.second_pass.validation_amount') == "weak":
                training_files = predictions_first_pass.unique_files
                training_items = predictions_first_pass
                validation_files = db.train(fold=1).unique_files
                validation_items = db.train(fold=1)
            else:
                # Get validation files
                training_files, validation_files = db.validation_split(
                    fold=fold,
                    split_type='random',
                    validation_amount=param.get_path('learner.parameters.model.second_pass.validation_amount'),
                    verbose=False
                )
                training_fold2 = predictions_first_pass + db.train(fold=1)
                training_items = training_fold2.filter(file_list=training_files)
                validation_items = training_fold2.filter(file_list=validation_files)

            processing_interval = param.get_path(
                'learner.parameters.callbacks.ProgressLoggerCallback.processing_interval'
            )
            epochs = param.get_path('learner.parameters.model.second_pass.fit.epochs')
            batch_size = param.get_path('learner.parameters.model.second_pass.fit.batch_size')
            shuffle = param.get_path('learner.parameters.model.second_pass.fit.shuffle')

            # Create generators, which convert filename and item into arrays batch_X, batch_y in right formats
            training_generator = data_generator(training_items, param.get_path('path.application.feature_extractor'),
                                                many_hot_encoder, feature_processing_chain,
                                                batch_size=batch_size, shuffle=shuffle, mode="strong")

            validation_generator = data_generator(validation_items, param.get_path('path.application.feature_extractor'),
                                                  many_hot_encoder,
                                                  feature_processing_chain,
                                                  batch_size=batch_size, shuffle=False, mode="strong")

            # Initialize callbacks used during training
            callback_list = [
                dcase_util.keras.ProgressLoggerCallback(
                    epochs=epochs,
                    metric=param.get_path('learner.parameters.compile.metrics')[0],
                    loss=param.get_path('learner.parameters.compile.loss'),
                    output_type='logging',
                    **param.get_path('learner.parameters.callbacks.ProgressLoggerCallback')
                )
            ]
            if param.get_path('learner.parameters.callbacks.StopperCallback'):
                callback_list.append(
                    dcase_util.keras.StopperCallback(
                        epochs=epochs,
                        **param.get_path('learner.parameters.callbacks.StopperCallback')
                    )
                )
            if param.get_path('learner.parameters.callbacks.StasherCallback'):
                callback_list.append(
                    dcase_util.keras.StasherCallback(
                        epochs=epochs,
                        **param.get_path('learner.parameters.callbacks.StasherCallback')
                    )
                )

            for epoch_start in range(0, epochs, processing_interval):
                epoch_end = epoch_start + processing_interval
                # Make sure we have only specified amount of epochs
                if epoch_end > epochs:
                    epoch_end = epochs

                # Train keras_model_second_pass
                keras_model_second_pass.fit_generator(
                    generator=training_generator,
                    steps_per_epoch=len(training_files) // batch_size,
                    validation_data=validation_generator,
                    validation_steps=len(validation_files) // batch_size,
                    callbacks=callback_list,
                    verbose=0,
                    initial_epoch=epoch_start,
                    epochs=epoch_end
                )

                # Calculate external metrics, f_measure of the current epoch
                val_macro_f_measure = get_f_measure_by_class(keras_model_second_pass, db.tag_count(),
                                                             validation_generator,
                                                             len(validation_files) // batch_size, )
                val_macro_f_measure = val_macro_f_measure.mean()

                tra_macro_f_measure = get_f_measure_by_class(keras_model_second_pass, db.tag_count(),
                                                             training_generator,
                                                             len(training_files) // batch_size,
                                                             )
                tra_macro_f_measure = tra_macro_f_measure.mean()

                # Inject external metric values to the callbacks
                for callback in callback_list:
                    if hasattr(callback, 'set_external_metric_value'):
                        callback.set_external_metric_value(
                            metric_label='val_macro_f_measure',
                            metric_value=val_macro_f_measure
                        )
                        callback.set_external_metric_value(
                            metric_label='tra_macro_f_measure',
                            metric_value=tra_macro_f_measure
                        )

                # Manually update callbacks
                for callback in callback_list:
                    if hasattr(callback, 'update'):
                        callback.update()

                # Check we need to stop training
                stop_training = False
                for callback in callback_list:
                    if hasattr(callback, 'stop'):
                        if callback.stop():
                            log.line("Early stopping")
                            stop_training = True

                if stop_training:
                    # Stop the training loop
                    break

            # Fetch best model
            for callback in callback_list:
                if isinstance(callback, dcase_util.keras.StasherCallback):
                    callback.log()
                    best_weights = callback.get_best()['weights']
                    if best_weights:
                        keras_model_second_pass.set_weights(best_weights)
                    break

            # Save trained model
            keras_model_second_pass.save(fold2_model_filename)

        log.foot()

    # =====================================================================
    # Testing stage, get strong annotations
    # =====================================================================
    if param.get_path('flow.testing'):
        log.section_header('Testing')

        # Get results filename
        fold_results_filename = os.path.join(
            param.get_path('path.application.recognizer'),
            'res_fold_{fold}.txt'.format(fold=2)
        )

        # Get model filename
        fold2_model_filename = os.path.join(
            param.get_path('path.application.learner'),
            'model_fold_{fold}.h5'.format(fold=2)
        )

        if not os.path.isfile(fold_results_filename) or overwrite_testing:
            # Load model if not yet loaded
            if not keras_model_second_pass:
                keras_model_second_pass = keras.models.load_model(fold2_model_filename)

            # Initialize results container
            res = dcase_util.containers.MetaDataContainer(
                filename=fold_results_filename
            )

            # Loop through all test files from the current cross-validation fold
            for item in db.test(fold=2):
                # Get feature filename
                feature_filename = dcase_util.utils.Path(
                    path=item.filename
                ).modify(
                    path_base=param.get_path('path.application.feature_extractor'),
                    filename_extension='.cpickle'
                )

                # Get features array
                features = feature_processing_chain.process(
                    filename=feature_filename
                )
                input_data = features.data.reshape(features.shape[:-1]).T  # (500, 64)
                # Create a batch with only one file
                input_data = input_data.reshape((1,) + input_data.shape)  # (1, 500, 64)

                # Get network output for strong data
                probabilities = keras_model_second_pass.predict(input_data)
                # only one file in the batch
                probabilities = probabilities[0]

                if param.get_path('recognizer.frame_binarization.enable'):
                    # Binarization of the network output
                    frame_decisions = dcase_util.data.ProbabilityEncoder().binarization(
                        probabilities=probabilities,
                        binarization_type=param.get_path('recognizer.frame_binarization.binarization_type'),
                        threshold=param.get_path('recognizer.frame_binarization.threshold'),
                        time_axis=0
                    )
                else:
                    # Fall back to a fixed global threshold of 0.5
                    frame_decisions = dcase_util.data.ProbabilityEncoder().binarization(
                        probabilities=probabilities,
                        binarization_type="global_threshold",
                        threshold=0.5,
                        time_axis=0
                    )

                decision_encoder = dcase_util.data.DecisionEncoder(
                    label_list=db.tags()
                )

                if param.get_path('recognizer.process_activity.enable'):
                    frame_decisions = decision_encoder.process_activity(
                        frame_decisions,
                        window_length=param.get_path('recognizer.process_activity.window_length'),
                        time_axis=0)

                # Loop-invariant: frames -> seconds conversion factor
                hop_length_seconds = param.get_path('feature_extractor.hop_length_seconds')

                for i, label in enumerate(db.tags()):
                    # given a list of ones, give the onset and offset in frames
                    estimated_events = decision_encoder.find_contiguous_regions(
                        activity_array=frame_decisions[:, i]
                    )

                    for [onset, offset] in estimated_events:
                        # Store result into results container, convert frames to seconds
                        res.append(
                            {
                                'filename': item.filename,
                                'event_label': label,
                                'onset': onset * hop_length_seconds,
                                'offset': offset * hop_length_seconds
                            }
                        )

            # Save results container
            res.save()
        log.foot()

    # =====================================================================
    # Evaluation stage, get results
    # =====================================================================
    if param.get_path('flow.evaluation'):
        log.section_header('Evaluation')
        stats_filename = os.path.join(param.get_path('path.application.recognizer'), 'evaluation.txt')

        if not os.path.isfile(stats_filename) or overwrite_testing:
            fold_results_filename = os.path.join(
                param.get_path('path.application.recognizer'),
                'res_fold_{fold}.txt'.format(fold=fold)
            )
            # test data used to evaluate the system
            reference_event_list = db.eval(fold=fold)

            # predictions done during the step test before
            estimated_event_list = dcase_util.containers.MetaDataContainer().load(
                filename=fold_results_filename
            )

            # Calculate the metric
            event_based_metric = event_based_evaluation(reference_event_list, estimated_event_list)

            with open(stats_filename, "w") as stats_file:
                stats_file.write(str(event_based_metric))

            log.line(str(event_based_metric), indent=4)
        log.foot()
def data_generator(items, feature_path, many_hot_encoder, feature_processing_chain, batch_size=1, shuffle=True, mode='weak'):
    """Yield endless batches of (features, targets) built from a MetaDataContainer.

    Parameters
    ----------
    items : MetaDataContainer
        Items to generate batches from.
    feature_path : str
        Base path under which the extracted feature files are stored.
    many_hot_encoder : ManyHotEncoder
        Encoder turning text labels into binary target vectors.
    feature_processing_chain : ProcessingChain
        Chain applied to each stored feature file.
    batch_size : int
        Number of items per yielded batch.
    shuffle : bool
        Reshuffle ``items`` in place at the start of every epoch.
    mode : str
        "weak" yields one target vector per file, "strong" repeats that
        vector once per frame.

    Yields
    ------
    (numpy.ndarray, numpy.ndarray)
        One batch of inputs and the matching targets.
    """

    def _load_pair(item):
        # Map the audio filename onto the stored feature file and process it.
        feature_filename = dcase_util.utils.Path(
            path=item.filename
        ).modify(
            path_base=feature_path,
            filename_extension='.cpickle',
        )
        features = feature_processing_chain.process(
            filename=feature_filename
        )
        # Drop the trailing singleton axis and put time on the first axis.
        inputs = features.data.reshape(features.shape[:-1]).T
        # Encode the file-level tags as a flat many-hot vector.
        encoded = many_hot_encoder.encode(item.tags, length_frames=1).data.flatten()
        if mode == "strong":
            # Strong mode: replicate the file-level labels for every frame.
            encoded = numpy.repeat(encoded.reshape((1,) + encoded.shape), inputs.shape[0], axis=0)
        return inputs, encoded

    while True:
        batch_inputs = []
        batch_targets = []
        if shuffle:
            # In-place reshuffle at the start of each pass over the data.
            random.shuffle(items)
        for item in items:
            inputs, encoded = _load_pair(item)
            if batch_size == 1:
                # Single-item batch: wrap the arrays directly.
                batch_inputs = inputs.reshape((1,) + inputs.shape)
                batch_targets = encoded.reshape((1,) + encoded.shape)
            else:
                batch_inputs.append(inputs)
                batch_targets.append(encoded)
            if len(batch_inputs) == batch_size and len(batch_targets) == batch_size:
                yield numpy.array(batch_inputs), numpy.array(batch_targets)
                batch_inputs = []
                batch_targets = []
if __name__ == "__main__":
    # Load the application configuration from the YAML parameter file.
    app_parameters = dcase_util.containers.DictContainer().load(
        filename='task4_crnn.yaml'
    )
    try:
        sys.exit(main(app_parameters))
    except (ValueError, IOError) as err:
        # Surface the error message as the process exit status.
        sys.exit(err)
| 42.681672
| 151
| 0.540405
|
data_path=param.get_path('path.dataset'),
audio_paths=[
os.path.join("dataset", "audio", "train", "weak"),
os.path.join("dataset", "audio", "train", "unlabel_in_domain"),
os.path.join("dataset", "audio", "train", "unlabel_out_of_domain"),
os.path.join("dataset", "audio", "test")
]
).initialize()
# Active folds
folds = db.folds(
mode=param.get_path('dataset.parameters.evaluation_mode')
)
active_fold_list = param.get_path('dataset.parameters.fold_list')
if active_fold_list:
folds = list(set(folds).intersection(active_fold_list))
# =====================================================================
# Feature extraction stage
# =====================================================================
if param.get_path('flow.feature_extraction'):
log.section_header('Feature Extraction / Train material')
# Prepare feature extractor
mel_extractor = dcase_util.features.MelExtractor(
**param.get_path('feature_extractor.parameters.mel')
)
# Loop over all audio files in the dataset and extract features for them.
# for audio_filename in db.audio_files:
for audio_filename in db.audio_files:
# Get filename for feature data from audio filename
feature_filename = dcase_util.utils.Path(
path=audio_filename
).modify(
path_base=param.get_path('path.application.feature_extractor'),
filename_extension='.cpickle'
)
if not os.path.isfile(feature_filename) or overwirte_preprocessing:
log.line(
data=os.path.split(audio_filename)[1],
indent=2
)
# Load audio data
audio = dcase_util.containers.AudioContainer().load(
filename=audio_filename,
mono=True,
fs=param.get_path('feature_extractor.fs')
)
# Extract features and store them into FeatureContainer, and save it to the disk
dcase_util.containers.FeatureContainer(
data=mel_extractor.extract(audio.data),
time_resolution=param.get_path('feature_extractor.hop_length_seconds')
).save(
filename=feature_filename
)
log.foot()
# =====================================================================
# Feature normalization stage
# =====================================================================
if param.get_path('flow.feature_normalization'):
log.section_header('Feature Normalization')
# Get filename for the normalization factors
features_norm_filename = os.path.join(
param.get_path('path.application.feature_normalizer'),
'normalize_values.cpickle'
)
if not os.path.isfile(features_norm_filename) or overwirte_preprocessing:
normalizer = dcase_util.data.Normalizer(
filename=features_norm_filename
)
# Loop through all training data, two train folds
for fold in folds:
for filename in db.train(fold=fold).unique_files:
# Get feature filename
feature_filename = dcase_util.utils.Path(
path=filename
).modify(
path_base=param.get_path('path.application.feature_extractor'),
filename_extension='.cpickle',
)
# Load feature matrix
features = dcase_util.containers.FeatureContainer().load(
filename=feature_filename
)
# Accumulate statistics
normalizer.accumulate(
data=features.data
)
# Finalize and save
normalizer.finalize().save()
log.foot()
# Create processing chain for features
feature_processing_chain = dcase_util.processors.ProcessingChain()
for chain in param.get_path('feature_processing_chain'):
processor_name = chain.get('processor_name')
init_parameters = chain.get('init_parameters', {})
# Inject parameters
if processor_name == 'dcase_util.processors.NormalizationProcessor':
init_parameters['filename'] = features_norm_filename
if init_parameters.get('enable') is None or init_parameters.get('enable') is True:
feature_processing_chain.push_processor(
processor_name=processor_name,
init_parameters=init_parameters,
)
# =====================================================================
# Learning stage
# =====================================================================
if param.get_path('flow.learning'):
log.section_header('Learning')
# setup keras parameters
dcase_util.keras.setup_keras(
seed=param.get_path('learner.parameters.random_seed'),
profile=param.get_path('learner.parameters.keras_profile'),
backend=param.get_path('learner.parameters.backend'),
device=param.get_path('learner.parameters.device'),
verbose=False
)
# encoder used to convert text labels into vector
many_hot_encoder = dcase_util.data.ManyHotEncoder(
label_list=db.tags(),
time_resolution=1
)
# =====================================================================
# Training first pass
# =====================================================================
fold = 1
# Get model filename
fold1_model_filename = os.path.join(
param.get_path('path.application.learner'),
'model_fold_{fold}.h5'.format(fold=fold)
)
if not os.path.isfile(fold1_model_filename) or overwrite_learning:
# Split the dataset into training and validation files
training_files, validation_files = db.validation_split(
fold=fold,
split_type='random',
validation_amount=param.get_path('learner.parameters.model.first_pass.validation_amount'),
verbose=True
)
batch_size = param.get_path('learner.parameters.model.first_pass.fit.batch_size')
shuffle = param.get_path('learner.parameters.model.first_pass.fit.shuffle')
# Get items (with labels) associated with training files
training_items = db.train(fold=fold).filter(file_list=training_files)
# Create the generator, which convert filename and item into arrays batch_X, batch_y in right formats
training_generator = data_generator(training_items, param.get_path('path.application.feature_extractor'),
many_hot_encoder, feature_processing_chain,
batch_size=batch_size, shuffle=shuffle)
validation_items = db.train(fold=fold).filter(file_list=validation_files)
validation_generator = data_generator(validation_items, param.get_path('path.application.feature_extractor'),
many_hot_encoder, feature_processing_chain,
batch_size=batch_size, shuffle=False)
# Update constants with useful information to setup the model
model_parameter_constants = {
'NB_CLASSES': db.tag_count(),
'INPUT_FREQUENCIES': param.get_path('feature_extractor.parameters.mel.n_mels'),
'INPUT_SEQUENCE_LENGTH': param.get_path('feature_sequencer.sequence_length'),
}
model_parameter_constants.update(param.get_path('learner.parameters.model.constants', {}))
# Load the sequential keras model defined in the YAML.
keras_model_first_pass = dcase_util.keras.create_sequential_model(
model_parameter_list=param.get_path('learner.parameters.model.first_pass.config'),
constants=model_parameter_constants
)
# Print the model configuration
keras_model_first_pass.summary(print_fn=log.line)
# Create optimizer object from info given in YAML
param.set_path(
path='learner.parameters.compile.optimizer',
new_value=dcase_util.keras.create_optimizer(
class_name=param.get_path('learner.parameters.optimizer.class_name'),
config=param.get_path('learner.parameters.optimizer.config')
)
)
# Compile model
keras_model_first_pass.compile(
**param.get_path('learner.parameters.compile')
)
epochs = param.get_path('learner.parameters.model.first_pass.fit.epochs')
# Setup callbacks used during training
callback_list = [
dcase_util.keras.ProgressLoggerCallback(
epochs=epochs,
metric=param.get_path('learner.parameters.compile.metrics')[0],
loss=param.get_path('learner.parameters.compile.loss'),
output_type='logging',
**param.get_path('learner.parameters.callbacks.ProgressLoggerCallback')
)
]
if param.get_path('learner.parameters.callbacks.StopperCallback'):
callback_list.append(
dcase_util.keras.StopperCallback(
epochs=epochs,
**param.get_path('learner.parameters.callbacks.StopperCallback')
)
)
if param.get_path('learner.parameters.callbacks.StasherCallback'):
callback_list.append(
dcase_util.keras.StasherCallback(
epochs=epochs,
**param.get_path('learner.parameters.callbacks.StasherCallback')
)
)
processing_interval = param.get_path(
'learner.parameters.callbacks.ProgressLoggerCallback.processing_interval'
)
epochs = param.get_path('learner.parameters.model.first_pass.fit.epochs')
# Iterate through epoch to be able to manually update callbacks
for epoch_start in range(0, epochs, processing_interval):
epoch_end = epoch_start + processing_interval
# Make sure we have only specified amount of epochs
if epoch_end > epochs:
epoch_end = epochs
# Train keras_model_first_pass
keras_model_first_pass.fit_generator(
generator=training_generator,
steps_per_epoch=len(training_files) // batch_size,
validation_data=validation_generator,
validation_steps=len(validation_files) // batch_size,
callbacks=callback_list,
verbose=0,
initial_epoch=epoch_start,
epochs=epoch_end
)
# Get f_measures of the current epoch
val_macro_f_measure = get_f_measure_by_class(keras_model_first_pass, db.tag_count(), validation_generator,
len(validation_files) // batch_size)
val_macro_f_measure = val_macro_f_measure.mean()
tra_macro_f_measure = get_f_measure_by_class(keras_model_first_pass, db.tag_count(), training_generator,
len(training_files) // batch_size,
)
tra_macro_f_measure = tra_macro_f_measure.mean()
# Inject external metric values to the callbacks
for callback in callback_list:
if hasattr(callback, 'set_external_metric_value'):
callback.set_external_metric_value(
metric_label='val_macro_f_measure',
metric_value=val_macro_f_measure
)
callback.set_external_metric_value(
metric_label='tra_macro_f_measure',
metric_value=tra_macro_f_measure
)
# Manually update callbacks
for callback in callback_list:
if hasattr(callback, 'update'):
callback.update()
# Check we need to stop training
stop_training = False
for callback in callback_list:
if hasattr(callback, 'stop'):
if callback.stop():
log.line("Early stropping")
stop_training = True
if stop_training:
# Stop the training loop
break
# Fetch best model
for callback in callback_list:
if isinstance(callback, dcase_util.keras.StasherCallback):
callback.log()
best_weights = callback.get_best()['weights']
if best_weights:
keras_model_first_pass.set_weights(best_weights)
break
# Save trained model
keras_model_first_pass.save(fold1_model_filename)
log.foot()
# =======
# Calculate best thresholds
# =======
thresholds_filename = os.path.join(
param.get_path('path.application.learner'),
'thresholds_{fold}.p'.format(fold=fold)
)
if not os.path.isfile(thresholds_filename) or overwrite_learning:
training_files, validation_files = db.validation_split(
fold=fold,
split_type='random',
validation_amount=param.get_path('learner.parameters.model.first_pass.validation_amount'),
verbose=True
)
batch_size = param.get_path('learner.parameters.model.first_pass.fit.batch_size')
validation_items = db.train(fold=fold).filter(file_list=validation_files)
validation_generator = data_generator(validation_items, param.get_path('path.application.feature_extractor'),
many_hot_encoder, feature_processing_chain,
batch_size=batch_size, shuffle=False)
# Load model if not trained during this run
if not keras_model_first_pass:
keras_model_first_pass = keras.models.load_model(fold1_model_filename)
thresholds = [0] * db.tag_count()
max_f_measure = [-numpy.inf] * db.tag_count()
for threshold in numpy.arange(0., 1 + 1e-6, 0.1):
# Assign current threshold to each class
current_thresholds = [threshold] * db.tag_count()
# Calculate f_measures with the current thresholds
macro_f_measure = get_f_measure_by_class(keras_model_first_pass, db.tag_count(), validation_generator,
len(validation_files) // batch_size,
current_thresholds)
# Update thresholds for class with better f_measures
for i, label in enumerate(db.tags()):
f_measure = macro_f_measure[i]
if f_measure > max_f_measure[i]:
max_f_measure[i] = f_measure
thresholds[i] = threshold
for i, label in enumerate(db.tags()):
log.line("{:30}, threshold: {}".format(label, thresholds[i]))
thresholds_filename = os.path.join(
param.get_path('path.application.learner'),
'thresholds.p'.format(fold=fold)
)
pickle.dump(thresholds, open(thresholds_filename, "wb"))
else:
thresholds = pickle.load(open(thresholds_filename, "rb"))
# =====================================================================
# Predict stage from weak to predict unlabel_in_domain tags
# =====================================================================
log.section_header('Predict 1st pass, add labels to unlabel_in_domain data')
# Get results filename
fold_results_filename = os.path.join(
param.get_path('path.application.recognizer'),
'pred_weak_fold_{fold}.txt'.format(fold=fold)
)
if not os.path.isfile(fold_results_filename) or overwrite_testing:
# Initialize results container
res = dcase_util.containers.MetaDataContainer(
filename=fold_results_filename
)
# Load model if not yet loaded
if not keras_model_first_pass:
keras_model_first_pass = keras.models.load_model(fold1_model_filename)
# Loop through all test files from the current cross-validation fold
for item in db.test(fold=fold):
# Get feature filename
feature_filename = dcase_util.utils.Path(
path=item.filename
).modify(
path_base=param.get_path('path.application.feature_extractor'),
filename_extension='.cpickle'
)
features = feature_processing_chain.process(
filename=feature_filename
)
input_data = features.data.reshape(features.shape[:-1]).T # (500, 64)
input_data = input_data.reshape((1,)+input_data.shape) # (1, 500, 64)
# Get network output
probabilities = keras_model_first_pass.predict(x=input_data)
# Binarization of the network output
frame_decisions = dcase_util.data.ProbabilityEncoder().binarization(
probabilities=probabilities,
binarization_type='class_threshold',
threshold=thresholds,
time_axis=0
)
estimated_tags = dcase_util.data.DecisionEncoder(
label_list=db.tags()
).many_hot(
frame_decisions=frame_decisions,
time_axis=0
)
# Store result into results container
res.append(
{
'filename': item.filename,
'tags': estimated_tags[0]
}
)
# Save results container
res.save()
log.foot()
# =====================================================================
# Learning stage 2nd pass, learn from weak and unlabel_in_domain annotated data
# =====================================================================
fold = 2
log.line(data='Fold [{fold}]'.format(fold=fold), indent=2)
# Get model filename
fold2_model_filename = os.path.join(
param.get_path('path.application.learner'),
'model_fold_{fold}.h5'.format(fold=fold)
)
if not os.path.isfile(fold2_model_filename) or overwrite_learning:
model_parameter_constants = {
'NB_CLASSES': db.tag_count(),
'INPUT_FREQUENCIES': param.get_path('feature_extractor.parameters.mel.n_mels'),
'INPUT_SEQUENCE_LENGTH': param.get_path('feature_sequencer.sequence_length'),
}
model_parameter_constants.update(param.get_path('learner.parameters.model.constants', {}))
keras_model_second_pass = dcase_util.keras.create_sequential_model(
model_parameter_list=param.get_path('learner.parameters.model.second_pass.config'),
constants=model_parameter_constants
)
keras_model_second_pass.summary(print_fn=log.line)
# Create optimizer object
param.set_path(
path='learner.parameters.compile.optimizer',
new_value=dcase_util.keras.create_optimizer(
class_name=param.get_path('learner.parameters.optimizer.class_name'),
config=param.get_path('learner.parameters.optimizer.config')
)
)
# Compile model
keras_model_second_pass.compile(
**param.get_path('learner.parameters.compile')
)
# Get annotations from the 1st pass model
fold1_results_filename = os.path.join(
param.get_path('path.application.recognizer'),
'pred_weak_fold_{fold}.txt'.format(fold=1)
)
# Load annotations
predictions_first_pass = dcase_util.containers.MetaDataContainer(
filename=fold1_results_filename
).load()
# Split the dataset into train and validation. If "weak" is provided, files from weak.csv are used to
# validate the model. Else, give a percentage which will be used
if param.get_path('learner.parameters.model.second_pass.validation_amount') == "weak":
training_files = predictions_first_pass.unique_files
training_items = predictions_first_pass
validation_files = db.train(fold=1).unique_files
validation_items = db.train(fold=1)
else:
# Get validation files
training_files, validation_files = db.validation_split(
fold=fold,
split_type='random',
validation_amount=param.get_path('learner.parameters.model.second_pass.validation_amount'),
verbose=False
)
training_fold2 = predictions_first_pass + db.train(fold=1)
training_items = training_fold2.filter(file_list=training_files)
validation_items = training_fold2.filter(file_list=validation_files)
processing_interval = param.get_path(
'learner.parameters.callbacks.ProgressLoggerCallback.processing_interval'
)
epochs = param.get_path('learner.parameters.model.second_pass.fit.epochs')
batch_size = param.get_path('learner.parameters.model.second_pass.fit.batch_size')
shuffle = param.get_path('learner.parameters.model.second_pass.fit.shuffle')
# Create generators, which convert filename and item into arrays batch_X, batch_y in right formats
training_generator = data_generator(training_items, param.get_path('path.application.feature_extractor'),
many_hot_encoder, feature_processing_chain,
batch_size=batch_size, shuffle=shuffle, mode="strong")
validation_generator = data_generator(validation_items, param.get_path('path.application.feature_extractor'),
many_hot_encoder,
feature_processing_chain,
batch_size=batch_size, shuffle=False, mode="strong")
# Initialize callbacks used during training
callback_list = [
dcase_util.keras.ProgressLoggerCallback(
epochs=param.get_path('learner.parameters.model.second_pass.fit.epochs'),
metric=param.get_path('learner.parameters.compile.metrics')[0],
loss=param.get_path('learner.parameters.compile.loss'),
output_type='logging',
**param.get_path('learner.parameters.callbacks.ProgressLoggerCallback')
)
]
if param.get_path('learner.parameters.callbacks.StopperCallback'):
callback_list.append(
dcase_util.keras.StopperCallback(
epochs=param.get_path('learner.parameters.model.second_pass.fit.epochs'),
**param.get_path('learner.parameters.callbacks.StopperCallback')
)
)
if param.get_path('learner.parameters.callbacks.StasherCallback'):
callback_list.append(
dcase_util.keras.StasherCallback(
epochs=param.get_path('learner.parameters.model.second_pass.fit.epochs'),
**param.get_path('learner.parameters.callbacks.StasherCallback')
)
)
for epoch_start in range(0, epochs, processing_interval):
epoch_end = epoch_start + processing_interval
# Make sure we have only specified amount of epochs
if epoch_end > epochs:
epoch_end = epochs
# Train keras_model_second_pass
keras_model_second_pass.fit_generator(
generator=training_generator,
steps_per_epoch=len(training_files) // batch_size,
validation_data=validation_generator,
validation_steps=len(validation_files) // batch_size,
callbacks=callback_list,
verbose=0,
initial_epoch=epoch_start,
epochs=epoch_end
)
# Calculate external metrics, f_measure of the current epoch
val_macro_f_measure = get_f_measure_by_class(keras_model_second_pass, db.tag_count(), validation_generator,
len(validation_files) // batch_size, )
val_macro_f_measure = val_macro_f_measure.mean()
tra_macro_f_measure = get_f_measure_by_class(keras_model_second_pass, db.tag_count(), training_generator,
len(training_files) // batch_size,
)
tra_macro_f_measure = tra_macro_f_measure.mean()
# Inject external metric values to the callbacks
for callback in callback_list:
if hasattr(callback, 'set_external_metric_value'):
callback.set_external_metric_value(
metric_label='val_macro_f_measure',
metric_value=val_macro_f_measure
)
callback.set_external_metric_value(
metric_label='tra_macro_f_measure',
metric_value=tra_macro_f_measure
)
# Manually update callbacks
for callback in callback_list:
if hasattr(callback, 'update'):
callback.update()
# Check we need to stop training
stop_training = False
for callback in callback_list:
if hasattr(callback, 'stop'):
if callback.stop():
log.line("Early stropping")
stop_training = True
if stop_training:
# Stop the training loop
break
# Fetch best model
for callback in callback_list:
if isinstance(callback, dcase_util.keras.StasherCallback):
callback.log()
best_weights = callback.get_best()['weights']
if best_weights:
keras_model_second_pass.set_weights(best_weights)
break
# Save trained model
keras_model_second_pass.save(fold2_model_filename)
log.foot()
# =====================================================================
# Testing stage, get strong annotations
# =====================================================================
if param.get_path('flow.testing'):
log.section_header('Testing')
# Get results filename
fold_results_filename = os.path.join(
param.get_path('path.application.recognizer'),
'res_fold_{fold}.txt'.format(fold=2)
)
# Get model filename
fold2_model_filename = os.path.join(
param.get_path('path.application.learner'),
'model_fold_{fold}.h5'.format(fold=2)
)
if not os.path.isfile(fold_results_filename) or overwrite_testing:
# Load model if not yet loaded
if not keras_model_second_pass:
keras_model_second_pass = keras.models.load_model(fold2_model_filename)
# Initialize results container
res = dcase_util.containers.MetaDataContainer(
filename=fold_results_filename
)
# Loop through all test files from the current cross-validation fold
for item in db.test(fold=2):
# Get feature filename
feature_filename = dcase_util.utils.Path(
path=item.filename
).modify(
path_base=param.get_path('path.application.feature_extractor'),
filename_extension='.cpickle'
)
# Get features array
features = feature_processing_chain.process(
filename=feature_filename
)
input_data = features.data.reshape(features.shape[:-1]).T # (500, 64)
# Create a batch with only one file
input_data = input_data.reshape((1,) + input_data.shape) # (1, 500, 64)
# Get network output for strong data
probabilities = keras_model_second_pass.predict(input_data)
# only one file in the batch
probabilities = probabilities[0]
if param.get_path('recognizer.frame_binarization.enable'):
# Binarization of the network output
frame_decisions = dcase_util.data.ProbabilityEncoder().binarization(
probabilities=probabilities,
binarization_type=param.get_path('recognizer.frame_binarization.binarization_type'),
threshold=param.get_path('recognizer.frame_binarization.threshold'),
time_axis=0
)
else:
frame_decisions = dcase_util.data.ProbabilityEncoder().binarization(
probabilities=probabilities,
binarization_type="global_threshold",
threshold=0.5,
time_axis=0
)
decision_encoder = dcase_util.data.DecisionEncoder(
label_list=db.tags()
)
if param.get_path('recognizer.process_activity.enable'):
frame_decisions = decision_encoder.process_activity(
frame_decisions,
window_length=param.get_path('recognizer.process_activity.window_length'),
time_axis=0)
for i, label in enumerate(db.tags()):
# given a list of ones, give the onset and offset in frames
estimated_events = decision_encoder.find_contiguous_regions(
activity_array=frame_decisions[:, i]
)
for [onset, offset] in estimated_events:
hop_length_seconds = param.get_path('feature_extractor.hop_length_seconds')
# Store result into results container, convert frames to seconds
res.append(
{
'filename': item.filename,
'event_label': label,
'onset': onset * hop_length_seconds,
'offset': offset * hop_length_seconds
}
)
# Save results container
res.save()
log.foot()
# =====================================================================
# Evaluation stage, get results
# =====================================================================
if param.get_path('flow.evaluation'):
log.section_header('Evaluation')
stats_filename = os.path.join(param.get_path('path.application.recognizer'), 'evaluation.txt')
if not os.path.isfile(stats_filename) or overwrite_testing:
fold_results_filename = os.path.join(
param.get_path('path.application.recognizer'),
'res_fold_{fold}.txt'.format(fold=fold)
)
# test data used to evaluate the system
reference_event_list = db.eval(fold=fold)
# predictions done during the step test before
estimated_event_list = dcase_util.containers.MetaDataContainer().load(
filename=fold_results_filename
)
# Calculate the metric
event_based_metric = event_based_evaluation(reference_event_list, estimated_event_list)
with open(stats_filename, "w") as stats_file:
stats_file.write(event_based_metric.__str__())
log.line(event_based_metric.__str__(), indent=4)
log.foot()
def data_generator(items, feature_path, many_hot_encoder, feature_processing_chain, batch_size=1, shuffle=True, mode='weak'):
    """Endlessly yield ``(batch_X, batch_y)`` numpy arrays built from *items*.

    Features are loaded from ``feature_path`` through
    ``feature_processing_chain``; tags are encoded with ``many_hot_encoder``.
    With ``mode == "strong"`` the file-level targets are repeated for every
    frame. ``shuffle`` reshuffles *items* in place before each epoch.
    """
    while True:
        inputs_buffer, targets_buffer = [], []
        if shuffle:
            random.shuffle(items)  # new order for every epoch, in place
        for item in items:
            # Locate the stored feature file matching this audio item and
            # run it through the processing chain.
            stored_features = feature_processing_chain.process(
                filename=dcase_util.utils.Path(
                    path=item.filename
                ).modify(
                    path_base=feature_path,
                    filename_extension='.cpickle',
                )
            )
            # Time on axis 0 after dropping the trailing singleton axis.
            data = stored_features.data.reshape(stored_features.shape[:-1]).T
            # Many-hot encoding of the file-level tags.
            labels = many_hot_encoder.encode(item.tags, length_frames=1).data.flatten()
            if mode == "strong":
                # Repeat the labels so every frame carries the same targets.
                labels = numpy.repeat(labels.reshape((1,) + labels.shape), data.shape[0], axis=0)
            if batch_size == 1:
                inputs_buffer = data.reshape((1,) + data.shape)
                targets_buffer = labels.reshape((1,) + labels.shape)
            else:
                inputs_buffer.append(data)
                targets_buffer.append(labels)
            if len(inputs_buffer) == batch_size and len(targets_buffer) == batch_size:
                yield numpy.array(inputs_buffer), numpy.array(targets_buffer)
                inputs_buffer, targets_buffer = [], []
if __name__ == "__main__":
    # Parameters for the whole experiment come from the YAML file.
    params = dcase_util.containers.DictContainer().load(
        filename='task4_crnn.yaml'
    )
    try:
        exit_code = main(params)
    except (ValueError, IOError) as error:
        # Propagate the error message as the exit status.
        exit_code = error
    sys.exit(exit_code)
| true
| true
|
f7165bea9e7574fc50217998b978a4d5e0e583bd
| 29,649
|
py
|
Python
|
tests/contrib/hooks/test_spark_submit_hook.py
|
robobario/airflow
|
702005fe35dc5b996a5c5b8d349ed36036472f00
|
[
"Apache-2.0"
] | null | null | null |
tests/contrib/hooks/test_spark_submit_hook.py
|
robobario/airflow
|
702005fe35dc5b996a5c5b8d349ed36036472f00
|
[
"Apache-2.0"
] | 3
|
2021-03-10T02:58:18.000Z
|
2021-09-29T17:34:48.000Z
|
tests/contrib/hooks/test_spark_submit_hook.py
|
robobario/airflow
|
702005fe35dc5b996a5c5b8d349ed36036472f00
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import unittest
from unittest.mock import call, patch
from airflow import AirflowException
from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook
from airflow.models import Connection
from airflow.utils import db
class TestSparkSubmitHook(unittest.TestCase):
    """Tests for SparkSubmitHook command building and connection resolution."""
    # Application file passed to spark-submit in the command-building tests.
    _spark_job_file = 'test_application.py'
    # Exhaustive hook configuration: one value for every supported option,
    # so the command-building test can assert the full CLI output.
    _config = {
        'conf': {
            'parquet.compression': 'SNAPPY'
        },
        'conn_id': 'default_spark',
        'files': 'hive-site.xml',
        'py_files': 'sample_library.py',
        'archives': 'sample_archive.zip#SAMPLE',
        'jars': 'parquet.jar',
        'packages': 'com.databricks:spark-avro_2.11:3.2.0',
        'exclude_packages': 'org.bad.dependency:1.0.0',
        'repositories': 'http://myrepo.org',
        'total_executor_cores': 4,
        'executor_cores': 4,
        'executor_memory': '22g',
        'keytab': 'privileged_user.keytab',
        'principal': 'user/spark@airflow.org',
        'proxy_user': 'sample_user',
        'name': 'spark-job',
        'num_executors': 10,
        'verbose': True,
        'driver_memory': '3g',
        'java_class': 'com.foo.bar.AppMain',
        # Application arguments are passed through verbatim, including the
        # one containing spaces (string kept as-is; it is test data).
        'application_args': [
            '-f', 'foo',
            '--bar', 'bar',
            '--with-spaces', 'args should keep embdedded spaces',
            'baz'
        ]
    }
@staticmethod
def cmd_args_to_dict(list_cmd):
return_dict = {}
for arg in list_cmd:
if arg.startswith("--"):
pos = list_cmd.index(arg)
return_dict[arg] = list_cmd[pos + 1]
return return_dict
def setUp(self):
db.merge_conn(
Connection(
conn_id='spark_yarn_cluster', conn_type='spark',
host='yarn://yarn-master',
extra='{"queue": "root.etl", "deploy-mode": "cluster"}')
)
db.merge_conn(
Connection(
conn_id='spark_k8s_cluster', conn_type='spark',
host='k8s://https://k8s-master',
extra='{"spark-home": "/opt/spark", ' +
'"deploy-mode": "cluster", ' +
'"namespace": "mynamespace"}')
)
db.merge_conn(
Connection(
conn_id='spark_default_mesos', conn_type='spark',
host='mesos://host', port=5050)
)
db.merge_conn(
Connection(
conn_id='spark_home_set', conn_type='spark',
host='yarn://yarn-master',
extra='{"spark-home": "/opt/myspark"}')
)
db.merge_conn(
Connection(
conn_id='spark_home_not_set', conn_type='spark',
host='yarn://yarn-master')
)
db.merge_conn(
Connection(
conn_id='spark_binary_set', conn_type='spark',
host='yarn', extra='{"spark-binary": "custom-spark-submit"}')
)
db.merge_conn(
Connection(
conn_id='spark_binary_and_home_set', conn_type='spark',
host='yarn',
extra='{"spark-home": "/path/to/spark_home", ' +
'"spark-binary": "custom-spark-submit"}')
)
db.merge_conn(
Connection(
conn_id='spark_standalone_cluster', conn_type='spark',
host='spark://spark-standalone-master:6066',
extra='{"spark-home": "/path/to/spark_home", "deploy-mode": "cluster"}')
)
db.merge_conn(
Connection(
conn_id='spark_standalone_cluster_client_mode', conn_type='spark',
host='spark://spark-standalone-master:6066',
extra='{"spark-home": "/path/to/spark_home", "deploy-mode": "client"}')
)
def test_build_spark_submit_command(self):
# Given
hook = SparkSubmitHook(**self._config)
# When
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_build_cmd = [
'spark-submit',
'--master', 'yarn',
'--conf', 'parquet.compression=SNAPPY',
'--files', 'hive-site.xml',
'--py-files', 'sample_library.py',
'--archives', 'sample_archive.zip#SAMPLE',
'--jars', 'parquet.jar',
'--packages', 'com.databricks:spark-avro_2.11:3.2.0',
'--exclude-packages', 'org.bad.dependency:1.0.0',
'--repositories', 'http://myrepo.org',
'--num-executors', '10',
'--total-executor-cores', '4',
'--executor-cores', '4',
'--executor-memory', '22g',
'--driver-memory', '3g',
'--keytab', 'privileged_user.keytab',
'--principal', 'user/spark@airflow.org',
'--proxy-user', 'sample_user',
'--name', 'spark-job',
'--class', 'com.foo.bar.AppMain',
'--verbose',
'test_application.py',
'-f', 'foo',
'--bar', 'bar',
'--with-spaces', 'args should keep embdedded spaces',
'baz'
]
self.assertEqual(expected_build_cmd, cmd)
@patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
def test_spark_process_runcmd(self, mock_popen):
# Given
mock_popen.return_value.stdout = io.StringIO('stdout')
mock_popen.return_value.stderr = io.StringIO('stderr')
mock_popen.return_value.wait.return_value = 0
# When
hook = SparkSubmitHook(conn_id='')
hook.submit()
# Then
self.assertEqual(mock_popen.mock_calls[0],
call(['spark-submit', '--master', 'yarn',
'--name', 'default-name', ''],
stderr=-2, stdout=-1, universal_newlines=True, bufsize=-1))
def test_resolve_should_track_driver_status(self):
# Given
hook_default = SparkSubmitHook(conn_id='')
hook_spark_yarn_cluster = SparkSubmitHook(conn_id='spark_yarn_cluster')
hook_spark_k8s_cluster = SparkSubmitHook(conn_id='spark_k8s_cluster')
hook_spark_default_mesos = SparkSubmitHook(conn_id='spark_default_mesos')
hook_spark_home_set = SparkSubmitHook(conn_id='spark_home_set')
hook_spark_home_not_set = SparkSubmitHook(conn_id='spark_home_not_set')
hook_spark_binary_set = SparkSubmitHook(conn_id='spark_binary_set')
hook_spark_binary_and_home_set = SparkSubmitHook(
conn_id='spark_binary_and_home_set')
hook_spark_standalone_cluster = SparkSubmitHook(
conn_id='spark_standalone_cluster')
# When
should_track_driver_status_default = hook_default \
._resolve_should_track_driver_status()
should_track_driver_status_spark_yarn_cluster = hook_spark_yarn_cluster \
._resolve_should_track_driver_status()
should_track_driver_status_spark_k8s_cluster = hook_spark_k8s_cluster \
._resolve_should_track_driver_status()
should_track_driver_status_spark_default_mesos = hook_spark_default_mesos \
._resolve_should_track_driver_status()
should_track_driver_status_spark_home_set = hook_spark_home_set \
._resolve_should_track_driver_status()
should_track_driver_status_spark_home_not_set = hook_spark_home_not_set \
._resolve_should_track_driver_status()
should_track_driver_status_spark_binary_set = hook_spark_binary_set \
._resolve_should_track_driver_status()
should_track_driver_status_spark_binary_and_home_set = \
hook_spark_binary_and_home_set._resolve_should_track_driver_status()
should_track_driver_status_spark_standalone_cluster = \
hook_spark_standalone_cluster._resolve_should_track_driver_status()
# Then
self.assertEqual(should_track_driver_status_default, False)
self.assertEqual(should_track_driver_status_spark_yarn_cluster, False)
self.assertEqual(should_track_driver_status_spark_k8s_cluster, False)
self.assertEqual(should_track_driver_status_spark_default_mesos, False)
self.assertEqual(should_track_driver_status_spark_home_set, False)
self.assertEqual(should_track_driver_status_spark_home_not_set, False)
self.assertEqual(should_track_driver_status_spark_binary_set, False)
self.assertEqual(should_track_driver_status_spark_binary_and_home_set, False)
self.assertEqual(should_track_driver_status_spark_standalone_cluster, True)
def test_resolve_connection_yarn_default(self):
# Given
hook = SparkSubmitHook(conn_id='')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn")
def test_resolve_connection_yarn_default_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_default')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": "root.default",
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn")
self.assertEqual(dict_cmd["--queue"], "root.default")
def test_resolve_connection_mesos_default_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_default_mesos')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "mesos://host:5050",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "mesos://host:5050")
def test_resolve_connection_spark_yarn_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": "root.etl",
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn://yarn-master")
self.assertEqual(dict_cmd["--queue"], "root.etl")
self.assertEqual(dict_cmd["--deploy-mode"], "cluster")
def test_resolve_connection_spark_k8s_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"spark_home": "/opt/spark",
"queue": None,
"spark_binary": "spark-submit",
"master": "k8s://https://k8s-master",
"deploy_mode": "cluster",
"namespace": "mynamespace"}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "k8s://https://k8s-master")
self.assertEqual(dict_cmd["--deploy-mode"], "cluster")
def test_resolve_connection_spark_home_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_home_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": "/opt/myspark",
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/opt/myspark/bin/spark-submit')
def test_resolve_connection_spark_home_not_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_home_not_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'spark-submit')
def test_resolve_connection_spark_binary_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_binary_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn",
"spark_binary": "custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'custom-spark-submit')
def test_resolve_connection_spark_binary_default_value_override(self):
# Given
hook = SparkSubmitHook(conn_id='spark_binary_set',
spark_binary='another-custom-spark-submit')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn",
"spark_binary": "another-custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'another-custom-spark-submit')
def test_resolve_connection_spark_binary_default_value(self):
# Given
hook = SparkSubmitHook(conn_id='spark_default')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": 'root.default',
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'spark-submit')
def test_resolve_connection_spark_binary_and_home_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_binary_and_home_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn",
"spark_binary": "custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": "/path/to/spark_home",
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/path/to/spark_home/bin/custom-spark-submit')
def test_resolve_connection_spark_standalone_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "spark://spark-standalone-master:6066",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": None,
"spark_home": "/path/to/spark_home",
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/path/to/spark_home/bin/spark-submit')
def test_resolve_spark_submit_env_vars_standalone_client_mode(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster_client_mode',
env_vars={"bar": "foo"})
# When
hook._build_spark_submit_command(self._spark_job_file)
# Then
self.assertEqual(hook._env, {"bar": "foo"})
def test_resolve_spark_submit_env_vars_standalone_cluster_mode(self):
def env_vars_exception_in_standalone_cluster_mode():
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster',
env_vars={"bar": "foo"})
# When
hook._build_spark_submit_command(self._spark_job_file)
# Then
self.assertRaises(AirflowException,
env_vars_exception_in_standalone_cluster_mode)
def test_resolve_spark_submit_env_vars_yarn(self):
# Given
hook = SparkSubmitHook(conn_id='spark_yarn_cluster',
env_vars={"bar": "foo"})
# When
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
self.assertEqual(cmd[4], "spark.yarn.appMasterEnv.bar=foo")
self.assertEqual(hook._env, {"bar": "foo"})
def test_resolve_spark_submit_env_vars_k8s(self):
# Given
hook = SparkSubmitHook(conn_id='spark_k8s_cluster',
env_vars={"bar": "foo"})
# When
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
self.assertEqual(cmd[4], "spark.kubernetes.driverEnv.bar=foo")
    def test_process_spark_submit_log_yarn(self):
        """The YARN application id must be scraped from spark-submit output."""
        # Given
        hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
        # Representative spark-submit stdout; only the Submitting line matters.
        log_lines = [
            'SPARK_MAJOR_VERSION is set to 2, using Spark2',
            'WARN NativeCodeLoader: Unable to load native-hadoop library for your ' +
            'platform... using builtin-java classes where applicable',
            'WARN DomainSocketFactory: The short-circuit local reads feature cannot '
            'be used because libhadoop cannot be loaded.',
            'INFO Client: Requesting a new application from cluster with 10 NodeManagers',
            'INFO Client: Submitting application application_1486558679801_1820 ' +
            'to ResourceManager'
        ]
        # When
        hook._process_spark_submit_log(log_lines)
        # Then
        self.assertEqual(hook._yarn_application_id, 'application_1486558679801_1820')
    def test_process_spark_submit_log_k8s(self):
        """Driver pod name and exit code must be scraped from the k8s
        pod-status-watcher output."""
        # Given
        hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
        # NOTE: the fixture is one long concatenated string, mimicking raw
        # watcher output; the parser matches the pod name and 'Exit code'.
        log_lines = [
            'INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:' +
            'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
            'namespace: default' +
            'labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd,' +
            'spark-role -> driver' +
            'pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42' +
            'creation time: 2018-03-05T10:26:55Z' +
            'service account name: spark' +
            'volumes: spark-init-properties, download-jars-volume,' +
            'download-files-volume, spark-token-2vmlm' +
            'node name: N/A' +
            'start time: N/A' +
            'container images: N/A' +
            'phase: Pending' +
            'status: []' +
            '2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed,' +
            ' new state:' +
            'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
            'namespace: default' +
            'Exit code: 999'
        ]
        # When
        hook._process_spark_submit_log(log_lines)
        # Then
        self.assertEqual(hook._kubernetes_driver_pod,
                         'spark-pi-edf2ace37be7353a958b38733a12f8e6-driver')
        self.assertEqual(hook._spark_exit_code, 999)
    def test_process_spark_submit_log_standalone_cluster(self):
        """The driver id must be scraped from the REST submission response."""
        # Given
        hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
        log_lines = [
            'Running Spark using the REST application submission protocol.',
            '17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request '
            'to launch an application in spark://spark-standalone-master:6066',
            '17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully ' +
            'created as driver-20171128111415-0001. Polling submission state...'
        ]
        # When
        hook._process_spark_submit_log(log_lines)
        # Then
        self.assertEqual(hook._driver_id, 'driver-20171128111415-0001')
    def test_process_spark_driver_status_log(self):
        """The driver state must be scraped from a SubmissionStatusResponse."""
        # Given
        hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
        # Raw REST client output split line by line, including the JSON body.
        log_lines = [
            'Submitting a request for the status of submission ' +
            'driver-20171128111415-0001 in spark://spark-standalone-master:6066',
            '17/11/28 11:15:37 INFO RestSubmissionClient: Server responded with ' +
            'SubmissionStatusResponse:',
            '{',
            '"action" : "SubmissionStatusResponse",',
            '"driverState" : "RUNNING",',
            '"serverSparkVersion" : "1.6.0",',
            '"submissionId" : "driver-20171128111415-0001",',
            '"success" : true,',
            '"workerHostPort" : "172.18.0.7:38561",',
            '"workerId" : "worker-20171128110741-172.18.0.7-38561"',
            '}'
        ]
        # When
        hook._process_spark_status_log(log_lines)
        # Then
        self.assertEqual(hook._driver_status, 'RUNNING')
    @patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
    def test_yarn_process_on_kill(self, mock_popen):
        """``on_kill`` must shell out to ``yarn application -kill`` with the
        application id previously scraped from the submit log."""
        # Given
        mock_popen.return_value.stdout = io.StringIO('stdout')
        mock_popen.return_value.stderr = io.StringIO('stderr')
        mock_popen.return_value.poll.return_value = None
        mock_popen.return_value.wait.return_value = 0
        log_lines = [
            'SPARK_MAJOR_VERSION is set to 2, using Spark2',
            'WARN NativeCodeLoader: Unable to load native-hadoop library for your ' +
            'platform... using builtin-java classes where applicable',
            'WARN DomainSocketFactory: The short-circuit local reads feature cannot ' +
            'be used because libhadoop cannot be loaded.',
            'INFO Client: Requesting a new application from cluster with 10 ' +
            'NodeManagerapplication_1486558679801_1820s',
            'INFO Client: Submitting application application_1486558679801_1820 ' +
            'to ResourceManager'
        ]
        hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
        hook._process_spark_submit_log(log_lines)
        hook.submit()
        # When
        hook.on_kill()
        # Then
        self.assertIn(call(['yarn', 'application', '-kill',
                            'application_1486558679801_1820'],
                           stderr=-1, stdout=-1),
                      mock_popen.mock_calls)
    def test_standalone_cluster_process_on_kill(self):
        """The kill command for a standalone cluster must target the driver id
        via ``spark-submit --master ... --kill``."""
        # Given
        log_lines = [
            'Running Spark using the REST application submission protocol.',
            '17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request ' +
            'to launch an application in spark://spark-standalone-master:6066',
            '17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully ' +
            'created as driver-20171128111415-0001. Polling submission state...'
        ]
        hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
        hook._process_spark_submit_log(log_lines)
        # When
        kill_cmd = hook._build_spark_driver_kill_command()
        # Then
        self.assertEqual(kill_cmd[0], '/path/to/spark_home/bin/spark-submit')
        self.assertEqual(kill_cmd[1], '--master')
        self.assertEqual(kill_cmd[2], 'spark://spark-standalone-master:6066')
        self.assertEqual(kill_cmd[3], '--kill')
        self.assertEqual(kill_cmd[4], 'driver-20171128111415-0001')
    @patch('airflow.kubernetes.kube_client.get_kube_client')
    @patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
    def test_k8s_process_on_kill(self, mock_popen, mock_client_method):
        """``on_kill`` on k8s must delete the driver pod (scraped from the
        submit log) in the connection's namespace."""
        # Given
        mock_popen.return_value.stdout = io.StringIO('stdout')
        mock_popen.return_value.stderr = io.StringIO('stderr')
        mock_popen.return_value.poll.return_value = None
        mock_popen.return_value.wait.return_value = 0
        client = mock_client_method.return_value
        hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
        # Same concatenated watcher fixture as the log-parsing test above,
        # but with a successful (0) exit code.
        log_lines = [
            'INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:' +
            'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
            'namespace: default' +
            'labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd,' +
            'spark-role -> driver' +
            'pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42' +
            'creation time: 2018-03-05T10:26:55Z' +
            'service account name: spark' +
            'volumes: spark-init-properties, download-jars-volume,' +
            'download-files-volume, spark-token-2vmlm' +
            'node name: N/A' +
            'start time: N/A' +
            'container images: N/A' +
            'phase: Pending' +
            'status: []' +
            '2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed,' +
            ' new state:' +
            'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
            'namespace: default' +
            'Exit code: 0'
        ]
        hook._process_spark_submit_log(log_lines)
        hook.submit()
        # When
        hook.on_kill()
        # Then
        import kubernetes
        kwargs = {'pretty': True, 'body': kubernetes.client.V1DeleteOptions()}
        client.delete_namespaced_pod.assert_called_once_with(
            'spark-pi-edf2ace37be7353a958b38733a12f8e6-driver',
            'mynamespace', **kwargs)
if __name__ == '__main__':
    # Allow running this test module directly, without an external runner.
    unittest.main()
| 42.235043
| 90
| 0.588013
|
import io
import unittest
from unittest.mock import call, patch
from airflow import AirflowException
from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook
from airflow.models import Connection
from airflow.utils import db
class TestSparkSubmitHook(unittest.TestCase):
_spark_job_file = 'test_application.py'
_config = {
'conf': {
'parquet.compression': 'SNAPPY'
},
'conn_id': 'default_spark',
'files': 'hive-site.xml',
'py_files': 'sample_library.py',
'archives': 'sample_archive.zip#SAMPLE',
'jars': 'parquet.jar',
'packages': 'com.databricks:spark-avro_2.11:3.2.0',
'exclude_packages': 'org.bad.dependency:1.0.0',
'repositories': 'http://myrepo.org',
'total_executor_cores': 4,
'executor_cores': 4,
'executor_memory': '22g',
'keytab': 'privileged_user.keytab',
'principal': 'user/spark@airflow.org',
'proxy_user': 'sample_user',
'name': 'spark-job',
'num_executors': 10,
'verbose': True,
'driver_memory': '3g',
'java_class': 'com.foo.bar.AppMain',
'application_args': [
'-f', 'foo',
'--bar', 'bar',
'--with-spaces', 'args should keep embdedded spaces',
'baz'
]
}
@staticmethod
def cmd_args_to_dict(list_cmd):
return_dict = {}
for arg in list_cmd:
if arg.startswith("--"):
pos = list_cmd.index(arg)
return_dict[arg] = list_cmd[pos + 1]
return return_dict
def setUp(self):
db.merge_conn(
Connection(
conn_id='spark_yarn_cluster', conn_type='spark',
host='yarn://yarn-master',
extra='{"queue": "root.etl", "deploy-mode": "cluster"}')
)
db.merge_conn(
Connection(
conn_id='spark_k8s_cluster', conn_type='spark',
host='k8s://https://k8s-master',
extra='{"spark-home": "/opt/spark", ' +
'"deploy-mode": "cluster", ' +
'"namespace": "mynamespace"}')
)
db.merge_conn(
Connection(
conn_id='spark_default_mesos', conn_type='spark',
host='mesos://host', port=5050)
)
db.merge_conn(
Connection(
conn_id='spark_home_set', conn_type='spark',
host='yarn://yarn-master',
extra='{"spark-home": "/opt/myspark"}')
)
db.merge_conn(
Connection(
conn_id='spark_home_not_set', conn_type='spark',
host='yarn://yarn-master')
)
db.merge_conn(
Connection(
conn_id='spark_binary_set', conn_type='spark',
host='yarn', extra='{"spark-binary": "custom-spark-submit"}')
)
db.merge_conn(
Connection(
conn_id='spark_binary_and_home_set', conn_type='spark',
host='yarn',
extra='{"spark-home": "/path/to/spark_home", ' +
'"spark-binary": "custom-spark-submit"}')
)
db.merge_conn(
Connection(
conn_id='spark_standalone_cluster', conn_type='spark',
host='spark://spark-standalone-master:6066',
extra='{"spark-home": "/path/to/spark_home", "deploy-mode": "cluster"}')
)
db.merge_conn(
Connection(
conn_id='spark_standalone_cluster_client_mode', conn_type='spark',
host='spark://spark-standalone-master:6066',
extra='{"spark-home": "/path/to/spark_home", "deploy-mode": "client"}')
)
def test_build_spark_submit_command(self):
hook = SparkSubmitHook(**self._config)
cmd = hook._build_spark_submit_command(self._spark_job_file)
expected_build_cmd = [
'spark-submit',
'--master', 'yarn',
'--conf', 'parquet.compression=SNAPPY',
'--files', 'hive-site.xml',
'--py-files', 'sample_library.py',
'--archives', 'sample_archive.zip#SAMPLE',
'--jars', 'parquet.jar',
'--packages', 'com.databricks:spark-avro_2.11:3.2.0',
'--exclude-packages', 'org.bad.dependency:1.0.0',
'--repositories', 'http://myrepo.org',
'--num-executors', '10',
'--total-executor-cores', '4',
'--executor-cores', '4',
'--executor-memory', '22g',
'--driver-memory', '3g',
'--keytab', 'privileged_user.keytab',
'--principal', 'user/spark@airflow.org',
'--proxy-user', 'sample_user',
'--name', 'spark-job',
'--class', 'com.foo.bar.AppMain',
'--verbose',
'test_application.py',
'-f', 'foo',
'--bar', 'bar',
'--with-spaces', 'args should keep embdedded spaces',
'baz'
]
self.assertEqual(expected_build_cmd, cmd)
@patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
def test_spark_process_runcmd(self, mock_popen):
mock_popen.return_value.stdout = io.StringIO('stdout')
mock_popen.return_value.stderr = io.StringIO('stderr')
mock_popen.return_value.wait.return_value = 0
hook = SparkSubmitHook(conn_id='')
hook.submit()
self.assertEqual(mock_popen.mock_calls[0],
call(['spark-submit', '--master', 'yarn',
'--name', 'default-name', ''],
stderr=-2, stdout=-1, universal_newlines=True, bufsize=-1))
def test_resolve_should_track_driver_status(self):
hook_default = SparkSubmitHook(conn_id='')
hook_spark_yarn_cluster = SparkSubmitHook(conn_id='spark_yarn_cluster')
hook_spark_k8s_cluster = SparkSubmitHook(conn_id='spark_k8s_cluster')
hook_spark_default_mesos = SparkSubmitHook(conn_id='spark_default_mesos')
hook_spark_home_set = SparkSubmitHook(conn_id='spark_home_set')
hook_spark_home_not_set = SparkSubmitHook(conn_id='spark_home_not_set')
hook_spark_binary_set = SparkSubmitHook(conn_id='spark_binary_set')
hook_spark_binary_and_home_set = SparkSubmitHook(
conn_id='spark_binary_and_home_set')
hook_spark_standalone_cluster = SparkSubmitHook(
conn_id='spark_standalone_cluster')
should_track_driver_status_default = hook_default \
._resolve_should_track_driver_status()
should_track_driver_status_spark_yarn_cluster = hook_spark_yarn_cluster \
._resolve_should_track_driver_status()
should_track_driver_status_spark_k8s_cluster = hook_spark_k8s_cluster \
._resolve_should_track_driver_status()
should_track_driver_status_spark_default_mesos = hook_spark_default_mesos \
._resolve_should_track_driver_status()
should_track_driver_status_spark_home_set = hook_spark_home_set \
._resolve_should_track_driver_status()
should_track_driver_status_spark_home_not_set = hook_spark_home_not_set \
._resolve_should_track_driver_status()
should_track_driver_status_spark_binary_set = hook_spark_binary_set \
._resolve_should_track_driver_status()
should_track_driver_status_spark_binary_and_home_set = \
hook_spark_binary_and_home_set._resolve_should_track_driver_status()
should_track_driver_status_spark_standalone_cluster = \
hook_spark_standalone_cluster._resolve_should_track_driver_status()
self.assertEqual(should_track_driver_status_default, False)
self.assertEqual(should_track_driver_status_spark_yarn_cluster, False)
self.assertEqual(should_track_driver_status_spark_k8s_cluster, False)
self.assertEqual(should_track_driver_status_spark_default_mesos, False)
self.assertEqual(should_track_driver_status_spark_home_set, False)
self.assertEqual(should_track_driver_status_spark_home_not_set, False)
self.assertEqual(should_track_driver_status_spark_binary_set, False)
self.assertEqual(should_track_driver_status_spark_binary_and_home_set, False)
self.assertEqual(should_track_driver_status_spark_standalone_cluster, True)
def test_resolve_connection_yarn_default(self):
hook = SparkSubmitHook(conn_id='')
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn")
def test_resolve_connection_yarn_default_connection(self):
hook = SparkSubmitHook(conn_id='spark_default')
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": "root.default",
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn")
self.assertEqual(dict_cmd["--queue"], "root.default")
def test_resolve_connection_mesos_default_connection(self):
hook = SparkSubmitHook(conn_id='spark_default_mesos')
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "mesos://host:5050",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "mesos://host:5050")
def test_resolve_connection_spark_yarn_cluster_connection(self):
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": "root.etl",
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn://yarn-master")
self.assertEqual(dict_cmd["--queue"], "root.etl")
self.assertEqual(dict_cmd["--deploy-mode"], "cluster")
def test_resolve_connection_spark_k8s_cluster_connection(self):
hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"spark_home": "/opt/spark",
"queue": None,
"spark_binary": "spark-submit",
"master": "k8s://https://k8s-master",
"deploy_mode": "cluster",
"namespace": "mynamespace"}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "k8s://https://k8s-master")
self.assertEqual(dict_cmd["--deploy-mode"], "cluster")
def test_resolve_connection_spark_home_set_connection(self):
hook = SparkSubmitHook(conn_id='spark_home_set')
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": "/opt/myspark",
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/opt/myspark/bin/spark-submit')
def test_resolve_connection_spark_home_not_set_connection(self):
hook = SparkSubmitHook(conn_id='spark_home_not_set')
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'spark-submit')
def test_resolve_connection_spark_binary_set_connection(self):
hook = SparkSubmitHook(conn_id='spark_binary_set')
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
expected_spark_connection = {"master": "yarn",
"spark_binary": "custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'custom-spark-submit')
def test_resolve_connection_spark_binary_default_value_override(self):
hook = SparkSubmitHook(conn_id='spark_binary_set',
spark_binary='another-custom-spark-submit')
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
expected_spark_connection = {"master": "yarn",
"spark_binary": "another-custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'another-custom-spark-submit')
def test_resolve_connection_spark_binary_default_value(self):
hook = SparkSubmitHook(conn_id='spark_default')
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
expected_spark_connection = {"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": 'root.default',
"spark_home": None,
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'spark-submit')
def test_resolve_connection_spark_binary_and_home_set_connection(self):
hook = SparkSubmitHook(conn_id='spark_binary_and_home_set')
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
expected_spark_connection = {"master": "yarn",
"spark_binary": "custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": "/path/to/spark_home",
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/path/to/spark_home/bin/custom-spark-submit')
def test_resolve_connection_spark_standalone_cluster_connection(self):
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
expected_spark_connection = {"master": "spark://spark-standalone-master:6066",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": None,
"spark_home": "/path/to/spark_home",
"namespace": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/path/to/spark_home/bin/spark-submit')
def test_resolve_spark_submit_env_vars_standalone_client_mode(self):
hook = SparkSubmitHook(conn_id='spark_standalone_cluster_client_mode',
env_vars={"bar": "foo"})
hook._build_spark_submit_command(self._spark_job_file)
self.assertEqual(hook._env, {"bar": "foo"})
def test_resolve_spark_submit_env_vars_standalone_cluster_mode(self):
def env_vars_exception_in_standalone_cluster_mode():
hook = SparkSubmitHook(conn_id='spark_standalone_cluster',
env_vars={"bar": "foo"})
hook._build_spark_submit_command(self._spark_job_file)
self.assertRaises(AirflowException,
env_vars_exception_in_standalone_cluster_mode)
def test_resolve_spark_submit_env_vars_yarn(self):
hook = SparkSubmitHook(conn_id='spark_yarn_cluster',
env_vars={"bar": "foo"})
cmd = hook._build_spark_submit_command(self._spark_job_file)
self.assertEqual(cmd[4], "spark.yarn.appMasterEnv.bar=foo")
self.assertEqual(hook._env, {"bar": "foo"})
def test_resolve_spark_submit_env_vars_k8s(self):
hook = SparkSubmitHook(conn_id='spark_k8s_cluster',
env_vars={"bar": "foo"})
cmd = hook._build_spark_submit_command(self._spark_job_file)
self.assertEqual(cmd[4], "spark.kubernetes.driverEnv.bar=foo")
    def test_process_spark_submit_log_yarn(self):
        """Parsing YARN spark-submit output captures the application id."""
        hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
        # Representative spark-submit stdout; only the final "Submitting
        # application" line carries the id the hook must extract.
        log_lines = [
            'SPARK_MAJOR_VERSION is set to 2, using Spark2',
            'WARN NativeCodeLoader: Unable to load native-hadoop library for your ' +
            'platform... using builtin-java classes where applicable',
            'WARN DomainSocketFactory: The short-circuit local reads feature cannot '
            'be used because libhadoop cannot be loaded.',
            'INFO Client: Requesting a new application from cluster with 10 NodeManagers',
            'INFO Client: Submitting application application_1486558679801_1820 ' +
            'to ResourceManager'
        ]
        hook._process_spark_submit_log(log_lines)
        self.assertEqual(hook._yarn_application_id, 'application_1486558679801_1820')
    def test_process_spark_submit_log_k8s(self):
        """Parsing k8s pod-status output captures the driver pod name and the
        Spark exit code."""
        hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
        # NOTE(review): these fragments are concatenated into single strings
        # with no separators; the parser apparently only needs the
        # 'pod name:' and 'Exit code:' substrings — confirm against the hook.
        log_lines = [
            'INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:' +
            'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
            'namespace: default' +
            'labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd,' +
            'spark-role -> driver' +
            'pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42' +
            'creation time: 2018-03-05T10:26:55Z' +
            'service account name: spark' +
            'volumes: spark-init-properties, download-jars-volume,' +
            'download-files-volume, spark-token-2vmlm' +
            'node name: N/A' +
            'start time: N/A' +
            'container images: N/A' +
            'phase: Pending' +
            'status: []' +
            '2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed,' +
            ' new state:' +
            'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
            'namespace: default' +
            'Exit code: 999'
        ]
        hook._process_spark_submit_log(log_lines)
        self.assertEqual(hook._kubernetes_driver_pod,
                         'spark-pi-edf2ace37be7353a958b38733a12f8e6-driver')
        self.assertEqual(hook._spark_exit_code, 999)
    def test_process_spark_submit_log_standalone_cluster(self):
        """Parsing standalone-cluster REST submission output captures the
        driver id from the 'Submission successfully created' line."""
        hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
        log_lines = [
            'Running Spark using the REST application submission protocol.',
            '17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request '
            'to launch an application in spark://spark-standalone-master:6066',
            '17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully ' +
            'created as driver-20171128111415-0001. Polling submission state...'
        ]
        hook._process_spark_submit_log(log_lines)
        self.assertEqual(hook._driver_id, 'driver-20171128111415-0001')
    def test_process_spark_driver_status_log(self):
        """Parsing a SubmissionStatusResponse extracts the driverState."""
        hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
        # Line-by-line dump of the REST status response JSON as the status
        # command prints it.
        log_lines = [
            'Submitting a request for the status of submission ' +
            'driver-20171128111415-0001 in spark://spark-standalone-master:6066',
            '17/11/28 11:15:37 INFO RestSubmissionClient: Server responded with ' +
            'SubmissionStatusResponse:',
            '{',
            '"action" : "SubmissionStatusResponse",',
            '"driverState" : "RUNNING",',
            '"serverSparkVersion" : "1.6.0",',
            '"submissionId" : "driver-20171128111415-0001",',
            '"success" : true,',
            '"workerHostPort" : "172.18.0.7:38561",',
            '"workerId" : "worker-20171128110741-172.18.0.7-38561"',
            '}'
        ]
        hook._process_spark_status_log(log_lines)
        self.assertEqual(hook._driver_status, 'RUNNING')
    @patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
    def test_yarn_process_on_kill(self, mock_popen):
        """on_kill() must spawn ``yarn application -kill <app id>`` for the
        application id previously parsed from the submit log."""
        # Simulate a spark-submit subprocess that is still running
        # (Popen.poll() returning None means "not yet terminated").
        mock_popen.return_value.stdout = io.StringIO('stdout')
        mock_popen.return_value.stderr = io.StringIO('stderr')
        mock_popen.return_value.poll.return_value = None
        mock_popen.return_value.wait.return_value = 0
        log_lines = [
            'SPARK_MAJOR_VERSION is set to 2, using Spark2',
            'WARN NativeCodeLoader: Unable to load native-hadoop library for your ' +
            'platform... using builtin-java classes where applicable',
            'WARN DomainSocketFactory: The short-circuit local reads feature cannot ' +
            'be used because libhadoop cannot be loaded.',
            'INFO Client: Requesting a new application from cluster with 10 ' +
            'NodeManagerapplication_1486558679801_1820s',
            'INFO Client: Submitting application application_1486558679801_1820 ' +
            'to ResourceManager'
        ]
        hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
        hook._process_spark_submit_log(log_lines)
        hook.submit()
        hook.on_kill()
        # stderr=-1 / stdout=-1 are the numeric value of subprocess.PIPE.
        self.assertIn(call(['yarn', 'application', '-kill',
                            'application_1486558679801_1820'],
                           stderr=-1, stdout=-1),
                      mock_popen.mock_calls)
    def test_standalone_cluster_process_on_kill(self):
        """The driver kill command is built from the resolved spark_home,
        the REST master URL and the parsed driver id."""
        log_lines = [
            'Running Spark using the REST application submission protocol.',
            '17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request ' +
            'to launch an application in spark://spark-standalone-master:6066',
            '17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully ' +
            'created as driver-20171128111415-0001. Polling submission state...'
        ]
        hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
        hook._process_spark_submit_log(log_lines)
        kill_cmd = hook._build_spark_driver_kill_command()
        # Expected shape: <spark-submit> --master <url> --kill <driver id>
        self.assertEqual(kill_cmd[0], '/path/to/spark_home/bin/spark-submit')
        self.assertEqual(kill_cmd[1], '--master')
        self.assertEqual(kill_cmd[2], 'spark://spark-standalone-master:6066')
        self.assertEqual(kill_cmd[3], '--kill')
        self.assertEqual(kill_cmd[4], 'driver-20171128111415-0001')
    @patch('airflow.kubernetes.kube_client.get_kube_client')
    @patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
    def test_k8s_process_on_kill(self, mock_popen, mock_client_method):
        """on_kill() must delete the driver pod via the kube client."""
        # Simulate a spark-submit subprocess that is still running
        # (Popen.poll() returning None means "not yet terminated").
        mock_popen.return_value.stdout = io.StringIO('stdout')
        mock_popen.return_value.stderr = io.StringIO('stderr')
        mock_popen.return_value.poll.return_value = None
        mock_popen.return_value.wait.return_value = 0
        client = mock_client_method.return_value
        hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
        # Same concatenated pod-status fixture as the parsing test above,
        # but with a clean exit code.
        log_lines = [
            'INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:' +
            'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
            'namespace: default' +
            'labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd,' +
            'spark-role -> driver' +
            'pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42' +
            'creation time: 2018-03-05T10:26:55Z' +
            'service account name: spark' +
            'volumes: spark-init-properties, download-jars-volume,' +
            'download-files-volume, spark-token-2vmlm' +
            'node name: N/A' +
            'start time: N/A' +
            'container images: N/A' +
            'phase: Pending' +
            'status: []' +
            '2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed,' +
            ' new state:' +
            'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
            'namespace: default' +
            'Exit code: 0'
        ]
        hook._process_spark_submit_log(log_lines)
        hook.submit()
        hook.on_kill()
        import kubernetes
        kwargs = {'pretty': True, 'body': kubernetes.client.V1DeleteOptions()}
        # 'mynamespace' presumably comes from the spark_k8s_cluster
        # connection fixture — not visible in this file chunk.
        client.delete_namespaced_pod.assert_called_once_with(
            'spark-pi-edf2ace37be7353a958b38733a12f8e6-driver',
            'mynamespace', **kwargs)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| true
| true
|
f7165cf1f7fa7343d3026963a9b227304d4ef57f
| 2,956
|
py
|
Python
|
account_keeping/forms.py
|
bitlabstudio/django-account-keeping
|
9f579a5fd912442a2948e2da858a5720de072568
|
[
"MIT"
] | 14
|
2017-03-29T03:14:16.000Z
|
2022-03-28T14:11:58.000Z
|
account_keeping/forms.py
|
bitlabstudio/django-account-keeping
|
9f579a5fd912442a2948e2da858a5720de072568
|
[
"MIT"
] | 1
|
2016-11-08T08:35:49.000Z
|
2016-11-08T08:35:49.000Z
|
account_keeping/forms.py
|
bitmazk/django-account-keeping
|
9f579a5fd912442a2948e2da858a5720de072568
|
[
"MIT"
] | 4
|
2017-09-06T00:16:53.000Z
|
2018-11-25T21:58:39.000Z
|
"""Forms of the account_keeping app."""
from django import forms
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from . import models
class InvoiceForm(forms.ModelForm):
    """ModelForm for ``models.Invoice``.

    Takes a ``branch`` as its first constructor argument; when a branch is
    given (or the invoice already exists) the ``branch`` form field is
    removed and the branch is assigned automatically on save.
    """

    class Meta:
        model = models.Invoice
        fields = '__all__'
        # SelectDateWidget is not available in every Django version
        # (presumably it raises AttributeError on some releases — the
        # except clause below handles exactly that); fall back to DateInput.
        try:
            widgets = {
                'invoice_date': forms.widgets.SelectDateWidget(
                    attrs={'style': 'display: inline; width: auto;'}),
                'payment_date': forms.widgets.SelectDateWidget(
                    attrs={'style': 'display: inline; width: auto;'}),
            }
        except AttributeError:  # pragma: nocover
            widgets = {
                'invoice_date': forms.widgets.DateInput,
                'payment_date': forms.widgets.DateInput,
            }

    def __init__(self, branch, *args, **kwargs):
        """
        :param branch: Branch the invoice belongs to; may be falsy to let
            the user pick one via the ``branch`` form field.
        """
        self.branch = branch
        super(InvoiceForm, self).__init__(*args, **kwargs)
        # Hide the branch field when it is predetermined or already saved.
        if branch or self.instance.pk:
            del self.fields['branch']

    def save(self, *args, **kwargs):
        """Assign the preselected branch to new invoices before saving."""
        if not self.instance.pk and self.branch:
            self.instance.branch = self.branch
        return super(InvoiceForm, self).save(*args, **kwargs)
class TransactionForm(forms.ModelForm):
    """ModelForm for ``models.Transaction``.

    Adds a non-model ``mark_invoice`` flag: when set and the transaction
    references an invoice, ``save()`` stamps that invoice's payment date
    with the transaction date.
    """
    # Extra form-only field; consumed in save(), never stored on the model.
    mark_invoice = forms.BooleanField(
        label=_('Mark invoice as paid?'),
        initial=True,
        required=False,
        widget=forms.widgets.CheckboxInput(
            attrs={'data-id': 'mark-invoice-field'}),
    )

    class Meta:
        model = models.Transaction
        fields = '__all__'
        # SelectDateWidget is not available in every Django version; fall
        # back to a plain DateInput when the attribute is missing.
        try:
            date_widget = forms.widgets.SelectDateWidget(
                attrs={'style': 'display: inline; width: auto;'})
        except AttributeError:  # pragma: nocover
            date_widget = forms.widgets.DateInput
        widgets = {
            'transaction_date': date_widget,
            'invoice': forms.widgets.NumberInput(
                attrs={'data-id': 'invoice-field'}),
            'parent': forms.widgets.NumberInput(),
        }

    def __init__(self, branch, *args, **kwargs):
        """
        :param branch: When given, restricts the ``account`` choices to
            accounts of that branch.
        """
        super(TransactionForm, self).__init__(*args, **kwargs)
        self.fields['payee'].help_text = _(
            '<a href="{}">Add a payee</a>').format(
            reverse('account_keeping_payee_create'))
        if branch:
            self.fields['account'].queryset = self.fields[
                'account'].queryset.filter(branch=branch)

    def save(self, *args, **kwargs):
        """Save the transaction; optionally mark its invoice as paid."""
        if self.instance.invoice and self.cleaned_data.get('mark_invoice'):
            # Set the payment date on related invoice
            self.instance.invoice.payment_date = self.instance.transaction_date
            self.instance.invoice.save()
        return super(TransactionForm, self).save(*args, **kwargs)
class ExportForm(forms.Form):
    """Plain form collecting a start/end date pair (used to bound exports)."""
    start = forms.DateField(
        label=_('Start'),
    )
    end = forms.DateField(
        label=_('End'),
    )
| 33.977011
| 79
| 0.589648
|
from django import forms
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from . import models
class InvoiceForm(forms.ModelForm):
class Meta:
model = models.Invoice
fields = '__all__'
try:
widgets = {
'invoice_date': forms.widgets.SelectDateWidget(
attrs={'style': 'display: inline; width: auto;'}),
'payment_date': forms.widgets.SelectDateWidget(
attrs={'style': 'display: inline; width: auto;'}),
}
except AttributeError:
widgets = {
'invoice_date': forms.widgets.DateInput,
'payment_date': forms.widgets.DateInput,
}
def __init__(self, branch, *args, **kwargs):
self.branch = branch
super(InvoiceForm, self).__init__(*args, **kwargs)
if branch or self.instance.pk:
del self.fields['branch']
def save(self, *args, **kwargs):
if not self.instance.pk and self.branch:
self.instance.branch = self.branch
return super(InvoiceForm, self).save(*args, **kwargs)
class TransactionForm(forms.ModelForm):
mark_invoice = forms.BooleanField(
label=_('Mark invoice as paid?'),
initial=True,
required=False,
widget=forms.widgets.CheckboxInput(
attrs={'data-id': 'mark-invoice-field'}),
)
class Meta:
model = models.Transaction
fields = '__all__'
try:
date_widget = forms.widgets.SelectDateWidget(
attrs={'style': 'display: inline; width: auto;'})
except AttributeError:
date_widget = forms.widgets.DateInput
widgets = {
'transaction_date': date_widget,
'invoice': forms.widgets.NumberInput(
attrs={'data-id': 'invoice-field'}),
'parent': forms.widgets.NumberInput(),
}
def __init__(self, branch, *args, **kwargs):
super(TransactionForm, self).__init__(*args, **kwargs)
self.fields['payee'].help_text = _(
'<a href="{}">Add a payee</a>').format(
reverse('account_keeping_payee_create'))
if branch:
self.fields['account'].queryset = self.fields[
'account'].queryset.filter(branch=branch)
def save(self, *args, **kwargs):
if self.instance.invoice and self.cleaned_data.get('mark_invoice'):
self.instance.invoice.payment_date = self.instance.transaction_date
self.instance.invoice.save()
return super(TransactionForm, self).save(*args, **kwargs)
class ExportForm(forms.Form):
start = forms.DateField(
label=_('Start'),
)
end = forms.DateField(
label=_('End'),
)
| true
| true
|
f7165ee6a7c8a5acb376a2fe2456d8037e0db352
| 11,305
|
py
|
Python
|
Autocoders/Python/src/fprime_ac/parsers/XmlPortsParser.py
|
LeStarch/lgtm-fprime
|
904b0311fe647745b29075d44259d1dc1f4284ae
|
[
"Apache-2.0"
] | null | null | null |
Autocoders/Python/src/fprime_ac/parsers/XmlPortsParser.py
|
LeStarch/lgtm-fprime
|
904b0311fe647745b29075d44259d1dc1f4284ae
|
[
"Apache-2.0"
] | null | null | null |
Autocoders/Python/src/fprime_ac/parsers/XmlPortsParser.py
|
LeStarch/lgtm-fprime
|
904b0311fe647745b29075d44259d1dc1f4284ae
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# ===============================================================================
# NAME: XmlPortsParser.py
#
# DESCRIPTION: This class parses the XML port types files.
#
# USAGE:
#
# AUTHOR: reder
# EMAIL: reder@jpl.nasa.gov
# DATE CREATED : Feb. 4, 2013
#
# Copyright 2007, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
#
# Python standard modules
#
import logging
import os
import sys
from lxml import etree
from fprime_ac.utils import ConfigManager
from fprime_ac.utils.exceptions import FprimeRngXmlValidationException
#
# Python extention modules and custom interfaces
#
#
# Universal globals used within module go here.
# (DO NOT USE MANY!)
#
# Global logger init. below.
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
ROOTDIR = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "..")
#
class XmlPortsParser:
    """
    An XML parser class that uses lxml.etree to consume an XML
    port type documents.
    The class is instanced with an XML file name.
    """

    def __init__(self, xml_file=None):
        """
        Given a well formed XML file (xml_file), read it and turn it into
        a big string.

        Validates the file against the interface RelaxNG schema, then walks
        the <interface> element to collect includes, argument definitions
        and the optional return specification.

        :param xml_file: path to the port-interface XML file; OSError is
            raised when it does not exist.
        """
        # Parsed results, populated below.
        self.__root = None
        self.__include_serializable_files = []
        self.__include_enum_files = []
        self.__include_array_files = []
        self.__include_header_files = []
        #
        self.__config = ConfigManager.ConfigManager.getInstance()
        #
        self.__port = None
        self.__args = []
        self.__enum_list_items = []
        self.__modifier = None
        #
        if os.path.isfile(xml_file) == False:
            # NOTE(review): this local shadows the builtin ``str``; harmless
            # here since it is raised immediately, but worth renaming.
            str = "ERROR: Could not find specified XML file %s." % xml_file
            raise OSError(str)
        fd = open(xml_file)
        # Only the basename is retained for reporting purposes.
        xml_file = os.path.basename(xml_file)
        self.__xml_filename = xml_file
        #
        xml_parser = etree.XMLParser(remove_comments=True)
        element_tree = etree.parse(fd, parser=xml_parser)

        # Validate against schema
        relax_file_handler = open(ROOTDIR + self.__config.get("schema", "interface"))
        relax_parsed = etree.parse(relax_file_handler)
        relax_file_handler.close()
        relax_compiled = etree.RelaxNG(relax_parsed)

        # 2/3 conversion
        if not relax_compiled.validate(element_tree):
            raise FprimeRngXmlValidationException(relax_compiled.error_log)

        interface = element_tree.getroot()
        if interface.tag != "interface":
            PRINT.info("%s is not a interface file" % xml_file)
            sys.exit(-1)

        print("Parsing Interface %s" % interface.attrib["name"])
        if "namespace" in interface.attrib:
            namespace_name = interface.attrib["namespace"]
        else:
            namespace_name = None

        self.__port = Interface(namespace_name, interface.attrib["name"])

        # Walk the children of <interface>: comment, include/import tags,
        # the <args> list and the optional <return> element.
        for interface_tag in interface:
            if interface_tag.tag == "comment":
                self.__port.set_comment(interface_tag.text.strip())
            elif interface_tag.tag == "include_header":
                self.__include_header_files.append(interface_tag.text)
            elif interface_tag.tag == "import_serializable_type":
                self.__include_serializable_files.append(interface_tag.text)
            elif interface_tag.tag == "import_enum_type":
                self.__include_enum_files.append(interface_tag.text)
            elif interface_tag.tag == "import_array_type":
                self.__include_array_files.append(interface_tag.text)
            elif interface_tag.tag == "args":
                for arg in interface_tag:
                    if arg.tag != "arg":
                        PRINT.info(
                            "%s: Invalid tag %s in interface args definition"
                            % (xml_file, arg.tag)
                        )
                        sys.exit(-1)
                    n = arg.attrib["name"]
                    t = arg.attrib["type"]
                    if "pass_by" in list(arg.attrib.keys()):
                        p = arg.attrib["pass_by"]
                    else:
                        p = None
                    # string/buffer arguments must declare an explicit size.
                    if t == "string" or t == "buffer":
                        if not "size" in list(arg.attrib.keys()):
                            PRINT.info(
                                "%s: arg %s string must specify size tag"
                                % (xml_file, arg.tag)
                            )
                            sys.exit(-1)
                        else:
                            s = arg.attrib["size"]
                    else:
                        s = None
                    arg_obj = Arg(n, t, p, s, None)
                    for arg_tag in arg:
                        # only valid tag in command args is comment
                        if arg_tag.tag == "comment":
                            arg_obj.set_comment(arg_tag.text)
                        elif arg_tag.tag == "enum" and t == "ENUM":
                            en = arg_tag.attrib["name"]
                            enum_members = []
                            for mem in arg_tag:
                                mn = mem.attrib["name"]
                                if "value" in list(mem.attrib.keys()):
                                    v = mem.attrib["value"]
                                else:
                                    v = None
                                if "comment" in list(mem.attrib.keys()):
                                    mc = mem.attrib["comment"].strip()
                                else:
                                    mc = None
                                enum_members.append((mn, v, mc))
                            # ENUM args store ((type, enum_name), members).
                            arg_obj.set_type(((t, en), enum_members))
                        else:
                            PRINT.info(
                                "%s: Invalid argument tag %s in port %s argument %s"
                                % (xml_file, arg_tag.tag, interface_tag.tag, n)
                            )
                            sys.exit(-1)
                    self.__args.append(arg_obj)
            elif interface_tag.tag == "return":
                t = interface_tag.attrib["type"]
                if "pass_by" in list(interface_tag.attrib.keys()):
                    m = interface_tag.attrib["pass_by"]
                else:
                    m = "value"
                for enum_tag in interface_tag:
                    # The only tags would be enumeration declarations
                    if enum_tag.tag == "enum" and t == "ENUM":
                        en = enum_tag.attrib["name"]
                        enum_members = []
                        for mem in enum_tag:
                            mn = mem.attrib["name"]
                            if "value" in list(mem.attrib.keys()):
                                v = mem.attrib["value"]
                            else:
                                v = None
                            if "comment" in list(mem.attrib.keys()):
                                mc = mem.attrib["comment"].strip()
                            else:
                                mc = None
                            enum_members.append((mn, v, mc))
                        t = ((t, en), enum_members)
                    else:
                        PRINT.info(
                            "%s: Invalid port return value tag %s"
                            % (xml_file, enum_tag.tag)
                        )
                        sys.exit(-1)
                self.__port.set_return(t, m)
        # Check XML name for compliance here...
        # name = self.get_interface().get_name()
        # if (os.path.basename(xml_file)[:len(name)] != name):
        # PRINT.info("ERROR: Port XML files must begin with name of port...")
        # sys.exit(-1)

    def __del__(self):
        # NOTE(review): ``del`` on the loop variable only unbinds the local
        # name; actual cleanup relies on reference counting.
        for a in self.__args:
            del a
        del self.__port

    def get_xml_filename(self):
        """
        Return the original XML filename parsed.
        """
        return self.__xml_filename

    def get_include_header_files(self):
        """
        Return a list of all imported .hpp or .h files.
        """
        return self.__include_header_files

    def get_includes_serial_files(self):
        """
        Return a list of all imported Serializable XML files.
        """
        return self.__include_serializable_files

    def get_include_enum_files(self):
        """
        Return a list of all imported enum XML files.
        """
        return self.__include_enum_files

    def get_include_array_files(self):
        """
        Return a list of all imported array XML files.
        """
        return self.__include_array_files

    def get_interface(self):
        """
        Returns a interface object.
        """
        return self.__port

    def get_args(self):
        """
        Returns a list of arg objects with all text and attrib needed.
        """
        return self.__args
class Interface:
    """
    Value object describing a port interface: namespace, name, optional
    comment and an optional return specification.  In this architecture a
    port has exactly one interface, which this class represents.
    """

    def __init__(self, namespace, name, comment=None):
        """Store identity; the return spec starts unset."""
        self.__namespace = namespace
        self.__name = name
        self.__comment = comment
        # Populated later via set_return() when the XML declares <return>.
        self.__return_type = None
        self.__return_modifier = None

    def get_namespace(self):
        return self.__namespace

    def get_name(self):
        return self.__name

    def get_comment(self):
        return self.__comment

    def set_comment(self, comment):
        self.__comment = comment

    def set_return(self, t, m):
        """Record the return type *t* and its pass-by modifier *m*."""
        self.__return_type = t
        self.__return_modifier = m

    def get_return_type(self):
        return self.__return_type

    def get_return_modifier(self):
        return self.__return_modifier
class Arg:
    """
    Container for a single port argument: name, type, pass-by modifier,
    optional size (string/buffer types) and an optional comment.
    """

    def __init__(self, name, atype, modifier, size=None, comment=None):
        """
        @param name: Name of arg (each instance must be unique).
        @param atype: Type of arg (must have supporting include xml).
        @param modifier: Whether the argument is passed by value,
            reference, or pointer.
        @param size: Array size for string/buffer types, else None.
        @param comment: A single or multiline comment, or None.
        """
        self.__name = name
        self.__type = atype
        self.__modifier = modifier
        self.__size = size
        self.__comment = comment

    def get_name(self):
        return self.__name

    def get_type(self):
        return self.__type

    def get_modifier(self):
        return self.__modifier

    def get_size(self):
        return self.__size

    def get_comment(self):
        return self.__comment

    def set_comment(self, comment):
        self.__comment = comment

    def set_type(self, type):
        self.__type = type
| 33.545994
| 85
| 0.508271
|
import logging
import os
import sys
from lxml import etree
from fprime_ac.utils import ConfigManager
from fprime_ac.utils.exceptions import FprimeRngXmlValidationException
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
ROOTDIR = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "..")
class XmlPortsParser:
def __init__(self, xml_file=None):
self.__root = None
self.__include_serializable_files = []
self.__include_enum_files = []
self.__include_array_files = []
self.__include_header_files = []
self.__config = ConfigManager.ConfigManager.getInstance()
self.__port = None
self.__args = []
self.__enum_list_items = []
self.__modifier = None
if os.path.isfile(xml_file) == False:
str = "ERROR: Could not find specified XML file %s." % xml_file
raise OSError(str)
fd = open(xml_file)
xml_file = os.path.basename(xml_file)
self.__xml_filename = xml_file
xml_parser = etree.XMLParser(remove_comments=True)
element_tree = etree.parse(fd, parser=xml_parser)
relax_file_handler = open(ROOTDIR + self.__config.get("schema", "interface"))
relax_parsed = etree.parse(relax_file_handler)
relax_file_handler.close()
relax_compiled = etree.RelaxNG(relax_parsed)
if not relax_compiled.validate(element_tree):
raise FprimeRngXmlValidationException(relax_compiled.error_log)
interface = element_tree.getroot()
if interface.tag != "interface":
PRINT.info("%s is not a interface file" % xml_file)
sys.exit(-1)
print("Parsing Interface %s" % interface.attrib["name"])
if "namespace" in interface.attrib:
namespace_name = interface.attrib["namespace"]
else:
namespace_name = None
self.__port = Interface(namespace_name, interface.attrib["name"])
for interface_tag in interface:
if interface_tag.tag == "comment":
self.__port.set_comment(interface_tag.text.strip())
elif interface_tag.tag == "include_header":
self.__include_header_files.append(interface_tag.text)
elif interface_tag.tag == "import_serializable_type":
self.__include_serializable_files.append(interface_tag.text)
elif interface_tag.tag == "import_enum_type":
self.__include_enum_files.append(interface_tag.text)
elif interface_tag.tag == "import_array_type":
self.__include_array_files.append(interface_tag.text)
elif interface_tag.tag == "args":
for arg in interface_tag:
if arg.tag != "arg":
PRINT.info(
"%s: Invalid tag %s in interface args definition"
% (xml_file, arg.tag)
)
sys.exit(-1)
n = arg.attrib["name"]
t = arg.attrib["type"]
if "pass_by" in list(arg.attrib.keys()):
p = arg.attrib["pass_by"]
else:
p = None
if t == "string" or t == "buffer":
if not "size" in list(arg.attrib.keys()):
PRINT.info(
"%s: arg %s string must specify size tag"
% (xml_file, arg.tag)
)
sys.exit(-1)
else:
s = arg.attrib["size"]
else:
s = None
arg_obj = Arg(n, t, p, s, None)
for arg_tag in arg:
if arg_tag.tag == "comment":
arg_obj.set_comment(arg_tag.text)
elif arg_tag.tag == "enum" and t == "ENUM":
en = arg_tag.attrib["name"]
enum_members = []
for mem in arg_tag:
mn = mem.attrib["name"]
if "value" in list(mem.attrib.keys()):
v = mem.attrib["value"]
else:
v = None
if "comment" in list(mem.attrib.keys()):
mc = mem.attrib["comment"].strip()
else:
mc = None
enum_members.append((mn, v, mc))
arg_obj.set_type(((t, en), enum_members))
else:
PRINT.info(
"%s: Invalid argument tag %s in port %s argument %s"
% (xml_file, arg_tag.tag, interface_tag.tag, n)
)
sys.exit(-1)
self.__args.append(arg_obj)
elif interface_tag.tag == "return":
t = interface_tag.attrib["type"]
if "pass_by" in list(interface_tag.attrib.keys()):
m = interface_tag.attrib["pass_by"]
else:
m = "value"
for enum_tag in interface_tag:
if enum_tag.tag == "enum" and t == "ENUM":
en = enum_tag.attrib["name"]
enum_members = []
for mem in enum_tag:
mn = mem.attrib["name"]
if "value" in list(mem.attrib.keys()):
v = mem.attrib["value"]
else:
v = None
if "comment" in list(mem.attrib.keys()):
mc = mem.attrib["comment"].strip()
else:
mc = None
enum_members.append((mn, v, mc))
t = ((t, en), enum_members)
else:
PRINT.info(
"%s: Invalid port return value tag %s"
% (xml_file, enum_tag.tag)
)
sys.exit(-1)
self.__port.set_return(t, m)
def __del__(self):
for a in self.__args:
del a
del self.__port
def get_xml_filename(self):
return self.__xml_filename
def get_include_header_files(self):
return self.__include_header_files
def get_includes_serial_files(self):
return self.__include_serializable_files
def get_include_enum_files(self):
return self.__include_enum_files
def get_include_array_files(self):
return self.__include_array_files
def get_interface(self):
return self.__port
def get_args(self):
return self.__args
class Interface:
def __init__(self, namespace, name, comment=None):
self.__namespace = namespace
self.__name = name
self.__comment = comment
self.__return_type = None
self.__return_modifier = None
def get_namespace(self):
return self.__namespace
def get_name(self):
return self.__name
def get_comment(self):
return self.__comment
def set_comment(self, comment):
self.__comment = comment
def set_return(self, t, m):
self.__return_type = t
self.__return_modifier = m
def get_return_type(self):
return self.__return_type
def get_return_modifier(self):
return self.__return_modifier
class Arg:
def __init__(self, name, atype, modifier, size=None, comment=None):
self.__name = name
self.__type = atype
self.__modifier = modifier
self.__size = size
self.__comment = comment
def get_name(self):
return self.__name
def get_type(self):
return self.__type
def get_modifier(self):
return self.__modifier
def get_size(self):
return self.__size
def get_comment(self):
return self.__comment
def set_comment(self, comment):
self.__comment = comment
def set_type(self, type):
self.__type = type
| true
| true
|
f7165f0360ddc3c4689659445e5f15cdeeb70bab
| 3,059
|
py
|
Python
|
dockerdb/mongo_pytest.py
|
includeamin/dockerdb
|
cb9aca0ae6be1112b04ef9d843751c355005b47d
|
[
"Apache-2.0"
] | null | null | null |
dockerdb/mongo_pytest.py
|
includeamin/dockerdb
|
cb9aca0ae6be1112b04ef9d843751c355005b47d
|
[
"Apache-2.0"
] | null | null | null |
dockerdb/mongo_pytest.py
|
includeamin/dockerdb
|
cb9aca0ae6be1112b04ef9d843751c355005b47d
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import os
import shutil
import subprocess
import logging
import pytest
import dockerdb.mongo
CONTAINER_CACHE = {}
LOG = logging.getLogger(__name__)
def insert_data(client, data):
    """Insert fixture documents into a MongoDB client.

    Args:
        client: pymongo-style client supporting ``client[db][collection]``.
        data: mapping of ``{db_name: {collection_name: [document, ...]}}``.
    """
    # Iterate items() instead of double key lookups; the previous version
    # also bound the insert result to an unused local named ``re``, which
    # shadowed the stdlib module name.
    for db_name, collections in data.items():
        for collection_name, documents in collections.items():
            # Result object intentionally ignored; failures raise.
            client[db_name][collection_name].insert_many(documents)
def mongorestore(service, restore):
    """Copy a mongodump directory into the service share and restore it.

    The dump at *restore* is mirrored to ``<service.share>/dump`` (replacing
    any previous copy), then ``mongorestore`` is executed in the container.

    Raises:
        subprocess.CalledProcessError: when mongorestore exits non-zero.
    """
    dump_dir = os.path.join(service.share, 'dump')
    # Refresh the shared dump directory with the requested fixture.
    if os.path.exists(dump_dir):
        shutil.rmtree(dump_dir)
    shutil.copytree(restore, dump_dir)

    restore_cmd = ['mongorestore', dump_dir]
    exit_code, output = service.container.exec_run(restore_cmd)
    if exit_code != 0:
        LOG.error(output.decode('utf-8'))
        raise subprocess.CalledProcessError(exit_code, restore_cmd, output)
def get_service(version):
    """Return the cached Mongo service for *version*, waited-for and reset
    to a clean state."""
    cached = CONTAINER_CACHE[version]
    cached.wait()
    cached.factory_reset()
    return cached
def ensure_service(version, replicaset, port, client_args):
    """Lazily start (without waiting) a Mongo container for *version* and
    cache it in CONTAINER_CACHE."""
    if version in CONTAINER_CACHE:
        return
    CONTAINER_CACHE[version] = dockerdb.mongo.Mongo(
        version, wait=False, replicaset=replicaset, exposed_port=port,
        client_args=client_args)
def mongo_fixture(scope='function', versions=None, data=None,
                  restore=None, reuse=True, replicaset=None, port=27017,
                  client_args=None):
    """Create a fixture for py.test.

    Attributes:
        scope (str): py.test scope for this fixture
        versions (list): mongodb versions that should be tested
        data (dict): A dict containing data to be inserted into the database
            before the test. The structure must be:
            {'db': {
                'collection': [
                    {'document_data': True},
                    {'another': 'document'},
                    ...
                ]
            }}
        restore (str): path to directory containing a mongo dump
        reuse (bool): wether to reuse containers or create a new container
            for every requested injection
        client_args(dict): arguments that get passed to the pymongo client
    """
    # parallelized start of different versions
    if versions is None:
        versions = ['latest']
    if reuse:
        for version in versions:
            ensure_service(version, replicaset, port, client_args)

    @pytest.fixture(scope=scope, params=versions)
    def mongo(request):
        if reuse:
            service = get_service(request.param)
        else:
            # BUGFIX: the Mongo service class lives in ``dockerdb.mongo``
            # (cf. ensure_service); ``dockerdb.service`` is never imported
            # by this module, so the old reference would fail at runtime.
            service = dockerdb.mongo.Mongo(request.param, wait=True,
                                           replicaset=replicaset,
                                           exposed_port=port,
                                           client_args=client_args)
        client = service.pymongo_client()
        service.wait()
        if data:
            insert_data(client, data)
        if restore:
            mongorestore(service, restore)
        yield service
        # Throw-away containers are removed once the test is done.
        if not reuse:
            service.remove()
    return mongo
| 28.324074
| 76
| 0.601831
|
from __future__ import absolute_import
import os
import shutil
import subprocess
import logging
import pytest
import dockerdb.mongo
CONTAINER_CACHE = {}
LOG = logging.getLogger(__name__)
def insert_data(client, data):
for db in data:
for collection in data[db]:
entries = data[db][collection]
re = client[db][collection].insert_many(entries)
def mongorestore(service, restore):
dst = os.path.join(service.share, 'dump')
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(restore, dst)
command = ['mongorestore', dst]
exit_code, output = service.container.exec_run(command)
if exit_code != 0:
LOG.error(output.decode('utf-8'))
raise subprocess.CalledProcessError(exit_code, command, output)
def get_service(version):
service = CONTAINER_CACHE[version]
service.wait()
service.factory_reset()
return service
def ensure_service(version, replicaset, port, client_args):
if version not in CONTAINER_CACHE:
CONTAINER_CACHE[version] = dockerdb.mongo.Mongo(
version, wait=False, replicaset=replicaset, exposed_port=port,
client_args=client_args)
def mongo_fixture(scope='function', versions=None, data=None,
restore=None, reuse=True, replicaset=None, port=27017,
client_args=None):
if versions is None:
versions = ['latest']
if reuse:
for version in versions:
ensure_service(version, replicaset, port, client_args)
@pytest.fixture(scope=scope, params=versions)
def mongo(request):
if reuse:
service = get_service(request.param)
else:
service = dockerdb.service.Mongo(request.param, wait=True,
replicaset=replicaset,
exposed_port=port,
client_args=client_args)
client = service.pymongo_client()
service.wait()
if data:
insert_data(client, data)
if restore:
mongorestore(service, restore)
yield service
if not reuse:
service.remove()
return mongo
| true
| true
|
f7166010add7d50b95bbe3bc7c86172c2b3ad3fa
| 876
|
py
|
Python
|
tests/test_instruments_request.py
|
rockscie/async_blp
|
acb8777ccf2499681bde87d76ca780b61219699c
|
[
"MIT"
] | 12
|
2019-08-05T16:56:54.000Z
|
2021-02-02T11:09:37.000Z
|
tests/test_instruments_request.py
|
lightning-like/async_blp
|
acb8777ccf2499681bde87d76ca780b61219699c
|
[
"MIT"
] | null | null | null |
tests/test_instruments_request.py
|
lightning-like/async_blp
|
acb8777ccf2499681bde87d76ca780b61219699c
|
[
"MIT"
] | 5
|
2019-12-08T15:43:13.000Z
|
2021-11-14T08:38:07.000Z
|
import pandas as pd
import pytest
from async_blp.instruments_requests import InstrumentRequestBase
@pytest.mark.asyncio
class TestInstrumentRequestBase:
    """Unit tests for ``InstrumentRequestBase`` weight and processing."""

    def test__weight(self):
        # weight appears to be max_results * len(response_fields)
        # (5 * 2 == 10) — TODO confirm against the implementation.
        request = InstrumentRequestBase('query', max_results=5)
        request.response_fields = ['field_1', 'field_2']
        assert request.weight == 10

    async def test__process(self, security_lookup_msg):
        """A lookup message followed by the ``None`` sentinel (presumably
        the end-of-stream marker) yields a one-row DataFrame with the
        requested response fields."""
        request = InstrumentRequestBase('query', max_results=5)
        request.response_fields = ['security', 'description']
        request.send_queue_message(security_lookup_msg)
        request.send_queue_message(None)
        data, _ = await request.process()
        expected_data = pd.DataFrame([['F US Equity', 'Ford Motors Co']],
                                     columns=['security', 'description'])
        pd.testing.assert_frame_equal(expected_data, data)
| 30.206897
| 73
| 0.682648
|
import pandas as pd
import pytest
from async_blp.instruments_requests import InstrumentRequestBase
@pytest.mark.asyncio
class TestInstrumentRequestBase:
def test__weight(self):
request = InstrumentRequestBase('query', max_results=5)
request.response_fields = ['field_1', 'field_2']
assert request.weight == 10
async def test__process(self, security_lookup_msg):
request = InstrumentRequestBase('query', max_results=5)
request.response_fields = ['security', 'description']
request.send_queue_message(security_lookup_msg)
request.send_queue_message(None)
data, _ = await request.process()
expected_data = pd.DataFrame([['F US Equity', 'Ford Motors Co']],
columns=['security', 'description'])
pd.testing.assert_frame_equal(expected_data, data)
| true
| true
|
f71662f015d69cc4a3e8904bd18134ab9e12e9e2
| 1,649
|
py
|
Python
|
test/mitmproxy/addons/test_clientplayback.py
|
nikofil/mitmproxy
|
439c113989feb193972b83ffcd0823ea4d2218df
|
[
"MIT"
] | null | null | null |
test/mitmproxy/addons/test_clientplayback.py
|
nikofil/mitmproxy
|
439c113989feb193972b83ffcd0823ea4d2218df
|
[
"MIT"
] | null | null | null |
test/mitmproxy/addons/test_clientplayback.py
|
nikofil/mitmproxy
|
439c113989feb193972b83ffcd0823ea4d2218df
|
[
"MIT"
] | null | null | null |
import pytest
from unittest import mock
from mitmproxy.test import tflow
from mitmproxy import io
from mitmproxy import exceptions
from mitmproxy.addons import clientplayback
from mitmproxy.test import taddons
def tdump(path, flows):
    """Serialize *flows* to *path* with mitmproxy's FlowWriter.

    The original opened the file and never closed it; the handle is now
    managed with a context manager so all written data is flushed before
    callers (e.g. test_configure) read the file back.
    """
    with open(path, "wb") as fp:
        w = io.FlowWriter(fp)
        for i in flows:
            w.add(i)
class MockThread():
    """Stand-in for a replay thread that always reports itself finished."""

    def is_alive(self):
        # Pretend the thread is done so ClientPlayback.tick() clears it.
        return False
class TestClientPlayback:
    # Exercises the ClientPlayback addon's flow queue and replay-thread
    # lifecycle; the real RequestReplayThread is mocked out.
    def test_playback(self):
        cp = clientplayback.ClientPlayback()
        with taddons.context() as tctx:
            assert cp.count() == 0
            f = tflow.tflow(resp=True)
            cp.load([f])
            assert cp.count() == 1
            # Patch the replay-thread class so tick() spawns no real work.
            RP = "mitmproxy.proxy.protocol.http_replay.RequestReplayThread"
            with mock.patch(RP) as rp:
                assert not cp.current_thread
                cp.tick()
                assert rp.called
                assert cp.current_thread
            # With no flows queued and no running thread, a tick should
            # emit the "processing_complete" event.
            cp.flows = None
            cp.current_thread = None
            cp.tick()
            assert tctx.master.has_event("processing_complete")
            # A dead thread (is_alive() -> False) must be cleared by tick().
            cp.current_thread = MockThread()
            cp.tick()
            assert cp.current_thread is None
    def test_configure(self, tmpdir):
        # client_replay must point at readable flow files; a missing path
        # is rejected with an OptionsError.
        cp = clientplayback.ClientPlayback()
        with taddons.context() as tctx:
            path = str(tmpdir.join("flows"))
            tdump(path, [tflow.tflow()])
            tctx.configure(cp, client_replay=[path])
            tctx.configure(cp, client_replay=[])
            tctx.configure(cp)
            with pytest.raises(exceptions.OptionsError):
                tctx.configure(cp, client_replay=["nonexistent"])
| 28.929825
| 75
| 0.596725
|
import pytest
from unittest import mock
from mitmproxy.test import tflow
from mitmproxy import io
from mitmproxy import exceptions
from mitmproxy.addons import clientplayback
from mitmproxy.test import taddons
def tdump(path, flows):
w = io.FlowWriter(open(path, "wb"))
for i in flows:
w.add(i)
class MockThread():
def is_alive(self):
return False
class TestClientPlayback:
def test_playback(self):
cp = clientplayback.ClientPlayback()
with taddons.context() as tctx:
assert cp.count() == 0
f = tflow.tflow(resp=True)
cp.load([f])
assert cp.count() == 1
RP = "mitmproxy.proxy.protocol.http_replay.RequestReplayThread"
with mock.patch(RP) as rp:
assert not cp.current_thread
cp.tick()
assert rp.called
assert cp.current_thread
cp.flows = None
cp.current_thread = None
cp.tick()
assert tctx.master.has_event("processing_complete")
cp.current_thread = MockThread()
cp.tick()
assert cp.current_thread is None
def test_configure(self, tmpdir):
cp = clientplayback.ClientPlayback()
with taddons.context() as tctx:
path = str(tmpdir.join("flows"))
tdump(path, [tflow.tflow()])
tctx.configure(cp, client_replay=[path])
tctx.configure(cp, client_replay=[])
tctx.configure(cp)
with pytest.raises(exceptions.OptionsError):
tctx.configure(cp, client_replay=["nonexistent"])
| true
| true
|
f71664184c07162070c1ae4bdafc1b009fa3b980
| 5,121
|
py
|
Python
|
run_quickquasars.py
|
olegs22/Quickquasar_QA
|
df74994780216846501710b79b4dce7d025809c9
|
[
"MIT"
] | null | null | null |
run_quickquasars.py
|
olegs22/Quickquasar_QA
|
df74994780216846501710b79b4dce7d025809c9
|
[
"MIT"
] | null | null | null |
run_quickquasars.py
|
olegs22/Quickquasar_QA
|
df74994780216846501710b79b4dce7d025809c9
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
import shutil
import glob as glob
def get_slurm_script(script_name,command,outdir,idir,mail,log,part,nodes,threads,time,job_name):
if os.path.isdir(outdir+'/run') == False:
os.mkdir(outdir+'/run')
file_name = outdir + '/run/' + script_name
f = open(file_name,'w')
slurm_dict = dict()
slurm_dict['line_0'] = '#SBATCH -C haswell\n'
slurm_dict['line_1'] = '#SBATCH --partition='+part+'\n'
slurm_dict['line_2'] = '#SBATCH --account=desi\n'
slurm_dict['line_3'] = '#SBATCH --nodes='+str(nodes)+'\n'
slurm_dict['line_4'] = '#SBATCH --time='+time+'\n'
slurm_dict['line_5'] = '#SBATCH --job-name='+job_name+'\n'
slurm_dict['line_6'] = '#SBATCH --output='+log+'\n'
slurm_dict['line_7'] = '#SBATCH --mail-user='+mail+'\n'
slurm_dict['line_8'] = 'idir='+idir+'\n'
slurm_dict['line_9'] = 'outdir='+outdir+'\n'
slurm_dict['line_10'] = 'nodes='+str(nodes)+'\n' # CHECK MATCHING #SBATCH --nodes ABOVE !!!!
slurm_dict['line_11'] = 'nthreads='+str(threads)+'\n' # TO BE TUNED ; CAN HIT NODE MEMORY LIMIT ; 4 is max on edison for nside=16 and ~50 QSOs/deg2
slurm_dict['line_12'] = 'echo "get list of skewers to run ..."\n'
slurm_dict['line_13'] = 'files=`\ls -1 $idir/*/*/transmission*.fits*`\n'
slurm_dict['line_14'] = 'nfiles=`echo $files | wc -w`\n'
slurm_dict['line_15'] = 'nfilespernode=$((nfiles/nodes+1))\n'
slurm_dict['line_16'] = 'echo "n files =" $nfiles\n'
slurm_dict['line_17'] = 'echo "n files per node =" $nfilespernode\n'
slurm_dict['line_18'] = 'first=1\n'
slurm_dict['line_19'] = 'last=$nfilespernode\n'
slurm_dict['line_20'] = 'for node in `seq $nodes` ; do\n'
slurm_dict['line_21'] = ' echo "starting node $node"\n'
slurm_dict['line_22'] = ' # list of files to run\n'
slurm_dict['line_23'] = ' if (( $node == $nodes )) ; then\n'
slurm_dict['line_24'] = ' last=""\n'
slurm_dict['line_25'] = ' fi\n'
slurm_dict['line_26'] = ' echo ${first}-${last}\n'
slurm_dict['line_27'] = ' tfiles=`echo $files | cut -d " " -f ${first}-${last}`\n'
slurm_dict['line_28'] = ' first=$(( first + nfilespernode ))\n'
slurm_dict['line_29'] = ' last=$(( last + nfilespernode ))\n'
set_up = " srun -N 1 -n 1 -c $nthreads quickquasars -i $tfiles --nproc $nthreads --outdir $outdir/spectra-16 "
slurm_dict['line_30'] = set_up + command +'\n'
slurm_dict['line_31'] = ' done\n'
slurm_dict['line_32'] = 'wait\n'
slurm_dict['line_33'] = 'echo "END"\n'
for i in range(len(slurm_dict)):
f.write(slurm_dict['line_' + str(i)])
return None
if __name__ == "__main__":
import argparse
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument('--outdir',type=str,help='output directory of the quickquasar run')
parser.add_argument('--idir',type=str,help='directory from where to fetch the input data')
parser.add_argument('--mail',type=str,default=' ',help='email to sent status of the job')
parser.add_argument('--log',type=str,default =' ',help='directory to output the log of the job run')
parser.add_argument('--qos',type=str,default='regular',help='which queue')
parser.add_argument('--nodes',type=int,default=40,help='number numbers to use')
parser.add_argument('--threads',type=int,default=4,help='number of thread to use per node')
parser.add_argument('--time',default='00:30:00',type=str)
parser.add_argument('--name',type=str,default='lyasim',help='name of the job')
parser.add_argument('--seed-generator',type=int,default=15430289,help='seed to run quickquasar')
parser.add_argument('--nruns',type=int,default=1,help='number of quickquasar runs with the same arguments')
args = parser.parse_args()
outfile = open('submit.sh','w+')
np.random.seed(args.seed_generator)
for k in range(args.nruns):
#make the output dirs
output_dirs = args.outdir + '_'+str(k)
if os.path.isdir(output_dirs) == False:
os.mkdir(output_dirs)
if os.path.isdir(output_dirs+'/logs') == False:
os.mkdir(output_dirs+'/logs')
if os.path.isdir(output_dirs+'/spectra-16') == False:
os.mkdir(output_dirs+'/spectra-16')
seed = np.random.randint(12345,98765,size=1)
#read config file for quickquasart
file = open('config.txt','r')
lines = []
for l in file:
lines.append(l)
for i in range(len(lines)):
line_comp = lines[i].split()
if len(line_comp) != 1:
lines[i] = '--' + line_comp[0] + ' ' + line_comp[1] + ' '
else:
lines[i] = '--' + line_comp[0] + ' '
command = "".join(lines) + '--seed '+str(seed[0])
name = 'run_quickquasar.sh'
get_slurm_script(name,command,output_dirs,args.idir,args.mail,args.log,args.qos,args.nodes,args.threads,args.time,args.name)
outfile.write('sbatch '+output_dirs+'/run/'+name+'\n')
outfile.close()
| 48.311321
| 151
| 0.610623
|
import numpy as np
import os
import shutil
import glob as glob
def get_slurm_script(script_name,command,outdir,idir,mail,log,part,nodes,threads,time,job_name):
if os.path.isdir(outdir+'/run') == False:
os.mkdir(outdir+'/run')
file_name = outdir + '/run/' + script_name
f = open(file_name,'w')
slurm_dict = dict()
slurm_dict['line_0'] = '#SBATCH -C haswell\n'
slurm_dict['line_1'] = '#SBATCH --partition='+part+'\n'
slurm_dict['line_2'] = '#SBATCH --account=desi\n'
slurm_dict['line_3'] = '#SBATCH --nodes='+str(nodes)+'\n'
slurm_dict['line_4'] = '#SBATCH --time='+time+'\n'
slurm_dict['line_5'] = '#SBATCH --job-name='+job_name+'\n'
slurm_dict['line_6'] = '#SBATCH --output='+log+'\n'
slurm_dict['line_7'] = '#SBATCH --mail-user='+mail+'\n'
slurm_dict['line_8'] = 'idir='+idir+'\n'
slurm_dict['line_9'] = 'outdir='+outdir+'\n'
slurm_dict['line_10'] = 'nodes='+str(nodes)+'\n' = 'nthreads='+str(threads)+'\n'
slurm_dict['line_12'] = 'echo "get list of skewers to run ..."\n'
slurm_dict['line_13'] = 'files=`\ls -1 $idir/*/*/transmission*.fits*`\n'
slurm_dict['line_14'] = 'nfiles=`echo $files | wc -w`\n'
slurm_dict['line_15'] = 'nfilespernode=$((nfiles/nodes+1))\n'
slurm_dict['line_16'] = 'echo "n files =" $nfiles\n'
slurm_dict['line_17'] = 'echo "n files per node =" $nfilespernode\n'
slurm_dict['line_18'] = 'first=1\n'
slurm_dict['line_19'] = 'last=$nfilespernode\n'
slurm_dict['line_20'] = 'for node in `seq $nodes` ; do\n'
slurm_dict['line_21'] = ' echo "starting node $node"\n'
slurm_dict['line_22'] = ' # list of files to run\n'
slurm_dict['line_23'] = ' if (( $node == $nodes )) ; then\n'
slurm_dict['line_24'] = ' last=""\n'
slurm_dict['line_25'] = ' fi\n'
slurm_dict['line_26'] = ' echo ${first}-${last}\n'
slurm_dict['line_27'] = ' tfiles=`echo $files | cut -d " " -f ${first}-${last}`\n'
slurm_dict['line_28'] = ' first=$(( first + nfilespernode ))\n'
slurm_dict['line_29'] = ' last=$(( last + nfilespernode ))\n'
set_up = " srun -N 1 -n 1 -c $nthreads quickquasars -i $tfiles --nproc $nthreads --outdir $outdir/spectra-16 "
slurm_dict['line_30'] = set_up + command +'\n'
slurm_dict['line_31'] = ' done\n'
slurm_dict['line_32'] = 'wait\n'
slurm_dict['line_33'] = 'echo "END"\n'
for i in range(len(slurm_dict)):
f.write(slurm_dict['line_' + str(i)])
return None
if __name__ == "__main__":
import argparse
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument('--outdir',type=str,help='output directory of the quickquasar run')
parser.add_argument('--idir',type=str,help='directory from where to fetch the input data')
parser.add_argument('--mail',type=str,default=' ',help='email to sent status of the job')
parser.add_argument('--log',type=str,default =' ',help='directory to output the log of the job run')
parser.add_argument('--qos',type=str,default='regular',help='which queue')
parser.add_argument('--nodes',type=int,default=40,help='number numbers to use')
parser.add_argument('--threads',type=int,default=4,help='number of thread to use per node')
parser.add_argument('--time',default='00:30:00',type=str)
parser.add_argument('--name',type=str,default='lyasim',help='name of the job')
parser.add_argument('--seed-generator',type=int,default=15430289,help='seed to run quickquasar')
parser.add_argument('--nruns',type=int,default=1,help='number of quickquasar runs with the same arguments')
args = parser.parse_args()
outfile = open('submit.sh','w+')
np.random.seed(args.seed_generator)
for k in range(args.nruns):
output_dirs = args.outdir + '_'+str(k)
if os.path.isdir(output_dirs) == False:
os.mkdir(output_dirs)
if os.path.isdir(output_dirs+'/logs') == False:
os.mkdir(output_dirs+'/logs')
if os.path.isdir(output_dirs+'/spectra-16') == False:
os.mkdir(output_dirs+'/spectra-16')
seed = np.random.randint(12345,98765,size=1)
file = open('config.txt','r')
lines = []
for l in file:
lines.append(l)
for i in range(len(lines)):
line_comp = lines[i].split()
if len(line_comp) != 1:
lines[i] = '--' + line_comp[0] + ' ' + line_comp[1] + ' '
else:
lines[i] = '--' + line_comp[0] + ' '
command = "".join(lines) + '--seed '+str(seed[0])
name = 'run_quickquasar.sh'
get_slurm_script(name,command,output_dirs,args.idir,args.mail,args.log,args.qos,args.nodes,args.threads,args.time,args.name)
outfile.write('sbatch '+output_dirs+'/run/'+name+'\n')
outfile.close()
| true
| true
|
f716651b671438a36da3dbcc61e463c30cf5bc75
| 432
|
py
|
Python
|
client.py
|
colinhartigan/valorant-web-auth-server
|
6a12c41ef42d234afbbd6870925c29207428c8ce
|
[
"MIT"
] | 1
|
2022-01-27T07:49:59.000Z
|
2022-01-27T07:49:59.000Z
|
client.py
|
colinhartigan/valorant-web-auth-server
|
6a12c41ef42d234afbbd6870925c29207428c8ce
|
[
"MIT"
] | null | null | null |
client.py
|
colinhartigan/valorant-web-auth-server
|
6a12c41ef42d234afbbd6870925c29207428c8ce
|
[
"MIT"
] | null | null | null |
from valclient import Client
def join_party(username,password,region,party_id):
client = Client(region=region,auth={'username':username,'password':password})
client.activate()
return client.party_join(party_id)
def request_party(username,password,region,party_id):
client = Client(region=region,auth={'username':username,'password':password})
client.activate()
return client.party_request(party_id)
| 39.272727
| 81
| 0.752315
|
from valclient import Client
def join_party(username,password,region,party_id):
client = Client(region=region,auth={'username':username,'password':password})
client.activate()
return client.party_join(party_id)
def request_party(username,password,region,party_id):
client = Client(region=region,auth={'username':username,'password':password})
client.activate()
return client.party_request(party_id)
| true
| true
|
f71665745e05e263180049a3a63e0c9795c76a64
| 1,867
|
py
|
Python
|
google/ads/google_ads/v5/services/campaign_experiment_service_client_config.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | 1
|
2021-04-09T04:28:47.000Z
|
2021-04-09T04:28:47.000Z
|
google/ads/google_ads/v5/services/campaign_experiment_service_client_config.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v5/services/campaign_experiment_service_client_config.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
config = {
"interfaces": {
"google.ads.googleads.v5.services.CampaignExperimentService": {
"retry_codes": {
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"GetCampaignExperiment": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"CreateCampaignExperiment": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"MutateCampaignExperiments": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"GraduateCampaignExperiment": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"PromoteCampaignExperiment": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"EndCampaignExperiment": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"ListCampaignExperimentAsyncErrors": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
| 30.112903
| 67
| 0.551687
|
config = {
"interfaces": {
"google.ads.googleads.v5.services.CampaignExperimentService": {
"retry_codes": {
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"GetCampaignExperiment": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"CreateCampaignExperiment": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"MutateCampaignExperiments": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"GraduateCampaignExperiment": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"PromoteCampaignExperiment": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"EndCampaignExperiment": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"ListCampaignExperimentAsyncErrors": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
| true
| true
|
f71665d4d7a7d68df501099943ae56592dc092fb
| 278
|
py
|
Python
|
data_etl/__main__.py
|
VBamgbaye/M_Challenge
|
310f9b8ee3b6c9534a6dfd3f4ca0c8b56f889fdb
|
[
"MIT"
] | null | null | null |
data_etl/__main__.py
|
VBamgbaye/M_Challenge
|
310f9b8ee3b6c9534a6dfd3f4ca0c8b56f889fdb
|
[
"MIT"
] | null | null | null |
data_etl/__main__.py
|
VBamgbaye/M_Challenge
|
310f9b8ee3b6c9534a6dfd3f4ca0c8b56f889fdb
|
[
"MIT"
] | null | null | null |
from .stage_data import StageData
print('please input the following information:)')
password = (input('Database password: '))
host = (input('Host: '))
upload_data = StageData(password, host)
data = upload_data.save_to_database()
print(f"Data extracted, processed and staged!")
| 30.888889
| 49
| 0.755396
|
from .stage_data import StageData
print('please input the following information:)')
password = (input('Database password: '))
host = (input('Host: '))
upload_data = StageData(password, host)
data = upload_data.save_to_database()
print(f"Data extracted, processed and staged!")
| true
| true
|
f7166751991c71ec27687881d5732fe52e4a624f
| 1,379
|
py
|
Python
|
light_famd/mca.py
|
marlon27/Light_FAMD
|
fe4328f15f6145798869908fa126eabe75e85391
|
[
"BSD-2-Clause"
] | 11
|
2019-11-13T21:46:32.000Z
|
2021-08-02T13:41:31.000Z
|
light_famd/mca.py
|
marlon27/Light_FAMD
|
fe4328f15f6145798869908fa126eabe75e85391
|
[
"BSD-2-Clause"
] | 5
|
2019-11-28T10:07:04.000Z
|
2021-03-11T17:21:43.000Z
|
light_famd/mca.py
|
marlon27/Light_FAMD
|
fe4328f15f6145798869908fa126eabe75e85391
|
[
"BSD-2-Clause"
] | 2
|
2021-01-29T02:57:26.000Z
|
2021-06-03T14:20:26.000Z
|
"""Multiple Correspondence Analysis (MCA)"""
import numpy as np
from sklearn import utils
from . import ca
from . import one_hot
class MCA(ca.CA):
def fit(self, X, y=None):
if self.check_input:
utils.check_array(X, dtype=[str, np.number])
n_initial_columns = X.shape[1]
# One-hot encode the data
self.one_hot_ = one_hot.OneHotEncoder().fit(X)
_X_t= self.one_hot_.transform(X)
_0_freq_serie= (_X_t == 0).sum(axis=0)/ len(_X_t)
self._usecols=_0_freq_serie[_0_freq_serie < 0.99].index
print('MCA PROCESS ELIMINATED {0} COLUMNS SINCE THEIR MISS_RATES >= 99%'.format( _X_t.shape[1] - len(self._usecols) ))
n_new_columns = len(self._usecols)
self.total_inertia_ = (n_new_columns - n_initial_columns) / n_initial_columns
# Apply CA to the indicator matrix
super().fit(_X_t.loc[:,self._usecols])
return self
def _transform(self, X):
return super()._transform(self.one_hot_.transform(X).loc[:,self._usecols])
def transform(self, X):
"""Computes the row principal coordinates of a dataset."""
utils.validation.check_is_fitted(self, 'singular_values_')
if self.check_input:
utils.check_array(X, dtype=[str, np.number])
return self._transform(X)
| 28.729167
| 127
| 0.625091
|
import numpy as np
from sklearn import utils
from . import ca
from . import one_hot
class MCA(ca.CA):
def fit(self, X, y=None):
if self.check_input:
utils.check_array(X, dtype=[str, np.number])
n_initial_columns = X.shape[1]
self.one_hot_ = one_hot.OneHotEncoder().fit(X)
_X_t= self.one_hot_.transform(X)
_0_freq_serie= (_X_t == 0).sum(axis=0)/ len(_X_t)
self._usecols=_0_freq_serie[_0_freq_serie < 0.99].index
print('MCA PROCESS ELIMINATED {0} COLUMNS SINCE THEIR MISS_RATES >= 99%'.format( _X_t.shape[1] - len(self._usecols) ))
n_new_columns = len(self._usecols)
self.total_inertia_ = (n_new_columns - n_initial_columns) / n_initial_columns
super().fit(_X_t.loc[:,self._usecols])
return self
def _transform(self, X):
return super()._transform(self.one_hot_.transform(X).loc[:,self._usecols])
def transform(self, X):
utils.validation.check_is_fitted(self, 'singular_values_')
if self.check_input:
utils.check_array(X, dtype=[str, np.number])
return self._transform(X)
| true
| true
|
f716683184b12be70591300c446fc7951aa0428a
| 4,386
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
FcoinFup/litecoin
|
f60e79f2bf373dafd258264ae197cee44ab4a314
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
FcoinFup/litecoin
|
f60e79f2bf373dafd258264ae197cee44ab4a314
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
FcoinFup/litecoin
|
f60e79f2bf373dafd258264ae197cee44ab4a314
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % vchAddr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the deliverycoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_main', 2333)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_test', 19335)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.328571
| 99
| 0.583903
|
from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % vchAddr)
return pchOnionCat + vchAddr
elif '.' in addr:
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'):
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the deliverycoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_main', 2333)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_test', 19335)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true
| true
|
f71669461b57160bac0b4efc0928e6826dc11176
| 15,093
|
py
|
Python
|
tests/test_collections.py
|
fperetti/callee
|
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
|
[
"BSD-3-Clause"
] | 72
|
2016-03-21T03:58:33.000Z
|
2022-03-29T10:24:51.000Z
|
tests/test_collections.py
|
fperetti/callee
|
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
|
[
"BSD-3-Clause"
] | 14
|
2016-03-21T03:58:39.000Z
|
2021-09-07T16:26:03.000Z
|
tests/test_collections.py
|
fperetti/callee
|
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
|
[
"BSD-3-Clause"
] | 9
|
2016-10-26T14:39:00.000Z
|
2021-08-13T17:39:35.000Z
|
"""
Tests for collections' matchers.
"""
import collections
from taipan.testing import skipIf
from callee._compat import OrderedDict as _OrderedDict
import callee.collections as __unit__
from tests import MatcherTestCase
class Iterable(MatcherTestCase):
test_none = lambda self: self.assert_no_match(None)
test_zero = lambda self: self.assert_no_match(0)
test_empty_string = lambda self: self.assert_match('')
test_empty_list = lambda self: self.assert_match([])
test_empty_tuple = lambda self: self.assert_match(())
test_empty_dict = lambda self: self.assert_match({})
test_empty_generator = lambda self: self.assert_match(x for x in ())
test_some_string = lambda self: self.assert_match("Alice has a cat")
test_some_number = lambda self: self.assert_no_match(42)
test_some_list = lambda self: self.assert_match([1, 2, 3, 5, 8, 13])
test_some_tuple = lambda self: self.assert_match(('foo', -1, ['bar']))
def test_some_generator(self):
gen = (x for x in [1, 2, 5])
self.assert_match(gen)
self.assertNotEmpty(
gen, msg="matcher shouldn't have iterated over the generator")
test_some_object = lambda self: self.assert_no_match(object())
test_repr = lambda self: self.assert_repr(__unit__.Iterable())
# Assertion functions
def assert_match(self, value):
return super(Iterable, self).assert_match(__unit__.Iterable(), value)
def assert_no_match(self, value):
return super(Iterable, self) \
.assert_no_match(__unit__.Iterable(), value)
class Generator(MatcherTestCase):
test_none = lambda self: self.assert_no_match(None)
test_zero = lambda self: self.assert_no_match(0)
test_empty_string = lambda self: self.assert_no_match('')
test_empty_list = lambda self: self.assert_no_match([])
test_empty_tuple = lambda self: self.assert_no_match(())
test_empty_dict = lambda self: self.assert_no_match({})
test_empty_generator = lambda self: self.assert_match(x for x in ())
test_some_string = lambda self: self.assert_no_match("Alice has a cat")
test_some_number = lambda self: self.assert_no_match(42)
test_some_list = lambda self: self.assert_no_match([1, 2, 3, 5, 8, 13])
test_some_tuple = lambda self: self.assert_no_match(('foo', -1, ['bar']))
def test_some_generator(self):
gen = (x for x in [1, 2, 5])
self.assert_match(gen)
self.assertNotEmpty(
gen, msg="matcher shouldn't have iterated over the generator")
test_some_object = lambda self: self.assert_no_match(object())
test_repr = lambda self: self.assert_repr(__unit__.Generator())
# Assertion functions
def assert_match(self, value):
return super(Generator, self).assert_match(__unit__.Generator(), value)
def assert_no_match(self, value):
return super(Generator, self) \
.assert_no_match(__unit__.Generator(), value)
# Ordinary collections
class Sequence(MatcherTestCase):
    """Tests for the ``Sequence`` matcher: strings, lists and tuples match
    (sets, dicts and generators do not), optionally with an element matcher.
    """

    def test_none(self):
        self.assert_no_match(None)

    def test_zero(self):
        self.assert_no_match(0)

    def test_empty_string(self):
        self.assert_match('')

    def test_empty_list(self):
        self.assert_match([])

    def test_empty_set(self):
        self.assert_no_match(set())

    def test_empty_tuple(self):
        self.assert_match(())

    def test_empty_dict(self):
        self.assert_no_match({})

    def test_empty_generator(self):
        self.assert_no_match(x for x in ())

    def test_some_string(self):
        text = "Alice has a cat"
        self.assert_match(text)
        self.assert_match(text, of=str)

    def test_some_number(self):
        self.assert_no_match(42)

    def test_some_list(self):
        numbers = [1, 2, 3, 5, 8, 13]
        self.assert_match(numbers)
        self.assert_match(numbers, of=int)

    def test_some_tuple(self):
        self.assert_match(('foo', -1, ['bar']))

    def test_some_generator(self):
        self.assert_no_match(x for x in [1, 2])

    def test_some_object(self):
        self.assert_no_match(object())

    def test_repr(self):
        self.assert_repr(__unit__.Sequence())

    # Assertion helpers bound to the matcher under test.

    def assert_match(self, value, of=None):
        matcher = __unit__.Sequence(of)
        return super(Sequence, self).assert_match(matcher, value)

    def assert_no_match(self, value, of=None):
        matcher = __unit__.Sequence(of)
        return super(Sequence, self).assert_no_match(matcher, value)
class List(MatcherTestCase):
    """Tests for the ``List`` matcher: only ``list`` objects match,
    optionally with an element matcher.
    """

    def test_none(self):
        self.assert_no_match(None)

    def test_zero(self):
        self.assert_no_match(0)

    def test_empty_string(self):
        self.assert_no_match('')

    def test_empty_list(self):
        self.assert_match([])

    def test_empty_set(self):
        self.assert_no_match(set())

    def test_empty_tuple(self):
        self.assert_no_match(())

    def test_empty_dict(self):
        self.assert_no_match({})

    def test_empty_generator(self):
        self.assert_no_match(x for x in ())

    def test_some_string(self):
        self.assert_no_match("Alice has a cat")

    def test_some_number(self):
        self.assert_no_match(42)

    def test_some_list(self):
        self.assert_match([1, 2, 3, 5, 8, 13], int)

    def test_some_tuple(self):
        self.assert_no_match(('foo', -1, ['bar']))

    def test_some_generator(self):
        self.assert_no_match(x for x in [1, 2])

    def test_some_object(self):
        self.assert_no_match(object())

    def test_repr(self):
        self.assert_repr(__unit__.List())

    # Assertion helpers bound to the matcher under test.

    def assert_match(self, value, of=None):
        matcher = __unit__.List(of)
        return super(List, self).assert_match(matcher, value)

    def assert_no_match(self, value, of=None):
        matcher = __unit__.List(of)
        return super(List, self).assert_no_match(matcher, value)
class Set(MatcherTestCase):
    """Tests for the ``Set`` matcher: only ``set`` objects match,
    optionally with an element matcher.
    """

    def test_none(self):
        self.assert_no_match(None)

    def test_zero(self):
        self.assert_no_match(0)

    def test_empty_string(self):
        self.assert_no_match('')

    def test_empty_list(self):
        self.assert_no_match([])

    def test_empty_set(self):
        self.assert_match(set())

    def test_empty_tuple(self):
        self.assert_no_match(())

    def test_empty_dict(self):
        self.assert_no_match({})

    def test_empty_generator(self):
        self.assert_no_match(x for x in ())

    def test_some_string(self):
        self.assert_no_match("Alice has a cat")

    def test_some_number(self):
        self.assert_no_match(42)

    def test_some_list(self):
        self.assert_no_match([1, 2, 3, 5, 8, 13])

    def test_some_set(self):
        self.assert_match(set([2, 4, 6, 8, 10]), int)

    def test_some_tuple(self):
        self.assert_no_match(('foo', -1, ['bar']))

    def test_some_generator(self):
        self.assert_no_match(x for x in [1, 2])

    def test_some_object(self):
        self.assert_no_match(object())

    def test_repr(self):
        self.assert_repr(__unit__.Set())

    # Assertion helpers bound to the matcher under test.

    def assert_match(self, value, of=None):
        matcher = __unit__.Set(of)
        return super(Set, self).assert_match(matcher, value)

    def assert_no_match(self, value, of=None):
        matcher = __unit__.Set(of)
        return super(Set, self).assert_no_match(matcher, value)
# Mappings
class CustomDict(getattr(collections, 'abc', collections).MutableMapping):
    """Custom, no-op mapping class that just wraps a regular Python dict
    but is not a Python dict itself.

    Accepts the same constructor arguments as ``dict``: a mapping or an
    iterable of ``(key, value)`` pairs, plus keyword arguments.

    The abstract base classes are resolved through ``collections.abc`` when
    available (they were removed from the ``collections`` top level in
    Python 3.10), falling back to ``collections`` itself on old Pythons.
    """

    def __init__(self, iterable=(), **kwargs):
        # ``collections.abc`` on modern Pythons, ``collections`` on Python 2.
        abc = getattr(collections, 'abc', collections)
        if isinstance(iterable, abc.Mapping):
            iterable = iterable.items()
        self.d = {}  # backing storage for all mapping operations
        for k, v in iterable:
            self.d[k] = v
        self.d.update(kwargs)

    def __delitem__(self, key):
        del self.d[key]

    def __getitem__(self, key):
        return self.d[key]

    def __iter__(self):
        return iter(self.d)

    def __len__(self):
        return len(self.d)

    def __setitem__(self, key, value):
        self.d[key] = value
class Mapping(MatcherTestCase):
    """Tests for the ``Mapping`` matcher: any mapping object matches
    (both real dicts and custom mappings), optionally with key/value
    matchers.
    """

    def test_invalid_arg(self):
        # ``of`` must be a pair of matchers, not an arbitrary value.
        with self.assertRaises(TypeError):
            self.assert_match(None, of='not a pair of matchers')

    def test_none(self):
        self.assert_no_match(None)

    def test_zero(self):
        self.assert_no_match(0)

    def test_empty_string(self):
        self.assert_no_match('')

    def test_empty_list(self):
        self.assert_no_match([])

    def test_empty_set(self):
        self.assert_no_match(set())

    def test_empty_tuple(self):
        self.assert_no_match(())

    def test_empty_dict__regular(self):
        self._assert_match_all_spellings({})

    def test_empty_dict__custom(self):
        self._assert_match_all_spellings(CustomDict())

    def test_empty_generator(self):
        self.assert_no_match(x for x in ())

    def test_some_string(self):
        self.assert_no_match("Alice has a cat")

    def test_some_number(self):
        self.assert_no_match(42)

    def test_some_list(self):
        self.assert_no_match([1, 2, 3, 5, 8, 13])

    def test_some_set(self):
        self.assert_no_match(set([2, 4, 6, 8, 10]))

    def test_some_tuple(self):
        self.assert_no_match(('foo', -1, ['bar']))

    def test_some_dict__regular(self):
        self._assert_match_all_spellings({'a': 1})

    def test_some_dict__custom(self):
        self._assert_match_all_spellings(CustomDict({'a': 1}))

    def test_some_generator(self):
        self.assert_no_match(x for x in [1, 2])

    def test_some_object(self):
        self.assert_no_match(object())

    def test_repr(self):
        self.assert_repr(__unit__.Mapping())

    def _assert_match_all_spellings(self, d):
        # Exercise every equivalent way of passing key/value matchers.
        self.assert_match(d)
        self.assert_match(d, str, int)
        self.assert_match(d, keys=str, values=int)
        self.assert_match(d, of=(str, int))

    # Assertion helpers bound to the matcher under test.

    def assert_match(self, value, *args, **kwargs):
        matcher = __unit__.Mapping(*args, **kwargs)
        return super(Mapping, self).assert_match(matcher, value)

    def assert_no_match(self, value, *args, **kwargs):
        matcher = __unit__.Mapping(*args, **kwargs)
        return super(Mapping, self).assert_no_match(matcher, value)
class Dict(MatcherTestCase):
    """Tests for the ``Dict`` matcher: only actual ``dict`` objects match
    (custom mapping types do not), optionally with key/value matchers.
    """

    def test_invalid_arg(self):
        # ``of`` must be a pair of matchers, not an arbitrary value.
        with self.assertRaises(TypeError):
            self.assert_match(None, of='not a pair of matchers')

    def test_none(self):
        self.assert_no_match(None)

    def test_zero(self):
        self.assert_no_match(0)

    def test_empty_string(self):
        self.assert_no_match('')

    def test_empty_list(self):
        self.assert_no_match([])

    def test_empty_set(self):
        self.assert_no_match(set())

    def test_empty_tuple(self):
        self.assert_no_match(())

    def test_empty_dict__regular(self):
        self._assert_match_all_spellings({})

    def test_empty_dict__custom(self):
        # A mapping that isn't a real dict must be rejected.
        self._assert_no_match_all_spellings(CustomDict())

    def test_empty_generator(self):
        self.assert_no_match(x for x in ())

    def test_some_string(self):
        self.assert_no_match("Alice has a cat")

    def test_some_number(self):
        self.assert_no_match(42)

    def test_some_list(self):
        self.assert_no_match([1, 2, 3, 5, 8, 13])

    def test_some_set(self):
        self.assert_no_match(set([2, 4, 6, 8, 10]))

    def test_some_tuple(self):
        self.assert_no_match(('foo', -1, ['bar']))

    def test_some_dict__regular(self):
        self._assert_match_all_spellings({'a': 1})

    def test_some_dict__custom(self):
        self._assert_no_match_all_spellings(CustomDict({'a': 1}))

    def test_some_generator(self):
        self.assert_no_match(x for x in [1, 2])

    def test_some_object(self):
        self.assert_no_match(object())

    def test_repr(self):
        self.assert_repr(__unit__.Dict())

    # Helpers exercising every equivalent way of passing key/value matchers.

    def _assert_match_all_spellings(self, d):
        self.assert_match(d)
        self.assert_match(d, str, int)
        self.assert_match(d, keys=str, values=int)
        self.assert_match(d, of=(str, int))

    def _assert_no_match_all_spellings(self, d):
        self.assert_no_match(d)
        self.assert_no_match(d, str, int)
        self.assert_no_match(d, keys=str, values=int)
        self.assert_no_match(d, of=(str, int))

    # Assertion helpers bound to the matcher under test.

    def assert_match(self, value, *args, **kwargs):
        matcher = __unit__.Dict(*args, **kwargs)
        return super(Dict, self).assert_match(matcher, value)

    def assert_no_match(self, value, *args, **kwargs):
        matcher = __unit__.Dict(*args, **kwargs)
        return super(Dict, self).assert_no_match(matcher, value)
class OrderedDict(MatcherTestCase):
    """Tests for the ``OrderedDict`` matcher: only ``OrderedDict``
    instances match (plain dicts and custom mappings do not), optionally
    with key/value matchers.
    """

    def test_invalid_arg(self):
        # ``of`` must be a pair of matchers, not an arbitrary value.
        with self.assertRaises(TypeError):
            self.assert_match(None, of='not a pair of matchers')

    def test_none(self):
        self.assert_no_match(None)

    def test_zero(self):
        self.assert_no_match(0)

    def test_empty_string(self):
        self.assert_no_match('')

    def test_empty_list(self):
        self.assert_no_match([])

    def test_empty_set(self):
        self.assert_no_match(set())

    def test_empty_tuple(self):
        self.assert_no_match(())

    def test_empty_dict__regular(self):
        self.assert_no_match({})

    def test_empty_dict__custom(self):
        self.assert_no_match(CustomDict())

    @skipIf(_OrderedDict is None,
            "requires Python 2.6 or the ordereddict package")
    def test_empty_ordereddict(self):
        self._assert_match_all_spellings(_OrderedDict())

    def test_empty_generator(self):
        self.assert_no_match(x for x in ())

    def test_some_string(self):
        self.assert_no_match("Alice has a cat")

    def test_some_number(self):
        self.assert_no_match(42)

    def test_some_list(self):
        self.assert_no_match([1, 2, 3, 5, 8, 13])

    def test_some_set(self):
        self.assert_no_match(set([2, 4, 6, 8, 10]))

    def test_some_tuple(self):
        self.assert_no_match(('foo', -1, ['bar']))

    def test_some_dict(self):
        self.assert_no_match({'a': 1})

    @skipIf(_OrderedDict is None,
            "requires Python 2.6 or the ordereddict package")
    def test_some_ordereddict(self):
        self._assert_match_all_spellings(_OrderedDict([('a', 1)]))

    def test_some_generator(self):
        self.assert_no_match(x for x in [1, 2])

    def test_some_object(self):
        self.assert_no_match(object())

    def test_repr(self):
        self.assert_repr(__unit__.OrderedDict())

    def _assert_match_all_spellings(self, d):
        # Exercise every equivalent way of passing key/value matchers.
        self.assert_match(d)
        self.assert_match(d, str, int)
        self.assert_match(d, keys=str, values=int)
        self.assert_match(d, of=(str, int))

    # Assertion helpers bound to the matcher under test.

    def assert_match(self, value, *args, **kwargs):
        matcher = __unit__.OrderedDict(*args, **kwargs)
        return super(OrderedDict, self).assert_match(matcher, value)

    def assert_no_match(self, value, *args, **kwargs):
        matcher = __unit__.OrderedDict(*args, **kwargs)
        return super(OrderedDict, self).assert_no_match(matcher, value)
| 38.899485
| 79
| 0.681309
|
import collections
from taipan.testing import skipIf
from callee._compat import OrderedDict as _OrderedDict
import callee.collections as __unit__
from tests import MatcherTestCase
class Iterable(MatcherTestCase):
test_none = lambda self: self.assert_no_match(None)
test_zero = lambda self: self.assert_no_match(0)
test_empty_string = lambda self: self.assert_match('')
test_empty_list = lambda self: self.assert_match([])
test_empty_tuple = lambda self: self.assert_match(())
test_empty_dict = lambda self: self.assert_match({})
test_empty_generator = lambda self: self.assert_match(x for x in ())
test_some_string = lambda self: self.assert_match("Alice has a cat")
test_some_number = lambda self: self.assert_no_match(42)
test_some_list = lambda self: self.assert_match([1, 2, 3, 5, 8, 13])
test_some_tuple = lambda self: self.assert_match(('foo', -1, ['bar']))
def test_some_generator(self):
gen = (x for x in [1, 2, 5])
self.assert_match(gen)
self.assertNotEmpty(
gen, msg="matcher shouldn't have iterated over the generator")
test_some_object = lambda self: self.assert_no_match(object())
test_repr = lambda self: self.assert_repr(__unit__.Iterable())
# Assertion functions
def assert_match(self, value):
return super(Iterable, self).assert_match(__unit__.Iterable(), value)
def assert_no_match(self, value):
return super(Iterable, self) \
.assert_no_match(__unit__.Iterable(), value)
class Generator(MatcherTestCase):
test_none = lambda self: self.assert_no_match(None)
test_zero = lambda self: self.assert_no_match(0)
test_empty_string = lambda self: self.assert_no_match('')
test_empty_list = lambda self: self.assert_no_match([])
test_empty_tuple = lambda self: self.assert_no_match(())
test_empty_dict = lambda self: self.assert_no_match({})
test_empty_generator = lambda self: self.assert_match(x for x in ())
test_some_string = lambda self: self.assert_no_match("Alice has a cat")
test_some_number = lambda self: self.assert_no_match(42)
test_some_list = lambda self: self.assert_no_match([1, 2, 3, 5, 8, 13])
test_some_tuple = lambda self: self.assert_no_match(('foo', -1, ['bar']))
def test_some_generator(self):
gen = (x for x in [1, 2, 5])
self.assert_match(gen)
self.assertNotEmpty(
gen, msg="matcher shouldn't have iterated over the generator")
test_some_object = lambda self: self.assert_no_match(object())
test_repr = lambda self: self.assert_repr(__unit__.Generator())
def assert_match(self, value):
return super(Generator, self).assert_match(__unit__.Generator(), value)
def assert_no_match(self, value):
return super(Generator, self) \
.assert_no_match(__unit__.Generator(), value)
class Sequence(MatcherTestCase):
test_none = lambda self: self.assert_no_match(None)
test_zero = lambda self: self.assert_no_match(0)
test_empty_string = lambda self: self.assert_match('')
test_empty_list = lambda self: self.assert_match([])
test_empty_set = lambda self: self.assert_no_match(set())
test_empty_tuple = lambda self: self.assert_match(())
test_empty_dict = lambda self: self.assert_no_match({})
test_empty_generator = lambda self: self.assert_no_match(x for x in ())
def test_some_string(self):
s = "Alice has a cat"
self.assert_match(s)
self.assert_match(s, of=str)
test_some_number = lambda self: self.assert_no_match(42)
def test_some_list(self):
l = [1, 2, 3, 5, 8, 13]
self.assert_match(l)
self.assert_match(l, of=int)
test_some_tuple = lambda self: self.assert_match(('foo', -1, ['bar']))
test_some_generator = lambda self: self.assert_no_match(x for x in [1, 2])
test_some_object = lambda self: self.assert_no_match(object())
test_repr = lambda self: self.assert_repr(__unit__.Sequence())
def assert_match(self, value, of=None):
return super(Sequence, self).assert_match(__unit__.Sequence(of), value)
def assert_no_match(self, value, of=None):
return super(Sequence, self) \
.assert_no_match(__unit__.Sequence(of), value)
class List(MatcherTestCase):
test_none = lambda self: self.assert_no_match(None)
test_zero = lambda self: self.assert_no_match(0)
test_empty_string = lambda self: self.assert_no_match('')
test_empty_list = lambda self: self.assert_match([])
test_empty_set = lambda self: self.assert_no_match(set())
test_empty_tuple = lambda self: self.assert_no_match(())
test_empty_dict = lambda self: self.assert_no_match({})
test_empty_generator = lambda self: self.assert_no_match(x for x in ())
test_some_string = lambda self: self.assert_no_match("Alice has a cat")
test_some_number = lambda self: self.assert_no_match(42)
test_some_list = lambda self: self.assert_match([1, 2, 3, 5, 8, 13], int)
test_some_tuple = lambda self: self.assert_no_match(('foo', -1, ['bar']))
test_some_generator = lambda self: self.assert_no_match(x for x in [1, 2])
test_some_object = lambda self: self.assert_no_match(object())
test_repr = lambda self: self.assert_repr(__unit__.List())
def assert_match(self, value, of=None):
return super(List, self).assert_match(__unit__.List(of), value)
def assert_no_match(self, value, of=None):
return super(List, self).assert_no_match(__unit__.List(of), value)
class Set(MatcherTestCase):
test_none = lambda self: self.assert_no_match(None)
test_zero = lambda self: self.assert_no_match(0)
test_empty_string = lambda self: self.assert_no_match('')
test_empty_list = lambda self: self.assert_no_match([])
test_empty_set = lambda self: self.assert_match(set())
test_empty_tuple = lambda self: self.assert_no_match(())
test_empty_dict = lambda self: self.assert_no_match({})
test_empty_generator = lambda self: self.assert_no_match(x for x in ())
test_some_string = lambda self: self.assert_no_match("Alice has a cat")
test_some_number = lambda self: self.assert_no_match(42)
test_some_list = lambda self: self.assert_no_match([1, 2, 3, 5, 8, 13])
test_some_set = lambda self: self.assert_match(set([2, 4, 6, 8, 10]), int)
test_some_tuple = lambda self: self.assert_no_match(('foo', -1, ['bar']))
test_some_generator = lambda self: self.assert_no_match(x for x in [1, 2])
test_some_object = lambda self: self.assert_no_match(object())
test_repr = lambda self: self.assert_repr(__unit__.Set())
def assert_match(self, value, of=None):
return super(Set, self).assert_match(__unit__.Set(of), value)
def assert_no_match(self, value, of=None):
return super(Set, self).assert_no_match(__unit__.Set(of), value)
class CustomDict(collections.MutableMapping):
def __init__(self, iterable=(), **kwargs):
if isinstance(iterable, collections.Mapping):
iterable = iterable.items()
self.d = {}
for k, v in iterable:
self.d[k] = v
self.d.update(kwargs)
def __delitem__(self, key):
del self.d[key]
def __getitem__(self, key):
return self.d[key]
def __iter__(self):
return iter(self.d)
def __len__(self):
return len(self.d)
def __setitem__(self, key, value):
self.d[key] = value
class Mapping(MatcherTestCase):
def test_invalid_arg(self):
with self.assertRaises(TypeError):
self.assert_match(None, of='not a pair of matchers')
test_none = lambda self: self.assert_no_match(None)
test_zero = lambda self: self.assert_no_match(0)
test_empty_string = lambda self: self.assert_no_match('')
test_empty_list = lambda self: self.assert_no_match([])
test_empty_set = lambda self: self.assert_no_match(set())
test_empty_tuple = lambda self: self.assert_no_match(())
def test_empty_dict__regular(self):
d = {}
self.assert_match(d)
self.assert_match(d, str, int)
self.assert_match(d, keys=str, values=int)
self.assert_match(d, of=(str, int))
def test_empty_dict__custom(self):
d = CustomDict()
self.assert_match(d)
self.assert_match(d, str, int)
self.assert_match(d, keys=str, values=int)
self.assert_match(d, of=(str, int))
test_empty_generator = lambda self: self.assert_no_match(x for x in ())
test_some_string = lambda self: self.assert_no_match("Alice has a cat")
test_some_number = lambda self: self.assert_no_match(42)
test_some_list = lambda self: self.assert_no_match([1, 2, 3, 5, 8, 13])
test_some_set = lambda self: self.assert_no_match(set([2, 4, 6, 8, 10]))
test_some_tuple = lambda self: self.assert_no_match(('foo', -1, ['bar']))
def test_some_dict__regular(self):
d = {'a': 1}
self.assert_match(d)
self.assert_match(d, str, int)
self.assert_match(d, keys=str, values=int)
self.assert_match(d, of=(str, int))
def test_some_dict__custom(self):
d = CustomDict({'a': 1})
self.assert_match(d)
self.assert_match(d, str, int)
self.assert_match(d, keys=str, values=int)
self.assert_match(d, of=(str, int))
test_some_generator = lambda self: self.assert_no_match(x for x in [1, 2])
test_some_object = lambda self: self.assert_no_match(object())
test_repr = lambda self: self.assert_repr(__unit__.Mapping())
def assert_match(self, value, *args, **kwargs):
return super(Mapping, self)\
.assert_match(__unit__.Mapping(*args, **kwargs), value)
def assert_no_match(self, value, *args, **kwargs):
return super(Mapping, self) \
.assert_no_match(__unit__.Mapping(*args, **kwargs), value)
class Dict(MatcherTestCase):
def test_invalid_arg(self):
with self.assertRaises(TypeError):
self.assert_match(None, of='not a pair of matchers')
test_none = lambda self: self.assert_no_match(None)
test_zero = lambda self: self.assert_no_match(0)
test_empty_string = lambda self: self.assert_no_match('')
test_empty_list = lambda self: self.assert_no_match([])
test_empty_set = lambda self: self.assert_no_match(set())
test_empty_tuple = lambda self: self.assert_no_match(())
def test_empty_dict__regular(self):
d = {}
self.assert_match(d)
self.assert_match(d, str, int)
self.assert_match(d, keys=str, values=int)
self.assert_match(d, of=(str, int))
def test_empty_dict__custom(self):
d = CustomDict()
self.assert_no_match(d)
self.assert_no_match(d, str, int)
self.assert_no_match(d, keys=str, values=int)
self.assert_no_match(d, of=(str, int))
test_empty_generator = lambda self: self.assert_no_match(x for x in ())
test_some_string = lambda self: self.assert_no_match("Alice has a cat")
test_some_number = lambda self: self.assert_no_match(42)
test_some_list = lambda self: self.assert_no_match([1, 2, 3, 5, 8, 13])
test_some_set = lambda self: self.assert_no_match(set([2, 4, 6, 8, 10]))
test_some_tuple = lambda self: self.assert_no_match(('foo', -1, ['bar']))
def test_some_dict__regular(self):
d = {'a': 1}
self.assert_match(d)
self.assert_match(d, str, int)
self.assert_match(d, keys=str, values=int)
self.assert_match(d, of=(str, int))
def test_some_dict__custom(self):
d = CustomDict({'a': 1})
self.assert_no_match(d)
self.assert_no_match(d, str, int)
self.assert_no_match(d, keys=str, values=int)
self.assert_no_match(d, of=(str, int))
test_some_generator = lambda self: self.assert_no_match(x for x in [1, 2])
test_some_object = lambda self: self.assert_no_match(object())
test_repr = lambda self: self.assert_repr(__unit__.Dict())
def assert_match(self, value, *args, **kwargs):
return super(Dict, self) \
.assert_match(__unit__.Dict(*args, **kwargs), value)
def assert_no_match(self, value, *args, **kwargs):
return super(Dict, self) \
.assert_no_match(__unit__.Dict(*args, **kwargs), value)
class OrderedDict(MatcherTestCase):
def test_invalid_arg(self):
with self.assertRaises(TypeError):
self.assert_match(None, of='not a pair of matchers')
test_none = lambda self: self.assert_no_match(None)
test_zero = lambda self: self.assert_no_match(0)
test_empty_string = lambda self: self.assert_no_match('')
test_empty_list = lambda self: self.assert_no_match([])
test_empty_set = lambda self: self.assert_no_match(set())
test_empty_tuple = lambda self: self.assert_no_match(())
test_empty_dict__regular = lambda self: self.assert_no_match({})
test_empty_dict__custom = lambda self: self.assert_no_match(CustomDict())
@skipIf(_OrderedDict is None,
"requires Python 2.6 or the ordereddict package")
def test_empty_ordereddict(self):
d = _OrderedDict()
self.assert_match(d)
self.assert_match(d, str, int)
self.assert_match(d, keys=str, values=int)
self.assert_match(d, of=(str, int))
test_empty_generator = lambda self: self.assert_no_match(x for x in ())
test_some_string = lambda self: self.assert_no_match("Alice has a cat")
test_some_number = lambda self: self.assert_no_match(42)
test_some_list = lambda self: self.assert_no_match([1, 2, 3, 5, 8, 13])
test_some_set = lambda self: self.assert_no_match(set([2, 4, 6, 8, 10]))
test_some_tuple = lambda self: self.assert_no_match(('foo', -1, ['bar']))
test_some_dict = lambda self: self.assert_no_match({'a': 1})
@skipIf(_OrderedDict is None,
"requires Python 2.6 or the ordereddict package")
def test_some_ordereddict(self):
d = _OrderedDict([('a', 1)])
self.assert_match(d)
self.assert_match(d, str, int)
self.assert_match(d, keys=str, values=int)
self.assert_match(d, of=(str, int))
test_some_generator = lambda self: self.assert_no_match(x for x in [1, 2])
test_some_object = lambda self: self.assert_no_match(object())
test_repr = lambda self: self.assert_repr(__unit__.OrderedDict())
def assert_match(self, value, *args, **kwargs):
return super(OrderedDict, self) \
.assert_match(__unit__.OrderedDict(*args, **kwargs), value)
def assert_no_match(self, value, *args, **kwargs):
return super(OrderedDict, self) \
.assert_no_match(__unit__.OrderedDict(*args, **kwargs), value)
| true
| true
|
f71669ba767a939cc5d439ef370d6333952c145a
| 226
|
py
|
Python
|
course_api/templatetags/time_converter.py
|
dragonbone81/bobcat-courses-backend
|
d0f98b837f37eb16a89a24ce9bd3f3f0fd52064c
|
[
"MIT"
] | 3
|
2018-10-25T12:41:33.000Z
|
2019-09-19T19:47:39.000Z
|
course_api/templatetags/time_converter.py
|
dragonbone81/bobcat-courses-backend
|
d0f98b837f37eb16a89a24ce9bd3f3f0fd52064c
|
[
"MIT"
] | 22
|
2018-04-01T02:43:01.000Z
|
2022-03-11T23:15:55.000Z
|
course_api/templatetags/time_converter.py
|
dragonbone81/cse120
|
d0f98b837f37eb16a89a24ce9bd3f3f0fd52064c
|
[
"MIT"
] | 1
|
2019-09-19T19:48:59.000Z
|
2019-09-19T19:48:59.000Z
|
from django.template.defaulttags import register
@register.filter
def get_item(dictionary, key):
    """Template filter: look up ``key`` in ``dictionary``.

    Returns ``None`` when the key is absent (``dict.get`` semantics),
    which lets templates index a dict with a variable key.
    """
    value = dictionary.get(key)
    return value
@register.filter
def get_item_dict(dictionary, key):
    """Template filter: look up ``key`` and wrap it as ``{'data': value}``.

    The wrapper dict gives templates a stable attribute-style handle
    (``result.data``) even when the looked-up value is ``None``.
    """
    value = dictionary.get(key)
    return {'data': value}
| 18.833333
| 48
| 0.761062
|
from django.template.defaulttags import register
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
@register.filter
def get_item_dict(dictionary, key):
return {'data': dictionary.get(key)}
| true
| true
|
f71669f5529851928377789218c8de2abda6eaf6
| 5,513
|
py
|
Python
|
测试/tensorflow_hello/2.practices_on_nlp.py
|
shayxu-ai/A-Repository-for-Machine-Learning
|
4b4cea15bb005d1c58f4395fde97cadf44fb0186
|
[
"Apache-2.0"
] | null | null | null |
测试/tensorflow_hello/2.practices_on_nlp.py
|
shayxu-ai/A-Repository-for-Machine-Learning
|
4b4cea15bb005d1c58f4395fde97cadf44fb0186
|
[
"Apache-2.0"
] | null | null | null |
测试/tensorflow_hello/2.practices_on_nlp.py
|
shayxu-ai/A-Repository-for-Machine-Learning
|
4b4cea15bb005d1c58f4395fde97cadf44fb0186
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time: 2020/2/5,005 22:02
# @Last Update: 2020/2/5,005 22:02
# @Author: 徐缘
# @FileName: 2.practices_on_nlp.py
# @Software: PyCharm
from __future__ import absolute_import, division, print_function, unicode_literals # 导入一些熟悉的陌生人
# 绝对引入,精确除法,print,unicode类型字符串。都是为了适配python2,不加也罢
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
from tensorflow import keras
import tensorflow_hub as hub # 模型库
import tensorflow_datasets as tfds # 数据|库 https://tensorflow.google.cn/datasets/api_docs/python/tfds?hl=en
tfds.disable_progress_bar()
def version():
    """Print environment diagnostics: eager mode, library versions, GPU."""
    print("Eager mode: ", tf.executing_eagerly())
    print("Hub version: ", hub.__version__)
    print("tfds version", tfds.__version__)
    gpus = tf.config.experimental.list_physical_devices("GPU")
    print("GPU is", "available" if gpus else "NOT AVAILABLE")
def tf_hub_hello():
    """Binary sentiment classification of IMDB reviews.

    Transfer learning: a pretrained word-embedding layer from TF Hub
    (gnews-swivel-20dim, fine-tuned) followed by a small dense head.
    Reported results: loss 0.329, accuracy 0.858 — the original author
    notes a CNN text classifier can reportedly reach ~95%.

    Side effects: downloads the dataset and the Hub module over the
    network, trains for 20 epochs, and prints metrics to stdout.
    """
    # 60/40 split of the official train set into train/validation,
    # plus the untouched test set; (text, label) pairs.
    train_data, validation_data, test_data = tfds.load(
        name="imdb_reviews", split=('train[:60%]', 'train[60%:]', 'test'),
        as_supervised=True)
    # Peek at one batch of 10 raw examples for a sanity check.
    train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))
    print(train_examples_batch)
    print(train_labels_batch)
    # Pretrained sentence-embedding layer; trainable=True fine-tunes the
    # embedding weights together with the classifier head.
    embedding = "https://hub.tensorflow.google.cn/google/tf2-preview/gnews-swivel-20dim/1"
    hub_layer = hub.KerasLayer(embedding, input_shape=[],
                               dtype=tf.string, trainable=True)
    print(hub_layer(train_examples_batch[:3]))
    # Embedding -> 16-unit ReLU -> single sigmoid output (binary class).
    model = tf.keras.Sequential()
    model.add(hub_layer)
    model.add(tf.keras.layers.Dense(16, activation='relu'))
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
    # model.summary()
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(train_data.shuffle(10000).batch(512),
                        epochs=20,
                        validation_data=validation_data.batch(512),
                        verbose=1)
    # Final evaluation on the held-out test set; print each metric.
    results = model.evaluate(test_data.batch(512), verbose=2)
    for name, value in zip(model.metrics_names, results):
        print("%s: %.3f" % (name, value))
def preprocess_text():
    """IMDB sentiment classification on the pre-tokenized subword dataset.

    Demonstrates the subword encoder round-trip, builds a small
    embedding + global-average-pooling classifier, trains for 10 epochs,
    and plots training/validation loss and accuracy with matplotlib.

    Side effects: dataset download, training, stdout prints, and two
    blocking plot windows (``plt.show``).
    """
    (train_data, test_data), info = tfds.load(
        # Use the version pre-encoded with an ~8k vocabulary.
        'imdb_reviews/subwords8k',
        # Return the train/test datasets as a tuple.
        split=(tfds.Split.TRAIN, tfds.Split.TEST),
        # Return (example, label) pairs from the dataset (instead of a dictionary).
        as_supervised=True,
        # Also return the `info` structure.
        with_info=True)
    # Subword text encoder shipped with the dataset's metadata.
    encoder = info.features['text'].encoder
    print('Vocabulary size: {}'.format(encoder.vocab_size))
    # Round-trip sanity check: encode then decode must be lossless.
    sample_string = 'Hello TensorFlow.'
    encoded_string = encoder.encode(sample_string)
    print('Encoded string is {}'.format(encoded_string))
    original_string = encoder.decode(encoded_string)
    print('The original string: "{}"'.format(original_string))
    assert original_string == sample_string
    # Show which subword each token id maps to.
    for ts in encoded_string:
        print('{} ----> {}'.format(ts, encoder.decode([ts])))
    for train_example, train_label in train_data.take(1):
        print('Encoded text:', train_example[:10].numpy())
        print('Label:', train_label.numpy())
    encoder.decode(train_example)
    BUFFER_SIZE = 1000
    # Pad each batch to the longest example in it.
    # NOTE(review): the two-argument padded_batch(batch_size, padded_shapes)
    # form is the pre-TF-2.2 API; newer TF infers shapes — confirm version.
    train_batches = (
        train_data
        .shuffle(BUFFER_SIZE)
        .padded_batch(32, train_data.output_shapes))
    test_batches = (
        test_data
        .padded_batch(32, train_data.output_shapes))
    for example_batch, label_batch in train_batches.take(2):
        print("Batch shape:", example_batch.shape)
        print("label shape:", label_batch.shape)
    # Embedding -> mean-pool over tokens -> single sigmoid output.
    model = keras.Sequential([
        keras.layers.Embedding(encoder.vocab_size, 16),
        keras.layers.GlobalAveragePooling1D(),
        keras.layers.Dense(1, activation='sigmoid')])
    model.summary()
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(train_batches,
                        epochs=10,
                        validation_data=test_batches,
                        validation_steps=30)
    loss, accuracy = model.evaluate(test_batches)
    print("Loss: ", loss)
    print("Accuracy: ", accuracy)
    # Per-epoch metric curves recorded by Keras during fit().
    history_dict = history.history
    history_dict.keys()
    import matplotlib.pyplot as plt
    acc = history_dict['accuracy']
    val_acc = history_dict['val_accuracy']
    loss = history_dict['loss']
    val_loss = history_dict['val_loss']
    epochs = range(1, len(acc) + 1)
    # "bo" is for "blue dot"
    plt.plot(epochs, loss, 'bo', label='Training loss')
    # b is for "solid blue line"
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()
    plt.clf()  # clear figure
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend(loc='lower right')
    plt.show()
    return
# Script entry point: run the subword-encoder training demo.
if __name__ == '__main__':
    # version()
    preprocess_text()
| 29.169312
| 108
| 0.643751
|
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
from tensorflow import keras
import tensorflow_hub as hub
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
def version():
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("tfds version", tfds.__version__)
print("GPU is", "available" if tf.config.experimental.list_physical_devices("GPU") else "NOT AVAILABLE")
def tf_hub_hello():
train_data, validation_data, test_data = tfds.load(
name="imdb_reviews", split=('train[:60%]', 'train[60%:]', 'test'),
as_supervised=True)
train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))
print(train_examples_batch)
print(train_labels_batch)
embedding = "https://hub.tensorflow.google.cn/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(embedding, input_shape=[],
dtype=tf.string, trainable=True)
print(hub_layer(train_examples_batch[:3]))
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
history = model.fit(train_data.shuffle(10000).batch(512),
epochs=20,
validation_data=validation_data.batch(512),
verbose=1)
results = model.evaluate(test_data.batch(512), verbose=2)
for name, value in zip(model.metrics_names, results):
print("%s: %.3f" % (name, value))
def preprocess_text():
    """Train a small embedding + global-average-pooling classifier on the
    subword-encoded IMDB dataset and plot training/validation loss and accuracy.

    Side effects: downloads the dataset, prints encoder round-trip examples,
    and shows two matplotlib figures.
    """
    (train_data, test_data), info = tfds.load(
        'imdb_reviews/subwords8k',
        split=(tfds.Split.TRAIN, tfds.Split.TEST),
        as_supervised=True,
        with_info=True)
    # Subword text encoder shipped with the dataset (~8k vocabulary).
    encoder = info.features['text'].encoder
    print('Vocabulary size: {}'.format(encoder.vocab_size))
    sample_string = 'Hello TensorFlow.'
    encoded_string = encoder.encode(sample_string)
    print('Encoded string is {}'.format(encoded_string))
    original_string = encoder.decode(encoded_string)
    print('The original string: "{}"'.format(original_string))
    # Encode/decode round-trip must be lossless.
    assert original_string == sample_string
    for ts in encoded_string:
        print('{} ----> {}'.format(ts, encoder.decode([ts])))
    for train_example, train_label in train_data.take(1):
        print('Encoded text:', train_example[:10].numpy())
        print('Label:', train_label.numpy())
    encoder.decode(train_example)
    BUFFER_SIZE = 1000
    # NOTE(review): passing dataset.output_shapes to padded_batch targets an
    # older TF 2.x API; newer releases infer padded_shapes — confirm TF version.
    train_batches = (
        train_data
        .shuffle(BUFFER_SIZE)
        .padded_batch(32, train_data.output_shapes))
    test_batches = (
        test_data
        # NOTE(review): pads with train_data's shapes; presumably identical to
        # test_data.output_shapes — verify.
        .padded_batch(32, train_data.output_shapes))
    for example_batch, label_batch in train_batches.take(2):
        print("Batch shape:", example_batch.shape)
        print("label shape:", label_batch.shape)
    model = keras.Sequential([
        keras.layers.Embedding(encoder.vocab_size, 16),
        keras.layers.GlobalAveragePooling1D(),
        keras.layers.Dense(1, activation='sigmoid')])
    model.summary()
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(train_batches,
                        epochs=10,
                        validation_data=test_batches,
                        validation_steps=30)
    loss, accuracy = model.evaluate(test_batches)
    print("Loss: ", loss)
    print("Accuracy: ", accuracy)
    history_dict = history.history
    history_dict.keys()
    import matplotlib.pyplot as plt
    acc = history_dict['accuracy']
    val_acc = history_dict['val_accuracy']
    loss = history_dict['loss']
    val_loss = history_dict['val_loss']
    epochs = range(1, len(acc) + 1)
    # 'bo' = blue dots for training, 'b' = solid blue line for validation.
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()
    plt.clf()  # clear the loss figure before drawing the accuracy plot
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend(loc='lower right')
    plt.show()
    return
if __name__ == '__main__':
    # Script entry point: run the subword-encoded IMDB training demo.
    preprocess_text()
| true
| true
|
f7166a5ae2764771f45e5ae446f7d0e4b402bd2c
| 3,231
|
py
|
Python
|
test/test_default_api.py
|
cinaq/axxell-client-python
|
a862dd36552ef8149517c5d5034a52a37abc2d33
|
[
"Apache-2.0"
] | null | null | null |
test/test_default_api.py
|
cinaq/axxell-client-python
|
a862dd36552ef8149517c5d5034a52a37abc2d33
|
[
"Apache-2.0"
] | null | null | null |
test/test_default_api.py
|
cinaq/axxell-client-python
|
a862dd36552ef8149517c5d5034a52a37abc2d33
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
axxell-api
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import AxxellClient
from AxxellClient.rest import ApiException
from AxxellClient.apis.default_api import DefaultApi
class TestDefaultApi(unittest.TestCase):
    """Unit-test stubs for the swagger-generated DefaultApi endpoints."""

    def setUp(self):
        # DefaultApi is imported at module level; identical class to
        # AxxellClient.apis.default_api.DefaultApi.
        self.api = DefaultApi()

    def tearDown(self):
        pass

    def test_aggregate_count_events(self):
        """Stub for the aggregate_count_events endpoint."""
        pass

    def test_aggregate_effective(self):
        """Stub for the aggregate_effective endpoint."""
        pass

    def test_aggregate_events(self):
        """Stub for the aggregate_events endpoint."""
        pass

    def test_aggregate_recent(self):
        """Stub for the aggregate_recent endpoint."""
        pass

    def test_aggregate_top(self):
        """Stub for the aggregate_top endpoint."""
        pass

    def test_auth_store(self):
        """Stub for the auth_store endpoint."""
        pass

    def test_delete_all_events(self):
        """Stub for the delete_all_events endpoint."""
        pass

    def test_delete_all_items(self):
        """Stub for the delete_all_items endpoint."""
        pass

    def test_delete_item(self):
        """Stub for the delete_item endpoint."""
        pass

    def test_recommend_interesting(self):
        """Stub for the recommend_interesting endpoint."""
        pass

    def test_recommend_similar(self):
        """Stub for the recommend_similar endpoint."""
        pass

    def test_register_event(self):
        """Stub for the register_event endpoint."""
        pass

    def test_register_item(self):
        """Stub for the register_item endpoint."""
        pass

    def test_register_store(self):
        """Stub for the register_store endpoint."""
        pass

    def test_retrieve_events(self):
        """Stub for the retrieve_events endpoint."""
        pass

    def test_retrieve_items(self):
        """Stub for the retrieve_items endpoint."""
        pass
if __name__ == '__main__':
    # Discover and run all test stubs in this module.
    unittest.main()
| 18.357955
| 105
| 0.573197
|
from __future__ import absolute_import
import os
import sys
import unittest
import AxxellClient
from AxxellClient.rest import ApiException
from AxxellClient.apis.default_api import DefaultApi
class TestDefaultApi(unittest.TestCase):
    """DefaultApi unit test stubs (swagger-codegen generated)."""

    def setUp(self):
        # Fresh API client for every test.
        self.api = AxxellClient.apis.default_api.DefaultApi()

    def tearDown(self):
        pass

    def test_aggregate_count_events(self):
        """Test case for aggregate_count_events."""
        pass

    def test_aggregate_effective(self):
        """Test case for aggregate_effective."""
        pass

    def test_aggregate_events(self):
        """Test case for aggregate_events."""
        pass

    def test_aggregate_recent(self):
        """Test case for aggregate_recent."""
        pass

    def test_aggregate_top(self):
        """Test case for aggregate_top."""
        pass

    def test_auth_store(self):
        """Test case for auth_store."""
        pass

    def test_delete_all_events(self):
        """Test case for delete_all_events."""
        pass

    def test_delete_all_items(self):
        """Test case for delete_all_items."""
        pass

    def test_delete_item(self):
        """Test case for delete_item."""
        pass

    def test_recommend_interesting(self):
        """Test case for recommend_interesting."""
        pass

    def test_recommend_similar(self):
        """Test case for recommend_similar."""
        pass

    def test_register_event(self):
        """Test case for register_event."""
        pass

    def test_register_item(self):
        """Test case for register_item."""
        pass

    def test_register_store(self):
        """Test case for register_store."""
        pass

    def test_retrieve_events(self):
        """Test case for retrieve_events."""
        pass

    def test_retrieve_items(self):
        """Test case for retrieve_items."""
        pass
if __name__ == '__main__':
    # Discover and run all test stubs in this module.
    unittest.main()
| true
| true
|
f7166ab579b24ca3fb29824e02008e934e83680a
| 22,183
|
py
|
Python
|
openmdao/components/interp_util/interp.py
|
friedenhe/OpenMDAO
|
db1d7e22a8bf9f66afa82ec3544b7244d5545f6d
|
[
"Apache-2.0"
] | null | null | null |
openmdao/components/interp_util/interp.py
|
friedenhe/OpenMDAO
|
db1d7e22a8bf9f66afa82ec3544b7244d5545f6d
|
[
"Apache-2.0"
] | null | null | null |
openmdao/components/interp_util/interp.py
|
friedenhe/OpenMDAO
|
db1d7e22a8bf9f66afa82ec3544b7244d5545f6d
|
[
"Apache-2.0"
] | null | null | null |
"""
Base class for interpolation methods that calculate values for each dimension independently.
Based on Tables in NPSS, and was added to bridge the gap between some of the slower scipy
implementations.
"""
import numpy as np
from openmdao.components.interp_util.interp_akima import InterpAkima, Interp1DAkima
from openmdao.components.interp_util.interp_bsplines import InterpBSplines
from openmdao.components.interp_util.interp_cubic import InterpCubic
from openmdao.components.interp_util.interp_lagrange2 import InterpLagrange2, Interp3DLagrange2
from openmdao.components.interp_util.interp_lagrange3 import InterpLagrange3, Interp3DLagrange3
from openmdao.components.interp_util.interp_scipy import InterpScipy
from openmdao.components.interp_util.interp_slinear import InterpLinear, Interp3DSlinear, \
Interp1DSlinear, Interp2DSlinear
from openmdao.components.interp_util.outofbounds_error import OutOfBoundsError
from openmdao.utils.om_warnings import warn_deprecation
# Registry mapping interpolation-method name -> implementation class.
INTERP_METHODS = {
    'slinear': InterpLinear,
    'lagrange2': InterpLagrange2,
    'lagrange3': InterpLagrange3,
    'cubic': InterpCubic,
    'akima': InterpAkima,
    'scipy_cubic': InterpScipy,
    'scipy_slinear': InterpScipy,
    'scipy_quintic': InterpScipy,
    'bsplines': InterpBSplines,
    '1D-slinear': Interp1DSlinear,
    '2D-slinear': Interp2DSlinear,
    '3D-slinear': Interp3DSlinear,
    '3D-lagrange2': Interp3DLagrange2,
    '3D-lagrange3': Interp3DLagrange3,
    '1D-akima': Interp1DAkima,
    'trilinear': Interp3DSlinear,  # Deprecated
    'akima1D': Interp1DAkima,  # Deprecated
}

# Methods valid for table interpolation (bsplines excluded).
TABLE_METHODS = ['slinear', 'lagrange2', 'lagrange3', 'cubic', 'akima',
                 'scipy_cubic', 'scipy_slinear', 'scipy_quintic',
                 'trilinear', 'akima1D',  # These two are Deprecated
                 '3D-slinear', '2D-slinear', '1D-slinear',
                 '1D-akima',
                 '3D-lagrange2', '3D-lagrange3']

# Methods valid for spline evaluation (fixed-dimension variants excluded).
SPLINE_METHODS = ['slinear', 'lagrange2', 'lagrange3', 'cubic', 'akima', 'bsplines',
                  'scipy_cubic', 'scipy_slinear', 'scipy_quintic']
class InterpND(object):
    """
    Interpolation on a regular grid of arbitrary dimensions.

    The data must be defined on a regular grid; the grid spacing however may be uneven. Several
    interpolation methods are supported. These are defined in the child classes. Gradients are
    provided for all interpolation methods. Gradients with respect to grid values are also
    available optionally.

    Parameters
    ----------
    method : str
        Name of interpolation method.
    points : ndarray or tuple of ndarray
        The points defining the regular grid in n dimensions.
        For 1D interpolation, this can be an ndarray of table locations.
        For table interpolation, it can be a tuple or an ndarray. If it is a tuple, it should
        contain one ndarray for each table dimension.
        For spline evaluation, num_cp can be specified instead of points.
    values : ndarray or tuple of ndarray or None
        These must be specified for interpolation.
        The data on the regular grid in n dimensions.
    x_interp : ndarray or None
        If we are always interpolating at a fixed set of locations, then they can be
        specified here.
    extrapolate : bool
        If False, when interpolated values are requested outside of the domain of the input
        data, a ValueError is raised. If True, then the methods are allowed to extrapolate.
        Default is True (raise an exception).
    num_cp : None or int
        Optional. When specified, use a linear distribution of num_cp control points. If you
        are using 'bsplines' as the method, then num_cp must be set instead of points.
    **kwargs : dict
        Interpolator-specific options to pass onward.

    Attributes
    ----------
    extrapolate : bool
        If False, when interpolated values are requested outside of the domain of the input data,
        a ValueError is raised. If True, then the methods are allowed to extrapolate.
        Default is True.
    grid : tuple
        Collection of points that determine the regular grid.
    table : <InterpTable>
        Table object that contains algorithm that performs the interpolation.
    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.
    x_interp : ndarray
        Cached non-decreasing vector of points to be interpolated when used as an order-reducing
        spline.
    _compute_d_dvalues : bool
        When set to True, compute gradients with respect to the grid values.
    _compute_d_dx : bool
        When set to True, compute gradients with respect to the interpolated point location.
    _d_dx : ndarray
        Cache of computed gradients with respect to evaluation point.
    _d_dvalues : ndarray
        Cache of computed gradients with respect to table values.
    _interp : class
        Class specified as interpolation algorithm, used to regenerate if needed.
    _interp_config : dict
        Configuration object that stores the number of points required for each interpolation
        method.
    _interp_options : dict
        Dictionary of cached interpolator-specific options.
    _xi : ndarray
        Cache of current evaluation point.
    """

    def __init__(self, method="slinear", points=None, values=None, x_interp=None, extrapolate=False,
                 num_cp=None, **kwargs):
        """
        Initialize an InterpND object.

        This object can be setup and used to interpolate on a curve or multi-dimensional table.
        It can also be used to setup an interpolating spline that can be evaluated at fixed
        locations.

        For interpolation, specify values and points.

        For spline evaluation, specifiy x_interp and either points or num_cp.
        """
        if not isinstance(method, str):
            msg = "Argument 'method' should be a string."
            raise ValueError(msg)
        elif method not in INTERP_METHODS:
            all_m = ', '.join(['"' + m + '"' for m in INTERP_METHODS])
            raise ValueError('Interpolation method "%s" is not defined. Valid methods are '
                             '%s.' % (method, all_m))
        elif method == 'akima1D':
            warn_deprecation("The 'akima1D' method has been renamed to '1D-akima'.")
        elif method == 'trilinear':
            warn_deprecation("The 'trilinear' method has been renamed to '3D-slinear'.")

        self.extrapolate = extrapolate

        # The table points are always defined, by specifying either the points directly, or num_cp.
        if points is None:
            if num_cp is not None:
                points = [np.linspace(0.0, 1.0, num_cp)]
            else:
                msg = "Either 'points' or 'num_cp' must be specified."
                raise ValueError(msg)
        else:
            if isinstance(points, np.ndarray):
                points = [points]

            for i, p in enumerate(points):
                n_p = len(p)
                if not np.all(np.diff(p) > 0.):
                    raise ValueError("The points in dimension %d must be strictly "
                                     "ascending" % i)
                if not np.asarray(p).ndim == 1:
                    raise ValueError("The points in dimension %d must be "
                                     "1-dimensional" % i)

        # Table Interpolation
        if x_interp is None:

            if values is None:
                msg = "Either 'values' or 'x_interp' must be specified."
                raise ValueError(msg)

            if method == 'bsplines':
                msg = "Method 'bsplines' is not supported for table interpolation."
                raise ValueError(msg)

            if not hasattr(values, 'ndim'):
                # allow reasonable duck-typed values
                values = np.asarray(values)

            if hasattr(values, 'dtype') and hasattr(values, 'astype'):
                if not np.issubdtype(values.dtype, np.inexact):
                    values = values.astype(float)

            if len(points) > values.ndim:
                raise ValueError("There are %d point arrays, but values has %d "
                                 "dimensions" % (len(points), values.ndim))

            if (method.startswith('scipy') or method == 'akima') and \
                    (np.iscomplexobj(values[:]) or np.any(np.iscomplex(points[0]))):
                msg = f"Interpolation method '{method}' does not support complex points or values."
                raise ValueError(msg)

            for i, p in enumerate(points):
                n_p = len(p)
                if values.shape[i] != n_p:
                    raise ValueError("There are %d points and %d values in "
                                     "dimension %d" % (len(p), values.shape[i], i))

        self.grid = tuple([np.asarray(p) for p in points])
        self.values = values
        self.x_interp = x_interp

        self._xi = None
        self._d_dx = None
        self._d_dvalues = None
        self._compute_d_dvalues = False
        self._compute_d_dx = True

        # Cache spline coefficients.
        interp = INTERP_METHODS[method]

        if method.startswith('scipy'):
            kwargs['interp_method'] = method

        table = interp(self.grid, values, interp, **kwargs)
        table.check_config()
        self.table = table
        self._interp = interp
        self._interp_options = kwargs

    def interpolate(self, x, compute_derivative=False):
        """
        Interpolate at the sample coordinates.

        Parameters
        ----------
        x : ndarray or tuple
            Locations to interpolate.
        compute_derivative : bool
            Set to True to compute derivatives with respect to x.

        Returns
        -------
        ndarray
            Value of interpolant at all sample points.
        ndarray
            Value of derivative of interpolated output with respect to input x. (Only when
            compute_derivative is True).
        """
        self._compute_d_dx = compute_derivative
        self.table._compute_d_dx = compute_derivative
        self.table._compute_d_dvalues = False

        if isinstance(x, np.ndarray):
            if len(x.shape) < 2:
                if len(self.grid) > 1:
                    # Input is an array containing multi-D coordinates of a single point.
                    x = np.atleast_2d(x)
                else:
                    # Input is an array of separate points on a 1D table.
                    x = np.atleast_2d(x).T
        else:
            # Input is a list or tuple of separate points.
            x = np.atleast_2d(x)

        # cache latest evaluation point for gradient method's use later
        self._xi = x

        xnew = self._interpolate(x)

        if compute_derivative:
            return xnew, self._d_dx
        else:
            return xnew

    def evaluate_spline(self, values, compute_derivative=False):
        """
        Interpolate at all fixed output coordinates given the new table values.

        Parameters
        ----------
        values : ndarray(n_points)
            New data values for all points on the regular grid.
        compute_derivative : bool
            Set to True to compute derivatives with respect to x.

        Returns
        -------
        ndarray
            Value of interpolant at all sample points.
        ndarray
            Value of derivative of interpolated output with respect to values.
        """
        self._compute_d_dvalues = compute_derivative
        self.table._compute_d_dvalues = compute_derivative
        self.table._compute_d_dx = False

        if len(values.shape) == 1:
            values = np.expand_dims(values, axis=0)

        # cache latest evaluation point for gradient method's use later
        self._xi = self.x_interp.copy()

        result = self._evaluate_spline(values)
        if result.shape[0] == 1:
            # Not vectorized, so drop the extra dimension.
            result = result.ravel()

        if compute_derivative:
            d_dvalues = self.spline_gradient()
            if d_dvalues.shape[0] == 1:
                d_dvalues = d_dvalues[0]
            return result, d_dvalues
        else:
            return result

    def _interpolate(self, xi):
        """
        Interpolate at the sample coordinates.

        This method is called from OpenMDAO, and is not meant for standalone use.

        Parameters
        ----------
        xi : ndarray of shape (..., ndim)
            The coordinates to sample the gridded data.

        Returns
        -------
        ndarray
            Value of interpolant at all sample points.
        """
        if not self.extrapolate:
            for i, p in enumerate(xi.T):
                if np.isnan(p).any():
                    # np.nan instead of np.NaN: the capitalized alias was removed in NumPy 2.0.
                    raise OutOfBoundsError("One of the requested xi contains a NaN",
                                           i, np.nan, self.grid[i][0], self.grid[i][-1])

                eps = 1e-14 * self.grid[i][-1]
                if np.any(p < self.grid[i][0] - eps) or np.any(p > self.grid[i][-1] + eps):
                    p1 = np.where(self.grid[i][0] > p)[0]
                    p2 = np.where(p > self.grid[i][-1])[0]

                    # First violating entry is enough to direct the user.
                    violated_idx = set(p1).union(p2).pop()
                    value = p[violated_idx]
                    raise OutOfBoundsError("One of the requested xi is out of bounds",
                                           i, value, self.grid[i][0], self.grid[i][-1])

        if self._compute_d_dvalues:
            # If the table grid or values are component inputs, then we need to create a new table
            # each iteration.
            interp = self._interp
            self.table = interp(self.grid, self.values, interp, **self._interp_options)
            if not self.table._supports_d_dvalues:
                raise RuntimeError(f'Method {self.table._name} does not support the '
                                   '"training_data_gradients" option.')

            self.table._compute_d_dvalues = True

        table = self.table
        if table.vectorized(xi):
            result, derivs_x, derivs_val, derivs_grid = table.evaluate_vectorized(xi)

        else:
            n_nodes, nx = xi.shape
            result = np.empty((n_nodes, ), dtype=xi.dtype)
            derivs_x = np.empty((n_nodes, nx), dtype=xi.dtype)
            derivs_val = None

            # TODO: it might be possible to vectorize over n_nodes.
            for j in range(n_nodes):
                val, d_x, d_values, d_grid = table.evaluate(xi[j, ...])
                result[j] = val
                derivs_x[j, :] = d_x.ravel()

                if d_values is not None:
                    if derivs_val is None:
                        dv_shape = [n_nodes]
                        dv_shape.extend(self.values.shape)
                        derivs_val = np.zeros(dv_shape, dtype=xi.dtype)
                    in_slice = table._full_slice
                    full_slice = [slice(j, j + 1)]
                    full_slice.extend(in_slice)
                    shape = derivs_val[tuple(full_slice)].shape
                    derivs_val[tuple(full_slice)] = d_values.reshape(shape)

        # Cache derivatives
        self._d_dx = derivs_x
        self._d_dvalues = derivs_val

        return result

    def _evaluate_spline(self, values):
        """
        Interpolate at all fixed output coordinates given the new table values.

        This method is called from OpenMDAO, and is not meant for standalone use.

        Parameters
        ----------
        values : ndarray(n_nodes x n_points)
            The data on the regular grid in n dimensions.

        Returns
        -------
        ndarray
            Value of interpolant at all sample points.
        """
        xi = self.x_interp
        self.values = values

        table = self.table
        if table._vectorized:

            if table._name == 'bsplines':
                # bsplines is fully vectorized.
                table.values = values
                result, _, derivs_val, _ = table.evaluate_vectorized(xi)

            else:
                # Scipy implementation vectorized over lookups, but not over multiple table values.
                interp = self._interp
                n_nodes, _ = values.shape
                nx = np.prod(xi.shape)

                result = np.empty((n_nodes, nx), dtype=values.dtype)
                derivs_val = None

                for j in range(n_nodes):
                    table = interp(self.grid, values[j, :], interp, **self._interp_options)
                    table._compute_d_dvalues = False
                    table._compute_d_dx = False

                    result[j, :], _, _, _ = table.evaluate_vectorized(xi.reshape((nx, 1)))

        else:
            interp = self._interp
            n_nodes, _ = values.shape
            nx = np.prod(xi.shape)
            result = np.empty((n_nodes, nx), dtype=values.dtype)
            derivs_val = None

            # TODO: it might be possible to vectorize over n_nodes.
            for j in range(n_nodes):
                table = interp(self.grid, values[j, :], interp, **self._interp_options)
                table._compute_d_dvalues = True
                table._compute_d_dx = False

                for k in range(nx):
                    x_pt = np.atleast_2d(xi[k])
                    val, _, d_values, _ = table.evaluate(x_pt)
                    result[j, k] = val

                    if d_values is not None:
                        if derivs_val is None:
                            dv_shape = [n_nodes, nx]
                            dv_shape.extend(values.shape[1:])
                            derivs_val = np.zeros(dv_shape, dtype=values.dtype)
                        in_slice = table._full_slice
                        full_slice = [slice(j, j + 1), slice(k, k + 1)]
                        full_slice.extend(in_slice)
                        shape = derivs_val[tuple(full_slice)].shape
                        derivs_val[tuple(full_slice)] = d_values.reshape(shape)

        # Cache derivatives
        self._d_dvalues = derivs_val

        self.table = table
        return result

    def gradient(self, xi):
        """
        Compute the gradients at the specified point.

        Most of the gradients are computed as the interpolation itself is performed,
        but are cached and returned separately by this method.

        If the point for evaluation differs from the point used to produce
        the currently cached gradient, the interpolation is re-performed in
        order to return the correct gradient.

        Parameters
        ----------
        xi : ndarray of shape (..., ndim)
            The coordinates to sample the gridded data at.

        Returns
        -------
        ndarray
            Vector of gradients of the interpolated values with respect to each value in xi.
        """
        if (self._xi is None) or (not np.array_equal(xi, self._xi)):
            # If inputs have changed since last computation, then re-interpolate.
            self.interpolate(xi)

        return self._gradient().reshape(np.asarray(xi).shape)

    def _gradient(self):
        """
        Return the pre-computed gradients.

        Returns
        -------
        ndarray
            Vector of gradients of the interpolated values with respect to each value in xi.
        """
        return self._d_dx

    def training_gradients(self, pt):
        """
        Compute the training gradient for the vector of training points.

        Parameters
        ----------
        pt : ndarray
            Training point values.

        Returns
        -------
        ndarray
            Gradient of output with respect to training point values.
        """
        if self.table._vectorized:
            return self.table.training_gradients(pt)

        else:
            grid = self.grid
            interp = self._interp
            opts = self._interp_options

            for i, axis in enumerate(grid):
                ngrid = axis.size
                values = np.zeros(ngrid)
                deriv_i = np.zeros(ngrid)

                for j in range(ngrid):
                    values[j] = 1.0
                    table = interp([grid[i]], values, interp, **opts)
                    table._compute_d_dvalues = False
                    deriv_i[j], _, _, _ = table.evaluate(pt[i:i + 1])
                    values[j] = 0.0

                if i == 0:
                    deriv_running = deriv_i.copy()
                else:
                    deriv_running = np.outer(deriv_running, deriv_i)

            return deriv_running

    def spline_gradient(self):
        """
        Return derivative of spline with respect to its control points.

        Returns
        -------
        ndarray
            Gradient of output with respect to training point values.
        """
        vec_size, n_cp = self.values.shape
        x_interp = self.x_interp
        n_interp = len(x_interp)

        d_dvalues = self._d_dvalues
        if d_dvalues is not None:
            dy_ddata = np.zeros((vec_size, n_interp, n_cp), dtype=d_dvalues.dtype)

            if d_dvalues.shape[0] == vec_size:
                # Akima precomputes derivs at all points in vec_size.
                dy_ddata[:] = d_dvalues
            else:
                # Bsplines computed derivative is the same at all points in vec_size.
                dy_ddata[:] = np.broadcast_to(d_dvalues.toarray(), (vec_size, n_interp, n_cp))
        else:
            # Note: These derivatives are independent of control point y values, so they will never
            # be complex dtype.
            dy_ddata = np.zeros((n_interp, n_cp))

            # This way works for the rest of the interpolation methods.
            for k in range(n_interp):
                val = self.training_gradients(x_interp[k:k + 1])
                dy_ddata[k, :] = val
            dy_ddata = np.broadcast_to(dy_ddata, (vec_size, n_interp, n_cp))

        return dy_ddata
| 38.246552
| 100
| 0.580129
|
import numpy as np
from openmdao.components.interp_util.interp_akima import InterpAkima, Interp1DAkima
from openmdao.components.interp_util.interp_bsplines import InterpBSplines
from openmdao.components.interp_util.interp_cubic import InterpCubic
from openmdao.components.interp_util.interp_lagrange2 import InterpLagrange2, Interp3DLagrange2
from openmdao.components.interp_util.interp_lagrange3 import InterpLagrange3, Interp3DLagrange3
from openmdao.components.interp_util.interp_scipy import InterpScipy
from openmdao.components.interp_util.interp_slinear import InterpLinear, Interp3DSlinear, \
Interp1DSlinear, Interp2DSlinear
from openmdao.components.interp_util.outofbounds_error import OutOfBoundsError
from openmdao.utils.om_warnings import warn_deprecation
# Registry mapping interpolation-method name -> implementation class.
INTERP_METHODS = {
    'slinear': InterpLinear,
    'lagrange2': InterpLagrange2,
    'lagrange3': InterpLagrange3,
    'cubic': InterpCubic,
    'akima': InterpAkima,
    'scipy_cubic': InterpScipy,
    'scipy_slinear': InterpScipy,
    'scipy_quintic': InterpScipy,
    'bsplines': InterpBSplines,
    '1D-slinear': Interp1DSlinear,
    '2D-slinear': Interp2DSlinear,
    '3D-slinear': Interp3DSlinear,
    '3D-lagrange2': Interp3DLagrange2,
    '3D-lagrange3': Interp3DLagrange3,
    '1D-akima': Interp1DAkima,
    'trilinear': Interp3DSlinear,  # deprecated alias for '3D-slinear'
    'akima1D': Interp1DAkima,  # deprecated alias for '1D-akima'
}

# Methods valid for table interpolation (bsplines excluded).
TABLE_METHODS = ['slinear', 'lagrange2', 'lagrange3', 'cubic', 'akima',
                 'scipy_cubic', 'scipy_slinear', 'scipy_quintic',
                 'trilinear', 'akima1D',  # deprecated aliases
                 '3D-slinear', '2D-slinear', '1D-slinear',
                 '1D-akima',
                 '3D-lagrange2', '3D-lagrange3']

# Methods valid for spline evaluation (fixed-dimension variants excluded).
SPLINE_METHODS = ['slinear', 'lagrange2', 'lagrange3', 'cubic', 'akima', 'bsplines',
                  'scipy_cubic', 'scipy_slinear', 'scipy_quintic']
class InterpND(object):
def __init__(self, method="slinear", points=None, values=None, x_interp=None, extrapolate=False,
num_cp=None, **kwargs):
if not isinstance(method, str):
msg = "Argument 'method' should be a string."
raise ValueError(msg)
elif method not in INTERP_METHODS:
all_m = ', '.join(['"' + m + '"' for m in INTERP_METHODS])
raise ValueError('Interpolation method "%s" is not defined. Valid methods are '
'%s.' % (method, all_m))
elif method == 'akima1D':
warn_deprecation("The 'akima1D' method has been renamed to '1D-akima'.")
elif method == 'trilinear':
warn_deprecation("The 'trilinear' method has been renamed to '3D-slinear'.")
self.extrapolate = extrapolate
if points is None:
if num_cp is not None:
points = [np.linspace(0.0, 1.0, num_cp)]
else:
msg = "Either 'points' or 'num_cp' must be specified."
raise ValueError(msg)
else:
if isinstance(points, np.ndarray):
points = [points]
for i, p in enumerate(points):
n_p = len(p)
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if x_interp is None:
if values is None:
msg = "Either 'values' or 'x_interp' must be specified."
raise ValueError(msg)
if method == 'bsplines':
msg = "Method 'bsplines' is not supported for table interpolation."
raise ValueError(msg)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if (method.startswith('scipy') or method == 'akima') and \
(np.iscomplexobj(values[:]) or np.any(np.iscomplex(points[0]))):
msg = f"Interpolation method '{method}' does not support complex points or values."
raise ValueError(msg)
for i, p in enumerate(points):
n_p = len(p)
if values.shape[i] != n_p:
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
self.x_interp = x_interp
self._xi = None
self._d_dx = None
self._d_dvalues = None
self._compute_d_dvalues = False
self._compute_d_dx = True
interp = INTERP_METHODS[method]
if method.startswith('scipy'):
kwargs['interp_method'] = method
table = interp(self.grid, values, interp, **kwargs)
table.check_config()
self.table = table
self._interp = interp
self._interp_options = kwargs
def interpolate(self, x, compute_derivative=False):
self._compute_d_dx = compute_derivative
self.table._compute_d_dx = compute_derivative
self.table._compute_d_dvalues = False
if isinstance(x, np.ndarray):
if len(x.shape) < 2:
if len(self.grid) > 1:
x = np.atleast_2d(x)
else:
x = np.atleast_2d(x).T
else:
x = np.atleast_2d(x)
self._xi = x
xnew = self._interpolate(x)
if compute_derivative:
return xnew, self._d_dx
else:
return xnew
def evaluate_spline(self, values, compute_derivative=False):
self._compute_d_dvalues = compute_derivative
self.table._compute_d_dvalues = compute_derivative
self.table._compute_d_dx = False
if len(values.shape) == 1:
values = np.expand_dims(values, axis=0)
# cache latest evaluation point for gradient method's use later
self._xi = self.x_interp.copy()
result = self._evaluate_spline(values)
if result.shape[0] == 1:
result = result.ravel()
if compute_derivative:
d_dvalues = self.spline_gradient()
if d_dvalues.shape[0] == 1:
d_dvalues = d_dvalues[0]
return result, d_dvalues
else:
return result
def _interpolate(self, xi):
if not self.extrapolate:
for i, p in enumerate(xi.T):
if np.isnan(p).any():
raise OutOfBoundsError("One of the requested xi contains a NaN",
i, np.NaN, self.grid[i][0], self.grid[i][-1])
eps = 1e-14 * self.grid[i][-1]
if np.any(p < self.grid[i][0] - eps) or np.any(p > self.grid[i][-1] + eps):
p1 = np.where(self.grid[i][0] > p)[0]
p2 = np.where(p > self.grid[i][-1])[0]
violated_idx = set(p1).union(p2).pop()
value = p[violated_idx]
raise OutOfBoundsError("One of the requested xi is out of bounds",
i, value, self.grid[i][0], self.grid[i][-1])
if self._compute_d_dvalues:
interp = self._interp
self.table = interp(self.grid, self.values, interp, **self._interp_options)
if not self.table._supports_d_dvalues:
raise RuntimeError(f'Method {self.table._name} does not support the '
'"training_data_gradients" option.')
self.table._compute_d_dvalues = True
table = self.table
if table.vectorized(xi):
result, derivs_x, derivs_val, derivs_grid = table.evaluate_vectorized(xi)
else:
n_nodes, nx = xi.shape
result = np.empty((n_nodes, ), dtype=xi.dtype)
derivs_x = np.empty((n_nodes, nx), dtype=xi.dtype)
derivs_val = None
for j in range(n_nodes):
val, d_x, d_values, d_grid = table.evaluate(xi[j, ...])
result[j] = val
derivs_x[j, :] = d_x.ravel()
if d_values is not None:
if derivs_val is None:
dv_shape = [n_nodes]
dv_shape.extend(self.values.shape)
derivs_val = np.zeros(dv_shape, dtype=xi.dtype)
in_slice = table._full_slice
full_slice = [slice(j, j + 1)]
full_slice.extend(in_slice)
shape = derivs_val[tuple(full_slice)].shape
derivs_val[tuple(full_slice)] = d_values.reshape(shape)
self._d_dx = derivs_x
self._d_dvalues = derivs_val
return result
def _evaluate_spline(self, values):
xi = self.x_interp
self.values = values
table = self.table
if table._vectorized:
if table._name == 'bsplines':
table.values = values
result, _, derivs_val, _ = table.evaluate_vectorized(xi)
else:
interp = self._interp
n_nodes, _ = values.shape
nx = np.prod(xi.shape)
result = np.empty((n_nodes, nx), dtype=values.dtype)
derivs_val = None
for j in range(n_nodes):
table = interp(self.grid, values[j, :], interp, **self._interp_options)
table._compute_d_dvalues = False
table._compute_d_dx = False
result[j, :], _, _, _ = table.evaluate_vectorized(xi.reshape((nx, 1)))
else:
interp = self._interp
n_nodes, _ = values.shape
nx = np.prod(xi.shape)
result = np.empty((n_nodes, nx), dtype=values.dtype)
derivs_val = None
for j in range(n_nodes):
table = interp(self.grid, values[j, :], interp, **self._interp_options)
table._compute_d_dvalues = True
table._compute_d_dx = False
for k in range(nx):
x_pt = np.atleast_2d(xi[k])
val, _, d_values, _ = table.evaluate(x_pt)
result[j, k] = val
if d_values is not None:
if derivs_val is None:
dv_shape = [n_nodes, nx]
dv_shape.extend(values.shape[1:])
derivs_val = np.zeros(dv_shape, dtype=values.dtype)
in_slice = table._full_slice
full_slice = [slice(j, j + 1), slice(k, k + 1)]
full_slice.extend(in_slice)
shape = derivs_val[tuple(full_slice)].shape
derivs_val[tuple(full_slice)] = d_values.reshape(shape)
self._d_dvalues = derivs_val
self.table = table
return result
def gradient(self, xi):
if (self._xi is None) or (not np.array_equal(xi, self._xi)):
self.interpolate(xi)
return self._gradient().reshape(np.asarray(xi).shape)
    def _gradient(self):
        """Return the d/dx derivatives captured by the last interpolation."""
        return self._d_dx
    def training_gradients(self, pt):
        """Compute derivatives of the interpolated value at ``pt`` with
        respect to the training (table) values.

        Parameters
        ----------
        pt : ndarray
            Interpolation point; ``pt[i:i + 1]`` is fed to the 1-D table
            built for grid axis i.

        Returns
        -------
        ndarray
            Gradient w.r.t. the training values, assembled as the outer
            product of the per-axis 1-D gradients.
        """
        if self.table._vectorized:
            # Vectorized tables compute this directly.
            return self.table.training_gradients(pt)
        else:
            grid = self.grid
            interp = self._interp
            opts = self._interp_options
            for i, axis in enumerate(grid):
                ngrid = axis.size
                values = np.zeros(ngrid)
                deriv_i = np.zeros(ngrid)
                for j in range(ngrid):
                    # Interpolate a unit vector: the result is one component of
                    # d(output)/d(values) along this axis.
                    values[j] = 1.0
                    table = interp([grid[i]], values, interp, **opts)
                    table._compute_d_dvalues = False
                    deriv_i[j], _, _, _ = table.evaluate(pt[i:i + 1])
                    values[j] = 0.0
                # Accumulate axes via outer products (tensor-product grid).
                if i == 0:
                    deriv_running = deriv_i.copy()
                else:
                    deriv_running = np.outer(deriv_running, deriv_i)
            return deriv_running
    def spline_gradient(self):
        """Return d(interpolated values)/d(control point values).

        Returns
        -------
        ndarray
            Array of shape (vec_size, n_interp, n_cp).
        """
        vec_size, n_cp = self.values.shape
        x_interp = self.x_interp
        n_interp = len(x_interp)
        d_dvalues = self._d_dvalues
        if d_dvalues is not None:
            # Derivatives were captured during the last evaluation.
            dy_ddata = np.zeros((vec_size, n_interp, n_cp), dtype=d_dvalues.dtype)
            if d_dvalues.shape[0] == vec_size:
                dy_ddata[:] = d_dvalues
            else:
                # One derivative shared across all vectorized rows.
                # NOTE(review): .toarray() implies d_dvalues is sparse in this
                # branch -- confirm against the producer of _d_dvalues.
                dy_ddata[:] = np.broadcast_to(d_dvalues.toarray(), (vec_size, n_interp, n_cp))
        else:
            # No cached derivatives: recompute training gradients per point.
            dy_ddata = np.zeros((n_interp, n_cp))
            for k in range(n_interp):
                val = self.training_gradients(x_interp[k:k + 1])
                dy_ddata[k, :] = val
            dy_ddata = np.broadcast_to(dy_ddata, (vec_size, n_interp, n_cp))
        return dy_ddata
| true
| true
|
f7166b29a260dc30035dc8c5f38d515125a10b02
| 1,410
|
py
|
Python
|
data/shape_dataset.py
|
mremilien/object-deformnet
|
bb07fe05f1ee3983835ebe071252541cee5c42f8
|
[
"MIT"
] | 66
|
2020-07-17T05:15:42.000Z
|
2022-02-22T12:28:01.000Z
|
data/shape_dataset.py
|
mremilien/object-deformnet
|
bb07fe05f1ee3983835ebe071252541cee5c42f8
|
[
"MIT"
] | 25
|
2020-07-17T11:45:16.000Z
|
2022-02-07T06:11:44.000Z
|
data/shape_dataset.py
|
mremilien/object-deformnet
|
bb07fe05f1ee3983835ebe071252541cee5c42f8
|
[
"MIT"
] | 16
|
2020-07-18T22:15:20.000Z
|
2022-01-05T09:05:40.000Z
|
import h5py
import numpy as np
import torch.utils.data as data
class ShapeDataset(data.Dataset):
    """Point-cloud shape dataset backed by an HDF5 file.

    Loads the requested split ('train' or 'val') fully into memory, randomly
    downsamples each cloud to a fixed point count, and optionally applies
    clipped Gaussian jitter plus a global shift as augmentation.
    """

    def __init__(self, h5_file, mode, n_points=2048, augment=False):
        assert (mode == 'train' or mode == 'val'), 'Mode must be "train" or "val".'
        self.mode = mode
        self.n_points = n_points
        self.augment = augment
        # Read the whole split into memory up front.
        with h5py.File(h5_file, 'r') as f:
            split = f[self.mode]
            self.length = split.attrs['len']
            self.data = split['data'][:]
            self.label = split['label'][:]
        # Augmentation hyper-parameters.
        self.sigma = 0.01
        self.clip = 0.02
        self.shift_range = 0.02

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        cloud = self.data[index]
        # Labels are stored 1-based on disk; convert to 0-based.
        target = self.label[index] - 1
        total_points = cloud.shape[0]
        assert total_points >= self.n_points, 'Not enough points in shape.'
        # Random downsample (with replacement) to a fixed point count; fancy
        # indexing copies, so augmentation below never touches self.data.
        picked = np.random.choice(total_points, self.n_points)
        cloud = cloud[picked, :]
        if self.augment:
            # Per-point clipped Gaussian jitter, then one global shift.
            noise = self.sigma * np.random.randn(self.n_points, 3)
            cloud[:, :3] += np.clip(noise, -self.clip, self.clip)
            cloud[:, :3] += np.random.uniform(-self.shift_range, self.shift_range, (1, 3))
        return cloud, target
| 35.25
| 97
| 0.576596
|
import h5py
import numpy as np
import torch.utils.data as data
class ShapeDataset(data.Dataset):
def __init__(self, h5_file, mode, n_points=2048, augment=False):
assert (mode == 'train' or mode == 'val'), 'Mode must be "train" or "val".'
self.mode = mode
self.n_points = n_points
self.augment = augment
with h5py.File(h5_file, 'r') as f:
self.length = f[self.mode].attrs['len']
self.data = f[self.mode]['data'][:]
self.label = f[self.mode]['label'][:]
self.sigma = 0.01
self.clip = 0.02
self.shift_range = 0.02
def __len__(self):
return self.length
def __getitem__(self, index):
xyz = self.data[index]
label = self.label[index] - 1
np_data = xyz.shape[0]
assert np_data >= self.n_points, 'Not enough points in shape.'
idx = np.random.choice(np_data, self.n_points)
xyz = xyz[idx, :]
if self.augment:
jitter = np.clip(self.sigma*np.random.randn(self.n_points, 3), -self.clip, self.clip)
xyz[:, :3] += jitter
shift = np.random.uniform(-self.shift_range, self.shift_range, (1, 3))
xyz[:, :3] += shift
return xyz, label
| true
| true
|
f7166b34ac455a0f2c35b9f39db244c8be3a6461
| 1,936
|
py
|
Python
|
evalai/utils/urls.py
|
Ram81/evalai-cli
|
3fee2108b013461b3de8aa354473ba6eaba6539b
|
[
"BSD-3-Clause"
] | null | null | null |
evalai/utils/urls.py
|
Ram81/evalai-cli
|
3fee2108b013461b3de8aa354473ba6eaba6539b
|
[
"BSD-3-Clause"
] | null | null | null |
evalai/utils/urls.py
|
Ram81/evalai-cli
|
3fee2108b013461b3de8aa354473ba6eaba6539b
|
[
"BSD-3-Clause"
] | null | null | null |
from enum import Enum
class URLS(Enum):
    """EvalAI REST API endpoint paths, as ``str.format`` templates.

    NOTE: several members share the same string value. Python's Enum turns a
    later duplicate into an *alias* of the first member with that value, so
    host_team_list is an alias of host_teams, participant_team_list of
    participant_teams, and make_submission of my_submissions. Their ``.value``
    is identical, so formatting the URL still works as expected.
    """
    login = "/api/auth/login"
    challenge_list = "/api/challenges/challenge/all"
    past_challenge_list = "/api/challenges/challenge/past"
    future_challenge_list = "/api/challenges/challenge/future"
    challenge_details = "/api/challenges/challenge/{}"
    challenge_phase_details = "/api/challenges/challenge/phase/{}/"
    participant_teams = "/api/participants/participant_team"
    host_teams = "/api/hosts/challenge_host_team/"
    host_challenges = "/api/challenges/challenge_host_team/{}/challenge"
    challenge_phase_split_detail = "/api/challenges/{}/challenge_phase_split"
    create_host_team = "/api/hosts/create_challenge_host_team"
    # Alias of host_teams (same value).
    host_team_list = "/api/hosts/challenge_host_team/"
    participant_challenges = "/api/participants/participant_team/{}/challenge"
    # Alias of participant_teams (same value).
    participant_team_list = "/api/participants/participant_team"
    participate_in_a_challenge = (
        "/api/challenges/challenge/{}/participant_team/{}"
    )
    challenge_phase_list = "/api/challenges/challenge/{}/challenge_phase"
    challenge_phase_detail = "/api/challenges/challenge/{}/challenge_phase/{}"
    my_submissions = "/api/jobs/challenge/{}/challenge_phase/{}/submission/"
    # Alias of my_submissions (same value).
    make_submission = "/api/jobs/challenge/{}/challenge_phase/{}/submission/"
    get_submission = "/api/jobs/submission/{}"
    leaderboard = "/api/jobs/challenge_phase_split/{}/leaderboard/"
    get_aws_credentials = (
        "/api/challenges/phases/{}/participant_team/aws/credentials/"
    )
    download_file = "/api/jobs/submission_files/?bucket={}&key={}"
    phase_details_using_slug = "/api/challenges/phase/{}/"
    get_presigned_url_for_annotation_file = "/api/challenges/phases/{}/get_annotation_file_presigned_url/"
    get_presigned_url_for_submission_file = "/api/jobs/phases/{}/get_submission_file_presigned_url/"
    send_submission_message = "/api/jobs/phases/{}/send_submission_message/{}/"
| 53.777778
| 106
| 0.741736
|
from enum import Enum
class URLS(Enum):
login = "/api/auth/login"
challenge_list = "/api/challenges/challenge/all"
past_challenge_list = "/api/challenges/challenge/past"
future_challenge_list = "/api/challenges/challenge/future"
challenge_details = "/api/challenges/challenge/{}"
challenge_phase_details = "/api/challenges/challenge/phase/{}/"
participant_teams = "/api/participants/participant_team"
host_teams = "/api/hosts/challenge_host_team/"
host_challenges = "/api/challenges/challenge_host_team/{}/challenge"
challenge_phase_split_detail = "/api/challenges/{}/challenge_phase_split"
create_host_team = "/api/hosts/create_challenge_host_team"
host_team_list = "/api/hosts/challenge_host_team/"
participant_challenges = "/api/participants/participant_team/{}/challenge"
participant_team_list = "/api/participants/participant_team"
participate_in_a_challenge = (
"/api/challenges/challenge/{}/participant_team/{}"
)
challenge_phase_list = "/api/challenges/challenge/{}/challenge_phase"
challenge_phase_detail = "/api/challenges/challenge/{}/challenge_phase/{}"
my_submissions = "/api/jobs/challenge/{}/challenge_phase/{}/submission/"
make_submission = "/api/jobs/challenge/{}/challenge_phase/{}/submission/"
get_submission = "/api/jobs/submission/{}"
leaderboard = "/api/jobs/challenge_phase_split/{}/leaderboard/"
get_aws_credentials = (
"/api/challenges/phases/{}/participant_team/aws/credentials/"
)
download_file = "/api/jobs/submission_files/?bucket={}&key={}"
phase_details_using_slug = "/api/challenges/phase/{}/"
get_presigned_url_for_annotation_file = "/api/challenges/phases/{}/get_annotation_file_presigned_url/"
get_presigned_url_for_submission_file = "/api/jobs/phases/{}/get_submission_file_presigned_url/"
send_submission_message = "/api/jobs/phases/{}/send_submission_message/{}/"
| true
| true
|
f7166c21c50fd68775ca54dcd3b3e04554955178
| 581
|
py
|
Python
|
insta/migrations/0007_profile_editor.py
|
SheilaKamotho/InstaClone
|
7dd644118dfb5523fa253d3454d4aa0f2f599c69
|
[
"Unlicense",
"MIT"
] | null | null | null |
insta/migrations/0007_profile_editor.py
|
SheilaKamotho/InstaClone
|
7dd644118dfb5523fa253d3454d4aa0f2f599c69
|
[
"Unlicense",
"MIT"
] | null | null | null |
insta/migrations/0007_profile_editor.py
|
SheilaKamotho/InstaClone
|
7dd644118dfb5523fa253d3454d4aa0f2f599c69
|
[
"Unlicense",
"MIT"
] | null | null | null |
# Generated by Django 2.0.2 on 2020-10-19 10:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable ``editor`` foreign key on the ``profile`` model,
    pointing at the project's swappable auth user model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('insta', '0006_image_editor'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='editor',
            # null=True keeps existing profile rows valid; CASCADE removes the
            # profile row when its referenced user is deleted.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 26.409091
| 121
| 0.672978
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('insta', '0006_image_editor'),
]
operations = [
migrations.AddField(
model_name='profile',
name='editor',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| true
| true
|
f7166c87d0624aa75acb312538b76c621625e86c
| 7,883
|
py
|
Python
|
.history/docs/conf_20191028085309.py
|
bkraft4257/kaggle_titanic
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
[
"MIT"
] | null | null | null |
.history/docs/conf_20191028085309.py
|
bkraft4257/kaggle_titanic
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
[
"MIT"
] | null | null | null |
.history/docs/conf_20191028085309.py
|
bkraft4257/kaggle_titanic
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# kaggle_titanic documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extension modules enabled for this project. (The previous empty
# assignment was dead code -- it was immediately overwritten.)
extensions = ['sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kaggle_titanic'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'kaggle_titanicdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'kaggle_titanic.tex',
u'kaggle_titanic Documentation',
u"Bob Kraft", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kaggle_titanic', u'kaggle_titanic Documentation',
[u"Bob Kraft"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'kaggle_titanic', u'kaggle_titanic Documentation',
u"Bob Kraft", 'kaggle_titanic',
'First Kaggle competition', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 32.044715
| 80
| 0.708487
|
import os
import sys
extensions = []
extensions = ['sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'kaggle_titanic'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'kaggle_titanicdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'kaggle_titanic.tex',
u'kaggle_titanic Documentation',
u"Bob Kraft", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kaggle_titanic', u'kaggle_titanic Documentation',
[u"Bob Kraft"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'kaggle_titanic', u'kaggle_titanic Documentation',
u"Bob Kraft", 'kaggle_titanic',
'First Kaggle competition', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| true
| true
|
f7166ed99d1178bc1af8dfbf95b904d906ba4586
| 12,868
|
py
|
Python
|
src/python/src/grpc/framework/assembly/implementations.py
|
jonywtf/grpc
|
124f3c5a4b65bb88f13be7c68482eb83d945ad02
|
[
"BSD-3-Clause"
] | 1
|
2022-01-14T04:25:01.000Z
|
2022-01-14T04:25:01.000Z
|
src/python/src/grpc/framework/assembly/implementations.py
|
jonywtf/grpc
|
124f3c5a4b65bb88f13be7c68482eb83d945ad02
|
[
"BSD-3-Clause"
] | null | null | null |
src/python/src/grpc/framework/assembly/implementations.py
|
jonywtf/grpc
|
124f3c5a4b65bb88f13be7c68482eb83d945ad02
|
[
"BSD-3-Clause"
] | 1
|
2022-01-14T04:25:02.000Z
|
2022-01-14T04:25:02.000Z
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Implementations for assembling RPC framework values."""
import threading
# tickets_interfaces, face_interfaces, and activated are referenced from
# specification in this module.
from grpc.framework.assembly import interfaces
from grpc.framework.base import util as base_utilities
from grpc.framework.base.packets import implementations as tickets_implementations
from grpc.framework.base.packets import interfaces as tickets_interfaces # pylint: disable=unused-import
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.face import implementations as face_implementations
from grpc.framework.face import interfaces as face_interfaces # pylint: disable=unused-import
from grpc.framework.face import utilities as face_utilities
from grpc.framework.foundation import activated # pylint: disable=unused-import
from grpc.framework.foundation import logging_pool
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
_THREAD_POOL_SIZE = 100
class _FaceStub(object):
  """Context-managed face-layer stub.

  While active (inside a ``with`` block), attribute access is forwarded to an
  underlying face_implementations stub; outside of context, attribute access
  raises ValueError.
  """

  def __init__(self, rear_link):
    self._rear_link = rear_link
    self._lock = threading.Lock()
    self._pool = None
    self._front = None
    self._under_stub = None

  def __enter__(self):
    """Activate: create the pool and front, wire up the rear link."""
    with self._lock:
      self._pool = logging_pool.pool(_THREAD_POOL_SIZE)
      self._front = tickets_implementations.front(
          self._pool, self._pool, self._pool)
      self._rear_link.start()
      self._rear_link.join_fore_link(self._front)
      self._front.join_rear_link(self._rear_link)
      self._under_stub = face_implementations.stub(self._front, self._pool)
    # Fix: return self so "with assemble_face_stub(...) as stub" binds the
    # stub instead of None (consistent with _DynamicInlineStub.__enter__).
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    """Deactivate: stop the rear link, drain the front, shut down the pool."""
    with self._lock:
      self._under_stub = None
      self._rear_link.stop()
      base_utilities.wait_for_idle(self._front)
      self._front = None
      self._pool.shutdown(wait=True)
      self._pool = None
    return False

  def __getattr__(self, attr):
    # Forward to the live stub; only valid while in context.
    with self._lock:
      if self._under_stub is None:
        raise ValueError('Called out of context!')
      else:
        return getattr(self._under_stub, attr)
def _behaviors(implementations, front, pool):
  """Map RPC method names to stub invocation behaviors.

  Args:
    implementations: A dictionary from method name to an object bearing a
      cardinality attribute describing the RPC method.
    front: The front with which the stub communicates.
    pool: A thread pool.

  Returns:
    A dictionary from method name to the callable (or sync-async object)
    that invokes that method through a face-layer stub.
  """
  behaviors = {}
  stub = face_implementations.stub(front, pool)
  for name, implementation in implementations.iteritems():
    if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
      behaviors[name] = stub.unary_unary_sync_async(name)
    elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
      # bound_name=name freezes the loop variable at definition time,
      # avoiding the late-binding-closure pitfall.
      behaviors[name] = lambda request, context, bound_name=name: (
          stub.inline_value_in_stream_out(bound_name, request, context))
    elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
      behaviors[name] = stub.stream_unary_sync_async(name)
    elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
      behaviors[name] = lambda request_iterator, context, bound_name=name: (
          stub.inline_stream_in_stream_out(
              bound_name, request_iterator, context))
  return behaviors
class _DynamicInlineStub(object):
  """Context-managed stub that exposes RPC methods as attributes.

  While active, attribute lookup first tries the fully qualified method
  name; if that fails it falls back to matching the final path component
  (the text after the last '/') of each method name.
  """

  def __init__(self, implementations, rear_link):
    self._implementations = implementations
    self._rear_link = rear_link
    self._lock = threading.Lock()
    self._pool = None
    self._front = None
    self._behaviors = None

  def __enter__(self):
    """Activate: create the pool and front, wire the rear link, build the
    per-method behaviors."""
    with self._lock:
      self._pool = logging_pool.pool(_THREAD_POOL_SIZE)
      self._front = tickets_implementations.front(
          self._pool, self._pool, self._pool)
      self._rear_link.start()
      self._rear_link.join_fore_link(self._front)
      self._front.join_rear_link(self._rear_link)
      self._behaviors = _behaviors(
          self._implementations, self._front, self._pool)
      return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    """Deactivate: stop the rear link, drain the front, shut down the pool."""
    with self._lock:
      self._behaviors = None
      self._rear_link.stop()
      base_utilities.wait_for_idle(self._front)
      self._front = None
      self._pool.shutdown(wait=True)
      self._pool = None
    return False

  def __getattr__(self, attr):
    with self._lock:
      behavior = self._behaviors.get(attr)
      if behavior is None:
        # Fall back: match attr against the last '/'-separated component of
        # each fully qualified method name; the for/else raises only when no
        # name matches.
        for name, behavior in self._behaviors.iteritems():
          last_slash_index = name.rfind('/')
          if 0 <= last_slash_index and name[last_slash_index + 1:] == attr:
            return behavior
        else:
          raise AttributeError(
              '_DynamicInlineStub instance has no attribute "%s"!' % attr)
      else:
        return behavior
def _servicer(implementations, pool):
  """Build a face-layer servicer from a dictionary of method implementations.

  Each implementation is sorted into one of eight method dictionaries keyed
  by its (cardinality, service style) pair, which are then handed to
  face_implementations.servicer.

  Args:
    implementations: A dictionary from method name to an object describing
      the method's cardinality, style, and behavior.
    pool: A thread pool.

  Returns:
    A servicer suitable for tickets_implementations.back.
  """
  inline_value_in_value_out_methods = {}
  inline_value_in_stream_out_methods = {}
  inline_stream_in_value_out_methods = {}
  inline_stream_in_stream_out_methods = {}
  event_value_in_value_out_methods = {}
  event_value_in_stream_out_methods = {}
  event_stream_in_value_out_methods = {}
  event_stream_in_stream_out_methods = {}
  for name, implementation in implementations.iteritems():
    if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
      if implementation.style is style.Service.INLINE:
        inline_value_in_value_out_methods[name] = (
            face_utilities.inline_unary_unary_method(implementation.unary_unary_inline))
      elif implementation.style is style.Service.EVENT:
        event_value_in_value_out_methods[name] = (
            face_utilities.event_unary_unary_method(implementation.unary_unary_event))
    elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
      if implementation.style is style.Service.INLINE:
        inline_value_in_stream_out_methods[name] = (
            face_utilities.inline_unary_stream_method(implementation.unary_stream_inline))
      elif implementation.style is style.Service.EVENT:
        event_value_in_stream_out_methods[name] = (
            face_utilities.event_unary_stream_method(implementation.unary_stream_event))
    # Consistency fix: this branch was a separate "if"; "elif" makes the
    # dispatch chain uniform. Behavior is unchanged because cardinality is a
    # single enum value, so at most one branch can match.
    elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
      if implementation.style is style.Service.INLINE:
        inline_stream_in_value_out_methods[name] = (
            face_utilities.inline_stream_unary_method(implementation.stream_unary_inline))
      elif implementation.style is style.Service.EVENT:
        event_stream_in_value_out_methods[name] = (
            face_utilities.event_stream_unary_method(implementation.stream_unary_event))
    elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
      if implementation.style is style.Service.INLINE:
        inline_stream_in_stream_out_methods[name] = (
            face_utilities.inline_stream_stream_method(implementation.stream_stream_inline))
      elif implementation.style is style.Service.EVENT:
        event_stream_in_stream_out_methods[name] = (
            face_utilities.event_stream_stream_method(implementation.stream_stream_event))
  return face_implementations.servicer(
      pool,
      inline_value_in_value_out_methods=inline_value_in_value_out_methods,
      inline_value_in_stream_out_methods=inline_value_in_stream_out_methods,
      inline_stream_in_value_out_methods=inline_stream_in_value_out_methods,
      inline_stream_in_stream_out_methods=inline_stream_in_stream_out_methods,
      event_value_in_value_out_methods=event_value_in_value_out_methods,
      event_value_in_stream_out_methods=event_value_in_stream_out_methods,
      event_stream_in_value_out_methods=event_stream_in_value_out_methods,
      event_stream_in_stream_out_methods=event_stream_in_stream_out_methods)
class _ServiceAssembly(interfaces.Server):
  """Service-side assembly of the RPC framework stack.

  Builds a servicer from method implementations, wires it to a fore link
  through the tickets "back" layer, and manages a shared thread pool.
  Usable as a context manager or via explicit start()/stop() calls.
  """
  def __init__(self, implementations, fore_link):
    # implementations: dict of RPC method name -> interfaces.MethodImplementation.
    # fore_link: a fore link that is also activatable (start()/stop()).
    self._implementations = implementations
    self._fore_link = fore_link
    self._lock = threading.Lock()
    # _pool and _back are only populated between _start() and _stop().
    self._pool = None
    self._back = None
  def _start(self):
    """Creates the pool and back layer, then joins the links both ways."""
    with self._lock:
      self._pool = logging_pool.pool(_THREAD_POOL_SIZE)
      servicer = _servicer(self._implementations, self._pool)
      self._back = tickets_implementations.back(
          servicer, self._pool, self._pool, self._pool, _ONE_DAY_IN_SECONDS,
          _ONE_DAY_IN_SECONDS)
      self._fore_link.start()
      # Join in both directions so tickets flow front-to-back and back.
      self._fore_link.join_rear_link(self._back)
      self._back.join_fore_link(self._fore_link)
  def _stop(self):
    """Stops the fore link, drains in-flight work, and frees the pool."""
    with self._lock:
      self._fore_link.stop()
      # Block until the back layer is idle before tearing down the pool.
      base_utilities.wait_for_idle(self._back)
      self._back = None
      self._pool.shutdown(wait=True)
      self._pool = None
  def __enter__(self):
    self._start()
    return self
  def __exit__(self, exc_type, exc_val, exc_tb):
    self._stop()
    # Never suppress exceptions raised inside the context.
    return False
  def start(self):
    return self._start()
  def stop(self):
    self._stop()
  def port(self):
    # Delegates to the fore link; only meaningful while started.
    with self._lock:
      return self._fore_link.port()
def assemble_face_stub(activated_rear_link):
  """Builds a face_interfaces.Stub around an inactive rear link.

  The returned stub is a context manager and RPCs may only be invoked
  while it is entered.

  Args:
    activated_rear_link: An object that is simultaneously a
      tickets_interfaces.RearLink and an activated.Activated, supplied in
      its inactive state.

  Returns:
    A face_interfaces.Stub on which, in context, RPCs can be invoked.
  """
  stub = _FaceStub(activated_rear_link)
  return stub
def assemble_dynamic_inline_stub(implementations, activated_rear_link):
  """Assembles a stub whose attributes are RPC method invokers.

  The returned object is a context manager and may only be used in context
  to invoke RPCs.  In context, attribute access by RPC method name yields:

  * unary-unary: a face_interfaces.UnaryUnarySyncAsync;
  * unary-stream: a callable with the semantics of
    face_interfaces.Stub.inline_value_in_stream_out minus the "name"
    parameter;
  * stream-unary: a face_interfaces.StreamUnarySyncAsync;
  * stream-stream: a callable with the semantics of
    face_interfaces.Stub.inline_stream_in_stream_out minus the "name"
    parameter.

  Args:
    implementations: A dictionary from RPC method name to
      interfaces.MethodImplementation.
    activated_rear_link: An object that is both a
      tickets_interfaces.RearLink and an activated.Activated, supplied in
      its inactive state.

  Returns:
    A stub on which, in context, RPCs can be invoked.
  """
  stub = _DynamicInlineStub(implementations, activated_rear_link)
  return stub
def assemble_service(implementations, activated_fore_link):
  """Builds the service-side of the RPC framework stack.

  Args:
    implementations: A dictionary from RPC method name to
      interfaces.MethodImplementation.
    activated_fore_link: An object that is simultaneously a
      tickets_interfaces.ForeLink and an activated.Activated, supplied in
      its inactive state.

  Returns:
    An interfaces.Server encapsulating RPC service.
  """
  server = _ServiceAssembly(implementations, activated_fore_link)
  return server
| 40.465409
| 105
| 0.756062
|
import threading
from grpc.framework.assembly import interfaces
from grpc.framework.base import util as base_utilities
from grpc.framework.base.packets import implementations as tickets_implementations
from grpc.framework.base.packets import interfaces as tickets_interfaces
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.face import implementations as face_implementations
from grpc.framework.face import interfaces as face_interfaces
from grpc.framework.face import utilities as face_utilities
from grpc.framework.foundation import activated
from grpc.framework.foundation import logging_pool
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
_THREAD_POOL_SIZE = 100
class _FaceStub(object):
  """Context-managed face stub over a rear link.

  While entered, attribute access is delegated to an underlying
  face_implementations stub; outside of context, access raises ValueError.
  """
  def __init__(self, rear_link):
    self._rear_link = rear_link
    self._lock = threading.Lock()
    # _pool, _front and _under_stub exist only while inside the context.
    self._pool = None
    self._front = None
    self._under_stub = None
  def __enter__(self):
    # NOTE(review): __enter__ returns None, so "with stub as s:" binds None;
    # callers must keep using the stub object itself — confirm intended.
    with self._lock:
      self._pool = logging_pool.pool(_THREAD_POOL_SIZE)
      self._front = tickets_implementations.front(
          self._pool, self._pool, self._pool)
      self._rear_link.start()
      # Join links in both directions so tickets flow each way.
      self._rear_link.join_fore_link(self._front)
      self._front.join_rear_link(self._rear_link)
      self._under_stub = face_implementations.stub(self._front, self._pool)
  def __exit__(self, exc_type, exc_val, exc_tb):
    with self._lock:
      self._under_stub = None
      self._rear_link.stop()
      # Drain in-flight work before tearing down the thread pool.
      base_utilities.wait_for_idle(self._front)
      self._front = None
      self._pool.shutdown(wait=True)
      self._pool = None
      # Never suppress exceptions raised inside the context.
      return False
  def __getattr__(self, attr):
    # Delegate unknown attributes to the live stub (RPC method access).
    with self._lock:
      if self._under_stub is None:
        raise ValueError('Called out of context!')
      else:
        return getattr(self._under_stub, attr)
def _behaviors(implementations, front, pool):
  """Maps each RPC method name to an invocation behavior on a new stub.

  Unary-unary and stream-unary methods map to the stub's sync-async
  objects; the streaming-response cardinalities map to lambdas that call
  the stub's inline invocation methods.
  """
  behaviors = {}
  stub = face_implementations.stub(front, pool)
  for name, implementation in implementations.iteritems():
    if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
      behaviors[name] = stub.unary_unary_sync_async(name)
    elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
      # bound_name=name pins the loop variable at definition time,
      # avoiding the late-binding-closure pitfall.
      behaviors[name] = lambda request, context, bound_name=name: (
          stub.inline_value_in_stream_out(bound_name, request, context))
    elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
      behaviors[name] = stub.stream_unary_sync_async(name)
    elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
      behaviors[name] = lambda request_iterator, context, bound_name=name: (
          stub.inline_stream_in_stream_out(
              bound_name, request_iterator, context))
  return behaviors
class _DynamicInlineStub(object):
  """Context-managed stub exposing RPC methods as attributes.

  While entered, attribute lookup resolves RPC method names (exact, or by
  the suffix after the last '/') to invocation behaviors built by
  _behaviors().
  """
  def __init__(self, implementations, rear_link):
    self._implementations = implementations
    self._rear_link = rear_link
    self._lock = threading.Lock()
    # _pool, _front and _behaviors exist only while inside the context.
    self._pool = None
    self._front = None
    self._behaviors = None
  def __enter__(self):
    with self._lock:
      self._pool = logging_pool.pool(_THREAD_POOL_SIZE)
      self._front = tickets_implementations.front(
          self._pool, self._pool, self._pool)
      self._rear_link.start()
      # Join links in both directions so tickets flow each way.
      self._rear_link.join_fore_link(self._front)
      self._front.join_rear_link(self._rear_link)
      self._behaviors = _behaviors(
          self._implementations, self._front, self._pool)
      return self
  def __exit__(self, exc_type, exc_val, exc_tb):
    with self._lock:
      self._behaviors = None
      self._rear_link.stop()
      # Drain in-flight work before tearing down the thread pool.
      base_utilities.wait_for_idle(self._front)
      self._front = None
      self._pool.shutdown(wait=True)
      self._pool = None
      # Never suppress exceptions raised inside the context.
      return False
  def __getattr__(self, attr):
    with self._lock:
      behavior = self._behaviors.get(attr)
      if behavior is None:
        # Fall back to matching the method-name suffix after the last '/'
        # (e.g. 'pkg.Service/Method' matched by 'Method').  The for-else
        # raises only when the loop finds no match.
        for name, behavior in self._behaviors.iteritems():
          last_slash_index = name.rfind('/')
          if 0 <= last_slash_index and name[last_slash_index + 1:] == attr:
            return behavior
        else:
          raise AttributeError(
              '_DynamicInlineStub instance has no attribute "%s"!' % attr)
      else:
        return behavior
def _servicer(implementations, pool):
  """Creates a face servicer dispatching on cardinality and service style.

  Args:
    implementations: A dictionary from RPC method name to
      interfaces.MethodImplementation.
    pool: A thread pool for the servicer's use.

  Returns:
    A face_implementations servicer with each method routed to the bucket
    matching its cardinality (unary/stream in and out) and style
    (inline/event).
  """
  inline_value_in_value_out_methods = {}
  inline_value_in_stream_out_methods = {}
  inline_stream_in_value_out_methods = {}
  inline_stream_in_stream_out_methods = {}
  event_value_in_value_out_methods = {}
  event_value_in_stream_out_methods = {}
  event_stream_in_value_out_methods = {}
  event_stream_in_stream_out_methods = {}
  for name, implementation in implementations.iteritems():
    if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
      if implementation.style is style.Service.INLINE:
        inline_value_in_value_out_methods[name] = (
            face_utilities.inline_unary_unary_method(implementation.unary_unary_inline))
      elif implementation.style is style.Service.EVENT:
        event_value_in_value_out_methods[name] = (
            face_utilities.event_unary_unary_method(implementation.unary_unary_event))
    elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
      if implementation.style is style.Service.INLINE:
        inline_value_in_stream_out_methods[name] = (
            face_utilities.inline_unary_stream_method(implementation.unary_stream_inline))
      elif implementation.style is style.Service.EVENT:
        event_value_in_stream_out_methods[name] = (
            face_utilities.event_unary_stream_method(implementation.unary_stream_event))
    # Fixed: this branch was a bare `if`, silently restarting the
    # cardinality dispatch chain.  `elif` keeps one mutually-exclusive
    # chain (behavior is unchanged since cardinalities are exclusive).
    elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
      if implementation.style is style.Service.INLINE:
        inline_stream_in_value_out_methods[name] = (
            face_utilities.inline_stream_unary_method(implementation.stream_unary_inline))
      elif implementation.style is style.Service.EVENT:
        event_stream_in_value_out_methods[name] = (
            face_utilities.event_stream_unary_method(implementation.stream_unary_event))
    elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
      if implementation.style is style.Service.INLINE:
        inline_stream_in_stream_out_methods[name] = (
            face_utilities.inline_stream_stream_method(implementation.stream_stream_inline))
      elif implementation.style is style.Service.EVENT:
        event_stream_in_stream_out_methods[name] = (
            face_utilities.event_stream_stream_method(implementation.stream_stream_event))
  return face_implementations.servicer(
      pool,
      inline_value_in_value_out_methods=inline_value_in_value_out_methods,
      inline_value_in_stream_out_methods=inline_value_in_stream_out_methods,
      inline_stream_in_value_out_methods=inline_stream_in_value_out_methods,
      inline_stream_in_stream_out_methods=inline_stream_in_stream_out_methods,
      event_value_in_value_out_methods=event_value_in_value_out_methods,
      event_value_in_stream_out_methods=event_value_in_stream_out_methods,
      event_stream_in_value_out_methods=event_stream_in_value_out_methods,
      event_stream_in_stream_out_methods=event_stream_in_stream_out_methods)
class _ServiceAssembly(interfaces.Server):
def __init__(self, implementations, fore_link):
self._implementations = implementations
self._fore_link = fore_link
self._lock = threading.Lock()
self._pool = None
self._back = None
def _start(self):
with self._lock:
self._pool = logging_pool.pool(_THREAD_POOL_SIZE)
servicer = _servicer(self._implementations, self._pool)
self._back = tickets_implementations.back(
servicer, self._pool, self._pool, self._pool, _ONE_DAY_IN_SECONDS,
_ONE_DAY_IN_SECONDS)
self._fore_link.start()
self._fore_link.join_rear_link(self._back)
self._back.join_fore_link(self._fore_link)
def _stop(self):
with self._lock:
self._fore_link.stop()
base_utilities.wait_for_idle(self._back)
self._back = None
self._pool.shutdown(wait=True)
self._pool = None
def __enter__(self):
self._start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._stop()
return False
def start(self):
return self._start()
def stop(self):
self._stop()
def port(self):
with self._lock:
return self._fore_link.port()
def assemble_face_stub(activated_rear_link):
return _FaceStub(activated_rear_link)
def assemble_dynamic_inline_stub(implementations, activated_rear_link):
return _DynamicInlineStub(implementations, activated_rear_link)
def assemble_service(implementations, activated_fore_link):
return _ServiceAssembly(implementations, activated_fore_link)
| true
| true
|
f7166f854ffcad07870a5030cb9bfd7720c7b846
| 4,685
|
py
|
Python
|
relationships/relationship.py
|
emre/relationships
|
9452af1dd2897a6b102c4a391c95b499622c2f28
|
[
"MIT"
] | 85
|
2015-08-05T06:13:28.000Z
|
2021-05-07T13:56:30.000Z
|
relationships/relationship.py
|
emre/relationships
|
9452af1dd2897a6b102c4a391c95b499622c2f28
|
[
"MIT"
] | 3
|
2015-08-06T05:50:37.000Z
|
2015-08-07T05:31:50.000Z
|
relationships/relationship.py
|
emre/relationships
|
9452af1dd2897a6b102c4a391c95b499622c2f28
|
[
"MIT"
] | 7
|
2015-08-06T01:34:14.000Z
|
2018-12-21T01:17:33.000Z
|
import redis
from keys import key_list as default_key_list
class Relationship(object):
    """Redis-backed follow/block relationship graph between users.

    Each relation is stored as a Redis set keyed ``user:<id>:<operation>``
    (e.g. ``user:42:following``).  The "actor" — the user the operations
    apply to — is bound at construction or per call:
    ``relationship(user_id).follow(other_id)``.
    """
    def __init__(self, redis_connection=None, key_list=None, actor=None):
        # Overlay caller-supplied key names on a copy of the defaults.
        if key_list:
            self.key_list = default_key_list.copy()
            self.key_list.update(key_list)
        else:
            self.key_list = default_key_list
        # Fall back to a localhost connection when none is injected.
        if redis_connection:
            self.redis_connection = redis_connection
        else:
            self.redis_connection = redis.StrictRedis(
                host='localhost',
                port=6379,
                db=0
            )
        self.actor = actor
    def __call__(self, *args, **kwargs):
        """Binds the first positional argument as the current actor."""
        self.actor = args[0]
        return self
    def _action_call(self, command, from_id, to_id, operation_key):
        # Runs e.g. SADD/SREM/SISMEMBER on 'user:<from_id>:<operation_key>'.
        command_values = ':'.join(('user', str(from_id), operation_key)), to_id
        return getattr(self.redis_connection, command)(*command_values)
    def _list_call(self, operation_key):
        """Returns the members of the actor's set for *operation_key*."""
        return self.redis_connection.smembers(
            'user:{}:{}'.format(self._get_actor(), operation_key)
        )
    def _count_call(self, operation_key):
        """Returns the cardinality of the actor's set for *operation_key*."""
        return self.redis_connection.scard(
            'user:{}:{}'.format(
                self._get_actor(),
                operation_key
            )
        )
    def _get_actor(self):
        """Returns the bound actor, raising ValueError if none is set.

        Fixed: the original checked hasattr(self, 'actor'), which is always
        true after __init__ (actor defaults to None), so an unbound actor
        silently produced keys like 'user:None:following'.
        """
        if self.actor is not None:
            return self.actor
        raise ValueError("actor is not defined")
    def block(self, to_id):
        """Actor blocks *to_id*; recorded symmetrically on both users."""
        self._action_call('sadd', self._get_actor(), to_id, self.key_list["blocked"])
        self._action_call('sadd', to_id, self._get_actor(), self.key_list["blocked_by"])
    def unblock(self, to_id):
        """Removes the block relation between the actor and *to_id*."""
        self._action_call('srem', self._get_actor(), to_id, self.key_list["blocked"])
        self._action_call('srem', to_id, self._get_actor(), self.key_list["blocked_by"])
    def follow(self, to_id):
        """Actor follows *to_id*; recorded symmetrically on both users."""
        self._action_call('sadd', self._get_actor(), to_id, self.key_list["following"])
        self._action_call('sadd', to_id, self._get_actor(), self.key_list["followers"])
    def unfollow(self, to_id):
        """Removes the follow relation between the actor and *to_id*."""
        self._action_call('srem', self._get_actor(), to_id, self.key_list["following"])
        self._action_call('srem', to_id, self._get_actor(), self.key_list["followers"])
    def friends(self):
        """Mutual follows: intersection of following and followers."""
        return self.redis_connection.sinter(
            "user:{}:{}".format(self._get_actor(), self.key_list["following"]),
            "user:{}:{}".format(self._get_actor(), self.key_list["followers"]),
        )
    def mutual_friends(self, to_id):
        """Friends both the actor and *to_id* have in common.

        Note: rebinds the instance's actor as a side effect of calling self().
        """
        actor_friends, to_id_friends = self(self._get_actor()).friends(), self(to_id).friends()
        return actor_friends.intersection(to_id_friends)
    def followers(self):
        return self._list_call(self.key_list["followers"])
    def following(self):
        return self._list_call(self.key_list["following"])
    def blocks(self):
        return self._list_call(self.key_list["blocked"])
    def blocked(self):
        return self._list_call(self.key_list["blocked_by"])
    def follower_count(self):
        return self._count_call(self.key_list["followers"])
    def following_count(self):
        return self._count_call(self.key_list["following"])
    def block_count(self):
        return self._count_call(self.key_list["blocked"])
    def blocked_count(self):
        return self._count_call(self.key_list["blocked_by"])
    def is_follower(self, follower_id):
        return self._action_call('sismember', self._get_actor(), follower_id, self.key_list["followers"])
    def is_following(self, following_id):
        return self._action_call('sismember', self._get_actor(), following_id, self.key_list["following"])
    def is_blocked(self, blocked_id):
        return self._action_call('sismember', self._get_actor(), blocked_id, self.key_list["blocked"])
    def is_blocked_by(self, blocked_by_id):
        return self._action_call('sismember', self._get_actor(), blocked_by_id, self.key_list["blocked_by"])
    def get_network(self, output):
        """Renders the actor's follow graph to *output* as a PNG via pydot."""
        user_id = self._get_actor()
        try:
            import pydot
        except ImportError:
            raise ImportError("You need pydot library to get network functionality.")
        graph = pydot.Dot('network_of_user_{}'.format(user_id), graph_type='digraph')
        target_node = pydot.Node(user_id)
        # Outgoing edges: actor -> everyone the actor follows.
        for _id in self(user_id).following():
            user_node = pydot.Node(_id)
            graph.add_edge(pydot.Edge(target_node, user_node))
        # Incoming edges: each follower -> actor.
        for _id in self(user_id).followers():
            user_node = pydot.Node(_id)
            graph.add_edge(pydot.Edge(user_node, target_node))
        graph.write_png(output)
| 31.655405
| 108
| 0.639488
|
import redis
from keys import key_list as default_key_list
class Relationship(object):
def __init__(self, redis_connection=None, key_list=None, actor=None):
if key_list:
self.key_list = default_key_list.copy()
self.key_list.update(key_list)
else:
self.key_list = default_key_list
if redis_connection:
self.redis_connection = redis_connection
else:
self.redis_connection = redis.StrictRedis(
host='localhost',
port=6379,
db=0
)
self.actor = actor
def __call__(self, *args, **kwargs):
self.actor = args[0]
return self
def _action_call(self, command, from_id, to_id, operation_key):
command_values = ':'.join(('user', str(from_id), operation_key)), to_id
return getattr(self.redis_connection, command)(*command_values)
def _list_call(self, operation_key):
return self.redis_connection.smembers(
'user:{}:{}'.format(self._get_actor(), operation_key)
)
def _count_call(self, operation_key):
return self.redis_connection.scard(
'user:{}:{}'.format(
self._get_actor(),
operation_key
)
)
def _get_actor(self):
if hasattr(self, 'actor'):
return self.actor
raise ValueError("actor is not defined")
def block(self, to_id):
self._action_call('sadd', self._get_actor(), to_id, self.key_list["blocked"])
self._action_call('sadd', to_id, self._get_actor(), self.key_list["blocked_by"])
def unblock(self, to_id):
self._action_call('srem', self._get_actor(), to_id, self.key_list["blocked"])
self._action_call('srem', to_id, self._get_actor(), self.key_list["blocked_by"])
def follow(self, to_id):
self._action_call('sadd', self._get_actor(), to_id, self.key_list["following"])
self._action_call('sadd', to_id, self._get_actor(), self.key_list["followers"])
def unfollow(self, to_id):
self._action_call('srem', self._get_actor(), to_id, self.key_list["following"])
self._action_call('srem', to_id, self._get_actor(), self.key_list["followers"])
def friends(self):
return self.redis_connection.sinter(
"user:{}:{}".format(self._get_actor(), self.key_list["following"]),
"user:{}:{}".format(self._get_actor(), self.key_list["followers"]),
)
def mutual_friends(self, to_id):
actor_friends, to_id_friends = self(self._get_actor()).friends(), self(to_id).friends()
return actor_friends.intersection(to_id_friends)
def followers(self):
return self._list_call(self.key_list["followers"])
def following(self):
return self._list_call(self.key_list["following"])
def blocks(self):
return self._list_call(self.key_list["blocked"])
def blocked(self):
return self._list_call(self.key_list["blocked_by"])
def follower_count(self):
return self._count_call(self.key_list["followers"])
def following_count(self):
return self._count_call(self.key_list["following"])
def block_count(self):
return self._count_call(self.key_list["blocked"])
def blocked_count(self):
return self._count_call(self.key_list["blocked_by"])
def is_follower(self, follower_id):
return self._action_call('sismember', self._get_actor(), follower_id, self.key_list["followers"])
def is_following(self, following_id):
return self._action_call('sismember', self._get_actor(), following_id, self.key_list["following"])
def is_blocked(self, blocked_id):
return self._action_call('sismember', self._get_actor(), blocked_id, self.key_list["blocked"])
def is_blocked_by(self, blocked_by_id):
return self._action_call('sismember', self._get_actor(), blocked_by_id,self.key_list["blocked_by"])
def get_network(self, output):
user_id = self._get_actor()
try:
import pydot
except ImportError:
raise ImportError("You need pydot library to get network functionality.")
graph = pydot.Dot('network_of_user_{}'.format(user_id), graph_type='digraph')
target_node = pydot.Node(user_id)
for _id in self(user_id).following():
user_node = pydot.Node(_id)
graph.add_edge(pydot.Edge(target_node, user_node))
for _id in self(user_id).followers():
user_node = pydot.Node(_id)
graph.add_edge(pydot.Edge(user_node, target_node))
graph.write_png(output)
| true
| true
|
f71670077ff7bfc22a95558c4ef9773e2b4a8e07
| 4,867
|
py
|
Python
|
deeplearning_examples/loaders/Churn.py
|
dileep-kishore/deeplearning-examples
|
2b230ea17f366f602044d44cc8abcac419d4e521
|
[
"MIT"
] | null | null | null |
deeplearning_examples/loaders/Churn.py
|
dileep-kishore/deeplearning-examples
|
2b230ea17f366f602044d44cc8abcac419d4e521
|
[
"MIT"
] | 321
|
2017-11-23T20:37:03.000Z
|
2020-12-28T13:06:15.000Z
|
deeplearning_examples/loaders/Churn.py
|
dileep-kishore/deeplearning-examples
|
2b230ea17f366f602044d44cc8abcac419d4e521
|
[
"MIT"
] | null | null | null |
# @Author: dileep
# @Last Modified by: dileep
from collections import OrderedDict
import os
from typing import Tuple, Iterable, Sequence, Dict, Union
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from . import datapath
from ..preprocessing import Encoder
from ..sampling import hold_out
#TODO: Make this a subclass of torch.utils.data.Dataset
class Churn:
    """
    Class for loading the `churn` dataset to predict whether customer `exited` or not
    Parameters:
    ----------
    features : Iterable[str]
        List of features to be used in training and testing.
        NOTE: Do not include the dependent variable
        Options: {RowNumber,CustomerId,Surname,CreditScore,Geography,Gender,
                  Age,Tenure,Balance,NumOfProducts,HasCrCard,IsActiveMember,
                  EstimatedSalary}
    Attributes:
    ----------
    raw_data : pd.Series
        Raw data returned in the form of a pandas dataframe
    train_data : Tuple[np.ndarray, np.ndarray]
        Tuple of (features, targets) where each is a numpy ndarray
    test_data : Tuple[np.ndarray, np.ndarray]
        Tuple of (features, targets) where each is a numpy ndarray
    """
    # Disjoint groups mapping each column to its data kind; used by
    # _get_feat_meta to label features for the Encoder.
    _feature_dict = {
        'multi-category': {'Geography'},
        'binary-category': {'Gender', 'HasCrCard', 'IsActiveMember', 'Exited'},
        'int': {'CreditScore', 'Age', 'Tenure', 'NumOfProducts'},
        'float': {'Balance', 'EstimatedSalary'}
    }
    def __init__(self, features: Union[Iterable[str], str] = 'all') -> None:
        """Loads the churn CSV and records the requested feature list."""
        churn_path = os.path.join(datapath(), 'churn/Churn_Modeling.csv')
        self.raw_data = pd.read_csv(churn_path, index_col=0)
        if features == 'all':
            features = self.all_features
        # NOTE(review): assert is stripped under `python -O`; raising
        # ValueError would validate input unconditionally.
        assert self._validate_features(features), "Invalid features given"
        # Dependent variable is appended last so encoders can find it.
        self._features = features + ['Exited']
    def __call__(self):
        """Splits, encodes, and returns the train/test data for 'Exited'."""
        raw_train, raw_test = hold_out(self.raw_data[self._features])
        feat_meta = self._get_feat_meta(self._features)
        data_encoder = Encoder(feat_meta)
        return data_encoder.encode(raw_train, raw_test, 'Exited')
    @property
    def all_features(self) -> Iterable[str]:
        """
        Returns all the possible features that can be used
        Returns:
        -------
        Iterable[str]
            A list of all possible features
        """
        features = list(self.raw_data.columns)
        # Identifier-like columns and the target are never usable features.
        return [f for f in features if f not in {'Exited', 'RowNumber', 'CustomerId', 'Surname'}]
    def _validate_features(self, features: Iterable[str]) -> bool:
        """
        Returns whether the input set of features are valid
        Parameters:
        ----------
        features : Iterable[str]
            Features input to the class
        Returns:
        -------
        bool
            True/False based on validity
        """
        all_features = set()
        for f_set in self._feature_dict.values():
            all_features.update(f_set)
        # Valid iff no requested feature falls outside the known groups.
        return not any(filter(lambda f: f not in all_features, features))
    def _get_feat_meta(self, features: Iterable[str]) -> Dict[str, str]:
        """
        Returns the type for each feature
        Parameters:
        ----------
        features : Iterable[str]
            A list of features that are to be used for classification
        Returns:
        -------
        Dict[str, str]
            Dictionary of features and their corresponding types
        """
        invert_fdict = {frozenset(v): k for k, v in self._feature_dict.items()}
        feat_meta: Dict[str, str] = OrderedDict()
        for feat in features:
            for feat_group, data_type in invert_fdict.items():
                if feat in feat_group:
                    feat_meta[feat] = data_type
                    # NOTE(review): `continue` only skips to the next group;
                    # `break` was likely intended.  Harmless because the
                    # groups are disjoint, so no second match can occur.
                    continue
        return feat_meta
    def encode_features(self, features: Iterable[str]) -> Tuple[np.ndarray, np.ndarray]:
        # NOTE(review): `or` returns the first truthy operand, not the set
        # union — multi-category features are never matched here; the set
        # union operator `|` was probably intended.
        cat_features = (self._feature_dict['binary-category'] or
                        self._feature_dict['multi-category'])
        for feat in features:
            if feat in cat_features:
                # NOTE(review): method appears unfinished — `self.pp` is not
                # defined anywhere visible and would raise AttributeError.
                self.pp
    def split_data(self, features: Iterable[str]) -> Sequence[np.ndarray]:
        """
        Splits the raw data into training and testing using the features as a filter
        Parameters:
        ----------
        features : Iterable[str]
            Features that are to be used in the training and testing data
        Returns:
        -------
        Sequence[np.ndarray]
            Sequence of x_train, x_test, y_train, y_test
        """
        # NOTE(review): not yet implemented.
        pass
| 38.322835
| 97
| 0.589275
|
from collections import OrderedDict
import os
from typing import Tuple, Iterable, Sequence, Dict, Union
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from . import datapath
from ..preprocessing import Encoder
from ..sampling import hold_out
class Churn:
_feature_dict = {
'multi-category': {'Geography'},
'binary-category': {'Gender', 'HasCrCard', 'IsActiveMember', 'Exited'},
'int': {'CreditScore', 'Age', 'Tenure', 'NumOfProducts'},
'float': {'Balance', 'EstimatedSalary'}
}
def __init__(self, features: Union[Iterable[str], str] = 'all') -> None:
churn_path = os.path.join(datapath(), 'churn/Churn_Modeling.csv')
self.raw_data = pd.read_csv(churn_path, index_col=0)
if features == 'all':
features = self.all_features
assert self._validate_features(features), "Invalid features given"
self._features = features + ['Exited']
def __call__(self):
raw_train, raw_test = hold_out(self.raw_data[self._features])
feat_meta = self._get_feat_meta(self._features)
data_encoder = Encoder(feat_meta)
return data_encoder.encode(raw_train, raw_test, 'Exited')
@property
def all_features(self) -> Iterable[str]:
features = list(self.raw_data.columns)
return [f for f in features if f not in {'Exited', 'RowNumber', 'CustomerId', 'Surname'}]
def _validate_features(self, features: Iterable[str]) -> bool:
all_features = set()
for f_set in self._feature_dict.values():
all_features.update(f_set)
return not any(filter(lambda f: f not in all_features, features))
def _get_feat_meta(self, features: Iterable[str]) -> Dict[str, str]:
invert_fdict = {frozenset(v): k for k, v in self._feature_dict.items()}
feat_meta: Dict[str, str] = OrderedDict()
for feat in features:
for feat_group, data_type in invert_fdict.items():
if feat in feat_group:
feat_meta[feat] = data_type
continue
return feat_meta
def encode_features(self, features: Iterable[str]) -> Tuple[np.ndarray, np.ndarray]:
cat_features = (self._feature_dict['binary-category'] or
self._feature_dict['multi-category'])
for feat in features:
if feat in cat_features:
self.pp
def split_data(self, features: Iterable[str]) -> Sequence[np.ndarray]:
pass
| true
| true
|
f716709e2478d7522c2c591d1f65cf3807762b7e
| 255
|
py
|
Python
|
Project Euler/1_Multiples_of_3_and_5.py
|
Ashwanigupta9125/code-DS-ALGO
|
49f6cf7d0c682da669db23619aef3f80697b352b
|
[
"MIT"
] | 36
|
2019-12-27T08:23:08.000Z
|
2022-01-24T20:35:47.000Z
|
Project Euler/1_Multiples_of_3_and_5.py
|
Ashwanigupta9125/code-DS-ALGO
|
49f6cf7d0c682da669db23619aef3f80697b352b
|
[
"MIT"
] | 10
|
2019-11-13T02:55:18.000Z
|
2021-10-13T23:28:09.000Z
|
Project Euler/1_Multiples_of_3_and_5.py
|
Ashwanigupta9125/code-DS-ALGO
|
49f6cf7d0c682da669db23619aef3f80697b352b
|
[
"MIT"
] | 53
|
2020-08-15T11:08:40.000Z
|
2021-10-09T15:51:38.000Z
|
# Project Euler #1: if we list the natural numbers below 10 that are
# multiples of 3 or 5, we get 3, 5, 6 and 9, whose sum is 23.
# Print the sum of all the multiples of 3 or 5 below 1000.
total = 0
for n in range(1000):
    if n % 3 == 0 or n % 5 == 0:
        total += n
print(total)
| 63.75
| 131
| 0.670588
|
print(sum( i for i in range(1000) if i % 3 == 0 or i % 5 == 0 ))
| true
| true
|
f71671890da4aa3c0e2138334e26e8ab78927add
| 451
|
py
|
Python
|
musicbotv2/plugins/loader.py
|
dabolink/MusicBot
|
38ab851b2c533e5bc703c4a4b3bb4af07059139f
|
[
"MIT"
] | null | null | null |
musicbotv2/plugins/loader.py
|
dabolink/MusicBot
|
38ab851b2c533e5bc703c4a4b3bb4af07059139f
|
[
"MIT"
] | null | null | null |
musicbotv2/plugins/loader.py
|
dabolink/MusicBot
|
38ab851b2c533e5bc703c4a4b3bb4af07059139f
|
[
"MIT"
] | null | null | null |
import importlib
from typing import List
class ModuleInterface:
    """Static shape every loadable command module is expected to satisfy."""
    @staticmethod
    def register() -> None:
        """Initialize the command; called once after the module is imported."""
def import_module(name: str) -> ModuleInterface:
    """Imports *name* and treats the module as a command module."""
    module = importlib.import_module(name)
    return module  # type: ignore
def load_commands(commands: List[str]) -> None:
    """Imports each named command module and invokes its register() hook."""
    for name in commands:
        print("loading command :: ", name)
        module = import_module(name)
        module.register()
| 22.55
| 56
| 0.678492
|
import importlib
from typing import List
class ModuleInterface:
@staticmethod
def register() -> None:
def import_module(name: str) -> ModuleInterface:
return importlib.import_module(name)
def load_commands(commands: List[str]) -> None:
for command_name in commands:
print("loading command :: ", command_name)
cmd = import_module(command_name)
cmd.register()
| true
| true
|
f716730fc1f1eb3c334d8d580c59f6f1e1e87d19
| 793
|
py
|
Python
|
setup.py
|
fginter/simstring-cuda
|
7773bca1e3ecf6dd9eac8f7c007549d098539356
|
[
"Apache-2.0"
] | 2
|
2022-02-02T13:47:32.000Z
|
2022-02-10T04:30:38.000Z
|
setup.py
|
fginter/simstring-cuda
|
7773bca1e3ecf6dd9eac8f7c007549d098539356
|
[
"Apache-2.0"
] | 1
|
2022-02-10T04:40:00.000Z
|
2022-02-10T04:40:00.000Z
|
setup.py
|
fginter/simstring-cuda
|
7773bca1e3ecf6dd9eac8f7c007549d098539356
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
from os import path

# Use the README next to this file as the PyPI long description.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='SimString-cuda',
    version='0.1.0',
    url='https://github.com/fginter/simstring-cuda.git',
    author='Filip Ginter',
    author_email='filip.ginter@gmail.com',
    description="A poor-man's version of simistring-like lookup. Can hold its ground if the DB is few million strings, a GPU is present, and queries are batched by about a hundred strings.",
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=find_packages(),
    # Fixed: 'sklearn' is a deprecated dummy package on PyPI; the real
    # distribution name is 'scikit-learn'.
    install_requires=['scikit-learn', 'torch'],
    scripts=['simscuda']
)
| 37.761905
| 190
| 0.725095
|
from setuptools import setup, find_packages
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='SimString-cuda',
version='0.1.0',
url='https://github.com/fginter/simstring-cuda.git',
author='Filip Ginter',
author_email='filip.ginter@gmail.com',
description="A poor-man's version of simistring-like lookup. Can hold its ground if the DB is few million strings, a GPU is present, and queries are batched by about a hundred strings.",
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
install_requires=['sklearn', 'torch'],
scripts=['simscuda']
)
| true
| true
|
f716732d7832fd5fb2824492c9a76cc168282fea
| 7,206
|
bzl
|
Python
|
scala_proto/private/scalapb_aspect.bzl
|
majcherm-da/rules_scala
|
78104d8014d4e4fc8f905cd34b91dfabd9a268c8
|
[
"Apache-2.0"
] | null | null | null |
scala_proto/private/scalapb_aspect.bzl
|
majcherm-da/rules_scala
|
78104d8014d4e4fc8f905cd34b91dfabd9a268c8
|
[
"Apache-2.0"
] | null | null | null |
scala_proto/private/scalapb_aspect.bzl
|
majcherm-da/rules_scala
|
78104d8014d4e4fc8f905cd34b91dfabd9a268c8
|
[
"Apache-2.0"
] | null | null | null |
load(
"//scala/private:common.bzl",
"write_manifest_file",
)
load("//scala/private:rule_impls.bzl", "compile_scala")
load("//scala_proto/private:proto_to_scala_src.bzl", "proto_to_scala_src")
# Carried by the scalapb aspect along the proto_library DAG: generated
# source jars, compiled outputs, proto info, and the merged JavaInfo.
ScalaPBAspectInfo = provider(fields = [
    "proto_info",
    "src_jars",
    "output_files",
    "java_info",
])
# Presumably a pre-built scalapb dependency usable in place of generated
# code — fields mirror the aspect's java/proto info (confirm at use sites).
ScalaPBImport = provider(fields = [
    "java_info",
    "proto_info",
])
# Top-level provider exposing the aggregated aspect info to rules.
ScalaPBInfo = provider(fields = [
    "aspect_info",
])
def merge_proto_infos(tis):
    """Bundles several proto infos into one struct.

    Note: transitive_sources is a *list* of the inputs' transitive_sources
    (one entry per input), not a flattened depset.
    """
    return struct(
        transitive_sources = [t.transitive_sources for t in tis],
    )
def merge_scalapb_aspect_info(scalapbs):
    """Folds a list of ScalaPBAspectInfo providers into a single one."""
    return ScalaPBAspectInfo(
        src_jars = depset(transitive = [s.src_jars for s in scalapbs]),
        output_files = depset(transitive = [s.output_files for s in scalapbs]),
        proto_info = merge_proto_infos([s.proto_info for s in scalapbs]),
        java_info = java_common.merge([s.java_info for s in scalapbs]),
    )
def _compiled_jar_file(actions, scalapb_jar):
    """Declare the output jar for a generated srcjar: foo_scalapb.srcjar -> foo_scalapb.jar."""
    scalapb_jar_name = scalapb_jar.basename
    # The basename ends with ".srcjar" (7 characters); stripping only the
    # last 6 intentionally keeps the trailing dot, so appending "jar" below
    # produces "<stem>.jar".
    without_suffix = scalapb_jar_name[0:len(scalapb_jar_name) - 6]
    # The stem already carries the "_scalapb" suffix because that is how the
    # srcjar is named in _scalapb_aspect_impl.
    compiled_jar = without_suffix + "jar"
    return actions.declare_file(compiled_jar, sibling = scalapb_jar)
def _compile_scala(
        ctx,
        scalac,
        label,
        output,
        scalapb_jar,
        deps_java_info,
        implicit_deps):
    """Compile a generated ScalaPB srcjar into `output` and return its JavaInfo.

    Args:
      ctx: aspect context used to declare actions and files.
      scalac: scala compiler from the scala_proto toolchain.
      label: label of the proto target; used to name the manifest/statsfile.
      output: the jar File to produce (declared via _compiled_jar_file).
      scalapb_jar: the *_scalapb.srcjar holding the generated sources.
      deps_java_info: list of JavaInfo from the target's proto deps.
      implicit_deps: list of JavaInfo from the toolchain (scalapb/grpc runtime).
    """
    manifest = ctx.actions.declare_file(
        label.name + "_MANIFEST.MF",
        sibling = scalapb_jar,
    )
    write_manifest_file(ctx.actions, manifest, None)
    statsfile = ctx.actions.declare_file(
        label.name + "_scalac.statsfile",
        sibling = scalapb_jar,
    )
    merged_deps = java_common.merge(deps_java_info + implicit_deps)
    # This only compiles scala, not the ijar, but we don't want the ijar for
    # generated code anyway: any change in the proto generally changes both
    # the interface and the method bodies.
    compile_scala(
        ctx,
        Label("%s-fast" % (label)),
        output,
        manifest,
        statsfile,
        sources = [],
        cjars = merged_deps.compile_jars,
        all_srcjars = depset([scalapb_jar]),
        transitive_compile_jars = merged_deps.transitive_compile_time_jars,
        plugins = [],
        resource_strip_prefix = "",
        resources = [],
        resource_jars = [],
        labels = {},
        in_scalacopts = [],
        print_compile_time = False,
        expect_java_output = False,
        scalac_jvm_flags = [],
        scalac = scalac,
    )
    # Deps are exported at compile time AND runtime: consumers of the
    # generated code need the protobuf/grpc runtime on their classpath too.
    return JavaInfo(
        source_jar = scalapb_jar,
        deps = deps_java_info + implicit_deps,
        runtime_deps = deps_java_info + implicit_deps,
        exports = deps_java_info + implicit_deps,
        output_jar = output,
        compile_jar = output,
    )
def _empty_java_info(deps_java_info, implicit_deps):
    """JavaInfo for an aggregation-only target: just the merged deps, no own jar."""
    return java_common.merge(deps_java_info + implicit_deps)
####
# This is applied to the DAG of proto_librarys reachable from a deps
# or a scalapb_scala_library. Each proto_library will be one scalapb
# invocation assuming it has some sources.
def _scalapb_aspect_impl(target, ctx):
    """Generate and compile ScalaPB sources for one node of the proto deps DAG.

    For a proto_library target: run the toolchain's code generator over its
    direct sources (unless the target is blacklisted or has none) and compile
    the resulting srcjar. For a non-proto (jvm) dependency: pass its JavaInfo
    through untouched. Returns one ScalaPBAspectInfo merging this node with
    the aspect results of its deps.
    """
    deps = [d[ScalaPBAspectInfo].java_info for d in ctx.rule.attr.deps]
    if ProtoInfo not in target:
        # We allow some dependencies which are not protobuf, but instead
        # are jvm deps. This is to enable cases of custom generators which
        # add a needed jvm dependency.
        java_info = target[JavaInfo]
        src_jars = depset()
        outs = depset()
        transitive_ti = merge_proto_infos(
            [
                d[ScalaPBAspectInfo].proto_info
                for d in ctx.rule.attr.deps
            ],
        )
    else:
        target_ti = target[ProtoInfo]
        transitive_ti = merge_proto_infos(
            [
                d[ScalaPBAspectInfo].proto_info
                for d in ctx.rule.attr.deps
            ] + [target_ti],
        )
        # We sort so the action inputs are always in the same order (caching).
        compile_protos = sorted(target_ti.direct_sources)
        transitive_protos = sorted(target_ti.transitive_sources)
        toolchain = ctx.toolchains["@io_bazel_rules_scala//scala_proto:toolchain_type"]
        flags = []
        imps = [j[JavaInfo] for j in toolchain.implicit_compile_deps]
        if toolchain.with_grpc:
            flags.append("grpc")
            imps.extend([j[JavaInfo] for j in toolchain.grpc_deps])
        if toolchain.with_flat_package:
            flags.append("flat_package")
        if toolchain.with_single_line_to_string:
            flags.append("single_line_to_proto_string")
        # This feels rather hacky and odd, but we can't compare the labels to
        # ignore a target easily since the @ and // forms don't compare equal,
        # so we normalize both sides to absolute form:
        # the blacklisted protos in the toolchain are made absolute, so make
        # the local target we are looking at absolute too.
        target_absolute_label = target.label
        if not str(target_absolute_label)[0] == "@":
            target_absolute_label = Label("@%s//%s:%s" % (ctx.workspace_name, target.label.package, target.label.name))
        for lbl in toolchain.blacklisted_protos:
            if(lbl.label == target_absolute_label):
                # Reuse compile_protos as the "should we generate?" flag.
                compile_protos = False
        code_generator = toolchain.code_generator
        if compile_protos:
            scalapb_file = ctx.actions.declare_file(
                target.label.name + "_scalapb.srcjar",
            )
            proto_to_scala_src(
                ctx,
                target.label,
                code_generator,
                compile_protos,
                transitive_protos,
                target_ti.transitive_proto_path.to_list(),
                flags,
                scalapb_file,
            )
            src_jars = depset([scalapb_file])
            output = _compiled_jar_file(ctx.actions, scalapb_file)
            outs = depset([output])
            java_info = _compile_scala(
                ctx,
                toolchain.scalac,
                target.label,
                output,
                scalapb_file,
                deps,
                imps,
            )
        else:
            # This target is only an aggregation target (no sources of its
            # own, or blacklisted): expose merged deps, no generated output.
            src_jars = depset()
            outs = depset()
            java_info = _empty_java_info(deps, imps)
    return [
        ScalaPBAspectInfo(
            src_jars = src_jars,
            output_files = outs,
            proto_info = transitive_ti,
            java_info = java_info,
        ),
    ]
# Propagated along "deps" edges so every transitively reachable proto_library
# (or ScalaPBImport jvm dep) carries a ScalaPBAspectInfo.
scalapb_aspect = aspect(
    implementation = _scalapb_aspect_impl,
    attr_aspects = ["deps"],
    required_aspect_providers = [
        [ProtoInfo],
        [ScalaPBImport],
    ],
    attrs = {
        # NOTE(review): presumably consumed by proto_to_scala_src for code
        # generation — confirm against that helper's implementation.
        "_protoc": attr.label(executable = True, cfg = "host", default = "@com_google_protobuf//:protoc"),
    },
    toolchains = [
        "@io_bazel_rules_scala//scala:toolchain_type",
        "@io_bazel_rules_scala//scala_proto:toolchain_type",
    ],
)
| 31.605263
| 119
| 0.614627
|
load(
"//scala/private:common.bzl",
"write_manifest_file",
)
load("//scala/private:rule_impls.bzl", "compile_scala")
load("//scala_proto/private:proto_to_scala_src.bzl", "proto_to_scala_src")
ScalaPBAspectInfo = provider(fields = [
"proto_info",
"src_jars",
"output_files",
"java_info",
])
ScalaPBImport = provider(fields = [
"java_info",
"proto_info",
])
ScalaPBInfo = provider(fields = [
"aspect_info",
])
def merge_proto_infos(tis):
return struct(
transitive_sources = [t.transitive_sources for t in tis],
)
def merge_scalapb_aspect_info(scalapbs):
return ScalaPBAspectInfo(
src_jars = depset(transitive = [s.src_jars for s in scalapbs]),
output_files = depset(transitive = [s.output_files for s in scalapbs]),
proto_info = merge_proto_infos([s.proto_info for s in scalapbs]),
java_info = java_common.merge([s.java_info for s in scalapbs]),
)
def _compiled_jar_file(actions, scalapb_jar):
scalapb_jar_name = scalapb_jar.basename
without_suffix = scalapb_jar_name[0:len(scalapb_jar_name) - 6]
compiled_jar = without_suffix + "jar"
return actions.declare_file(compiled_jar, sibling = scalapb_jar)
def _compile_scala(
ctx,
scalac,
label,
output,
scalapb_jar,
deps_java_info,
implicit_deps):
manifest = ctx.actions.declare_file(
label.name + "_MANIFEST.MF",
sibling = scalapb_jar,
)
write_manifest_file(ctx.actions, manifest, None)
statsfile = ctx.actions.declare_file(
label.name + "_scalac.statsfile",
sibling = scalapb_jar,
)
merged_deps = java_common.merge(deps_java_info + implicit_deps)
# want the ijar for generated code anyway: any change
# in the proto generally will change the interface and
# method bodies
compile_scala(
ctx,
Label("%s-fast" % (label)),
output,
manifest,
statsfile,
sources = [],
cjars = merged_deps.compile_jars,
all_srcjars = depset([scalapb_jar]),
transitive_compile_jars = merged_deps.transitive_compile_time_jars,
plugins = [],
resource_strip_prefix = "",
resources = [],
resource_jars = [],
labels = {},
in_scalacopts = [],
print_compile_time = False,
expect_java_output = False,
scalac_jvm_flags = [],
scalac = scalac,
)
return JavaInfo(
source_jar = scalapb_jar,
deps = deps_java_info + implicit_deps,
runtime_deps = deps_java_info + implicit_deps,
exports = deps_java_info + implicit_deps,
output_jar = output,
compile_jar = output,
)
def _empty_java_info(deps_java_info, implicit_deps):
return java_common.merge(deps_java_info + implicit_deps)
####
# This is applied to the DAG of proto_librarys reachable from a deps
# or a scalapb_scala_library. Each proto_library will be one scalapb
# invocation assuming it has some sources.
def _scalapb_aspect_impl(target, ctx):
deps = [d[ScalaPBAspectInfo].java_info for d in ctx.rule.attr.deps]
if ProtoInfo not in target:
# We allow some dependencies which are not protobuf, but instead
# are jvm deps. This is to enable cases of custom generators which
# add a needed jvm dependency.
java_info = target[JavaInfo]
src_jars = depset()
outs = depset()
transitive_ti = merge_proto_infos(
[
d[ScalaPBAspectInfo].proto_info
for d in ctx.rule.attr.deps
],
)
else:
target_ti = target[ProtoInfo]
transitive_ti = merge_proto_infos(
[
d[ScalaPBAspectInfo].proto_info
for d in ctx.rule.attr.deps
] + [target_ti],
)
# we sort so the inputs are always the same for caching
compile_protos = sorted(target_ti.direct_sources)
transitive_protos = sorted(target_ti.transitive_sources)
toolchain = ctx.toolchains["@io_bazel_rules_scala//scala_proto:toolchain_type"]
flags = []
imps = [j[JavaInfo] for j in toolchain.implicit_compile_deps]
if toolchain.with_grpc:
flags.append("grpc")
imps.extend([j[JavaInfo] for j in toolchain.grpc_deps])
if toolchain.with_flat_package:
flags.append("flat_package")
if toolchain.with_single_line_to_string:
flags.append("single_line_to_proto_string")
# This feels rather hacky and odd, but we can't compare the labels to ignore a target easily
target_absolute_label = target.label
if not str(target_absolute_label)[0] == "@":
target_absolute_label = Label("@%s//%s:%s" % (ctx.workspace_name, target.label.package, target.label.name))
for lbl in toolchain.blacklisted_protos:
if(lbl.label == target_absolute_label):
compile_protos = False
code_generator = toolchain.code_generator
if compile_protos:
scalapb_file = ctx.actions.declare_file(
target.label.name + "_scalapb.srcjar",
)
proto_to_scala_src(
ctx,
target.label,
code_generator,
compile_protos,
transitive_protos,
target_ti.transitive_proto_path.to_list(),
flags,
scalapb_file,
)
src_jars = depset([scalapb_file])
output = _compiled_jar_file(ctx.actions, scalapb_file)
outs = depset([output])
java_info = _compile_scala(
ctx,
toolchain.scalac,
target.label,
output,
scalapb_file,
deps,
imps,
)
else:
src_jars = depset()
outs = depset()
java_info = _empty_java_info(deps, imps)
return [
ScalaPBAspectInfo(
src_jars = src_jars,
output_files = outs,
proto_info = transitive_ti,
java_info = java_info,
),
]
scalapb_aspect = aspect(
implementation = _scalapb_aspect_impl,
attr_aspects = ["deps"],
required_aspect_providers = [
[ProtoInfo],
[ScalaPBImport],
],
attrs = {
"_protoc": attr.label(executable = True, cfg = "host", default = "@com_google_protobuf//:protoc"),
},
toolchains = [
"@io_bazel_rules_scala//scala:toolchain_type",
"@io_bazel_rules_scala//scala_proto:toolchain_type",
],
)
| true
| true
|
f7167381295d7b56237bfdcc1b5bedebfca834ed
| 477
|
py
|
Python
|
mac/pyobjc-framework-Quartz/PyObjCTest/test_PDFAnnotationLine.py
|
albertz/music-player
|
d23586f5bf657cbaea8147223be7814d117ae73d
|
[
"BSD-2-Clause"
] | 132
|
2015-01-01T10:02:42.000Z
|
2022-03-09T12:51:01.000Z
|
mac/pyobjc-framework-Quartz/PyObjCTest/test_PDFAnnotationLine.py
|
mba811/music-player
|
7998986b34cfda2244ef622adefb839331b81a81
|
[
"BSD-2-Clause"
] | 6
|
2015-01-06T08:23:19.000Z
|
2019-03-14T12:22:06.000Z
|
mac/pyobjc-framework-Quartz/PyObjCTest/test_PDFAnnotationLine.py
|
mba811/music-player
|
7998986b34cfda2244ef622adefb839331b81a81
|
[
"BSD-2-Clause"
] | 27
|
2015-02-23T11:51:43.000Z
|
2022-03-07T02:34:18.000Z
|
from PyObjCTools.TestSupport import *
from Quartz.PDFKit import *
class TestPDFAnnotationLine (TestCase):
    """Sanity checks for the PDFKit line-ending style constants."""

    def testConstants(self):
        # The kPDFLineStyle* constants form a contiguous 0-based enumeration.
        expected_values = (
            (kPDFLineStyleNone, 0),
            (kPDFLineStyleSquare, 1),
            (kPDFLineStyleCircle, 2),
            (kPDFLineStyleDiamond, 3),
            (kPDFLineStyleOpenArrow, 4),
            (kPDFLineStyleClosedArrow, 5),
        )
        for constant, value in expected_values:
            self.assertEqual(constant, value)
# Run the PyObjC test suite when executed directly.
if __name__ == "__main__":
    main()
| 29.8125
| 53
| 0.727463
|
from PyObjCTools.TestSupport import *
from Quartz.PDFKit import *
class TestPDFAnnotationLine (TestCase):
def testConstants(self):
self.assertEqual(kPDFLineStyleNone, 0)
self.assertEqual(kPDFLineStyleSquare, 1)
self.assertEqual(kPDFLineStyleCircle, 2)
self.assertEqual(kPDFLineStyleDiamond, 3)
self.assertEqual(kPDFLineStyleOpenArrow, 4)
self.assertEqual(kPDFLineStyleClosedArrow, 5)
if __name__ == "__main__":
main()
| true
| true
|
f71673edb368999b65c6db4d94b46852b8198252
| 886
|
py
|
Python
|
setup.py
|
olivier-m/django-graffle
|
e2049b048e0c7944022e99800ca8f241c7d849ba
|
[
"BSD-3-Clause"
] | 1
|
2015-10-21T21:09:39.000Z
|
2015-10-21T21:09:39.000Z
|
setup.py
|
olivier-m/django-graffle
|
e2049b048e0c7944022e99800ca8f241c7d849ba
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
olivier-m/django-graffle
|
e2049b048e0c7944022e99800ca8f241c7d849ba
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is part of Django graffle released under the BSD license.
# See the LICENSE for more information.
from setuptools import setup, find_packages

version = '0.5'

# Ship the top-level package plus every sub-package discovered beneath it.
packages = ['django_graffle'] + ['django_graffle.%s' % x for x in find_packages('django_graffle',)]

setup(
    name='django_graffle',
    version=version,
    description='Graph your django views with Omnigraffle.',
    author='Olivier Meunier',
    author_email='om@neokraft.net',
    url='http://bitbucket.org/cedarlab/django-graffle/',
    packages=packages,
    classifiers=[
        # BUG FIX: this previously interpolated the version string, yielding
        # 'Development Status :: 0.5' — not a valid trove classifier.
        # Classifiers must come from PyPI's fixed list; pre-1.0 maps to Beta.
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Utilities'
    ],
)
| 30.551724
| 99
| 0.654628
|
from setuptools import setup, find_packages
version = '0.5'
packages = ['django_graffle'] + ['django_graffle.%s' % x for x in find_packages('django_graffle',)]
setup(
name='django_graffle',
version=version,
description='Graph your django views with Omnigraffle.',
author='Olivier Meunier',
author_email='om@neokraft.net',
url='http://bitbucket.org/cedarlab/django-graffle/',
packages=packages,
classifiers=[
'Development Status :: %s' % version,
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
| true
| true
|
f716742ca70ffea5e8234ece3da288effc4a72ca
| 5,631
|
py
|
Python
|
tests/test_metrics/test_issue_metrics.py
|
Joshinn-io/augur
|
6759204dbae2ebb992dcd8c1f05408b9206c4ba3
|
[
"MIT"
] | 443
|
2018-09-19T00:30:36.000Z
|
2022-03-31T11:39:13.000Z
|
tests/test_metrics/test_issue_metrics.py
|
Romelo-S/augur
|
e9410887f58af2b26c350edf08e3f70ff783bdc5
|
[
"MIT"
] | 613
|
2018-09-19T18:31:13.000Z
|
2022-03-31T05:41:16.000Z
|
tests/test_metrics/test_issue_metrics.py
|
Romelo-S/augur
|
e9410887f58af2b26c350edf08e3f70ff783bdc5
|
[
"MIT"
] | 764
|
2018-10-17T01:08:10.000Z
|
2022-03-31T05:25:01.000Z
|
#SPDX-License-Identifier: MIT
import pytest
import pandas as pd
def test_issues_new(metrics):
    """New-issue counts are positive for repo, repo-group, and bounded queries."""
    # Single repo within a repo group.
    repo_df = metrics.issues_new(1, 1, period='year')
    assert repo_df.iloc[0]['issues'] > 0
    # Whole repo group.
    group_df = metrics.issues_new(10, period='year')
    assert group_df.iloc[1]['issues'] > 0
    # Date-bounded windows, weekly and monthly.
    weekly = metrics.issues_new(10, 25430, period='week', begin_date='2017',
                                end_date='2017-10')
    assert weekly.iloc[1]['issues'] > 0
    monthly = metrics.issues_new(10, period='month', begin_date='2017-05',
                                 end_date='2018')
    assert monthly.iloc[2]['issues'] > 0
def test_issues_active(metrics):
    """Active-issue counts are positive for repo, repo-group, and bounded queries."""
    repo_df = metrics.issues_active(1, 1, period='year')
    assert repo_df.iloc[0]['issues'] > 0
    group_df = metrics.issues_active(10, period='year')
    assert group_df.iloc[0]['issues'] > 0
    monthly = metrics.issues_active(10, 25430, period='month', begin_date='2020-02',
                                    end_date='2020-03')
    assert monthly.iloc[0]['issues'] > 0
    weekly = metrics.issues_active(10, period='week', begin_date='2020-01',
                                   end_date='2020-03')
    assert weekly.iloc[0]['issues'] > 0

def test_issues_closed(metrics):
    """Closed-issue counts are positive for repo, repo-group, and bounded queries."""
    repo_df = metrics.issues_closed(10, 25430, period='year')
    assert repo_df.iloc[0]['issues'] > 0
    group_df = metrics.issues_closed(10, period='year')
    assert group_df.iloc[0]['issues'] > 0
    weekly = metrics.issues_closed(10, 25430, period='week', begin_date='2019',
                                   end_date='2020-02')
    assert weekly.iloc[0]['issues'] > 0
    monthly = metrics.issues_closed(10, period='month', begin_date='2018-05',
                                    end_date='2019-08-15')
    assert monthly.iloc[0]['issues'] > 0
def test_issue_duration(metrics):
    """Both repo and repo-group lookups report the same known duration."""
    expected = '20 days 03:08:22.000000000'
    assert metrics.issue_duration(10, 25430).iloc[0]['duration'] == expected
    assert metrics.issue_duration(10).iloc[0]['duration'] == expected

def test_issue_participants(metrics):
    """Participant counts are positive at repo and repo-group level."""
    repo_df = metrics.issue_participants(10, 25430)
    assert repo_df.iloc[0]['participants'] > 0
    group_df = metrics.issue_participants(10)
    assert group_df.iloc[0]['participants'] > 0

def test_issue_throughput(metrics):
    """Throughput is non-negative at repo and repo-group level."""
    repo_df = metrics.issue_throughput(10, 25430)
    assert repo_df.iloc[0]['throughput'] >= 0
    group_df = metrics.issue_throughput(10)
    assert group_df.iloc[0]['throughput'] >= 0
def test_issue_backlog(metrics):
    """Backlog counts are positive at repo and repo-group level."""
    repo_df = metrics.issue_backlog(10, 25430)
    assert repo_df.iloc[0]['issue_backlog'] > 0
    group_df = metrics.issue_backlog(10)
    assert group_df.iloc[0]['issue_backlog'] > 0

def test_issues_first_time_closed(metrics):
    """First-time-closed frames contain the expected yearly timestamps."""
    def contains(frame, stamp):
        # True when any cell of the frame equals the given timestamp.
        return frame.isin([stamp]).any().any()

    assert contains(metrics.issues_first_time_closed(10, repo_id=25430, period='year'),
                    pd.Timestamp('2019', tz='UTC'))
    assert contains(metrics.issues_first_time_closed(10, period='year'),
                    pd.Timestamp('2020', tz='UTC'))
    # Explicit begin/end bounds, with and without a repo_id.
    bounds = dict(period='year', begin_date='2019-1-1 00:00:00',
                  end_date='2019-12-31 23:59:59')
    assert contains(metrics.issues_first_time_closed(10, **bounds),
                    pd.Timestamp('2019-01-01 00:00:00', tz='UTC'))
    assert contains(metrics.issues_first_time_closed(10, repo_id=25430, **bounds),
                    pd.Timestamp('2019-01-01 00:00:00', tz='UTC'))
def test_open_issues_count(metrics):
    """Open-issue counts are positive at repo and repo-group level."""
    repo_df = metrics.open_issues_count(10, 25430)
    assert repo_df.iloc[0]['open_count'] > 0
    group_df = metrics.open_issues_count(10)
    assert group_df.iloc[0]['open_count'] > 0

def test_closed_issues_count(metrics):
    """Closed-issue counts are positive at repo and repo-group level."""
    repo_df = metrics.closed_issues_count(10, 25430)
    assert repo_df.iloc[0]['closed_count'] > 0
    group_df = metrics.closed_issues_count(10)
    assert group_df.iloc[0]['closed_count'] > 0

def test_issues_open_age(metrics):
    """Open-issue ages are positive at repo-group and repo level."""
    group_df = metrics.issues_open_age(10)
    assert group_df.iloc[0]['open_date'] > 0
    repo_df = metrics.issues_open_age(10, 25430)
    assert repo_df.iloc[0]['open_date'] > 0

def test_issues_closed_resolution_duration(metrics):
    """Resolution durations are non-negative at repo-group and repo level."""
    group_df = metrics.issues_closed_resolution_duration(10)
    assert group_df.iloc[0]['diffdate'] >= 0
    repo_df = metrics.issues_closed_resolution_duration(10, 25430)
    assert repo_df.iloc[0]['diffdate'] >= 0
def test_average_issue_resolution_time(metrics):
    """Known repo/group rows carry their expected average resolution times."""
    repo_df = metrics.average_issue_resolution_time(10, 25430)
    assert repo_df.isin(['augur', '61 days 12:20:43.791667']).any().any()
    group_df = metrics.average_issue_resolution_time(10)
    assert group_df.isin(['grimoirelab', ' 67 days 22:41:55.260417']).any().any()

def test_issues_maintainer_response_duration(metrics):
    """Maintainer response durations are positive for repo and repo-group."""
    repo_df = metrics.issues_maintainer_response_duration(10, 25430)
    assert repo_df.iloc[0].average_days_comment > 0
    group_df = metrics.issues_maintainer_response_duration(10)
    assert group_df.iloc[0].average_days_comment > 0
    # The original test exercised the repo-level call twice; keep both.
    repo_df_again = metrics.issues_maintainer_response_duration(10, 25430)
    assert repo_df_again.iloc[0].average_days_comment > 0

def test_issue_comments_mean(metrics):
    """Mean comment counts are non-empty for every supported call shape."""
    for args, kwargs in (
        ((10,), {}),
        ((10, 25430), {}),
        ((10,), {'group_by': 'year'}),
        ((10, 25430), {'group_by': 'year'}),
    ):
        assert metrics.issue_comments_mean(*args, **kwargs).any().any()

def test_issue_comments_mean_std(metrics):
    """Mean/std comment stats are non-empty for every supported call shape."""
    for args, kwargs in (
        ((10,), {}),
        ((10, 25430), {}),
        ((10,), {'group_by': 'year'}),
        ((10, 25430), {'group_by': 'year'}),
    ):
        assert metrics.issue_comments_mean_std(*args, **kwargs).any().any()
| 38.834483
| 142
| 0.672527
|
import pytest
import pandas as pd
def test_issues_new(metrics):
assert metrics.issues_new(1, 1 , period='year').iloc[0]['issues'] > 0
assert metrics.issues_new(10, period='year').iloc[1]['issues'] > 0
assert metrics.issues_new(10, 25430, period='week', begin_date='2017',
end_date='2017-10').iloc[1]['issues'] > 0
assert metrics.issues_new(10, period='month', begin_date='2017-05',
end_date='2018').iloc[2]['issues'] > 0
def test_issues_active(metrics):
assert metrics.issues_active(1, 1, period='year').iloc[0]['issues'] > 0
assert metrics.issues_active(10, period='year').iloc[0]['issues'] > 0
assert metrics.issues_active(10, 25430, period='month', begin_date='2020-02',
end_date='2020-03').iloc[0]['issues'] > 0
assert metrics.issues_active(10, period='week', begin_date='2020-01',
end_date='2020-03') .iloc[0]['issues'] > 0
def test_issues_closed(metrics):
assert metrics.issues_closed(10, 25430, period='year').iloc[0]['issues'] > 0
assert metrics.issues_closed(10, period='year').iloc[0]['issues'] > 0
assert metrics.issues_closed(10, 25430, period='week', begin_date='2019',
end_date='2020-02').iloc[0]['issues'] > 0
assert metrics.issues_closed(10, period='month', begin_date='2018-05',
end_date='2019-08-15').iloc[0]['issues'] > 0
def test_issue_duration(metrics):
assert metrics.issue_duration(10, 25430).iloc[0]['duration'] == '20 days 03:08:22.000000000'
assert metrics.issue_duration(10).iloc[0]['duration'] == '20 days 03:08:22.000000000'
def test_issue_participants(metrics):
assert metrics.issue_participants(10, 25430).iloc[0]['participants'] > 0
assert metrics.issue_participants(10).iloc[0]['participants'] > 0
def test_issue_throughput(metrics):
assert metrics.issue_throughput(10, 25430).iloc[0]['throughput'] >= 0
assert metrics.issue_throughput(10).iloc[0]['throughput'] >= 0
def test_issue_backlog(metrics):
assert metrics.issue_backlog(10, 25430).iloc[0]['issue_backlog'] > 0
assert metrics.issue_backlog(10).iloc[0]['issue_backlog'] > 0
def test_issues_first_time_closed(metrics):
assert metrics.issues_first_time_closed(10, repo_id=25430, period='year').isin(
[pd.Timestamp('2019', tz='UTC')]).any().any()
assert metrics.issues_first_time_closed(10, period='year').isin(
[pd.Timestamp('2020', tz='UTC')]).any().any()
assert metrics.issues_first_time_closed(10, period='year', begin_date='2019-1-1 00:00:00',
end_date='2019-12-31 23:59:59').isin([pd.Timestamp('2019-01-01 00:00:00', tz='UTC')]).any().any()
assert metrics.issues_first_time_closed(10, repo_id=25430, period='year', begin_date='2019-1-1 00:00:00',
end_date='2019-12-31 23:59:59').isin([pd.Timestamp('2019-01-01 00:00:00', tz='UTC')]).any().any()
def test_open_issues_count(metrics):
assert metrics.open_issues_count(10, 25430).iloc[0]['open_count'] > 0
assert metrics.open_issues_count(10).iloc[0]['open_count'] > 0
def test_closed_issues_count(metrics):
assert metrics.closed_issues_count(10, 25430).iloc[0]['closed_count'] > 0
assert metrics.closed_issues_count(10).iloc[0]['closed_count'] > 0
def test_issues_open_age(metrics):
assert metrics.issues_open_age(10).iloc[0]['open_date'] > 0
assert metrics.issues_open_age(10, 25430).iloc[0]['open_date'] > 0
def test_issues_closed_resolution_duration(metrics):
assert metrics.issues_closed_resolution_duration(10).iloc[0]['diffdate'] >= 0
assert metrics.issues_closed_resolution_duration(10, 25430).iloc[0]['diffdate'] >= 0
def test_average_issue_resolution_time(metrics):
assert metrics.average_issue_resolution_time(10, 25430).isin(
['augur', '61 days 12:20:43.791667']).any().any()
assert metrics.average_issue_resolution_time(10).isin(
['grimoirelab', ' 67 days 22:41:55.260417']).any().any()
def test_issues_maintainer_response_duration(metrics):
assert metrics.issues_maintainer_response_duration(10, 25430).iloc[0].average_days_comment > 0
assert metrics.issues_maintainer_response_duration(10).iloc[0].average_days_comment > 0
assert metrics.issues_maintainer_response_duration(10, 25430).iloc[0].average_days_comment > 0
def test_issue_comments_mean(metrics):
assert metrics.issue_comments_mean(10).any().any()
assert metrics.issue_comments_mean(10, 25430).any().any()
assert metrics.issue_comments_mean(10, group_by='year').any().any()
assert metrics.issue_comments_mean(10, 25430, group_by='year').any().any()
def test_issue_comments_mean_std(metrics):
assert metrics.issue_comments_mean_std(10).any().any()
assert metrics.issue_comments_mean_std(10, 25430).any().any()
assert metrics.issue_comments_mean_std(10, group_by='year').any().any()
assert metrics.issue_comments_mean_std(10, 25430, group_by='year').any().any()
| true
| true
|
f716748088838174bfd7244706be4ea05367a0b3
| 5,750
|
py
|
Python
|
selfdrive/car/tesla/carcontroller.py
|
openmechanics/openpilot-tesla
|
9019ec0320d3a3973e6ff26e91c51ee267ee0c1b
|
[
"MIT"
] | 1
|
2018-06-15T04:05:00.000Z
|
2018-06-15T04:05:00.000Z
|
selfdrive/car/tumbler/carcontroller.py
|
openmechanics/fakeport
|
52118b05db0ba216133ddcad5c732baa0efb7799
|
[
"MIT"
] | null | null | null |
selfdrive/car/tumbler/carcontroller.py
|
openmechanics/fakeport
|
52118b05db0ba216133ddcad5c732baa0efb7799
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
import os
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.controls.lib.drive_helpers import rate_limit
from common.numpy_fast import clip
from . import teslacan
from .values import AH
from common.fingerprints import TESLA as CAR
from selfdrive.can.packer import CANPacker
def actuator_hystereses(brake, braking, brake_steady, v_ego, car_fingerprint):
  """Apply on/off hysteresis plus a dead-band filter to the brake command.

  Returns (brake, braking, brake_steady). v_ego and car_fingerprint are
  currently unused but kept for interface stability.
  """
  # Thresholds (TODO: move these to VehicleParams): engage above 0.02,
  # release below 0.005, ignore oscillations smaller than 0.01.
  on_threshold = 0.02
  off_threshold = 0.005
  dead_band = 0.01

  # Hysteresis against brake "blinking": a command must exceed the engage
  # threshold to start braking, and is zeroed once below the release threshold.
  too_weak_to_engage = brake < on_threshold and not braking
  if too_weak_to_engage or brake < off_threshold:
    brake = 0.
  braking = brake > 0.

  # Dead-band: only move the steady value when the command escapes the
  # +/- dead_band window around it; the output tracks the steady value.
  if brake == 0.:
    brake_steady = 0.
  elif brake > brake_steady + dead_band:
    brake_steady = brake - dead_band
  elif brake < brake_steady - dead_band:
    brake_steady = brake + dead_band
  return brake_steady, braking, brake_steady
def process_hud_alert(hud_alert):
  """Split a HUD alert into its (fcw_display, steer_required, acc_alert) codes."""
  if hud_alert == AH.NONE:
    # Nothing to display.
    return 0, 0, 0
  # hud_alert[1] carries the display code for whichever slot is active.
  if hud_alert == AH.FCW:
    return hud_alert[1], 0, 0
  if hud_alert == AH.STEER:
    return 0, hud_alert[1], 0
  # Anything else is treated as an ACC alert.
  return 0, 0, hud_alert[1]
# Everything shown on the dash: cruise state, car/lane icons, and the
# beep/chime/alert codes. CarController.update validates each field is an
# int in [0, 256) before sending.
HUDData = namedtuple("HUDData",
                     ["pcm_accel", "v_cruise", "mini_car", "car", "X4",
                      "lanes", "beep", "chime", "fcw", "acc_alert", "steer_required"])
class CarController(object):
  """Builds and sends the CAN messages (steering, HUD) each control frame.

  NOTE: this file is Python 2 (print statement, integer division below).
  """

  def __init__(self, dbc_name, enable_camera=True):
    # Brake hysteresis state fed back into actuator_hystereses each frame.
    self.braking = False
    self.brake_steady = 0.
    self.brake_last = 0.
    self.enable_camera = enable_camera
    self.packer = CANPacker(dbc_name)

  def update(self, sendcan, enabled, CS, frame, actuators, \
             pcm_speed, pcm_override, pcm_cancel_cmd, pcm_accel, \
             hud_v_cruise, hud_show_lanes, hud_show_car, hud_alert, \
             snd_beep, snd_chime):
    """ Controls thread """

    ## Todo add code to detect Tesla DAS (camera) and go into listen and record mode only
    if not self.enable_camera:
      return

    # *** apply brake hysteresis ***
    brake, self.braking, self.brake_steady = actuator_hystereses(actuators.brake, self.braking, self.brake_steady, CS.v_ego, CS.CP.carFingerprint)

    # *** no output if not enabled ***
    if not enabled and CS.pcm_acc_status:
      # send pcm acc cancel cmd if drive is disabled but pcm is still on, or if the system can't be activated
      pcm_cancel_cmd = True

    # *** rate limit after the enable check ***
    self.brake_last = rate_limit(brake, self.brake_last, -2., 1./100)

    # vehicle hud display, wait for one update from 10Hz 0x304 msg
    if hud_show_lanes:
      hud_lanes = 1
    else:
      hud_lanes = 0

    # TODO: factor this out better
    # hud_car: 0 = disengaged, 1 = engaged without lead, 2 = engaged with lead
    if enabled:
      if hud_show_car:
        hud_car = 2
      else:
        hud_car = 1
    else:
      hud_car = 0

    #print chime, alert_id, hud_alert
    fcw_display, steer_required, acc_alert = process_hud_alert(hud_alert)

    hud = HUDData(int(pcm_accel), int(round(hud_v_cruise)), 1, hud_car,
                  0xc1, hud_lanes, int(snd_beep), snd_chime, fcw_display, acc_alert, steer_required)

    # Every HUD field must fit in one CAN byte.
    if not all(isinstance(x, int) and 0 <= x < 256 for x in hud):
      print "INVALID HUD", hud
      # NOTE(review): HUDData has 11 fields but only 10 values are supplied
      # here — this fallback would raise TypeError if it is ever reached.
      hud = HUDData(0xc6, 255, 64, 0xc0, 209, 0x40, 0, 0, 0, 0)

    # **** process the car messages ****

    # *** compute control surfaces ***
    BRAKE_MAX = 1024/4
    STEER_MAX = 0x4000 #16384

    # steer torque is converted back to CAN reference (positive when steering right)
    apply_gas = clip(actuators.gas, 0., 1.)
    apply_brake = int(clip(self.brake_last * BRAKE_MAX, 0, BRAKE_MAX - 1))
    apply_steer = int(clip(-actuators.steer * STEER_MAX, -STEER_MAX, STEER_MAX))

    # any other cp.vl[0x18F]['STEER_STATUS'] is common and can happen during user override. sending 0 torque to avoid EPS sending error 5
    if CS.steer_not_allowed:
      apply_steer = 0

    # Send CAN commands.
    can_sends = []

    # Send steering command (every frame, idx is a 2-bit rolling counter).
    idx = frame % 4
    can_sends.append(teslacan.create_steering_control(self.packer, enabled ,apply_steer, CS.CP.carFingerprint, idx))

    # Gas/brake and dashboard-UI commands are intentionally disabled below
    # (steering-only port); kept for reference.
    # Send gas and brake commands.
    #if (frame % 2) == 0:
    #  idx = (frame / 2) % 4
    #  can_sends.append(
    #    teslacan.create_brake_command(self.packer, apply_brake, pcm_override,
    #      pcm_cancel_cmd, hud.chime, hud.fcw, idx))
    #  if not CS.brake_only:
    #    # send exactly zero if apply_gas is zero. Interceptor will send the max between read value and apply_gas.
    #    # This prevents unexpected pedal range rescaling
    #    can_sends.append(teslacan.create_gas_command(self.packer, apply_gas, idx))
    #
    # Send dashboard UI commands.
    #if (frame % 10) == 0:
    #  idx = (frame/10) % 4
    #  can_sends.extend(teslacan.create_ui_commands(self.packer, pcm_speed, hud, CS.CP.carFingerprint, idx))
    # Radar commands are likewise disabled; only the cadence logic remains.
    radar_send_step = 5
    if (frame % radar_send_step) == 0:
      idx = (frame/radar_send_step) % 4
      #print "Steer command", apply_steer
      # can_sends.extend(teslacan.create_radar_commands(CS.v_ego, CS.CP.carFingerprint, idx))

    sendcan.send(can_list_to_can_capnp(can_sends, msgtype='sendcan').to_bytes())
| 36.392405
| 146
| 0.670435
|
from collections import namedtuple
import os
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.controls.lib.drive_helpers import rate_limit
from common.numpy_fast import clip
from . import teslacan
from .values import AH
from common.fingerprints import TESLA as CAR
from selfdrive.can.packer import CANPacker
def actuator_hystereses(brake, braking, brake_steady, v_ego, car_fingerprint):
brake_hyst_on = 0.02
brake_hyst_off = 0.005
brake_hyst_gap = 0.01
#*** histeresys logic to avoid brake blinking. go above 0.1 to trigger
if (brake < brake_hyst_on and not braking) or brake < brake_hyst_off:
brake = 0.
braking = brake > 0.
# for small brake oscillations within brake_hyst_gap, don't change the brake command
if brake == 0.:
brake_steady = 0.
elif brake > brake_steady + brake_hyst_gap:
brake_steady = brake - brake_hyst_gap
elif brake < brake_steady - brake_hyst_gap:
brake_steady = brake + brake_hyst_gap
brake = brake_steady
return brake, braking, brake_steady
def process_hud_alert(hud_alert):
fcw_display = 0
steer_required = 0
acc_alert = 0
if hud_alert == AH.NONE:
pass
elif hud_alert == AH.FCW:
fcw_display = hud_alert[1]
elif hud_alert == AH.STEER:
steer_required = hud_alert[1]
else:
acc_alert = hud_alert[1]
return fcw_display, steer_required, acc_alert
HUDData = namedtuple("HUDData",
["pcm_accel", "v_cruise", "mini_car", "car", "X4",
"lanes", "beep", "chime", "fcw", "acc_alert", "steer_required"])
class CarController(object):
  """Builds and sends actuator/HUD CAN commands each control frame.

  Python 2 code (note the ``print`` statement below).  Relies on project
  helpers not visible here: CANPacker, rate_limit, clip, teslacan,
  can_list_to_can_capnp.
  """

  def __init__(self, dbc_name, enable_camera=True):
    # Brake hysteresis state carried between update() calls.
    self.braking = False
    self.brake_steady = 0.
    self.brake_last = 0.
    self.enable_camera = enable_camera
    self.packer = CANPacker(dbc_name)

  def update(self, sendcan, enabled, CS, frame, actuators, \
             pcm_speed, pcm_override, pcm_cancel_cmd, pcm_accel, \
             hud_v_cruise, hud_show_lanes, hud_show_car, hud_alert, \
             snd_beep, snd_chime):
    """ Controls thread """

    # NOTE(review): this assignment looks truncated.  actuator_hystereses
    # returns (brake, braking, brake_steady); as written, the local 'brake'
    # used in the rate_limit call below is undefined and the hysteresis state
    # is never stored back on self.  Expected:
    #   brake, self.braking, self.brake_steady = actuator_hystereses(...)
    # Confirm against the upstream source before changing.
    ke_steady = actuator_hystereses(actuators.brake, self.braking, self.brake_steady, CS.v_ego, CS.CP.carFingerprint)

    # Ask the PCM to disengage if we are not enabled while it still is.
    if not enabled and CS.pcm_acc_status:
      pcm_cancel_cmd = True

    # *** rate limit after the enable check ***
    self.brake_last = rate_limit(brake, self.brake_last, -2., 1./100)

    # vehicle hud display, wait for one update from 10Hz 0x304 msg
    if hud_show_lanes:
      hud_lanes = 1
    else:
      hud_lanes = 0

    # TODO: factor this out better
    if enabled:
      if hud_show_car:
        hud_car = 2
      else:
        hud_car = 1
    else:
      hud_car = 0

    #print chime, alert_id, hud_alert
    fcw_display, steer_required, acc_alert = process_hud_alert(hud_alert)

    hud = HUDData(int(pcm_accel), int(round(hud_v_cruise)), 1, hud_car,
                  0xc1, hud_lanes, int(snd_beep), snd_chime, fcw_display, acc_alert, steer_required)

    # All HUD fields must fit in one CAN byte; fall back to a safe default
    # payload otherwise.
    # NOTE(review): the fallback below passes 10 positional values while
    # HUDData declares 11 fields, so it would raise TypeError if ever hit.
    # Likely copied from a port with a 10-field HUDData -- verify.
    if not all(isinstance(x, int) and 0 <= x < 256 for x in hud):
      print "INVALID HUD", hud
      hud = HUDData(0xc6, 255, 64, 0xc0, 209, 0x40, 0, 0, 0, 0)

    # **** process the car messages ****

    # *** compute control surfaces ***
    BRAKE_MAX = 1024/4
    STEER_MAX = 0x4000 #16384

    # steer torque is converted back to CAN reference (positive when steering right)
    apply_gas = clip(actuators.gas, 0., 1.)
    apply_brake = int(clip(self.brake_last * BRAKE_MAX, 0, BRAKE_MAX - 1))
    apply_steer = int(clip(-actuators.steer * STEER_MAX, -STEER_MAX, STEER_MAX))

    # any other cp.vl[0x18F]['STEER_STATUS'] is common and can happen during user override. sending 0 torque to avoid EPS sending error 5
    if CS.steer_not_allowed:
      apply_steer = 0

    # Send CAN commands.
    can_sends = []

    # Send steering command.
    idx = frame % 4
    can_sends.append(teslacan.create_steering_control(self.packer, enabled ,apply_steer, CS.CP.carFingerprint, idx))

    # Send gas and brake commands.
    #if (frame % 2) == 0:
    #  idx = (frame / 2) % 4
    #  can_sends.append(
    #    teslacan.create_brake_command(self.packer, apply_brake, pcm_override,
    #      pcm_cancel_cmd, hud.chime, hud.fcw, idx))
    #  if not CS.brake_only:
    #    # send exactly zero if apply_gas is zero. Interceptor will send the max between read value and apply_gas.
    #    # This prevents unexpected pedal range rescaling
    #    can_sends.append(teslacan.create_gas_command(self.packer, apply_gas, idx))
    #

    # Send dashboard UI commands.
    #if (frame % 10) == 0:
    #  idx = (frame/10) % 4
    #  can_sends.extend(teslacan.create_ui_commands(self.packer, pcm_speed, hud, CS.CP.carFingerprint, idx))

    # Radar messages are rate-limited to every 5th frame (currently disabled).
    radar_send_step = 5
    if (frame % radar_send_step) == 0:
      idx = (frame/radar_send_step) % 4
      #print "Steer command", apply_steer
      # can_sends.extend(teslacan.create_radar_commands(CS.v_ego, CS.CP.carFingerprint, idx))

    sendcan.send(can_list_to_can_capnp(can_sends, msgtype='sendcan').to_bytes())
| false
| true
|
f71676329ad1aa128a74b82803791f78a3149af3
| 1,831
|
py
|
Python
|
pydescriptors/helpers.py
|
c-martinez/compactness
|
679a1644e0cd3ded278e9917efe171b5e89fc780
|
[
"Apache-2.0"
] | 9
|
2017-02-15T10:44:17.000Z
|
2022-03-05T10:14:21.000Z
|
pydescriptors/helpers.py
|
c-martinez/compactness
|
679a1644e0cd3ded278e9917efe171b5e89fc780
|
[
"Apache-2.0"
] | 1
|
2021-11-22T02:31:37.000Z
|
2021-11-22T10:33:39.000Z
|
pydescriptors/helpers.py
|
c-martinez/compactness
|
679a1644e0cd3ded278e9917efe171b5e89fc780
|
[
"Apache-2.0"
] | 4
|
2019-04-17T02:28:01.000Z
|
2022-02-15T02:03:38.000Z
|
import numpy as _np
from .moments import immoment3D as _immoment3D
def getSphere(side):
    """Create a 3D volume of sideXsideXside, where voxels representing a
    sphere are ones and background is zeros.

    Keyword arguments:
    side -- the number of voxels the 3D volume should have on each side.

    Returns:
    A (side,side,side) shaped matrix of zeros and ones.
    """
    radius = side / 2
    axis = _np.arange(-radius, radius)
    grid_x, grid_y = _np.meshgrid(axis, axis)
    # One boolean z-slice per depth coordinate, stacked along the third axis.
    slices = [_np.sqrt(grid_x ** 2 + grid_y ** 2 + z ** 2) < radius
              for z in axis]
    return _np.stack(slices, axis=2).astype(float)
def rotate3D(X, Y, Z, rx, ry):
"""Rotates a 3D object along one ordinate axis at a time.
Keyword arguments:
X -- The X coordinate of the voxels to be rotated.
Y -- The Y coordinate of the voxels to be rotated.
Z -- The Z coordinate of the voxels to be rotated.
Returns:
X,Y,Z coordinates of the rotated voxels.
"""
R = _np.eye(3)
Rx = _np.array([[1, 0, 0],
[0, _np.cos(rx), -_np.sin(rx)],
[0, _np.sin(rx), _np.cos(rx)]])
Ry = _np.array([[_np.cos(ry), 0, _np.sin(ry)],
[0, 1, 0],
[-_np.sin(ry), 0, _np.cos(ry)]])
R = _np.dot(R, Rx)
R = _np.dot(R, Ry)
XYZ = _np.vstack([X, Y, Z])
XYZ_ = _np.dot(XYZ.T, R)
return XYZ_[:, 0], XYZ_[:, 1], XYZ_[:, 2]
def recenter(X, Y, Z):
    """Translate voxel coordinates so that their centroid lies at the origin.

    Keyword arguments:
    X -- The X coordinate of the voxels.
    Y -- The Y coordinate of the voxels.
    Z -- The Z coordinate of the voxels.

    Returns:
    X,Y,Z coordinates translated so the object's centroid is at (0, 0, 0).
    """
    # TODO: write unit test
    # Zeroth-order moment (total mass) and first-order moments.
    m000 = _immoment3D(X, Y, Z, 0, 0, 0)
    m100 = _immoment3D(X, Y, Z, 1, 0, 0)
    m010 = _immoment3D(X, Y, Z, 0, 1, 0)
    m001 = _immoment3D(X, Y, Z, 0, 0, 1)

    # Find centroid
    cx = m100 / m000
    cy = m010 / m000
    cz = m001 / m000

    # Recentering
    X_ = X - cx
    Y_ = Y - cy
    Z_ = Z - cz
    return X_, Y_, Z_
| 26.926471
| 72
| 0.550519
|
import numpy as _np
from .moments import immoment3D as _immoment3D
def getSphere(side):
volume = _np.zeros((side, side, side))
r = side / 2
Xs, Ys = _np.meshgrid(_np.arange(-r, r), _np.arange(-r, r))
for k, z in enumerate(_np.arange(-r, r)):
volume[:, :, k] = _np.sqrt(Xs ** 2 + Ys ** 2 + z ** 2) < r
return volume
def rotate3D(X, Y, Z, rx, ry):
R = _np.eye(3)
Rx = _np.array([[1, 0, 0],
[0, _np.cos(rx), -_np.sin(rx)],
[0, _np.sin(rx), _np.cos(rx)]])
Ry = _np.array([[_np.cos(ry), 0, _np.sin(ry)],
[0, 1, 0],
[-_np.sin(ry), 0, _np.cos(ry)]])
R = _np.dot(R, Rx)
R = _np.dot(R, Ry)
XYZ = _np.vstack([X, Y, Z])
XYZ_ = _np.dot(XYZ.T, R)
return XYZ_[:, 0], XYZ_[:, 1], XYZ_[:, 2]
def recenter(X, Y, Z):
m000 = _immoment3D(X, Y, Z, 0, 0, 0)
m100 = _immoment3D(X, Y, Z, 1, 0, 0)
m010 = _immoment3D(X, Y, Z, 0, 1, 0)
m001 = _immoment3D(X, Y, Z, 0, 0, 1)
cx = m100 / m000
cy = m010 / m000
cz = m001 / m000
X_ = X - cx
Y_ = Y - cy
Z_ = Z - cz
return X_, Y_, Z_
| true
| true
|
f716766002a0b6aae9b9a89d8422883c38d8d8f9
| 19,179
|
py
|
Python
|
tests/jobs/test_local_task_job.py
|
anitakar/airflow
|
fc32d36d44f9d36b30333ce0676f2f3a6b133619
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/jobs/test_local_task_job.py
|
anitakar/airflow
|
fc32d36d44f9d36b30333ce0676f2f3a6b133619
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/jobs/test_local_task_job.py
|
anitakar/airflow
|
fc32d36d44f9d36b30333ce0676f2f3a6b133619
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import multiprocessing
import os
import time
import unittest
import uuid
from multiprocessing import Lock, Value
from unittest import mock
from unittest.mock import patch
import pytest
from airflow import settings
from airflow.exceptions import AirflowException, AirflowFailException
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models.dag import DAG
from airflow.models.dagbag import DagBag
from airflow.models.taskinstance import TaskInstance
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.db import clear_db_jobs, clear_db_runs
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
class TestLocalTaskJob(unittest.TestCase):
    """Integration tests for LocalTaskJob: heartbeats, external state changes,
    double-trigger protection, and success/failure callbacks."""

    def setUp(self):
        # Start from clean Job/DagRun tables and patch out the base-job sleep
        # so tests control timing themselves.
        clear_db_jobs()
        clear_db_runs()
        patcher = patch('airflow.jobs.base_job.sleep')
        self.addCleanup(patcher.stop)
        self.mock_base_job_sleep = patcher.start()

    def tearDown(self) -> None:
        clear_db_jobs()
        clear_db_runs()

    def test_localtaskjob_essential_attr(self):
        """
        Check whether essential attributes
        of LocalTaskJob can be assigned with
        proper values without intervention
        """
        dag = DAG(
            'test_localtaskjob_essential_attr', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}
        )
        with dag:
            op1 = DummyOperator(task_id='op1')

        dag.clear()
        dr = dag.create_dagrun(
            run_id="test", state=State.SUCCESS, execution_date=DEFAULT_DATE, start_date=DEFAULT_DATE
        )
        ti = dr.get_task_instance(task_id=op1.task_id)

        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())

        essential_attr = ["dag_id", "job_type", "start_date", "hostname"]

        check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
        assert all(check_result_1)

        check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
        assert all(check_result_2)

    @patch('os.getpid')
    def test_localtaskjob_heartbeat(self, mock_pid):
        """heartbeat_callback must raise when the recorded hostname or pid
        does not match the running process."""
        session = settings.Session()
        dag = DAG('test_localtaskjob_heartbeat', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})

        with dag:
            op1 = DummyOperator(task_id='op1')

        dag.clear()
        dr = dag.create_dagrun(
            run_id="test",
            state=State.SUCCESS,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            session=session,
        )
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.RUNNING
        # Hostname mismatch: the callback must raise.
        ti.hostname = "blablabla"
        session.commit()

        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        with pytest.raises(AirflowException):
            job1.heartbeat_callback()  # pylint: disable=no-value-for-parameter

        # Matching hostname and pid: no exception.
        mock_pid.return_value = 1
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.merge(ti)
        session.commit()

        job1.heartbeat_callback(session=None)

        # pid mismatch: the callback must raise again.
        mock_pid.return_value = 2
        with pytest.raises(AirflowException):
            job1.heartbeat_callback()  # pylint: disable=no-value-for-parameter

    def test_heartbeat_failed_fast(self):
        """
        Test that task heartbeat will sleep when it fails fast
        """
        self.mock_base_job_sleep.side_effect = time.sleep

        with create_session() as session:
            dagbag = DagBag(
                dag_folder=TEST_DAG_FOLDER,
                include_examples=False,
            )
            dag_id = 'test_heartbeat_failed_fast'
            task_id = 'test_heartbeat_failed_fast_op'
            dag = dagbag.get_dag(dag_id)
            task = dag.get_task(task_id)

            dag.create_dagrun(
                run_id="test_heartbeat_failed_fast_run",
                state=State.RUNNING,
                execution_date=DEFAULT_DATE,
                start_date=DEFAULT_DATE,
                session=session,
            )
            ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
            ti.refresh_from_db()
            ti.state = State.RUNNING
            ti.hostname = get_hostname()
            ti.pid = 1
            session.commit()

            job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
            job.heartrate = 2
            heartbeat_records = []
            job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
            job._execute()
            assert len(heartbeat_records) > 2
            for i in range(1, len(heartbeat_records)):
                time1 = heartbeat_records[i - 1]
                time2 = heartbeat_records[i]
                # Assert that difference small enough
                delta = (time2 - time1).total_seconds()
                assert abs(delta - job.heartrate) < 0.05

    @pytest.mark.quarantined
    def test_mark_success_no_kill(self):
        """
        Test that ensures that mark_success in the UI doesn't cause
        the task to fail, and that the task exits
        """
        dagbag = DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_mark_success')
        task = dag.get_task('task1')

        session = settings.Session()

        dag.clear()
        dag.create_dagrun(
            run_id="test",
            state=State.RUNNING,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            session=session,
        )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
        process = multiprocessing.Process(target=job1.run)
        process.start()
        ti.refresh_from_db()
        # Wait up to ~5s for the task to actually start running.
        for _ in range(0, 50):
            if ti.state == State.RUNNING:
                break
            time.sleep(0.1)
            ti.refresh_from_db()
        assert State.RUNNING == ti.state
        # Mark the TI success externally, as the UI would.
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()

        process.join(timeout=10)
        assert not process.is_alive()
        ti.refresh_from_db()
        assert State.SUCCESS == ti.state

    def test_localtaskjob_double_trigger(self):
        """A job must refuse to start a task that is already running under
        another pid/host, leaving the existing run untouched."""
        dagbag = DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')

        session = settings.Session()

        dag.clear()
        dr = dag.create_dagrun(
            run_id="test",
            state=State.SUCCESS,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            session=session,
        )
        # Simulate the task already running elsewhere.
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.merge(ti)
        session.commit()

        ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti_run.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
        with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
            job1.run()
            mock_method.assert_not_called()

        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        assert ti.pid == 1
        assert ti.state == State.RUNNING

        session.close()

    @pytest.mark.quarantined
    def test_localtaskjob_maintain_heart_rate(self):
        """The job loop should not sleep beyond its heartrate when the task
        runner finishes quickly."""
        dagbag = DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')

        session = settings.Session()

        dag.clear()
        dag.create_dagrun(
            run_id="test",
            state=State.SUCCESS,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            session=session,
        )

        ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti_run.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())

        # this should make sure we only heartbeat once and exit at the second
        # loop in _execute()
        return_codes = [None, 0]

        def multi_return_code():
            return return_codes.pop(0)

        time_start = time.time()
        with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_start:
            with patch.object(StandardTaskRunner, 'return_code') as mock_ret_code:
                mock_ret_code.side_effect = multi_return_code
                job1.run()
                assert mock_start.call_count == 1
                assert mock_ret_code.call_count == 2
        time_end = time.time()

        assert self.mock_base_job_sleep.call_count == 1
        assert job1.state == State.SUCCESS

        # Consider we have patched sleep call, it should not be sleeping to
        # keep up with the heart rate in other unpatched places
        #
        # We already make sure patched sleep call is only called once
        assert time_end - time_start < job1.heartrate
        session.close()

    def test_mark_failure_on_failure_callback(self):
        """
        Test that ensures that mark_failure in the UI fails
        the task, and executes on_failure_callback
        """
        # use shared memory value so we can properly track value change even if
        # it's been updated across processes.
        failure_callback_called = Value('i', 0)
        task_terminated_externally = Value('i', 1)

        def check_failure(context):
            with failure_callback_called.get_lock():
                failure_callback_called.value += 1
            assert context['dag_run'].dag_id == 'test_mark_failure'
            assert context['exception'] == "task marked as failed externally"

        def task_function(ti):
            with create_session() as session:
                assert State.RUNNING == ti.state
                ti.log.info("Marking TI as failed 'externally'")
                ti.state = State.FAILED
                session.merge(ti)
                session.commit()

            time.sleep(10)
            # This should not happen -- the state change should be noticed and the task should get killed
            with task_terminated_externally.get_lock():
                task_terminated_externally.value = 0

        with DAG(dag_id='test_mark_failure', start_date=DEFAULT_DATE) as dag:
            task = PythonOperator(
                task_id='test_state_succeeded1',
                python_callable=task_function,
                on_failure_callback=check_failure,
            )

        dag.clear()
        with create_session() as session:
            dag.create_dagrun(
                run_id="test",
                state=State.RUNNING,
                execution_date=DEFAULT_DATE,
                start_date=DEFAULT_DATE,
                session=session,
            )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()

        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        with timeout(30):
            # This should be _much_ shorter to run.
            # If you change this limit, make the timeout in the callable above bigger
            job1.run()

        ti.refresh_from_db()
        assert ti.state == State.FAILED
        assert failure_callback_called.value == 1
        assert task_terminated_externally.value == 1

    @patch('airflow.utils.process_utils.subprocess.check_call')
    @patch.object(StandardTaskRunner, 'return_code')
    def test_failure_callback_only_called_once(self, mock_return_code, _check_call):
        """
        Test that ensures that when a task exits with failure by itself,
        failure callback is only called once
        """
        # use shared memory value so we can properly track value change even if
        # it's been updated across processes.
        failure_callback_called = Value('i', 0)
        callback_count_lock = Lock()

        def failure_callback(context):
            with callback_count_lock:
                failure_callback_called.value += 1
            assert context['dag_run'].dag_id == 'test_failure_callback_race'
            assert isinstance(context['exception'], AirflowFailException)

        def task_function(ti):
            raise AirflowFailException()

        dag = DAG(dag_id='test_failure_callback_race', start_date=DEFAULT_DATE)
        task = PythonOperator(
            task_id='test_exit_on_failure',
            python_callable=task_function,
            on_failure_callback=failure_callback,
            dag=dag,
        )

        dag.clear()
        with create_session() as session:
            dag.create_dagrun(
                run_id="test",
                state=State.RUNNING,
                execution_date=DEFAULT_DATE,
                start_date=DEFAULT_DATE,
                session=session,
            )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()

        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())

        # Simulate race condition where job1 heartbeat ran right after task
        # state got set to failed by ti.handle_failure but before task process
        # fully exits. See _execute loop in airflow/jobs/local_task_job.py.
        # In this case, we have:
        #  * task_runner.return_code() is None
        #  * ti.state == State.Failed
        #
        # We also need to set return_code to a valid int after job1.terminating
        # is set to True so _execute loop won't loop forever.
        def dummy_return_code(*args, **kwargs):
            return None if not job1.terminating else -9

        mock_return_code.side_effect = dummy_return_code

        with timeout(10):
            # This should be _much_ shorter to run.
            # If you change this limit, make the timeout in the callable above bigger
            job1.run()

        ti.refresh_from_db()
        assert ti.state == State.FAILED  # task exits with failure state
        assert failure_callback_called.value == 1

    def test_mark_success_on_success_callback(self):
        """
        Test that ensures that where a task is marked success in the UI
        on_success_callback gets executed
        """
        # use shared memory value so we can properly track value change even if
        # it's been updated across processes.
        success_callback_called = Value('i', 0)
        task_terminated_externally = Value('i', 1)
        shared_mem_lock = Lock()

        def success_callback(context):
            with shared_mem_lock:
                success_callback_called.value += 1
            assert context['dag_run'].dag_id == 'test_mark_success'

        dag = DAG(dag_id='test_mark_success', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})

        def task_function(ti):
            # pylint: disable=unused-argument
            time.sleep(60)
            # This should not happen -- the state change should be noticed and the task should get killed
            with shared_mem_lock:
                task_terminated_externally.value = 0

        task = PythonOperator(
            task_id='test_state_succeeded1',
            python_callable=task_function,
            on_success_callback=success_callback,
            dag=dag,
        )

        session = settings.Session()

        dag.clear()
        dag.create_dagrun(
            run_id="test",
            state=State.RUNNING,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            session=session,
        )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        job1.task_runner = StandardTaskRunner(job1)

        # Dispose the engine before forking so the child gets fresh connections.
        settings.engine.dispose()
        process = multiprocessing.Process(target=job1.run)
        process.start()

        # Wait up to ~5s for the task to actually start running.
        for _ in range(0, 25):
            ti.refresh_from_db()
            if ti.state == State.RUNNING:
                break
            time.sleep(0.2)
        assert ti.state == State.RUNNING
        # Mark the TI success externally, as the UI would.
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()

        process.join(timeout=10)
        assert success_callback_called.value == 1
        assert task_terminated_externally.value == 1
        assert not process.is_alive()
@pytest.fixture()
def clean_db_helper():
    """Wipe the Job and DagRun tables after each test that uses this fixture."""
    yield
    clear_db_jobs()
    clear_db_runs()
@pytest.mark.usefixtures("clean_db_helper")
class TestLocalTaskJobPerformance:
    """Query-count regression tests for the LocalTaskJob execute loop."""

    @pytest.mark.parametrize("return_codes", [[0], 9 * [None] + [0]])  # type: ignore
    @mock.patch("airflow.jobs.local_task_job.get_task_runner")
    def test_number_of_queries_single_loop(self, mock_get_task_runner, return_codes):
        """Running the job must issue a bounded number of DB queries."""
        unique_prefix = str(uuid.uuid4())
        dag = DAG(dag_id=f'{unique_prefix}_test_number_of_queries', start_date=DEFAULT_DATE)
        task = DummyOperator(task_id='test_state_succeeded1', dag=dag)

        dag.clear()
        dag.create_dagrun(run_id=unique_prefix, state=State.NONE)

        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)

        # NOTE(review): 'side_effects' is not a Mock configuration attribute --
        # 'side_effect' (singular) is almost certainly intended.  As written,
        # the parametrized return codes are ignored and return_code() returns
        # a truthy Mock, so both parametrizations exercise a single loop.
        # Confirm the expected query counts per parametrization before fixing.
        mock_get_task_runner.return_value.return_code.side_effects = return_codes

        job = LocalTaskJob(task_instance=ti, executor=MockExecutor())
        with assert_queries_count(13):
            job.run()
| 36.531429
| 107
| 0.640388
|
import multiprocessing
import os
import time
import unittest
import uuid
from multiprocessing import Lock, Value
from unittest import mock
from unittest.mock import patch
import pytest
from airflow import settings
from airflow.exceptions import AirflowException, AirflowFailException
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models.dag import DAG
from airflow.models.dagbag import DagBag
from airflow.models.taskinstance import TaskInstance
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.db import clear_db_jobs, clear_db_runs
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
class TestLocalTaskJob(unittest.TestCase):
def setUp(self):
clear_db_jobs()
clear_db_runs()
patcher = patch('airflow.jobs.base_job.sleep')
self.addCleanup(patcher.stop)
self.mock_base_job_sleep = patcher.start()
def tearDown(self) -> None:
clear_db_jobs()
clear_db_runs()
def test_localtaskjob_essential_attr(self):
dag = DAG(
'test_localtaskjob_essential_attr', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}
)
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(
run_id="test", state=State.SUCCESS, execution_date=DEFAULT_DATE, start_date=DEFAULT_DATE
)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
assert all(check_result_1)
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
assert all(check_result_2)
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG('test_localtaskjob_heartbeat', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(
run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
with pytest.raises(AirflowException):
job1.heartbeat_callback()
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
mock_pid.return_value = 2
with pytest.raises(AirflowException):
job1.heartbeat_callback()
def test_heartbeat_failed_fast(self):
self.mock_base_job_sleep.side_effect = time.sleep
with create_session() as session:
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag_id = 'test_heartbeat_failed_fast'
task_id = 'test_heartbeat_failed_fast_op'
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
dag.create_dagrun(
run_id="test_heartbeat_failed_fast_run",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
job.heartrate = 2
heartbeat_records = []
job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
job._execute()
assert len(heartbeat_records) > 2
for i in range(1, len(heartbeat_records)):
time1 = heartbeat_records[i - 1]
time2 = heartbeat_records[i]
delta = (time2 - time1).total_seconds()
assert abs(delta - job.heartrate) < 0.05
@pytest.mark.quarantined
def test_mark_success_no_kill(self):
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(
run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for _ in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
assert State.RUNNING == ti.state
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
assert not process.is_alive()
ti.refresh_from_db()
assert State.SUCCESS == ti.state
def test_localtaskjob_double_trigger(self):
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(
run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
assert ti.pid == 1
assert ti.state == State.RUNNING
session.close()
@pytest.mark.quarantined
def test_localtaskjob_maintain_heart_rate(self):
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dag.create_dagrun(
run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
return_codes = [None, 0]
def multi_return_code():
return return_codes.pop(0)
time_start = time.time()
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_start:
with patch.object(StandardTaskRunner, 'return_code') as mock_ret_code:
mock_ret_code.side_effect = multi_return_code
job1.run()
assert mock_start.call_count == 1
assert mock_ret_code.call_count == 2
time_end = time.time()
assert self.mock_base_job_sleep.call_count == 1
assert job1.state == State.SUCCESS
assert time_end - time_start < job1.heartrate
session.close()
def test_mark_failure_on_failure_callback(self):
failure_callback_called = Value('i', 0)
task_terminated_externally = Value('i', 1)
def check_failure(context):
with failure_callback_called.get_lock():
failure_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_mark_failure'
assert context['exception'] == "task marked as failed externally"
def task_function(ti):
with create_session() as session:
assert State.RUNNING == ti.state
ti.log.info("Marking TI as failed 'externally'")
ti.state = State.FAILED
session.merge(ti)
session.commit()
time.sleep(10)
# This should not happen -- the state change should be noticed and the task should get killed
with task_terminated_externally.get_lock():
task_terminated_externally.value = 0
with DAG(dag_id='test_mark_failure', start_date=DEFAULT_DATE) as dag:
task = PythonOperator(
task_id='test_state_succeeded1',
python_callable=task_function,
on_failure_callback=check_failure,
)
dag.clear()
with create_session() as session:
dag.create_dagrun(
run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
with timeout(30):
# This should be _much_ shorter to run.
# If you change this limit, make the timeout in the callable above bigger
job1.run()
ti.refresh_from_db()
assert ti.state == State.FAILED
assert failure_callback_called.value == 1
assert task_terminated_externally.value == 1
@patch('airflow.utils.process_utils.subprocess.check_call')
@patch.object(StandardTaskRunner, 'return_code')
def test_failure_callback_only_called_once(self, mock_return_code, _check_call):
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
failure_callback_called = Value('i', 0)
callback_count_lock = Lock()
def failure_callback(context):
with callback_count_lock:
failure_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_failure_callback_race'
assert isinstance(context['exception'], AirflowFailException)
def task_function(ti):
raise AirflowFailException()
dag = DAG(dag_id='test_failure_callback_race', start_date=DEFAULT_DATE)
task = PythonOperator(
task_id='test_exit_on_failure',
python_callable=task_function,
on_failure_callback=failure_callback,
dag=dag,
)
dag.clear()
with create_session() as session:
dag.create_dagrun(
run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
def dummy_return_code(*args, **kwargs):
return None if not job1.terminating else -9
mock_return_code.side_effect = dummy_return_code
with timeout(10):
# This should be _much_ shorter to run.
# If you change this limit, make the timeout in the callable above bigger
job1.run()
ti.refresh_from_db()
assert ti.state == State.FAILED # task exits with failure state
assert failure_callback_called.value == 1
    def test_mark_success_on_success_callback(self):
        """Externally marking a running task SUCCESS must fire the success
        callback exactly once and terminate the task process early (the task
        body must not be allowed to run to completion).
        """
        # use shared memory value so we can properly track value change even if
        # it's been updated across processes.
        success_callback_called = Value('i', 0)
        # Starts at 1 ("terminated externally"); the task body flips it to 0
        # only if it survives its 60s sleep, which would mean no termination.
        task_terminated_externally = Value('i', 1)
        shared_mem_lock = Lock()
        def success_callback(context):
            with shared_mem_lock:
                success_callback_called.value += 1
            assert context['dag_run'].dag_id == 'test_mark_success'
        dag = DAG(dag_id='test_mark_success', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
        def task_function(ti):
            # Sleep far longer than the test budget; reaching the line after
            # the sleep means the task was NOT killed externally.
            time.sleep(60)
            with shared_mem_lock:
                task_terminated_externally.value = 0
        task = PythonOperator(
            task_id='test_state_succeeded1',
            python_callable=task_function,
            on_success_callback=success_callback,
            dag=dag,
        )
        session = settings.Session()
        dag.clear()
        dag.create_dagrun(
            run_id="test",
            state=State.RUNNING,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            session=session,
        )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        job1.task_runner = StandardTaskRunner(job1)
        # Drop pooled DB connections before forking so parent and child do
        # not share database sockets.
        settings.engine.dispose()
        process = multiprocessing.Process(target=job1.run)
        process.start()
        # Poll (up to ~5s) until the task instance actually reaches RUNNING.
        for _ in range(0, 25):
            ti.refresh_from_db()
            if ti.state == State.RUNNING:
                break
            time.sleep(0.2)
        assert ti.state == State.RUNNING
        # Mark the still-sleeping task as SUCCESS from "outside".
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()
        process.join(timeout=10)
        assert success_callback_called.value == 1
        assert task_terminated_externally.value == 1
        assert not process.is_alive()
@pytest.fixture()
def clean_db_helper():
    """Clear job and DAG-run tables after each test that uses this fixture."""
    yield
    clear_db_jobs()
    clear_db_runs()
@pytest.mark.usefixtures("clean_db_helper")
class TestLocalTaskJobPerformance:
    """Guard-rail tests for the number of DB queries a LocalTaskJob issues."""

    @pytest.mark.parametrize("return_codes", [[0], 9 * [None] + [0]])
    @mock.patch("airflow.jobs.local_task_job.get_task_runner")
    def test_number_of_queries_single_loop(self, mock_get_task_runner, return_codes):
        """Running a trivial task must stay within the expected query budget,
        whether the mocked runner finishes immediately ([0]) or only after
        several heartbeat polls (9 * [None] + [0]).
        """
        unique_prefix = str(uuid.uuid4())
        dag = DAG(dag_id=f'{unique_prefix}_test_number_of_queries', start_date=DEFAULT_DATE)
        task = DummyOperator(task_id='test_state_succeeded1', dag=dag)
        dag.clear()
        dag.create_dagrun(run_id=unique_prefix, state=State.NONE)
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        # BUG FIX: the Mock attribute is `side_effect` (singular). The old
        # code assigned to `side_effects`, which Mock silently accepts as an
        # arbitrary attribute, so the parametrized return codes were never
        # actually consumed by return_code().
        mock_get_task_runner.return_value.return_code.side_effect = return_codes
        job = LocalTaskJob(task_instance=ti, executor=MockExecutor())
        with assert_queries_count(13):
            job.run()
| true
| true
|
f71676683a6637366c41785b4c0e85fe9f8fcaf2
| 2,554
|
py
|
Python
|
tests/test_submission.py
|
Uchimura85/wswp
|
54b7e59390721cbd1e3ce0d7ed255af3c2b4511e
|
[
"MIT"
] | 4
|
2020-12-27T00:31:49.000Z
|
2021-08-03T22:33:41.000Z
|
tests/test_submission.py
|
Uchimura85/wswp
|
54b7e59390721cbd1e3ce0d7ed255af3c2b4511e
|
[
"MIT"
] | 1
|
2021-04-08T02:59:31.000Z
|
2021-04-08T02:59:31.000Z
|
tests/test_submission.py
|
Uchimura85/wswp
|
54b7e59390721cbd1e3ce0d7ed255af3c2b4511e
|
[
"MIT"
] | 1
|
2021-08-03T22:33:42.000Z
|
2021-08-03T22:33:42.000Z
|
"""Tests for submission functionality-- primarily if a submission
form is validated properly and passed to the backend.
"""
import pytest
import pathlib, json
from src.model import Activity, Submission
# Load the sample submissions plus the HTTP status each should produce when
# posted, and turn each one into a pytest param (id'd for readable output).
with open(pathlib.Path(__file__).parent.absolute()/'data'/'sample_submissions_base.json') as samples:
    json_data = json.load(samples)
sample_submissions = [
    pytest.param(case['submission'], case['status_code'], case.get('broken_field'), id=case['id'])
    for case in json_data['sample_submissions']
]
@pytest.mark.parametrize("submission, expected_code, problematic_field", sample_submissions)
def test_submissions(app, client, submission, expected_code, problematic_field):
    """Post each sample submission and check the resulting status code
    (422 on validation failure) and that any validation error reported is
    attributed to the field expected to fail.
    """
    response = client.post('/v1/games/suggest', json=submission)
    payload = response.get_json()
    assert response.status_code == expected_code
    # Validation errors come back as {'issues': {field: [messages, ...]}};
    # the field we expected to break must appear among them.
    if problematic_field and 'issues' in payload:
        assert problematic_field in payload['issues']
    # A 200 means accepted: the submission must actually be in the database.
    if expected_code == 200:
        with app.app_context():
            stored = Submission.query.filter_by(name=submission['name']).first()
            assert stored is not None
@pytest.mark.parametrize("max_players", [0, None])
def test_blank_max_players(app, client, max_players):
    """A blank (None) or zero max_players must be stored as NULL."""
    payload = {
        "name": "TestMaxPlayersBlank",
        "url": "https://google.ca",
        "description": "test desc",
        "min_players": 4,
        "max_players": max_players,
        "paid": False,
        "submitted_by": "Matt",
    }
    response = client.post('/v1/games/suggest', json=payload)
    assert response.status_code == 200
    with app.app_context():
        # Inspect the stored row directly: max_players must be NULL.
        stored = Submission.query.filter_by(name="TestMaxPlayersBlank").first()
        assert stored.max_players is None
| 43.288136
| 150
| 0.698904
|
import pytest
import pathlib, json
from src.model import Activity, Submission
with open(pathlib.Path(__file__).parent.absolute()/'data'/'sample_submissions_base.json') as samples:
json_data = json.load(samples)
sample_submissions = [pytest.param(x['submission'], x['status_code'], x.get('broken_field'), id=x['id']) for x in json_data['sample_submissions']]
@pytest.mark.parametrize("submission, expected_code, problematic_field", sample_submissions)
def test_submissions(app, client, submission, expected_code, problematic_field):
rv = client.post('/v1/games/suggest', json=submission)
response_json = rv.get_json()
assert rv.status_code == expected_code
if 'issues' in response_json and problematic_field:
assert problematic_field in response_json['issues']
if expected_code == 200:
with app.app_context():
game = Submission.query.filter_by(name=submission['name']).first()
assert game is not None
@pytest.mark.parametrize("max_players", [0, None])
def test_blank_max_players(app, client, max_players):
submission = {
"name": "TestMaxPlayersBlank",
"url": "https://google.ca",
"description": "test desc",
"min_players": 4,
"max_players": max_players,
"paid": False,
"submitted_by": "Matt"
}
rv = client.post('/v1/games/suggest', json=submission)
assert rv.status_code == 200
with app.app_context():
game = Submission.query.filter_by(name="TestMaxPlayersBlank").first()
assert game.max_players is None
| true
| true
|
f71678195657d243066d4e9b9ea9b1e9901b4130
| 4,061
|
py
|
Python
|
scripts/Emdometrial/Statistics/mut_analysis.py
|
yaront/MutSig
|
456dc793ab2dbd955b5cef098fd14539d428de0b
|
[
"Apache-2.0"
] | null | null | null |
scripts/Emdometrial/Statistics/mut_analysis.py
|
yaront/MutSig
|
456dc793ab2dbd955b5cef098fd14539d428de0b
|
[
"Apache-2.0"
] | null | null | null |
scripts/Emdometrial/Statistics/mut_analysis.py
|
yaront/MutSig
|
456dc793ab2dbd955b5cef098fd14539d428de0b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 26 20:42:43 2018

@author: tomer

Per-gene mutation analysis for one TCGA tumor type (UCEC): builds a
patients-by-genes mutation-count table ordered by BMI, fits a linear trend
(slope) of each gene's normalized mutation rate across BMI bins, and writes
the raw and normalized tables to tab-separated text and Excel.
"""
#%%
# =================================================
# # Mutation per gene
# =================================================
import numpy as np
import pandas as pd
#%%
# The tumor type could alternatively be taken from the command line:
#tumor = sys.argv[1]
#tumor = tumor.split('/')[-1].split('.')[0]
#print tumor
tumor = 'UCEC'
#%% Reading data
print("Starting: " + tumor)
mut_data = pd.read_table('./../../../databases/Endometrial/TCGA_MAFs/' + tumor + '.maf', sep = '\t')
bmi_data = pd.read_table('./../../../databases/Endometrial/information/TCGA_bmi_data.txt', sep = '\t')
# Keep only patients with a plausible numeric BMI (18.5 < bmi < 90).
pat_bmi = bmi_data[bmi_data['bmi'] != '--']
pat_bmi = pat_bmi[(18.5 < pd.to_numeric(pat_bmi['bmi'])) & (pd.to_numeric(pat_bmi['bmi']) < 90)]
# Patients present in both the MAF (first three barcode fields form the
# patient id) and the filtered BMI table.
patients = list(set(np.unique(['-'.join(x.split('-')[0:3]) for x in mut_data['Tumor_Sample_Barcode']])).intersection(list(pat_bmi['submitter_id'].values)))
pat_bmi = pat_bmi[[(x in patients) for x in pat_bmi['submitter_id'].values]].sort_values(by = ['bmi'])
pat_mut = mut_data[[(x in patients) for x in ['-'.join(x.split('-')[0:3]) for x in mut_data['Tumor_Sample_Barcode']]]]
# Restrict to protein-affecting variant classes.
pat_mut = pat_mut[pat_mut['Variant_Classification'].isin(['Frame_Shift_Del', 'Frame_Shift_Ins', 'In_Frame_Del', 'In_Frame_Ins', 'Missense_Mutation', 'Nonsense_Mutation', 'Nonstop_Mutation', 'Translation_Start_Site'])]
#%% Creating table of mutations per BMI and mutation burden per patient
# NOTE(review): np.sort on a 2-column DataFrame sorts each ROW independently
# (string comparison of submitter_id vs bmi), so taking column 1 as the index
# and column 0 as 'BMI' only separates ids from bmi values by accident of
# string ordering ("TCGA-..." sorting after numeric strings) — verify.
gene_bmi_mut = pd.DataFrame(0, columns = ['BMI','Total_Mutations'] + list(np.unique(pat_mut['Hugo_Symbol'])), index = np.sort(pat_bmi[['submitter_id','bmi']])[:,1])
gene_bmi_mut['BMI'] = np.sort(pat_bmi[['submitter_id','bmi']])[:,0]
pat_name_mut = ['-'.join(x.split('-')[0:3]) for x in pat_mut['Tumor_Sample_Barcode']]
for pat in gene_bmi_mut.index:
    gene_bmi_mut.loc[pat,'Total_Mutations'] = pat_name_mut.count(pat)
# Drop hypermutated samples (>= 3000 mutations).
gene_bmi_mut = gene_bmi_mut[gene_bmi_mut['Total_Mutations'] < 3000]
#%% Assigning mutations per gene per patient
print("Calculating mutations for " + tumor)
for g in np.unique(pat_mut['Hugo_Symbol']):
    gene_mut = pat_mut[pat_mut['Hugo_Symbol'] == g]
    gene_pat = ['-'.join(x.split('-')[0:3]) for x in gene_mut['Tumor_Sample_Barcode']]
    for p in np.unique(gene_pat):
        gene_bmi_mut.loc[p,g] = gene_pat.count(p)
# Transpose: rows become BMI / Total_Mutations / one row per gene.
gene_bmi_mut = gene_bmi_mut.transpose()
norm_gene_bmi_mut = []
#%% Finding the slope
print("Calculating slope for " + tumor)
# Map each distinct BMI value to a bin index, then fit a line through the
# per-bin average of each gene's normalized mutation rate.
inds = {bmi: ind for ind,bmi in enumerate(set(pd.to_numeric(gene_bmi_mut.loc['BMI',:])))}
bmi_ind = [inds[bmi] for bmi in pd.to_numeric(gene_bmi_mut.loc['BMI',:])]
slope = []
for i,j in gene_bmi_mut.iloc[2:,:].iterrows():
    # Normalize each gene's count by the patient's total mutation burden.
    norm_mut = pd.to_numeric(j) / pd.to_numeric(gene_bmi_mut.loc['Total_Mutations'])
    norm_gene_bmi_mut.append(norm_mut)
    # Per-BMI-bin mean of the normalized rates, then the linear-fit slope.
    weight_mut = np.bincount(np.array(bmi_ind),weights=list(map(float,norm_mut.values))) / np.bincount(np.array(bmi_ind))
    slope.append(np.polyfit(list(range(len(weight_mut))), weight_mut,1)[0])
norm_gene_bmi_mut = pd.DataFrame(norm_gene_bmi_mut)
norm_gene_bmi_mut = pd.concat([gene_bmi_mut.loc[['BMI','Total_Mutations'],:],norm_gene_bmi_mut])
norm_gene_bmi_mut.index = gene_bmi_mut.index
# -inf placeholders keep the two header rows first after sorting by slope.
gene_bmi_mut['Slope'] = [-np.inf,-np.inf] + slope
gene_bmi_mut = gene_bmi_mut.sort_values(by = ['Slope'])
gene_bmi_mut.loc[['BMI','Total_Mutations'],'Slope'] = '-'
norm_gene_bmi_mut['Slope'] = [-np.inf,-np.inf] + slope
norm_gene_bmi_mut = norm_gene_bmi_mut.sort_values(by = ['Slope'])
norm_gene_bmi_mut.loc[['BMI','Total_Mutations'],'Slope'] = '-'
#%% Writing the data
print("Writing " + tumor)
gene_bmi_mut.to_csv('./../output/' + tumor + '_bmi_gene_mut.txt', header = True, index = True, sep = '\t')
norm_gene_bmi_mut.to_csv('./../output/' + tumor + '_bmi_gene_mut_norm.txt', header = True, index = True, sep = '\t')
writer = pd.ExcelWriter('./../output/' + tumor + '_bmi_gene_mut_slope.xlsx', engine='xlsxwriter')
gene_bmi_mut.to_excel(writer, sheet_name = tumor + '_binary')
norm_gene_bmi_mut.to_excel(writer, sheet_name = tumor + '_norm')
# NOTE(review): ExcelWriter.save() is deprecated/removed in newer pandas
# (use close()); fine for the old pandas this python2 script targets.
writer.save()
print("Done: " + tumor)
| 35.622807
| 217
| 0.676927
|
np
import pandas as pd
tumor = 'UCEC'
print("Starting: " + tumor)
mut_data = pd.read_table('./../../../databases/Endometrial/TCGA_MAFs/' + tumor + '.maf', sep = '\t')
bmi_data = pd.read_table('./../../../databases/Endometrial/information/TCGA_bmi_data.txt', sep = '\t')
pat_bmi = bmi_data[bmi_data['bmi'] != '--']
pat_bmi = pat_bmi[(18.5 < pd.to_numeric(pat_bmi['bmi'])) & (pd.to_numeric(pat_bmi['bmi']) < 90)]
patients = list(set(np.unique(['-'.join(x.split('-')[0:3]) for x in mut_data['Tumor_Sample_Barcode']])).intersection(list(pat_bmi['submitter_id'].values)))
pat_bmi = pat_bmi[[(x in patients) for x in pat_bmi['submitter_id'].values]].sort_values(by = ['bmi'])
pat_mut = mut_data[[(x in patients) for x in ['-'.join(x.split('-')[0:3]) for x in mut_data['Tumor_Sample_Barcode']]]]
pat_mut = pat_mut[pat_mut['Variant_Classification'].isin(['Frame_Shift_Del', 'Frame_Shift_Ins', 'In_Frame_Del', 'In_Frame_Ins', 'Missense_Mutation', 'Nonsense_Mutation', 'Nonstop_Mutation', 'Translation_Start_Site'])]
gene_bmi_mut = pd.DataFrame(0, columns = ['BMI','Total_Mutations'] + list(np.unique(pat_mut['Hugo_Symbol'])), index = np.sort(pat_bmi[['submitter_id','bmi']])[:,1])
gene_bmi_mut['BMI'] = np.sort(pat_bmi[['submitter_id','bmi']])[:,0]
pat_name_mut = ['-'.join(x.split('-')[0:3]) for x in pat_mut['Tumor_Sample_Barcode']]
for pat in gene_bmi_mut.index:
gene_bmi_mut.loc[pat,'Total_Mutations'] = pat_name_mut.count(pat)
gene_bmi_mut = gene_bmi_mut[gene_bmi_mut['Total_Mutations'] < 3000]
print("Calculating mutations for " + tumor)
for g in np.unique(pat_mut['Hugo_Symbol']):
gene_mut = pat_mut[pat_mut['Hugo_Symbol'] == g]
gene_pat = ['-'.join(x.split('-')[0:3]) for x in gene_mut['Tumor_Sample_Barcode']]
for p in np.unique(gene_pat):
gene_bmi_mut.loc[p,g] = gene_pat.count(p)
gene_bmi_mut = gene_bmi_mut.transpose()
norm_gene_bmi_mut = []
print("Calculating slope for " + tumor)
inds = {bmi: ind for ind,bmi in enumerate(set(pd.to_numeric(gene_bmi_mut.loc['BMI',:])))}
bmi_ind = [inds[bmi] for bmi in pd.to_numeric(gene_bmi_mut.loc['BMI',:])]
slope = []
for i,j in gene_bmi_mut.iloc[2:,:].iterrows():
norm_mut = pd.to_numeric(j) / pd.to_numeric(gene_bmi_mut.loc['Total_Mutations'])
norm_gene_bmi_mut.append(norm_mut)
weight_mut = np.bincount(np.array(bmi_ind),weights=list(map(float,norm_mut.values))) / np.bincount(np.array(bmi_ind))
slope.append(np.polyfit(list(range(len(weight_mut))), weight_mut,1)[0])
norm_gene_bmi_mut = pd.DataFrame(norm_gene_bmi_mut)
norm_gene_bmi_mut = pd.concat([gene_bmi_mut.loc[['BMI','Total_Mutations'],:],norm_gene_bmi_mut])
norm_gene_bmi_mut.index = gene_bmi_mut.index
gene_bmi_mut['Slope'] = [-np.inf,-np.inf] + slope
gene_bmi_mut = gene_bmi_mut.sort_values(by = ['Slope'])
gene_bmi_mut.loc[['BMI','Total_Mutations'],'Slope'] = '-'
norm_gene_bmi_mut['Slope'] = [-np.inf,-np.inf] + slope
norm_gene_bmi_mut = norm_gene_bmi_mut.sort_values(by = ['Slope'])
norm_gene_bmi_mut.loc[['BMI','Total_Mutations'],'Slope'] = '-'
print("Writing " + tumor)
gene_bmi_mut.to_csv('./../output/' + tumor + '_bmi_gene_mut.txt', header = True, index = True, sep = '\t')
norm_gene_bmi_mut.to_csv('./../output/' + tumor + '_bmi_gene_mut_norm.txt', header = True, index = True, sep = '\t')
writer = pd.ExcelWriter('./../output/' + tumor + '_bmi_gene_mut_slope.xlsx', engine='xlsxwriter')
gene_bmi_mut.to_excel(writer, sheet_name = tumor + '_binary')
norm_gene_bmi_mut.to_excel(writer, sheet_name = tumor + '_norm')
writer.save()
print("Done: " + tumor)
| true
| true
|
f71678545058b774df26f6e4aec31a97d2933afd
| 406
|
py
|
Python
|
zerver/migrations/0062_default_timezone.py
|
TylerPham2000/zulip
|
2e7aaba0dde5517b4a55cb0bd782f009be45e3ba
|
[
"Apache-2.0"
] | 17,004
|
2015-09-25T18:27:24.000Z
|
2022-03-31T22:02:32.000Z
|
zerver/migrations/0062_default_timezone.py
|
TylerPham2000/zulip
|
2e7aaba0dde5517b4a55cb0bd782f009be45e3ba
|
[
"Apache-2.0"
] | 20,344
|
2015-09-25T19:02:42.000Z
|
2022-03-31T23:54:40.000Z
|
zerver/migrations/0062_default_timezone.py
|
TylerPham2000/zulip
|
2e7aaba0dde5517b4a55cb0bd782f009be45e3ba
|
[
"Apache-2.0"
] | 7,271
|
2015-09-25T18:48:39.000Z
|
2022-03-31T21:06:11.000Z
|
# Generated by Django 1.10.5 on 2017-03-16 12:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Redefine ``UserProfile.timezone`` as ``CharField(max_length=40)``
    with an empty-string default.
    """

    dependencies = [
        ("zerver", "0061_userprofile_timezone"),
    ]

    operations = [
        # AlterField replaces the field definition introduced in 0061; the
        # exact prior definition is not visible from this file.
        migrations.AlterField(
            model_name="userprofile",
            name="timezone",
            field=models.CharField(default="", max_length=40),
        ),
    ]
| 22.555556
| 62
| 0.608374
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0061_userprofile_timezone"),
]
operations = [
migrations.AlterField(
model_name="userprofile",
name="timezone",
field=models.CharField(default="", max_length=40),
),
]
| true
| true
|
f7167876873bfd4fe4f89c9dbfccf3393adb7076
| 1,589
|
py
|
Python
|
expmgmt/utils/structures.py
|
wbrandenburger/ExpMgmt
|
bc2383bb360d8d7e876d503c1570c2d9188f8e39
|
[
"MIT"
] | null | null | null |
expmgmt/utils/structures.py
|
wbrandenburger/ExpMgmt
|
bc2383bb360d8d7e876d503c1570c2d9188f8e39
|
[
"MIT"
] | null | null | null |
expmgmt/utils/structures.py
|
wbrandenburger/ExpMgmt
|
bc2383bb360d8d7e876d503c1570c2d9188f8e39
|
[
"MIT"
] | null | null | null |
# ===========================================================================
# dictionary.py -----------------------------------------------------------
# ===========================================================================
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def update_dict(a, b):
    """Merge the entries of ``b`` into ``a`` in place and return ``a``.

    The merge happens only when both arguments are truthy and ``a`` is a
    dict; otherwise ``a`` is returned unchanged (it may even be ``None``).
    """
    can_merge = bool(a) and bool(b) and isinstance(a, dict)
    if can_merge:
        a.update(b)
    return a
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def get_dict_element(dict_list, field, query):
    """Return the first dict in *dict_list* whose value at key *field*
    equals *query*, or an empty dict when nothing matches.
    """
    for item in dict_list:
        if item[field] == query:
            return item
    return dict()

# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def get_dict_elements(dict_list, field, query, update=False):
    """Collect the dicts in *dict_list* matching each ``(field, query)`` pair.

    :param dict_list: sequence of dicts to search.
    :param field: a key, or a list of keys (one per query entry).
    :param query: a value, or a list of values to look up.
    :param update: when True, merge all matches into one dict (later matches
        override earlier keys) instead of returning a list of matches.
    :returns: list of matching dicts, or a single merged dict when *update*.
    """
    # Normalize scalar arguments so a single (field, query) pair works too.
    # BUG FIX: the previous version only handled "scalar field + list query";
    # a scalar query silently produced an empty result because the list/list
    # branch was never entered.
    if not isinstance(query, list):
        query = [query]
    if not isinstance(field, list):
        field = [field] * len(query)
    result = list() if not update else dict()
    for field_item, query_item in zip(field, query):
        item = get_dict_element(dict_list, field_item, query_item)
        if item:
            if not update:
                result.append(item)
            else:
                result.update(item)
    return result
| 38.756098
| 77
| 0.349276
|
def update_dict(a, b):
if a and b and isinstance(a, dict):
a.update(b)
return a
def get_dict_element(dict_list, field, query):
for item in dict_list:
if item[field] == query:
return item
return dict()
def get_dict_elements(dict_list, field, query, update=False):
if not isinstance(field, list) and isinstance(query, list):
field = [field] * len(query)
result = list() if not update else dict()
if isinstance(field, list) and isinstance(query, list):
for field_item, query_item in zip(field, query):
item = get_dict_element(dict_list, field_item, query_item)
if item:
if not update:
result.append(item)
else:
result.update(item)
return result
| true
| true
|
f71678a61c98b95204ecbdc16742b5edb4c3e82f
| 458
|
py
|
Python
|
dataset_results.py
|
polikutinevgeny/FrontsCNN
|
a9f48d5afcdd7e0fe561840d94af36c0fedf1c15
|
[
"MIT"
] | 1
|
2019-12-28T08:40:44.000Z
|
2019-12-28T08:40:44.000Z
|
dataset_results.py
|
polikutinevgeny/FrontsCNN
|
a9f48d5afcdd7e0fe561840d94af36c0fedf1c15
|
[
"MIT"
] | null | null | null |
dataset_results.py
|
polikutinevgeny/FrontsCNN
|
a9f48d5afcdd7e0fe561840d94af36c0fedf1c15
|
[
"MIT"
] | null | null | null |
import gc
import numpy as np
def dataset_results(dataset, model, binary=False):
    """Run *model* over every sample of *dataset* and return flat labels.

    Each ``dataset[i]`` yields a ``(x_batch, y_batch)`` pair with batch size
    one; the leading batch dimension is stripped before stacking.

    :param dataset: indexable sequence of single-sample batches.
    :param model: object exposing a Keras-style ``predict``.
    :param binary: when True, take channel 0 of each target as the label;
        otherwise take the argmax over the last axis.
    :returns: ``(y_true, y_pred)`` as flat numpy arrays.
    """
    inputs = np.array([dataset[idx][0][0] for idx in range(len(dataset))])
    targets = np.array([dataset[idx][1][0] for idx in range(len(dataset))])
    y_pred = model.predict(inputs, batch_size=1, verbose=0).flatten()
    if binary:
        y_true = targets[..., 0].flatten()
    else:
        y_true = np.argmax(targets, axis=-1).flatten()
    # Drop the stacked inputs eagerly; they can be large.
    del inputs
    gc.collect()
    return y_true, y_pred
| 28.625
| 70
| 0.626638
|
import gc
import numpy as np
def dataset_results(dataset, model, binary=False):
x = np.array([dataset[i][0][0] for i in range(len(dataset))])
y_true = np.array([dataset[i][1][0] for i in range(len(dataset))])
y_pred = model.predict(x, batch_size=1, verbose=0).flatten()
if binary:
y_true = y_true[..., 0].flatten()
else:
y_true = np.argmax(y_true, axis=-1).flatten()
del x
gc.collect()
return y_true, y_pred
| true
| true
|
f716792771771c2a540c7c03471ad8050e7db370
| 6,984
|
py
|
Python
|
mbrl/env/pets_reacher.py
|
MaxSobolMark/mbrl-lib
|
bc8ccfe8a56b58d3ce5bae2c4ccdadd82ecdb594
|
[
"MIT"
] | null | null | null |
mbrl/env/pets_reacher.py
|
MaxSobolMark/mbrl-lib
|
bc8ccfe8a56b58d3ce5bae2c4ccdadd82ecdb594
|
[
"MIT"
] | null | null | null |
mbrl/env/pets_reacher.py
|
MaxSobolMark/mbrl-lib
|
bc8ccfe8a56b58d3ce5bae2c4ccdadd82ecdb594
|
[
"MIT"
] | null | null | null |
import os
from typing import Tuple
import numpy as np
from numpy.random import MT19937, RandomState, SeedSequence
import torch
from gym import utils
from gym.envs.mujoco import mujoco_env
class Reacher3DEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """A 7-DOF reacher whose end effector must reach a 3-D goal position.

    When ``task_id`` is given, the goal is drawn from an RNG seeded with it,
    so the same id always yields the same task; otherwise a fresh random
    goal is sampled on every reset.  ``hide_goal`` removes the goal
    coordinates from the observation vector.
    """

    def __init__(self, task_id=None, hide_goal=False):
        self.viewer = None
        utils.EzPickle.__init__(self)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.goal = np.zeros(3)
        self._hide_goal = hide_goal
        mujoco_env.MujocoEnv.__init__(
            self, os.path.join(dir_path, "assets/reacher3d.xml"), 2)
        self._task_id = task_id
        if task_id is not None:
            # Deterministic per-task goal: same task_id -> same goal.
            self._rng = RandomState(MT19937(SeedSequence(task_id)))
            self.goal = self._rng.normal(loc=0, scale=0.1, size=[3])

    def step(self, a):
        """Advance the simulation; reward is negative squared EE-goal
        distance minus a small control penalty."""
        self.do_simulation(a, self.frame_skip)
        ob = self._get_obs()
        reward = -np.sum(
            np.square(Reacher3DEnv.get_EE_pos(ob[None]) - self.goal))
        reward -= 0.01 * np.square(a).sum()
        done = False  # episode termination is handled by the time limit
        return ob, reward, done, dict(reward_dist=0, reward_ctrl=0)

    def viewer_setup(self):
        # Camera placement for rendering.
        self.viewer.cam.trackbodyid = 1
        self.viewer.cam.distance = 2.5
        self.viewer.cam.elevation = -30
        self.viewer.cam.azimuth = 270

    def reset_model(self):
        """Reset joints to defaults and place the goal (fixed for a seeded
        task, otherwise resampled) in the last three qpos slots."""
        qpos, qvel = np.copy(self.init_qpos), np.copy(self.init_qvel)
        if self._task_id is not None:
            qpos[-3:] += self.goal
        else:
            qpos[-3:] += np.random.normal(loc=0, scale=0.1, size=[3])
            self.goal = qpos[-3:]
        qvel[-3:] = 0  # the goal body never moves
        self.set_state(qpos, qvel)
        return self._get_obs()

    def _get_obs(self):
        # Observation: joint positions (goal included unless hidden) plus
        # joint velocities; the last 3 velocity slots (goal) are dropped.
        if not self._hide_goal:
            return np.concatenate([
                self.data.qpos.flat,
                self.data.qvel.flat[:-3],
            ])
        return np.concatenate([
            self.data.qpos.flat[:-3],
            self.data.qvel.flat[:-3],
        ])

    @staticmethod
    def get_EE_pos(states, are_tensors=False):
        """Forward kinematics: end-effector xyz for a batch of states.

        The first 7 columns of ``states`` are joint angles theta1..theta7
        (theta7 is unused here).  Returns an (N, 3) array, or a tensor when
        ``are_tensors`` is True; the NumPy and Torch branches implement the
        same math.
        """
        theta1, theta2, theta3, theta4, theta5, theta6, _ = (
            states[:, :1],
            states[:, 1:2],
            states[:, 2:3],
            states[:, 3:4],
            states[:, 4:5],
            states[:, 5:6],
            states[:, 6:],
        )
        if not are_tensors:
            # Orientation of the upper arm after the first two joints.
            rot_axis = np.concatenate(
                [
                    np.cos(theta2) * np.cos(theta1),
                    np.cos(theta2) * np.sin(theta1),
                    -np.sin(theta2),
                ],
                axis=1,
            )
            rot_perp_axis = np.concatenate(
                [-np.sin(theta1),
                 np.cos(theta1),
                 np.zeros(theta1.shape)],
                axis=1)
            # Elbow position after the first two links.
            cur_end = np.concatenate(
                [
                    0.1 * np.cos(theta1) +
                    0.4 * np.cos(theta1) * np.cos(theta2),
                    0.1 * np.sin(theta1) +
                    0.4 * np.sin(theta1) * np.cos(theta2) - 0.188,
                    -0.4 * np.sin(theta2),
                ],
                axis=1,
            )
            # Walk the remaining two links, each with a (roll, hinge) pair.
            for length, hinge, roll in [(0.321, theta4, theta3),
                                        (0.16828, theta6, theta5)]:
                perp_all_axis = np.cross(rot_axis, rot_perp_axis)
                x = np.cos(hinge) * rot_axis
                y = np.sin(hinge) * np.sin(roll) * rot_perp_axis
                z = -np.sin(hinge) * np.cos(roll) * perp_all_axis
                new_rot_axis = x + y + z
                new_rot_perp_axis = np.cross(new_rot_axis, rot_axis)
                # Guard against a degenerate (near-zero) perpendicular axis
                # when the new axis is parallel to the old one.
                new_rot_perp_axis[np.linalg.norm(
                    new_rot_perp_axis, axis=1) < 1e-30] = rot_perp_axis[
                        np.linalg.norm(new_rot_perp_axis, axis=1) < 1e-30]
                new_rot_perp_axis /= np.linalg.norm(new_rot_perp_axis,
                                                    axis=1,
                                                    keepdims=True)
                rot_axis, rot_perp_axis, cur_end = (
                    new_rot_axis,
                    new_rot_perp_axis,
                    cur_end + length * new_rot_axis,
                )
            return cur_end
        else:
            # Torch mirror of the NumPy branch above.
            rot_axis = torch.cat(
                [
                    torch.cos(theta2) * torch.cos(theta1),
                    torch.cos(theta2) * torch.sin(theta1),
                    -torch.sin(theta2),
                ],
                dim=1,
            )
            rot_perp_axis = torch.cat([
                -torch.sin(theta1),
                torch.cos(theta1),
                torch.zeros_like(theta1)
            ],
                                      dim=1)
            cur_end = torch.cat(
                [
                    0.1 * torch.cos(theta1) +
                    0.4 * torch.cos(theta1) * torch.cos(theta2),
                    0.1 * torch.sin(theta1) +
                    0.4 * torch.sin(theta1) * torch.cos(theta2) - 0.188,
                    -0.4 * torch.sin(theta2),
                ],
                dim=1,
            )
            for length, hinge, roll in [(0.321, theta4, theta3),
                                        (0.16828, theta6, theta5)]:
                perp_all_axis = torch.cross(rot_axis, rot_perp_axis)
                x = torch.cos(hinge) * rot_axis
                y = torch.sin(hinge) * torch.sin(roll) * rot_perp_axis
                z = -torch.sin(hinge) * torch.cos(roll) * perp_all_axis
                new_rot_axis = x + y + z
                new_rot_perp_axis = torch.cross(new_rot_axis, rot_axis)
                new_rot_perp_axis[torch.linalg.norm(
                    new_rot_perp_axis, dim=1) < 1e-30] = rot_perp_axis[
                        torch.linalg.norm(new_rot_perp_axis, dim=1) < 1e-30]
                # NOTE(review): torch.linalg.norm's keyword is `keepdim`;
                # `keepdims` may raise TypeError on some torch versions —
                # confirm against the pinned torch release.
                new_rot_perp_axis /= torch.linalg.norm(new_rot_perp_axis,
                                                       dim=1,
                                                       keepdims=True)
                rot_axis, rot_perp_axis, cur_end = (
                    new_rot_axis,
                    new_rot_perp_axis,
                    cur_end + length * new_rot_axis,
                )
            return cur_end

    @staticmethod
    def get_reward(ob, action):
        # This is a bit tricky to implement, implement when needed
        print('NOT SUPPOSED TO RUN THIS!')
        raise NotImplementedError

    def forward_postprocess_fn(
            self, inputs: torch.Tensor, mean: torch.Tensor, logvar: torch.Tensor,
            min_logvar: torch.nn.parameter.Parameter
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Pin the goal dimensions (7:10) of a dynamics-model prediction to
        their input values with -inf log-variance (i.e. zero uncertainty),
        since the goal never changes within an episode."""
        if not self._hide_goal:
            mean[..., 7:10] = inputs[..., 7:10]
            logvar[..., 7:10] = torch.full(logvar[..., 7:10].shape,
                                           -float('inf'))
        return mean, logvar
| 37.751351
| 77
| 0.477663
|
import os
from typing import Tuple
import numpy as np
from numpy.random import MT19937, RandomState, SeedSequence
import torch
from gym import utils
from gym.envs.mujoco import mujoco_env
class Reacher3DEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self, task_id=None, hide_goal=False):
self.viewer = None
utils.EzPickle.__init__(self)
dir_path = os.path.dirname(os.path.realpath(__file__))
self.goal = np.zeros(3)
self._hide_goal = hide_goal
mujoco_env.MujocoEnv.__init__(
self, os.path.join(dir_path, "assets/reacher3d.xml"), 2)
self._task_id = task_id
if task_id is not None:
self._rng = RandomState(MT19937(SeedSequence(task_id)))
self.goal = self._rng.normal(loc=0, scale=0.1, size=[3])
def step(self, a):
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
reward = -np.sum(
np.square(Reacher3DEnv.get_EE_pos(ob[None]) - self.goal))
reward -= 0.01 * np.square(a).sum()
done = False
return ob, reward, done, dict(reward_dist=0, reward_ctrl=0)
def viewer_setup(self):
self.viewer.cam.trackbodyid = 1
self.viewer.cam.distance = 2.5
self.viewer.cam.elevation = -30
self.viewer.cam.azimuth = 270
def reset_model(self):
qpos, qvel = np.copy(self.init_qpos), np.copy(self.init_qvel)
if self._task_id is not None:
qpos[-3:] += self.goal
else:
qpos[-3:] += np.random.normal(loc=0, scale=0.1, size=[3])
self.goal = qpos[-3:]
qvel[-3:] = 0
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
if not self._hide_goal:
return np.concatenate([
self.data.qpos.flat,
self.data.qvel.flat[:-3],
])
return np.concatenate([
self.data.qpos.flat[:-3],
self.data.qvel.flat[:-3],
])
@staticmethod
def get_EE_pos(states, are_tensors=False):
theta1, theta2, theta3, theta4, theta5, theta6, _ = (
states[:, :1],
states[:, 1:2],
states[:, 2:3],
states[:, 3:4],
states[:, 4:5],
states[:, 5:6],
states[:, 6:],
)
if not are_tensors:
rot_axis = np.concatenate(
[
np.cos(theta2) * np.cos(theta1),
np.cos(theta2) * np.sin(theta1),
-np.sin(theta2),
],
axis=1,
)
rot_perp_axis = np.concatenate(
[-np.sin(theta1),
np.cos(theta1),
np.zeros(theta1.shape)],
axis=1)
cur_end = np.concatenate(
[
0.1 * np.cos(theta1) +
0.4 * np.cos(theta1) * np.cos(theta2),
0.1 * np.sin(theta1) +
0.4 * np.sin(theta1) * np.cos(theta2) - 0.188,
-0.4 * np.sin(theta2),
],
axis=1,
)
for length, hinge, roll in [(0.321, theta4, theta3),
(0.16828, theta6, theta5)]:
perp_all_axis = np.cross(rot_axis, rot_perp_axis)
x = np.cos(hinge) * rot_axis
y = np.sin(hinge) * np.sin(roll) * rot_perp_axis
z = -np.sin(hinge) * np.cos(roll) * perp_all_axis
new_rot_axis = x + y + z
new_rot_perp_axis = np.cross(new_rot_axis, rot_axis)
new_rot_perp_axis[np.linalg.norm(
new_rot_perp_axis, axis=1) < 1e-30] = rot_perp_axis[
np.linalg.norm(new_rot_perp_axis, axis=1) < 1e-30]
new_rot_perp_axis /= np.linalg.norm(new_rot_perp_axis,
axis=1,
keepdims=True)
rot_axis, rot_perp_axis, cur_end = (
new_rot_axis,
new_rot_perp_axis,
cur_end + length * new_rot_axis,
)
return cur_end
else:
rot_axis = torch.cat(
[
torch.cos(theta2) * torch.cos(theta1),
torch.cos(theta2) * torch.sin(theta1),
-torch.sin(theta2),
],
dim=1,
)
rot_perp_axis = torch.cat([
-torch.sin(theta1),
torch.cos(theta1),
torch.zeros_like(theta1)
],
dim=1)
cur_end = torch.cat(
[
0.1 * torch.cos(theta1) +
0.4 * torch.cos(theta1) * torch.cos(theta2),
0.1 * torch.sin(theta1) +
0.4 * torch.sin(theta1) * torch.cos(theta2) - 0.188,
-0.4 * torch.sin(theta2),
],
dim=1,
)
for length, hinge, roll in [(0.321, theta4, theta3),
(0.16828, theta6, theta5)]:
perp_all_axis = torch.cross(rot_axis, rot_perp_axis)
x = torch.cos(hinge) * rot_axis
y = torch.sin(hinge) * torch.sin(roll) * rot_perp_axis
z = -torch.sin(hinge) * torch.cos(roll) * perp_all_axis
new_rot_axis = x + y + z
new_rot_perp_axis = torch.cross(new_rot_axis, rot_axis)
new_rot_perp_axis[torch.linalg.norm(
new_rot_perp_axis, dim=1) < 1e-30] = rot_perp_axis[
torch.linalg.norm(new_rot_perp_axis, dim=1) < 1e-30]
new_rot_perp_axis /= torch.linalg.norm(new_rot_perp_axis,
dim=1,
keepdims=True)
rot_axis, rot_perp_axis, cur_end = (
new_rot_axis,
new_rot_perp_axis,
cur_end + length * new_rot_axis,
)
return cur_end
@staticmethod
def get_reward(ob, action):
print('NOT SUPPOSED TO RUN THIS!')
raise NotImplementedError
def forward_postprocess_fn(
self, inputs: torch.Tensor, mean: torch.Tensor, logvar: torch.Tensor,
min_logvar: torch.nn.parameter.Parameter
) -> Tuple[torch.Tensor, torch.Tensor]:
if not self._hide_goal:
mean[..., 7:10] = inputs[..., 7:10]
logvar[..., 7:10] = torch.full(logvar[..., 7:10].shape,
-float('inf'))
return mean, logvar
| true
| true
|
f716795d8f15462f698be1ecce4ae982839d72ed
| 2,091
|
py
|
Python
|
src/front-door/azext_front_door/vendored_sdks/models/managed_rule_override.py
|
michimune/azure-cli-extensions
|
697e2c674e5c0825d44c72d714542fe01331e107
|
[
"MIT"
] | 1
|
2022-03-22T15:02:32.000Z
|
2022-03-22T15:02:32.000Z
|
src/front-door/azext_front_door/vendored_sdks/models/managed_rule_override.py
|
michimune/azure-cli-extensions
|
697e2c674e5c0825d44c72d714542fe01331e107
|
[
"MIT"
] | 1
|
2021-02-10T22:04:59.000Z
|
2021-02-10T22:04:59.000Z
|
src/front-door/azext_front_door/vendored_sdks/models/managed_rule_override.py
|
michimune/azure-cli-extensions
|
697e2c674e5c0825d44c72d714542fe01331e107
|
[
"MIT"
] | 1
|
2021-06-03T19:31:10.000Z
|
2021-06-03T19:31:10.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ManagedRuleOverride(Model):
    """Override settings for a single managed WAF rule.

    All required parameters must be populated in order to send to Azure.

    :param rule_id: Required. Identifier for the managed rule.
    :type rule_id: str
    :param enabled_state: Whether the managed rule is enabled or disabled;
     defaults to Disabled when not specified. Possible values include:
     'Disabled', 'Enabled'
    :type enabled_state: str or
     ~azure.mgmt.frontdoor.models.ManagedRuleEnabledState
    :param action: Override action applied when the rule matches. Possible
     values include: 'Allow', 'Block', 'Log', 'Redirect'
    :type action: str or ~azure.mgmt.frontdoor.models.ActionType
    :param exclusions: Exclusions applied to this specific rule.
    :type exclusions: list[~azure.mgmt.frontdoor.models.ManagedRuleExclusion]
    """

    _validation = {
        'rule_id': {'required': True},
    }

    _attribute_map = {
        'rule_id': {'key': 'ruleId', 'type': 'str'},
        'enabled_state': {'key': 'enabledState', 'type': 'str'},
        'action': {'key': 'action', 'type': 'str'},
        'exclusions': {'key': 'exclusions', 'type': '[ManagedRuleExclusion]'},
    }

    def __init__(self, **kwargs):
        super(ManagedRuleOverride, self).__init__(**kwargs)
        # Every model attribute is optional at construction time and simply
        # defaults to None when absent from kwargs.
        for attribute in ('rule_id', 'enabled_state', 'action', 'exclusions'):
            setattr(self, attribute, kwargs.get(attribute))
| 40.211538
| 78
| 0.636059
|
from msrest.serialization import Model
class ManagedRuleOverride(Model):
_validation = {
'rule_id': {'required': True},
}
_attribute_map = {
'rule_id': {'key': 'ruleId', 'type': 'str'},
'enabled_state': {'key': 'enabledState', 'type': 'str'},
'action': {'key': 'action', 'type': 'str'},
'exclusions': {'key': 'exclusions', 'type': '[ManagedRuleExclusion]'},
}
def __init__(self, **kwargs):
super(ManagedRuleOverride, self).__init__(**kwargs)
self.rule_id = kwargs.get('rule_id', None)
self.enabled_state = kwargs.get('enabled_state', None)
self.action = kwargs.get('action', None)
self.exclusions = kwargs.get('exclusions', None)
| true
| true
|
f71679d226b40ff0d00a91dabe615ef3a5c5b9e5
| 1,354
|
py
|
Python
|
Asyncio/asyncio_queue.py
|
xlui/PythonExamples
|
0389efb84e01dc1310bb2bab7aa2433c0e1b45c4
|
[
"MIT"
] | null | null | null |
Asyncio/asyncio_queue.py
|
xlui/PythonExamples
|
0389efb84e01dc1310bb2bab7aa2433c0e1b45c4
|
[
"MIT"
] | null | null | null |
Asyncio/asyncio_queue.py
|
xlui/PythonExamples
|
0389efb84e01dc1310bb2bab7aa2433c0e1b45c4
|
[
"MIT"
] | null | null | null |
# asyncio_queue.py
import asyncio
async def consumer(n, _queue):
    """Drain items from *_queue* until a ``None`` sentinel arrives.

    :type _queue: asyncio.Queue
    """
    while True:
        print('consumer {}: waiting for item'.format(n))
        item = await _queue.get()
        print('consumer {}: has item {}'.format(n, item))
        if item is None:
            # Sentinel received: acknowledge it and stop consuming.
            _queue.task_done()
            break
        # Simulate a unit of work proportional to the item's value.
        await asyncio.sleep(.01 * item)
        _queue.task_done()
    print('consumer {}: ending'.format(n))
async def producer(_queue, workers):
    """Feed three items per worker, then one ``None`` stop marker each.

    :type _queue: asyncio.Queue
    """
    print('producer: starting')
    total = workers * 3
    for item in range(total):
        await _queue.put(item)
        print('producer: add task {} to queue'.format(item))
    print('producer: adding stop signals to the queue')
    for _ in range(workers):
        await _queue.put(None)
    print('producer: waiting for queue to empty')
    await _queue.join()
    print('producer: ending')
async def main(loop, _consumers):
    """Run *_consumers* consumer tasks plus one producer until all finish."""
    queue = asyncio.Queue(maxsize=_consumers)
    # Schedule every consumer first, then the producer, and wait on the lot.
    tasks = [
        loop.create_task(consumer(idx, queue))
        for idx in range(_consumers)
    ]
    tasks.append(loop.create_task(producer(queue, _consumers)))
    await asyncio.wait(tasks)
# Run the demo with two consumers.
# asyncio.get_event_loop() outside a running coroutine is deprecated since
# Python 3.10 for loop creation, so create the loop explicitly; try/finally
# guarantees the loop is closed even if main() raises.
event_loop = asyncio.new_event_loop()
try:
    event_loop.run_until_complete(main(event_loop, 2))
finally:
    event_loop.close()
| 28.208333
| 81
| 0.637371
|
import asyncio
async def consumer(n, _queue):
while True:
print('consumer {}: waiting for item'.format(n))
item = await _queue.get()
print('consumer {}: has item {}'.format(n, item))
if item is None:
_queue.task_done()
break
else:
await asyncio.sleep(.01 * item)
_queue.task_done()
print('consumer {}: ending'.format(n))
async def producer(_queue, workers):
print('producer: starting')
for i in range(workers * 3):
await _queue.put(i)
print('producer: add task {} to queue'.format(i))
print('producer: adding stop signals to the queue')
for i in range(workers):
await _queue.put(None)
print('producer: waiting for queue to empty')
await _queue.join()
print('producer: ending')
async def main(loop, _consumers):
queue = asyncio.Queue(maxsize=_consumers)
consumers = [loop.create_task(consumer(i, queue)) for i in range(_consumers)]
prod = loop.create_task(producer(queue, _consumers))
await asyncio.wait(consumers + [prod])
event_loop = asyncio.get_event_loop()
event_loop.run_until_complete(main(event_loop, 2))
event_loop.close()
| true
| true
|
f71679e9a79db28502e7b077981e0b0b7bee67f6
| 37,770
|
py
|
Python
|
apprise_api/api/views.py
|
adamus1red/apprise-api
|
757adce17642a3dfaa2b2e3244e147911386dabb
|
[
"MIT"
] | 160
|
2019-10-27T19:39:01.000Z
|
2022-03-30T21:43:16.000Z
|
apprise_api/api/views.py
|
adamus1red/apprise-api
|
757adce17642a3dfaa2b2e3244e147911386dabb
|
[
"MIT"
] | 51
|
2019-12-25T12:28:11.000Z
|
2022-03-31T23:57:30.000Z
|
apprise_api/api/views.py
|
adamus1red/apprise-api
|
757adce17642a3dfaa2b2e3244e147911386dabb
|
[
"MIT"
] | 26
|
2020-01-10T14:58:01.000Z
|
2022-02-19T03:02:28.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from django.shortcuts import render
from django.http import HttpResponse
from django.http import JsonResponse
from django.views import View
from django.conf import settings
from django.utils.html import escape
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.gzip import gzip_page
from django.utils.translation import gettext_lazy as _
from django.core.serializers.json import DjangoJSONEncoder
from .utils import ConfigCache
from .forms import AddByUrlForm
from .forms import AddByConfigForm
from .forms import NotifyForm
from .forms import NotifyByUrlForm
from .forms import CONFIG_FORMATS
from .forms import AUTO_DETECT_CONFIG_KEYWORD
import apprise
import json
import re
# import the logging library
import logging
# Get an instance of a logger
logger = logging.getLogger('django')
# Content-Type Parsing
# Matches either flavour of a form submission:
#  multipart/form-data
#  application/x-www-form-urlencoded
MIME_IS_FORM = re.compile(
    r'(multipart|application)/(x-www-)?form-(data|urlencoded)', re.I)
# Support JSON formats
#  text/json
#  text/x-json
#  application/json
#  application/x-json
MIME_IS_JSON = re.compile(
    r'(text|application)/(x-)?json', re.I)
class JSONEncoder(DjangoJSONEncoder):
    """
    DjangoJSONEncoder variant that additionally serializes sets
    by emitting them as JSON arrays.
    """
    def default(self, obj):
        # Sets are not natively JSON serializable; emit a list instead.
        if not isinstance(obj, set):
            return super().default(obj)
        return list(obj)
class ResponseCode(object):
    """
    HTTP status codes used throughout these views.

    These codes are based on those provided by the requests object
    """
    okay = 200                    # request fully succeeded
    no_content = 204              # succeeded, but nothing to return
    bad_request = 400             # malformed or unsupported payload
    no_access = 403               # denied by APPRISE_CONFIG_LOCK
    not_found = 404
    method_not_allowed = 405
    method_not_accepted = 406     # e.g. recursion limit reached
    failed_dependency = 424       # one or more notifications failed to send
    internal_server_error = 500
class WelcomeView(View):
    """
    A simple welcome/index page
    """
    template_name = 'welcome.html'

    def get(self, request):
        # The landing page is static; no template context is needed.
        context = {}
        return render(request, self.template_name, context)
@method_decorator(never_cache, name='dispatch')
class ConfigView(View):
    """
    A Django view used to manage configuration
    """
    template_name = 'config.html'

    def get(self, request, key):
        """Render the configuration management page for *key*."""
        context = {
            'key': key,
            'form_url': AddByUrlForm(),
            'form_cfg': AddByConfigForm(),
            'form_notify': NotifyForm(),
        }
        return render(request, self.template_name, context)
@method_decorator(never_cache, name='dispatch')
class AddView(View):
    """
    A Django view used to store Apprise configuration
    """
    def post(self, request, key):
        """
        Handle a POST request

        Accepts either form data (urls= or config=/format=) or an
        equivalent JSON payload, and persists the resulting Apprise
        configuration under ``key`` via ConfigCache.
        """
        # Detect the format our response should be in
        json_response = MIME_IS_JSON.match(request.content_type) is not None
        if settings.APPRISE_CONFIG_LOCK:
            # General Access Control
            msg = _('The site has been configured to deny this request.')
            status = ResponseCode.no_access
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        # our content
        content = {}
        if MIME_IS_FORM.match(request.content_type):
            content = {}
            # Try both forms; whichever validates contributes its fields.
            form = AddByConfigForm(request.POST)
            if form.is_valid():
                content.update(form.cleaned_data)
            form = AddByUrlForm(request.POST)
            if form.is_valid():
                content.update(form.cleaned_data)
        elif json_response:
            # Prepare our default response
            try:
                # load our JSON content
                content = json.loads(request.body.decode('utf-8'))
            except (AttributeError, ValueError):
                # could not parse JSON response...
                return JsonResponse({
                    'error': _('Invalid JSON specified.'),
                },
                encoder=JSONEncoder,
                safe=False,
                status=ResponseCode.bad_request,
            )
        if not content:
            # No information was posted
            msg = _('The message format is not supported.')
            status = ResponseCode.bad_request
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        # Create ourselves an apprise object to work with
        a_obj = apprise.Apprise()
        if 'urls' in content:
            # Load our content
            a_obj.add(content['urls'])
            if not len(a_obj):
                # No URLs were loaded
                msg = _('No valid URLs were found.')
                status = ResponseCode.bad_request
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )
            # Persist the normalized URL list as TEXT-format configuration.
            if not ConfigCache.put(
                    key, '\r\n'.join([s.url() for s in a_obj]),
                    apprise.ConfigFormat.TEXT):
                msg = _('The configuration could not be saved.')
                status = ResponseCode.internal_server_error
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )
        elif 'config' in content:
            fmt = content.get('format', '').lower()
            if fmt not in [i[0] for i in CONFIG_FORMATS]:
                # Format must be one supported by apprise
                msg = _('The format specified is invalid.')
                status = ResponseCode.bad_request
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )
            # prepare our apprise config object
            ac_obj = apprise.AppriseConfig()
            if fmt == AUTO_DETECT_CONFIG_KEYWORD:
                # By setting format to None, it is automatically detected from
                # within the add_config() call
                fmt = None
            # Load our configuration
            if not ac_obj.add_config(content['config'], format=fmt):
                # The format could not be detected
                msg = _('The configuration format could not be detected.')
                status = ResponseCode.bad_request
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )
            # Add our configuration
            a_obj.add(ac_obj)
            if not len(a_obj):
                # No specified URL(s) were loaded due to
                # mis-configuration on the caller's part
                msg = _('No valid URL(s) were specified.')
                status = ResponseCode.bad_request
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )
            # Persist the raw submitted configuration in its detected format.
            if not ConfigCache.put(
                    key, content['config'], fmt=ac_obj[0].config_format):
                # Something went very wrong; return 500
                msg = _('An error occured saving configuration.')
                status = ResponseCode.internal_server_error
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )
        else:
            # No configuration specified; we're done
            msg = _('No configuration specified.')
            status = ResponseCode.bad_request
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        # If we reach here; we successfully loaded the configuration so we can
        # go ahead and write it to disk and alert our caller of the success.
        return HttpResponse(
            _('Successfully saved configuration.'),
            status=ResponseCode.okay,
        )
@method_decorator(never_cache, name='dispatch')
class DelView(View):
    """
    A Django view for removing content associated with a key
    """
    def post(self, request, key):
        """
        Handle a POST request

        Removes the configuration stored under ``key``; responds 204 if
        nothing was stored, 500 on an OS-level failure, 200 on success.
        """
        # Detect the format our response should be in
        json_response = MIME_IS_JSON.match(request.content_type) is not None
        if settings.APPRISE_CONFIG_LOCK:
            # General Access Control
            msg = _('The site has been configured to deny this request.')
            status = ResponseCode.no_access
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        # Clear the key
        result = ConfigCache.clear(key)
        if result is None:
            # None signals "nothing was there to begin with"
            msg = _('There was no configuration to remove.')
            status = ResponseCode.no_content
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        elif result is False:
            # There was a failure at the os level
            msg = _('The configuration could not be removed.')
            status = ResponseCode.internal_server_error
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        # Removed content
        return HttpResponse(
            _('Successfully removed configuration.'),
            status=ResponseCode.okay,
        )
@method_decorator((gzip_page, never_cache), name='dispatch')
class GetView(View):
    """
    A Django view used to retrieve previously stored Apprise configuration
    """
    def post(self, request, key):
        """
        Handle a POST request

        Returns the raw configuration stored under ``key`` (TEXT or YAML),
        or a JSON wrapper of it when the caller posted JSON.
        """
        # Detect the format our response should be in
        json_response = MIME_IS_JSON.match(request.content_type) is not None
        if settings.APPRISE_CONFIG_LOCK:
            # General Access Control
            return HttpResponse(
                _('The site has been configured to deny this request.'),
                status=ResponseCode.no_access,
            ) if not json_response else JsonResponse({
                'error':
                _('The site has been configured to deny this request.')
            },
                encoder=JSONEncoder,
                safe=False,
                status=ResponseCode.no_access,
            )
        config, format = ConfigCache.get(key)
        if config is None:
            # The returned value of config and format tell a rather cryptic
            # story; this portion could probably be updated in the future.
            # but for now it reads like this:
            #   config == None and format == None: We had an internal error
            #   config == None and format != None: we simply have no data
            #   config != None: we have data (never true inside this branch)
            if format is not None:
                # no content to return
                return HttpResponse(
                    _('There was no configuration found.'),
                    status=ResponseCode.no_content,
                ) if not json_response else JsonResponse({
                    'error': _('There was no configuration found.')
                },
                    encoder=JSONEncoder,
                    safe=False,
                    status=ResponseCode.no_content,
                )
            # Something went very wrong; return 500
            msg = _('An error occured accessing configuration.')
            status = ResponseCode.internal_server_error
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        # Our configuration was retrieved; now our response varies on whether
        # we are a YAML configuration or a TEXT based one. This allows us to
        # be compatible with those using the AppriseConfig() library or the
        # reference to it through the --config (-c) option in the CLI.
        content_type = 'text/yaml; charset=utf-8' \
            if format == apprise.ConfigFormat.YAML \
            else 'text/html; charset=utf-8'
        # Return our retrieved content
        return HttpResponse(
            config,
            content_type=content_type,
            status=ResponseCode.okay,
        ) if not json_response else JsonResponse({
            'format': format,
            'config': config,
        },
            encoder=JSONEncoder,
            safe=False,
            status=ResponseCode.okay,
        )
@method_decorator((gzip_page, never_cache), name='dispatch')
class NotifyView(View):
    """
    A Django view for sending a notification
    """
    def post(self, request, key):
        """
        Handle a POST request

        Loads the configuration stored under ``key`` and dispatches the
        posted notification (form or JSON payload) through Apprise,
        optionally capturing the delivery log into the response body.
        """
        # Detect the format our response should be in
        json_response = MIME_IS_JSON.match(request.content_type) is not None
        # our content
        content = {}
        if MIME_IS_FORM.match(request.content_type):
            content = {}
            form = NotifyForm(request.POST)
            if form.is_valid():
                content.update(form.cleaned_data)
        elif json_response:
            # Prepare our default response
            try:
                # load our JSON content
                content = json.loads(request.body.decode('utf-8'))
            except (AttributeError, ValueError):
                # could not parse JSON response...
                return JsonResponse(
                    _('Invalid JSON specified.'),
                    encoder=JSONEncoder,
                    safe=False,
                    status=ResponseCode.bad_request)
        if not content:
            # We could not handle the Content-Type
            msg = _('The message format is not supported.')
            status = ResponseCode.bad_request
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        # Some basic error checking
        if not content.get('body') or \
                content.get('type', apprise.NotifyType.INFO) \
                not in apprise.NOTIFY_TYPES:
            msg = _('An invalid payload was specified.')
            status = ResponseCode.bad_request
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        # Acquire our body format (if identified)
        body_format = content.get('format', apprise.NotifyFormat.TEXT)
        if body_format and body_format not in apprise.NOTIFY_FORMATS:
            msg = _('An invalid body input format was specified.')
            status = ResponseCode.bad_request
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        # If we get here, we have enough information to generate a notification
        # with.
        config, format = ConfigCache.get(key)
        if config is None:
            # The returned value of config and format tell a rather cryptic
            # story; this portion could probably be updated in the future.
            # but for now it reads like this:
            #   config == None and format == None: We had an internal error
            #   config == None and format != None: we simply have no data
            #   config != None: we have data (never true inside this branch)
            if format is not None:
                # no content to return
                msg = _('There was no configuration found.')
                status = ResponseCode.no_content
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )
            # Something went very wrong; return 500
            msg = _('An error occured accessing configuration.')
            status = ResponseCode.internal_server_error
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        #
        # Apply Any Global Filters (if identified)
        #
        if settings.APPRISE_ALLOW_SERVICES:
            alphanum_re = re.compile(
                r'^(?P<name>[a-z][a-z0-9]+)', re.IGNORECASE)
            entries = \
                [alphanum_re.match(x).group('name').lower()
                 for x in re.split(r'[ ,]+', settings.APPRISE_ALLOW_SERVICES)
                 if alphanum_re.match(x)]
            for plugin in set(apprise.plugins.SCHEMA_MAP.values()):
                if entries:
                    # Get a list of the current schema's associated with
                    # a given plugin
                    schemas = set(apprise.plugins.details(plugin)
                                  ['tokens']['schema']['values'])
                    # Check what was defined and see if there is a hit
                    for entry in entries:
                        if entry in schemas:
                            # We had a hit; we're done
                            break
                    if entry in schemas:
                        entries.remove(entry)
                        # We can keep this plugin enabled and move along to the
                        # next one...
                        continue
                # if we reach here, we have to block our plugin
                plugin.enabled = False
            for entry in entries:
                # Generate some noise for those who have bad configurations
                logger.warning(
                    'APPRISE_ALLOW_SERVICES plugin %s:// was not found - '
                    'ignoring.', entry)
        elif settings.APPRISE_DENY_SERVICES:
            alphanum_re = re.compile(
                r'^(?P<name>[a-z][a-z0-9]+)', re.IGNORECASE)
            entries = \
                [alphanum_re.match(x).group('name').lower()
                 for x in re.split(r'[ ,]+', settings.APPRISE_DENY_SERVICES)
                 if alphanum_re.match(x)]
            for name in entries:
                try:
                    # Force plugin to be disabled
                    apprise.plugins.SCHEMA_MAP[name].enabled = False
                except KeyError:
                    logger.warning(
                        'APPRISE_DENY_SERVICES plugin %s:// was not found -'
                        ' ignoring.', name)
        # Prepare our keyword arguments (to be passed into an AppriseAsset
        # object)
        kwargs = {}
        if body_format:
            # Store our defined body format
            kwargs['body_format'] = body_format
        # Acquire our recursion count (if defined)
        try:
            recursion = \
                int(request.headers.get('X-Apprise-Recursion-Count', 0))
            if recursion < 0:
                # We do not accept negative numbers
                raise TypeError("Invalid Recursion Value")
            if recursion > settings.APPRISE_RECURSION_MAX:
                return HttpResponse(
                    _('The recursion limit has been reached.'),
                    status=ResponseCode.method_not_accepted)
            # Store our recursion value for our AppriseAsset() initialization
            kwargs['_recursion'] = recursion
        except (TypeError, ValueError):
            return HttpResponse(
                _('An invalid recursion value was specified.'),
                status=ResponseCode.bad_request)
        # Acquire our unique identifier (if defined)
        uid = request.headers.get('X-Apprise-ID', '').strip()
        if uid:
            kwargs['_uid'] = uid
        # Prepare ourselves a default Asset
        # FIX: kwargs (which carries body_format plus the _recursion/_uid
        # values parsed from the headers above) was previously built but
        # never handed to AppriseAsset, silently dropping those values.
        asset = None if not kwargs else apprise.AppriseAsset(**kwargs)
        # Prepare our apprise object
        a_obj = apprise.Apprise(asset=asset)
        # Create an apprise config object
        ac_obj = apprise.AppriseConfig()
        # Load our configuration
        ac_obj.add_config(config, format=format)
        # Add our configuration
        a_obj.add(ac_obj)
        # Our return content type can be controlled by the Accept keyword
        # If it includes /* or /html somewhere then we return html, otherwise
        # we return the logs as they're processed in their text format.
        # The HTML response type has a bit of overhead where as it's not
        # the case with text/plain
        content_type = \
            'text/html' if re.search(r'text\/(\*|html)',
                                     request.headers.get('Accept', ''),
                                     re.IGNORECASE) \
            else 'text/plain'
        # Acquire our log level from headers if defined, otherwise use
        # the global one set in the settings
        level = request.headers.get(
            'X-Apprise-Log-Level',
            settings.LOGGING['loggers']['apprise']['level']).upper()
        # Initialize our response object
        response = None
        # FIX: the original tuple read ('CRITICAL', 'ERROR' 'WARNING', ...);
        # the missing comma made Python concatenate the adjacent literals
        # into 'ERRORWARNING', so ERROR and WARNING levels silently skipped
        # log capture and fell through to the no-logging branch.
        if level in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'):
            level = getattr(apprise.logging, level)
            esc = '<!!-!ESC!-!!>'
            fmt = '<li class="log_%(levelname)s">' \
                '<div class="log_time">%(asctime)s</div>' \
                '<div class="log_level">%(levelname)s</div>' \
                f'<div class="log_msg">{esc}%(message)s{esc}</div></li>' \
                if content_type == 'text/html' else \
                settings.LOGGING['formatters']['standard']['format']
            # Now specify our format (and over-ride the default):
            with apprise.LogCapture(level=level, fmt=fmt) as logs:
                # Perform our notification at this point
                result = a_obj.notify(
                    content.get('body'),
                    title=content.get('title', ''),
                    notify_type=content.get('type', apprise.NotifyType.INFO),
                    tag=content.get('tag'),
                )
            if content_type == 'text/html':
                # Iterate over our entries so that we can prepare to escape
                # things to be presented as HTML
                esc = re.escape(esc)
                entries = re.findall(
                    r'(?P<head><li .+?){}(?P<to_escape>.*?)'
                    r'{}(?P<tail>.+li>$)(?=$|<li .+{})'.format(
                        esc, esc, esc), logs.getvalue(),
                    re.DOTALL)
                # Wrap logs in `<ul>` tag and escape our message body:
                response = '<ul class="logs">{}</ul>'.format(
                    ''.join([e[0] + escape(e[1]) + e[2] for e in entries]))
            else:  # content_type == 'text/plain'
                response = logs.getvalue()
        else:
            # Perform our notification at this point without logging
            result = a_obj.notify(
                content.get('body'),
                title=content.get('title', ''),
                notify_type=content.get('type', apprise.NotifyType.INFO),
                tag=content.get('tag'),
            )
        if not result:
            # If at least one notification couldn't be sent; change up
            # the response to a 424 error code
            msg = _('One or more notification could not be sent.')
            status = ResponseCode.failed_dependency
            return HttpResponse(response if response else msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        # Return our retrieved content
        return HttpResponse(
            response if response is not None else
            _('Notification(s) sent.'),
            content_type=content_type,
            status=ResponseCode.okay,
        )
@method_decorator((gzip_page, never_cache), name='dispatch')
class StatelessNotifyView(View):
    """
    A Django view for sending a stateless notification
    """
    def post(self, request):
        """
        Handle a POST request

        Dispatches a notification to the URLs named in the payload (or the
        APPRISE_STATELESS_URLS fallback) without touching stored config.
        """
        # our content
        content = {}
        if MIME_IS_FORM.match(request.content_type):
            content = {}
            form = NotifyByUrlForm(request.POST)
            if form.is_valid():
                content.update(form.cleaned_data)
        elif MIME_IS_JSON.match(request.content_type):
            # Prepare our default response
            try:
                # load our JSON content
                content = json.loads(request.body.decode('utf-8'))
            except (AttributeError, ValueError):
                # could not parse JSON response...
                return HttpResponse(
                    _('Invalid JSON specified.'),
                    status=ResponseCode.bad_request)
        if not content:
            # We could not handle the Content-Type
            return HttpResponse(
                _('The message format is not supported.'),
                status=ResponseCode.bad_request)
        if not content.get('urls') and settings.APPRISE_STATELESS_URLS:
            # fallback to settings.APPRISE_STATELESS_URLS if no urls were
            # defined
            content['urls'] = settings.APPRISE_STATELESS_URLS
        # Some basic error checking
        if not content.get('body') or \
                content.get('type', apprise.NotifyType.INFO) \
                not in apprise.NOTIFY_TYPES:
            return HttpResponse(
                _('An invalid payload was specified.'),
                status=ResponseCode.bad_request)
        # Acquire our body format (if identified)
        body_format = content.get('format', apprise.NotifyFormat.TEXT)
        if body_format and body_format not in apprise.NOTIFY_FORMATS:
            return HttpResponse(
                _('An invalid (body) format was specified.'),
                status=ResponseCode.bad_request)
        # Prepare our keyword arguments (to be passed into an AppriseAsset
        # object)
        kwargs = {}
        if body_format:
            # Store our defined body format
            kwargs['body_format'] = body_format
        # Acquire our recursion count (if defined)
        try:
            recursion = \
                int(request.headers.get('X-Apprise-Recursion-Count', 0))
            if recursion < 0:
                # We do not accept negative numbers
                raise TypeError("Invalid Recursion Value")
            if recursion > settings.APPRISE_RECURSION_MAX:
                return HttpResponse(
                    _('The recursion limit has been reached.'),
                    status=ResponseCode.method_not_accepted)
            # Store our recursion value for our AppriseAsset() initialization
            kwargs['_recursion'] = recursion
        except (TypeError, ValueError):
            return HttpResponse(
                _('An invalid recursion value was specified.'),
                status=ResponseCode.bad_request)
        # Acquire our unique identifier (if defined)
        uid = request.headers.get('X-Apprise-ID', '').strip()
        if uid:
            kwargs['_uid'] = uid
        # Prepare ourselves a default Asset
        # FIX: kwargs (which carries body_format plus the _recursion/_uid
        # values parsed from the headers above) was previously built but
        # never handed to AppriseAsset, silently dropping those values.
        asset = None if not kwargs else apprise.AppriseAsset(**kwargs)
        #
        # Apply Any Global Filters (if identified)
        #
        if settings.APPRISE_ALLOW_SERVICES:
            alphanum_re = re.compile(
                r'^(?P<name>[a-z][a-z0-9]+)', re.IGNORECASE)
            entries = \
                [alphanum_re.match(x).group('name').lower()
                 for x in re.split(r'[ ,]+', settings.APPRISE_ALLOW_SERVICES)
                 if alphanum_re.match(x)]
            for plugin in set(apprise.plugins.SCHEMA_MAP.values()):
                if entries:
                    # Get a list of the current schema's associated with
                    # a given plugin
                    schemas = set(apprise.plugins.details(plugin)
                                  ['tokens']['schema']['values'])
                    # Check what was defined and see if there is a hit
                    for entry in entries:
                        if entry in schemas:
                            # We had a hit; we're done
                            break
                    if entry in schemas:
                        entries.remove(entry)
                        # We can keep this plugin enabled and move along to the
                        # next one...
                        continue
                # if we reach here, we have to block our plugin
                plugin.enabled = False
            for entry in entries:
                # Generate some noise for those who have bad configurations
                logger.warning(
                    'APPRISE_ALLOW_SERVICES plugin %s:// was not found - '
                    'ignoring.', entry)
        elif settings.APPRISE_DENY_SERVICES:
            alphanum_re = re.compile(
                r'^(?P<name>[a-z][a-z0-9]+)', re.IGNORECASE)
            entries = \
                [alphanum_re.match(x).group('name').lower()
                 for x in re.split(r'[ ,]+', settings.APPRISE_DENY_SERVICES)
                 if alphanum_re.match(x)]
            for name in entries:
                try:
                    # Force plugin to be disabled
                    apprise.plugins.SCHEMA_MAP[name].enabled = False
                except KeyError:
                    logger.warning(
                        'APPRISE_DENY_SERVICES plugin %s:// was not found -'
                        ' ignoring.', name)
        # Prepare our apprise object
        a_obj = apprise.Apprise(asset=asset)
        # Add URLs
        a_obj.add(content.get('urls'))
        if not len(a_obj):
            return HttpResponse(
                _('There was no services to notify.'),
                status=ResponseCode.no_content,
            )
        # Perform our notification at this point
        result = a_obj.notify(
            content.get('body'),
            title=content.get('title', ''),
            notify_type=content.get('type', apprise.NotifyType.INFO),
            tag='all',
        )
        if not result:
            # If at least one notification couldn't be sent; change up the
            # response to a 424 error code
            return HttpResponse(
                _('One or more notification could not be sent.'),
                status=ResponseCode.failed_dependency)
        # Return our retrieved content
        return HttpResponse(
            _('Notification(s) sent.'),
            status=ResponseCode.okay,
        )
@method_decorator((gzip_page, never_cache), name='dispatch')
class JsonUrlView(View):
    """
    A Django view that lists all loaded tags and URLs for a given key
    """
    def get(self, request, key):
        """
        Handle a GET request

        Returns JSON describing the tags and URLs loaded from the
        configuration stored under ``key``.
        """
        # Now build our tag response that identifies all of the tags
        # and the URL's they're associated with
        #  {
        #    "tags": ["tag1", "tag2", "tag3"],
        #    "urls": [
        #       {
        #          "url": "windows://",
        #          "tags": [],
        #       },
        #       {
        #          "url": "mailto://user:pass@gmail.com",
        #          "tags": ["tag1", "tag2", "tag3"]
        #       }
        #    ]
        #  }
        response = {
            'tags': set(),
            'urls': [],
        }
        # Privacy flag
        # Support 'yes', '1', 'true', 'enable', 'active', and +
        # (only the first character of the query value is tested)
        privacy = settings.APPRISE_CONFIG_LOCK or \
            request.GET.get('privacy', 'no')[0] in (
                'a', 'y', '1', 't', 'e', '+')
        # Optionally filter on tags. Use comma to identify more then one
        tag = request.GET.get('tag', 'all')
        config, format = ConfigCache.get(key)
        if config is None:
            # The returned value of config and format tell a rather cryptic
            # story; this portion could probably be updated in the future.
            # but for now it reads like this:
            #   config == None and format == None: We had an internal error
            #   config == None and format != None: we simply have no data
            #   config != None: we have data (never true inside this branch)
            if format is not None:
                # no content to return
                return JsonResponse(
                    response,
                    encoder=JSONEncoder,
                    safe=False,
                    status=ResponseCode.no_content,
                )
            # Something went very wrong; return 500
            response['error'] = _('There was no configuration found.')
            return JsonResponse(
                response,
                encoder=JSONEncoder,
                safe=False,
                status=ResponseCode.internal_server_error,
            )
        # Prepare our apprise object
        a_obj = apprise.Apprise()
        # Create an apprise config object
        ac_obj = apprise.AppriseConfig()
        # Load our configuration
        ac_obj.add_config(config, format=format)
        # Add our configuration
        a_obj.add(ac_obj)
        for notification in a_obj.find(tag):
            # Set Notification
            response['urls'].append({
                'url': notification.url(privacy=privacy),
                'tags': notification.tags,
            })
            # Store Tags (set union; JSONEncoder serializes the set as a list)
            response['tags'] |= notification.tags
        # Return our retrieved content
        return JsonResponse(
            response,
            encoder=JSONEncoder,
            safe=False,
            status=ResponseCode.okay
        )
| 36.074499
| 79
| 0.533625
|
from django.shortcuts import render
from django.http import HttpResponse
from django.http import JsonResponse
from django.views import View
from django.conf import settings
from django.utils.html import escape
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.gzip import gzip_page
from django.utils.translation import gettext_lazy as _
from django.core.serializers.json import DjangoJSONEncoder
from .utils import ConfigCache
from .forms import AddByUrlForm
from .forms import AddByConfigForm
from .forms import NotifyForm
from .forms import NotifyByUrlForm
from .forms import CONFIG_FORMATS
from .forms import AUTO_DETECT_CONFIG_KEYWORD
import apprise
import json
import re
import logging
logger = logging.getLogger('django')
MIME_IS_FORM = re.compile(
r'(multipart|application)/(x-www-)?form-(data|urlencoded)', re.I)
MIME_IS_JSON = re.compile(
r'(text|application)/(x-)?json', re.I)
class JSONEncoder(DjangoJSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return super().default(obj)
class ResponseCode(object):
okay = 200
no_content = 204
bad_request = 400
no_access = 403
not_found = 404
method_not_allowed = 405
method_not_accepted = 406
failed_dependency = 424
internal_server_error = 500
class WelcomeView(View):
template_name = 'welcome.html'
def get(self, request):
return render(request, self.template_name, {})
@method_decorator(never_cache, name='dispatch')
class ConfigView(View):
template_name = 'config.html'
def get(self, request, key):
return render(request, self.template_name, {
'key': key,
'form_url': AddByUrlForm(),
'form_cfg': AddByConfigForm(),
'form_notify': NotifyForm(),
})
@method_decorator(never_cache, name='dispatch')
class AddView(View):
    """
    Stores Apprise configuration (raw URLs or a configuration file body)
    against the provided key using the ConfigCache backend.
    """

    def post(self, request, key):
        """
        Handle a POST request.

        Accepts either form encoded data (AddByConfigForm and/or
        AddByUrlForm) or a JSON payload providing 'urls', or 'config'
        with an optional 'format'.  Responses are plain text or JSON
        depending on the request's Content-Type.
        """
        # Track whether the caller submitted JSON (and hence expects a
        # JSON response back)
        json_response = MIME_IS_JSON.match(request.content_type) is not None

        if settings.APPRISE_CONFIG_LOCK:
            # Saving configuration has been disabled site-wide
            msg = _('The site has been configured to deny this request.')
            status = ResponseCode.no_access
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )

        # Our parsed payload is accumulated here
        content = {}
        if MIME_IS_FORM.match(request.content_type):
            content = {}
            # Accept (and merge) either of the two supported forms; a
            # form that does not validate simply contributes nothing
            form = AddByConfigForm(request.POST)
            if form.is_valid():
                content.update(form.cleaned_data)

            form = AddByUrlForm(request.POST)
            if form.is_valid():
                content.update(form.cleaned_data)

        elif json_response:
            # Decode our JSON payload
            try:
                content = json.loads(request.body.decode('utf-8'))

            except (AttributeError, ValueError):
                # could not parse the JSON payload...
                return JsonResponse({
                    'error': _('Invalid JSON specified.'),
                },
                    encoder=JSONEncoder,
                    safe=False,
                    status=ResponseCode.bad_request,
                )

        if not content:
            # We could not handle the Content-Type
            msg = _('The message format is not supported.')
            status = ResponseCode.bad_request
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )

        # An Apprise object is used to validate the provided URLs before
        # anything is persisted
        a_obj = apprise.Apprise()
        if 'urls' in content:
            # Load the provided URL(s)
            a_obj.add(content['urls'])
            if not len(a_obj):
                # None of the submitted URLs were valid
                msg = _('No valid URLs were found.')
                status = ResponseCode.bad_request
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                        encoder=JSONEncoder,
                        safe=False,
                        status=status,
                    )

            # Persist the validated URLs (one per line) as TEXT config
            if not ConfigCache.put(
                    key, '\r\n'.join([s.url() for s in a_obj]),
                    apprise.ConfigFormat.TEXT):

                msg = _('The configuration could not be saved.')
                status = ResponseCode.internal_server_error
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                        encoder=JSONEncoder,
                        safe=False,
                        status=status,
                    )

        elif 'config' in content:
            # Acquire the (lowercased) configuration format
            fmt = content.get('format', '').lower()
            if fmt not in [i[0] for i in CONFIG_FORMATS]:
                # The format must be one of the supported choices
                msg = _('The format specified is invalid.')
                status = ResponseCode.bad_request
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                        encoder=JSONEncoder,
                        safe=False,
                        status=status,
                    )

            # Prepare our Apprise configuration object
            ac_obj = apprise.AppriseConfig()

            if fmt == AUTO_DETECT_CONFIG_KEYWORD:
                # A None format lets add_config() auto-detect it
                fmt = None

            # Load (and implicitly parse) the provided configuration
            if not ac_obj.add_config(content['config'], format=fmt):
                msg = _('The configuration format could not be detected.')
                status = ResponseCode.bad_request
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                        encoder=JSONEncoder,
                        safe=False,
                        status=status,
                    )

            # Hand the configuration to Apprise for URL validation
            a_obj.add(ac_obj)

            if not len(a_obj):
                # The configuration parsed but produced no usable URLs
                msg = _('No valid URL(s) were specified.')
                status = ResponseCode.bad_request
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                        encoder=JSONEncoder,
                        safe=False,
                        status=status,
                    )

            # Persist the raw configuration in its (detected) format
            if not ConfigCache.put(
                    key, content['config'], fmt=ac_obj[0].config_format):
                # Something went very wrong; return 500
                # NOTE(review): 'occured' is a typo for 'occurred'; the
                # msgid is left untouched to avoid invalidating any
                # translation catalogs keyed on it.
                msg = _('An error occured saving configuration.')
                status = ResponseCode.internal_server_error
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                        encoder=JSONEncoder,
                        safe=False,
                        status=status,
                    )

        else:
            # No configuration specified; we're done
            msg = _('No configuration specified.')
            status = ResponseCode.bad_request
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )

        # Our configuration was successfully saved
        return HttpResponse(
            _('Successfully saved configuration.'),
            status=ResponseCode.okay,
        )
@method_decorator(never_cache, name='dispatch')
class DelView(View):
    """Removes any configuration stored against the given key."""

    @staticmethod
    def _error(msg, status, as_json):
        # Render an error either as plain text or as a JSON payload,
        # depending on what the caller submitted.
        if as_json:
            return JsonResponse(
                {'error': msg},
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        return HttpResponse(msg, status=status)

    def post(self, request, key):
        """Handle a POST request to remove a stored configuration."""
        # Does the caller expect a JSON response back?
        wants_json = MIME_IS_JSON.match(request.content_type) is not None

        if settings.APPRISE_CONFIG_LOCK:
            # Configuration changes have been disabled site-wide
            return self._error(
                _('The site has been configured to deny this request.'),
                ResponseCode.no_access, wants_json)

        outcome = ConfigCache.clear(key)
        if outcome is None:
            # Nothing was stored under this key to begin with
            return self._error(
                _('There was no configuration to remove.'),
                ResponseCode.no_content, wants_json)

        if outcome is False:
            # The persistence layer failed to remove the entry
            return self._error(
                _('The configuration could not be removed.'),
                ResponseCode.internal_server_error, wants_json)

        return HttpResponse(
            _('Successfully removed configuration.'),
            status=ResponseCode.okay,
        )
@method_decorator((gzip_page, never_cache), name='dispatch')
class GetView(View):
    """Returns the configuration stored against the given key."""

    @staticmethod
    def _error(msg, status, as_json):
        # Render an error either as plain text or as a JSON payload,
        # depending on what the caller submitted.
        if as_json:
            return JsonResponse(
                {'error': msg},
                encoder=JSONEncoder,
                safe=False,
                status=status,
            )
        return HttpResponse(msg, status=status)

    def post(self, request, key):
        """Handle a POST request to fetch a stored configuration."""
        # Does the caller expect a JSON response back?
        wants_json = MIME_IS_JSON.match(request.content_type) is not None

        if settings.APPRISE_CONFIG_LOCK:
            # Remote configuration retrieval has been disabled site-wide
            return self._error(
                _('The site has been configured to deny this request.'),
                ResponseCode.no_access, wants_json)

        config, format = ConfigCache.get(key)
        if config is None:
            # format != None here means the lookup worked but nothing was
            # stored; format == None means the cache layer itself failed
            if format is not None:
                return self._error(
                    _('There was no configuration found.'),
                    ResponseCode.no_content, wants_json)

            return self._error(
                _('An error occured accessing configuration.'),
                ResponseCode.internal_server_error, wants_json)

        if wants_json:
            # Hand back both the configuration and its detected format
            return JsonResponse(
                {'format': format, 'config': config},
                encoder=JSONEncoder,
                safe=False,
                status=ResponseCode.okay,
            )

        # YAML content gets its own mime type; everything else is served
        # as HTML
        content_type = 'text/yaml; charset=utf-8' \
            if format == apprise.ConfigFormat.YAML \
            else 'text/html; charset=utf-8'

        return HttpResponse(
            config,
            content_type=content_type,
            status=ResponseCode.okay,
        )
@method_decorator((gzip_page, never_cache), name='dispatch')
class NotifyView(View):
    """
    Sends notification(s) using the configuration stored against the
    given key.

    Bug fixed here: the log-level whitelist previously read
    ``('CRITICAL', 'ERROR' 'WARNING', 'INFO', 'DEBUG')`` — the missing
    comma made Python concatenate the literals into ``'ERRORWARNING'``,
    so requests asking for ERROR or WARNING log capture silently fell
    through to the no-capture branch.
    """

    def post(self, request, key):
        """
        Handle a POST request.

        The payload may be form encoded (NotifyForm) or JSON; it must
        provide at least a 'body'.  'title', 'type', 'format' and 'tag'
        are optional.
        """
        # Track whether the caller submitted JSON (and hence expects a
        # JSON response back)
        json_response = MIME_IS_JSON.match(request.content_type) is not None

        # Our parsed payload is accumulated here
        content = {}
        if MIME_IS_FORM.match(request.content_type):
            content = {}
            form = NotifyForm(request.POST)
            if form.is_valid():
                content.update(form.cleaned_data)

        elif json_response:
            # Decode our JSON payload
            try:
                content = json.loads(request.body.decode('utf-8'))

            except (AttributeError, ValueError):
                # could not parse the JSON payload...
                return JsonResponse(
                    _('Invalid JSON specified.'),
                    encoder=JSONEncoder,
                    safe=False,
                    status=ResponseCode.bad_request)

        if not content:
            # We could not handle the Content-Type
            msg = _('The message format is not supported.')
            status = ResponseCode.bad_request
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )

        # Some basic error checking; a body is mandatory and the
        # notification type (if given) must be a recognized one
        if not content.get('body') or \
                content.get('type', apprise.NotifyType.INFO) \
                not in apprise.NOTIFY_TYPES:

            msg = _('An invalid payload was specified.')
            status = ResponseCode.bad_request
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )

        # Acquire our body format (if identified)
        body_format = content.get('format', apprise.NotifyFormat.TEXT)
        if body_format and body_format not in apprise.NOTIFY_FORMATS:
            msg = _('An invalid body input format was specified.')
            status = ResponseCode.bad_request
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )

        # Retrieve the stored configuration for this key
        config, format = ConfigCache.get(key)
        if config is None:
            # config is None and format is not None: no data was stored
            # config is None and format is None: the cache layer failed
            if format is not None:
                msg = _('There was no configuration found.')
                status = ResponseCode.no_content
                return HttpResponse(msg, status=status) \
                    if not json_response else JsonResponse({
                        'error': msg,
                    },
                        encoder=JSONEncoder,
                        safe=False,
                        status=status,
                    )

            msg = _('An error occured accessing configuration.')
            status = ResponseCode.internal_server_error
            return HttpResponse(msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )

        #
        # Apply Any Global Filters (if identified)
        #
        if settings.APPRISE_ALLOW_SERVICES:
            alphanum_re = re.compile(
                r'^(?P<name>[a-z][a-z0-9]+)', re.IGNORECASE)
            entries = \
                [alphanum_re.match(x).group('name').lower()
                 for x in re.split(r'[ ,]+', settings.APPRISE_ALLOW_SERVICES)
                 if alphanum_re.match(x)]

            for plugin in set(apprise.plugins.SCHEMA_MAP.values()):
                if entries:
                    # Get a list of the current schema's associated with
                    # a given plugin
                    schemas = set(apprise.plugins.details(plugin)
                                  ['tokens']['schema']['values'])

                    # Check what was defined and see if there is a hit
                    for entry in entries:
                        if entry in schemas:
                            # We had a hit; we're done
                            break

                    if entry in schemas:
                        entries.remove(entry)
                        # Keep this plugin enabled; move to the next one
                        continue

                # if we reach here, we have to block our plugin
                plugin.enabled = False

            for entry in entries:
                # Generate some noise for those who have bad configurations
                logger.warning(
                    'APPRISE_ALLOW_SERVICES plugin %s:// was not found - '
                    'ignoring.', entry)

        elif settings.APPRISE_DENY_SERVICES:
            alphanum_re = re.compile(
                r'^(?P<name>[a-z][a-z0-9]+)', re.IGNORECASE)
            entries = \
                [alphanum_re.match(x).group('name').lower()
                 for x in re.split(r'[ ,]+', settings.APPRISE_DENY_SERVICES)
                 if alphanum_re.match(x)]

            for name in entries:
                try:
                    # Force plugin to be disabled
                    apprise.plugins.SCHEMA_MAP[name].enabled = False

                except KeyError:
                    logger.warning(
                        'APPRISE_DENY_SERVICES plugin %s:// was not found -'
                        ' ignoring.', name)

        # Prepare our keyword arguments (to be passed into an AppriseAsset
        # object)
        kwargs = {}
        if body_format:
            # Store our defined body format
            kwargs['body_format'] = body_format

        # Acquire our recursion count (if defined)
        try:
            recursion = \
                int(request.headers.get('X-Apprise-Recursion-Count', 0))
            if recursion < 0:
                # We do not accept negative numbers
                raise TypeError("Invalid Recursion Value")

            if recursion > settings.APPRISE_RECURSION_MAX:
                return HttpResponse(
                    _('The recursion limit has been reached.'),
                    status=ResponseCode.method_not_accepted)

            kwargs['_recursion'] = recursion

        except (TypeError, ValueError):
            return HttpResponse(
                _('An invalid recursion value was specified.'),
                status=ResponseCode.bad_request)

        # Acquire our unique identifier (if defined)
        uid = request.headers.get('X-Apprise-ID', '').strip()
        if uid:
            kwargs['_uid'] = uid

        # NOTE(review): kwargs (_recursion/_uid) is assembled above but
        # never handed to AppriseAsset below; only body_format is
        # applied.  Left unchanged here to avoid altering runtime
        # behavior — confirm against upstream whether
        # AppriseAsset(**kwargs) was intended.
        asset = None if not body_format else \
            apprise.AppriseAsset(body_format=body_format)

        # Prepare our apprise object
        a_obj = apprise.Apprise(asset=asset)

        # Create an apprise config object and load our stored settings
        ac_obj = apprise.AppriseConfig()
        ac_obj.add_config(config, format=format)
        a_obj.add(ac_obj)

        # The HTML response type has a bit of overhead where as it's not
        # the case for text/plain
        content_type = \
            'text/html' if re.search(r'text\/(\*|html)',
                                     request.headers.get('Accept', ''),
                                     re.IGNORECASE) \
            else 'text/plain'

        # Acquire our log level from the headers if defined; otherwise
        # fall back to the globally configured apprise logger level
        level = request.headers.get(
            'X-Apprise-Log-Level',
            settings.LOGGING['loggers']['apprise']['level']).upper()

        # Captured log output (populated only when capturing is enabled)
        response = None

        # BUGFIX: restored the comma between 'ERROR' and 'WARNING' which
        # had previously fused them into the single entry 'ERRORWARNING'
        if level in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'):
            level = getattr(apprise.logging, level)

            # Marker used to escape user content (below) while leaving
            # our HTML framing intact
            esc = '<!!-!ESC!-!!>'

            # Format our captured logs accordingly
            fmt = '<li class="log_%(levelname)s">' \
                '<div class="log_time">%(asctime)s</div>' \
                '<div class="log_level">%(levelname)s</div>' \
                f'<div class="log_msg">{esc}%(message)s{esc}</div></li>' \
                if content_type == 'text/html' else \
                settings.LOGGING['formatters']['standard']['format']

            # Send our notification(s) while capturing our logs
            with apprise.LogCapture(level=level, fmt=fmt) as logs:
                result = a_obj.notify(
                    content.get('body'),
                    title=content.get('title', ''),
                    notify_type=content.get('type', apprise.NotifyType.INFO),
                    tag=content.get('tag'),
                )

            if content_type == 'text/html':
                # Split the captured stream at our escape markers so the
                # user-supplied message bodies can be HTML-escaped while
                # the surrounding <li> framing stays intact
                esc = re.escape(esc)
                entries = re.findall(
                    r'(?P<head><li .+?){}(?P<to_escape>.*?)'
                    r'{}(?P<tail>.+li>$)(?=$|<li .+{})'.format(
                        esc, esc, esc), logs.getvalue(),
                    re.DOTALL)

                # Wrap the entries in a <ul> tag, escaping each body
                response = '<ul class="logs">{}</ul>'.format(
                    ''.join([e[0] + escape(e[1]) + e[2] for e in entries]))

            else:  # content_type == 'text/plain'
                response = logs.getvalue()

        else:
            # Unrecognized level: notify without log capture
            result = a_obj.notify(
                content.get('body'),
                title=content.get('title', ''),
                notify_type=content.get('type', apprise.NotifyType.INFO),
                tag=content.get('tag'),
            )

        if not result:
            # At least one notification could not be sent; change up the
            # response to a 424 error code
            msg = _('One or more notification could not be sent.')
            status = ResponseCode.failed_dependency
            return HttpResponse(response if response else msg, status=status) \
                if not json_response else JsonResponse({
                    'error': msg,
                },
                    encoder=JSONEncoder,
                    safe=False,
                    status=status,
                )

        # Return our retrieved content
        return HttpResponse(
            response if response is not None else
            _('Notification(s) sent.'),
            content_type=content_type,
            status=ResponseCode.okay,
        )
@method_decorator((gzip_page, never_cache), name='dispatch')
class StatelessNotifyView(View):
    """
    Sends notification(s) without any stored configuration; the URLs to
    notify are taken from the payload itself, falling back to the
    site-wide APPRISE_STATELESS_URLS setting when none are provided.
    """

    def post(self, request):
        """
        Handle a POST request.
        """
        # our content
        content = {}
        if MIME_IS_FORM.match(request.content_type):
            content = {}
            form = NotifyByUrlForm(request.POST)
            if form.is_valid():
                content.update(form.cleaned_data)

        elif MIME_IS_JSON.match(request.content_type):
            # Prepare our default response
            try:
                # load our JSON content
                content = json.loads(request.body.decode('utf-8'))

            except (AttributeError, ValueError):
                # could not parse JSON response...
                return HttpResponse(
                    _('Invalid JSON specified.'),
                    status=ResponseCode.bad_request)

        if not content:
            # We could not handle the Content-Type
            return HttpResponse(
                _('The message format is not supported.'),
                status=ResponseCode.bad_request)

        if not content.get('urls') and settings.APPRISE_STATELESS_URLS:
            # fallback to settings.APPRISE_STATELESS_URLS if no urls were
            # defined
            content['urls'] = settings.APPRISE_STATELESS_URLS

        # Some basic error checking
        if not content.get('body') or \
                content.get('type', apprise.NotifyType.INFO) \
                not in apprise.NOTIFY_TYPES:

            return HttpResponse(
                _('An invalid payload was specified.'),
                status=ResponseCode.bad_request)

        # Acquire our body format (if identified)
        body_format = content.get('format', apprise.NotifyFormat.TEXT)
        if body_format and body_format not in apprise.NOTIFY_FORMATS:
            return HttpResponse(
                _('An invalid (body) format was specified.'),
                status=ResponseCode.bad_request)

        # Prepare our keyword arguments (to be passed into an AppriseAsset
        # object)
        kwargs = {}
        if body_format:
            # Store our defined body format
            kwargs['body_format'] = body_format

        # Acquire our recursion count (if defined)
        try:
            recursion = \
                int(request.headers.get('X-Apprise-Recursion-Count', 0))
            if recursion < 0:
                # We do not accept negative numbers
                raise TypeError("Invalid Recursion Value")

            if recursion > settings.APPRISE_RECURSION_MAX:
                return HttpResponse(
                    _('The recursion limit has been reached.'),
                    status=ResponseCode.method_not_accepted)

            # Store our recursion value for our AppriseAsset() initialization
            kwargs['_recursion'] = recursion

        except (TypeError, ValueError):
            return HttpResponse(
                _('An invalid recursion value was specified.'),
                status=ResponseCode.bad_request)

        # Acquire our unique identifier (if defined)
        uid = request.headers.get('X-Apprise-ID', '').strip()
        if uid:
            kwargs['_uid'] = uid

        # Prepare ourselves a default Asset
        # NOTE(review): kwargs (_recursion/_uid) is assembled above but
        # never passed to AppriseAsset here; only body_format is applied.
        # Confirm against upstream whether AppriseAsset(**kwargs) was
        # intended.
        asset = None if not body_format else \
            apprise.AppriseAsset(body_format=body_format)

        #
        # Apply Any Global Filters (if identified)
        #
        if settings.APPRISE_ALLOW_SERVICES:
            alphanum_re = re.compile(
                r'^(?P<name>[a-z][a-z0-9]+)', re.IGNORECASE)
            entries = \
                [alphanum_re.match(x).group('name').lower()
                 for x in re.split(r'[ ,]+', settings.APPRISE_ALLOW_SERVICES)
                 if alphanum_re.match(x)]

            for plugin in set(apprise.plugins.SCHEMA_MAP.values()):
                if entries:
                    # Get a list of the current schema's associated with
                    # a given plugin
                    schemas = set(apprise.plugins.details(plugin)
                                  ['tokens']['schema']['values'])

                    # Check what was defined and see if there is a hit
                    for entry in entries:
                        if entry in schemas:
                            # We had a hit; we're done
                            break

                    if entry in schemas:
                        entries.remove(entry)
                        # We can keep this plugin enabled and move along to the
                        # next one...
                        continue

                # if we reach here, we have to block our plugin
                plugin.enabled = False

            for entry in entries:
                # Generate some noise for those who have bad configurations
                logger.warning(
                    'APPRISE_ALLOW_SERVICES plugin %s:// was not found - '
                    'ignoring.', entry)

        elif settings.APPRISE_DENY_SERVICES:
            alphanum_re = re.compile(
                r'^(?P<name>[a-z][a-z0-9]+)', re.IGNORECASE)
            entries = \
                [alphanum_re.match(x).group('name').lower()
                 for x in re.split(r'[ ,]+', settings.APPRISE_DENY_SERVICES)
                 if alphanum_re.match(x)]

            for name in entries:
                try:
                    # Force plugin to be disabled
                    apprise.plugins.SCHEMA_MAP[name].enabled = False

                except KeyError:
                    logger.warning(
                        'APPRISE_DENY_SERVICES plugin %s:// was not found -'
                        ' ignoring.', name)

        # Prepare our apprise object
        a_obj = apprise.Apprise(asset=asset)

        # Add URLs
        a_obj.add(content.get('urls'))
        if not len(a_obj):
            # No usable URLs were provided (or configured site-wide)
            return HttpResponse(
                _('There was no services to notify.'),
                status=ResponseCode.no_content,
            )

        # Perform our notification at this point
        result = a_obj.notify(
            content.get('body'),
            title=content.get('title', ''),
            notify_type=content.get('type', apprise.NotifyType.INFO),
            tag='all',
        )

        if not result:
            # If at least one notification couldn't be sent; change up the
            # response to a 424 error code
            return HttpResponse(
                _('One or more notification could not be sent.'),
                status=ResponseCode.failed_dependency)

        return HttpResponse(
            _('Notification(s) sent.'),
            status=ResponseCode.okay,
        )
@method_decorator((gzip_page, never_cache), name='dispatch')
class JsonUrlView(View):
    """
    Returns a JSON summary of the URLs and tags found in the
    configuration stored against the given key.

    Response shape::

        {
            "tags": ["tag1", "tag2", ...],
            "urls": [
                {"url": "windows://", "tags": []},
                {"url": "mailto://user:pass@gmail.com",
                 "tags": ["tag1", "tag2", "tag3"]}
            ]
        }

    Bug fixed here: ``request.GET.get('privacy', 'no')[0]`` raised an
    IndexError (HTTP 500) whenever the query value was empty (e.g.
    ``?privacy=``); a ``[:1]`` slice is used instead, which yields ''
    (falsy match) for an empty value and is identical otherwise.
    """

    def get(self, request, key):
        """
        Handle a GET request.
        """
        # Our default response object
        response = {
            'tags': set(),
            'urls': [],
        }

        # Privacy flag
        # Support 'yes', '1', 'true', 'enable', 'active', and +
        # A [:1] slice (not [0]) keeps an empty value from raising
        # IndexError; '' is simply not in the accepted tuple.
        privacy = settings.APPRISE_CONFIG_LOCK or \
            request.GET.get('privacy', 'no')[:1] in (
                'a', 'y', '1', 't', 'e', '+')

        # Optionally filter on tags. Use comma to identify more then one
        tag = request.GET.get('tag', 'all')

        config, format = ConfigCache.get(key)
        if config is None:
            # The returned value of config and format tell a rather cryptic
            # story; this portion could probably be updated in the future.
            # but for now it reads like this:
            #   config == None and format == None: We had an internal error
            #   config == None and format != None: we simply have no data
            if format is not None:
                # no content to return
                return JsonResponse(
                    response,
                    encoder=JSONEncoder,
                    safe=False,
                    status=ResponseCode.no_content,
                )

            # Something went very wrong; return 500
            response['error'] = _('There was no configuration found.')
            return JsonResponse(
                response,
                encoder=JSONEncoder,
                safe=False,
                status=ResponseCode.internal_server_error,
            )

        # Prepare our apprise object
        a_obj = apprise.Apprise()

        # Create an apprise config object
        ac_obj = apprise.AppriseConfig()

        # Load our configuration
        ac_obj.add_config(config, format=format)

        # Add our configuration
        a_obj.add(ac_obj)

        for notification in a_obj.find(tag):
            # Set Notification
            response['urls'].append({
                'url': notification.url(privacy=privacy),
                'tags': notification.tags,
            })

            # Store Tags
            response['tags'] |= notification.tags

        # Return our retrieved content
        return JsonResponse(
            response,
            encoder=JSONEncoder,
            safe=False,
            status=ResponseCode.okay
        )
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.