hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9a4c2a15e6606e6383c32e6f1df42877d87fa7d0 | 1,485 | py | Python | backend/server/apps/endpoints/serializers.py | Stinger101/my_uno_ml_service | 47d19f6e5e19e73c465b7ddca889324c9bd5862f | [
"MIT"
] | null | null | null | backend/server/apps/endpoints/serializers.py | Stinger101/my_uno_ml_service | 47d19f6e5e19e73c465b7ddca889324c9bd5862f | [
"MIT"
] | null | null | null | backend/server/apps/endpoints/serializers.py | Stinger101/my_uno_ml_service | 47d19f6e5e19e73c465b7ddca889324c9bd5862f | [
"MIT"
] | null | null | null | from rest_framework import serializers
from apps.endpoints.models import Endpoint
from apps.endpoints.models import MLAlgorithm
from apps.endpoints.models import MLAlgorithmStatus
from apps.endpoints.models import MLRequest
class EndpointSerializer(serializers.ModelSerializer):
    """Read-only representation of an Endpoint."""

    class Meta:
        model = Endpoint
        fields = ("id", "name", "owner", "created_at")
        # Every exposed field is read-only.
        read_only_fields = fields
class MLAlgorithmSerializer(serializers.ModelSerializer):
    """Read-only representation of an MLAlgorithm, enriched with its latest status."""

    current_status = serializers.SerializerMethodField(read_only=True)

    def get_current_status(self, obj):
        """Return the ``status`` string of the most recent MLAlgorithmStatus."""
        statuses = MLAlgorithmStatus.objects.filter(parent_mlalgorithm=obj)
        return statuses.latest('created_at').status

    class Meta:
        model = MLAlgorithm
        fields = ("id", "name", "description", "code", "version", "owner",
                  "created_at", "parent_endpoint", "current_status")
        # Every exposed field is read-only.
        read_only_fields = fields
class MLAlgorithmStatusSerializer(serializers.ModelSerializer):
    """Serialize MLAlgorithmStatus; ``id`` and ``active`` cannot be set by clients."""

    class Meta:
        model = MLAlgorithmStatus
        fields = ("id", "active", "status", "created_by", "created_at",
                  "parent_mlalgorithm")
        read_only_fields = ("id", "active")
class MLRequestSerializer(serializers.ModelSerializer):
    """Serialize MLRequest; only the ``feedback`` field accepts writes."""

    class Meta:
        model = MLRequest
        fields = ("id", "input_data", "full_response", "response", "feedback",
                  "created_at", "parent_mlalgorithm")
        read_only_fields = ("id", "input_data", "full_response", "response",
                            "created_at", "parent_mlalgorithm")
| 39.078947 | 133 | 0.73468 |
2b09f213ba9fbf2fb91ccef9d23c69e72de2fc5b | 4,736 | py | Python | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/update_device_shadow_desired_data_request.py | NQLoong/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/update_device_shadow_desired_data_request.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/update_device_shadow_desired_data_request.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class UpdateDeviceShadowDesiredDataRequest:
    """Request model for the IoTDA "update device shadow desired data" operation.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values must be masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'instance_id': 'str',
        'device_id': 'str',
        'body': 'UpdateDesireds'
    }

    attribute_map = {
        'instance_id': 'Instance-Id',
        'device_id': 'device_id',
        'body': 'body'
    }

    def __init__(self, instance_id=None, device_id=None, body=None):
        """UpdateDeviceShadowDesiredDataRequest - a model defined in huaweicloud sdk"""
        self._instance_id = None
        self._device_id = None
        self._body = None
        self.discriminator = None
        # device_id is always assigned; instance_id and body stay None when omitted.
        if instance_id is not None:
            self.instance_id = instance_id
        self.device_id = device_id
        if body is not None:
            self.body = body

    @property
    def instance_id(self):
        """Gets the instance_id of this UpdateDeviceShadowDesiredDataRequest.

        **Parameter description**: instance ID; unique identifier of each instance
        under physical multi-tenancy. Regular Huawei Cloud tenants do not need to
        pass this parameter; it is only required when accessing the API from the
        management plane in physical multi-tenant scenarios.
        (Translated from the original Chinese docstring.)

        :return: The instance_id of this UpdateDeviceShadowDesiredDataRequest.
        :rtype: str
        """
        return self._instance_id

    @instance_id.setter
    def instance_id(self, instance_id):
        """Sets the instance_id of this UpdateDeviceShadowDesiredDataRequest.

        See the property getter for the parameter description.

        :param instance_id: The instance_id of this UpdateDeviceShadowDesiredDataRequest.
        :type: str
        """
        self._instance_id = instance_id

    @property
    def device_id(self):
        """Gets the device_id of this UpdateDeviceShadowDesiredDataRequest.

        **Parameter description**: device ID, uniquely identifying a device. It is
        either specified directly when the device is registered, or allocated by
        the IoT platform, in which case it is generated as
        "product_id" + "_" + "node_id".
        **Value range**: at most 128 characters; only letters, digits,
        underscores (_) and hyphens (-) are allowed.
        (Translated from the original Chinese docstring.)

        :return: The device_id of this UpdateDeviceShadowDesiredDataRequest.
        :rtype: str
        """
        return self._device_id

    @device_id.setter
    def device_id(self, device_id):
        """Sets the device_id of this UpdateDeviceShadowDesiredDataRequest.

        See the property getter for the parameter description.

        :param device_id: The device_id of this UpdateDeviceShadowDesiredDataRequest.
        :type: str
        """
        self._device_id = device_id

    @property
    def body(self):
        """Gets the body of this UpdateDeviceShadowDesiredDataRequest.

        :return: The body of this UpdateDeviceShadowDesiredDataRequest.
        :rtype: UpdateDesireds
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this UpdateDeviceShadowDesiredDataRequest.

        :param body: The body of this UpdateDeviceShadowDesiredDataRequest.
        :type: UpdateDesireds
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize nested model objects contained in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize nested model objects contained in dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # Never expose sensitive attribute values.
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdateDeviceShadowDesiredDataRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.234568 | 158 | 0.595228 |
fc816dcf4657a5740ddf7b8e44f64a411542bd07 | 8,057 | py | Python | rasa/core/slots.py | ALT-F1/rasa | 8d4a734616f72aca14f122716fbf338c42644188 | [
"Apache-2.0"
] | null | null | null | rasa/core/slots.py | ALT-F1/rasa | 8d4a734616f72aca14f122716fbf338c42644188 | [
"Apache-2.0"
] | 4 | 2020-09-26T00:31:43.000Z | 2022-02-10T01:13:39.000Z | rasa/core/slots.py | ALT-F1/rasa | 8d4a734616f72aca14f122716fbf338c42644188 | [
"Apache-2.0"
] | 2 | 2020-11-04T04:03:30.000Z | 2021-01-27T04:41:42.000Z | import logging
import warnings
from rasa.core import utils
from rasa.utils.common import class_from_module_path
from typing import Any, Dict, List, NoReturn, Optional, Text, Type
logger = logging.getLogger(__name__)
class Slot:
    """Base class for typed slots.

    A slot stores a named value; subclasses declare a ``type_name`` and define
    how the value is converted into feature values via ``as_feature()``.
    """

    # Identifier under which this slot type is referenced (e.g. in a domain file).
    type_name = None

    def __init__(
        self,
        name: Text,
        initial_value: Any = None,
        value_reset_delay: Optional[int] = None,
        auto_fill: bool = True,
    ) -> None:
        self.name = name
        self.value = initial_value
        self.initial_value = initial_value
        self._value_reset_delay = value_reset_delay
        # NOTE(review): presumably controls automatic filling from extracted
        # entities -- confirm against the entity-filling code.
        self.auto_fill = auto_fill

    def feature_dimensionality(self) -> int:
        """How many features this single slot creates.

        The dimensionality of the array returned by `as_feature` needs
        to correspond to this value."""
        return 1

    def has_features(self) -> bool:
        """Indicate if the slot creates any features."""
        return self.feature_dimensionality() != 0

    def value_reset_delay(self) -> Optional[int]:
        """After how many turns the slot should be reset to the initial_value.

        If the delay is set to `None`, the slot will keep its value forever."""
        # TODO: FUTURE this needs to be implemented - slots are not reset yet
        return self._value_reset_delay

    def as_feature(self) -> NoReturn:
        """Convert the slot value into features; must be overridden by subclasses."""
        raise NotImplementedError(
            "Each slot type needs to specify how its "
            "value can be converted to a feature. Slot "
            "'{}' is a generic slot that can not be used "
            "for predictions. Make sure you add this "
            "slot to your domain definition, specifying "
            "the type of the slot. If you implemented "
            "a custom slot type class, make sure to "
            "implement `.as_feature()`."
            "".format(self.name)
        )

    def reset(self) -> None:
        """Restore the slot to its initial value."""
        self.value = self.initial_value

    def __str__(self) -> Text:
        return f"{self.__class__.__name__}({self.name}: {self.value})"

    def __repr__(self) -> Text:
        return f"<{self.__class__.__name__}({self.name}: {self.value})>"

    @staticmethod
    def resolve_by_type(type_name) -> Type["Slot"]:
        """Returns a slots class by its type name."""
        # First look among the known Slot subclasses, then fall back to
        # importing the name as a dotted module path (user-defined slot type).
        for cls in utils.all_subclasses(Slot):
            if cls.type_name == type_name:
                return cls
        try:
            return class_from_module_path(type_name)
        except (ImportError, AttributeError):
            raise ValueError(
                "Failed to find slot type, '{}' is neither a known type nor "
                "user-defined. If you are creating your own slot type, make "
                "sure its module path is correct.".format(type_name)
            )

    def persistence_info(self) -> Dict[str, Any]:
        """Return the data needed to recreate this slot definition."""
        return {
            "type": utils.module_path_from_instance(self),
            "initial_value": self.initial_value,
            "auto_fill": self.auto_fill,
        }
class FloatSlot(Slot):
    """Slot holding a float, featurized as its position within [min_value, max_value]."""

    type_name = "float"

    def __init__(
        self,
        name: Text,
        initial_value: Optional[float] = None,
        value_reset_delay: Optional[int] = None,
        auto_fill: bool = True,
        max_value: float = 1.0,
        min_value: float = 0.0,
    ) -> None:
        super().__init__(name, initial_value, value_reset_delay, auto_fill)
        self.max_value = max_value
        self.min_value = min_value
        # The range must be non-empty; reject min >= max outright.
        if min_value >= max_value:
            raise ValueError(
                f"Float slot ('{self.name}') created with an invalid range "
                f"using min ({self.min_value}) and max ({self.max_value}) values. Make sure "
                f"min is smaller than max."
            )
        out_of_range = initial_value is not None and not (
            min_value <= initial_value <= max_value
        )
        if out_of_range:
            warnings.warn(
                f"Float slot ('{self.name}') created with an initial value "
                f"{self.value} outside of configured min ({self.min_value}) "
                f"and max ({self.max_value}) values."
            )

    def as_feature(self) -> List[float]:
        """Clamp the value into the configured range and scale it to [0, 1]."""
        try:
            numeric = float(self.value)
        except (TypeError, ValueError):
            # Unset or non-numeric values yield a neutral feature.
            return [0.0]
        clamped = min(self.max_value, max(self.min_value, numeric))
        span = abs(self.max_value - self.min_value)
        if span <= 0:
            span = 1
        return [(clamped - self.min_value) / span]

    def persistence_info(self) -> Dict[Text, Any]:
        info = super().persistence_info()
        info["max_value"] = self.max_value
        info["min_value"] = self.min_value
        return info
class BooleanSlot(Slot):
    """Slot holding a truth value; featurized as [is_set, is_nonzero]."""

    type_name = "bool"

    def as_feature(self) -> List[float]:
        if self.value is None:
            return [0.0, 0.0]
        try:
            is_nonzero = float(self.value) != 0.0
        except (TypeError, ValueError):
            # we couldn't convert the value to float - using default value
            return [0.0, 0.0]
        return [1.0, float(is_nonzero)]

    def feature_dimensionality(self) -> int:
        features = self.as_feature()
        return len(features)
class TextSlot(Slot):
    """Slot holding arbitrary text; featurized only as set/unset."""

    type_name = "text"

    def as_feature(self) -> List[float]:
        # 1.0 when a value is present, 0.0 otherwise.
        return [float(self.value is not None)]
class ListSlot(Slot):
    """Slot holding a list; featurized as 1.0 when non-empty, else 0.0."""

    type_name = "list"

    def as_feature(self) -> List[float]:
        try:
            non_empty = self.value is not None and len(self.value) > 0
        except (TypeError, ValueError):
            # value has no length - treat it like an unset slot
            return [0.0]
        return [1.0] if non_empty else [0.0]
class UnfeaturizedSlot(Slot):
    """Slot that deliberately contributes no features to the state."""

    type_name = "unfeaturized"

    def as_feature(self) -> List[float]:
        return []

    def feature_dimensionality(self) -> int:
        return 0
class CategoricalSlot(Slot):
    """Slot restricted to a fixed set of values, one-hot encoded as features.

    Values are compared case-insensitively; unknown values featurize to all
    zeros and trigger a warning.
    """

    type_name = "categorical"

    def __init__(
        self,
        name: Text,
        values: Optional[List[Any]] = None,
        initial_value: Any = None,
        value_reset_delay: Optional[int] = None,
        auto_fill: bool = True,
    ) -> None:
        super().__init__(name, initial_value, value_reset_delay, auto_fill)
        # Store values lowercased so comparisons are case-insensitive.
        self.values = [str(v).lower() for v in values] if values else []

    def persistence_info(self) -> Dict[Text, Any]:
        d = super().persistence_info()
        d["values"] = self.values
        return d

    def as_feature(self) -> List[float]:
        """One-hot encode the current value against the configured values."""
        r = [0.0] * self.feature_dimensionality()
        try:
            for i, v in enumerate(self.values):
                if v == str(self.value).lower():
                    r[i] = 1.0
                    break
            else:
                # No break: the current value is not among the configured ones.
                if self.value is not None:
                    warnings.warn(
                        f"Categorical slot '{self.name}' is set to a value "
                        f"('{self.value}') "
                        "that is not specified in the domain. "
                        "Value will be ignored and the slot will "
                        "behave as if no value is set. "
                        "Make sure to add all values a categorical "
                        "slot should store to the domain."
                    )
        except (TypeError, ValueError):
            # Fall through and return the all-zero encoding.
            # (The original code had a redundant duplicate `return r` here.)
            logger.exception("Failed to featurize categorical slot.")
        return r

    def feature_dimensionality(self) -> int:
        return len(self.values)
class DataSlot(Slot):
    """Abstract slot for structured data.

    Defaults to ``value_reset_delay=1`` (note: resetting is not implemented
    yet -- see ``Slot.value_reset_delay``). Subclasses must implement
    ``as_feature()``.
    """

    def __init__(
        self,
        name: Text,
        initial_value: Any = None,
        value_reset_delay: Optional[int] = 1,
        auto_fill: bool = True,
    ):
        super().__init__(name, initial_value, value_reset_delay, auto_fill)

    def as_feature(self) -> List[float]:
        raise NotImplementedError(
            "Each slot type needs to specify how its "
            "value can be converted to a feature."
        )
| 32.35743 | 87 | 0.568698 |
b2d91da936d0f8b3adf7f52d5d8785658cf9cb8d | 18,746 | py | Python | cmsplugin_cascade/plugin_base.py | teklager/djangocms-cascade | adc461f7054c6c0f88bc732aefd03b157df2f514 | [
"MIT"
] | 139 | 2015-01-08T22:27:06.000Z | 2021-08-19T03:36:58.000Z | cmsplugin_cascade/plugin_base.py | teklager/djangocms-cascade | adc461f7054c6c0f88bc732aefd03b157df2f514 | [
"MIT"
] | 286 | 2015-01-02T14:15:14.000Z | 2022-03-22T11:00:12.000Z | cmsplugin_cascade/plugin_base.py | teklager/djangocms-cascade | adc461f7054c6c0f88bc732aefd03b157df2f514 | [
"MIT"
] | 91 | 2015-01-16T15:06:23.000Z | 2022-03-23T23:36:54.000Z | from django.core.exceptions import ImproperlyConfigured
from django.forms import MediaDefiningClass, ModelForm
from django.utils.functional import lazy
from django.utils.module_loading import import_string
from django.utils.text import format_lazy
from django.utils.safestring import SafeText, mark_safe
from entangled.forms import EntangledModelFormMixin
from cms.plugin_base import CMSPluginBaseMetaclass, CMSPluginBase
from cms.utils.compat.dj import is_installed
from cmsplugin_cascade import app_settings
from .mixins import CascadePluginMixin
from .models_base import CascadeModelBase
from .models import CascadeElement, SharableCascadeElement
from .generic.mixins import SectionMixin, SectionModelMixin
from .sharable.forms import SharableGlossaryMixin
from .strides import register_stride
from .extra_fields.mixins import ExtraFieldsMixin
from .hide_plugins import HidePluginMixin
from .render_template import RenderTemplateMixin
from .utils import remove_duplicates
# Lazily evaluated variant of mark_safe, usable with lazy (e.g. translated) strings.
mark_safe_lazy = lazy(mark_safe, str)

# Registry of proxy models created on the fly by create_proxy_model();
# maps the model name to the tuple of bases it was built from.
fake_proxy_models = {}
def create_proxy_model(name, model_mixins, base_model, attrs=None, module=None):
    """
    Create a Django Proxy Model on the fly, to be used by any Cascade Plugin.

    :param name: base name of the plugin; "Model" is appended to form the model name.
    :param model_mixins: tuple of extra mixin classes placed before ``base_model``.
    :param base_model: the concrete Cascade model the proxy is based upon.
    :param attrs: optional extra class attributes for the new model.
    :param module: value for the generated class' ``__module__``.
    """
    from django.apps import apps

    class Meta:
        proxy = True
        app_label = 'cmsplugin_cascade'

    name = str(name + 'Model')
    try:
        # Reuse an already registered model to avoid creating it twice.
        Model = apps.get_registered_model(Meta.app_label, name)
    except LookupError:
        bases = model_mixins + (base_model,)
        attrs = dict(attrs or {}, Meta=Meta, __module__=module)
        Model = type(name, bases, attrs)
        # Record the bases so the proxy can be reconstructed later.
        fake_proxy_models[name] = bases
    return Model
class CascadePluginMixinMetaclass(MediaDefiningClass):
    """
    Metaclass which records, for every class declaring a ``ring_plugin``
    attribute, the ``ring_plugin`` names of its base classes. The collected
    mapping is later used to resolve client-side ("ring") plugin inheritance.
    """

    # Maps a ring_plugin name to the list of ring_plugin names it derives from.
    ring_plugin_bases = {}

    def __new__(cls, name, bases, attrs):
        ring_plugin = attrs.get('ring_plugin')
        if ring_plugin:
            # Collect ring_plugin names of the bases, skipping bases which
            # declare the same ring_plugin as this class itself.
            ring_plugin_bases = [b.ring_plugin for b in bases
                                 if hasattr(b, 'ring_plugin') and b.ring_plugin != ring_plugin]
            # remember the dependencies
            cls.ring_plugin_bases.setdefault(ring_plugin, [])
            cls.ring_plugin_bases[ring_plugin].extend(ring_plugin_bases)
            cls.ring_plugin_bases[ring_plugin] = remove_duplicates(cls.ring_plugin_bases[ring_plugin])
        new_class = super().__new__(cls, name, bases, attrs)
        return new_class
class CascadePluginMixinBase(metaclass=CascadePluginMixinMetaclass):
    """
    Base class for mixins used by other Cascade plugins; using it ensures the
    ``ring_plugin`` bookkeeping of ``CascadePluginMixinMetaclass`` is applied.
    """
class CascadePluginBaseMetaclass(CascadePluginMixinMetaclass, CMSPluginBaseMetaclass):
    """
    All plugins from djangocms-cascade can be instantiated in different ways. In order to allow this
    by a user defined configuration, this meta-class conditionally inherits from additional mixin
    classes.
    """

    # Per-feature plugin configuration, resolved once from the app settings.
    plugins_with_extra_fields = dict(app_settings.CMSPLUGIN_CASCADE['plugins_with_extra_fields'])
    plugins_with_extra_mixins = dict(app_settings.CMSPLUGIN_CASCADE['plugins_with_extra_mixins'])
    plugins_with_bookmark = list(app_settings.CMSPLUGIN_CASCADE['plugins_with_bookmark'])
    plugins_with_sharables = dict(app_settings.CMSPLUGIN_CASCADE['plugins_with_sharables'])
    plugins_with_extra_render_templates = app_settings.CMSPLUGIN_CASCADE['plugins_with_extra_render_templates'].keys()
    allow_plugin_hiding = app_settings.CMSPLUGIN_CASCADE['allow_plugin_hiding']
    exclude_hiding_plugin = list(app_settings.CMSPLUGIN_CASCADE['exclude_hiding_plugin'])

    def __new__(cls, name, bases, attrs):
        model_mixins = attrs.pop('model_mixins', ())
        # Conditionally prepend mixin classes, depending on how the plugin
        # named `name` is configured in the app settings.
        if (cls.allow_plugin_hiding and name not in cls.exclude_hiding_plugin and 'name' in attrs and
            not attrs.get('text_enabled')):
            bases = (HidePluginMixin,) + bases
        if name in cls.plugins_with_extra_fields:
            bases = (ExtraFieldsMixin,) + bases
        if name in cls.plugins_with_extra_mixins:
            if isinstance(cls.plugins_with_extra_mixins[name], tuple):
                bases = cls.plugins_with_extra_mixins[name] + bases
            else:
                bases = (cls.plugins_with_extra_mixins[name],) + bases
        if name in cls.plugins_with_bookmark:
            bases = (SectionMixin,) + bases
            model_mixins = (SectionModelMixin,) + model_mixins
        if name in cls.plugins_with_sharables:
            # Sharable plugins use a model which can reference a shared glossary.
            bases = (SharableGlossaryMixin,) + bases
            attrs['sharable_fields'] = cls.plugins_with_sharables[name]
            base_model = SharableCascadeElement
        else:
            base_model = CascadeElement
        if name in cls.plugins_with_extra_render_templates:
            bases = (RenderTemplateMixin,) + bases
        if name == 'SegmentPlugin':
            # SegmentPlugin shall additionally inherit from configured mixin classes
            model_mixins += tuple(import_string(mc[0]) for mc in app_settings.CMSPLUGIN_CASCADE['segmentation_mixins'])
        if 'model' in attrs:
            # the plugin overrides the CascadeModel
            if not issubclass(attrs['model'], CascadeModelBase):
                msg = "Cascade Plugins, overriding the model, must inherit from `CascadeModelBase`."
                raise ImproperlyConfigured(msg)
        else:
            attrs['model'] = create_proxy_model(name, model_mixins, base_model, module=attrs.get('__module__'))
        if is_installed('reversion'):
            import reversion.revisions
            if not reversion.revisions.is_registered(base_model):
                reversion.revisions.register(base_model)
        # handle ambiguous plugin names by appending a symbol
        if 'name' in attrs and app_settings.CMSPLUGIN_CASCADE['plugin_prefix']:
            attrs['name'] = format_lazy('{} {}', app_settings.CMSPLUGIN_CASCADE['plugin_prefix'], attrs['name'])
        register_stride(name, bases, attrs, model_mixins)
        if name == 'CascadePluginBase':
            # The base plugin itself additionally inherits the CMS plugin machinery.
            bases += (CascadePluginMixin, CMSPluginBase,)
        return super().__new__(cls, name, bases, attrs)
class TransparentWrapper:
    """
    Add this mixin class to other Cascade plugins, wishing to be added transparently between other
    plugins restricting parent-children relationships.
    For instance: A BootstrapColumnPlugin can only be added as a child to a RowPlugin. This means
    that no other wrapper can be added between those two plugins. By adding this mixin class we can
    allow any plugin to behave transparently, just as if it would not have be inserted into the DOM
    tree. When moving plugins in- and out of transparent wrapper plugins, always reload the page, so
    that the parent-children relationships can be updated.
    """

    child_plugins_cache = False
    parent_plugins_cache = False

    @classmethod
    def get_child_classes(cls, slot, page, instance=None):
        if hasattr(cls, 'direct_child_classes'):
            return cls.direct_child_classes
        child_classes = set(super().get_child_classes(slot, page, instance))
        # Walk up the plugin tree until a non-transparent ancestor (or the
        # placeholder root) is found, and merge in its allowed children.
        while True:
            instance = instance.get_parent_instance() if instance and instance.parent else None
            if instance is None:
                child_classes.update(super().get_child_classes(slot, page, instance))
                return list(child_classes)
            if not issubclass(instance.plugin_class, TransparentWrapper):
                child_classes.update(instance.plugin_class.get_child_classes(slot, page, instance))
                return list(child_classes)

    @classmethod
    def get_parent_classes(cls, slot, page, instance=None):
        if hasattr(cls, 'direct_parent_classes'):
            return cls.direct_parent_classes
        parent_classes = set(super().get_parent_classes(slot, page, instance) or [])
        if isinstance(instance, CascadeElement):
            # Additionally accept the plugin type of the grandparent as parent.
            instance = instance.get_parent_instance() if instance and instance.parent else None
            if instance is not None:
                parent_classes.add(instance.plugin_type)
        return list(parent_classes)
class TransparentContainer(TransparentWrapper):
    """
    This mixin class marks each plugin inheriting from it, as a transparent container.
    Such a plugin is added to the global list of entitled parent plugins, which is required if we
    want to place and move all other Cascade plugins below this container.
    Often, transparent wrapping classes come in pairs. For instance the `AccordionPlugin` containing
    one or more `PanelPlugin`. Here the `AccordionPlugin` must inherit from `TransparentWrapper`,
    whereas the `AccordionPlugin` must inherit from the `TransparentContainer`.
    """

    @staticmethod
    def get_plugins():
        """Return (and memoize) the names of all registered TransparentContainer plugins."""
        from cms.plugin_pool import plugin_pool
        global _leaf_transparent_plugins
        try:
            # Fast path: the list was already computed during a previous call.
            return _leaf_transparent_plugins
        except NameError:
            _leaf_transparent_plugins = [
                plugin.__name__ for plugin in plugin_pool.get_all_plugins()
                if issubclass(plugin, TransparentContainer)
            ]
            return _leaf_transparent_plugins
class CascadeFormMixin(EntangledModelFormMixin):
    """Entangled form base for Cascade plugins; declares no ``glossary`` fields by default."""

    class Meta:
        entangled_fields = {'glossary': []}
class CascadePluginBase(metaclass=CascadePluginBaseMetaclass):
    """Common base class for all djangocms-cascade plugins."""

    change_form_template = 'cascade/admin/change_form.html'
    model_mixins = ()  # model mixins added to the final Django model
    parent_classes = None
    alien_child_classes = False
    form = CascadeFormMixin  # safety fallback for plugins without any form

    class Media:
        css = {'all': ['cascade/css/admin/partialfields.css', 'cascade/css/admin/editplugin.css']}
        js = ['cascade/js/underscore.js', 'cascade/js/ring.js']

    def __init__(self, model=None, admin_site=None):
        super().__init__(model, admin_site)

    def __repr__(self):
        return "<class '{}'>".format(self.__class__.__name__)

    @classmethod
    def super(cls, klass, instance):
        """
        Plugins inheriting from CascadePluginBaseMetaclass can have two different base classes,
        :class:`cmsplugin_cascade.plugin_base.CMSPluginBase` and :class:`cmsplugin_cascade.strides.StridePluginBase`.
        Therefore in order to call a method from an inherited class, use this "super" wrapping method.

        >>> cls.super(MyPlugin, self).a_method()
        """
        return super(klass, instance)

    @classmethod
    def _get_parent_classes_transparent(cls, slot, page, instance=None):
        """
        Return all parent classes including those marked as "transparent".
        """
        parent_classes = super().get_parent_classes(slot, page, instance)
        if parent_classes is None:
            if cls.get_require_parent(slot, page) is False:
                # No parent required: keep the unrestricted (None) semantics.
                return
            parent_classes = []
        # add all plugins marked as 'transparent', since they all are potential parents
        parent_classes = set(parent_classes)
        parent_classes.update(TransparentContainer.get_plugins())
        return list(parent_classes)

    @classmethod
    def get_child_classes(cls, slot, page, instance=None):
        """Determine the allowed child plugin names for this plugin."""
        plugin_type = cls.__name__
        child_classes = set()
        for child_class in cls.get_child_plugin_candidates(slot, page):
            if issubclass(child_class, CascadePluginBase):
                own_child_classes = getattr(cls, 'child_classes', None) or []
                child_parent_classes = child_class._get_parent_classes_transparent(slot, page, instance)
                # Accept the candidate when it names us as parent, when we name
                # it as child, or when it accepts any parent (None).
                if isinstance(child_parent_classes, (list, tuple)) and plugin_type in child_parent_classes:
                    child_classes.add(child_class)
                elif plugin_type in own_child_classes:
                    child_classes.add(child_class)
                elif child_parent_classes is None:
                    child_classes.add(child_class)
            else:
                # Non-Cascade ("alien") plugins are only accepted if configured.
                if cls.alien_child_classes and child_class.__name__ in app_settings.CMSPLUGIN_CASCADE['alien_plugins']:
                    child_classes.add(child_class)
        return list(cc.__name__ for cc in child_classes)

    @classmethod
    def get_parent_classes(cls, slot, page, instance=None):
        return cls._get_parent_classes_transparent(slot, page, instance)

    @classmethod
    def get_identifier(cls, instance):
        """
        Hook to return a description for the current model.
        """
        return SafeText()

    @classmethod
    def sanitize_model(cls, instance):
        """
        This method is called, before the model is written to the database. It can be overloaded
        to sanitize the current models, in case a parent model changed in a way, which might
        affect this plugin.
        This method shall return `True`, in case a model change was necessary, otherwise it shall
        return `False` to prevent a useless database update.
        """
        if instance.glossary is None:
            instance.glossary = {}
        return False

    @classmethod
    def get_data_representation(cls, instance):
        """
        Return a representation of the given instance suitable for a serialized representation.
        """
        return {'glossary': instance.glossary, 'pk': instance.pk}

    @classmethod
    def add_inline_elements(cls, instance, inlines):
        """
        Hook to create (sortable) inline elements for the given instance.
        """

    @classmethod
    def add_shared_reference(cls, instance, shared_glossary):
        """
        Hook to add a reference pointing onto an existing SharedGlossary instance.
        """

    def extend_children(self, parent, wanted_children, child_class, child_glossary=None):
        """
        Extend the number of children so that the parent object contains wanted children.
        No child will be removed if wanted_children is smaller than the current number of children.
        """
        from cms.api import add_plugin
        current_children = parent.get_num_children()
        for _ in range(current_children, wanted_children):
            child = add_plugin(parent.placeholder, child_class, parent.language, target=parent)
            if isinstance(child_glossary, dict):
                child.glossary.update(child_glossary)
            child.save()

    def get_form(self, request, obj=None, **kwargs):
        # Wrap the configured form so it always mixes in CascadeFormMixin and ModelForm.
        form = kwargs.get('form', self.form)
        assert issubclass(form, EntangledModelFormMixin), "Form must inherit from EntangledModelFormMixin"
        bases = (form,)
        if not issubclass(form, CascadeFormMixin):
            bases = (CascadeFormMixin,) + bases
        if not issubclass(form, ModelForm):
            bases += (ModelForm,)
        kwargs['form'] = type(form.__name__, bases, {})
        return super().get_form(request, obj, **kwargs)

    def get_parent_instance(self, request=None, obj=None):
        """
        Get the parent model instance corresponding to this plugin. When adding a new plugin, the
        parent might not be available. Therefore as fallback, pass in the request object.
        """
        try:
            parent_id = obj.parent_id
        except AttributeError:
            try:
                # TODO: self.parent presumably is not used anymore in CMS-3.4, because it doesn't
                # make sense anyway, since the plugin instances shall know their parents, not the
                # plugins.
                parent_id = self.parent.id
            except AttributeError:
                if request:
                    parent_id = request.GET.get('plugin_parent', None)
                    if parent_id is None:
                        from cms.models import CMSPlugin
                        try:
                            # Last resort: look up the parent via the plugin id in the URL.
                            parent_id = CMSPlugin.objects.filter(id=request.resolver_match.args[0]
                                ).only("parent_id").order_by('?').first().parent_id
                        except (AttributeError, IndexError):
                            parent_id = None
                else:
                    parent_id = None
        # The parent may live in any of the registered Cascade element models.
        for model in CascadeModelBase._get_cascade_elements():
            try:
                return model.objects.get(id=parent_id)
            except model.DoesNotExist:
                continue

    def get_previous_instance(self, obj):
        """
        Return the previous plugin instance for the given object.
        This differs from `obj.get_prev_sibling()` which returns an unsorted sibling.
        """
        ordered_siblings = obj.get_siblings().filter(placeholder=obj.placeholder).order_by('position')
        pos = list(ordered_siblings).index(obj.cmsplugin_ptr)
        if pos > 0:
            prev_sibling = ordered_siblings[pos - 1]
            return prev_sibling.get_bound_plugin()

    def get_next_instance(self, obj):
        """
        Return the next plugin instance for the given object.
        This differs from `obj.get_next_sibling()` which returns an unsorted sibling.
        """
        ordered_siblings = obj.get_siblings().filter(placeholder=obj.placeholder).order_by('position')
        pos = list(ordered_siblings).index(obj.cmsplugin_ptr)
        if pos < ordered_siblings.count() - 1:
            next_sibling = ordered_siblings[pos + 1]
            return next_sibling.get_bound_plugin()

    def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
        # Expose the client-side plugin inheritance map and some labels to the template.
        ring_plugin_bases = {ring_plugin: ['django.cascade.{}'.format(b) for b in bases]
                             for ring_plugin, bases in CascadePluginMixinMetaclass.ring_plugin_bases.items()}
        context.update(
            ring_plugin_bases=ring_plugin_bases,
            plugin_title=format_lazy("{} {} Plugin", self.module, self.name),
            plugin_intro=mark_safe(getattr(self, 'intro_html', '')),
            plugin_footnote=mark_safe(getattr(self, 'footnote_html', '')),
        )
        if hasattr(self, 'ring_plugin'):
            context.update(
                ring_plugin=self.ring_plugin,
            )
        # True when the form declares neither entangled glossary fields nor untangled fields.
        context['empty_form'] = not (context['adminform'].form._meta.entangled_fields.get('glossary') or
                                     context['adminform'].form._meta.untangled_fields)
        return super().render_change_form(request, context, add, change, form_url, obj)

    def in_edit_mode(self, request, placeholder):
        """
        Returns True, if the plugin is in "edit mode".
        """
        toolbar = getattr(request, 'toolbar', None)
        edit_mode = getattr(toolbar, 'edit_mode_active', False) and getattr(placeholder, 'is_editable', True)
        if edit_mode:
            edit_mode = placeholder.has_change_permission(request.user)
        return edit_mode
| 45.610706 | 119 | 0.671343 |
49ee9b363a3763a024c4b00be0e0021cc10bbd06 | 1,078 | py | Python | src/DoingMathInPython/ch_03/challenge/better_corel.py | bmoretz/Python-Playground | a367ec7659b85c24363c21b5c0ac25db08ffa1f6 | [
"MIT"
] | null | null | null | src/DoingMathInPython/ch_03/challenge/better_corel.py | bmoretz/Python-Playground | a367ec7659b85c24363c21b5c0ac25db08ffa1f6 | [
"MIT"
] | null | null | null | src/DoingMathInPython/ch_03/challenge/better_corel.py | bmoretz/Python-Playground | a367ec7659b85c24363c21b5c0ac25db08ffa1f6 | [
"MIT"
] | null | null | null | import math
def find_corr_x_y( x, y ):
n = len( x )
z = len( y )
if n != z:
raise ValueError( 'Correlation cannot be evaluated with different length sets ( x:len[{0}] / y:len[{1}] )'.format( n, z ) )
prod = []
for xi, yi in zip( x, y ):
prod.append( xi * yi )
sum_prod_x_y = sum( prod )
sum_x = sum( x )
sum_y = sum( y )
squared_sum_x = sum_x ** 2
squared_sum_y = sum_y ** 2
x_square = []
for xi in x:
x_square.append( xi ** 2 )
x_square_sum = sum( x_square )
y_square = []
for yi in y:
y_square.append( yi ** 2 )
y_square_sum = sum( y_square )
numerator = n * sum_prod_x_y - sum_x * sum_y
denominator_term1 = n * x_square_sum - squared_sum_x
denominator_term2 = n * y_square_sum - squared_sum_y
denominator = ( denominator_term1 * denominator_term2 ) ** .5
correlation = numerator / denominator
return correlation
if __name__ == '__main__':
    # Demo data. Note the length mismatch (9 vs. 10 values), which makes
    # find_corr_x_y raise -- presumably intended to demonstrate the error path.
    scores_x = [90, 92, 95, 96, 87, 87, 90, 95, 98]
    scores_y = [85, 87, 86, 97, 96, 88, 89, 98, 98, 87]
    try:
        print(find_corr_x_y(scores_x, scores_y))
    except ValueError as err:
        print(err)
f383fc596a572afb8c040726e3685e68ec1ff82b | 112 | py | Python | python-logging/mymodule/__init__.py | cgt212/example-code | 739dadc5003b0a1f82cc05b3f40c168659ed31f3 | [
"Apache-2.0"
] | null | null | null | python-logging/mymodule/__init__.py | cgt212/example-code | 739dadc5003b0a1f82cc05b3f40c168659ed31f3 | [
"Apache-2.0"
] | null | null | null | python-logging/mymodule/__init__.py | cgt212/example-code | 739dadc5003b0a1f82cc05b3f40c168659ed31f3 | [
"Apache-2.0"
] | null | null | null | from mymodule.thing import Thing
import logging
# Library best practice (Python logging HOWTO): attach a NullHandler to the
# package logger so applications that import this package without configuring
# logging do not get "No handlers could be found" warnings.
logging.getLogger(__name__).addHandler(logging.NullHandler())
| 18.666667 | 61 | 0.830357 |
a14eadc32cf73a5972bcead1bbc056b68ec32a17 | 5,684 | py | Python | junorunner/runner.py | druids/django-juno-testrunner | eaf7a945f7fc8fcb72e7fd1b67c36aa230bba365 | [
"MIT"
] | null | null | null | junorunner/runner.py | druids/django-juno-testrunner | eaf7a945f7fc8fcb72e7fd1b67c36aa230bba365 | [
"MIT"
] | 2 | 2017-02-25T21:42:07.000Z | 2021-05-10T15:19:53.000Z | junorunner/runner.py | druids/django-juno-testrunner | eaf7a945f7fc8fcb72e7fd1b67c36aa230bba365 | [
"MIT"
] | 1 | 2016-04-22T14:40:28.000Z | 2016-04-22T14:40:28.000Z | import os
from unittest import loader
from unittest.suite import TestSuite

from django.conf import settings
from django.core import management
from django.test.runner import DiscoverRunner
from django.test.runner import filter_tests_by_tags
from django.test.runner import reorder_suite

from junorunner.extended_runner import TextTestRunner
class JunoDiscoverRunner(DiscoverRunner):
    """
    The only real difference between this and the standard DiscoverRunner in Django 1.6+
    is the use of the custom TextTestRunner, which we hook in via run_suite()
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Failure/rerun log files are only useful for full, non-interactive
        # runs; --failfast implies an interactive run, so skip them there.
        self.use_log_files = not self.failfast
    def run_suite(self, suite, **kwargs):
        """Run `suite` through junorunner's extended TextTestRunner."""
        # NOTE(review): `slow_test_count` and `methods` are read from self but
        # not defined in this class; presumably they are added as runner
        # options by the surrounding tooling -- confirm.
        return TextTestRunner(
            verbosity=self.verbosity,
            failfast=self.failfast,
            total_tests=suite.total_tests,
            slow_test_count=self.slow_test_count,
            use_log_files=self.use_log_files
        ).run(suite)
    def _get_suite(self, test_labels, discover_kwargs, extra_tests, methods):
        """Build a TestSuite from `test_labels`, restricted to `methods` if given."""
        suite = TestSuite()
        for label in test_labels:
            kwargs = discover_kwargs.copy()
            tests = None
            label_as_path = os.path.abspath(label)
            # if a module, or "module.ClassName[.method_name]", just run those
            if not os.path.exists(label_as_path):
                tests = self.test_loader.loadTestsFromName(label)
            elif os.path.isdir(label_as_path) and not self.top_level:
                # Try to be a bit smarter than unittest about finding the
                # default top-level for a given directory path, to avoid
                # breaking relative imports. (Unittest's default is to set
                # top-level equal to the path, which means relative imports
                # will result in "Attempted relative import in non-package.").
                # We'd be happy to skip this and require dotted module paths
                # (which don't cause this problem) instead of file paths (which
                # do), but in the case of a directory in the cwd, which would
                # be equally valid if considered as a top-level module or as a
                # directory path, unittest unfortunately prefers the latter.
                top_level = label_as_path
                while True:
                    init_py = os.path.join(top_level, '__init__.py')
                    if os.path.exists(init_py):
                        try_next = os.path.dirname(top_level)
                        if try_next == top_level:
                            # __init__.py all the way down? give up.
                            break
                        top_level = try_next
                        continue
                    break
                kwargs['top_level_dir'] = top_level
            if not (tests and tests.countTestCases()):
                # if no tests found, it's probably a package; try discovery
                tests = self.test_loader.discover(start_dir=label, **kwargs)
                # make unittest forget the top-level dir it calculated from this
                # run, to support running tests from two different top-levels.
                self.test_loader._top_level_dir = None
            # Drop any tests whose method name is not in `methods`
            # (no-op when `methods` is empty).
            tests = self.get_tests_defined_in_methods_or_none(tests, methods)
            if tests:
                suite.addTests(tests)
        for test in extra_tests:
            suite.addTest(test)
        return suite
    def _get_parallel_suite(self, suite):
        """Wrap `suite` for parallel execution when --parallel > 1 makes sense."""
        if self.parallel > 1:
            parallel_suite = self.parallel_test_suite(suite, self.parallel, self.failfast)
            # Since tests are distributed across processes on a per-TestCase
            # basis, there's no need for more processes than TestCases.
            parallel_units = len(parallel_suite.subsuites)
            if self.parallel > parallel_units:
                self.parallel = parallel_units
            # If there's only one TestCase, parallelization isn't needed.
            if self.parallel > 1:
                return parallel_suite
        return suite
    def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
        """Assemble the (possibly parallel) suite and record its total size."""
        extra_tests = extra_tests or []
        # `methods` is a comma-separated list of test method names to run
        # exclusively; disable log files for such targeted runs.
        methods = self.methods.split(',') if self.methods else []
        if methods:
            self.use_log_files = False
        discover_kwargs = {}
        if self.pattern is not None:
            discover_kwargs['pattern'] = self.pattern
        if self.top_level is not None:
            discover_kwargs['top_level_dir'] = self.top_level
        suite = self._get_suite(test_labels, discover_kwargs, extra_tests, methods)
        if self.tags or self.exclude_tags:
            # `filter_tests_by_tags` lives in django.test.runner (the same
            # module DiscoverRunner comes from) and must be imported there.
            suite = filter_tests_by_tags(suite, self.tags, self.exclude_tags)
        suite = reorder_suite(suite, self.reorder_by, self.reverse)
        # Count before parallel wrapping; the parallel suite hides _tests.
        total_tests = len(suite._tests)
        suite = self._get_parallel_suite(suite)
        suite.total_tests = total_tests
        return suite
    def get_tests_defined_in_methods_or_none(self, tests, methods):
        """Recursively keep only tests whose method name is in `methods`.

        Returns `tests` unchanged when `methods` is empty; failed-to-load
        placeholders (_FailedTest) are always kept so errors still surface.
        """
        if not methods:
            return tests
        else:
            if isinstance(tests, TestSuite):
                returned_tests = []
                for test in tests:
                    returned_test = self.get_tests_defined_in_methods_or_none(test, methods)
                    if returned_test:
                        returned_tests.append(returned_test)
                return TestSuite(returned_tests)
            elif tests._testMethodName in methods:
                return tests
            elif isinstance(tests, loader._FailedTest):
                return tests
            else:
                return None
| 40.312057 | 92 | 0.606791 |
e98671df2edc6228ee1fd39f87b5412abc43cddd | 2,890 | py | Python | GEOS_Util/coupled_diagnostics/verification/stress_mon_clim/wind_qscat.py | GEOS-ESM/GMAO_Shared | 022af23abbc7883891006b57379be96d9a50df23 | [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-02-01T17:36:53.000Z | 2020-02-01T17:36:53.000Z | GEOS_Util/coupled_diagnostics/verification/stress_mon_clim/wind_qscat.py | GEOS-ESM/GMAO_Shared | 022af23abbc7883891006b57379be96d9a50df23 | [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 105 | 2019-07-08T19:27:23.000Z | 2022-03-22T02:12:16.000Z | GEOS_Util/coupled_diagnostics/verification/stress_mon_clim/wind_qscat.py | GEOS-ESM/GMAO_Shared | 022af23abbc7883891006b57379be96d9a50df23 | [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 10 | 2019-07-05T18:00:44.000Z | 2022-03-11T16:26:29.000Z | import scipy as sp
import os
import matplotlib.pyplot as pl
from mpl_toolkits.basemap.cm import sstanom
import my_plots as mpl
from matplotlib import dates
path=os.environ['NOBACKUP']+'/verification/stress_mon_clim'
execfile(path+'/ctl.py')
ctl=Ctl()
taux={}
taux['clim']=ctl.fromfile('taux'); taux['clim'].data*=10.
taux['clim'].shiftgrid(.1)
taux['clim'].grid['lon']=sp.where(taux['clim'].grid['lon']<0.,taux['clim'].grid['lon']+360, \
taux['clim'].grid['lon'])
ind=[0,1,11]; taux['djf']=taux['clim'].subset(tind=ind).ave(0); taux['djf'].name+=', DJF'
ind=[5,6,7]; taux['jja']=taux['clim'].subset(tind=ind).ave(0); taux['jja'].name+=', JJA'
taux['am']=taux['clim'].subset(tind=ind).ave(0); taux['am'].name+=', Annual Mean'
tauy={}
tauy['clim']=ctl.fromfile('tauy'); tauy['clim'].data*=10.
tauy['clim'].shiftgrid(.1)
tauy['clim'].grid['lon']=sp.where(tauy['clim'].grid['lon']<0.,tauy['clim'].grid['lon']+360, \
tauy['clim'].grid['lon'])
ind=[0,1,11]; tauy['djf']=tauy['clim'].subset(tind=ind).ave(0); tauy['djf'].name+=', DJF'
ind=[5,6,7]; tauy['jja']=tauy['clim'].subset(tind=ind).ave(0); tauy['jja'].name+=', JJA'
tauy['am']=tauy['clim'].subset(tind=ind).ave(0); tauy['am'].name+=', Annual Mean'
# Equatorial annual cycle
lonind=sp.logical_and(taux['clim'].grid['lon'][0]>=130.0,taux['clim'].grid['lon'][0]<=280.0)
latind=sp.logical_and(taux['clim'].grid['lat'][:,0]>=-2.1,taux['clim'].grid['lat'][:,0]<=2.0)
taux['eqac']=taux['clim'].subset(iind=lonind,jind=latind).ave(2)
taux['eqac'].data-=taux['eqac'].ave(0).data
taux['eqac'].name=taux['clim'].name+', Eq. Annual Cycle'
# Plots
path+='/pics'
def plot_field(field, fig):
clevs=sp.arange(-2,2.1,0.2)
pl.figure(fig); pl.clf()
field.copts={'levels': clevs,\
'cmap': sstanom}
field.plot_map()
mean,std=field.aave(ret_std=True)
mpl.draw_stat((mean.data.squeeze(),std.data.squeeze()))
pl.show()
#DJF
plot_field(taux['djf'],1)
pl.savefig(path+'/taux_djf.png')
plot_field(tauy['djf'],2)
pl.savefig(path+'/tauy_djf.png')
#JJA
plot_field(taux['jja'],1)
pl.savefig(path+'/taux_jja.png')
plot_field(tauy['jja'],2)
pl.savefig(path+'/tauy_jja.png')
#AM
plot_field(taux['am'],1)
pl.savefig(path+'/taux_am.png')
plot_field(tauy['am'],2)
pl.savefig(path+'/tauy_am.png')
# Plot annual cycle
pl.figure(3);pl.clf()
taux['eqac'].copts={'levels': sp.arange(-0.2,0.21,0.05),\
'cmap' : sstanom,\
'timefmt': dates.DateFormatter('%b')}
taux['eqac'].plot2d()
taux['eqac'].copts={'func': pl.contour,\
'colors': 'black',\
'levels': sp.arange(-0.2,0.21,0.05),\
'timefmt': dates.DateFormatter('%b')}
taux['eqac'].plot2d()
ax=pl.gca(); ax.yaxis.set_major_locator(dates.MonthLocator())
pl.grid(); pl.show()
pl.savefig(path+'/taux_eq_ac.png')
| 34.404762 | 93 | 0.610035 |
8612b35a4baf8b847a70ab46d7220f213fb0d65c | 914 | py | Python | src/test_html_file_dao.py | Alan-Greene/wcag | bd502eae346dad85b0c64fb4e0b59c1aa85a0f19 | [
"MIT"
] | null | null | null | src/test_html_file_dao.py | Alan-Greene/wcag | bd502eae346dad85b0c64fb4e0b59c1aa85a0f19 | [
"MIT"
] | null | null | null | src/test_html_file_dao.py | Alan-Greene/wcag | bd502eae346dad85b0c64fb4e0b59c1aa85a0f19 | [
"MIT"
] | 2 | 2020-05-28T22:10:58.000Z | 2020-05-28T22:11:58.000Z | import unittest
from html_file_dao import HtmlFileDao
class TestHtmlFileDao(unittest.TestCase):
    """Behavioural tests for HtmlFileDao.get_html()."""

    def setUp(self):
        # Arrange: one path that does not exist and one that does.
        self.false_html_path = './fixtures/big.html'
        self.correct_html_path = './fixtures/small.html'
        self.file_dao = HtmlFileDao()

    def test_file_not_found(self):
        # Act / Assert: a missing file must raise FileNotFoundError.
        with self.assertRaises(FileNotFoundError):
            self.file_dao.get_html(self.false_html_path)

    def test_file_found(self):
        # Act
        html = self.file_dao.get_html(self.correct_html_path)
        # Assert: an existing fixture yields non-empty content.
        assert html

    def test_assert_correct_content(self):
        # Act: read the fixture directly for comparison.
        with open(self.correct_html_path, 'r') as html_file:
            self.html_content = html_file.read()
        html = self.file_dao.get_html(self.correct_html_path)
        # Assert: the DAO returns exactly the file's contents.
        self.assertEqual(html, self.html_content)
3b7d26586e5216a3e851869d9a4ba3f81651cc65 | 855 | py | Python | scripts/create_test_data.py | rjw57/trafficdb | 7c895e14a52c8c313981243e36732a5e8dcc909a | [
"MIT"
] | 1 | 2016-12-12T21:23:26.000Z | 2016-12-12T21:23:26.000Z | scripts/create_test_data.py | rjw57/trafficdb | 7c895e14a52c8c313981243e36732a5e8dcc909a | [
"MIT"
] | null | null | null | scripts/create_test_data.py | rjw57/trafficdb | 7c895e14a52c8c313981243e36732a5e8dcc909a | [
"MIT"
] | null | null | null | # This script should be run via "webapp shell" and the "%run" magic
import os
# NOTE(review): `os` is imported but unused in the visible code.
from flask.ext.migrate import upgrade as upgrade_db
from trafficdb.models import *
from tests.fixtures import *
from tests.util import drop_all_data
# Rollback any incomplete session
db.session.rollback()
# Remember echo state so SQL echoing can be restored at the end.
prev_echo = db.engine.echo
db.engine.echo = False
# Upgrade DB if necessary
upgrade_db()
# Drop any existing data
drop_all_data()
db.session.commit()
# Create test data
print('Creating test data...')
# `datetime`, `create_fake_observations` and `create_fake_link_aliases`
# presumably come from the star imports above (tests.fixtures /
# trafficdb.models) -- confirm.
start_date = datetime.datetime(2012, 4, 23)
end_date = datetime.datetime(2012, 5, 10)
# Duration is expressed in whole minutes.
duration = int((end_date - start_date).total_seconds() // 60)
create_fake_observations(link_count=200, start=start_date, duration=duration)
create_fake_link_aliases(alias_count=100)
db.session.commit()
print('Test data created')
db.engine.echo = prev_echo
| 24.428571 | 77 | 0.775439 |
057871323dcab6db7ce225ca6a05713b85b1f71e | 755 | py | Python | zenml/cli/__init__.py | birdiesanders/zenml | fca11c17ccf941aa14920ef4eab470b94b26ccbe | [
"Apache-2.0"
] | 1 | 2021-05-04T17:11:23.000Z | 2021-05-04T17:11:23.000Z | zenml/cli/__init__.py | birdiesanders/zenml | fca11c17ccf941aa14920ef4eab470b94b26ccbe | [
"Apache-2.0"
] | null | null | null | zenml/cli/__init__.py | birdiesanders/zenml | fca11c17ccf941aa14920ef4eab470b94b26ccbe | [
"Apache-2.0"
] | 1 | 2020-12-27T08:16:42.000Z | 2020-12-27T08:16:42.000Z | # Copyright (c) maiot GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
# Silence TensorFlow's C++ logging (level 3 = errors only).  This is set
# before the star imports below, presumably because they transitively import
# TensorFlow -- the variable only takes effect if set before that first
# import (TODO confirm).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from .config import *
from .init import *
from .version import *
from .pipeline import *
| 32.826087 | 70 | 0.740397 |
e8f609ca7445ddf08ca34b8736d982332dcd8ea3 | 2,168 | py | Python | Scripts/EncrypThor_v3.py | s0rcy-r/encrypthor | 2e1ec9d123822ec8df8d95b20c7adf6790547828 | [
"MIT"
] | 2 | 2021-06-11T08:05:48.000Z | 2021-07-29T07:11:31.000Z | Scripts/EncrypThor_v3.py | s0rcy-r/encrypthor | 2e1ec9d123822ec8df8d95b20c7adf6790547828 | [
"MIT"
] | null | null | null | Scripts/EncrypThor_v3.py | s0rcy-r/encrypthor | 2e1ec9d123822ec8df8d95b20c7adf6790547828 | [
"MIT"
] | null | null | null | import os
import shutil
from Class.EncrypThor_Class import EncrypThor
###EncrypThor v2
#Encryption protocol v2 (two keys)
def encryption_protocol_v3(path, key, keyword):
    """Encrypt the file at *path* in place using the v3 two-key protocol.

    Args:
        path: Path (str) to the clear-text file; it is deleted at the end.
        key: Cipher key (str), used as the salt for key derivation.
        keyword: Random keyword (str), used as the derivation password.
    """
    cipher = EncrypThor()
    # Derive two keys from one: the second is chained off the first.
    first_key = cipher.keys_gen(password=keyword, salt=key)
    second_key = cipher.keys_gen(password=keyword, salt=first_key)
    # First pass: encrypt the whole file (produces "<path>.enc").
    cipher.encrypt_data(key=first_key, path_in=path)
    # Cut the encrypted file into blocks of random size, then drop it.
    cipher.cutin_blocks(path_in=path + ".enc", path_out=path + ".encx")
    os.remove(path + ".enc")
    # Will come in a future update:
    # cipher.shuffle_blocks(path_in=path + ".encx")
    # Second pass: encrypt every block in the ".encx" folder individually.
    block_dir = path + ".encx"
    for block_name in os.listdir(block_dir):
        block_path = "{}/{}".format(block_dir, block_name)
        cipher.encrypt_data(key=second_key, path_in=block_path)
        os.remove(block_path)
    # Finally remove the original clear-text file.
    os.remove(path)
#Decryption protocol v2 (two keys)
def decryption_protocol_v3(path, key, keyword):
    """Decrypt a "<name>.encx" block folder produced by encryption_protocol_v3.

    Args:
        path: Path (str) to the ".encx" block folder; removed at the end.
        key: Cipher key (str), used as the salt for key derivation.
        keyword: Random keyword (str), used as the derivation password.
    """
    cipher = EncrypThor()
    # Re-derive the same two chained keys used during encryption.
    first_key = cipher.keys_gen(password=keyword, salt=key)
    second_key = cipher.keys_gen(password=keyword, salt=first_key)
    # Will come in a future update:
    # cipher.sort_blocks(path_in=path)
    # Undo the second pass: decrypt every block inside the folder.
    for block_name in os.listdir(path):
        block_path = "{}/{}".format(path, block_name)
        cipher.decrypt_data(key=second_key, path_in=block_path)
        os.remove(block_path)
    # Join the decrypted blocks back into a single encrypted file
    # (the folder path minus its trailing character).
    cipher.joinin_blocks(path_in=path, path_out=path[:-1])
    # Undo the first pass: decrypt the re-assembled file.
    cipher.decrypt_data(key=first_key, path_in=path[:-1])
    # Drop the intermediate encrypted file and the block folder.
    os.remove(path[:-1])
    shutil.rmtree(path)
e48b7e8b425633af61fa95ce0a32493812756c69 | 490 | py | Python | 01_Modelos_Supervisionados/1.17_Modelos_de_Rede_Neural_(supervisionados)/1.17.6_Complexidade.py | BrunoBertti/Scikit_Learning | 4b9e10ff7909f3728ac1e8bba19f5fd779340bc4 | [
"MIT"
] | null | null | null | 01_Modelos_Supervisionados/1.17_Modelos_de_Rede_Neural_(supervisionados)/1.17.6_Complexidade.py | BrunoBertti/Scikit_Learning | 4b9e10ff7909f3728ac1e8bba19f5fd779340bc4 | [
"MIT"
] | null | null | null | 01_Modelos_Supervisionados/1.17_Modelos_de_Rede_Neural_(supervisionados)/1.17.6_Complexidade.py | BrunoBertti/Scikit_Learning | 4b9e10ff7909f3728ac1e8bba19f5fd779340bc4 | [
"MIT"
] | null | null | null | ########## 1.17.6. Complexidade ##########
# Suponha que haja n amostras de treinamento, m recursos, k camadas ocultas (cada uma contendo, para simplificar, h neurônios) e o neurônios de saída. A complexidade de tempo da retropropagação é O(n · m · h^k · o · i), onde i é o número de iterações. Como a retropropagação tem uma alta complexidade de tempo, é aconselhável começar com um número menor de neurônios ocultos e poucas camadas ocultas para o treinamento.
aa32a02d2dbd1a8a2ba00c03028505284d77f771 | 5,427 | py | Python | ci/codespell-sarif.py | meissel/nemu | 4f495cca81f459717d175137f16a29f410140d8b | [
"BSD-2-Clause"
] | 155 | 2019-11-15T12:11:37.000Z | 2022-03-25T19:12:58.000Z | ci/codespell-sarif.py | meissel/nemu | 4f495cca81f459717d175137f16a29f410140d8b | [
"BSD-2-Clause"
] | 93 | 2020-01-04T15:28:48.000Z | 2022-01-31T11:52:09.000Z | ci/codespell-sarif.py | meissel/nemu | 4f495cca81f459717d175137f16a29f410140d8b | [
"BSD-2-Clause"
] | 18 | 2020-01-04T12:59:55.000Z | 2022-02-05T22:54:28.000Z | #!/usr/bin/env python3
from argparse import ArgumentParser, Namespace
from subprocess import run
from json import dumps
from typing import List, Dict
from sys import exit
from os import path
import traceback
import logging
def sarif_template(codePath: str, uriBaseId: str, codespell: str) -> dict:
    """Build the SARIF 2.1.0 skeleton for one codespell run.

    Args:
        codePath: Root of the git checkout being checked; used for version
            control provenance and the uriBaseId mapping.
        uriBaseId: Symbolic base id (e.g. "SRCROOT") that result URIs
            resolve against.
        codespell: The codespell executable, queried for its version.

    Returns:
        A SARIF log dict with empty ``artifacts``/``results`` lists, ready
        to be filled with ``parse_error()`` results.
    """
    # These shell out to git / codespell in codePath's repository.
    repoUri = git_remote_url(codePath)
    codeRev = git_rev(codePath)
    codeBranch = git_branch(codePath)
    codespellVersion = codespell_version(codespell)
    return {
        "$schema": "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json",
        "version": "2.1.0",
        "runs": [
            {
                "tool": {
                    "driver": {
                        "name": "codespell",
                        "version": codespellVersion,
                        "informationUri": "https://github.com/codespell-project/codespell",
                        # Single rule: every finding maps to CS0001.
                        "rules": [
                            {
                                "id": "CS0001",
                                "name": "SpellingMistake",
                                "shortDescription": {
                                    "text": "Probable spelling mistake",
                                },
                                "helpUri": "https://github.com/codespell-project/codespell#readme",
                                "properties": {
                                    "Severity": "style",
                                },
                            },
                        ],
                    },
                },
                # Map the symbolic base id onto the absolute checkout path.
                "originalUriBaseIds": {
                    uriBaseId: {
                        "uri": f"FILE://{path.abspath(codePath)}/",
                    },
                },
                "versionControlProvenance": [
                    {
                        "repositoryUri": repoUri,
                        "revisionId": codeRev,
                        "branch": codeBranch,
                        "mappedTo": {
                            "uriBaseId": uriBaseId,
                        },
                    },
                ],
                "artifacts": [],
                "results": [],
            },
        ],
    }
def codespell_version(codespell: str) -> str:
return run(
[codespell, "--version"],
capture_output=True
).stdout.decode().strip()
def git_rev(codePath: str) -> str:
    """Return the HEAD commit hash of the git checkout at *codePath*."""
    completed = run(
        ["git", "-C", codePath, "rev-parse", "HEAD"],
        capture_output=True
    )
    return completed.stdout.decode().strip()
def git_branch(codePath: str) -> str:
    """Return the currently checked-out branch name at *codePath*."""
    completed = run(
        ["git", "-C", codePath, "branch", "--show-current"],
        capture_output=True
    )
    return completed.stdout.decode().strip()
def git_remote_url(codePath: str) -> str:
    """Return the URL of the "origin" remote for the checkout at *codePath*."""
    completed = run(
        ["git", "-C", codePath, "remote", "get-url", "origin"],
        capture_output=True
    )
    return completed.stdout.decode().strip()
def run_codespell(
    codespell: str, args: List[str], codePath: str
) -> List[Dict]:
    """Run codespell over *codePath* and parse its findings.

    Args:
        codespell: The codespell executable to invoke.
        args: Extra command-line arguments for codespell.
        codePath: Root directory (or file) to spell-check.

    Returns:
        A list of dicts with ``file`` (path relative to *codePath*),
        ``line`` (int) and ``msg`` keys, one per reported finding.
    """
    out = run(
        [codespell] + args + [codePath],
        capture_output=True
    ).stdout.decode().split("\n")
    errors = []
    # The loop variable was called `str`, shadowing the builtin; renamed.
    for entry in out:
        if not entry:
            continue
        # codespell emits "FILE: LINE: message" (a fourth colon-separated
        # field would raise ValueError here, as before).
        file, line, msg = (part.strip() for part in entry.split(":"))
        file = path.relpath(file, codePath)
        errors.append({
            "file": file,
            "line": int(line),
            "msg": msg,
        })
    return errors
def parse_error(error: str, uriBaseId: str) -> dict:
    """Convert one parsed codespell finding into a SARIF result object."""
    # Build the location object first, then assemble the result around it.
    location = {
        "physicalLocation": {
            "artifactLocation": {
                "uri": error["file"],
                "uriBaseId": uriBaseId,
            },
            "region": {
                "startLine": error["line"],
            },
        },
    }
    return {
        "ruleId": "CS0001",
        "ruleIndex": 0,
        "message": {
            "text": "Possible spelling mistake: {}.".format(error["msg"]),
        },
        "locations": [location],
    }
def main(args: Namespace):
    """Build the SARIF report for *args* and write it to a file or stdout."""
    codespellArgs = args.codespell_args.split(" ")
    sarif = sarif_template(args.path, args.base_uri, args.codespell)
    findings = run_codespell(args.codespell, codespellArgs, args.path)
    results = sarif["runs"][0]["results"]
    for finding in findings:
        results.append(parse_error(finding, args.base_uri))
    report = dumps(sarif, indent=2)
    if args.output:
        with open(args.output, "w") as outputFile:
            outputFile.write(report)
    else:
        print(report)
if __name__ == "__main__":
    # Command line: positional project path plus codespell/output options.
    parser = ArgumentParser()
    parser.add_argument("path", help="Path to project", type=str)
    parser.add_argument("-c", "--codespell", type=str, default="codespell",
                        help="codespell command to run")
    parser.add_argument("-a", "--codespell-args", type=str, default="-q 3",
                        help="codespell arguments")
    parser.add_argument("-u", "--base-uri", type=str, default="SRCROOT",
                        help="uriBaseId for SARIF")
    parser.add_argument("-o", "--output", type=str, default="",
                        help="Path to output SARIF file")
    try:
        main(parser.parse_args())
    except Exception:
        # Log the full traceback and exit non-zero on any failure.
        logging.error(traceback.format_exc())
        exit(1)
| 28.413613 | 116 | 0.47798 |
b85f2932597b059dfc08052205232d17c346f19d | 6,670 | py | Python | markdown_settings.py | revolunet/sublimetext-markdown-preview | 154bdd63f27970dd4c775c5c57810d7f18c64ea5 | [
"MIT"
] | 1,563 | 2015-01-01T05:52:34.000Z | 2022-02-20T09:32:31.000Z | markdown_settings.py | revolunet/sublimetext-markdown-preview | 154bdd63f27970dd4c775c5c57810d7f18c64ea5 | [
"MIT"
] | 178 | 2015-01-08T18:21:44.000Z | 2018-05-17T09:32:11.000Z | markdown_settings.py | revolunet/sublimetext-markdown-preview | 154bdd63f27970dd4c775c5c57810d7f18c64ea5 | [
"MIT"
] | 298 | 2015-01-08T21:12:15.000Z | 2022-03-15T08:00:44.000Z | """Markdown Preview settings handler."""
from __future__ import unicode_literals
import sublime
import os
import sys
import re
import json
import importlib
BUILTIN_KEYS = ('basepath', 'references', 'destination')
def extended_decode(d):
    """JSON object hook resolving ``!!python/name`` callable references.

    A mapping of the form ``{"!!python/name": "pkg.mod.attr"}`` is replaced
    by the ``attr`` attribute of the imported ``pkg.mod`` module; any other
    mapping is returned unchanged.
    """
    if '!!python/name' not in d:
        return d
    module_name, _, attribute = d['!!python/name'].rpartition('.')
    return getattr(importlib.import_module(module_name), attribute)
class Settings(object):
    """Layered settings: frontmatter overrides on top of Sublime settings."""
    def __init__(self, settings_file, file_name):
        """Load `settings_file`; `file_name` is the document path (may be None)."""
        self.file_name = file_name
        self._sub_settings = sublime.load_settings(settings_file)
        # Overrides shadow the Sublime settings; "builtin" holds keys the
        # plugin interprets itself, "meta" holds document meta data.
        self._overrides = {
            "builtin": {
                "references": [],
                "basepath": self.get_base_path(None)
            },
            "meta": {}
        }
    def parse_md_ext(self):
        """Parse Markdown extensions, resolving `!!python/name` entries."""
        extensions = self._sub_settings.get('markdown_extensions', {})
        # Round-trip through JSON so extended_decode can substitute Python
        # callables for `!!python/name` mappings.
        return json.loads(json.dumps(extensions), object_hook=extended_decode)
    def get(self, key, default=None):
        """Return a setting value, preferring overrides over Sublime settings."""
        if key in self._overrides:
            return self._overrides[key]
        else:
            if key == 'markdown_extensions':
                return self.parse_md_ext()
            else:
                return self._sub_settings.get(key, default)
    def set(self, key, value):
        """Record `value` as an override (the settings file is untouched)."""
        self._overrides[key] = value
    def has(self, key):
        """Return True if `key` exists in the overrides or Sublime settings."""
        found = key in self._overrides
        if not found:
            found = self._sub_settings.has(key)
        return found
    def is_abs(self, pth):
        """Check if `pth` is absolute (drive letter or // on Windows, / elsewhere)."""
        absolute = False
        if pth is not None:
            if sys.platform.startswith('win'):
                re_win_drive = re.compile(r"(^[A-Za-z]{1}:(?:\\|/))")
                if re_win_drive.match(pth) is not None or pth.startswith("//"):
                    absolute = True
            elif pth.startswith('/'):
                absolute = True
        return absolute
    def resolve_meta_path(self, target):
        """
        Resolve a path found in the meta data.

        1. An absolute path is kept only if it exists (else None).
        2. A relative path is tried against the file's own directory first,
           then the configured basepath; the first base where it exists
           wins.  If neither matches, the expanded original is returned.
        """
        basepath = self._overrides["builtin"].get("basepath")
        current_dir = None if self.file_name is None else os.path.dirname(self.file_name)
        if target is not None:
            target = os.path.expanduser(target)
            if not self.is_abs(target):
                for base in (current_dir, basepath):
                    if base is not None:
                        temp = os.path.join(base, target)
                        if os.path.exists(temp):
                            target = temp
                            break
            elif not os.path.exists(target):
                target = None
        return target
    def get_base_path(self, basepath):
        """Return a usable base directory: the given one if it is an existing
        absolute directory, else the document's own directory, else None."""
        if basepath is not None:
            basepath = os.path.expanduser(basepath)
        if (
            basepath is not None and os.path.exists(basepath) and
            self.is_abs(basepath) and os.path.isdir(basepath)
        ):
            # A valid path was fed in
            # NOTE(review): this round-trip assignment is a no-op.
            path = basepath
            basepath = path
        elif self.file_name is not None and os.path.exists(self.file_name):
            basepath = os.path.dirname(self.file_name)
        else:
            # Okay, there is no way to tell the orign.
            # We are probably a stream that has no specified
            # physical location.
            basepath = None
        return basepath
    def add_meta(self, meta):
        """Merge `meta` into the stored meta data (stored entries win)."""
        meta = dict(list(meta.items()) + list(self._overrides.get("meta", {}).items()))
        self._overrides["meta"] = meta
    def apply_frontmatter(self, frontmatter):
        """
        Apply a parsed frontmatter dict as overrides.

        "settings" entries become plain overrides, BUILTIN_KEYS get special
        path handling, all other keys are stringified into the meta data.
        NOTE(review): mutates the caller's dict (del of "basepath").
        """
        # Handle basepath first so later path resolution can use it.
        if "basepath" in frontmatter:
            value = frontmatter["basepath"]
            self._overrides["builtin"]["basepath"] = self.get_base_path(value)
            del frontmatter["basepath"]
        for key, value in frontmatter.items():
            if key == "settings" and isinstance(value, dict):
                for subkey, subvalue in value.items():
                    self._overrides[subkey] = subvalue
            elif key in BUILTIN_KEYS:
                if key == "references":
                    # Normalize to a list and keep only resolvable,
                    # non-directory reference files.
                    if not isinstance(value, list):
                        value = [value]
                    refs = []
                    for ref in value:
                        file_name = self.resolve_meta_path(ref)
                        if file_name is not None and not os.path.isdir(file_name):
                            refs.append(os.path.normpath(file_name))
                    self._overrides["builtin"][key] = refs
                if key == "destination":
                    if value is not None:
                        file_name = value
                        # NOTE(review): file_name cannot be None here; the
                        # else branch below looks like dead code.
                        if file_name is not None:
                            directory = os.path.dirname(file_name)
                            directory = self.resolve_meta_path(directory)
                        else:
                            directory = None
                        if directory is not None:
                            file_name = os.path.join(directory, os.path.basename(file_name))
                        if (
                            file_name is not None and
                            (not os.path.exists(file_name) or not os.path.isdir(file_name))
                        ):
                            self._overrides["builtin"][key] = file_name
            else:
                # Unknown keys become meta data; values are stringified.
                if isinstance(value, list):
                    value = [str(v) for v in value]
                else:
                    value = str(value)
                self._overrides["meta"][str(key)] = value
| 35.668449 | 92 | 0.527886 |
f41712ecced717ef06d2758da0b174ad3c4c8d4e | 6,524 | py | Python | csmpe/csm_pm.py | kstaniek/cmspe | 16d9c1510a17b31c8de37ba05b3c689e4952b155 | [
"BSD-2-Clause"
] | null | null | null | csmpe/csm_pm.py | kstaniek/cmspe | 16d9c1510a17b31c8de37ba05b3c689e4952b155 | [
"BSD-2-Clause"
] | null | null | null | csmpe/csm_pm.py | kstaniek/cmspe | 16d9c1510a17b31c8de37ba05b3c689e4952b155 | [
"BSD-2-Clause"
] | null | null | null | # =============================================================================
# CSMPluginManager
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# # Author: Klaudiusz Staniek
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import pkginfo
from stevedore.dispatch import DispatchExtensionManager
from stevedore.exception import NoMatches
from condoor import ConnectionError
from context import PluginContext
# All CSM install phases, in execution order, that a plugin may declare in
# its `phases` attribute.
install_phases = ['Pre-Upgrade', 'Pre-Add', 'Add', 'Pre-Activate', 'Activate', 'Pre-Deactivate',
                  'Deactivate', 'Pre-Remove', 'Remove', 'Commit']
# Phases whose "Pre-" counterpart is dispatched automatically first
# (see CSMPluginManager.dispatch).
auto_pre_phases = ["Add", "Activate", "Deactivate"]
class CSMPluginManager(object):
    """Discovers "csm.plugin" entry points and dispatches them per phase.

    Plugins are filtered by the current platform, install phase and an
    optional name set before being invoked.
    """
    def __init__(self, ctx=None, invoke_on_load=True):
        self._ctx = PluginContext(ctx)
        # Platform/phase come from the context when available; they act as
        # dispatch filters (None disables the respective filter).
        try:
            self._platform = self._ctx.family
        except AttributeError:
            self._platform = None
        try:
            self._phase = self._ctx.phase
        except AttributeError:
            self._phase = None
        self._name = None
        self._manager = DispatchExtensionManager(
            "csm.plugin",
            self._check_plugin,
            invoke_on_load=invoke_on_load,
            invoke_args=(self._ctx,),
            propagate_map_exceptions=True,
            on_load_failure_callback=self._on_load_failure,
        )
        self._build_plugin_list()
    def __getitem__(self, item):
        # Delegate item access to the underlying extension manager.
        return self._manager.__getitem__(item)
    def _build_plugin_list(self):
        """Cache name/description/phases/platforms for every loaded plugin."""
        self.plugins = {}
        for ext in self._manager:
            self.plugins[ext.name] = {
                # 'package_name': ext.entry_point.dist.project_name,
                'package_name': ext.entry_point.module_name.split(".")[0],
                'name': ext.plugin.name,
                'description': ext.plugin.__doc__,
                'phases': ext.plugin.phases,
                'platforms': ext.plugin.platforms,
            }
    def _filter_func(self, ext, *args, **kwargs):
        """Return True when `ext` matches the platform/phase/name filters."""
        # NOTE(review): debug print left in; consider removing or logging.
        print(ext.plugin.name)
        if self._platform and self._platform not in ext.plugin.platforms:
            return False
        if self._phase and self._phase not in ext.plugin.phases:
            return False
        if self._name and ext.plugin.name not in self._name:
            return False
        return True
    def _dispatch(self, ext, *args, **kwargs):
        """Filter callback for map_method: True lets the plugin's method run."""
        if self._filter_func(ext):
            self._ctx.current_plugin = None
            self._ctx.info("Dispatching: '{}'".format(ext.plugin.name))
            self._ctx.current_plugin = ext.plugin.name
            return True
        return False
    def _on_load_failure(self, manager, entry_point, exc):
        """Log plugins that failed to import or instantiate."""
        self._ctx.warning("Plugin load error: {}".format(entry_point))
        self._ctx.warning("Exception: {}".format(exc))
    def _check_plugin(self, ext, *args, **kwargs):
        """Warn about missing plugin attributes, then apply the filters."""
        attributes = ['name', 'phases', 'platforms']
        plugin = ext.plugin
        for attribute in attributes:
            if not hasattr(plugin, attribute):
                self._ctx.warning("Attribute '{}' missing in plugin class".format(attribute))
        return self._filter_func(ext)
    def _find_plugin_packages(self):
        # NOTE(review): `packages` is never populated, so this always
        # returns []; the print looks like leftover debugging.
        packages = set()
        for ext in self._manager:
            dist = ext.entry_point.dist
            print(dist.__dict__)
        return list(packages)
    def get_package_metadata(self, name):
        """Return pkginfo metadata for installed package `name`, or None."""
        try:
            meta = pkginfo.Installed(name)
        except ValueError as e:
            print(e)
            return None
        return meta
    def get_package_names(self):
        # NOTE(review): broken -- get_package_metadata() requires `name`,
        # and its result has no .keys(); this raises TypeError if called.
        return self.get_package_metadata().keys()
    def dispatch(self, func):
        """Connect the context, then run method `func` on matching plugins.

        For phases listed in auto_pre_phases, the corresponding "Pre-" phase
        plugins are dispatched first.  Returns False when the connection
        fails, otherwise the list of per-plugin dispatch results.
        """
        try:
            self._ctx.connect()
        except ConnectionError as e:
            self._ctx.post_status(e.message)
            self._ctx.error(e.message)
            return False
        results = []
        if self._phase in auto_pre_phases:
            current_phase = self._phase
            phase = "Pre-{}".format(self._phase)
            self.set_phase_filter(phase)
            try:
                results = self._manager.map_method(self._dispatch, func)
            except NoMatches:
                self._ctx.warning("No {} plugins found".format(phase))
            self._ctx.current_plugin = None
            self.set_phase_filter(current_phase)
        try:
            results += self._manager.map_method(self._dispatch, func)
        except NoMatches:
            self._ctx.post_status("No plugins found for phase {}".format(self._phase))
            self._ctx.error("No plugins found for phase {}".format(self._phase))
        self._ctx.current_plugin = None
        self._ctx.success = True
        return results
    def set_platform_filter(self, platform):
        """Restrict dispatch to plugins supporting `platform`."""
        self._platform = platform
    def set_phase_filter(self, phase):
        """Restrict dispatch to plugins registered for `phase`."""
        self._ctx.info("Phase: {}".format(phase))
        self._phase = phase
    def set_name_filter(self, name):
        """Restrict dispatch to the given plugin name(s); None clears it.

        NOTE(review): the `unicode` check means this module targets
        Python 2; it raises NameError on Python 3.
        """
        if isinstance(name, str) or isinstance(name, unicode):
            self._name = set((name,))
        elif isinstance(name, list):
            self._name = set(name)
        elif isinstance(name, set):
            self._name = name
        else:
            self._name = None
| 36.858757 | 96 | 0.623237 |
21302bba7d91022aaaa40af2a6a81a2e13fcecd1 | 6,390 | py | Python | ENG_FinBot/intent/Updater.py | RobinJLin/LokiHub | 8cc7a895f126a46b85213da4c35023bff9e2c99e | [
"MIT"
] | null | null | null | ENG_FinBot/intent/Updater.py | RobinJLin/LokiHub | 8cc7a895f126a46b85213da4c35023bff9e2c99e | [
"MIT"
] | null | null | null | ENG_FinBot/intent/Updater.py | RobinJLin/LokiHub | 8cc7a895f126a46b85213da4c35023bff9e2c99e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
名稱: Loki Intent Update Tool
作者: Droidtown
日期: 2021-02-22
信箱: info@droidtown.co
範例: Updater.py -o <old_intent(s)_dir> -n <new_intent(s)_dir>
說明: Updater.py 會將 <old_intent(s)_dir> 目錄中檔名符合 Loki_ 開頭
以及 .py 結尾的標準 Loki 意圖檔,和 <new_intent(s)_dir> 目錄中
的同名檔案做比對。
只要某個句型在新的意圖檔裡有,而舊的意圖檔裡沒有,就會產生出一個綜合了
新舊句型的意圖檔供開發者參考。更新後的意圖檔將會加上 __updated 做為
檔名後綴。在這個加上了 _updated 檔名後綴的檔案裡,靠近底部檔尾的地方
,則會列出所有「新」的句型。開發者只要把這個段落複製起來,貼入原有的意
圖檔裡的對應段落,即完成更新。
除了句型以外,Updater.py 也會更新全部意圖檔中的 userDefinedDICT
使用者自訂義字典。
"""
from argparse import ArgumentParser
import json
import os
def utterance_updater(oldIntentDirectory="./", newIntentDirectory=None):
    """Merge new ``utterance ==`` sentence patterns into the old intent files.

    For every ``Loki_*.py`` (excluding ``*_updated.py``) in *oldIntentDirectory*,
    any sentence-pattern line found only in the same-named file under
    *newIntentDirectory* is inserted near the end of the old source, and the
    merged result is written next to the old file as ``*_updated.py``.

    Returns False both when *newIntentDirectory* is missing and on success;
    the caller treats a truthy return value as failure.
    """
    resultBOOL = False
    if newIntentDirectory is None:
        return resultBOOL
    # Collect the old intent file names.
    oldIntentFileLIST = []
    # Bug fix: honor the oldIntentDirectory parameter instead of hard-coding "./".
    for f in os.listdir(oldIntentDirectory):
        if f.startswith("Loki_") and f.endswith(".py") and not f.endswith("_updated.py"):
            oldIntentFileLIST.append(f)
    oldIntentFileLIST.sort()
    for f in oldIntentFileLIST:
        print("[1] 檢查{}".format(f))
        # Read the sentence patterns of the old intent file.
        with open(os.path.join(oldIntentDirectory, f), encoding="utf-8") as intentFILE:
            oldIntentSrcLIST = intentFILE.readlines()
        # Robustness: guard against empty / all-blank files (IndexError before).
        while oldIntentSrcLIST and oldIntentSrcLIST[-1] == "\n":
            oldIntentSrcLIST = oldIntentSrcLIST[:-1]
        # Read the same-named intent file from the new directory.
        try:
            with open(os.path.join(newIntentDirectory, f), encoding="utf-8") as intentFILE:
                newIntentSrcLIST = intentFILE.readlines()
        except FileNotFoundError:
            # Bug fix: previously fell through with newIntentSrcLIST unbound
            # (or stale from the previous file); skip files with no counterpart.
            continue
        while newIntentSrcLIST and newIntentSrcLIST[-1] == "\n":
            newIntentSrcLIST = newIntentSrcLIST[:-1]
        # Insert every new sentence pattern that the old file lacks.
        updatedBOOL = False
        for n in newIntentSrcLIST:
            if n in oldIntentSrcLIST:
                continue
            if n.startswith(" if utterance == "):
                print(" >>加入新句型判斷:{}".format(n))
                updatedBOOL = True
                oldIntentSrcLIST.insert(-2, "\n{}{}".format(n, " # write your code here\n pass\n"))
        if updatedBOOL:
            out_path = os.path.join(oldIntentDirectory, f.replace(".py", "_updated.py"))
            with open(out_path, mode="w", encoding="utf-8") as updatedFILE:
                updatedFILE.write("".join(oldIntentSrcLIST))
        else:
            print(" >>沒有新句型需要新增。")
    return resultBOOL
def userDefinedDICT_updater(oldIntentDirectory="./", newIntentDirectory=None):
    """Copy the one-line ``userDefinedDICT = {...}`` from the new intent files
    into every old intent file (rewritten in place).

    The replacement line is taken from the first (sorted) ``Loki_*.py`` found
    in *newIntentDirectory*. Returns True on success; False when the new
    directory is missing, no intent files or no ``userDefinedDICT`` line can
    be found, or an old file's dictionary no longer parses as a single JSON
    line (i.e. was manually re-wrapped).
    """
    if newIntentDirectory is None:
        return False
    # Collect the old intent file names.
    oldIntentFileLIST = []
    # Bug fix: honor the oldIntentDirectory parameter instead of hard-coding "./".
    for f in os.listdir(oldIntentDirectory):
        if f.startswith("Loki_") and f.endswith(".py") and not f.endswith("_updated.py"):
            oldIntentFileLIST.append(f)
    oldIntentFileLIST.sort()
    if not oldIntentFileLIST:
        # Bug fix: avoid IndexError below when no intent files match.
        return False
    # Grab the new userDefinedDICT line from the first new intent file.
    newUserDefinedDICT = None
    with open(os.path.join(newIntentDirectory, oldIntentFileLIST[0]), encoding="utf-8") as intentFILE:
        for n in intentFILE:
            if n.startswith("userDefinedDICT"):
                newUserDefinedDICT = n
                break
    if newUserDefinedDICT is None:
        # Bug fix: the name was previously left unbound when no line matched.
        return False
    for o in oldIntentFileLIST:
        print("[2] 更新 userDefinedDICT:{}".format(o))
        with open(os.path.join(oldIntentDirectory, o), encoding="utf-8") as intentFILE:
            oldIntentSrcLIST = intentFILE.readlines()
        for l in range(len(oldIntentSrcLIST)):
            if oldIntentSrcLIST[l].startswith("userDefinedDICT"):
                # Only proceed while the dict still sits on one line and parses
                # as JSON (it may have been hand-wrapped by the developer).
                try:
                    json.loads(oldIntentSrcLIST[l].split("=")[1])
                except (ValueError, IndexError):  # JSONDecodeError is a ValueError
                    return False
                oldIntentSrcLIST[l] = newUserDefinedDICT
                break
        with open(os.path.join(oldIntentDirectory, o), mode="w", encoding="utf-8") as updatedFILE:
            updatedFILE.write("".join(oldIntentSrcLIST))
    return True
if __name__ == "__main__":
    # Command-line front end wiring the two updater passes together.
    progSTR = "Loki Intent Update Tool"
    usageSTR = "\nUpdater.py -o <old_intent(s)_dir> -n <new_intent(s)_dir>"
    descriptionSTR = """
    Updater.py 會將 <old_intent(s)_dir> 目錄中檔名符合 Loki_ 開頭
    以及 .py 結尾的標準 Loki 意圖檔,和 <new_intent(s)_dir> 目錄中
    的同名檔案做比對。
    只要某個句型在新的意圖檔裡有,而舊的意圖檔裡沒有,就會產生出一個綜合了
    新舊句型的意圖檔供開發者參考。更新後的意圖檔將會加上 __updated 做為
    檔名後綴。在這個加上了 _updated 檔名後綴的檔案裡,靠近底部檔尾的地方
    ,則會列出所有「新」的句型。開發者只要把這個段落複製起來,貼入原有的意
    圖檔裡的對應段落,即完成更新。
    除了句型以外,Updater.py 也會更新全部意圖檔中的 userDefinedDICT
    使用者自訂義字典。
    """
    argParser = ArgumentParser(prog=progSTR, usage=usageSTR, description=descriptionSTR, epilog=None)
    argParser.add_argument("-o", "--old-intent-dir", help="Old intent(s) directory", dest="oldIntentDirectory")
    argParser.add_argument("-n", "--new-intent-dir", help="New intent(s) directory", dest="newIntentDirectory")
    args = argParser.parse_args()

    # <manual_section>
    manualMode = False
    if manualMode:
        args.oldIntentDirectory = ""
        args.newIntentDirectory = ""
    # </manual_section>

    if args.oldIntentDirectory is None or args.newIntentDirectory is None:
        argParser.print_help()
    else:
        print("舊 Intent 目錄:{}".format(args.oldIntentDirectory))
        print("新 Intent 目錄:{}".format(args.newIntentDirectory))
        # Validate both directories with guard clauses before doing any work.
        if not os.path.isdir(args.oldIntentDirectory):
            print("舊 Intent 目錄:{} 不存在或不是合理的目錄路徑。".format(args.oldIntentDirectory))
        elif not os.path.isdir(args.newIntentDirectory):
            print("新 Intent 目錄:{} 不存在或不是合理的目錄路徑。".format(args.newIntentDirectory))
        else:
            print("\n作業開始\n")
            # utterance_updater signals success with a falsy return value.
            if utterance_updater(oldIntentDirectory=args.oldIntentDirectory, newIntentDirectory=args.newIntentDirectory):
                print("無法完成新句型檢查!")
            else:
                print("完成句型新增檢查!")
                print("請查看目錄中以 _updated.py 結尾的檔案末端以取得新的句型段落。")
            # userDefinedDICT_updater signals success with a truthy return value.
            if userDefinedDICT_updater(oldIntentDirectory=args.oldIntentDirectory, newIntentDirectory=args.newIntentDirectory):
                print("成功更新 userDefinedDICT")
            else:
                print("更新 userDefinedDICT 失敗!")
7f071b0ed22c42b1446bf51f49ff9889eb04c314 | 236 | py | Python | discord_interactions/urls.py | MrKioZ/django-discord-interactions | d00cbfbf3e80350b6e855d73487101cfec8f2242 | [
"MIT"
] | 1 | 2020-12-14T03:47:54.000Z | 2020-12-14T03:47:54.000Z | discord_interactions/urls.py | MrKioZ/django-discord-interactions | d00cbfbf3e80350b6e855d73487101cfec8f2242 | [
"MIT"
] | null | null | null | discord_interactions/urls.py | MrKioZ/django-discord-interactions | d00cbfbf3e80350b6e855d73487101cfec8f2242 | [
"MIT"
] | null | null | null | from django.urls import path
from django.conf import settings
from .views import interactions, get_all_commands
# URL routes for the discord_interactions app.
urlpatterns = [
    path('', interactions),
]

# The command-listing endpoint is exposed only while DEBUG is on.
if settings.DEBUG:
    urlpatterns += [path('commands/', get_all_commands)]
b998ff2674d6d4e428cbca6b6b5343b9187c22c9 | 19,062 | py | Python | .mywaflib/waflib/Node.py | sebdoerr/seb_gitproject | b5516c2d182930deff0fb9f4d06597047390e05a | [
"BSD-3-Clause"
] | 2 | 2016-05-15T19:20:55.000Z | 2016-07-04T18:38:20.000Z | .mywaflib/waflib/Node.py | michaelkilchenmann/Quantitative_Economic_History | c64b5ad877eb995629d4b31f8a8500e7565a953a | [
"BSD-3-Clause"
] | null | null | null | .mywaflib/waflib/Node.py | michaelkilchenmann/Quantitative_Economic_History | c64b5ad877eb995629d4b31f8a8500e7565a953a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)
"""
Node: filesystem structure, contains lists of nodes
#. Each file/folder is represented by exactly one node.
#. Some potential class properties are stored on :py:class:`waflib.Build.BuildContext` : nodes to depend on, etc.
Unused class members can increase the `.wafpickle` file size sensibly.
#. Node objects should never be created directly, use
the methods :py:func:`Node.make_node` or :py:func:`Node.find_node`
#. The methods :py:func:`Node.find_resource`, :py:func:`Node.find_dir` :py:func:`Node.find_or_declare` should be
used when a build context is present
#. Each instance of :py:class:`waflib.Context.Context` has a unique :py:class:`Node` subclass.
(:py:class:`waflib.Node.Nod3`, see the :py:class:`waflib.Context.Context` initializer). A reference to the context owning a node is held as self.ctx
"""
import os, re, sys, shutil
from waflib import Utils, Errors
# Patterns use '**' for "any number of folders"; see Node.ant_glob.
exclude_regs = '''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/BitKeeper
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzrignore
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/.arch-ids
**/{arch}
**/_darcs
**/_darcs/**
**/.intlcache
**/.DS_Store'''
"""
Ant patterns for files and folders to exclude while doing the
recursive traversal in :py:meth:`waflib.Node.Node.ant_glob`
"""
# TODO waf 1.9
# Module-level aliases are kept: other modules may reference them directly.
split_path_cygwin = Utils.split_path_cygwin
split_path_win32 = Utils.split_path_win32
# Pick the platform-appropriate path splitter; unix is the default.
if sys.platform == 'cygwin':
    split_path = split_path_cygwin
elif Utils.is_win32:
    split_path = split_path_win32
else:
    split_path = Utils.split_path_unix
class Node(object):
"""
This class is organized in two parts
* The basic methods meant for filesystem access (compute paths, create folders, etc)
* The methods bound to a :py:class:`waflib.Build.BuildContext` (require ``bld.srcnode`` and ``bld.bldnode``)
The Node objects are not thread safe in any way.
"""
dict_class = dict
__slots__ = ('name', 'sig', 'children', 'parent', 'cache_abspath', 'cache_isdir', 'cache_sig')
def __init__(self, name, parent):
    """Create a node called *name* under *parent* (None for a root node)."""
    self.name = name
    self.parent = parent
    if not parent:
        return
    # A parent may hold at most one child per name.
    if name in parent.children:
        raise Errors.WafError('node %s exists in the parent files %r already' % (name, parent))
    parent.children[name] = self
def __setstate__(self, data):
    """Restore the (name, parent, children, sig) tuple made by __getstate__."""
    self.name, self.parent = data[0], data[1]
    if data[2] is not None:
        # Issue 1480: rebuild the children mapping with the proper dict class.
        self.children = self.dict_class(data[2])
    if data[3] is not None:
        self.sig = data[3]
def __getstate__(self):
    """Serialize as (name, parent, children, sig); absent attributes become None."""
    children = getattr(self, 'children', None)
    sig = getattr(self, 'sig', None)
    return (self.name, self.parent, children, sig)
def __str__(self):
    """Short debug representation: just the node name."""
    return self.name
def __repr__(self):
    """Long debug representation: the absolute path."""
    return self.abspath()
def __hash__(self):
    """Identity-based hash (not persistent across runs); enables dict storage."""
    return id(self)
def __eq__(self, node):
    """Nodes are equal only when they are the very same object."""
    return self is node
def __copy__(self):
    """Copying nodes is forbidden: there must be exactly one node per file."""
    raise Errors.WafError('nodes are not supposed to be copied')
def read(self, flags='r', encoding='ISO8859-1'):
    """Return the contents of the file behind this node.

    :param flags: open mode
    :param encoding: text encoding used when reading
    :rtype: string
    """
    return Utils.readf(self.abspath(), flags, encoding)
def write(self, data, flags='w', encoding='ISO8859-1'):
    """Write *data* to the file behind this node.

    :param data: payload to write
    :param flags: open mode
    :param encoding: text encoding used when writing
    """
    Utils.writef(self.abspath(), data, flags, encoding)
def chmod(self, val):
    """Change file/folder permissions, e.g. ``node.chmod(493)  # 0755``."""
    os.chmod(self.abspath(), val)
def delete(self):
    """Remove the file/folder from disk and evict this node from the tree.

    The node object must not be used after calling this method.
    """
    try:
        try:
            if hasattr(self, 'children'):
                shutil.rmtree(self.abspath())   # folder: remove recursively
            else:
                os.remove(self.abspath())
        except OSError as e:
            # Re-raise only when the target still exists (a real failure).
            if os.path.exists(self.abspath()):
                raise e
    finally:
        self.evict()
def evict(self):
    """Internal: detach this node from its parent's children mapping."""
    del self.parent.children[self.name]
def suffix(self):
    """Return the file extension (quirk kept: a dot-less name is returned whole)."""
    dot = self.name.rfind('.')
    return self.name[max(0, dot):]
def height(self):
    """Number of ancestors up to the filesystem root (the root itself is 0)."""
    node, depth = self, -1
    while node:
        node = node.parent
        depth += 1
    return depth
def listdir(self):
    """Return the sorted contents of the folder behind this node."""
    entries = Utils.listdir(self.abspath())
    entries.sort()
    return entries
def mkdir(self):
    """Create the folder behind this node, making intermediate folders as needed.

    Raises a WafError only when the folder cannot possibly be created.
    """
    if getattr(self, 'cache_isdir', None):
        return
    # Create the ancestors first; a failure here is diagnosed below.
    try:
        self.parent.mkdir()
    except OSError:
        pass
    if self.name:
        try:
            os.makedirs(self.abspath())
        except OSError:
            pass
        if not os.path.isdir(self.abspath()):
            raise Errors.WafError('Could not create the directory %s' % self.abspath())
        if not hasattr(self, 'children'):
            self.children = self.dict_class()
    self.cache_isdir = True
def find_node(self, lst):
    """Find a node on disk (file or folder), creating intermediate nodes.

    :param lst: relative path as a string or a list of path components
    :return: the node, or None when nothing exists at that path
    """
    if isinstance(lst, str):
        lst = [x for x in split_path(lst) if x and x != '.']
    cur = self
    for x in lst:
        if x == '..':
            cur = cur.parent or cur
            continue
        if not hasattr(cur, 'children'):
            cur.children = self.dict_class()
        else:
            try:
                cur = cur.children[x]
                continue
            except KeyError:
                pass
        # Optimistic: create the node first, then check the filesystem.
        cur = self.__class__(x, cur)
        try:
            os.stat(cur.abspath())
        except OSError:
            cur.evict()
            return None
    ret = cur
    try:
        os.stat(ret.abspath())
    except OSError:
        ret.evict()
        return None
    # Mark the traversed ancestors as known directories.
    try:
        while not getattr(cur.parent, 'cache_isdir', None):
            cur = cur.parent
            cur.cache_isdir = True
    except AttributeError:
        pass
    return ret
def make_node(self, lst):
    """Find or create a node by path without consulting the filesystem."""
    if isinstance(lst, str):
        lst = [x for x in split_path(lst) if x and x != '.']
    cur = self
    for x in lst:
        if x == '..':
            cur = cur.parent or cur
            continue
        existing = getattr(cur, 'children', None)
        if existing:
            child = existing.get(x)
            if child is not None:
                cur = child
                continue
        else:
            cur.children = self.dict_class()
        cur = self.__class__(x, cur)
    return cur
def search_node(self, lst):
    """Look a node up by path in memory only; return None when not tracked."""
    if isinstance(lst, str):
        lst = [x for x in split_path(lst) if x and x != '.']
    cur = self
    for x in lst:
        if x == '..':
            cur = cur.parent or cur
            continue
        try:
            cur = cur.children[x]
        except (AttributeError, KeyError):
            return None
    return cur
def path_from(self, node):
    """Relative path from *node* to self (e.g. '../bar/xyz.txt')."""
    here, there = self, node
    h1, h2 = here.height(), there.height()
    parts = []
    up = 0
    # Bring both nodes to the same depth, recording self's components.
    while h1 > h2:
        parts.append(here.name)
        here = here.parent
        h1 -= 1
    while h2 > h1:
        up += 1
        there = there.parent
        h2 -= 1
    # Climb in lockstep until the common ancestor is reached.
    while id(here) != id(there):
        parts.append(here.name)
        up += 1
        here = here.parent
        there = there.parent
    if here.parent:
        parts.extend(['..'] * up)
    elif parts and not Utils.is_win32:
        # Reached the filesystem root: produce an absolute unix path.
        parts.append('')
    parts.reverse()
    return os.sep.join(parts) or '.'
def abspath(self):
    """Absolute path of this node; cached after the first computation."""
    try:
        return self.cache_abspath
    except AttributeError:
        pass
    # Performance + correctness sensitive: keep the three cases explicit.
    if not self.parent:
        val = os.sep
    elif not self.parent.name:
        val = os.sep + self.name
    else:
        val = self.parent.abspath() + os.sep + self.name
    self.cache_abspath = val
    return val
if Utils.is_win32:
    # On win32 there is no single root: drive letters form the top level.
    def abspath(self):
        """Absolute path of this node (win32 variant); cached after first use."""
        try:
            return self.cache_abspath
        except AttributeError:
            pass
        if not self.parent:
            val = ''
        elif not self.parent.name:
            val = self.name + os.sep
        else:
            val = self.parent.abspath().rstrip(os.sep) + os.sep + self.name
        self.cache_abspath = val
        return val
def is_child_of(self, node):
    """True when this node lies in the subtree rooted at *node*."""
    p = self
    # Climb exactly the height difference, then compare identities.
    for _ in range(self.height() - node.height()):
        p = p.parent
    return id(p) == id(node)
def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True):
    """Recursive generator used by :py:meth:`ant_glob` to walk the tree.

    :param accept: function returning the patterns still valid below a name
    :param maxdepth: maximum recursion depth
    :param pats: (accepted, rejected) pattern lists
    :param dir: yield folders
    :param src: yield files
    :param remove: evict nodes whose files/folders disappeared
    """
    dircont = self.listdir()
    dircont.sort()
    try:
        lst = set(self.children.keys())
    except AttributeError:
        self.children = self.dict_class()
    else:
        if remove:
            # Drop nodes for entries that no longer exist on disk.
            for x in lst - set(dircont):
                self.children[x].evict()
    for name in dircont:
        npats = accept(name, pats)
        if npats and npats[0]:
            accepted = [] in npats[0]
            node = self.make_node([name])
            isdir = os.path.isdir(node.abspath())
            if accepted:
                if isdir:
                    if dir:
                        yield node
                elif src:
                    yield node
            if getattr(node, 'cache_isdir', None) or isdir:
                node.cache_isdir = True
                if maxdepth:
                    for k in node.ant_iter(accept=accept, maxdepth=maxdepth - 1, pats=npats,
                                           dir=dir, src=src, remove=remove):
                        yield k
    # Bug fix: the original ended with `raise StopIteration`, which PEP 479
    # (Python 3.7+) turns into a RuntimeError inside generators; a plain
    # return terminates the iteration correctly on all versions.
    return
def ant_glob(self, *k, **kw):
    """Find files/folders below this node using ant-like patterns.

    Examples: ``**/*.cpp``, ``etc/*.txt``; see
    http://ant.apache.org/manual/dirtasks.html

    :param incl: pattern(s) to include (default '**')
    :param excl: pattern(s) to exclude (default: version-control folders, etc.)
    :param dir: also return folders (default False)
    :param src: return files (default True)
    :param remove: evict nodes whose files disappeared (default True)
    :param maxdepth: recursion limit (default 25)
    :param ignorecase: case-insensitive matching (default False)
    :param flat: return a space-separated string of relative paths instead
    """
    src = kw.get('src', True)
    dir = kw.get('dir', False)
    excl = kw.get('excl', exclude_regs)
    incl = k and k[0] or kw.get('incl', '**')
    reflags = kw.get('ignorecase', 0) and re.I

    def to_pat(s):
        # Compile an ant pattern string into lists of regexes ('**' kept verbatim).
        lst = Utils.to_list(s)
        ret = []
        for x in lst:
            x = x.replace('\\', '/').replace('//', '/')
            if x.endswith('/'):
                x += '**'
            accu = []
            for part in x.split('/'):
                if part == '**':
                    accu.append(part)
                else:
                    part = part.replace('.', '[.]').replace('*', '.*').replace('?', '.').replace('+', '\\+')
                    part = '^%s$' % part
                    try:
                        accu.append(re.compile(part, flags=reflags))
                    except Exception as e:
                        raise Errors.WafError("Invalid pattern: %s" % part, e)
            ret.append(accu)
        return ret

    def filtre(name, nn):
        # Advance each pattern list past one path component called *name*.
        ret = []
        for lst in nn:
            if not lst:
                pass
            elif lst[0] == '**':
                ret.append(lst)
                if len(lst) > 1:
                    if lst[1].match(name):
                        ret.append(lst[2:])
                else:
                    ret.append([])
            elif lst[0].match(name):
                ret.append(lst[1:])
        return ret

    def accept(name, pats):
        nacc = filtre(name, pats[0])
        nrej = filtre(name, pats[1])
        if [] in nrej:
            nacc = []
        return [nacc, nrej]

    ret = [x for x in self.ant_iter(accept=accept, pats=[to_pat(incl), to_pat(excl)],
                                    maxdepth=kw.get('maxdepth', 25), dir=dir, src=src,
                                    remove=kw.get('remove', True))]
    if kw.get('flat', False):
        return ' '.join([x.path_from(self) for x in ret])
    return ret
# --------------------------------------------------------------------------------
# the following methods require the source/build folders (bld.srcnode/bld.bldnode)
# using a subclass is a possibility, but is that really necessary?
# --------------------------------------------------------------------------------
def is_src(self):
    """True when the node is below the source directory.

    Note: ``not is_src()`` does not imply ``is_bld()``.
    """
    src_id = id(self.ctx.srcnode)
    bld_id = id(self.ctx.bldnode)
    cur = self
    while cur.parent:
        if id(cur) == bld_id:
            return False
        if id(cur) == src_id:
            return True
        cur = cur.parent
    return False
def is_bld(self):
    """True when the node is below the build directory.

    Note: ``not is_bld()`` does not imply ``is_src()``.
    """
    bld_id = id(self.ctx.bldnode)
    cur = self
    while cur.parent:
        if id(cur) == bld_id:
            return True
        cur = cur.parent
    return False
def get_src(self):
    """Return the equivalent node under the source tree (self when not possible)."""
    src_id = id(self.ctx.srcnode)
    bld_id = id(self.ctx.bldnode)
    trail = []
    cur = self
    while cur.parent:
        if id(cur) == bld_id:
            trail.reverse()
            return self.ctx.srcnode.make_node(trail)
        if id(cur) == src_id:
            return self
        trail.append(cur.name)
        cur = cur.parent
    return self
def get_bld(self):
    """Return the equivalent node under the build tree (self when already there)."""
    src_id = id(self.ctx.srcnode)
    bld_id = id(self.ctx.bldnode)
    trail = []
    cur = self
    while cur.parent:
        if id(cur) == bld_id:
            return self
        if id(cur) == src_id:
            trail.reverse()
            return self.ctx.bldnode.make_node(trail)
        trail.append(cur.name)
        cur = cur.parent
    # External file: mirror it under a fake root inside the build directory.
    trail.reverse()
    if trail and Utils.is_win32 and len(trail[0]) == 2 and trail[0].endswith(':'):
        trail[0] = trail[0][0]
    return self.ctx.bldnode.make_node(['__root__'] + trail)
def find_resource(self, lst):
    """Find a declared build node or an existing source file.

    Folders are rejected (None is returned for them).
    """
    if isinstance(lst, str):
        lst = [x for x in split_path(lst) if x and x != '.']
    node = self.get_bld().search_node(lst)
    if not node:
        node = self.get_src().find_node(lst)
    if node and os.path.isdir(node.abspath()):
        return None
    return node
def find_or_declare(self, lst):
    """Return an existing build/source node for *lst*, or declare a build node.

    Lookup order: a node already declared in the build directory, then an
    existing source file; when neither is found, the node is created in the
    build directory. In every case the parent folder is created, and a node
    whose file is missing gets its signature invalidated.
    """
    if isinstance(lst, str):
        lst = [x for x in split_path(lst) if x and x != '.']

    def prepare(node):
        # Shared post-processing for a node found in either tree
        # (factored out of the two previously duplicated branches).
        if not os.path.isfile(node.abspath()):
            node.sig = None
            node.parent.mkdir()
        return node

    node = self.get_bld().search_node(lst)
    if node:
        return prepare(node)
    node = self.get_src().find_node(lst)
    if node:
        return prepare(node)
    node = self.get_bld().make_node(lst)
    node.parent.mkdir()
    return node
def find_dir(self, lst):
    """Find the node of an existing folder; None when missing or not a folder."""
    if isinstance(lst, str):
        lst = [x for x in split_path(lst) if x and x != '.']
    node = self.find_node(lst)
    try:
        if not os.path.isdir(node.abspath()):
            return None
    except (OSError, AttributeError):
        # node may be None (AttributeError) or the path may be unreadable.
        return None
    return node
# helpers for building things
def change_ext(self, ext, ext_in=None):
    """Declare a sibling build node with the extension swapped for *ext*.

    :param ext_in: when given, exactly this suffix is stripped first
    :rtype: :py:class:`waflib.Node.Node`
    """
    name = self.name
    if ext_in is None:
        dot = name.rfind('.')
        name = (name[:dot] if dot >= 0 else name) + ext
    else:
        name = name[:-len(ext_in)] + ext
    return self.parent.find_or_declare([name])
def bldpath(self):
    """Path relative to the build directory, e.g. default/src/foo.cpp"""
    return self.path_from(self.ctx.bldnode)
def srcpath(self):
    """Path relative to the source directory, e.g. ../src/foo.cpp"""
    return self.path_from(self.ctx.srcnode)
def relpath(self):
    """bldpath() for nodes below the build directory, srcpath() otherwise."""
    bld_id = id(self.ctx.bldnode)
    cur = self
    while cur.parent:
        if id(cur) == bld_id:
            return self.bldpath()
        cur = cur.parent
    return self.srcpath()
def bld_dir(self):
    """Build path of the containing folder (no file name)."""
    return self.parent.bldpath()
def get_bld_sig(self):
    """Return (and cache) this node's signature.

    Source files are hashed on demand; build outputs must already carry a
    signature set by the task that produced them.
    """
    try:
        return self.cache_sig
    except AttributeError:
        pass
    if not self.is_bld() or self.ctx.bldnode is self.ctx.srcnode:
        self.sig = Utils.h_file(self.abspath())
    ret = self.cache_sig = self.sig
    return ret
# Shared lock guarding node (de)serialization.
pickle_lock = Utils.threading.Lock()
"""Lock mandatory for thread-safe node serialization"""
class Nod3(Node):
    """Mandatory Node subclass used for thread-safe node serialization
    (each Context class carries its own Node subclass)."""
    pass # do not remove
| 24.159696 | 166 | 0.641853 |
ba85098404413699abeaf6f239aa9eec02f5047c | 49,546 | py | Python | apc/eval_rcnn_afusg.py | jjn037/FusionAttack | 25646543b3008bd7f92760c8b0e6645450e79abf | [
"MIT"
] | null | null | null | apc/eval_rcnn_afusg.py | jjn037/FusionAttack | 25646543b3008bd7f92760c8b0e6645450e79abf | [
"MIT"
] | null | null | null | apc/eval_rcnn_afusg.py | jjn037/FusionAttack | 25646543b3008bd7f92760c8b0e6645450e79abf | [
"MIT"
] | null | null | null | import _init_path
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from lib.net.point_rcnn import PointRCNN
from lib.net.GAN_model import Generator_img, Generator_pts
from lib.net.train_functions import reduce_sum
from lib.datasets.kitti_rcnn_dataset import KittiRCNNDataset
import tools.train_utils.train_utils as train_utils
from lib.utils.bbox_transform import decode_bbox_target
from tools.kitti_object_eval_python.evaluate import evaluate as kitti_evaluate
from lib.config import cfg, cfg_from_file, save_config_to_file, cfg_from_list
import argparse
import lib.utils.kitti_utils as kitti_utils
import lib.utils.iou3d.iou3d_utils as iou3d_utils
from datetime import datetime
import logging
import re
import glob
import time
from tensorboardX import SummaryWriter
import tqdm
np.random.seed(1024) # set the same seed
parser = argparse.ArgumentParser(description="arg parser")
# -- configuration / mode -----------------------------------------------------
parser.add_argument('--cfg_file', type=str, default='cfgs/default.yml', help='specify the config for evaluation')
parser.add_argument("--eval_mode", type=str, default='rpn', required=True, help="specify the evaluation mode")
parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
parser.add_argument('--test', action='store_true', default=False, help='evaluate without ground truth')
# -- checkpoints --------------------------------------------------------------
parser.add_argument("--ckpt", type=str, default=None, help="specify a checkpoint to be evaluated")
parser.add_argument("--rpn_ckpt", type=str, default=None,
                    help="specify the checkpoint of rpn if trained separated")
parser.add_argument("--rcnn_ckpt", type=str, default=None,
                    help="specify the checkpoint of rcnn if trained separated")
parser.add_argument("--afus_ckpt_dir", type=str, default=None)
parser.add_argument("--afus_epoch", type=int, default=1)
parser.add_argument("--afus_iter", type=int, default=100)
# NOTE(review): store_true combined with default=True can never become False
# from the command line; kept as-is to preserve behavior.
parser.add_argument('--gen_pert', action='store_true', default=True)
# -- runtime / output ---------------------------------------------------------
parser.add_argument('--batch_size', type=int, default=1, help='batch size for evaluation')
parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
parser.add_argument("--extra_tag", type=str, default='default', help="extra tag for multiple evaluation")
parser.add_argument('--output_dir', type=str, default=None, help='specify an output directory if needed')
parser.add_argument("--ckpt_dir", type=str, default=None,
                    help="specify a ckpt directory to be evaluated if needed")
parser.add_argument('--save_result', action='store_true', default=False, help='save evaluation results to files')
parser.add_argument('--save_rpn_feature', action='store_true', default=False,
                    help='save features for separately rcnn training and evaluation')
# NOTE(review): same store_true/default=True caveat as --gen_pert.
parser.add_argument('--random_select', action='store_true', default=True,
                    help='sample to the same number of points')
parser.add_argument('--start_epoch', default=0, type=int, help='ignore the checkpoint smaller than this epoch')
parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
parser.add_argument("--rcnn_eval_roi_dir", type=str, default=None,
                    help='specify the saved rois for rcnn evaluation when using rcnn_offline mode')
parser.add_argument("--rcnn_eval_feature_dir", type=str, default=None,
                    help='specify the saved features for rcnn evaluation when using rcnn_offline mode')
parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                    help='set extra config keys if needed')
parser.add_argument('--model_type', type=str, default='base', help='model type')
args = parser.parse_args()
def create_logger(log_file):
    """Set up root file logging to *log_file* and mirror INFO+ records to the
    console; return this module's logger.

    NOTE(review): calling this more than once attaches a duplicate console
    handler, producing repeated lines.
    """
    fmt = '%(asctime)s %(levelname)5s %(message)s'
    logging.basicConfig(level=logging.INFO, format=fmt, filename=log_file)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter(fmt))
    logger = logging.getLogger(__name__)
    logger.addHandler(console)
    return logger
def save_kitti_format(sample_id, calib, bbox3d, kitti_output_dir, scores, img_shape):
    """Write the detections of one sample to a KITTI-format label file.

    NOTE(review): `calib` is a project type assumed to provide
    corners3d_to_img_boxes; bbox3d column order assumed to follow the KITTI
    writer below ([x, y, z, h, w, l, ry]) -- confirm against the dataset code.
    """
    corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)
    img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)

    # Clamp the projected 2D boxes to the image canvas.
    img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape[1] - 1)
    img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape[0] - 1)
    img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape[1] - 1)
    img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape[0] - 1)

    # Discard implausibly large boxes (> 80% of the image in either dimension).
    img_boxes_w = img_boxes[:, 2] - img_boxes[:, 0]
    img_boxes_h = img_boxes[:, 3] - img_boxes[:, 1]
    box_valid_mask = np.logical_and(img_boxes_w < img_shape[1] * 0.8, img_boxes_h < img_shape[0] * 0.8)

    kitti_output_file = os.path.join(kitti_output_dir, '%06d.txt' % sample_id)
    with open(kitti_output_file, 'w') as f:
        for k in range(bbox3d.shape[0]):
            if box_valid_mask[k] == 0:
                continue
            x, z, ry = bbox3d[k, 0], bbox3d[k, 2], bbox3d[k, 6]
            # Observation angle alpha derived from the viewing direction beta.
            beta = np.arctan2(z, x)
            alpha = -np.sign(beta) * np.pi / 2 + beta + ry
            print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' %
                  (cfg.CLASSES, alpha, img_boxes[k, 0], img_boxes[k, 1], img_boxes[k, 2], img_boxes[k, 3],
                   bbox3d[k, 3], bbox3d[k, 4], bbox3d[k, 5], bbox3d[k, 0], bbox3d[k, 1], bbox3d[k, 2],
                   bbox3d[k, 6], scores[k]), file=f)
def save_rpn_features(seg_result, rpn_scores_raw, pts_features, backbone_xyz, backbone_features,
                      kitti_features_dir, sample_id):
    """Dump the RPN intermediate outputs of one sample as .npy files.

    Files written (prefix = zero-padded sample id): <id>.npy (backbone
    features), <id>_xyz.npy, <id>_seg.npy, <id>_intensity.npy (first column
    of pts_features), <id>_rawscore.npy.
    """
    pts_intensity = pts_features[:, 0]
    base = '%06d' % sample_id
    dumps = (
        ('', backbone_features),
        ('_xyz', backbone_xyz),
        ('_seg', seg_result),
        ('_intensity', pts_intensity),
        ('_rawscore', rpn_scores_raw),
    )
    for suffix, arr in dumps:
        np.save(os.path.join(kitti_features_dir, base + suffix + '.npy'), arr)
def eval_one_epoch_rpn(model, dataloader, epoch_id, result_dir, logger):
    """Evaluate the RPN stage for one epoch.

    Runs the model over *dataloader*, accumulates foreground-segmentation IoU
    and proposal recall at several 3D-IoU thresholds (when labels are
    available), and optionally dumps segmentation results, backbone features
    and KITTI-format detections under *result_dir*.

    Returns a dict with 'max_obj_num', 'rpn_iou' and per-threshold recalls.
    """
    # Fixed seed so any stochastic sampling in the pipeline is reproducible.
    np.random.seed(1024)
    mode = 'TEST' if args.test else 'EVAL'
    if args.save_rpn_feature:
        kitti_features_dir = os.path.join(result_dir, 'features')
        os.makedirs(kitti_features_dir, exist_ok = True)
    if args.save_result or args.save_rpn_feature:
        kitti_output_dir = os.path.join(result_dir, 'detections', 'data')
        seg_output_dir = os.path.join(result_dir, 'seg_result')
        os.makedirs(kitti_output_dir, exist_ok = True)
        os.makedirs(seg_output_dir, exist_ok = True)
    logger.info('---- EPOCH %s RPN EVALUATION ----' % epoch_id)
    model.eval()
    # Recall is accumulated at each of these 3D-IoU thresholds.
    thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
    total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
    dataset = dataloader.dataset
    # NOTE(review): max_num is never updated below, so the
    # 'max number of objects' log and ret_dict['max_obj_num'] are always 0.
    cnt = max_num = rpn_iou_avg = 0
    progress_bar = tqdm.tqdm(total = len(dataloader), leave = True, desc = 'eval')
    for data in dataloader:
        sample_id_list, pts_rect, pts_features, pts_input = \
            data['sample_id'], data['pts_rect'], data['pts_features'], data['pts_input']
        sample_id = sample_id_list[0]
        cnt += len(sample_id_list)
        if not args.test:
            rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']
            gt_boxes3d = data['gt_boxes3d']
            rpn_cls_label = torch.from_numpy(rpn_cls_label).cuda(non_blocking = True).long()
            if gt_boxes3d.shape[1] == 0:  # (B, M, 7)
                pass
                # logger.info('%06d: No gt box' % sample_id)
            else:
                gt_boxes3d = torch.from_numpy(gt_boxes3d).cuda(non_blocking = True).float()
        inputs = torch.from_numpy(pts_input).cuda(non_blocking = True).float()
        input_data = { 'pts_input': inputs }
        print('inputs', inputs.size())
        # img feature
        if cfg.LI_FUSION.ENABLED:
            pts_origin_xy, img = data['pts_origin_xy'], data['img']
            pts_origin_xy = torch.from_numpy(pts_origin_xy).cuda(non_blocking = True).float()
            # NHWC -> NCHW for the image branch.
            img = torch.from_numpy(img).cuda(non_blocking = True).float().permute((0,3,1,2))
            input_data['pts_origin_xy'] = pts_origin_xy
            input_data['img'] = img
        if cfg.RPN.USE_RGB or cfg.RCNN.USE_RGB:
            pts_rgb=data['rgb']
            pts_rgb=torch.from_numpy(pts_rgb).cuda(non_blocking = True).float()
            input_data['pts_rgb']=pts_rgb
        # model inference
        ret_dict = model(input_data)
        rpn_cls, rpn_reg = ret_dict['rpn_cls'], ret_dict['rpn_reg']
        backbone_xyz, backbone_features = ret_dict['backbone_xyz'], ret_dict['backbone_features']
        rpn_scores_raw = rpn_cls[:, :, 0]
        rpn_scores = torch.sigmoid(rpn_scores_raw)
        # Per-point foreground decision by thresholding the sigmoid score.
        seg_result = (rpn_scores > cfg.RPN.SCORE_THRESH).long()
        # proposal layer
        rois, roi_scores_raw = model.rpn.proposal_layer(rpn_scores_raw, rpn_reg, backbone_xyz)  # (B, M, 7)
        batch_size = rois.shape[0]
        # calculate recall and save results to file
        for bs_idx in range(batch_size):
            cur_sample_id = sample_id_list[bs_idx]
            cur_scores_raw = roi_scores_raw[bs_idx]  # (N)
            cur_boxes3d = rois[bs_idx]  # (N, 7)
            cur_seg_result = seg_result[bs_idx]
            cur_pts_rect = pts_rect[bs_idx]
            # calculate recall
            if not args.test:
                cur_rpn_cls_label = rpn_cls_label[bs_idx]
                cur_gt_boxes3d = gt_boxes3d[bs_idx]
                # Strip trailing all-zero padding boxes.
                # NOTE(review): the loop stops at k > 0, so an all-zero box at
                # index 0 is kept (eval_one_epoch_joint uses tmp_idx >= 0) --
                # confirm whether this asymmetry is intended.
                k = cur_gt_boxes3d.__len__() - 1
                while k > 0 and cur_gt_boxes3d[k].sum() == 0:
                    k -= 1
                cur_gt_boxes3d = cur_gt_boxes3d[:k + 1]
                recalled_num = 0
                if cur_gt_boxes3d.shape[0] > 0:
                    iou3d = iou3d_utils.boxes_iou3d_gpu(cur_boxes3d, cur_gt_boxes3d[:, 0:7])
                    # Best proposal IoU for each ground-truth box.
                    gt_max_iou, _ = iou3d.max(dim = 0)
                    for idx, thresh in enumerate(thresh_list):
                        total_recalled_bbox_list[idx] += (gt_max_iou > thresh).sum().item()
                    recalled_num = (gt_max_iou > 0.7).sum().item()
                    total_gt_bbox += cur_gt_boxes3d.__len__()
                # Point-wise foreground-segmentation IoU against the labels.
                fg_mask = cur_rpn_cls_label > 0
                correct = ((cur_seg_result == cur_rpn_cls_label) & fg_mask).sum().float()
                union = fg_mask.sum().float() + (cur_seg_result > 0).sum().float() - correct
                rpn_iou = correct / torch.clamp(union, min = 1.0)
                rpn_iou_avg += rpn_iou.item()
            # save result
            if args.save_rpn_feature:
                # save features to file
                save_rpn_features(seg_result[bs_idx].float().cpu().numpy(),
                                  rpn_scores_raw[bs_idx].float().cpu().numpy(),
                                  pts_features[bs_idx],
                                  backbone_xyz[bs_idx].cpu().numpy(),
                                  backbone_features[bs_idx].cpu().numpy().transpose(1, 0),
                                  kitti_features_dir, cur_sample_id)
            if args.save_result or args.save_rpn_feature:
                cur_pred_cls = cur_seg_result.cpu().numpy()
                output_file = os.path.join(seg_output_dir, '%06d.npy' % cur_sample_id)
                # Points + (gt label when available) + predicted label, float16 to save disk.
                if not args.test:
                    cur_gt_cls = cur_rpn_cls_label.cpu().numpy()
                    output_data = np.concatenate(
                            (cur_pts_rect.reshape(-1, 3), cur_gt_cls.reshape(-1, 1), cur_pred_cls.reshape(-1, 1)),
                            axis = 1)
                else:
                    output_data = np.concatenate((cur_pts_rect.reshape(-1, 3), cur_pred_cls.reshape(-1, 1)), axis = 1)
                np.save(output_file, output_data.astype(np.float16))
                # save as kitti format
                calib = dataset.get_calib(cur_sample_id)
                cur_boxes3d = cur_boxes3d.cpu().numpy()
                image_shape = dataset.get_image_shape(cur_sample_id)
                save_kitti_format(cur_sample_id, calib, cur_boxes3d, kitti_output_dir, cur_scores_raw, image_shape)
        disp_dict = { 'mode' : mode, 'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox),
                      'rpn_iou': rpn_iou_avg / max(cnt, 1.0) }
        progress_bar.set_postfix(disp_dict)
        progress_bar.update()
    progress_bar.close()
    logger.info(str(datetime.now()))
    logger.info('-------------------performance of epoch %s---------------------' % epoch_id)
    logger.info('max number of objects: %d' % max_num)
    logger.info('rpn iou avg: %f' % (rpn_iou_avg / max(cnt, 1.0)))
    # NOTE(review): unlike the log line above, this divides by cnt without the
    # max() guard -- ZeroDivisionError if the dataloader is empty.
    ret_dict = { 'max_obj_num': max_num, 'rpn_iou': rpn_iou_avg / cnt }
    for idx, thresh in enumerate(thresh_list):
        cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
        logger.info('total bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_recalled_bbox_list[idx],
                                                                      total_gt_bbox, cur_recall))
        ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_recall
    logger.info('result is saved to: %s' % result_dir)
    return ret_dict
def eval_one_epoch_rcnn(model, dataloader, epoch_id, result_dir, logger):
    """Evaluate the RCNN refinement stage for one epoch (requires batch_size == 1).

    Decodes refined boxes from pre-computed ROIs, measures recall and
    classification accuracy against the labels (when available), applies score
    thresholding and rotated NMS, writes final detections in KITTI format and,
    when the split has labels, runs the official KITTI AP evaluation.

    Returns a dict of summary metrics (recalls, cls accuracy, AP entries, ...).
    """
    np.random.seed(1024)
    # Per-class mean box size used as the regression anchor.
    MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
    mode = 'TEST' if args.test else 'EVAL'
    final_output_dir = os.path.join(result_dir, 'final_result', 'data')
    os.makedirs(final_output_dir, exist_ok = True)
    if args.save_result:
        roi_output_dir = os.path.join(result_dir, 'roi_result', 'data')
        refine_output_dir = os.path.join(result_dir, 'refine_result', 'data')
        os.makedirs(roi_output_dir, exist_ok = True)
        os.makedirs(refine_output_dir, exist_ok = True)
    logger.info('---- EPOCH %s RCNN EVALUATION ----' % epoch_id)
    model.eval()
    thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
    total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
    total_roi_recalled_bbox_list = [0] * 5
    dataset = dataloader.dataset
    cnt = final_total = total_cls_acc = total_cls_acc_refined = 0
    progress_bar = tqdm.tqdm(total = len(dataloader), leave = True, desc = 'eval')
    for data in dataloader:
        sample_id = data['sample_id']
        cnt += 1
        assert args.batch_size == 1, 'Only support bs=1 here'
        input_data = { }
        # Move every numpy field except the sample id onto the GPU.
        for key, val in data.items():
            if key != 'sample_id':
                input_data[key] = torch.from_numpy(val).contiguous().cuda(non_blocking = True).float()
        roi_boxes3d = input_data['roi_boxes3d']
        roi_scores = input_data['roi_scores']
        if cfg.RCNN.ROI_SAMPLE_JIT:
            # Add a batch dimension for the jit ROI-sampling path.
            for key, val in input_data.items():
                if key in ['gt_iou', 'gt_boxes3d']:
                    continue
                input_data[key] = input_data[key].unsqueeze(dim = 0)
        else:
            pts_input = torch.cat((input_data['pts_input'], input_data['pts_features']), dim = -1)
            input_data['pts_input'] = pts_input
        # img feature
        if cfg.LI_FUSION.ENABLED:
            pts_origin_xy, img = data['pts_origin_xy'], data['img']
            pts_origin_xy = torch.from_numpy(pts_origin_xy).cuda(non_blocking = True).float()
            # NHWC -> NCHW for the image branch.
            img = torch.from_numpy(img).cuda(non_blocking = True).float().permute((0,3,1,2))
            input_data['pts_origin_xy'] = pts_origin_xy
            input_data['img'] = img
        if cfg.RPN.USE_RGB or cfg.RCNN.USE_RGB:
            pts_rgb=data['rgb']
            pts_rgb=torch.from_numpy(pts_rgb).cuda(non_blocking = True).float()
            input_data['pts_rgb']=pts_rgb
        ret_dict = model(input_data)
        rcnn_cls = ret_dict['rcnn_cls']
        rcnn_reg = ret_dict['rcnn_reg']
        # bounding box regression
        anchor_size = MEAN_SIZE
        if cfg.RCNN.SIZE_RES_ON_ROI:
            roi_size = input_data['roi_size']
            anchor_size = roi_size
        pred_boxes3d = decode_bbox_target(roi_boxes3d, rcnn_reg,
                                          anchor_size = anchor_size,
                                          loc_scope = cfg.RCNN.LOC_SCOPE,
                                          loc_bin_size = cfg.RCNN.LOC_BIN_SIZE,
                                          num_head_bin = cfg.RCNN.NUM_HEAD_BIN,
                                          get_xz_fine = True, get_y_by_bin = cfg.RCNN.LOC_Y_BY_BIN,
                                          loc_y_scope = cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size = cfg.RCNN.LOC_Y_BIN_SIZE,
                                          get_ry_fine = True)
        # scoring
        if rcnn_cls.shape[1] == 1:
            # Binary head: a single logit per box.
            raw_scores = rcnn_cls.view(-1)
            norm_scores = torch.sigmoid(raw_scores)
            pred_classes = (norm_scores > cfg.RCNN.SCORE_THRESH).long()
        else:
            pred_classes = torch.argmax(rcnn_cls, dim = 1).view(-1)
            cls_norm_scores = F.softmax(rcnn_cls, dim = 1)
            # NOTE(review): indexing with the full pred_classes vector selects
            # an (N, N) matrix rather than one score per box -- verify the
            # multi-class path before relying on it.
            raw_scores = rcnn_cls[:, pred_classes]
            norm_scores = cls_norm_scores[:, pred_classes]
        # evaluation
        disp_dict = { 'mode': mode }
        if not args.test:
            gt_boxes3d = input_data['gt_boxes3d']
            gt_iou = input_data['gt_iou']
            # calculate recall
            gt_num = gt_boxes3d.shape[0]
            if gt_num > 0:
                iou3d = iou3d_utils.boxes_iou3d_gpu(pred_boxes3d, gt_boxes3d)
                # Best predicted-box IoU for each ground-truth box.
                gt_max_iou, _ = iou3d.max(dim = 0)
                refined_iou, _ = iou3d.max(dim = 1)
                for idx, thresh in enumerate(thresh_list):
                    total_recalled_bbox_list[idx] += (gt_max_iou > thresh).sum().item()
                recalled_num = (gt_max_iou > 0.7).sum().item()
                total_gt_bbox += gt_num
                # Recall of the input ROIs, before refinement.
                iou3d_in = iou3d_utils.boxes_iou3d_gpu(roi_boxes3d, gt_boxes3d)
                gt_max_iou_in, _ = iou3d_in.max(dim = 0)
                for idx, thresh in enumerate(thresh_list):
                    total_roi_recalled_bbox_list[idx] += (gt_max_iou_in > thresh).sum().item()
            # classification accuracy
            cls_label = (gt_iou > cfg.RCNN.CLS_FG_THRESH).float()
            # Boxes whose gt_iou falls between BG and FG thresholds are ignored.
            cls_valid_mask = ((gt_iou >= cfg.RCNN.CLS_FG_THRESH) | (gt_iou <= cfg.RCNN.CLS_BG_THRESH)).float()
            cls_acc = ((pred_classes == cls_label.long()).float() * cls_valid_mask).sum() / max(cls_valid_mask.sum(),
                                                                                                1.0)
            iou_thresh = 0.7 if cfg.CLASSES == 'Car' else 0.5
            cls_label_refined = (gt_iou >= iou_thresh).float()
            cls_acc_refined = (pred_classes == cls_label_refined.long()).float().sum() / max(cls_label_refined.shape[0],
                                                                                             1.0)
            total_cls_acc += cls_acc.item()
            total_cls_acc_refined += cls_acc_refined.item()
            disp_dict['recall'] = '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox)
            disp_dict['cls_acc_refined'] = '%.2f' % cls_acc_refined.item()
        progress_bar.set_postfix(disp_dict)
        progress_bar.update()
        image_shape = dataset.get_image_shape(sample_id)
        if args.save_result:
            # save roi and refine results
            roi_boxes3d_np = roi_boxes3d.cpu().numpy()
            pred_boxes3d_np = pred_boxes3d.cpu().numpy()
            calib = dataset.get_calib(sample_id)
            save_kitti_format(sample_id, calib, roi_boxes3d_np, roi_output_dir, roi_scores, image_shape)
            save_kitti_format(sample_id, calib, pred_boxes3d_np, refine_output_dir, raw_scores.cpu().numpy(),
                              image_shape)
        # NMS and scoring
        # scores thresh
        inds = norm_scores > cfg.RCNN.SCORE_THRESH
        if inds.sum() == 0:
            continue
        pred_boxes3d_selected = pred_boxes3d[inds]
        raw_scores_selected = raw_scores[inds]
        # NMS thresh (rotated NMS in bird's-eye view)
        boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(pred_boxes3d_selected)
        keep_idx = iou3d_utils.nms_gpu(boxes_bev_selected, raw_scores_selected, cfg.RCNN.NMS_THRESH)
        pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]
        scores_selected = raw_scores_selected[keep_idx]
        pred_boxes3d_selected, scores_selected = pred_boxes3d_selected.cpu().numpy(), scores_selected.cpu().numpy()
        calib = dataset.get_calib(sample_id)
        final_total += pred_boxes3d_selected.shape[0]
        save_kitti_format(sample_id, calib, pred_boxes3d_selected, final_output_dir, scores_selected, image_shape)
    progress_bar.close()
    # dump empty files
    # The KITTI evaluation tool expects one result file per frame of the split,
    # even if the frame has no detections.
    split_file = os.path.join(dataset.imageset_dir, '..', '..', 'ImageSets', dataset.split + '.txt')
    split_file = os.path.abspath(split_file)
    image_idx_list = [x.strip() for x in open(split_file).readlines()]
    empty_cnt = 0
    for k in range(image_idx_list.__len__()):
        cur_file = os.path.join(final_output_dir, '%s.txt' % image_idx_list[k])
        if not os.path.exists(cur_file):
            with open(cur_file, 'w') as temp_f:
                pass
            empty_cnt += 1
            logger.info('empty_cnt=%d: dump empty file %s' % (empty_cnt, cur_file))
    ret_dict = { 'empty_cnt': empty_cnt }
    logger.info('-------------------performance of epoch %s---------------------' % epoch_id)
    logger.info(str(datetime.now()))
    avg_cls_acc = (total_cls_acc / max(cnt, 1.0))
    avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))
    avg_det_num = (final_total / max(cnt, 1.0))
    logger.info('final average detections: %.3f' % avg_det_num)
    logger.info('final average cls acc: %.3f' % avg_cls_acc)
    logger.info('final average cls acc refined: %.3f' % avg_cls_acc_refined)
    ret_dict['rcnn_cls_acc'] = avg_cls_acc
    ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined
    ret_dict['rcnn_avg_num'] = avg_det_num
    for idx, thresh in enumerate(thresh_list):
        cur_roi_recall = total_roi_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
        logger.info('total roi bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_roi_recalled_bbox_list[idx],
                                                                          total_gt_bbox, cur_roi_recall))
        ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_roi_recall
    for idx, thresh in enumerate(thresh_list):
        cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
        logger.info('total bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_recalled_bbox_list[idx],
                                                                      total_gt_bbox, cur_recall))
        ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall
    if cfg.TEST.SPLIT != 'test':
        logger.info('Averate Precision:')
        name_to_class = { 'Car': 0, 'Pedestrian': 1, 'Cyclist': 2 }
        ap_result_str, ap_dict = kitti_evaluate(dataset.label_dir, final_output_dir, label_split_file = split_file,
                                                current_class = name_to_class[cfg.CLASSES])
        logger.info(ap_result_str)
        ret_dict.update(ap_dict)
    logger.info('result is saved to: %s' % result_dir)
    return ret_dict
def eval_one_epoch_joint(model, generator_img, generator_pts, dataloader, epoch_id, result_dir, logger):
    """Evaluate the full RPN+RCNN pipeline under adversarial perturbations.

    *generator_img* produces an additive image perturbation, *generator_pts*
    perturbs (or replaces) the point-cloud input; the perturbed inputs are fed
    through *model*, and recall / detection metrics plus average perturbation
    magnitudes are accumulated. Detections are written in KITTI format and,
    when the split has labels, the official KITTI AP evaluation is run.
    """
    np.random.seed(666)
    # Per-class mean box size used as the regression anchor.
    MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
    mode = 'TEST' if args.test else 'EVAL'
    final_output_dir = os.path.join(result_dir, 'final_result', 'data')
    os.makedirs(final_output_dir, exist_ok = True)
    if args.save_result:
        roi_output_dir = os.path.join(result_dir, 'roi_result', 'data')
        refine_output_dir = os.path.join(result_dir, 'refine_result', 'data')
        rpn_output_dir = os.path.join(result_dir, 'rpn_result', 'data')
        os.makedirs(rpn_output_dir, exist_ok = True)
        os.makedirs(roi_output_dir, exist_ok = True)
        os.makedirs(refine_output_dir, exist_ok = True)
    logger.info('---- EPOCH %s JOINT EVALUATION ----' % epoch_id)
    logger.info('==> Output file: %s' % result_dir)
    model.eval()
    generator_img.eval()
    generator_pts.eval()
    thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
    total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
    total_roi_recalled_bbox_list = [0] * 5
    dataset = dataloader.dataset
    # NOTE(review): total_cls_acc / total_cls_acc_refined are never incremented
    # in this function, so the corresponding averages are always 0.
    cnt = final_total = total_cls_acc = total_cls_acc_refined = total_rpn_iou = 0
    # Accumulated squared-L2 magnitudes of the perturbations.
    pert_dist_img = 0
    refined_dist_img = 0
    pert_dist_pts = 0
    progress_bar = tqdm.tqdm(total = len(dataloader), leave = True, desc = 'eval')
    # ImageNet normalisation constants; perturbed images are clamped so that
    # the de-normalised pixels stay within [0, 1].
    img_mean = np.array([0.485, 0.456, 0.406])
    img_std = np.array([0.229, 0.224, 0.225])
    clamp_max = (1. - img_mean) / img_std
    clamp_min = - img_mean / img_std
    for data in dataloader:
        cnt += 1
        sample_id, pts_rect, pts_features, pts_input = \
            data['sample_id'], data['pts_rect'], data['pts_features'], data['pts_input']
        batch_size = len(sample_id)
        inputs = torch.from_numpy(pts_input).cuda(non_blocking = True).float()
        # print('pts_input', pts_input.shape)
        input_data = {}
        # img feature
        if cfg.LI_FUSION.ENABLED:
            pts_origin_xy, img = data['pts_origin_xy'], data['img']
            pts_origin_xy = torch.from_numpy(pts_origin_xy).cuda(non_blocking = True).float()
            # NHWC -> NCHW for the image branch.
            img = torch.from_numpy(img).cuda(non_blocking = True).float().permute((0,3,1,2))
            input_data['pts_origin_xy'] = pts_origin_xy
            # input_data['img'] = img
            img_pert, img_pert_feature = generator_img(img)
            cur_dist_img = torch.mean(reduce_sum(img_pert ** 2))
            pert_dist_img += cur_dist_img
            input_data['img'] = img + img_pert
            # Clamp each channel so the perturbed image stays a valid picture.
            for j in range(3):
                input_data['img'][:, j, :, :] = torch.clamp(input_data['img'][:, j, :, :],
                                                            min=clamp_min[j], max=clamp_max[j])
            cur_dist_img_r = torch.mean(reduce_sum((input_data['img'] - img) ** 2))
            refined_dist_img += cur_dist_img_r
        if args.gen_pert:
            print('gen_pert')
            # Additive point perturbation conditioned on the image features.
            # NOTE(review): img_pert_feature / pts_origin_xy exist only when
            # cfg.LI_FUSION.ENABLED -- this path assumes fusion is on.
            pts_pert = generator_pts(inputs, img_pert_feature, pts_origin_xy)
            input_data['pts_input'] = inputs + pts_pert
            # np.save('1st.npy', input_data['pts_input'].cpu().detach().numpy())
            # print(h)
            rpn_cls_label = data['rpn_cls_label']
            rpn_cls_label = torch.from_numpy(rpn_cls_label).cuda(non_blocking=True).long()
            fg_mask = (rpn_cls_label > 0).float()
            cur_dist_pts = torch.mean(reduce_sum(pts_pert ** 2))
        else:
            # The generator directly outputs the (perturbed) point cloud.
            input_data['pts_input'] = generator_pts(inputs)
            cur_dist_pts = torch.mean(reduce_sum((input_data['pts_input'] - inputs) ** 2))
        pert_dist_pts += cur_dist_pts
        if cfg.RPN.USE_RGB or cfg.RCNN.USE_RGB:
            pts_rgb=data['rgb']
            pts_rgb=torch.from_numpy(pts_rgb).cuda(non_blocking = True).float()
            input_data['pts_rgb']=pts_rgb
        # model inference
        ret_dict = model(input_data)
        roi_scores_raw = ret_dict['roi_scores_raw']  # (B, M)
        roi_boxes3d = ret_dict['rois']  # (B, M, 7)
        seg_result = ret_dict['seg_result'].long()  # (B, N)
        rcnn_cls = ret_dict['rcnn_cls'].view(batch_size, -1, ret_dict['rcnn_cls'].shape[1])
        rcnn_reg = ret_dict['rcnn_reg'].view(batch_size, -1, ret_dict['rcnn_reg'].shape[1])  # (B, M, C)
        if cfg.USE_IOU_BRANCH:
            rcnn_iou_branch = ret_dict['rcnn_iou_branch'].view(batch_size, -1, ret_dict['rcnn_iou_branch'].shape[1])
            # Floor the predicted IoU at 1e-4 before rescaling the cls scores.
            rcnn_iou_branch = torch.max(rcnn_iou_branch, rcnn_iou_branch.new().resize_(rcnn_iou_branch.shape).fill_(1e-4))
            rcnn_cls = rcnn_iou_branch * rcnn_cls
        # bounding box regression
        anchor_size = MEAN_SIZE
        if cfg.RCNN.SIZE_RES_ON_ROI:
            assert False
        pred_boxes3d = decode_bbox_target(roi_boxes3d.view(-1, 7), rcnn_reg.view(-1, rcnn_reg.shape[-1]),
                                          anchor_size = anchor_size,
                                          loc_scope = cfg.RCNN.LOC_SCOPE,
                                          loc_bin_size = cfg.RCNN.LOC_BIN_SIZE,
                                          num_head_bin = cfg.RCNN.NUM_HEAD_BIN,
                                          get_xz_fine = True, get_y_by_bin = cfg.RCNN.LOC_Y_BY_BIN,
                                          loc_y_scope = cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size = cfg.RCNN.LOC_Y_BIN_SIZE,
                                          get_ry_fine = True).view(batch_size, -1, 7)
        # scoring
        if rcnn_cls.shape[2] == 1:
            raw_scores = rcnn_cls  # (B, M, 1)
            norm_scores = torch.sigmoid(raw_scores)
            pred_classes = (norm_scores > cfg.RCNN.SCORE_THRESH).long()
        else:
            # NOTE(review): rcnn_cls is (B, M, C) here; argmax/softmax over
            # dim=1 reduces the box axis, not the class axis -- verify the
            # multi-class path before relying on it.
            pred_classes = torch.argmax(rcnn_cls, dim = 1).view(-1)
            cls_norm_scores = F.softmax(rcnn_cls, dim = 1)
            raw_scores = rcnn_cls[:, pred_classes]
            norm_scores = cls_norm_scores[:, pred_classes]
        # evaluation
        recalled_num = gt_num = rpn_iou = 0
        if not args.test:
            if not cfg.RPN.FIXED:
                rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']
                rpn_cls_label = torch.from_numpy(rpn_cls_label).cuda(non_blocking = True).long()
            gt_boxes3d = data['gt_boxes3d']
            for k in range(batch_size):
                # calculate recall
                cur_gt_boxes3d = gt_boxes3d[k]
                tmp_idx = cur_gt_boxes3d.__len__() - 1
                # Strip trailing all-zero padding boxes; skip the sample if
                # every box is padding.
                while tmp_idx >= 0 and cur_gt_boxes3d[tmp_idx].sum() == 0:
                    tmp_idx -= 1
                if tmp_idx >= 0:
                    cur_gt_boxes3d = cur_gt_boxes3d[:tmp_idx + 1]
                    cur_gt_boxes3d = torch.from_numpy(cur_gt_boxes3d).cuda(non_blocking = True).float()
                    iou3d = iou3d_utils.boxes_iou3d_gpu(pred_boxes3d[k], cur_gt_boxes3d)
                    # Best refined-box IoU for each ground-truth box.
                    gt_max_iou, _ = iou3d.max(dim = 0)
                    refined_iou, _ = iou3d.max(dim = 1)
                    for idx, thresh in enumerate(thresh_list):
                        total_recalled_bbox_list[idx] += (gt_max_iou > thresh).sum().item()
                    recalled_num += (gt_max_iou > 0.7).sum().item()
                    gt_num += cur_gt_boxes3d.shape[0]
                    total_gt_bbox += cur_gt_boxes3d.shape[0]
                    # original recall
                    iou3d_in = iou3d_utils.boxes_iou3d_gpu(roi_boxes3d[k], cur_gt_boxes3d)
                    gt_max_iou_in, _ = iou3d_in.max(dim = 0)
                    for idx, thresh in enumerate(thresh_list):
                        total_roi_recalled_bbox_list[idx] += (gt_max_iou_in > thresh).sum().item()
            if not cfg.RPN.FIXED:
                # Point-wise foreground-segmentation IoU against the labels.
                fg_mask = rpn_cls_label > 0
                correct = ((seg_result == rpn_cls_label) & fg_mask).sum().float()
                union = fg_mask.sum().float() + (seg_result > 0).sum().float() - correct
                rpn_iou = correct / torch.clamp(union, min = 1.0)
                total_rpn_iou += rpn_iou.item()
        # NOTE(review): cur_dist_img / cur_dist_img_r are only defined when
        # cfg.LI_FUSION.ENABLED -- this line raises NameError otherwise.
        disp_dict = {'mode': mode, 'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox),
                     'pert_dist_img': cur_dist_img.item(), 'refined_dist_img': cur_dist_img_r.item(),
                     'pert_dist_pts': cur_dist_pts.item()}
        progress_bar.set_postfix(disp_dict)
        progress_bar.update()
        if args.save_result:
            # save roi and refine results
            roi_boxes3d_np = roi_boxes3d.cpu().numpy()
            pred_boxes3d_np = pred_boxes3d.cpu().numpy()
            roi_scores_raw_np = roi_scores_raw.cpu().numpy()
            raw_scores_np = raw_scores.cpu().numpy()
            rpn_cls_np = ret_dict['rpn_cls'].cpu().numpy()
            rpn_xyz_np = ret_dict['backbone_xyz'].cpu().numpy()
            seg_result_np = seg_result.cpu().numpy()
            # Points + raw RPN score + segmentation result, per sample.
            output_data = np.concatenate((rpn_xyz_np, rpn_cls_np.reshape(batch_size, -1, 1),
                                          seg_result_np.reshape(batch_size, -1, 1)), axis = 2)
            for k in range(batch_size):
                cur_sample_id = sample_id[k]
                calib = dataset.get_calib(cur_sample_id)
                image_shape = dataset.get_image_shape(cur_sample_id)
                save_kitti_format(cur_sample_id, calib, roi_boxes3d_np[k], roi_output_dir,
                                  roi_scores_raw_np[k], image_shape)
                save_kitti_format(cur_sample_id, calib, pred_boxes3d_np[k], refine_output_dir,
                                  raw_scores_np[k], image_shape)
                output_file = os.path.join(rpn_output_dir, '%06d.npy' % cur_sample_id)
                np.save(output_file, output_data.astype(np.float32))
        # scores thresh
        inds = norm_scores > cfg.RCNN.SCORE_THRESH
        # print('cfg.RCNN.SCORE_THRESH:',cfg.RCNN.SCORE_THRESH)
        # print('cfg.RCNN.NMS_THRESH:',cfg.RCNN.NMS_THRESH)
        for k in range(batch_size):
            cur_inds = inds[k].view(-1)
            if cur_inds.sum() == 0:
                continue
            pred_boxes3d_selected = pred_boxes3d[k, cur_inds]
            raw_scores_selected = raw_scores[k, cur_inds]
            norm_scores_selected = norm_scores[k, cur_inds]
            # NMS thresh
            # rotated nms (in bird's-eye view)
            boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(pred_boxes3d_selected)
            keep_idx = iou3d_utils.nms_gpu(boxes_bev_selected, raw_scores_selected, cfg.RCNN.NMS_THRESH).view(-1)
            pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]
            scores_selected = raw_scores_selected[keep_idx]
            pred_boxes3d_selected, scores_selected = pred_boxes3d_selected.cpu().numpy(), scores_selected.cpu().numpy()
            cur_sample_id = sample_id[k]
            calib = dataset.get_calib(cur_sample_id)
            final_total += pred_boxes3d_selected.shape[0]
            image_shape = dataset.get_image_shape(cur_sample_id)
            save_kitti_format(cur_sample_id, calib, pred_boxes3d_selected, final_output_dir, scores_selected,
                              image_shape)
    progress_bar.close()
    # dump empty files
    # The KITTI evaluation tool expects one result file per frame of the split,
    # even if the frame has no detections.
    split_file = os.path.join(dataset.imageset_dir, '..', '..', 'ImageSets', dataset.split + '.txt')
    split_file = os.path.abspath(split_file)
    image_idx_list = [x.strip() for x in open(split_file).readlines()]
    empty_cnt = 0
    for k in range(image_idx_list.__len__()):
        cur_file = os.path.join(final_output_dir, '%s.txt' % image_idx_list[k])
        if not os.path.exists(cur_file):
            with open(cur_file, 'w') as temp_f:
                pass
            empty_cnt += 1
            logger.info('empty_cnt=%d: dump empty file %s' % (empty_cnt, cur_file))
    ret_dict = { 'empty_cnt': empty_cnt }
    logger.info('-------------------performance of epoch %s---------------------' % epoch_id)
    logger.info(str(datetime.now()))
    avg_rpn_iou = (total_rpn_iou / max(cnt, 1.0))
    avg_cls_acc = (total_cls_acc / max(cnt, 1.0))
    avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))
    avg_det_num = (final_total / max(len(dataset), 1.0))
    avg_pimg_dist = (pert_dist_img / max(cnt, 1.0))
    avg_refined_dist = (refined_dist_img / max(cnt, 1.0))
    avg_ppts_dist = (pert_dist_pts / max(cnt, 1.0))
    logger.info('final average detections: %.3f' % avg_det_num)
    logger.info('final average rpn_iou refined: %.3f' % avg_rpn_iou)
    logger.info('final average cls acc: %.3f' % avg_cls_acc)
    logger.info('final average cls acc refined: %.3f' % avg_cls_acc_refined)
    logger.info('final average pert img dist: %f' % avg_pimg_dist)
    logger.info('final average pert img dist refined: %f' % avg_refined_dist)
    logger.info('final average pert pts dist: %f' % avg_ppts_dist)
    ret_dict['rpn_iou'] = avg_rpn_iou
    ret_dict['rcnn_cls_acc'] = avg_cls_acc
    ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined
    ret_dict['rcnn_avg_num'] = avg_det_num
    ret_dict['pimg_dist'] = avg_pimg_dist
    ret_dict['pimg_dist_refined'] = avg_refined_dist
    ret_dict['ppts_dist'] = avg_ppts_dist
    for idx, thresh in enumerate(thresh_list):
        cur_roi_recall = total_roi_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
        logger.info('total roi bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_roi_recalled_bbox_list[idx],
                                                                          total_gt_bbox, cur_roi_recall))
        ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_roi_recall
    for idx, thresh in enumerate(thresh_list):
        cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
        logger.info('total bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_recalled_bbox_list[idx],
                                                                      total_gt_bbox, cur_recall))
        ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall
    if cfg.TEST.SPLIT != 'test':
        logger.info('Averate Precision:')
        name_to_class = { 'Car': 0, 'Pedestrian': 1, 'Cyclist': 2 }
        ap_result_str, ap_dict = kitti_evaluate(dataset.label_dir, final_output_dir, label_split_file = split_file,
                                                current_class = name_to_class[cfg.CLASSES])
        logger.info(ap_result_str)
        ret_dict.update(ap_dict)
    logger.info('result is saved to: %s' % result_dir)
    return ret_dict
def eval_one_epoch(model, generator_img, generator_pts, dataloader, epoch_id, result_dir, logger):
    """Dispatch to the stage-specific evaluation routine based on which of
    RPN / RCNN are enabled in the config; raise if neither is enabled.

    The generators are only used by the joint (RPN + RCNN) evaluation.
    """
    rpn_on, rcnn_on = cfg.RPN.ENABLED, cfg.RCNN.ENABLED
    if rpn_on and rcnn_on:
        return eval_one_epoch_joint(model, generator_img, generator_pts, dataloader, epoch_id, result_dir, logger)
    if rpn_on:
        return eval_one_epoch_rpn(model, dataloader, epoch_id, result_dir, logger)
    if rcnn_on:
        return eval_one_epoch_rcnn(model, dataloader, epoch_id, result_dir, logger)
    raise NotImplementedError
def load_part_ckpt(model, filename, logger, total_keys = -1):
    """Load the subset of weights in *filename* whose keys exist in *model*.

    Args:
        model: torch.nn.Module to update in place.
        filename: path to a checkpoint containing a 'model_state' dict.
        logger: logger used for progress messages.
        total_keys: total number of keys in the model, for the log line only.

    Raises:
        FileNotFoundError: if *filename* does not exist.
        RuntimeError: if no checkpoint key matched the model's state dict.
    """
    # Guard clause instead of wrapping the whole body in `if os.path.isfile`.
    if not os.path.isfile(filename):
        raise FileNotFoundError("checkpoint file not found: '%s'" % filename)
    logger.info("==> Loading part model from checkpoint '{}'".format(filename))
    checkpoint = torch.load(filename)
    model_state = checkpoint['model_state']
    # Fetch the state dict once and reuse it (the original called it twice).
    own_state = model.state_dict()
    update_model_state = { key: val for key, val in model_state.items() if key in own_state }
    own_state.update(update_model_state)
    model.load_state_dict(own_state)
    update_keys = len(update_model_state)
    if update_keys == 0:
        # Loading nothing silently would hide a broken checkpoint; fail loudly.
        raise RuntimeError("no parameter in '%s' matched the model's state dict" % filename)
    logger.info("==> Done (loaded %d/%d)" % (update_keys, total_keys))
def load_ckpt_based_on_args(model, generator_img, generator_pts, logger):
    """Load model and generator weights according to the CLI arguments.

    Loads (in order): the full-model checkpoint (args.ckpt), the image and
    point perturbation generators (args.afus_ckpt_dir / args.afus_iter), and
    finally partial RPN / RCNN checkpoints when those stages are enabled.
    """
    if args.ckpt is not None:
        train_utils.load_checkpoint(model, filename = args.ckpt, logger = logger)
    if args.afus_ckpt_dir is not None:
        logger.info("==> Loading generator")
        img_ckpt_path = os.path.join(args.afus_ckpt_dir, 'checkpoint_Gimg_iter_%d.pth' % args.afus_iter)
        generator_img.load_state_dict(torch.load(img_ckpt_path)['model_state'])
        logger.info("==> Loading perturbation")
        pts_ckpt_path = os.path.join(args.afus_ckpt_dir, 'checkpoint_Gpts_iter_%d.pth' % args.afus_iter)
        generator_pts.load_state_dict(torch.load(pts_ckpt_path)['model_state'])
        logger.info("==> Done")
    # Total parameter count, only used for the "loaded x/y" log line.
    total_keys = len(model.state_dict().keys())
    if cfg.RPN.ENABLED and args.rpn_ckpt is not None:
        load_part_ckpt(model, filename = args.rpn_ckpt, logger = logger, total_keys = total_keys)
    if cfg.RCNN.ENABLED and args.rcnn_ckpt is not None:
        load_part_ckpt(model, filename = args.rcnn_ckpt, logger = logger, total_keys = total_keys)
def eval_single_ckpt(root_result_dir):
    """Evaluate one checkpoint: build the dataloader, networks and generators,
    load weights per the CLI args, and run a single evaluation epoch.

    The output directory is derived from the epoch number parsed out of
    args.ckpt, the test split, test mode and args.extra_tag.
    """
    root_result_dir = os.path.join(root_result_dir, 'eval')
    # set epoch_id and output dir: use the last number in the ckpt filename.
    num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
    epoch_id = num_list[-1] if len(num_list) > 0 else 'no_number'
    root_result_dir = os.path.join(root_result_dir, 'epoch_%s' % epoch_id, cfg.TEST.SPLIT)
    if args.test:
        root_result_dir = os.path.join(root_result_dir, 'test_mode')
    if args.extra_tag != 'default':
        root_result_dir = os.path.join(root_result_dir, args.extra_tag)
    os.makedirs(root_result_dir, exist_ok = True)
    log_file = os.path.join(root_result_dir, 'log_eval_one.txt')
    logger = create_logger(log_file)
    logger.info('**********************Start logging**********************')
    # Log every CLI argument and the merged config for reproducibility.
    for key, val in vars(args).items():
        logger.info("{:16} {}".format(key, val))
    save_config_to_file(cfg, logger = logger)
    # create dataloader & network
    test_loader = create_dataloader(logger)
    if args.model_type == 'base':
        model = PointRCNN(num_classes = test_loader.dataset.num_class, use_xyz = True, mode = 'TEST')
        generator_img = Generator_img(num_channels=3, ngf=100)
        # Point-generator input channels depend on which extra features are used.
        input_channels = int(cfg.RPN.USE_INTENSITY) + 3 * int(cfg.RPN.USE_RGB)
        generator_pts = Generator_pts(input_channels=input_channels, use_xyz=True)
    else:
        # Fix: an unsupported model_type previously fell through and crashed
        # with NameError on `model.cuda()`; fail fast with a clear message.
        raise NotImplementedError('unsupported model_type: %s' % args.model_type)
    model.cuda()
    generator_img.cuda()
    generator_pts.cuda()
    # load checkpoint
    load_ckpt_based_on_args(model, generator_img, generator_pts, logger)
    # start evaluation
    eval_one_epoch(model, generator_img, generator_pts, test_loader, epoch_id, root_result_dir, logger)
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file):
    """Return (epoch_id, path) of the oldest checkpoint not yet evaluated.

    Args:
        ckpt_dir: directory scanned for '*checkpoint_epoch_*.pth' files.
        ckpt_record_file: text file with one already-evaluated epoch id per line.

    Returns:
        (epoch_id_str, ckpt_path) for the first unevaluated checkpoint whose
        epoch is >= args.start_epoch, or (-1, None) when there is none.
    """
    # Oldest checkpoints first, so training order is preserved.
    ckpt_list = sorted(glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth')), key = os.path.getmtime)
    # `with` closes the record file (the original leaked the handle).
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]
    for cur_ckpt in ckpt_list:
        num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
        if len(num_list) == 0:
            continue
        epoch_id = num_list[-1]
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt
    return -1, None
def repeat_eval_ckpt(root_result_dir, ckpt_dir):
    """Poll *ckpt_dir* and evaluate every checkpoint that has not been
    evaluated yet, logging metrics to tensorboard.

    Stops once nothing new has appeared for args.max_waiting_mins minutes
    after at least one evaluation has been performed.
    """
    root_result_dir = os.path.join(root_result_dir, 'eval', 'eval_all_' + args.extra_tag)
    os.makedirs(root_result_dir, exist_ok = True)
    log_file = os.path.join(root_result_dir, 'log_eval_all_%s.txt' % cfg.TEST.SPLIT)
    logger = create_logger(log_file)
    logger.info('**********************Start logging**********************')
    # save config
    for key, val in vars(args).items():
        logger.info("{:16} {}".format(key, val))
    save_config_to_file(cfg, logger = logger)
    # create dataloader & network
    test_loader = create_dataloader(logger)
    # model = PointRCNN(num_classes=test_loader.dataset.num_class, use_xyz=True, mode='TEST')
    if args.model_type == 'base':
        model = PointRCNN(num_classes = test_loader.dataset.num_class, use_xyz = True, mode = 'TEST')
        # print(model)
    # elif args.model_type == 'rpn_mscale':
    #     model = PointRCNN_mScale(num_classes = test_loader.dataset.num_class, use_xyz = True, mode = 'TEST')
    # NOTE(review): any model_type other than 'base' leaves `model` unbound and
    # the next line raises NameError -- an explicit `else: raise` would be safer.
    model.cuda()
    # copy important files to backup
    backup_dir = os.path.join(root_result_dir, 'backup_files')
    os.makedirs(backup_dir, exist_ok = True)
    os.system('cp *.py %s/' % backup_dir)
    os.system('cp ../lib/net/*.py %s/' % backup_dir)
    os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)
    # evaluated ckpt record
    ckpt_record_file = os.path.join(root_result_dir, 'eval_list_%s.txt' % cfg.TEST.SPLIT)
    # Touch the record file so get_no_evaluated_ckpt can open it for reading.
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log
    tb_log = SummaryWriter(logdir = os.path.join(root_result_dir, 'tensorboard_%s' % cfg.TEST.SPLIT))
    total_time = 0
    first_eval = True
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            wait_second = 30
            print('Wait %s second for next check: %s' % (wait_second, ckpt_dir))
            time.sleep(wait_second)
            total_time += 30
            # Give up once nothing new appeared for max_waiting_mins minutes,
            # but only after at least one checkpoint has been evaluated.
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        total_time = 0
        first_eval = False
        # load checkpoint
        train_utils.load_checkpoint(model, filename = cur_ckpt)
        # start evaluation
        cur_result_dir = os.path.join(root_result_dir, 'epoch_%s' % cur_epoch_id, cfg.TEST.SPLIT)
        # NOTE(review): eval_one_epoch is defined as (model, generator_img,
        # generator_pts, dataloader, ...) -- this call omits the two generator
        # arguments and will raise TypeError; confirm which signature is current.
        tb_dict = eval_one_epoch(model, test_loader, cur_epoch_id, cur_result_dir, logger)
        step = int(float(cur_epoch_id))
        # Only log to tensorboard for integral epoch ids.
        if step == float(cur_epoch_id):
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, step)
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file = f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def create_dataloader(logger):
    """Build the KITTI evaluation DataLoader for the configured test split.

    :param logger: logger instance forwarded to the dataset for progress output
    :return: a ``DataLoader`` over :class:`KittiRCNNDataset` (no shuffling)
    """
    split_mode = 'TEST' if args.test else 'EVAL'
    data_root = os.path.join('../', 'data')
    # Dataset for the evaluation split; ROI/feature dirs are only used in
    # rcnn_offline mode and may be None otherwise.
    dataset = KittiRCNNDataset(root_dir=data_root,
                               npoints=cfg.RPN.NUM_POINTS,
                               split=cfg.TEST.SPLIT,
                               mode=split_mode,
                               random_select=args.random_select,
                               rcnn_eval_roi_dir=args.rcnn_eval_roi_dir,
                               rcnn_eval_feature_dir=args.rcnn_eval_feature_dir,
                               classes=cfg.CLASSES,
                               logger=logger)
    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        shuffle=False,
                        pin_memory=True,
                        num_workers=args.workers,
                        collate_fn=dataset.collate_batch)
    print('test_loader')
    return loader
if __name__ == "__main__":
    # merge config and log to file
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    # experiment tag = config file name without its extension
    cfg.TAG = os.path.splitext(os.path.basename(args.cfg_file))[0]
    if args.eval_mode == 'rpn':
        # RPN-only evaluation
        cfg.RPN.ENABLED = True
        cfg.RCNN.ENABLED = False
        root_result_dir = os.path.join('../', 'output', 'rpn', cfg.TAG)
        ckpt_dir = os.path.join('../', 'output', 'rpn', cfg.TAG, 'ckpt')
    elif args.eval_mode == 'rcnn':
        # RCNN evaluation on top of a fixed (frozen) RPN
        cfg.RCNN.ENABLED = True
        cfg.RPN.ENABLED = cfg.RPN.FIXED = True
        root_result_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG)
        ckpt_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG, 'ckpt')
    elif args.eval_mode == 'rcnn_online':
        # RCNN evaluation with RPN proposals generated on the fly
        cfg.RCNN.ENABLED = True
        cfg.RPN.ENABLED = True
        cfg.RPN.FIXED = False
        root_result_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG)
        ckpt_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG, 'ckpt')
    elif args.eval_mode == 'rcnn_offline':
        # RCNN evaluation from precomputed ROI/feature files (dirs required)
        cfg.RCNN.ENABLED = True
        cfg.RPN.ENABLED = False
        root_result_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG)
        ckpt_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG, 'ckpt')
        assert args.rcnn_eval_roi_dir is not None and args.rcnn_eval_feature_dir is not None
    else:
        raise NotImplementedError
    # explicit command-line overrides for checkpoint and output locations
    if args.ckpt_dir is not None:
        ckpt_dir = args.ckpt_dir
    if args.output_dir is not None:
        root_result_dir = args.output_dir
    os.makedirs(root_result_dir, exist_ok = True)
    # evaluation only -- no gradients needed
    with torch.no_grad():
        if args.eval_all:
            # evaluate every checkpoint that appears in ckpt_dir
            assert os.path.exists(ckpt_dir), '%s' % ckpt_dir
            repeat_eval_ckpt(root_result_dir, ckpt_dir)
            print('eval_all')
        else:
            eval_single_ckpt(root_result_dir)
            print('eval_single')
| 46.785647 | 152 | 0.618879 |
62c45e92ad19585a6290170c0b00c1e9f268fc79 | 6,715 | py | Python | bing_tts.py | jerryyip/respeaker_adapter | 98b56521186c89fe161e5cbff57fb386df9577e5 | [
"MIT"
] | 2 | 2017-05-29T02:52:33.000Z | 2018-04-27T03:33:43.000Z | bing_tts.py | jerryyip/respeaker_adapter | 98b56521186c89fe161e5cbff57fb386df9577e5 | [
"MIT"
] | null | null | null | bing_tts.py | jerryyip/respeaker_adapter | 98b56521186c89fe161e5cbff57fb386df9577e5 | [
"MIT"
] | null | null | null | '''
Bing Text To Speech (TTS)
'''
import json
import uuid
import wave
import io
import hashlib
from monotonic import monotonic
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
from bing_base import *
# Maximum total size in bytes of cached TTS waves (2 MiB); see BingTTS.cache_wave.
CACHE_SIZE = 2*1024*1024
class BingTTS():
    """Client for the Microsoft Bing text-to-speech (TTS) web service (Python 2).

    Synthesized waves are kept in a small in-memory cache, keyed by
    sha1(text + language + gender), bounded by CACHE_SIZE bytes; the
    least-referenced entries are evicted first (see cache_wave()).
    """

    def __init__(self, bing_base):
        # bing_base supplies the bearer access token for the Speech API
        # -- assumed to expose a token() method (see bing_base module).
        self.bing_base = bing_base
        # Locale -> {gender: full service voice name} as published by the
        # Bing Speech API; locales with a single entry support only one gender.
        self.locales = {
            "ar-eg": {"Female": "Microsoft Server Speech Text to Speech Voice (ar-EG, Hoda)"},
            "de-DE": {"Female": "Microsoft Server Speech Text to Speech Voice (de-DE, Hedda)", "Male": "Microsoft Server Speech Text to Speech Voice (de-DE, Stefan, Apollo)"},
            "en-AU": {"Female": "Microsoft Server Speech Text to Speech Voice (en-AU, Catherine)"},
            "en-CA": {"Female": "Microsoft Server Speech Text to Speech Voice (en-CA, Linda)"},
            "en-GB": {"Female": "Microsoft Server Speech Text to Speech Voice (en-GB, Susan, Apollo)", "Male": "Microsoft Server Speech Text to Speech Voice (en-GB, George, Apollo)"},
            "en-IN": {"Male": "Microsoft Server Speech Text to Speech Voice (en-IN, Ravi, Apollo)"},
            "en-US": {"Female": "Microsoft Server Speech Text to Speech Voice (en-US, ZiraRUS)", "Male": "Microsoft Server Speech Text to Speech Voice (en-US, BenjaminRUS)"},
            "es-ES": {"Female": "Microsoft Server Speech Text to Speech Voice (es-ES, Laura, Apollo)", "Male": "Microsoft Server Speech Text to Speech Voice (es-ES, Pablo, Apollo)"},
            "es-MX": {"Male": "Microsoft Server Speech Text to Speech Voice (es-MX, Raul, Apollo)"},
            "fr-CA": {"Female": "Microsoft Server Speech Text to Speech Voice (fr-CA, Caroline)"},
            "fr-FR": {"Female": "Microsoft Server Speech Text to Speech Voice (fr-FR, Julie, Apollo)", "Male": "Microsoft Server Speech Text to Speech Voice (fr-FR, Paul, Apollo)"},
            "it-IT": {"Male": "Microsoft Server Speech Text to Speech Voice (it-IT, Cosimo, Apollo)"},
            "ja-JP": {"Female": "Microsoft Server Speech Text to Speech Voice (ja-JP, Ayumi, Apollo)", "Male": "Microsoft Server Speech Text to Speech Voice (ja-JP, Ichiro, Apollo)"},
            "pt-BR": {"Male": "Microsoft Server Speech Text to Speech Voice (pt-BR, Daniel, Apollo)"},
            "ru-RU": {"Female": "Microsoft Server Speech Text to Speech Voice (pt-BR, Daniel, Apollo)", "Male": "Microsoft Server Speech Text to Speech Voice (ru-RU, Pavel, Apollo)"},
            "zh-CN": {"Female": "Microsoft Server Speech Text to Speech Voice (zh-CN, HuihuiRUS)", "Female2": "Microsoft Server Speech Text to Speech Voice (zh-CN, Yaoyao, Apollo)", "Male": "Microsoft Server Speech Text to Speech Voice (zh-CN, Kangkang, Apollo)"},
            "zh-HK": {"Female": "Microsoft Server Speech Text to Speech Voice (zh-HK, Tracy, Apollo)", "Male": "Microsoft Server Speech Text to Speech Voice (zh-HK, Danny, Apollo)"},
            "zh-TW": {"Female": "Microsoft Server Speech Text to Speech Voice (zh-TW, Yating, Apollo)", "Male": "Microsoft Server Speech Text to Speech Voice (zh-TW, Zhiwei, Apollo)"}
        }
        # sha1 digest -> [size_in_bytes, wave_data, ref_cnt]
        self.cache = {}

    def speak(self, text, language="en-US", gender="Female"):
        """Synthesize *text* and return the raw 16 kHz 16-bit mono PCM data.

        Results are served from the in-memory cache when available.
        Raises LocaleError for an unsupported locale and RequestError when
        the HTTP request fails (both from bing_base).
        """
        access_token = self.bing_base.token()
        if language not in self.locales.keys():
            raise LocaleError("language locale not supported.")
        lang = self.locales.get(language)
        if gender not in ["Female", "Male", "Female2"]:
            gender = "Female"
        # locales with only one voice ignore the requested gender
        if len(lang) == 1:
            gender = lang.keys()[0]
        service_name = lang[gender]
        # cache key: sha1 over the request-defining fields
        hasher = hashlib.sha1()
        hasher.update(text+language+gender)
        sha1 = hasher.hexdigest()
        if sha1 in self.cache:
            print '[TTS wave from cache]'
            # [size, data, ref_cnt] -- bump ref_cnt so eviction keeps hot entries
            self.cache[sha1][2] += 1
            return self.cache[sha1][1]
        # SSML request body selecting the resolved service voice
        body = "<speak version='1.0' xml:lang='en-us'>\
<voice xml:lang='%s' xml:gender='%s' name='%s'>%s</voice>\
</speak>" % (language, gender, service_name, text)
        headers = {"Content-type": "application/ssml+xml",
                   "X-Microsoft-OutputFormat": "raw-16khz-16bit-mono-pcm",
                   "Authorization": "Bearer " + access_token,
                   "X-Search-AppId": "07D3234E49CE426DAA29772419F436CA",
                   "X-Search-ClientID": str(uuid.uuid1()).replace('-',''),
                   "User-Agent": "TTSForPython"}
        url = "https://speech.platform.bing.com/synthesize"
        request = Request(url, data=body, headers=headers)
        try:
            response = urlopen(request)
        except HTTPError as e:
            raise RequestError("tts request failed: {0}".format(
                getattr(e, "reason", "status {0}".format(e.code))))  # use getattr to be compatible with Python 2.6
        except URLError as e:
            raise RequestError("tts connection failed: {0}".format(e.reason))
        data = response.read()
        size = len(data)
        print "[TTS wave length: %dkB]" %(size/1024),
        self.cache_wave(sha1, data, size)
        return data

    def _sum_cache(self):
        """Return the total size in bytes of all cached waves."""
        sum = 0
        for k,v in self.cache.items():
            sum += v[0]
        return sum

    def cache_wave(self, sha, data, size):
        """Store a synthesized wave under *sha*, evicting least-referenced
        entries until the new entry fits within CACHE_SIZE."""
        overflow = self._sum_cache() + size - CACHE_SIZE
        to_be_del = []
        if overflow > 0:
            # evict in ascending ref_cnt order until enough space is freed
            lst = sorted(self.cache.items(), key=lambda t: t[1][2])
            while overflow > 0 and len(lst) > 0:
                garbage = lst.pop(0)
                to_be_del.append(garbage[0])
                overflow -= garbage[1][0]
            for d in to_be_del:
                del self.cache[d]
        #print self.cache.keys()
        # [size, data, ref_cnt] -- new entries start with ref_cnt 0
        self.cache[sha] = [size, data, 0]
if __name__ == '__main__':
    # Manual smoke test: synthesize the text given on the command line and
    # play it through the default audio device via the project's Player.
    import sys
    try:
        from creds import BING_KEY
    except ImportError:
        print('Get a key from https://www.microsoft.com/cognitive-services/en-us/speech-api and create creds.py with the key')
        sys.exit(-1)
    from bing_base import *
    from player import Player
    import pyaudio
    import time
    pa = pyaudio.PyAudio()
    player = Player(pa)
    if len(sys.argv) != 2:
        print('Usage: %s "text"' % sys.argv[0])
        sys.exit(-1)
    bing = BingBase(BING_KEY)
    tts = BingTTS(bing)
    # synthesize the given text and play the resulting wave
    try:
        data = tts.speak(sys.argv[1], language='en-US')
        player.play_buffer(data)
    except LocaleError as e:
        print e
    except RequestError as e:
        print("Could not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
    # give the player time to finish before tearing down the audio stack
    time.sleep(10)
    player.close()
    pa.terminate()
7110ef6fca0fdfbe862595345ec909969219b765 | 4,164 | py | Python | backend/web/download_function.py | d2hydro/hydrobase | a1368b396a75eba7b3f13551c9ef4096115e8014 | [
"MIT"
] | null | null | null | backend/web/download_function.py | d2hydro/hydrobase | a1368b396a75eba7b3f13551c9ef4096115e8014 | [
"MIT"
] | null | null | null | backend/web/download_function.py | d2hydro/hydrobase | a1368b396a75eba7b3f13551c9ef4096115e8014 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 22 20:43:46 2021
@author: danie
"""
import rasterio
from rasterio import merge
from pydantic import BaseModel
from typing import List
import geopandas as gpd
from shapely.geometry import box, LineString
from sqlalchemy import create_engine
import io
import zipfile
import uuid
from pathlib import Path
# Configuration can live inside the package (relative import) or beside the
# module when it is run as a plain script.
try:
    from .config import POSTGIS, TEMP_DIR, AHN5_DIR, CRS
except ImportError:
    from config import POSTGIS, TEMP_DIR, AHN5_DIR, CRS
# AHN elevation data: a directory per layer with one GeoTIFF per tile plus
# a shapefile index of the tiles ('bladnr' column, see download_bbox).
ahn_dir = AHN5_DIR
dtm_index_gdf = gpd.read_file(ahn_dir / 'dtm' / 'index.shp')
layer = "dtm"
# working directory for temporary GeoPackage/GeoTIFF files
temp_dir = Path(TEMP_DIR)
# PostGIS connection settings for the 'ugw' database
server = POSTGIS["server"]
port = POSTGIS["port"]
db = "ugw"
user = POSTGIS["user"]
password = POSTGIS["password"]
engine = create_engine(f'postgresql://{user}:{password}@{server}:{port}/{db}')
def _merge_rasters(indices, layer):
    """Merge the AHN tiles named by *indices* for *layer* into one temporary
    GeoTIFF and return its path.

    :param indices: tile names ('bladnr' values); at least one is required
    :param layer: layer sub-directory of ``ahn_dir`` (e.g. 'dtm')
    :return: Path of the merged single-band GeoTIFF inside ``temp_dir``
    """
    # take the profile (crs, dtype, nodata, ...) from the first tile
    with rasterio.open(ahn_dir.joinpath(layer, f"{indices[0].upper()}_CM.tif")) as src:
        profile = src.profile
    # open all tiles, merge them, and make sure every dataset is closed again
    # (the original list comprehension leaked the open datasets)
    sources = [rasterio.open(ahn_dir.joinpath(layer, f"{i.upper()}_CM.tif"))
               for i in indices]
    try:
        raster_data, affine = merge.merge(sources)
    finally:
        for source in sources:
            source.close()
    profile["transform"] = affine
    # merge.merge returns an array shaped (bands, rows, cols):
    # shape[1] is the height (rows) and shape[2] the width (cols).
    # The original assignment had width and height swapped, which only
    # happened to work for square mosaics.
    profile["height"] = raster_data.shape[1]
    profile["width"] = raster_data.shape[2]
    tif_file = temp_dir / f"{str(uuid.uuid4())}.tif"
    with rasterio.open(tif_file, 'w', **profile) as dst:
        dst.write(raster_data[0], 1)
    return tif_file
def download_bbox(bbox):
    """Collect water features and the DTM raster for a bounding box into a
    zip archive held in memory.

    :param bbox: (xmin, ymin, xmax, ymax) coordinates; assumed to be in the
        project CRS -- TODO confirm against callers.
    :return: ``io.BytesIO`` containing a zip with 'water_features.gpkg'
        (layers: download_extent, waterlevel_areas, model_extent and, when
        non-empty, water_areas and water_lines) plus '<layer>.tif'.
    """
    poly_bounds = box(*bbox)
    bounds = tuple(bbox)
    # build the zip archive in memory
    s = io.BytesIO()
    zf = zipfile.ZipFile(s, "w")
    # temporary GeoPackage collecting all vector layers
    gpkg_file = temp_dir / f"{str(uuid.uuid4())}.gpkg"
    merged_tif = False
    tif_file = None
    # the finally-block guarantees the archive is finalized and the temporary
    # files are removed even when a query or file write fails (the original
    # code leaked both the ZipFile and the temp files on any exception)
    try:
        # add original boundary to geopackage
        gpd.GeoDataFrame({"geometry": [LineString(poly_bounds.exterior)]},
                         crs=CRS).to_file(filename=gpkg_file,
                                          layer="download_extent",
                                          driver="GPKG")
        # get water level areas intersecting the original bounds
        # NOTE(review): the bounds tuple is interpolated into the SQL text;
        # this is only safe while bbox values are numeric -- confirm callers
        # validate the input.
        cmd = f"SELECT geom FROM waterlevel_areas WHERE waterlevel_areas.geom && ST_MakeEnvelope{bounds}"
        gdf = gpd.GeoDataFrame.from_postgis(cmd, engine)
        # expand the bounds so the other sources cover the full water level areas
        if not gdf.empty:
            bounds = tuple(gdf.total_bounds)
            poly_bounds = box(*bounds)
        # add water level areas intersecting the (possibly expanded) bounds
        cmd = f"SELECT vp, zp, wp, geom FROM waterlevel_areas WHERE waterlevel_areas.geom && ST_MakeEnvelope{bounds}"
        gdf = gpd.GeoDataFrame.from_postgis(cmd, engine)
        # fall back to the vp level where zp/wp values are missing
        for target in ["zp", "wp"]:
            gdf.loc[gdf[target].isna(), target] = gdf.loc[
                gdf[target].isna()
            ]["vp"]
        gdf.to_file(filename=gpkg_file, layer="waterlevel_areas", driver="GPKG")
        # add the (possibly expanded) boundary to the geopackage
        gpd.GeoDataFrame({"geometry": [LineString(poly_bounds.exterior)]},
                         crs=CRS).to_file(filename=gpkg_file,
                                          layer="model_extent",
                                          driver="GPKG")
        # add water areas
        cmd = f"SELECT bl, bb, geom FROM water_areas WHERE water_areas.geom && ST_MakeEnvelope{bounds}"
        gdf = gpd.GeoDataFrame.from_postgis(cmd, engine)
        if not gdf.empty:
            gdf.to_file(filename=gpkg_file, layer="water_areas", driver="GPKG")
        # add water lines
        cmd = f"SELECT blu, bld, geom FROM water_lines WHERE water_lines.geom && ST_MakeEnvelope{bounds}"
        gdf = gpd.GeoDataFrame.from_postgis(cmd, engine)
        if not gdf.empty:
            gdf.to_file(filename=gpkg_file, layer="water_lines", driver="GPKG")
        zf.write(gpkg_file, "water_features.gpkg")
        # raster data: merge multiple AHN tiles into a temporary tif,
        # or reference the single source tile directly
        indices = dtm_index_gdf.loc[dtm_index_gdf.intersects(poly_bounds)]['bladnr'].to_list()
        merged_tif = len(indices) > 1
        if merged_tif:
            tif_file = _merge_rasters(indices, layer)
        else:
            tif_file = ahn_dir.joinpath(layer, f"{indices[0].upper()}_CM.tif")
        zf.write(tif_file, f"{layer}.tif")
    finally:
        zf.close()
        if gpkg_file.exists():
            gpkg_file.unlink()
        # only delete the merged temporary tif, never a source tile
        if merged_tif and tif_file is not None and tif_file.exists():
            tif_file.unlink()
    return s
| 32.030769 | 117 | 0.642651 |
69a49ba014c1b09a4a4bf05b48b488d4e727452d | 1,825 | py | Python | pymcuprog/deviceinfo/devices/pic16f18446.py | KrystianD-contribution/pymcuprog | a9411a8e4a5db8b54517c51da0bae96bf8385a65 | [
"MIT"
] | 28 | 2021-05-08T19:28:33.000Z | 2022-03-23T06:23:13.000Z | pymcuprog/deviceinfo/devices/pic16f18446.py | KrystianD-contribution/pymcuprog | a9411a8e4a5db8b54517c51da0bae96bf8385a65 | [
"MIT"
] | 20 | 2021-05-24T19:20:39.000Z | 2022-03-12T20:10:30.000Z | pymcuprog/deviceinfo/devices/pic16f18446.py | KrystianD-contribution/pymcuprog | a9411a8e4a5db8b54517c51da0bae96bf8385a65 | [
"MIT"
] | 11 | 2021-06-24T20:59:16.000Z | 2022-03-23T23:59:38.000Z | """
Required device info for the PIC16F18446 devices
"""
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
# Memory map and erase behaviour of the PIC16F18446, consumed by the
# pymcuprog programming backend. Addresses/sizes are in words unless the
# key name says bytes.
DEVICE_INFO = {
    'name': 'pic16f18446',
    'architecture': 'PIC16',
    # Will erase Flash, User ID and Config words
    'default_bulk_erase_address_word': 0x8000,
    # Flash
    'flash_address_word': 0,
    'flash_size_words': 16384,
    'flash_page_size_words': 32,
    'flash_write_size_words': 1,
    'flash_read_size_words': 1,
    # This address will erase only flash
    'flash_erase_address_word': 0x80FE,
    'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
    'flash_isolated_erase': True,
    # User ID
    'user_id_address_word': 0x8000,
    'user_id_size_words': 4,
    'user_id_page_size_words': 1,
    'user_id_write_size_words': 1,
    'user_id_read_size_words': 1,
    'user_id_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
    'user_id_isolated_erase': False,
    # Config words
    'config_words_address_word': 0x8007,
    'config_words_size_words': 5,
    'config_words_page_size_words': 1,
    'config_words_write_size_words': 1,
    'config_words_read_size_words': 1,
    'config_words_erase_address_word': 0,
    'config_words_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
    'config_words_isolated_erase': False,
    # EEPROM
    'eeprom_address_word': 0xF000,
    'eeprom_size_bytes': 256,
    'eeprom_page_size_words': 1,
    'eeprom_write_size_words': 1,
    'eeprom_read_size_words': 1,
    'eeprom_chiperase_effect': ChiperaseEffect.NOT_ERASED,
    'eeprom_isolated_erase': False,
    # ICD memory (debug executive area); byte address = word address * 2
    'icd_address_byte': 0x8600*2,
    'icd_size_bytes': 512*2, # 1KiB
    'icd_page_size_words': 32,
    'icd_write_size_words': 1,
    'icd_read_size_words': 1,
    'icd_chiperase_effect': ChiperaseEffect.NOT_ERASED,
    'icd_isolated_erase': True,
}
| 30.416667 | 67 | 0.718904 |
3d88f374b81dc18c1e2c4ea3ce7ab344065c45e3 | 453 | py | Python | export_readiness/migrations/0035_remove_countryguidepage_help_market_guide_cta_title.py | kaedroho/dit-directory-cms | 67c15eeed19e7b3583f1fce1969230ddf83b6813 | [
"MIT"
] | null | null | null | export_readiness/migrations/0035_remove_countryguidepage_help_market_guide_cta_title.py | kaedroho/dit-directory-cms | 67c15eeed19e7b3583f1fce1969230ddf83b6813 | [
"MIT"
] | null | null | null | export_readiness/migrations/0035_remove_countryguidepage_help_market_guide_cta_title.py | kaedroho/dit-directory-cms | 67c15eeed19e7b3583f1fce1969230ddf83b6813 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-03-14 16:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the
    # help_market_guide_cta_title field from the countryguidepage model.

    dependencies = [
        ('export_readiness', '0034_auto_20190314_1250'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='countryguidepage',
            name='help_market_guide_cta_title',
        ),
    ]
| 22.65 | 57 | 0.622517 |
57bb2eb34324bfe56826b69c1d4452c2f1f2311c | 2,386 | py | Python | nabs/visualizations.py | tangkong/nabs | 06d21428cddaa3f2ec6a31150f9d4758f5a7c2c5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | nabs/visualizations.py | tangkong/nabs | 06d21428cddaa3f2ec6a31150f9d4758f5a7c2c5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | nabs/visualizations.py | tangkong/nabs | 06d21428cddaa3f2ec6a31150f9d4758f5a7c2c5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | import matplotlib.pyplot as plt
from .plan_stubs import get_sample_targets
def show_shot_targets(sample_name, path):
    """
    Display a plot with targets.

    This function is used in conjunction with the `XYGridStage` object from
    `pcdsdevices` as well as the `fixed_target_scan` from `nabs.plans`.
    It displays a scatter plot with the targets that have been scanned (& shot)
    with `fixed_target_scan`, and the targets that are still available.
    It uses the information saved in a yaml file for a specific sample
    to get the x and y positions as well as the last target that has been shot.
    This information is saved with the help of the `XYGridStage` object.

    Parameters
    ----------
    sample_name : str
        The name of the sample file to plot the graph for.
    path : str
        The path of the sample file.
    """
    plt.clf()
    xx, yy = get_sample_targets(sample_name, path)
    # index of the next target to be shot;
    # if none is pending, assume all targets were shot
    x_index = next((index for (index, d) in enumerate(xx)
                    if d['status'] is False), len(xx))
    xx_shot = [item['pos'] for item in xx if item['status'] is True]
    yy_shot = [item['pos'] for item in yy if item['status'] is True]
    xx_available = [item['pos'] for item in xx if item['status'] is False]
    yy_available = [item['pos'] for item in yy if item['status'] is False]
    plt.plot(xx_available, yy_available, 'o', color='blue', markersize=1,
             label="available")
    plt.plot(xx_shot, yy_shot, 'o', color='orange', markersize=1, label="shot")
    plt.gca().invert_yaxis()
    plt.xlabel('X Target Positions')
    plt.ylabel('Y Target Positions')
    last_shot_index = x_index - 1
    # >= 0 so the marker is also drawn when exactly one target has been shot
    # (the previous `> 0` comparison skipped that case)
    if last_shot_index >= 0:
        plt.plot(xx_shot[-1], yy_shot[-1],
                 '*', color='red', markersize=2, label='last shot index')
        last_shot_pos = xx_shot[last_shot_index], yy_shot[last_shot_index]
        plt.annotate(f" {last_shot_pos[0]}\n {last_shot_pos[1]}",
                     (xx_shot[last_shot_index], yy_shot[last_shot_index]),
                     size=8, color='red')
    plt.legend(bbox_to_anchor=(0.15, -0.05), loc='upper center', ncol=3)
    plt.show()
| 41.137931 | 79 | 0.652137 |
830b4ddfb95f956e9a21cd0c7d382cc8c0be95a7 | 5,035 | py | Python | test add group.py | marr-py/python_training | 3fc7a8ddcf89b5776b161ad2ac5155bc827a56b9 | [
"Apache-2.0"
] | null | null | null | test add group.py | marr-py/python_training | 3fc7a8ddcf89b5776b161ad2ac5155bc827a56b9 | [
"Apache-2.0"
] | null | null | null | test add group.py | marr-py/python_training | 3fc7a8ddcf89b5776b161ad2ac5155bc827a56b9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from selenium import webdriver
import unittest
from group import Group
class UntitledTestCase(unittest.TestCase):
    """Selenium UI tests for the local addressbook application.

    Covers group creation (populated and empty) and contact creation,
    driven through a Chrome WebDriver against http://localhost/addressbook/.
    """

    def setUp(self):
        # one Chrome session per test, with a 5 s implicit element wait
        self.wd = webdriver.Chrome()
        self.wd.implicitly_wait(5)

    def login(self, wd, username, password):
        """Open the addressbook start page and log in."""
        wd.get("http://localhost/addressbook/")
        wd.find_element_by_name("user").clear()
        wd.find_element_by_name("user").send_keys(username)
        wd.find_element_by_name("pass").click()
        wd.find_element_by_name("pass").clear()
        wd.find_element_by_name("pass").send_keys(password)
        wd.find_element_by_xpath("//input[@value='Login']").click()

    def test_add_group(self):
        """Create a fully populated group."""
        wd = self.wd
        self.login(wd, username="admin", password="secret")
        self.create_group(wd, Group("Group12", "header12", "footer12"))
        self.logout()

    def test_add_empty_group(self):
        """Create a group with all fields left blank."""
        wd = self.wd
        self.login(wd, username="admin", password="secret")
        self.create_group(wd, Group(group_name="", header="", footer=""))
        self.logout()

    def create_group(self, wd, group):
        """Fill in and submit the new-group form from a Group value object."""
        # create group
        wd.find_element_by_link_text("groups").click()
        wd.find_element_by_name("new").click()
        wd.find_element_by_name("group_name").click()
        wd.find_element_by_name("group_name").clear()
        wd.find_element_by_name("group_name").send_keys(group.group_name)
        wd.find_element_by_name("group_header").click()
        wd.find_element_by_name("group_header").clear()
        wd.find_element_by_name("group_header").send_keys(group.header)
        wd.find_element_by_name("group_footer").click()
        wd.find_element_by_name("group_footer").clear()
        wd.find_element_by_name("group_footer").send_keys(group.footer)
        self.submit_group_creation()
        self.return_to_groups_page()

    def submit_group_creation(self):
        """Submit the group form."""
        wd = self.wd
        wd.find_element_by_name("submit").click()

    def return_to_groups_page(self):
        """Navigate back to the group list."""
        wd = self.wd
        wd.find_element_by_link_text("group page").click()

    def logout(self):
        wd = self.wd
        wd.find_element_by_link_text("Logout").click()

    def tearDown(self):
        # always close the browser, even when a test fails
        self.wd.quit()

    def test_add_contact(self):
        """Create a contact with fixed representative data."""
        wd = self.wd
        self.login(wd, username="admin", password="secret")
        self.create_contact(wd)
        self.logout()

    def create_contact(self, wd):
        """Fill in and submit the new-contact form with fixed test data."""
        # init contact form
        wd.find_element_by_link_text("add new").click()
        # fill contact form
        wd.find_element_by_name("firstname").click()
        wd.find_element_by_name("firstname").clear()
        wd.find_element_by_name("firstname").send_keys("Jerry")
        wd.find_element_by_name("middlename").clear()
        wd.find_element_by_name("middlename").send_keys("Spoon")
        wd.find_element_by_name("middlename").click()
        wd.find_element_by_name("middlename").clear()
        wd.find_element_by_name("middlename").send_keys("M")
        wd.find_element_by_name("middlename").click()
        wd.find_element_by_name("middlename").clear()
        wd.find_element_by_name("middlename").send_keys("M.")
        wd.find_element_by_name("lastname").click()
        wd.find_element_by_name("lastname").clear()
        wd.find_element_by_name("lastname").send_keys("Spoon")
        wd.find_element_by_name("nickname").click()
        wd.find_element_by_name("nickname").clear()
        wd.find_element_by_name("nickname").send_keys("Spooky")
        wd.find_element_by_name("title").click()
        wd.find_element_by_name("company").click()
        wd.find_element_by_name("company").clear()
        wd.find_element_by_name("company").send_keys("YVB GmbH")
        wd.find_element_by_name("address").click()
        wd.find_element_by_name("address").clear()
        wd.find_element_by_name("address").send_keys("test ave. 1")
        wd.find_element_by_name("home").click()
        wd.find_element_by_name("home").clear()
        wd.find_element_by_name("home").send_keys("123123132")
        wd.find_element_by_name("mobile").clear()
        wd.find_element_by_name("mobile").send_keys("654654654")
        wd.find_element_by_name("work").clear()
        wd.find_element_by_name("work").send_keys("98798879887")
        wd.find_element_by_name("fax").clear()
        wd.find_element_by_name("fax").send_keys("789789789")
        # wd.find_element_by_name("theform").click()
        wd.find_element_by_name("email").click()
        wd.find_element_by_name("email").clear()
        wd.find_element_by_name("email").send_keys("js@test.com")
        wd.find_element_by_name("notes").click()
        wd.find_element_by_name("notes").clear()
        wd.find_element_by_name("notes").send_keys("this is our first client")
        # submit contact
        wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
        # return to the home page
        wd.find_element_by_link_text("home page").click()
if __name__ == "__main__":
    # allow running this test module directly
    unittest.main()
| 41.270492 | 78 | 0.660973 |
758cd1f13d0c05c04fd1a1dfa6d69c9e549f0fbc | 2,456 | py | Python | slixmpp/xmlstream/handler/waiter.py | mzealey/slixmpp | b1411d8ed79792c6839f4aace13061256337e69b | [
"BSD-3-Clause"
] | null | null | null | slixmpp/xmlstream/handler/waiter.py | mzealey/slixmpp | b1411d8ed79792c6839f4aace13061256337e69b | [
"BSD-3-Clause"
] | null | null | null | slixmpp/xmlstream/handler/waiter.py | mzealey/slixmpp | b1411d8ed79792c6839f4aace13061256337e69b | [
"BSD-3-Clause"
] | null | null | null |
# slixmpp.xmlstream.handler.waiter
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Part of Slixmpp: The Slick XMPP Library
# :copyright: (c) 2011 Nathanael C. Fritz
# :license: MIT, see LICENSE for more details
import logging
import asyncio
from asyncio import Queue, wait_for, TimeoutError
import slixmpp
from slixmpp.xmlstream.handler.base import BaseHandler
# Module-level logger for this handler.
log = logging.getLogger(__name__)
class Waiter(BaseHandler):

    """
    The Waiter handler allows an event handler to block until a
    particular stanza has been received. The handler will either be
    given the matched stanza, or ``None`` if the waiter has timed out.

    :param string name: The name of the handler.
    :param matcher: A :class:`~slixmpp.xmlstream.matcher.base.MatcherBase`
                    derived object for matching stanza objects.
    :param stream: The :class:`~slixmpp.xmlstream.xmlstream.XMLStream`
                   instance this handler should monitor.
    """

    def __init__(self, name, matcher, stream=None):
        BaseHandler.__init__(self, name, matcher, stream=stream)
        self._payload = Queue()

    def prerun(self, payload):
        """Store the matched stanza when received during processing.

        :param payload: The matched
            :class:`~slixmpp.xmlstream.stanzabase.ElementBase` object.
        """
        self._payload.put_nowait(payload)

    def run(self, payload):
        """Do not process this handler during the main event loop."""
        pass

    async def wait(self, timeout=None):
        """Block an event handler while waiting for a stanza to arrive.

        Be aware that this will impact performance if called from a
        non-threaded event handler.

        Will return either the received stanza, or ``None`` if the
        waiter timed out.

        :param int timeout: The number of seconds to wait for the stanza
            to arrive. Defaults to the stream's
            :class:`~slixmpp.xmlstream.xmlstream.XMLStream.response_timeout`
            value.
        """
        if timeout is None:
            timeout = slixmpp.xmlstream.RESPONSE_TIMEOUT
        stanza = None
        try:
            # Bound the wait with wait_for: awaiting Queue.get() directly
            # never times out, which made the timeout argument a no-op and
            # the TimeoutError branch below unreachable.
            stanza = await wait_for(self._payload.get(), timeout)
        except TimeoutError:
            log.warning("Timed out waiting for %s", self.name)
        # Waiters are single-use: unregister regardless of the outcome.
        self.stream().remove_handler(self.name)
        return stanza

    def check_delete(self):
        """Always remove waiters after use."""
        return True
| 32.315789 | 76 | 0.649837 |
ba3157471f9c806f8b242bc160c83dff02923d5e | 2,757 | py | Python | homeassistant/components/homematicip_cloud/weather.py | VirtualL/home-assistant | 301829d02be8d865ab46c8901ac046d060849320 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/homematicip_cloud/weather.py | VirtualL/home-assistant | 301829d02be8d865ab46c8901ac046d060849320 | [
"Apache-2.0"
] | 3 | 2021-09-08T03:34:57.000Z | 2022-03-12T00:59:48.000Z | homeassistant/components/homematicip_cloud/weather.py | VirtualL/home-assistant | 301829d02be8d865ab46c8901ac046d060849320 | [
"Apache-2.0"
] | null | null | null |
"""Support for HomematicIP Cloud weather devices."""
import logging
from homeassistant.components.weather import WeatherEntity
from homeassistant.const import TEMP_CELSIUS
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
# Home Assistant component this platform depends on.
DEPENDENCIES = ['homematicip_cloud']
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the HomematicIP Cloud weather sensor."""
    # Entities are created via the config entry (async_setup_entry);
    # YAML platform setup is intentionally a no-op.
    pass
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Register HomematicIP Cloud weather entities for a config entry."""
    from homematicip.aio.device import (
        AsyncWeatherSensor, AsyncWeatherSensorPlus, AsyncWeatherSensorPro,
    )

    home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home
    entities = []
    for hmip_device in home.devices:
        # check the Pro model first so it gets its specialized entity class
        if isinstance(hmip_device, AsyncWeatherSensorPro):
            entities.append(HomematicipWeatherSensorPro(home, hmip_device))
        elif isinstance(hmip_device,
                        (AsyncWeatherSensor, AsyncWeatherSensorPlus)):
            entities.append(HomematicipWeatherSensor(home, hmip_device))
    if entities:
        async_add_entities(entities)
class HomematicipWeatherSensor(HomematicipGenericDevice, WeatherEntity):
    """Representation of a HomematicIP Cloud weather sensor plus & basic."""

    def __init__(self, home, device):
        """Initialize the weather sensor."""
        super().__init__(home, device)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._device.label

    @property
    def temperature(self):
        """Return the platform temperature."""
        return self._device.actualTemperature

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def humidity(self):
        """Return the humidity."""
        return self._device.humidity

    @property
    def wind_speed(self):
        """Return the wind speed."""
        return self._device.windSpeed

    @property
    def attribution(self):
        """Return the attribution."""
        return "Powered by Homematic IP"

    @property
    def condition(self):
        """Return the current condition."""
        # not every sensor model reports rain -- hence the hasattr guard
        if hasattr(self._device, "raining") and self._device.raining:
            return 'rainy'
        if self._device.storm:
            return 'windy'
        if self._device.sunshine:
            return 'sunny'
        return ''
class HomematicipWeatherSensorPro(HomematicipWeatherSensor):
    """Representation of a HomematicIP weather sensor pro.

    Extends the basic sensor with a wind direction reading.
    """

    @property
    def wind_bearing(self):
        """Return the wind bearing."""
        return self._device.windDirection
| 29.021053 | 78 | 0.681538 |
f60b41ef2808f4127977e6e61b4b3278bac8cfde | 3,868 | py | Python | app.py | coralvanda/network_health | ea79673ef1e5dd72db277dfb56b53cd4563ebf62 | [
"MIT"
] | null | null | null | app.py | coralvanda/network_health | ea79673ef1e5dd72db277dfb56b53cd4563ebf62 | [
"MIT"
] | null | null | null | app.py | coralvanda/network_health | ea79673ef1e5dd72db277dfb56b53cd4563ebf62 | [
"MIT"
] | null | null | null | """
Principal Author: Eric Linden
Description :
Notes :
November 02, 2020
"""
import os
from datetime import datetime
from typing import List
import dash
import dash_core_components as dcc
import dash_html_components as html
import speedtest
from pythonping import ping
# Indicator color keyed by the number of failed pings in the 20-sample
# pass/fail window: index 0..20, so 21 entries are needed
# (1 green + 4 yellow + 5 orange + 11 red; the old "total len 20" note
# under-counted by one).
COLORS = ['green'] + ['yellow'] * 4 + ['orange'] * 5 + ['red'] * 11
# Directory where the hourly history files are written (see write_out_data).
BASE_FILE_PATH = os.path.join(os.getcwd(), 'history')
# Shared speedtest client, reused across runs.
Speed_Test = speedtest.Speedtest()
app = dash.Dash()
def get_layout():
    """Build the top-level Dash layout: a 30 s interval trigger, a browser
    store holding the ping history, and the ping-output container."""
    children = [
        dcc.Interval(id='interval-id', interval=1000 * 30),  # tick every 30 s
        dcc.Store(id='store', data=dict(pings=[], pass_fail='')),
        html.H2('Ping results:'),
        html.Div(id='ping-output', children=[]),
    ]
    return html.Div(children, id='top-level-container')
app.layout = get_layout()
@app.callback(
    [
        dash.dependencies.Output('ping-output', 'children'),
        dash.dependencies.Output('store', 'data'),
    ],
    [
        dash.dependencies.Input('interval-id', 'n_intervals'),
    ],
    [
        dash.dependencies.State('store', 'data'),
    ]
)
def callback_func(tick, store_state):
    """Handle one 30 s interval tick: ping a fixed external host, update the
    stored history, and render the status output.

    Returns the new children for 'ping-output' and the updated store data.
    """
    speed_test_results = ''
    response = ping('204.2.229.9', verbose=False)
    # the tick counts as good only if every reply in the response succeeded
    good_response = all(r.success for r in response)
    current_time = datetime.now()
    ping_time = current_time.strftime('%H:%M:%S')
    response_elements = [html.P(ping_time)] + [html.P(str(r)) for r in response]
    response_as_str = '1' if good_response else '0'
    if len(store_state['pings']) >= 120:  # one hour of pings
        # flush the accumulated hour to disk (with a fresh speed test)
        # and start a new collection window
        speed_test_results = run_speed_test()
        write_out_data(
            current_time=current_time,
            speed_test_results=speed_test_results,
            ping_results=store_state['pings'])
        store_state['pings'] = []
    # newest result first, keeping at most 20 samples  # 10 min history
    store_state['pass_fail'] = response_as_str + store_state['pass_fail'][:19]
    store_state['pings'] = update_stored_results(
        ping_time=ping_time,
        ping_result=response,
        history=store_state['pings'])
    return (
        html.Div(
            id='callback-output-container',
            children=[
                html.Div(id='responses-container', children=response_elements),
                html.P(speed_test_results),
                html.P(store_state['pass_fail']),
                # colored square: the more '0' (failed) entries in the
                # window, the hotter the color (see COLORS)
                html.Div(
                    id='response-color',
                    children='',
                    style=dict(
                        height=200,
                        width=200,
                        backgroundColor=COLORS[store_state['pass_fail'].count('0')]
                    ))
            ]),
        store_state
    )
def update_stored_results(ping_time: str, ping_result, history: List[str]):
    """Append one formatted ping entry to the history.

    Builds a line of the form ``'HH:MM:SS: reply - reply - ...'`` from the
    individual ping replies and returns a *new* list consisting of the given
    history plus that line; the input list is not mutated.
    """
    replies_text = ' - '.join(str(reply) for reply in ping_result)
    entry = f'{ping_time}: {replies_text}'
    return history + [entry]
def write_out_data(current_time, speed_test_results, ping_results):
    """Write one window of ping history plus speed-test results to a file.

    :param current_time: ``datetime`` used to build the file name
        ('YYYY-MM-DD-HHhMMm.txt' inside BASE_FILE_PATH)
    :param speed_test_results: pre-formatted speed-test summary line
    :param ping_results: list of strings as built by update_stored_results,
        each 'HH:MM:SS: reply - reply - ...'
    """
    # Ensure the history directory exists; without this the original code
    # raised FileNotFoundError on a fresh checkout because 'history/' was
    # never created.
    os.makedirs(BASE_FILE_PATH, exist_ok=True)
    formatted_datetime_for_filename = current_time.strftime('%Y-%m-%d-%Hh%Mm')
    file_path = os.path.join(BASE_FILE_PATH, f'{formatted_datetime_for_filename}.txt')
    with open(file_path, 'w') as f:
        f.write(speed_test_results)
        f.write('\n')
        for result in ping_results:
            # the first 10 characters are the 'HH:MM:SS: ' timestamp prefix
            f.write(result[:10])
            f.write('\n')
            # the remainder holds the individual replies, one per line
            replies = result[10:].split(' - ')
            for reply in replies:
                f.write('\t')
                f.write(reply)
                f.write('\n')
def run_speed_test() -> str:
    """Run the module-level Speed_Test client and format the two rates."""
    # scale raw counts by 2**20 and keep two decimals
    # (units depend on what the Speed_Test client reports — TODO confirm)
    scale = 1048576
    down_speed = round(round(Speed_Test.download()) / scale, 2)
    up_speed = round(round(Speed_Test.upload()) / scale, 2)
    return f'Download speed: {down_speed}, Upload speed: {up_speed}'
if __name__ == '__main__':
    # serve the Dash app on all interfaces; debug disabled
    app.run_server(
        host='0.0.0.0',
        port=8050,
        debug=False)
| 28.233577 | 96 | 0.599535 |
cad5eee062d5c62ff52d3aa3bffd48c674508550 | 9,310 | py | Python | mastersign/datascience/database/__init__.py | mastersign/mastersign-datascience | 29f5167eb7accc8a4aa5f37d90519061cdb4c47e | [
"BSD-3-Clause"
] | 1 | 2019-02-28T19:12:35.000Z | 2019-02-28T19:12:35.000Z | mastersign/datascience/database/__init__.py | mastersign/mastersign-datascience | 29f5167eb7accc8a4aa5f37d90519061cdb4c47e | [
"BSD-3-Clause"
] | null | null | null | mastersign/datascience/database/__init__.py | mastersign/mastersign-datascience | 29f5167eb7accc8a4aa5f37d90519061cdb4c47e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This module contains functionality to comfortably access a SQL database.
"""
import os
from collections import Iterable
import pandas as pd
from sqlalchemy import create_engine
from ..files import read_parquet as read_cachefile
from ..files import write_parquet as write_cachefile
_def_db_conn = None
def set_default_db_conn(db_conn):
    """
    Sets the default connection string for subsequent database queries.

    :param db_conn: A SqlAlchemy connection string.
    """
    # stored at module level so every query helper can fall back on it
    global _def_db_conn
    _def_db_conn = db_conn
def execute(sql, db_conn=None, *args, **kwargs):
    """
    Run a SQL statement for its side effects; no result data is returned.

    :param sql: A string as a SQL statement.
    :param db_conn: A SqlAlchemy connection string. (optional)
    :param args: Additional positional arguments,
                 passed to `sqlalchemy.engine.Connection.execute()`.
    :param kwargs: Additional keyword arguments,
                   passed to `sqlalchemy.engine.Connection.execute()`.
    """
    engine = create_engine(db_conn or _def_db_conn)
    try:
        with engine.connect() as connection:
            connection.execute(sql, *args, **kwargs)
    finally:
        # always release the pooled connections, even when execution raised
        engine.dispose()
def load_query(query, db_conn=None,
               date=None, defaults=None, dtype=None, index=None,
               chunksize=4096, cachefile=None, compress_cache=False,
               **kwargs):
    """
    Load data from an arbitrary SQL query.

    :param query: A string as a SQL query.
    :param db_conn: A SqlAlchemy connection string. (optional)
    :param date: A column name or an iterable with column names,
                 or a dict with column names and date format strings,
                 for parsing specific columns as datetimes. (optional)
    :param defaults:
                 A dict with column names and default values for
                 `NULL` values. (optional)
                 Can be used to fill columns with defaults before converting
                 them to numeric data types with `dtype`.
                 See `pandas.DataFrame.fillna()` for more details.
    :param dtype: A dict with column names and NumPy datatypes
                  or ``'category'``. (optional)
                  See `pandas.DataFrame.astype()` for details.
    :param index: A column name or an iterable with column names,
                  which will be the index in the resulting DataFrame.
                  (optional)
    :param chunksize:
                  The number of rows to load in a chunk before
                  converting them into a Pandas DataFrame. (optional)
    :param cachefile:
                  A path to a file to cache the result data from the query.
                  (optional)
                  If the file already exists, the content of the file is returned
                  instead of connecting to the database.
    :param compress_cache:
                  A switch to activate data compression for the cache file.
    :param kwargs: Additional keyword arguments
                   are passed to `pandas.read_sql_query()`.
    :return: Pandas DataFrame
    """
    if cachefile:
        # bug fix: only validate the parent directory when the path actually
        # has one — os.path.dirname('cache.parquet') is '' and isdir('')
        # is False, which used to reject plain file names in the cwd
        cache_dir = os.path.dirname(cachefile)
        if cache_dir and not os.path.isdir(cache_dir):
            raise FileNotFoundError("The parent directory for the cache file does not exist.")
        try:
            return read_cachefile(cachefile)
        except FileNotFoundError:
            pass  # cache miss: fall through to the database query
    if type(date) is str:
        date = (date,)

    def process_chunk(c):
        # fill NULLs first so the dtype conversion cannot trip over NaN
        if defaults:
            c.fillna(defaults, inplace=True, downcast=dtype)
        if dtype:
            c = c.astype(dtype, copy=False)
        return c

    engine = create_engine(db_conn or _def_db_conn)
    try:
        with engine.connect().execution_options(stream_results=True) as conn:
            chunks = list(map(
                process_chunk,
                pd.read_sql_query(query, conn,
                                  index_col=index,
                                  parse_dates=date,
                                  chunksize=chunksize, **kwargs)))
    finally:
        engine.dispose()
    df = pd.concat(chunks)
    if cachefile:
        write_cachefile(df, cachefile, compress=compress_cache)
    return df
def load_scalar(query, db_conn=None, *args, **kwargs):
    """
    Run a SQL query and return only the first column of its first row.

    :param query: A string as a SQL query.
    :param db_conn: A SqlAlchemy connection string. (optional)
    :param args: Additional positional arguments,
                 passed to `sqlalchemy.engine.Connection.execute()`.
    :param kwargs: Additional keyword arguments,
                   passed to `sqlalchemy.engine.Connection.execute()`.
    :return: A single value
    """
    engine = create_engine(db_conn or _def_db_conn)
    try:
        with engine.connect().execution_options(stream_results=True) as connection:
            result = connection.execute(query, *args, **kwargs)
            return result.scalar()
    finally:
        engine.dispose()
def _select_query(table_name, columns=None, where=None, group_by=None, limit=None):
if columns:
column_list = ', '.join(columns)
else:
column_list = '*'
if type(where) is str:
where_clause = where
elif where:
where_clause = ' AND '.join(
map(lambda term: term if type(term) is str else '(' + ' OR '.join(term) + ')',
where))
else:
where_clause = ''
if where_clause:
where_clause = ' WHERE ' + where_clause
if type(group_by) is str:
group_by_clause = group_by
elif group_by:
group_by_clause = ', '.join(group_by)
else:
group_by_clause = ''
if group_by_clause:
group_by_clause = ' GROUP BY ' + group_by_clause
if limit:
if not isinstance(limit, str) and isinstance(limit, Iterable):
limit_clause = ' LIMIT ' \
+ str(int(limit[0])) + ', ' \
+ str(int(limit[1]))
else:
limit_clause = ' LIMIT ' + str(int(limit))
else:
limit_clause = ''
return "SELECT {} FROM `{}`{}{}{} ;".format(
column_list, table_name, where_clause, group_by_clause, limit_clause)
def load_table(name, columns=None, where=None, group_by=None, limit=None,
               db_conn=None, date=None, defaults=None, dtype=None, index=None,
               chunksize=4096, cachefile=None, compress_cache=False,
               **kwargs):
    """
    Load data from a SQL table into a Pandas DataFrame.

    The SELECT statement is assembled by `_select_query()` and executed via
    `load_query()`, so the caching and conversion options behave the same
    way as for `load_query()`.

    :param name: The name of the table.
    :param columns: An iterable of column names. (optional)
    :param where: A string with one condition or an iterable. (optional)
                  The iterable forms a conjunction and can hold strings
                  as conditions or nested iterables. The nested iterables
                  form disjunctions and must hold strings with conditions.
    :param group_by: A string as a GROUP-BY-clause or an iterable with
                     multiple GROUP-BY-clauses. (optional)
    :param limit: The maximum number of rows,
                  or a pair with a row offset
                  and the maximum number of rows. (optional)
    :param db_conn: A SqlAlchemy connection string. (optional)
    :param date: A column name or an iterable with column names,
                 or a dict with column names and date format strings,
                 for parsing specific columns as datetimes. (optional)
    :param defaults: A dict with column names and default values for
                     `NULL` values, applied before `dtype` conversion.
                     See `pandas.DataFrame.fillna()` for more details. (optional)
    :param dtype: A dict with column names and NumPy datatypes
                  or ``'category'``.
                  See `pandas.DataFrame.astype()` for more details. (optional)
    :param index: A column name or an iterable with column names,
                  which will be the index in the resulting DataFrame. (optional)
    :param chunksize: The number of rows to load in a chunk before
                      converting them into a Pandas DataFrame. (optional)
    :param cachefile: A path to a file to cache the result data from the
                      query; an existing file is read instead of
                      connecting to the database. (optional)
    :param compress_cache: A switch to activate data compression for the
                           cache file.
    :param kwargs: Additional keyword arguments
                   are passed to `pandas.read_sql_query()`.
    :return: Pandas DataFrame
    """
    statement = _select_query(name,
                              columns=columns, where=where,
                              group_by=group_by, limit=limit)
    return load_query(statement, db_conn=db_conn,
                      date=date, defaults=defaults, dtype=dtype, index=index,
                      chunksize=chunksize, cachefile=cachefile,
                      compress_cache=compress_cache, **kwargs)
| 38.953975 | 94 | 0.595166 |
c0826b4921739a3be1a148398c060385b22bb4af | 3,727 | py | Python | setup.py | GdoongMathew/Monitor | 1affeea0ca4f61d84fd8f0b8838a847da16854c2 | [
"MIT"
] | null | null | null | setup.py | GdoongMathew/Monitor | 1affeea0ca4f61d84fd8f0b8838a847da16854c2 | [
"MIT"
] | null | null | null | setup.py | GdoongMathew/Monitor | 1affeea0ca4f61d84fd8f0b8838a847da16854c2 | [
"MIT"
] | null | null | null | import os
import sys
import shutil
import subprocess
from setuptools import find_packages, setup, Command
from distutils.spawn import find_executable
from distutils.command.clean import clean as _clean
from distutils.command.build_py import build_py as _build_py
# package metadata consumed by setup() below
NAME = "monitor"
DESCRIPTION = "Monitor which reads hardware information."
URL = "https://github.com/GdoongMathew/Monitor"
REQUIRES_PYTHON = ">=3.6.0"
VERSION = "0.0.1"
# read runtime dependencies from requirements.txt; fall back to an empty
# list when the file is missing or unreadable
try:
    with open('requirements.txt', encoding='utf-8') as f:
        REQUIRED = f.read().split('\n')
except:
    REQUIRED = []
# Find the Protocol Compiler.
# Resolution order: explicit $PROTOC, a protoc built in the source tree,
# Visual Studio build outputs, and finally whatever is on the PATH.
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
    protoc = os.environ['PROTOC']
elif os.path.exists("../src/protoc"):
    protoc = "../src/protoc"
elif os.path.exists("../src/protoc.exe"):
    protoc = "../src/protoc.exe"
elif os.path.exists("../vsprojects/Debug/protoc.exe"):
    protoc = "../vsprojects/Debug/protoc.exe"
elif os.path.exists("../vsprojects/Release/protoc.exe"):
    protoc = "../vsprojects/Release/protoc.exe"
else:
    # may be None when protoc is not installed; generate_proto() reports this
    protoc = find_executable("protoc")
def generate_proto(source, require=True):
    """Generates a _pb2.py from the given .proto file.

    Does nothing if the output already exists and is newer than the input.

    Args:
      source: the .proto file path.
      require: if True, exit immediately when a path is not found.
    """
    if not require and not os.path.exists(source):
        return
    output = source.replace('.proto', '_pb2.py').replace('../src/', '')
    # regeneration is needed when the output is missing or older than source
    stale = (not os.path.exists(output) or
             (os.path.exists(source) and
              os.path.getmtime(source) > os.path.getmtime(output)))
    if not stale:
        return
    print('Generating %s...' % output)
    if not os.path.exists(source):
        sys.stderr.write("Can't find required file: %s\n" % source)
        sys.exit(-1)
    if protoc is None:
        sys.stderr.write(
            'protoc is not installed nor found in ../src. Please compile it '
            'or install the binary package.\n')
        sys.exit(-1)
    protoc_command = [protoc, '-I../src', '-I.', '--python_out=.', source]
    if subprocess.call(protoc_command) != 0:
        sys.exit(-1)
class clean_cmd(_clean):
    """Custom clean command for building the protobuf extension."""
    # NOTE(review): this class is not registered in the setup() cmdclass
    # below — the narrower `clean` class is used instead. Confirm whether
    # clean_cmd can be removed.
    def run(self):
        # Delete generated files in the code tree.
        for (dirpath, unused_dirnames, filenames) in os.walk('.'):
            for filename in filenames:
                filepath = os.path.join(dirpath, filename)
                # remove protobuf outputs and compiled artifacts
                if (filepath.endswith('_pb2.py') or filepath.endswith('.pyc') or
                        filepath.endswith('.so') or filepath.endswith('.o')):
                    os.remove(filepath)
        # _clean is an old-style class, so super() doesn't work.
        _clean.run(self)
# List of all .proto files
# (paths relative to the repository root; build_py below compiles each one)
proto_src = [
    'monitor/reader/proto/device.proto',
]
class build_py(_build_py):
    """Build command that compiles the protobuf modules before the build."""
    def run(self):
        # generate every *_pb2.py so the modules are included in the build
        for proto_file in proto_src:
            generate_proto(proto_file)
        _build_py.run(self)
class clean(_clean):
    """Clean command that also removes the generated *_pb2.py modules."""
    def run(self):
        # Delete generated files in the code tree.
        for dirpath, _dirnames, filenames in os.walk("."):
            for filename in filenames:
                if filename.endswith("_pb2.py"):
                    os.remove(os.path.join(dirpath, filename))
        # _clean is an old-style class, so super() doesn't work.
        _clean.run(self)
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    packages=find_packages(),
    install_requires=REQUIRED,
    # register the protobuf-aware commands defined above
    cmdclass={'clean': clean, 'build_py': build_py}
)
| 30.801653 | 82 | 0.625973 |
b5319b7cd68425693b38afd5f4966c86596f12cc | 1,904 | py | Python | chanjo/converter/cli.py | nuin/chanjo | 9a1b4aa247c3fe5bf150ac24952d04be43befaa1 | [
"MIT"
] | null | null | null | chanjo/converter/cli.py | nuin/chanjo | 9a1b4aa247c3fe5bf150ac24952d04be43befaa1 | [
"MIT"
] | null | null | null | chanjo/converter/cli.py | nuin/chanjo | 9a1b4aa247c3fe5bf150ac24952d04be43befaa1 | [
"MIT"
] | 1 | 2018-07-18T14:56:09.000Z | 2018-07-18T14:56:09.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from pkg_resources import iter_entry_points, load_entry_point
import click
from toolz import pipe
from toolz.curried import map
from ..utils import serialize_interval, validate_stdin
@click.command()
@click.option(
    '-a', '--adapter', default='ccds', help='plugin to use for conversion')
@click.option(
    '-l', '--list', 'list_all', is_flag=True, help='show all installed adapters')
@click.argument(
    'in_stream', callback=validate_stdin, type=click.File(encoding='utf-8'),
    default='-', required=False)
@click.pass_context
def convert(context, in_stream, adapter, list_all):
    """Convert a reference database file to a Chanjo BED interval file.
    \b
    IN_STREAM: interval reference file (e.g. CCDS database dump)
    """
    # converter plugins are discovered via the 'chanjo.converters' entry
    # point group of the top-level package
    program_name = __package__.split('.')[0]
    if list_all:
        # list the installed converter options
        for entry_point in iter_entry_points('chanjo.converters'):
            # compose and print the message
            segments = dict(
                program=program_name,
                note=click.style('converter', fg='cyan'),
                plugin=entry_point.name
            )
            click.echo("%(program)s %(note)s %(plugin)s" % segments)
    else:
        try:
            # load a single entry point
            converter_pipeline = load_entry_point(
                program_name, 'chanjo.converters', adapter
            )
        except ImportError:
            # unknown adapter: report and abort with a non-zero exit code
            segments = dict(
                program=program_name,
                note=click.style('error', fg='red'),
                message="No such converter installed: %s" % adapter
            )
            click.echo("%(program)s %(note)s %(message)s" % segments)
            context.abort()
        # execute converter pipeline
        bed_lines = pipe(
            converter_pipeline(in_stream),
            map(serialize_interval(bed=True))  # stringify/bedify
        )
        # reduce/write the BED lines
        for bed_line in bed_lines:
            click.echo(bed_line)
| 29.75 | 79 | 0.677521 |
35e37d7eae07cabd6394107cb6c6662372376333 | 12,739 | py | Python | pyiron_base/database/filetable.py | niklassiemer/pyiron_base | 4f2fc35819279798a6deb6394354722378a7816b | [
"BSD-3-Clause"
] | null | null | null | pyiron_base/database/filetable.py | niklassiemer/pyiron_base | 4f2fc35819279798a6deb6394354722378a7816b | [
"BSD-3-Clause"
] | 61 | 2021-05-17T15:25:43.000Z | 2022-03-31T04:14:19.000Z | pyiron_base/database/filetable.py | niklassiemer/pyiron_base | 4f2fc35819279798a6deb6394354722378a7816b | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import os
import pandas
import datetime
import h5io
from pyfileindex import PyFileIndex
from pyiron_base.generic.util import Singleton
def filter_function(file_name):
    """Return True when the file name contains the HDF5 suffix '.h5'."""
    return file_name.find('.h5') >= 0
class FileTable(metaclass=Singleton):
    """File-system backed job table: scans a project directory for HDF5 job
    files and mirrors them in an in-memory pandas DataFrame, emulating the
    SQL job-database interface."""
    def __init__(self, project):
        # PyFileIndex over the project tree; populated by force_reset()
        self._fileindex = None
        # pandas DataFrame acting as the in-memory job database
        self._job_table = None
        self._project = os.path.abspath(project)
        # column layout mirrors the schema of the SQL-backed job table
        self._columns = ['id', 'status', 'chemicalformula', 'job', 'subjob', 'projectpath', 'project', 'timestart',
                         'timestop', 'totalcputime', 'computer', 'hamilton', 'hamversion', 'parentid', 'masterid',
                         'username']
        self.force_reset()
    @property
    def viewer_mode(self):
        # the file-based backend has no viewer/user distinction; always None
        return None
    def force_reset(self):
        """Rebuild the file index and the in-memory job table from scratch."""
        self._fileindex = PyFileIndex(
            path=self._project,
            filter_function=filter_function
        )
        df = pandas.DataFrame(self.init_table(fileindex=self._fileindex.dataframe))
        if len(df) != 0:
            df.id = df.id.astype(int)
            self._job_table = df[np.array(self._columns)]
        else:
            # keep an empty frame with the full schema so lookups still work
            self._job_table = pandas.DataFrame({k: [] for k in self._columns})
    def init_table(self, fileindex, working_dir_lst=None):
        """Convert a PyFileIndex dataframe of .h5 files into job-row dicts.

        :param fileindex: pandas DataFrame as produced by PyFileIndex
        :param working_dir_lst: already known working directories, used to
            resolve master/child relationships (optional)
        :return: list of job dictionaries
        """
        if working_dir_lst is None:
            working_dir_lst = []
        fileindex = fileindex[~fileindex.is_directory]
        fileindex = fileindex.iloc[fileindex.path.values.argsort()]
        job_lst = []
        for path, mtime in zip(fileindex.path, fileindex.mtime):
            job_dict = self.get_extract(path, mtime)
            job_dict['id'] = len(working_dir_lst) + 1
            working_dir_lst.append(job_dict['project'][:-1] + job_dict['subjob'] + '_hdf5/')
            # a job located inside another job's working directory is a child
            if job_dict['project'] in working_dir_lst:
                job_dict['masterid'] = working_dir_lst.index(job_dict['project']) + 1
            else:
                job_dict['masterid'] = None
            job_lst.append(job_dict)
        return job_lst
def add_item_dict(self, par_dict):
par_dict = dict((key.lower(), value) for key, value in par_dict.items())
if len(self._job_table) != 0:
job_id = np.max(self._job_table.id.values) + 1
else:
job_id = 1
default_values = {
'id': job_id,
'status': 'initialized',
'chemicalformula': None,
'timestart': datetime.datetime.now(),
'computer': None,
'parentid': None,
'username': None,
'timestop': None,
'totalcputime': None,
'masterid': None
}
for k, v in default_values.items():
if k not in par_dict.keys():
par_dict[k] = v
self._job_table = pandas.concat([
self._job_table,
pandas.DataFrame([par_dict])[self._columns]
]).reset_index(drop=True)
return int(par_dict['id'])
def item_update(self, par_dict, item_id):
if isinstance(item_id, list):
item_id = item_id[0]
if isinstance(item_id, str):
item_id = float(item_id)
for k, v in par_dict.items():
self._job_table.loc[self._job_table.id == int(item_id), k] = v
def delete_item(self, item_id):
item_id = int(item_id)
if item_id in [int(v) for v in self._job_table.id.values]:
self._job_table = self._job_table[self._job_table.id != item_id].reset_index(drop=True)
else:
raise ValueError
def get_item_by_id(self, item_id):
item_id = int(item_id)
return {k: list(v.values())[0] for k, v in self._job_table[self._job_table.id == item_id].to_dict().items()}
def get_items_dict(self, item_dict, return_all_columns=True):
df = self._job_table
if not isinstance(item_dict, dict):
raise TypeError
for k, v in item_dict.items():
if k in ['id', 'parentid', 'masterid']:
df = df[df[k] == int(v)]
elif "%" not in str(v):
df = df[df[k] == v]
else:
df = df[df[k].str.contains(v.replace('%', ''))]
df_dict = df.to_dict()
if return_all_columns:
return [{k: v[i] for k, v in df_dict.items()} for i in df_dict['id'].keys()]
else:
return [{'id': i} for i in df_dict['id'].values()]
    def update(self):
        """Refresh job statuses from disk and append newly discovered jobs."""
        # re-read every job's status from its HDF5 file
        self._job_table.status = [
            self._get_job_status_from_hdf5(job_id)
            for job_id in self._job_table.id.values
        ]
        self._fileindex.update()
        if len(self._job_table) != 0:
            # known job files and their working directories
            files_lst, working_dir_lst = zip(*[[project + subjob[1:] + '.h5', project + subjob[1:] + '_hdf5']
                                               for project, subjob in zip(self._job_table.project.values,
                                                                          self._job_table.subjob.values)])
            df_new = self._fileindex.dataframe[
                ~self._fileindex.dataframe.is_directory & ~self._fileindex.dataframe.path.isin(files_lst)]
        else:
            files_lst, working_dir_lst = [], []
            df_new = self._fileindex.dataframe[~self._fileindex.dataframe.is_directory]
        if len(df_new) > 0:
            job_lst = self.init_table(fileindex=df_new, working_dir_lst=list(working_dir_lst))
            df = pandas.DataFrame(job_lst)[self._columns]
            if len(files_lst) != 0 and len(working_dir_lst) != 0:
                self._job_table = pandas.concat([self._job_table, df]).reset_index(drop=True)
            else:
                self._job_table = df
    def get_db_columns(self):
        """Alias for get_table_headings() to match the SQL database API."""
        return self.get_table_headings()
    def get_table_headings(self):
        """Return the column names of the job table."""
        return self._job_table.columns.values
    def job_table(
        self,
        project=None,
        recursive=True,
        columns=None,
        all_columns=False,
        sort_by="id",
        max_colwidth=200,
        full_table=False,
        job_name_contains=''
    ):
        """Return a (filtered) view of the job table.

        :param project: project path filter; defaults to the root project
        :param recursive: include sub-projects via substring match
        :param columns: columns to return; defaults to job/project/chemicalformula
        :param all_columns: return every column, overriding *columns*
        :param sort_by: column to sort by (only applied if selected)
        :param max_colwidth: pandas display option for column width
        :param full_table: disable pandas row/column display truncation
        :param job_name_contains: only jobs whose name contains this substring
        :return: pandas DataFrame
        """
        if project is None:
            project = self._project
        if columns is None:
            columns = ["job", "project", "chemicalformula"]
        if all_columns:
            columns = self._columns
        if len(self._job_table) != 0:
            if recursive:
                df = self._job_table[self._job_table.project.str.contains(project)]
            else:
                df = self._job_table[self._job_table.project == project]
        else:
            df = self._job_table
        # NOTE: these mutate global pandas display options as a side effect
        if full_table:
            pandas.set_option('display.max_rows', None)
            pandas.set_option('display.max_columns', None)
        else:
            pandas.reset_option('display.max_rows')
            pandas.reset_option('display.max_columns')
        pandas.set_option("display.max_colwidth", max_colwidth)
        if len(df) == 0:
            return df
        if job_name_contains != '':
            df = df[df.job.str.contains(job_name_contains)]
        if sort_by in columns:
            return df[columns].sort_values(by=sort_by)
        return df[columns]
def get_jobs(self, project=None, recursive=True, columns=None):
if project is None:
project = self._project
if columns is None:
columns = ["id", "project"]
df = self.job_table(project=project, recursive=recursive, columns=columns)
if len(df) == 0:
dictionary = {}
for key in columns:
dictionary[key] = list()
return dictionary
# return {key: list() for key in columns}
dictionary = {}
for key in df.keys():
dictionary[key] = df[
key
].tolist() # ToDo: Check difference of tolist and to_list
return dictionary
    def get_job_ids(self, project=None, recursive=True):
        """Return the ids of all jobs below *project* (recursive by default)."""
        return self.get_job_ids_impl(project, recursive) if False else self.get_jobs(project=project, recursive=recursive, columns=['id'])["id"]
def get_job_id(self, job_specifier, project=None):
if project is None:
project = self._project
if isinstance(job_specifier, (int, np.integer)):
return job_specifier # is id
job_specifier.replace(".", "_")
job_id_lst = self._job_table[
(self._job_table.project == project) & (self._job_table.job == job_specifier)].id.values
if len(job_id_lst) == 0:
job_id_lst = self._job_table[
self._job_table.project.str.contains(project) & (self._job_table.job == job_specifier)].id.values
if len(job_id_lst) == 0:
return None
elif len(job_id_lst) == 1:
return int(job_id_lst[0])
else:
raise ValueError(
"job name '{0}' in this project is not unique".format(job_specifier)
)
    def get_child_ids(self, job_specifier, project=None, status=None):
        """
        Get the child ids for a specific master job

        Args:
            job_specifier (str/int): name or id of the master job
            project (str): project path to search in; defaults to the root
                project of this table (optional)
            status (str): filter childs which match a specific status - None by default

        Returns:
            list: sorted list of child IDs
        """
        if project is None:
            project = self._project
        id_master = self.get_job_id(project=project, job_specifier=job_specifier)
        if id_master is None:
            return []
        else:
            if status is not None:
                id_lst = self._job_table[
                    (self._job_table.masterid == id_master) & (self._job_table.status == status)].id.values
            else:
                id_lst = self._job_table[(self._job_table.masterid == id_master)].id.values
            return sorted(id_lst)
    def get_job_working_directory(self, job_id):
        """
        Get the working directory of a particular job

        Args:
            job_id (int): id of the job

        Returns:
            str: working directory as absolute path, or None when the job
                (or its project/subjob entry) cannot be resolved
        """
        try:
            db_entry = self.get_item_by_id(job_id)
            if db_entry and len(db_entry) > 0:
                job_name = db_entry["subjob"][1:]
                return os.path.join(
                    db_entry["project"],
                    job_name + "_hdf5",
                    job_name,
                )
            else:
                return None
        except KeyError:
            return None
    def _get_job_status_from_hdf5(self, job_id):
        """Read the job's status directly from its HDF5 file (not the cache)."""
        db_entry = self.get_item_by_id(job_id)
        job_name = db_entry["subjob"][1:]
        return get_job_status_from_file(
            hdf5_file=os.path.join(db_entry["project"], job_name + ".h5"),
            job_name=job_name
        )
    def get_job_status(self, job_id):
        """Return the cached status string for the given job id."""
        return self._job_table[self._job_table.id == job_id].status.values[0]
    def set_job_status(self, job_id, status):
        """Set a job's status in the in-memory table AND in its HDF5 file."""
        db_entry = self.get_item_by_id(item_id=job_id)
        self._job_table.loc[self._job_table.id == job_id, 'status'] = status
        h5io.write_hdf5(db_entry["project"] + db_entry["subjob"] + '.h5',
                        status,
                        title=db_entry["subjob"][1:] + '/status',
                        overwrite="update")
    @staticmethod
    def get_extract(path, mtime):
        """Build a job-table row dict from an HDF5 file path and its mtime."""
        basename = os.path.basename(path)
        job = os.path.splitext(basename)[0]
        time = datetime.datetime.fromtimestamp(mtime)
        return {
            'status': get_job_status_from_file(hdf5_file=path, job_name=job),
            'chemicalformula': None,
            'job': job,
            'subjob': '/' + job,
            'projectpath': None,
            'project': os.path.dirname(path) + '/',
            # the file mtime serves as both start and stop time
            'timestart': time,
            'timestop': time,
            'totalcputime': 0.0,
            'computer': None,
            'username': None,
            'parentid': None,
            'hamilton': get_hamilton_from_file(hdf5_file=path, job_name=job),
            'hamversion': get_hamilton_version_from_file(hdf5_file=path, job_name=job)
        }
def get_hamilton_from_file(hdf5_file, job_name):
    """Extract the job class name from the '<job>/TYPE' entry of the HDF5 file."""
    return h5io.read_hdf5(hdf5_file, job_name + '/TYPE').split(".")[-1].split("'")[0]
def get_hamilton_version_from_file(hdf5_file, job_name):
    """Read the job version from the '<job>/VERSION' entry of the HDF5 file."""
    return h5io.read_hdf5(hdf5_file, job_name + '/VERSION')
def get_job_status_from_file(hdf5_file, job_name):
    """Read the job status from the '<job>/status' entry of the HDF5 file."""
    return h5io.read_hdf5(hdf5_file, job_name + '/status')
| 38.026866 | 116 | 0.57163 |
08fa8d19c64bc93ec30c9289fd414e873f7e396b | 1,238 | py | Python | tests.py | geospatial-jeff/cognition-datasources-srtm | 057a5d9ea2a6b8b7e740646dbdc2344e5e9da776 | [
"Apache-2.0"
] | null | null | null | tests.py | geospatial-jeff/cognition-datasources-srtm | 057a5d9ea2a6b8b7e740646dbdc2344e5e9da776 | [
"Apache-2.0"
] | null | null | null | tests.py | geospatial-jeff/cognition-datasources-srtm | 057a5d9ea2a6b8b7e740646dbdc2344e5e9da776 | [
"Apache-2.0"
] | null | null | null | from datasources import tests
from SRTM import SRTM
class SRTMTestCases(tests.BaseTestCases):
    """Datasource test fixtures for the SRTM elevation tiles."""
    def _setUp(self):
        self.datasource = SRTM
        # closed GeoJSON ring ([lon, lat] pairs) used as the spatial filter
        ring = [
            [-101.28433227539062, 46.813218976041945],
            [-100.89431762695312, 46.813218976041945],
            [-100.89431762695312, 47.06450941441436],
            [-101.28433227539062, 47.06450941441436],
            [-101.28433227539062, 46.813218976041945],
        ]
        self.spatial = {
            "type": "Polygon",
            "coordinates": [ring],
        }
        self.properties = {'eo:gsd': {'eq': 30.0}}
        self.limit = 10

    def test_temporal_search(self):
        # Underlying API doesn't accept temporal
        pass
cc505681e530f9e70fdee743df0fe7f7d9d38643 | 13,462 | py | Python | logstash/datadog_checks/logstash/logstash.py | davidlrosenblum/integrations-extras | 281864a99ae054c91c3e3ea6a8ee8f04f6d7cdf3 | [
"BSD-3-Clause"
] | 158 | 2016-06-02T16:25:31.000Z | 2022-03-16T15:55:14.000Z | logstash/datadog_checks/logstash/logstash.py | davidlrosenblum/integrations-extras | 281864a99ae054c91c3e3ea6a8ee8f04f6d7cdf3 | [
"BSD-3-Clause"
] | 554 | 2016-03-15T17:39:12.000Z | 2022-03-31T10:29:16.000Z | logstash/datadog_checks/logstash/logstash.py | davidlrosenblum/integrations-extras | 281864a99ae054c91c3e3ea6a8ee8f04f6d7cdf3 | [
"BSD-3-Clause"
] | 431 | 2016-05-13T15:33:13.000Z | 2022-03-31T10:06:46.000Z | # stdlib
from collections import namedtuple
from distutils.version import LooseVersion
# 3rd party
from six import iteritems
from six.moves.urllib.parse import urljoin, urlparse
# project
from datadog_checks.base import AgentCheck
EVENT_TYPE = SOURCE_TYPE_NAME = 'logstash'
# lightweight per-instance configuration bundle built in get_instance_config()
LogstashInstanceConfig = namedtuple('LogstashInstanceConfig', ['service_check_tags', 'tags', 'url'])
class LogstashCheck(AgentCheck):
    """Datadog agent check that collects node and pipeline metrics from the
    Logstash monitoring HTTP API."""
    # version reported when the Logstash root endpoint cannot be queried
    DEFAULT_VERSION = '1.0.0'
    SERVICE_CHECK_CONNECT_NAME = 'logstash.can_connect'
    # map legacy ssl_* instance options onto the base check's tls_* settings
    HTTP_CONFIG_REMAPPER = {
        'ssl_cert': {
            'name': 'tls_cert',
        },
        'ssl_key': {
            'name': 'tls_private_key',
        },
        'ssl_verify': {
            'name': 'tls_verify',
            'default': False,
        },
    }
    # node stats (process / JVM / reload counters) mapped to Datadog gauges:
    # metric name -> (submission type, path inside the _node/stats payload)
    STATS_METRICS = {
        "logstash.process.open_file_descriptors": ("gauge", "process.open_file_descriptors"),
        "logstash.process.peak_open_file_descriptors": ("gauge", "process.peak_open_file_descriptors"),
        "logstash.process.max_file_descriptors": ("gauge", "process.max_file_descriptors"),
        "logstash.process.mem.total_virtual_in_bytes": ("gauge", "process.mem.total_virtual_in_bytes"),
        "logstash.process.cpu.total_in_millis": ("gauge", "process.cpu.total_in_millis"),
        "logstash.process.cpu.percent": ("gauge", "process.cpu.percent"),
        "logstash.process.cpu.load_average.1m": ("gauge", "process.cpu.load_average.1m"),
        "logstash.process.cpu.load_average.5m": ("gauge", "process.cpu.load_average.5m"),
        "logstash.process.cpu.load_average.15m": ("gauge", "process.cpu.load_average.15m"),
        "logstash.jvm.threads.count": ("gauge", "jvm.threads.count"),
        "logstash.jvm.threads.peak_count": ("gauge", "jvm.threads.peak_count"),
        "logstash.jvm.mem.heap_used_percent": ("gauge", "jvm.mem.heap_used_percent"),
        "logstash.jvm.mem.heap_committed_in_bytes": ("gauge", "jvm.mem.heap_committed_in_bytes"),
        "logstash.jvm.mem.heap_max_in_bytes": ("gauge", "jvm.mem.heap_max_in_bytes"),
        "logstash.jvm.mem.heap_used_in_bytes": ("gauge", "jvm.mem.heap_used_in_bytes"),
        "logstash.jvm.mem.non_heap_used_in_bytes": ("gauge", "jvm.mem.non_heap_used_in_bytes"),
        "logstash.jvm.mem.non_heap_committed_in_bytes": ("gauge", "jvm.mem.non_heap_committed_in_bytes"),
        "logstash.jvm.mem.pools.survivor.peak_used_in_bytes": ("gauge", "jvm.mem.pools.survivor.peak_used_in_bytes"),
        "logstash.jvm.mem.pools.survivor.used_in_bytes": ("gauge", "jvm.mem.pools.survivor.used_in_bytes"),
        "logstash.jvm.mem.pools.survivor.peak_max_in_bytes": ("gauge", "jvm.mem.pools.survivor.peak_max_in_bytes"),
        "logstash.jvm.mem.pools.survivor.max_in_bytes": ("gauge", "jvm.mem.pools.survivor.max_in_bytes"),
        "logstash.jvm.mem.pools.survivor.committed_in_bytes": ("gauge", "jvm.mem.pools.survivor.committed_in_bytes"),
        "logstash.jvm.mem.pools.old.peak_used_in_bytes": ("gauge", "jvm.mem.pools.old.peak_used_in_bytes"),
        "logstash.jvm.mem.pools.old.used_in_bytes": ("gauge", "jvm.mem.pools.old.used_in_bytes"),
        "logstash.jvm.mem.pools.old.peak_max_in_bytes": ("gauge", "jvm.mem.pools.old.peak_max_in_bytes"),
        "logstash.jvm.mem.pools.old.max_in_bytes": ("gauge", "jvm.mem.pools.old.max_in_bytes"),
        "logstash.jvm.mem.pools.old.committed_in_bytes": ("gauge", "jvm.mem.pools.old.committed_in_bytes"),
        "logstash.jvm.mem.pools.young.peak_used_in_bytes": ("gauge", "jvm.mem.pools.young.peak_used_in_bytes"),
        "logstash.jvm.mem.pools.young.used_in_bytes": ("gauge", "jvm.mem.pools.young.used_in_bytes"),
        "logstash.jvm.mem.pools.young.peak_max_in_bytes": ("gauge", "jvm.mem.pools.young.peak_max_in_bytes"),
        "logstash.jvm.mem.pools.young.max_in_bytes": ("gauge", "jvm.mem.pools.young.max_in_bytes"),
        "logstash.jvm.mem.pools.young.committed_in_bytes": ("gauge", "jvm.mem.pools.young.committed_in_bytes"),
        "logstash.jvm.gc.collectors.old.collection_time_in_millis": (
            "gauge",
            "jvm.gc.collectors.old.collection_time_in_millis",
        ),
        "logstash.jvm.gc.collectors.old.collection_count": ("gauge", "jvm.gc.collectors.old.collection_count"),
        "logstash.jvm.gc.collectors.young.collection_time_in_millis": (
            "gauge",
            "jvm.gc.collectors.young.collection_time_in_millis",
        ),
        "logstash.jvm.gc.collectors.young.collection_count": ("gauge", "jvm.gc.collectors.young.collection_count"),
        "logstash.reloads.successes": ("gauge", "reloads.successes"),
        "logstash.reloads.failures": ("gauge", "reloads.failures"),
    }
    # per-pipeline event/reload metrics (paths relative to one pipeline entry)
    PIPELINE_METRICS = {
        "logstash.pipeline.dead_letter_queue.queue_size_in_bytes": ("gauge", "dead_letter_queue.queue_size_in_bytes"),
        "logstash.pipeline.events.duration_in_millis": ("gauge", "events.duration_in_millis"),
        "logstash.pipeline.events.in": ("gauge", "events.in"),
        "logstash.pipeline.events.out": ("gauge", "events.out"),
        "logstash.pipeline.events.filtered": ("gauge", "events.filtered"),
        "logstash.pipeline.reloads.successes": ("gauge", "reloads.successes"),
        "logstash.pipeline.reloads.failures": ("gauge", "reloads.failures"),
    }
    # persistent-queue capacity metrics per pipeline
    PIPELINE_QUEUE_METRICS = {
        "logstash.pipeline.queue.events": ("gauge", "queue.events"),
        "logstash.pipeline.queue.capacity.max_queue_size_in_bytes": ("gauge", "queue.capacity.max_queue_size_in_bytes"),
        "logstash.pipeline.queue.capacity.queue_size_in_bytes": ("gauge", "queue.capacity.queue_size_in_bytes"),
        "logstash.pipeline.queue.capacity.max_unread_events": ("gauge", "queue.capacity.max_unread_events"),
        "logstash.pipeline.queue.capacity.page_capacity_in_bytes": ("gauge", "queue.capacity.page_capacity_in_bytes"),
    }
    # per-plugin metrics, keyed by plugin category
    PIPELINE_INPUTS_METRICS = {
        "logstash.pipeline.plugins.inputs.events.out": ("gauge", "events.out"),
        "logstash.pipeline.plugins.inputs.events.queue_push_duration_in_millis": (
            "gauge",
            "events.queue_push_duration_in_millis",
        ),
    }
    PIPELINE_OUTPUTS_METRICS = {
        "logstash.pipeline.plugins.outputs.events.in": ("gauge", "events.in"),
        "logstash.pipeline.plugins.outputs.events.out": ("gauge", "events.out"),
        "logstash.pipeline.plugins.outputs.events.duration_in_millis": ("gauge", "events.duration_in_millis"),
    }
    PIPELINE_FILTERS_METRICS = {
        "logstash.pipeline.plugins.filters.events.in": ("gauge", "events.in"),
        "logstash.pipeline.plugins.filters.events.out": ("gauge", "events.out"),
        "logstash.pipeline.plugins.filters.events.duration_in_millis": ("gauge", "events.duration_in_millis"),
    }
def get_instance_config(self, instance):
    """Build a LogstashInstanceConfig (url, tags, service-check tags) from
    one ``instance`` entry of the check configuration.

    Raises a generic Exception when no 'url' key is present.
    """
    url = instance.get('url')
    if url is None:
        raise Exception("A URL must be specified in the instance")
    parsed = urlparse(url)
    # Support URLs that have a path in them from the config, for
    # backwards-compatibility: keep only scheme://netloc.
    if parsed.path != "":
        url = "{0}://{1}".format(parsed.scheme, parsed.netloc)
    custom_tags = instance.get('tags', [])
    # Service checks are tagged by host/port, metrics by full URL so that
    # multiple monitored instances stay distinguishable.
    service_check_tags = ['host:%s' % parsed.hostname, 'port:%s' % parsed.port] + list(custom_tags)
    tags = ['url:%s' % url] + list(custom_tags)
    return LogstashInstanceConfig(
        service_check_tags=service_check_tags,
        tags=tags,
        url=url,
    )
def _get_data(self, url, config, send_sc=True):
    """Fetch ``url`` and return the parsed JSON body.

    On any failure the exception is re-raised; unless ``send_sc`` is False,
    a CRITICAL connect service check is emitted first so connection problems
    are visible in Datadog.
    """
    try:
        resp = self.http.get(url)
        resp.raise_for_status()
    except Exception as e:
        if send_sc:
            self.service_check(
                self.SERVICE_CHECK_CONNECT_NAME,
                AgentCheck.CRITICAL,
                message="Error {0} when hitting {1}".format(e, url),
                tags=config.service_check_tags,
            )
        raise
    return resp.json()
def _get_logstash_version(self, config):
    """Get the running version of logstash.

    Falls back to DEFAULT_VERSION (with a warning) when the root endpoint
    is unreachable or has no 'version' key, so the check can proceed.
    """
    try:
        # send_sc=False: the stats request in check() owns the connect
        # service check; don't double-report from this best-effort probe.
        data = self._get_data(config.url, config, send_sc=False)
        version = data['version']
    except Exception as e:
        self.warning(
            "Error while trying to get Logstash version from %s %s. Defaulting to version %s.",
            config.url,
            e,
            self.DEFAULT_VERSION,
        )
        version = self.DEFAULT_VERSION
    self.service_metadata('version', version)
    self.log.debug("Logstash version is %s", version)
    return version
def _is_multi_pipeline(self, version):
    """Reusable version checker: multi-pipeline support arrived in 6.0.0.

    A falsy ``version`` is returned unchanged (still falsy), preserving the
    original short-circuit semantics for callers using boolean context.
    """
    if not version:
        return version
    return LooseVersion(version) >= LooseVersion("6.0.0")
def check(self, instance):
    """Datadog entry point: collect node-level and per-pipeline stats."""
    config = self.get_instance_config(instance)
    logstash_version = self._get_logstash_version(config)
    stats_url = urljoin(config.url, '/_node/stats')
    stats_data = self._get_data(stats_url, config)
    # Node-level stats (JVM, process, reloads, ...).
    for metric, desc in iteritems(self.STATS_METRICS):
        self._process_metric(stats_data, metric, *desc, tags=config.tags)
    if not self._is_multi_pipeline(logstash_version):
        # Logstash < 6: a single pipeline lives under the 'pipeline' key.
        self._process_pipeline_data(stats_data['pipeline'], config.tags, logstash_version)
    elif 'pipelines' in stats_data:
        # Logstash >= 6: one entry per named pipeline, tagged individually.
        for pipeline_name, pipeline_data in iteritems(stats_data['pipelines']):
            if pipeline_name.startswith('.'):
                # skip internal pipelines like '.monitoring_logstash'
                continue
            metric_tags = list(config.tags)
            metric_tags.append(u'pipeline_name:{}'.format(pipeline_name))
            self._process_pipeline_data(pipeline_data, metric_tags, logstash_version)
    # Reaching this point means every request succeeded.
    self.service_check(self.SERVICE_CHECK_CONNECT_NAME, AgentCheck.OK, tags=config.service_check_tags)
def _process_stats_data(self, data, stats_metrics, config):
    """Submit every metric in ``stats_metrics`` found in ``data``.

    NOTE(review): check() inlines this same loop and does not call this
    helper; it appears to be legacy — confirm before removing.
    """
    for metric, desc in iteritems(stats_metrics):
        self._process_metric(data, metric, *desc, tags=config.tags)
def _process_pipeline_data(self, pipeline_data, tags, logstash_version):
    """
    Simple interface to run multiple metric submissions for pipeline top level,
    plugin inputs, outputs, and filters
    """
    self._process_top_level_pipeline_data(pipeline_data, tags, logstash_version)
    # Each plugin family gets its own metric map and tag key.
    self._process_pipeline_plugins_data(
        pipeline_data['plugins'], self.PIPELINE_INPUTS_METRICS, tags, 'inputs', 'input_name'
    )
    self._process_pipeline_plugins_data(
        pipeline_data['plugins'], self.PIPELINE_OUTPUTS_METRICS, tags, 'outputs', 'output_name'
    )
    self._process_pipeline_plugins_data(
        pipeline_data['plugins'], self.PIPELINE_FILTERS_METRICS, tags, 'filters', 'filter_name'
    )
def _process_top_level_pipeline_data(self, pipeline_data, tags, logstash_version):
"""
If multipipeline, also process metrics associated with multi-pipeline versions.
"""
pipeline_metrics = self.PIPELINE_METRICS
if self._is_multi_pipeline(logstash_version):
pipeline_metrics.update(self.PIPELINE_QUEUE_METRICS)
for metric, metric_desc in iteritems(pipeline_metrics):
self._process_metric(pipeline_data, metric, *metric_desc, tags=tags)
def _process_pipeline_plugins_data(
    self, pipeline_plugins_data, pipeline_plugins_metrics, tags, plugin_type, tag_name, pipeline_name=None
):
    """Submit per-plugin metrics for one plugin family.

    plugin_type selects 'inputs' / 'outputs' / 'filters' in the payload;
    tag_name is the tag key used for the plugin's name.
    """
    for plugin_data in pipeline_plugins_data.get(plugin_type, []):
        plugin_name = plugin_data.get('name')
        plugin_conf_id = plugin_data.get('id')
        metrics_tags = list(tags)
        # Plugins with no 'name' in the payload are still reported.
        if not plugin_name:
            plugin_name = 'unknown'
        metrics_tags.append(u"{}:{}".format(tag_name, plugin_name))
        if pipeline_name:
            metrics_tags.append(u"pipeline_name:{}".format(pipeline_name))
        if plugin_conf_id:
            metrics_tags.append(u"plugin_conf_id:{}".format(plugin_conf_id))
        for metric, desc in iteritems(pipeline_plugins_metrics):
            self._process_metric(plugin_data, metric, *desc, tags=metrics_tags)
def _process_metric(self, data, metric, xtype, path, tags=None, hostname=None):
"""data: dictionary containing all the stats
metric: datadog metric
path: corresponding path in data, flattened, e.g. thread_pool.bulk.queue
"""
value = data
# Traverse the nested dictionaries
for key in path.split('.'):
if value is not None:
value = value.get(key, None)
else:
break
if value is not None:
if xtype == "gauge":
self.gauge(metric, value, tags=tags, hostname=hostname)
else:
self.rate(metric, value, tags=tags, hostname=hostname)
else:
self._metric_not_found(metric, path)
def _metric_not_found(self, metric, path):
    """Log (debug-level only) that ``path`` was absent from the payload."""
    self.log.debug("Metric not found: %s -> %s", path, metric)
| 47.06993 | 120 | 0.652206 |
0ec1e2cff290718c0139d639e364dc6546b485ad | 8,566 | py | Python | hypergan/cli.py | Darkar25/HyperGAN | 76ef7e0c20569ceece88dc76396d92c77050692b | [
"MIT"
] | null | null | null | hypergan/cli.py | Darkar25/HyperGAN | 76ef7e0c20569ceece88dc76396d92c77050692b | [
"MIT"
] | null | null | null | hypergan/cli.py | Darkar25/HyperGAN | 76ef7e0c20569ceece88dc76396d92c77050692b | [
"MIT"
] | null | null | null | """
The command line interface. Trains a directory of data.
"""
import gc
import sys
import os
import hyperchamber as hc
import tensorflow as tf
from hypergan.gan_component import ValidationException
from .inputs import *
from .viewer import GlobalViewer
from .configuration import Configuration
import hypergan as hg
import time
import os
import shutil
import sys
from hypergan.losses.supervised_loss import SupervisedLoss
from hypergan.multi_component import MultiComponent
from time import sleep
class CLI:
    """Command-line driver for hypergan.

    Dispatches the configured ``method`` -- ``train``, ``build``, ``sample``,
    ``new`` or ``test`` -- against a GAN instance and manages checkpointing,
    sample generation, and the global viewer window.
    """

    def __init__(self, gan, args=None):
        """Wire this CLI to ``gan`` and resolve paths/options from ``args``.

        BUG FIX: ``args`` previously defaulted to a shared mutable dict
        (``args={}``); it now defaults to None (same effective behavior).
        """
        self.samples = 0
        self.steps = 0
        self.gan = gan
        if gan is not None:
            self.gan.cli = self
        args = hc.Config(args if args is not None else {})
        self.args = args
        self.config_name = self.args.config or 'default'
        self.method = args.method or 'test'
        self.total_steps = args.steps or -1
        self.sample_every = self.args.sample_every or 100
        self.sampler_name = args.sampler
        self.sampler = None
        self.validate()
        self.advSavePath = os.path.abspath("saves/" + self.config_name) + "/"
        if self.args.save_file:
            self.save_file = self.args.save_file
        else:
            default_save_path = os.path.abspath("saves/" + self.config_name)
            self.save_file = default_save_path + "/model.ckpt"
            self.create_path(self.save_file)
        if self.gan is not None:
            self.gan.save_file = self.save_file
        # Configure the shared sample-viewer window.
        title = "[hypergan] " + self.config_name
        GlobalViewer.enable_menu = self.args.menu
        GlobalViewer.title = title
        GlobalViewer.viewer_size = self.args.viewer_size
        GlobalViewer.enabled = self.args.viewer
        GlobalViewer.zoom = self.args.zoom

    def sample(self, allow_save=True):
        """Samples to a file. Useful for visualizing the learning process.

        If allow_save is False then saves will not be created.

        Use with:
          ffmpeg -i samples/grid-%06d.png -vcodec libx264 -crf 22 -threads 0 grid1-7.mp4
        to create a video of the learning process.
        """
        sample_file = "samples/%s/%06d.png" % (self.config_name, self.samples)
        self.create_path(sample_file)
        self.lazy_create()
        sample_list = self.sampler.sample(sample_file, allow_save and self.args.save_samples)
        self.samples += 1
        return sample_list

    def validate(self):
        """Configuration validation hook; subclasses may override."""
        return True

    def lazy_create(self):
        """Instantiate the sampler on first use."""
        if self.sampler is None:
            self.sampler = self.gan.sampler_for(self.sampler_name)(self.gan)
        if self.sampler is None:
            raise ValidationException("No sampler found by the name '" + self.sampler_name + "'")

    def step(self):
        """Run one training step, handling GAN self-replacement and sampling."""
        bgan = self.gan
        self.gan.step()
        if bgan.destroy:
            # The GAN asked to be replaced; swap in the new instance and
            # aggressively free the old TensorFlow graph.
            self.sampler = None
            self.gan = self.gan.newgan
            gc.collect()
            refs = gc.get_referrers(bgan)  # NOTE(review): unused — confirm whether needed
            d = bgan.trainer._delegate  # NOTE(review): unused — confirm whether needed
            bgan.trainer = None
            gc.collect()
            del bgan
            tf.reset_default_graph()
            gc.collect()
        if self.steps % self.sample_every == 0:
            self.sample()
        self.steps += 1

    def create_path(self, filename):
        """Ensure the parent directory of ``filename`` exists."""
        return os.makedirs(os.path.expanduser(os.path.dirname(filename)), exist_ok=True)

    def build(self):
        """Export the model for deployment."""
        return self.gan.build()

    def serve(self, gan):
        # NOTE(review): `gan_server` and `config` are undefined in this file;
        # this method appears to be dead/broken code — confirm before use.
        return gan_server(self.gan.session, config)

    def sample_forever(self):
        """Generate samples in a loop until the GAN is destroyed."""
        while not self.gan.destroy:
            self.sample()
            GlobalViewer.tick()

    def train(self):
        """Main training loop: step, refresh viewer, periodically save."""
        i = 0
        if self.args.ipython:
            # Put stdin into non-blocking mode so we can poll for an
            # interactive break request between steps.
            import fcntl
            fd = sys.stdin.fileno()
            fl = fcntl.fcntl(fd, fcntl.F_GETFL)
            fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        while (i < self.total_steps or self.total_steps == -1) and not self.gan.destroy:
            i += 1
            self.step()
            GlobalViewer.tick()
            if (self.args.save_every is not None and
                    self.args.save_every != -1 and
                    self.args.save_every > 0 and
                    i % self.args.save_every == 0):
                print(" |= Saving network")
                self.gan.save(self.save_file)
                # Persist step/sample counters so numbering resumes correctly.
                self.create_path(self.advSavePath + 'advSave.txt')
                with open(self.advSavePath + 'advSave.txt', 'w') as the_file:
                    the_file.write(str(self.steps) + "\n")
                    the_file.write(str(self.samples) + "\n")
            if self.args.ipython:
                self.check_stdin()

    def check_stdin(self):
        """Drop into an IPython shell unless the user typed 'y'."""
        try:
            input = sys.stdin.read()
            if input[0] == "y":
                return
            from IPython import embed
            # Misc code
            embed()
        except Exception:
            # BUG FIX: was a bare `except:`.  A non-blocking stdin with no
            # pending data raises here, which we deliberately ignore.
            return

    def new(self):
        """Create a new configuration file based off the current one."""
        template = self.args.directory + '.json'
        print("[hypergan] Creating new configuration file '" + template + "' based off of '" + self.config_name + ".json'")
        if os.path.isfile(template):
            raise ValidationException("File exists: " + template)
        source_configuration = Configuration.find(self.config_name + ".json")
        shutil.copyfile(source_configuration, template)
        return

    def add_supervised_loss(self):
        """Optionally wrap the GAN loss with a supervised class loss."""
        if self.args.classloss:
            print("[discriminator] Class loss is on. Semi-supervised learning mode activated.")
            supervised_loss = SupervisedLoss(self.gan, self.gan.config.loss)
            self.gan.loss = MultiComponent(components=[supervised_loss, self.gan.loss], combine='add')
            # EWW
        else:
            print("[discriminator] Class loss is off. Unsupervised learning mode activated.")

    def _load_counters(self):
        """Restore step/sample counters persisted by train()."""
        with open(self.advSavePath + 'advSave.txt', 'r') as the_file:
            content = [x.strip() for x in the_file]
        self.steps = int(content[0])
        self.samples = int(content[1])

    def run(self):
        """Dispatch on self.method: train / build / new / sample / test."""
        if self.method == 'train':
            self.add_supervised_loss()  # TODO I think this is broken now (after moving create out)
            self.gan.session.run(tf.global_variables_initializer())
            if not self.gan.load(self.save_file):
                print("Initializing new model")
            else:
                print("Model loaded")
            self.train()
            self.gan.save(self.save_file)
            tf.reset_default_graph()
            self.gan.session.close()
        elif self.method == 'build':
            if not self.gan.load(self.save_file):
                # BUG FIX: was `raise "Could not load model: " + save_file`
                # — raising a str is a TypeError on Python 3, and
                # `save_file` (without self.) was undefined.
                raise ValidationException("Could not load model: " + self.save_file)
            else:
                self._load_counters()
                print("Model loaded")
            self.build()
        elif self.method == 'new':
            self.new()
        elif self.method == 'sample':
            self.add_supervised_loss()
            if not self.gan.load(self.save_file):
                print("Initializing new model")
            else:
                self._load_counters()
                print("Model loaded")
            tf.train.start_queue_runners(sess=self.gan.session)
            self.sample_forever()
            tf.reset_default_graph()
            self.gan.session.close()
        elif self.method == 'test':
            print("Hooray!")
            print("Hypergan is installed correctly. Testing tensorflow for GPU support.")
            with tf.Session() as sess:
                devices = sess.list_devices()
                if not tf.test.gpu_device_name():
                    print("Warning: no default GPU device available")
                    allgood = False
                else:
                    print("Default GPU is available")
                    allgood = True
                    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
                print("Current available tensorflow devices:")
                for device in devices:
                    print(device)
                if allgood:
                    print("Congratulations! Tensorflow and hypergan both look installed correctly. If you still experience issues come let us know on discord.")
                else:
                    print("There were errors in the test, please see the logs")
| 34.540323 | 158 | 0.576232 |
2c1fda7414cb1edbd24f123fba528fa9a889099c | 4,017 | py | Python | research/audio/fcn-4/train.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 1 | 2021-11-18T08:17:44.000Z | 2021-11-18T08:17:44.000Z | research/audio/fcn-4/train.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | null | null | null | research/audio/fcn-4/train.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 2 | 2019-09-01T06:17:04.000Z | 2019-10-04T08:39:45.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''
##############train models#################
python train.py
'''
import os
from mindspore import context, nn
from mindspore.train import Model
from mindspore.common import set_seed
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from src.model_utils.device_adapter import get_device_id
from src.model_utils.config import config
from src.model_utils.moxing_adapter import moxing_wrapper
from src.dataset import create_dataset
from src.musictagger import MusicTaggerCNN
from src.loss import BCELoss
def modelarts_pre_process():
    # Hook run by moxing_wrapper before training when launched on ModelArts
    # (Huawei cloud); nothing is needed for local runs.
    pass
@moxing_wrapper(pre_process=modelarts_pre_process)
def train(model, dataset_direct, filename, columns_list, num_consumer=4,
          batch=16, epoch=50, save_checkpoint_steps=2172, keep_checkpoint_max=50,
          prefix="model", directory='./'):
    """
    train network

    Args:
        model: compiled MindSpore Model to train.
        dataset_direct: directory containing the training data.
        filename: data file name inside ``dataset_direct``.
        columns_list: dataset columns to feed, e.g. ['feature', 'label'].
        num_consumer: number of parallel data-reading workers.
        batch: batch size.
        epoch: number of training epochs.
        save_checkpoint_steps / keep_checkpoint_max: checkpointing policy.
        prefix / directory: checkpoint file prefix and output directory.
    """
    config_ck = CheckpointConfig(save_checkpoint_steps=save_checkpoint_steps,
                                 keep_checkpoint_max=keep_checkpoint_max)
    ckpoint_cb = ModelCheckpoint(prefix=prefix,
                                 directory=directory,
                                 config=config_ck)
    data_train = create_dataset(dataset_direct, filename, batch, columns_list,
                                num_consumer)
    # LossMonitor prints every 181 steps (presumably one epoch of this
    # dataset at the default batch size — confirm).
    model.train(epoch, data_train, callbacks=[ckpoint_cb, \
                LossMonitor(per_print_times=181), TimeMonitor()], dataset_sink_mode=True)
if __name__ == "__main__":
    # Fix the RNG seed for reproducible training runs.
    set_seed(1)
    config.checkpoint_path = os.path.abspath(config.checkpoint_path)
    context.set_context(device_target=config.device_target, mode=context.GRAPH_MODE)
    if config.device_target == 'Ascend':
        context.set_context(device_id=get_device_id())
    # FCN-4 music tagger: channel widths, 3x3 kernels, and per-stage max-pool
    # shapes as configured below.
    network = MusicTaggerCNN(in_classes=[1, 128, 384, 768, 2048],
                             kernel_size=[3, 3, 3, 3, 3],
                             padding=[0] * 5,
                             maxpool=[(2, 4), (4, 5), (3, 8), (4, 8)],
                             has_bias=True)
    # Optionally warm-start from a pre-trained checkpoint.
    if config.pre_trained:
        param_dict = load_checkpoint(config.checkpoint_path + '/' +
                                     config.model_name)
        load_param_into_net(network, param_dict)
    net_loss = BCELoss()
    network.set_train(True)
    net_opt = nn.Adam(params=network.trainable_params(),
                      learning_rate=config.lr,
                      loss_scale=config.loss_scale)
    # Fixed loss scaling; overflowing steps are not dropped.
    loss_scale_manager = FixedLossScaleManager(loss_scale=config.loss_scale,
                                               drop_overflow_update=False)
    net_model = Model(network, net_loss, net_opt, loss_scale_manager=loss_scale_manager)
    train(model=net_model,
          dataset_direct=config.data_dir,
          filename=config.train_filename,
          columns_list=['feature', 'label'],
          num_consumer=config.num_consumer,
          batch=config.batch_size,
          epoch=config.epoch_size,
          save_checkpoint_steps=config.save_step,
          keep_checkpoint_max=config.keep_checkpoint_max,
          prefix=config.prefix,
          directory=config.checkpoint_path)  # + "_{}".format(get_device_id())
    print("train success")
| 39.772277 | 96 | 0.66343 |
ec66aebc6065646e90154b9044f8a85568fea164 | 4,531 | py | Python | process/generate_2d_shapes.py | irom-lab/DRAGEN | 2fcb7e7c56fc5f72a2e5649903fe5aa993f253ec | [
"MIT"
] | 2 | 2021-07-16T21:11:58.000Z | 2021-07-20T12:39:34.000Z | process/generate_2d_shapes.py | irom-lab/DRAGEN | 2fcb7e7c56fc5f72a2e5649903fe5aa993f253ec | [
"MIT"
] | null | null | null | process/generate_2d_shapes.py | irom-lab/DRAGEN | 2fcb7e7c56fc5f72a2e5649903fe5aa993f253ec | [
"MIT"
] | null | null | null | import numpy as np
from glob import glob
import trimesh
from shapely.geometry import Polygon
from util.mesh import saveURDF
def main():
    """Generate `num_object` random 2D extruded meshes (rectangles, ellipses,
    or triangles), centered at the origin, exporting each as an .stl plus a
    matching URDF.
    """
    num_object = 100
    HEIGHT = 0.05  # fixed height for all 2D objects
    generate_train = True  # True -> training size ranges; False -> (wider) test ranges
    generate_shape = 0  # 0 for rectangle; 1 for ellipse; 2 for triangle
    save_folder_name = ''  # TODO: specify save path
    # Generate rectangles
    if generate_shape == 0:
        if generate_train:
            side_range = [0.03,0.06]
        else:
            side_range = [0.02,0.07]
        length_all = np.random.uniform(low=side_range[0], high=side_range[1], size=(num_object,))
        width_all = np.random.uniform(low=side_range[0], high=side_range[1], size=(num_object,))
        for obj_ind in range(num_object):
            length = length_all[obj_ind]
            width = width_all[obj_ind]
            # Corners counter-clockwise, centered on the origin.
            x0 = (-length/2.,-width/2.)  # left bottom
            x1 = (-length/2., width/2)
            x2 = (length/2, width/2)
            x3 = (length/2, -width/2)
            verts = [x0, x1, x2, x3]
            box_polygon = Polygon(verts)
            box_mesh = trimesh.creation.extrude_polygon(box_polygon, height=HEIGHT)
            # Make centroid as origin (extrusion starts at z=0; shift down).
            align_matrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0],
                                     [0, 0, 1, -HEIGHT/2],
                                     [0, 0, 0, 1]])
            box_mesh.apply_transform(align_matrix)
            # Save as stl
            box_mesh.export(save_folder_name + str(obj_ind) + '.stl')
            # Save URDF
            saveURDF(path=save_folder_name, urdfName=str(obj_ind), meshName=str(obj_ind)+'.stl', objMass=0.1, x_scale=1, y_scale=1, z_scale=1)
    # Generate ellipses
    elif generate_shape == 1:
        if generate_train:
            x_radius_range = [0.01, 0.05]
            y_radius_range = [0.01, 0.03]
        else:
            x_radius_range = [0.03, 0.06]
            y_radius_range = [0.02, 0.035]
        x_radius_all = np.random.uniform(low=x_radius_range[0], high=x_radius_range[1], size=(num_object,))
        y_radius_all = np.random.uniform(low=y_radius_range[0], high=y_radius_range[1], size=(num_object,))
        for obj_ind in range(num_object):
            x_radius = x_radius_all[obj_ind]
            y_radius = y_radius_all[obj_ind]
            # Approximate the ellipse boundary with 300 samples.
            angles = np.linspace(start=-np.pi, stop=np.pi, num=300)
            x_y = np.vstack((x_radius*np.cos(angles), y_radius*np.sin(angles))).T
            ellipse_polygon = Polygon([tuple(point) for point in x_y])
            ellipse_mesh = trimesh.creation.extrude_polygon(ellipse_polygon, height=HEIGHT)
            # Center
            align_matrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0],
                                     [0, 0, 1, -HEIGHT/2],
                                     [0, 0, 0, 1]])
            ellipse_mesh.apply_transform(align_matrix)
            # Save as stl
            ellipse_mesh.export(save_folder_name + str(obj_ind) + '.stl')
            # Save URDF
            saveURDF(path=save_folder_name, urdfName=str(obj_ind), meshName=str(obj_ind)+'.stl', objMass=0.1, x_scale=1, y_scale=1, z_scale=1)
    # Generate triangles
    elif generate_shape == 2:
        if generate_train:
            a1_range = [45,90]
            a2_range = [45,60]
            l1_range = [0.03, 0.07]  # oppsite to a1 angle
        else:
            a1_range = [60,90]
            a2_range = [20,45]
            l1_range = [0.04, 0.08]
        a1_all = np.random.uniform(low=a1_range[0], high=a1_range[1], size=(num_object,))*np.pi/180
        a2_all = np.random.uniform(low=a2_range[0], high=a2_range[1], size=(num_object,))*np.pi/180
        # NOTE(review): triangle interior angles sum to pi, not 2*pi.  The
        # sign errors introduced by `2*np.pi - a1 - a2` cancel in the
        # sin/cos ratios below (law of sines still holds), but
        # `np.pi - a1_all - a2_all` would be clearer — confirm.
        a3_all = 2*np.pi-a1_all-a2_all
        l1_all = np.random.uniform(low=l1_range[0], high=l1_range[1], size=(num_object,))
        l2_all = l1_all*np.sin(a1_all)/(np.sin(a1_all)*np.cos(a3_all)+np.sin(a3_all)*np.cos(a1_all))
        l3_all = l1_all*np.sin(a3_all)/(np.sin(a1_all)*np.cos(a3_all)+np.sin(a3_all)*np.cos(a1_all))
        for obj_ind in range(num_object):
            l1 = l1_all[obj_ind]
            l2 = l2_all[obj_ind]  # NOTE(review): unused below — confirm
            l3 = l3_all[obj_ind]
            a1 = a1_all[obj_ind]
            a2 = a2_all[obj_ind]
            # Place the incenter-ish reference at the origin, then the two
            # remaining vertices from side lengths and angles.
            x0 = (-np.tan(a2/2)*l1/(np.tan(a1/2)+np.tan(a2/2)),-np.tan(a1/2)*np.tan(a2/2)*l1/(np.tan(a1/2)+np.tan(a2/2)))  # left bottom
            x1 = (x0[0]+l3*np.cos(a1), abs(x0[1]+l3*np.sin(a1)))
            x2 = (l1+x0[0], x0[1])
            verts = [x0, x1, x2]
            triangle_polygon = Polygon(verts)
            triangle_mesh = trimesh.creation.extrude_polygon(triangle_polygon, height=HEIGHT)
            # Center (bounding-box center to origin)
            xy_center = (triangle_mesh.bounds[1,:2] - triangle_mesh.bounds[0,:2])/2
            offset = -(triangle_mesh.bounds[0,:2]+xy_center)
            align_matrix = np.array([[1, 0, 0, offset[0]],
                                     [0, 1, 0, offset[1]],
                                     [0, 0, 1, -HEIGHT/2],
                                     [0, 0, 0, 1]])
            triangle_mesh.apply_transform(align_matrix)
            # Save as stl
            triangle_mesh.export(save_folder_name + str(obj_ind) + '.stl')
            # Save URDF
            saveURDF(path=save_folder_name, urdfName=str(obj_ind), meshName=str(obj_ind)+'.stl', objMass=0.1, x_scale=1, y_scale=1, z_scale=1)
# Script entry point.
if __name__ == '__main__':
    main()
| 34.853846 | 133 | 0.665857 |
e4b75e505a58b83c48eef56a0ea8c3cb85c88e11 | 2,095 | py | Python | app/models.py | ZhongChao1011/ops | 70d49e338b84077f94933fa467331de32c977a16 | [
"Apache-2.0"
] | null | null | null | app/models.py | ZhongChao1011/ops | 70d49e338b84077f94933fa467331de32c977a16 | [
"Apache-2.0"
] | null | null | null | app/models.py | ZhongChao1011/ops | 70d49e338b84077f94933fa467331de32c977a16 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2017/12/18
@author: ChaoZhong
@email: 16796679@qq.com
'''
import datetime
from uuid import uuid1
from app import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import login_manager
class Users(UserMixin, db.Model):
    """Application login user; passwords are stored only as salted hashes."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, nullable=False)
    # NOTE(review): unique=True on the hash means two accounts can never share
    # an identical hash; harmless with salted hashes but likely unintentional.
    password_hash = db.Column(db.String(100), unique=True, nullable=False)

    @property
    def password(self):
        # Write-only attribute: the plaintext password is never readable.
        raise AttributeError('Password is not a readable attribute')

    @password.setter
    def password(self, password):
        # Store only the salted hash of the supplied plaintext.
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if ``password`` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def __unicode__(self):
        return self.username
class Hosts(db.Model):
    """Inventory record for a single host."""
    __tablename__ = 'hosts'
    __searchable__ = ['hostname', 'wip', 'iip']
    id = db.Column(db.Integer, primary_key=True)
    hostname = db.Column(db.String(100), unique=True, nullable=False)
    wip = db.Column(db.String(100), unique=True, nullable=False)  # presumably public/WAN IP — confirm
    iip = db.Column(db.String(100), unique=True, nullable=False)  # presumably internal IP — confirm
    idc = db.Column(db.String(100), nullable=False)
    # BUG FIX: `default=datetime.datetime.now()` called now() once at import
    # time, stamping every row with the process start time.  Passing the
    # callable makes SQLAlchemy evaluate it per insert.
    start_time = db.Column(db.DateTime, default=datetime.datetime.now)
    cat_id = db.Column(db.String(100), db.ForeignKey('category.id'))

    def __unicode__(self):
        return '<Host %s>' % self.hostname
class Category(db.Model):
    """Host category; one category groups many hosts."""
    __tablename__ = 'category'
    # BUG FIX: `default=str(uuid1())` evaluated the UUID once at import time,
    # so every row received the same default primary key (the second insert
    # would violate the PK constraint).  A callable is evaluated per insert.
    id = db.Column(db.String(100), primary_key=True, default=lambda: str(uuid1()))
    cat_name = db.Column(db.String(100), unique=True, nullable=False)
    # BUG FIX: same import-time-evaluation bug as above — pass the callable.
    start_time = db.Column(db.DateTime, default=datetime.datetime.now)
    hosts = db.relationship('Hosts', backref='category', lazy='dynamic')

    def __unicode__(self):
        return '<Category %s>' % self.cat_name
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a session user id to a Users row."""
    return Users.query.get(int(user_id))
| 30.808824 | 74 | 0.704057 |
9b7bf0d08533551135d6484105363223b0db6cf9 | 557 | py | Python | py_learning_9_8_selenium/selenium_test.py | Mloser-z/Python_Learning | c0b663faec17349547c8089fc2a918153b675ae9 | [
"Apache-2.0"
] | null | null | null | py_learning_9_8_selenium/selenium_test.py | Mloser-z/Python_Learning | c0b663faec17349547c8089fc2a918153b675ae9 | [
"Apache-2.0"
] | null | null | null | py_learning_9_8_selenium/selenium_test.py | Mloser-z/Python_Learning | c0b663faec17349547c8089fc2a918153b675ae9 | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time
if __name__ == "__main__":
    # NOTE(review): hard-coded Windows driver path; consider a driver manager.
    driver = webdriver.Chrome(executable_path="D:/chromedriver/chromedriver_version_96/chromedriver.exe")
    try:
        driver.get("https://baidu.com")
        assert u'百度' in driver.title
        elem = driver.find_element(By.ID, "wd")
        elem.clear()
        elem.send_keys(u"网络爬虫")
        elem.send_keys(Keys.RETURN)
        time.sleep(3)
        # NOTE(review): the original asserts the query text is ABSENT from the
        # results page source — verify this is the intended check.
        assert u"网络爬虫" not in driver.page_source
    finally:
        # BUG FIX: the original only closed the browser on success, leaking
        # the Chrome/chromedriver processes when an assertion failed.
        # quit() ends the whole WebDriver session (close() only closes the
        # current window).
        driver.quit()
| 32.764706 | 106 | 0.710952 |
aaed36f9a8505c0b72fdaa2f79875c6b99a26f74 | 1,162 | py | Python | portapp/migrations/0010_auto_20210102_2349.py | nmshohel/portfolio | f37090479c85d307a1c8cb7d95cce4172a1f63c4 | [
"MIT"
] | null | null | null | portapp/migrations/0010_auto_20210102_2349.py | nmshohel/portfolio | f37090479c85d307a1c8cb7d95cce4172a1f63c4 | [
"MIT"
] | null | null | null | portapp/migrations/0010_auto_20210102_2349.py | nmshohel/portfolio | f37090479c85d307a1c8cb7d95cce4172a1f63c4 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2021-01-02 20:49
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('portapp', '0009_auto_20210102_2341'),
]
operations = [
migrations.AlterField(
model_name='posthome3',
name='content_2',
field=ckeditor.fields.RichTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='posthome3',
name='content_3',
field=ckeditor.fields.RichTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='posthome4',
name='who_content',
field=ckeditor.fields.RichTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='story_bolumu',
name='story_content',
field=ckeditor.fields.RichTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='work_bolumu',
name='work_content',
field=ckeditor.fields.RichTextField(blank=True, null=True),
),
]
| 29.05 | 71 | 0.590361 |
869353189d58950843674766848b52d7f26df0f8 | 2,881 | py | Python | azure-mgmt-web/azure/mgmt/web/models/worker_pool_resource.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure/mgmt/web/models/worker_pool_resource.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure/mgmt/web/models/worker_pool_resource.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class WorkerPoolResource(ProxyOnlyResource):
    """Worker pool of an App Service Environment ARM resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param worker_size_id: Worker size ID for referencing this worker pool.
    :type worker_size_id: int
    :param compute_mode: Shared or dedicated app hosting. Possible values
     include: 'Shared', 'Dedicated', 'Dynamic'
    :type compute_mode: str or ~azure.mgmt.web.models.ComputeModeOptions
    :param worker_size: VM size of the worker pool instances.
    :type worker_size: str
    :param worker_count: Number of instances in the worker pool.
    :type worker_count: int
    :ivar instance_names: Names of all instances in the worker pool (read
     only).
    :vartype instance_names: list[str]
    :param sku:
    :type sku: ~azure.mgmt.web.models.SkuDescription
    """

    # Fields marked readonly are populated by the service and rejected on input.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'instance_names': {'readonly': True},
    }

    # Maps Python attribute names to their wire (JSON) paths and types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'worker_size_id': {'key': 'properties.workerSizeId', 'type': 'int'},
        'compute_mode': {'key': 'properties.computeMode', 'type': 'ComputeModeOptions'},
        'worker_size': {'key': 'properties.workerSize', 'type': 'str'},
        'worker_count': {'key': 'properties.workerCount', 'type': 'int'},
        'instance_names': {'key': 'properties.instanceNames', 'type': '[str]'},
        'sku': {'key': 'sku', 'type': 'SkuDescription'},
    }

    def __init__(self, kind=None, worker_size_id=None, compute_mode=None, worker_size=None, worker_count=None, sku=None):
        """Initialize the worker pool; see the class docstring for semantics."""
        super(WorkerPoolResource, self).__init__(kind=kind)
        self.worker_size_id = worker_size_id
        self.compute_mode = compute_mode
        self.worker_size = worker_size
        self.worker_count = worker_count
        # instance_names is server-populated (read-only); always starts None.
        self.instance_names = None
        self.sku = sku
| 39.465753 | 121 | 0.618188 |
bf1c8b799c94d54b95c63eb70798b75e0943d513 | 418 | py | Python | Objektorientering/forelesning0110.py | Williamskaug/IN1000 | 164d0b4abd3314c440210f9582d581973c5fcfee | [
"MIT"
] | null | null | null | Objektorientering/forelesning0110.py | Williamskaug/IN1000 | 164d0b4abd3314c440210f9582d581973c5fcfee | [
"MIT"
] | null | null | null | Objektorientering/forelesning0110.py | Williamskaug/IN1000 | 164d0b4abd3314c440210f9582d581973c5fcfee | [
"MIT"
] | null | null | null | class Student:
def __init__(self, navn):
self._antMott = 0
self._navn = navn
def registrer(self):
self._antMott = self._antMott + 1
def hentOppmøte(self):
return self._antMott
def hentNavn(self):
return self._navn
# Demo: register one attendance for a student, then print the tally and name.
student = Student("William")
student.registrer()
attendance = student.hentOppmøte()
name = student.hentNavn()
print(attendance)
print(name)
ef3c12a48a6059f67b9f7c4d8b695d8e74e3c0b2 | 16,097 | py | Python | intensity_normalization/normalize/ravel.py | AlaSummer/intensity-normalization | 4481b52c35041eb564bd3e7c7d1b89c1604410c6 | [
"Apache-2.0"
] | null | null | null | intensity_normalization/normalize/ravel.py | AlaSummer/intensity-normalization | 4481b52c35041eb564bd3e7c7d1b89c1604410c6 | [
"Apache-2.0"
] | null | null | null | intensity_normalization/normalize/ravel.py | AlaSummer/intensity-normalization | 4481b52c35041eb564bd3e7c7d1b89c1604410c6 | [
"Apache-2.0"
] | null | null | null | """RAVEL normalization (WhiteStripe then CSF correction)
Author: Jacob Reinhold <jcreinhold@gmail.com>
Created on: Jun 02, 2021
"""
from __future__ import annotations
__all__ = ["RavelNormalize"]
import argparse
import builtins
import collections.abc
import functools
import logging
import operator
import pathlib
import typing
import numpy as np
import numpy.typing as npt
import pymedio.image as mioi
import scipy.sparse
import scipy.sparse.linalg
import intensity_normalization.errors as intnorme
import intensity_normalization.normalize.base as intnormb
import intensity_normalization.normalize.whitestripe as intnormws
import intensity_normalization.typing as intnormt
import intensity_normalization.util.io as intnormio
import intensity_normalization.util.tissue_membership as intnormtm
logger = logging.getLogger(__name__)

# ANTsPy is a hard requirement for RAVEL (used for co-registration and the
# MNI template); fail fast with an actionable message if it is missing.
try:
    import ants
except ImportError as ants_imp_exn:
    msg = "ANTsPy not installed. Install antspyx to use RAVEL."
    raise RuntimeError(msg) from ants_imp_exn
else:
    from intensity_normalization.util.coregister import register, to_ants
class RavelNormalize(intnormb.DirectoryNormalizeCLI):
def __init__(
    self,
    *,
    membership_threshold: builtins.float = 0.99,
    register: builtins.bool = True,
    num_unwanted_factors: builtins.int = 1,
    sparse_svd: builtins.bool = False,
    whitestripe_kwargs: builtins.dict[builtins.str, typing.Any] | None = None,
    quantile_to_label_csf: builtins.float = 1.0,
    masks_are_csf: builtins.bool = False,
):
    """Configure RAVEL normalization.

    Args:
        membership_threshold: CSF tissue-membership probability above which
            a voxel is treated as a CSF control voxel (see _find_csf_mask).
        register: co-register images to the template; incompatible with
            ``masks_are_csf`` (which assumes pre-registered inputs).
        num_unwanted_factors: number of unwanted variation factors to remove.
        sparse_svd: use a sparse SVD solver for the factor decomposition
            (presumably scipy.sparse.linalg — confirm in _ravel_correction).
        whitestripe_kwargs: forwarded to the WhiteStripe step (used
            downstream; not visible in this view — confirm).
        quantile_to_label_csf: quantile used when labeling CSF (used
            downstream; not visible in this view — confirm).
        masks_are_csf: the provided masks are already CSF masks; implies
            images are co-registered.
    """
    super().__init__()
    self.membership_threshold = membership_threshold
    self.register = register
    self.num_unwanted_factors = num_unwanted_factors
    self.sparse_svd = sparse_svd
    self.whitestripe_kwargs = whitestripe_kwargs or dict()
    self.quantile_to_label_csf = quantile_to_label_csf
    self.masks_are_csf = masks_are_csf
    if register and masks_are_csf:
        msg = "If 'masks_are_csf', then images are assumed to be co-registered."
        raise ValueError(msg)
    # Lazily populated working state.
    self._template: intnormt.ImageLike | None = None
    self._template_mask: intnormt.ImageLike | None = None
    self._normalized: intnormt.ImageLike | None = None
    self._control_masks: builtins.list[intnormt.ImageLike] = []
def normalize_image(
self,
image: intnormt.ImageLike,
/,
mask: intnormt.ImageLike | None = None,
*,
modality: intnormt.Modalities = intnormt.Modalities.T1,
) -> intnormt.ImageLike:
return NotImplemented
def teardown(self) -> None:
del self._normalized
self._normalized = None
@property
def template(self) -> ants.ANTsImage | None:
return self._template
@property
def template_mask(self) -> ants.ANTsImage | None:
return self._template_mask
def set_template(
self,
template: intnormt.ImageLike | ants.ANTsImage,
) -> None:
self._template = to_ants(template)
def set_template_mask(
self,
template_mask: intnormt.ImageLike | ants.ANTsImage | None,
) -> None:
if template_mask is None:
self._template_mask = None
else:
if hasattr(template_mask, "astype"):
template_mask = template_mask.astype(np.uint32) # type: ignore[union-attr] # noqa: E501
self._template_mask = to_ants(template_mask)
def use_mni_as_template(self) -> None:
standard_mni = ants.get_ants_data("mni")
self.set_template(ants.image_read(standard_mni))
assert self.template is not None
self.set_template_mask(self.template > 0.0)
def _find_csf_mask(
self,
image: intnormt.ImageLike,
/,
mask: intnormt.ImageLike | None = None,
*,
modality: intnormt.Modalities = intnormt.Modalities.T1,
) -> intnormt.ImageLike:
if self.masks_are_csf:
if mask is None:
raise ValueError("'mask' must be defined if masks are CSF masks.")
return mask
elif modality != intnormt.Modalities.T1:
msg = "Non-T1-w RAVEL normalization w/o CSF masks not supported."
raise NotImplementedError(msg)
tissue_membership = intnormtm.find_tissue_memberships(image, mask)
csf_mask: npt.NDArray = tissue_membership[..., 0] > self.membership_threshold
# convert to integer for intersection
csf_mask = csf_mask.astype(np.uint32)
return csf_mask
@staticmethod
def _ravel_correction(
control_voxels: npt.NDArray, unwanted_factors: npt.NDArray
) -> npt.NDArray:
"""Correct control voxels by removing trend from unwanted factors
Args:
control_voxels: rows are voxels, columns are images
(see V matrix in the paper)
unwanted_factors: unwanted factors
(see Z matrix in the paper)
Returns:
normalized: normalized images
"""
logger.debug("Performing RAVEL correction.")
logger.debug(f"Unwanted factors shape: {unwanted_factors.shape}.")
logger.debug(f"Control voxels shape: {control_voxels.shape}.")
beta = np.linalg.solve(
unwanted_factors.T @ unwanted_factors,
unwanted_factors.T @ control_voxels.T,
)
fitted = (unwanted_factors @ beta).T
residuals: np.ndarray = control_voxels - fitted
voxel_means = np.mean(control_voxels, axis=1, keepdims=True)
normalized: npt.NDArray = residuals + voxel_means
return normalized
def _register(self, image: ants.ANTsImage) -> npt.NDArray:
registered = register(
image,
template=self.template,
type_of_transform="SyN",
interpolator="linear",
template_mask=self.template_mask,
)
out: npt.NDArray = registered.numpy()
return out
def create_image_matrix_and_control_voxels(
self,
images: collections.abc.Sequence[intnormt.ImageLike],
/,
masks: collections.abc.Sequence[intnormt.ImageLike] | None = None,
*,
modality: intnormt.Modalities = intnormt.Modalities.T1,
) -> builtins.tuple[npt.NDArray, npt.NDArray]:
"""creates an matrix of images; rows correspond to voxels, columns are images
Args:
images: list of MR images of interest
masks: list of corresponding brain masks
modality: modality of the set of images (e.g., t1)
Returns:
image_matrix: rows are voxels, columns are images
control_voxels: rows are csf intersection voxels, columns are images
"""
n_images = len(images)
image_shapes = [image.shape for image in images]
image_shape = image_shapes[0]
image_size = int(np.prod(image_shape))
assert all([shape == image_shape for shape in image_shapes])
image_matrix = np.zeros((image_size, n_images))
whitestripe_norm = intnormws.WhiteStripeNormalize(**self.whitestripe_kwargs)
self._control_masks = [] # reset control masks to prevent run-to-run issues
registered_images = []
for i, (image, mask) in enumerate(intnormio.zip_with_nones(images, masks), 1):
image_ws = whitestripe_norm(image, mask)
image_matrix[:, i - 1] = image_ws.flatten()
logger.info(f"Processing image {i}/{n_images}.")
if i == 1 and self.template is None:
logger.debug("Setting template to first image.")
self.set_template(image)
self.set_template_mask(mask)
logger.debug("Finding CSF mask.")
# csf found on original b/c assume foreground positive
csf_mask = self._find_csf_mask(image, mask, modality=modality)
self._control_masks.append(csf_mask)
if self.register:
registered_images.append(image_ws)
else:
if self.register:
logger.debug("Deformably co-registering image to template.")
image = to_ants(image)
image = self._register(image)
image_ws = whitestripe_norm(image, mask)
registered_images.append(image_ws)
logger.debug("Finding CSF mask.")
csf_mask = self._find_csf_mask(image, mask, modality=modality)
self._control_masks.append(csf_mask)
control_mask_sum = functools.reduce(operator.add, self._control_masks)
threshold = np.floor(len(self._control_masks) * self.quantile_to_label_csf)
intersection: intnormt.ImageLike = control_mask_sum >= threshold
num_control_voxels = int(intersection.sum())
if num_control_voxels == 0:
msg = "No common control voxels found. Lower the membership threshold."
raise intnorme.NormalizationError(msg)
if self.register:
assert n_images == len(registered_images)
control_voxels = np.zeros((num_control_voxels, n_images))
for i, registered in enumerate(registered_images):
ctrl_vox = registered[intersection]
control_voxels[:, i] = ctrl_vox
logger.debug(
f"Image {i+1} control voxels - "
f"mean: {ctrl_vox.mean():.3f}; "
f"std: {ctrl_vox.std():.3f}"
)
else:
control_voxels = image_matrix[intersection.flatten(), :]
return image_matrix, control_voxels
def estimate_unwanted_factors(self, control_voxels: npt.NDArray) -> npt.NDArray:
logger.debug("Estimating unwanted factors.")
_, _, all_unwanted_factors = (
np.linalg.svd(control_voxels, full_matrices=False)
if not self.sparse_svd
else scipy.sparse.linalg.svds(
scipy.sparse.bsr_matrix(control_voxels),
k=self.num_unwanted_factors,
return_singular_vectors="vh",
)
)
unwanted_factors: npt.NDArray
unwanted_factors = all_unwanted_factors.T[:, 0 : self.num_unwanted_factors]
return unwanted_factors
def _fit(
self,
images: collections.abc.Sequence[intnormt.ImageLike],
/,
masks: collections.abc.Sequence[intnormt.ImageLike] | None = None,
*,
modality: intnormt.Modalities = intnormt.Modalities.T1,
**kwargs: typing.Any,
) -> None:
image_matrix, control_voxels = self.create_image_matrix_and_control_voxels(
images,
masks,
modality=modality,
)
unwanted_factors = self.estimate_unwanted_factors(control_voxels)
normalized = self._ravel_correction(image_matrix, unwanted_factors)
self._normalized = normalized.T # transpose so images on 0th axis
def process_directories(
self,
image_dir: intnormt.PathLike,
/,
mask_dir: intnormt.PathLike | None = None,
*,
modality: intnormt.Modalities = intnormt.Modalities.T1,
ext: builtins.str = "nii*",
return_normalized_and_masks: builtins.bool = False,
**kwargs: typing.Any,
) -> builtins.tuple[
builtins.list[mioi.Image], builtins.list[mioi.Image] | None
] | None:
logger.debug("Gathering images.")
images, masks = intnormio.gather_images_and_masks(image_dir, mask_dir, ext=ext)
self.fit(images, masks, modality=modality, **kwargs)
assert self._normalized is not None
if return_normalized_and_masks:
norm_lst: builtins.list[mioi.Image] = []
for normed, image in zip(self._normalized, images):
norm_lst.append(mioi.Image(normed.reshape(image.shape), image.affine))
return norm_lst, masks
return None
@staticmethod
def name() -> str:
return "ravel"
@staticmethod
def fullname() -> str:
return "RAVEL"
@staticmethod
def description() -> str:
desc = "Perform WhiteStripe and then correct for technical "
desc += "variation with RAVEL on a set of MR images."
return desc
@staticmethod
def add_method_specific_arguments(
parent_parser: argparse.ArgumentParser,
) -> argparse.ArgumentParser:
parser = parent_parser.add_argument_group("method-specific arguments")
parser.add_argument(
"-b",
"--num-unwanted-factors",
type=int,
default=1,
help="Number of unwanted factors to eliminate (see 'b' in RAVEL paper).",
)
parser.add_argument(
"-mt",
"--membership-threshold",
type=float,
default=0.99,
help="Threshold for the membership of the control (CSF) voxels.",
)
parser.add_argument(
"--no-registration",
action="store_false",
dest="register",
default=True,
help="Do not do deformable registration to find control mask. "
"(Assumes images are deformably co-registered).",
)
parser.add_argument(
"--sparse-svd",
action="store_true",
default=False,
help="Use a sparse version of the SVD (lower memory requirements).",
)
parser.add_argument(
"--masks-are-csf",
action="store_true",
default=False,
help="Use this flag if mask directory corresponds to CSF masks "
"instead of brain masks. (Assumes images are deformably co-registered).",
)
parser.add_argument(
"--quantile-to-label-csf",
default=1.0,
help="Control how intersection calculated "
"(1.0 means normal intersection, 0.5 means only "
"half of the images need the voxel labeled as CSF).",
)
return parent_parser
@classmethod
def from_argparse_args(cls, args: argparse.Namespace, /) -> RavelNormalize:
return cls(
membership_threshold=args.membership_threshold,
register=args.register,
num_unwanted_factors=args.num_unwanted_factors,
sparse_svd=args.sparse_svd,
quantile_to_label_csf=args.quantile_to_label_csf,
masks_are_csf=args.masks_are_csf,
)
def save_additional_info(
self,
args: argparse.Namespace,
**kwargs: typing.Any,
) -> None:
normed = kwargs["normalized"]
image_fns = kwargs["image_filenames"]
if len(self._control_masks) != len(image_fns):
msg = f"'control_masks' ({len(self._control_masks)}) "
msg += f"and 'image_filenames' ({len(image_fns)}) "
msg += "must be in correspondence."
raise RuntimeError(msg)
if len(self._control_masks) != len(normed):
msg = f"'control_masks' ({len(self._control_masks)}) "
msg += f"and 'normalized' ({len(normed)}) "
msg += "must be in correspondence."
raise RuntimeError(msg)
for _csf_mask, norm, fn in zip(self._control_masks, normed, image_fns):
if hasattr(norm, "affine"):
csf_mask = mioi.Image(_csf_mask, norm.affine)
elif hasattr(_csf_mask, "affine"):
csf_mask = mioi.Image(_csf_mask, _csf_mask.affine) # type: ignore[attr-defined] # noqa: E501
else:
csf_mask = mioi.Image(_csf_mask, None)
base, name, ext = intnormio.split_filename(fn)
new_name = name + "_csf_mask" + ext
if args.output_dir is None:
output = base / new_name
else:
output = pathlib.Path(args.output_dir) / new_name
csf_mask.to_filename(output)
del self._control_masks
self._control_masks = []
| 38.601918 | 109 | 0.621544 |
f73e40ebebcc8c71359c42c7a788d61b7c7a525a | 3,869 | py | Python | navigation_experiments_mc_pddl/launch/pddl_reconfig_controller_launch.py | estherag/navigation_experiments_mc_bts_pddl | 992b675c3519a726bf6b9c342402fbee2296941e | [
"Apache-2.0"
] | 3 | 2021-01-25T17:07:37.000Z | 2021-02-04T12:58:04.000Z | navigation_experiments_mc_pddl/launch/pddl_reconfig_controller_launch.py | estherag/navigation_experiments_mc_bts_pddl | 992b675c3519a726bf6b9c342402fbee2296941e | [
"Apache-2.0"
] | null | null | null | navigation_experiments_mc_pddl/launch/pddl_reconfig_controller_launch.py | estherag/navigation_experiments_mc_bts_pddl | 992b675c3519a726bf6b9c342402fbee2296941e | [
"Apache-2.0"
] | 2 | 2022-02-10T10:55:20.000Z | 2022-02-14T01:35:24.000Z | # Copyright 2019 Intelligent Robotics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription, SetEnvironmentVariable
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
import launch
import launch.actions
import launch.events
import launch_ros.actions
import launch_ros.events
import launch_ros.events.lifecycle
def generate_launch_description():
    """Assemble the launch description for the PDDL patrol experiment.

    Brings up PlanSys2 with the patrol-with-recharge-and-reconfigure
    domain and starts one node per PDDL action executable.
    """
    pkg_dir = get_package_share_directory('navigation_experiments_mc_pddl')

    # Line-buffer console output so logs appear promptly.
    line_buffering = SetEnvironmentVariable(
        'RCUTILS_CONSOLE_STDOUT_LINE_BUFFERED', '1')

    plansys2_bringup = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(os.path.join(
            get_package_share_directory('plansys2_bringup'),
            'launch',
            'plansys2_bringup_launch_distributed.py')),
        launch_arguments={
            'model_file': pkg_dir + '/pddl/patrol_w_recharge_reconfig.pddl'
        }.items()
    )

    # Every action node follows the same pattern: node name equals the
    # executable name, screen output, no extra parameters.
    action_executables = [
        'move_action_node',
        'patrol_action_node',
        'charge_action_node',
        'ask_charge_action_node',
        'degraded_move_action_node',
        'reconfigure_action_node',
        'recover_nav_sensor_node',
    ]
    action_nodes = [
        Node(
            package='navigation_experiments_mc_pddl',
            executable=executable,
            name=executable,
            output='screen',
            parameters=[])
        for executable in action_executables
    ]

    ld = LaunchDescription()
    ld.add_action(line_buffering)
    ld.add_action(plansys2_bringup)
    for node in action_nodes:
        ld.add_action(node)
    return ld
| 31.713115 | 102 | 0.71207 |
ac49d12de40c20f05f9f91f7194fdfbbfb79e737 | 9,493 | py | Python | mindboggle/guts/graph.py | shnizzedy/mindboggle | 36ea945cfa4c1998f3a88b58f296c25be91713d5 | [
"CC-BY-3.0",
"Apache-2.0"
] | null | null | null | mindboggle/guts/graph.py | shnizzedy/mindboggle | 36ea945cfa4c1998f3a88b58f296c25be91713d5 | [
"CC-BY-3.0",
"Apache-2.0"
] | 7 | 2021-03-04T17:07:53.000Z | 2021-04-06T20:09:17.000Z | mindboggle/guts/graph.py | shnizzedy/mindboggle | 36ea945cfa4c1998f3a88b58f296c25be91713d5 | [
"CC-BY-3.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Graph operations:
- Diagonal degree matrix
- Matrix weights and affinity matrix
- Graph Laplacian
Authors:
- Eliezer Stavsky, 2012 (eli.stavsky@gmail.com)
- Arno Klein, 2016 (arno@mindboggle.info) http://binarybottle.com
Copyright 2016, Mindboggle team (http://mindboggle.info), Apache v2.0 License
"""
import networkx as nx
from mindboggle.guts.kernels import rbf_kernel
def diagonal_degree_matrix(W, inverse=False, square_root=False):
    """
    Build the diagonal degree matrix of an affinity matrix.

    Parameters
    ----------
    W : N x N matrix (dense array or sparse csr affinity matrix)
    inverse : boolean (compute inverse of diagonal degree matrix?)
    square_root : boolean (compute square root of diagonal degree matrix?)

    Returns
    -------
    ddm : N x N sparse matrix in csr format (diagonal matrix)
        "csr" stands for "compressed sparse row" matrix
        (http://docs.scipy.org/doc/scipy/reference/sparse.html)

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.graph import diagonal_degree_matrix
    >>> W = np.array([[10,2.3,3], [0,0,3], [0,1.5,0]])
    >>> tocsr = diagonal_degree_matrix(W, inverse=False, square_root=False)
    >>> np.allclose(tocsr.data, [ 15.3,   3. ,   1.5])
    True
    """
    import numpy as np
    from scipy.sparse import lil_matrix

    # small constant guards against division by zero on empty rows
    eps = 0.000001
    n = W.shape[0]
    ddm = lil_matrix((n, n))
    row_sums = W.sum(axis=1)

    if not inverse:
        ddm.setdiag(row_sums)
    elif square_root:
        ddm.setdiag(np.sqrt(1 / (row_sums + eps)))
    else:
        ddm.setdiag(1 / (row_sums + eps))

    return ddm.tocsr()
def weight_graph(Nodes, Indices, Meshes, kernel=rbf_kernel, add_to_graph=True,
                 G=None, sigma=20, verbose=False):
    """
    Construct weighted edges of a graph and compute an affinity matrix.

    Parameters
    ----------
    Nodes : numpy array
    Indices : list of integers
    Meshes : numpy array
    kernel : function which determines weights of edges
        - rbf_kernel: Gaussian kernel, with parameter sigma
        - cotangent_kernel: weight calculation for Laplace_Beltrami_Operator
          (NOTE: option removed until it can be tested)
        - inverse_distance: additional kernel where the weight is the inverse
          of the distance between two nodes
    add_to_graph : boolean (add to graph?)
    G : networkx graph or None
        graph to add weighted edges to; a fresh graph is created when None
        (a shared default instance would accumulate edges across calls)
    sigma : float (parameter for rbf_kernel)
    verbose : bool
        print statements?

    Returns
    -------
    G : networkx graph
    affinity_matrix : numpy array (sparse affinity matrix)

    Examples
    --------
    >>> import numpy as np
    >>> import networkx as nx
    >>> from mindboggle.guts.kernels import rbf_kernel
    >>> from mindboggle.guts.graph import weight_graph
    >>> Nodes = np.array([0,1,2,3,4])
    >>> Indices = [0,1,2,3,4]
    >>> Meshes = np.array([[1,2,3],[0,1,2],[0,1,3],[0,1,4],[0,2,3],[0,3,4]])
    >>> kernel = rbf_kernel
    >>> add_to_graph = True
    >>> G = nx.Graph()
    >>> sigma = 20
    >>> verbose = False
    >>> G, affinity_matrix = weight_graph(Nodes, Indices, Meshes, kernel,
    ...                                   add_to_graph, G, sigma, verbose)
    >>> G.size()
    9
    >>> sorted(dict(G.degree()).items())
    [(0.0, 4), (1.0, 4), (2.0, 3), (3.0, 4), (4.0, 3)]
    """
    import numpy as np
    from scipy.sparse import lil_matrix

    from mindboggle.guts.kernels import rbf_kernel, inverse_distance

    if G is None:
        # BUGFIX: previously 'G=nx.Graph()' was a mutable default argument,
        # so repeated calls silently accumulated edges in one shared graph.
        G = nx.Graph()

    if kernel is rbf_kernel or kernel is inverse_distance:
        if verbose:
            if kernel is rbf_kernel:
                print('Compute weights using rbf kernel (sigma={0})'.
                      format(sigma))
            else:
                print('Compute weights using inverse distance kernel '
                      '(sigma={0})'.format(sigma))

        # Construct matrix of edge lines by breaking triangle into three edges.
        if Meshes.shape[1] == 3:
            edge_mat = np.vstack((Meshes.T[0:2].T, Meshes.T[1:3].T, Meshes.T[:3:2].T))
        elif Meshes.shape[1] == 2:
            edge_mat = Meshes

        # Augment matrix to contain edge weight in the third column.
        # (np.int was removed in NumPy >= 1.20; the builtin int is equivalent.)
        weighted_edges = np.asarray([[Indices[int(i)], Indices[int(j)],
                                      kernel(Nodes[int(Indices[int(i)])],
                                             Nodes[int(Indices[int(j)])], sigma)]
                                     for [i, j] in edge_mat])

        # Add weights to graph
        if add_to_graph:
            if verbose:
                print('Add weighted edges to the graph')
            G.add_weighted_edges_from(weighted_edges)

        # Construct affinity matrix
        if verbose:
            print('Construct sparse affinity matrix of size {0}'.
                  format(Nodes.shape[0]))
        affinity_matrix = lil_matrix((Nodes.shape[0], Nodes.shape[0]))
        for [i, j, edge_weight] in weighted_edges:
            affinity_matrix[i, j] = affinity_matrix[j, i] = edge_weight

    # NOTE: the cotangent_kernel code path was removed until it can be tested.

    # Return the affinity matrix as a "compressed sparse row" matrix
    # (http://docs.scipy.org/doc/scipy/reference/sparse.html)
    if add_to_graph:
        return G, affinity_matrix.tocsr()
    else:
        return affinity_matrix.tocsr()
def graph_laplacian(W, type_of_laplacian='norm1', verbose=False):
    """
    Compute normalized and unnormalized graph Laplacians.

    Parameters
    ----------
    W : N x N sparse matrix in csr format (affinity matrix)
        "csr" stands for "compressed sparse row" matrix
        (http://docs.scipy.org/doc/scipy/reference/sparse.html)
    type_of_laplacian : string
        - basic: non-normalized Laplacian (Lap = D - W)
        - norm1: normalized Laplacian (Lap = ddmi_sq * L * ddmi_sq) - recovers definition
        - norm2: normalized Laplacian (Lap = ddmi_sq * W * ddmi_sq)
        - norm3: normalized Laplacian (Lap = inv(D) * L)
        - random_walk: random walk Laplacian (Lap = inv(D) * W)
    verbose : bool
        print statements?

    Returns
    -------
    Laplacian : N x N sparse matrix in csr format
                (Graph Laplacian of affinity matrix)

    Examples
    --------
    >>> import numpy as np
    >>> import scipy.sparse as sparse
    >>> from mindboggle.guts.graph import graph_laplacian
    >>> row = np.array([0, 0, 1, 2, 2, 2])
    >>> col = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> W = sparse.csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
    >>> W
    array([[1, 0, 2],
           [0, 0, 3],
           [4, 5, 6]])
    >>> type_of_laplacian = 'norm1'
    >>> verbose = False
    >>> Laplacian = graph_laplacian(W, type_of_laplacian, verbose)
    >>> print(np.array_str(np.array(Laplacian),
    ...       precision=5, suppress_small=True))
    [[ 0.66667  0.      -0.29814]
     [ 0.       1.      -0.44721]
     [-0.59628 -0.74536  0.6    ]]
    """
    # BUGFIX: options were previously compared with 'is', which tests object
    # identity and only works for interned string literals (and raises a
    # SyntaxWarning on modern CPython); '==' compares values correctly.
    if type_of_laplacian == 'basic':
        if verbose:
            print("Calculate unnormalized Laplacian")
        Laplacian = diagonal_degree_matrix(W) - W

    elif type_of_laplacian == 'norm1':
        if verbose:
            print("Normalize the Laplacian")
        ddmi_sq = diagonal_degree_matrix(W, inverse=True, square_root=True)
        Laplacian = ddmi_sq * (diagonal_degree_matrix(W, inverse=False, square_root=False) - W) * ddmi_sq

    elif type_of_laplacian == 'norm2':
        if verbose:
            print("Normalize the Laplacian")
        ddmi_sq = diagonal_degree_matrix(W, inverse=True, square_root=True)
        Laplacian = ddmi_sq * W * ddmi_sq

    elif type_of_laplacian == 'norm3':
        if verbose:
            print("Normalize the Laplacian")
        ddmi = diagonal_degree_matrix(W, inverse=True, square_root=False)
        Laplacian = ddmi * (diagonal_degree_matrix(W, inverse=False, square_root=False) - W)

    elif type_of_laplacian == 'random_walk':
        if verbose:
            print("Compute Random Walk Laplacian")
        ddmi = diagonal_degree_matrix(W, inverse=True, square_root=False)
        Laplacian = ddmi * W

    else:
        # unrecognized option: keep the historical sentinel return value
        if verbose:
            print('Option is not available')
        Laplacian = 0

    return Laplacian
# ============================================================================
# Doctests
# ============================================================================
# Run this module's doctests when the file is executed directly
# (the same checks run under: py.test --doctest-modules).
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)  # py.test --doctest-modules
| 34.772894 | 105 | 0.580428 |
038f3d71a5ee7441b570179467e32a175ce1b735 | 8,174 | py | Python | classifier.py | maxvays/TCGA_classifier | ad3b92ac76273e0701e9b41ce2e4aa3688f597a5 | [
"MIT"
] | null | null | null | classifier.py | maxvays/TCGA_classifier | ad3b92ac76273e0701e9b41ce2e4aa3688f597a5 | [
"MIT"
] | null | null | null | classifier.py | maxvays/TCGA_classifier | ad3b92ac76273e0701e9b41ce2e4aa3688f597a5 | [
"MIT"
] | null | null | null | '''
Trains TensorFlow DNNClassifier on training examples in TFRecord format, with
each training example comprising methylation betas for one aliquot and the 0/1
label.
Outputs statistics on the number of input features with non-zero weights in the
hidden layer (dnn/hiddenlayer_0/kernel).
Upon completion of training, writes the weights for all input features in the
hidden layer (dnn/hiddenlayer_0/kernel) into a separate file for analysis.
Copyright (c) 2019 Maxim Vaysburd
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import tensorflow as tf
import pandas as pd
import glob
import sys
import math
import resource
import getopt
from tensorflow.python.ops import variables
from tensorflow.python.framework import dtypes
# ---------------------------------------------------------------------------
# Parse required command-line options (NOTE: this is a Python 2 script).
opts, args = getopt.getopt(
    sys.argv[1:], "",
    ["regularization=", "eval_start_idx=", "study=", "tag=", "learning_rate="])
regularization = None
eval_start_idx = None
study = None
tag = None
learning_rate = None
# Fixed dataset split: 75 TFRecord shards per study, 15 held out for eval.
NUM_TRAINING_FILES = 75
NUM_EVAL_FILES = 15
for o, a in opts:
    if o == "--regularization":
        print "setting regularization = " + a
        regularization = float(a)
    elif o == "--eval_start_idx":
        print "setting eval_start_idx = " + a
        eval_start_idx = int(a)
    elif o == "--study":
        print "setting study = " + a
        study = a
    elif o == "--tag":
        print "setting tag = " + a
        tag = a
    elif o == "--learning_rate":
        print "setting learning rate = " + a
        learning_rate = float(a)
# All five options are mandatory; abort otherwise.
if regularization == None or eval_start_idx == None or study == None or tag == None or learning_rate == None:
    print("specify --regularization, --eval_start_idx, --study, --tag, --learning_rate")
    sys.exit(1)
# Model directory name encodes the hyperparameters of this run, so separate
# runs never clobber each other's checkpoints.
modeldir = "model_{tag}_{study}_e{eval_start_idx}_lr{learning_rate}_r{regularization}".format(
    tag=tag,
    study=study,
    eval_start_idx=eval_start_idx,
    regularization=regularization,
    learning_rate=learning_rate
)
print "model dir: " + modeldir
TRAINING_FILE_NAME = "{study}/tcga_training_examples_{idx}.tfr"
# Shards [eval_start_idx, eval_start_idx + NUM_EVAL_FILES) become the
# evaluation set; every other shard is used for training.
training_files = []
eval_files = []
for i in range(NUM_TRAINING_FILES):
    filename = TRAINING_FILE_NAME.format(study=study, idx=i)
    if i >= eval_start_idx and i < eval_start_idx + NUM_EVAL_FILES:
        eval_files.append(filename)
    else:
        training_files.append(filename)
# Raise the process data-segment soft limit to 4 GiB so the methylation
# betas fit in memory.
rsrc = resource.RLIMIT_DATA
soft, hard = resource.getrlimit(rsrc)
print 'Soft limit starts as :', soft
resource.setrlimit(rsrc, (4*1024*1024*1024, hard))
soft, hard = resource.getrlimit(rsrc)
print 'Soft limit changed to :', soft
# Map methylation probe ids to 0-based feature indices.
probe_indices = {} #maps probe_ids to indices
df = pd.read_csv("probe_id_indices")
print "reading probe indices"
for index, row in df.iterrows():
    probe_indices[row["probe_id"]] = row["index"] - 1
# NOTE(review): the feature width is hard-coded to 25 probes and the full
# probe count (len(probe_indices)) is commented out -- confirm this is
# intentional and not a leftover debugging value.
# numprobes = len(probe_indices)
numprobes = 25
print "done reading, number of probes: " + str(numprobes)
# One dense float vector holding all betas, plus the 0/1 integer label.
feature_columns = [
    tf.feature_column.numeric_column('betas', shape=(numprobes,), dtype=tf.float32)
]
label_column = tf.feature_column.numeric_column('label', shape=(1,), dtype=tf.int64)
features_spec = tf.feature_column.make_parse_example_spec(
    feature_columns + [label_column]
)
def input_fn(file_list):
    """Build a (features, labels) pair for tf.estimator from TFRecord files.

    Streams fixed-size batches parsed with the module-level `features_spec`
    and splits off the 'label' column as the label tensor.
    """
    batched = tf.contrib.data.make_batched_features_dataset(
        file_pattern=file_list,
        batch_size=16,
        features=features_spec,
        num_epochs=None,
        shuffle=False,
    )
    batch = batched.make_one_shot_iterator().get_next()
    labels = batch.pop('label')
    return batch, labels
# Single-hidden-layer (128-unit) binary classifier. L1-regularized proximal
# Adagrad drives most input weights to exactly zero, so the probes that keep
# non-zero weights can later be read off as the informative features.
classifier = tf.estimator.DNNClassifier(
    feature_columns=feature_columns,
    hidden_units=[128],
    model_dir=modeldir,
    n_classes=2,
    optimizer=tf.train.ProximalAdagradOptimizer(
        learning_rate=learning_rate,
        l1_regularization_strength=regularization))
#training_files = sorted(glob.glob("BRCA/tcga_training_examples_*"))
#total_num_files = len(training_files)
#num_eval_files = total_num_files / 10
#eval_files = [training_files.pop() for i in range(num_eval_files)]
print "training files: " + ",".join(training_files)
print "evaluation files: " + ",".join(eval_files)
train_spec = tf.estimator.TrainSpec(
    input_fn=lambda: input_fn(training_files),
    max_steps=200000*1000  # effectively unbounded; the job is stopped manually
)
eval_spec = tf.estimator.EvalSpec(
    input_fn=lambda: input_fn(eval_files)
)
tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
# Inspect the trained hidden-layer kernel: bucket every weight by magnitude
# ((1, inf), (0.1, 1], (0.01, 0.1], (1e-3, 1e-2], (1e-6, 1e-3], (1e-9, 1e-6])
# and count (a) weights per bucket and (b) input features with at least one
# weight in each bucket. Per-feature weights (scaled by 1e6) are also dumped
# to a CSV file for offline analysis.
variable_names = classifier.get_variable_names()
for var_name in variable_names:
    if var_name.startswith("dnn/hiddenlayer_0/kernel"):
        values = classifier.get_variable_value(var_name)
        # n_nonzeroK: input features with >= 1 weight in bucket K
        n_nonzero9 = 0
        n_nonzero6 = 0
        n_nonzero3 = 0
        n_nonzero0 = 0
        n_nonzero1 = 0
        n_nonzero2 = 0
        # n_nonzero_weightsK: individual weights in bucket K
        n_total_weights = 0
        n_nonzero_weights9 = 0
        n_nonzero_weights6 = 0
        n_nonzero_weights3 = 0
        n_nonzero_weights0 = 0
        n_nonzero_weights1 = 0
        n_nonzero_weights2 = 0
        wf_name = "weights_" + var_name.replace("/", "_") + ".csv"
        wf = open(wf_name, "w")
        # rows of the kernel = input features; columns = hidden units
        for values_for_one_feature in values:
            has_nonzeros9 = False
            has_nonzeros6 = False
            has_nonzeros3 = False
            has_nonzeros0 = False
            has_nonzeros1 = False
            has_nonzeros2 = False
            for x in values_for_one_feature:
                # weights written scaled by 1e6 to keep small values visible
                wf.write("%f," % (x*1.0e6))
                n_total_weights += 1
                # each weight lands in exactly one magnitude bucket
                if abs(x) > 1.0:
                    has_nonzeros0 = True
                    n_nonzero_weights0 += 1
                elif abs(x) > 1.0e-1:
                    has_nonzeros1 = True
                    n_nonzero_weights1 += 1
                elif abs(x) > 1.0e-2:
                    has_nonzeros2 = True
                    n_nonzero_weights2 += 1
                elif abs(x) > 1.0e-3:
                    has_nonzeros3 = True
                    n_nonzero_weights3 += 1
                elif abs(x) > 1.0e-6:
                    has_nonzeros6 = True
                    n_nonzero_weights6 += 1
                elif abs(x) > 1.0e-9:
                    has_nonzeros9 = True
                    n_nonzero_weights9 += 1
            wf.write("\n")
            if has_nonzeros9:
                n_nonzero9 += 1
            if has_nonzeros6:
                n_nonzero6 += 1
            if has_nonzeros3:
                n_nonzero3 += 1
            if has_nonzeros0:
                n_nonzero0 += 1
            if has_nonzeros1:
                n_nonzero1 += 1
            if has_nonzeros2:
                n_nonzero2 += 1
        print var_name, "# inputs with nonzero weights", n_nonzero0, n_nonzero1, n_nonzero2, n_nonzero3, n_nonzero6, n_nonzero9
        # NOTE(review): this summary omits the n_nonzero_weights1 and
        # n_nonzero_weights2 buckets -- confirm whether that is intentional.
        print var_name, "# nonzero weights", n_total_weights, n_nonzero_weights0, n_nonzero_weights3, n_nonzero_weights6, n_nonzero_weights9
        wf.close()
########################################################################
# Dump the full hidden-layer weight matrix (one row per input feature)
# to a CSV for offline analysis.
weights_file = open("weights_" + modeldir + ".csv", "w")
var_name = "dnn/hiddenlayer_0/kernel"
values_for_all_features = classifier.get_variable_value(var_name)
# BUGFIX: file.write() requires a string; passing the numpy ndarray
# directly raised a TypeError. Serialize the matrix row by row instead.
for row in values_for_all_features:
    weights_file.write(",".join(str(w) for w in row) + "\n")
weights_file.close()
########################################################################
| 33.363265 | 148 | 0.661488 |
42f39ae5ede363cdb8b0ece021ae723fcd7826e8 | 162 | py | Python | steinbock/segmentation/cellprofiler/__init__.py | TambourineClub/steinbock | 548ccf69e80a1ec3cb144a16ec67070fcab5474c | [
"MIT"
] | 1 | 2022-03-03T13:22:27.000Z | 2022-03-03T13:22:27.000Z | steinbock/segmentation/cellprofiler/__init__.py | TambourineClub/steinbock | 548ccf69e80a1ec3cb144a16ec67070fcab5474c | [
"MIT"
] | null | null | null | steinbock/segmentation/cellprofiler/__init__.py | TambourineClub/steinbock | 548ccf69e80a1ec3cb144a16ec67070fcab5474c | [
"MIT"
] | null | null | null | from ._cellprofiler import create_and_save_segmentation_pipeline, try_segment_objects
__all__ = ["create_and_save_segmentation_pipeline", "try_segment_objects"]
| 40.5 | 85 | 0.876543 |
3e9c02dd4b4b5d8f710df95283587959a3fa0723 | 2,886 | py | Python | src/data_preparation/scripts/graph_generator/typeparsing/inheritancerewrite.py | typilus/typilus | 69c377b4cd286fd3657708accf3b2f56a5da1e8d | [
"MIT"
] | 39 | 2020-04-16T05:14:53.000Z | 2022-01-12T12:50:07.000Z | src/data_preparation/scripts/graph_generator/typeparsing/inheritancerewrite.py | fwangdo/typilus | 69c377b4cd286fd3657708accf3b2f56a5da1e8d | [
"MIT"
] | 5 | 2020-07-05T08:20:27.000Z | 2022-03-04T09:49:12.000Z | src/data_preparation/scripts/graph_generator/typeparsing/inheritancerewrite.py | fwangdo/typilus | 69c377b4cd286fd3657708accf3b2f56a5da1e8d | [
"MIT"
] | 12 | 2020-04-25T19:12:46.000Z | 2022-02-17T08:49:24.000Z | from itertools import product
from typing import Callable, Iterator, Set
import random
from typeparsing.nodes import TypeAnnotationNode, SubscriptAnnotationNode, TupleAnnotationNode, ListAnnotationNode, \
AttributeAnnotationNode, IndexAnnotationNode, ElipsisAnnotationNode
from typeparsing.visitor import TypeAnnotationVisitor
__all__ = ['DirectInheritanceRewriting']
class DirectInheritanceRewriting(TypeAnnotationVisitor):
    """Rewrite annotation nodes using their direct is-a (supertype) relationships.

    Visiting a node returns a list of alternative annotations obtained by
    substituting sub-nodes with their direct supertypes.  Combinatorial
    results (tuples/lists) are randomly sampled down to a configurable cap.
    """

    def __init__(self, is_a_info: Callable[[TypeAnnotationNode], Iterator[TypeAnnotationNode]],
                 non_generic_types: Set[TypeAnnotationNode], limit_combinations_to: int = 10000):
        self.__supertypes_of = is_a_info              # direct is-a lookup
        self.__non_generic = non_generic_types        # types that never take parameters
        self.__max_results = limit_combinations_to    # cap on combinatorial blow-up

    def __capped(self, candidates):
        """Randomly sample at most ``limit_combinations_to`` entries."""
        if len(candidates) > self.__max_results:
            random.shuffle(candidates)
            return candidates[:self.__max_results]
        return candidates

    def visit_subscript_annotation(self, node: SubscriptAnnotationNode):
        base_options = node.value.accept_visitor(self)
        slice_options = [None] if node.slice is None else node.slice.accept_visitor(self)
        rewritten = []
        for base in base_options:
            if base in self.__non_generic:
                # A non-generic base cannot carry a subscript; keep it bare.
                rewritten.append(base)
            else:
                rewritten.extend(SubscriptAnnotationNode(base, sl) for sl in slice_options)
        return rewritten

    def visit_tuple_annotation(self, node: TupleAnnotationNode):
        per_element = [elem.accept_visitor(self) for elem in node.elements]
        combos = [TupleAnnotationNode(parts) for parts in product(*per_element)]
        return self.__capped(combos)

    def visit_name_annotation(self, node):
        options = [node]
        options.extend(self.__supertypes_of(node))
        return options

    def visit_list_annotation(self, node: ListAnnotationNode):
        per_element = [elem.accept_visitor(self) for elem in node.elements]
        combos = [ListAnnotationNode(parts) for parts in product(*per_element)]
        return self.__capped(combos)

    def visit_attribute_annotation(self, node: AttributeAnnotationNode):
        options = [node]
        options.extend(self.__supertypes_of(node))
        return options

    def visit_index_annotation(self, node: IndexAnnotationNode):
        return [IndexAnnotationNode(inner) for inner in node.value.accept_visitor(self)]

    def visit_elipsis_annotation(self, node: ElipsisAnnotationNode):
        # An ellipsis has no supertypes; it only maps to itself.
        return [node]

    def visit_name_constant_annotation(self, node):
        return [node]

    def visit_unknown_annotation(self, node):
        return [node]
9adfa078f9afb05610602fd628ad66148d068b14 | 26,070 | py | Python | seahub/settings.py | xiez/seahub | 8dfaa726e804afdf27e0b530de3e127b1654a7e3 | [
"Apache-2.0"
] | null | null | null | seahub/settings.py | xiez/seahub | 8dfaa726e804afdf27e0b530de3e127b1654a7e3 | [
"Apache-2.0"
] | null | null | null | seahub/settings.py | xiez/seahub | 8dfaa726e804afdf27e0b530de3e127b1654a7e3 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012-2016 Seafile Ltd.
# -*- coding: utf-8 -*-
# Django settings for seahub project.
import sys
import os
import re
from seaserv import FILE_SERVER_ROOT, FILE_SERVER_PORT, SERVICE_URL
# Repository root of seahub (one level above this package).
PROJECT_ROOT = os.path.join(os.path.dirname(__file__), os.pardir)
DEBUG = False
CLOUD_MODE = False
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Default database is a local sqlite3 file; production deployments override
# this via seahub_settings.py (loaded at the bottom of this module).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '%s/seahub/seahub.db' % PROJECT_ROOT, # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Internationalization, media and static-file configuration.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = '%s/media/' % PROJECT_ROOT
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '%s/assets/' % MEDIA_ROOT
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/media/assets/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    '%s/static' % PROJECT_ROOT,
    '%s/frontend/build' % PROJECT_ROOT,
)
# django-webpack-loader: where the React frontend's build manifest lives.
WEBPACK_LOADER = {
    'DEFAULT': {
        'BUNDLE_DIR_NAME': 'frontend/',
        'STATS_FILE': os.path.join(PROJECT_ROOT, 'frontend/webpack-stats.pro.json'),
    }
}
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
# StaticI18N config
STATICI18N_ROOT = '%s/static/scripts' % PROJECT_ROOT
STATICI18N_OUTPUT_DIR = 'i18n'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
    'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this hard-coded key is a development default; deployments are
# expected to override it via seahub_settings.py (loaded at the bottom).
SECRET_KEY = 'n*v0=jz-1rz@(4gx^tf%6^e7c&um@2)g-l=3_)t@19a69n1nv6'
# Order is important
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'seahub.auth.middleware.AuthenticationMiddleware',
    'seahub.base.middleware.BaseMiddleware',
    'seahub.base.middleware.InfobarMiddleware',
    'seahub.password_session.middleware.CheckPasswordHash',
    'seahub.base.middleware.ForcePasswdChangeMiddleware',
    'seahub.base.middleware.UserPermissionMiddleware',
    'termsandconditions.middleware.TermsAndConditionsRedirectMiddleware',
    'seahub.two_factor.middleware.OTPMiddleware',
    'seahub.two_factor.middleware.ForceTwoFactorAuthMiddleware',
    'seahub.trusted_ip.middleware.LimitIpMiddleware',
)
SITE_ROOT_URLCONF = 'seahub.urls'
ROOT_URLCONF = 'seahub.utils.rooturl'
SITE_ROOT = '/'
CSRF_COOKIE_NAME = 'sfcsrftoken'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'seahub.wsgi.application'
# Template engines: custom templates under seahub-data take precedence over
# the bundled ones.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(PROJECT_ROOT, '../../seahub-data/custom/templates'),
            os.path.join(PROJECT_ROOT, 'seahub/templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.request',
                'django.contrib.messages.context_processors.messages',
                'seahub.auth.context_processors.auth',
                'seahub.base.context_processors.base',
                'seahub.base.context_processors.debug',
            ],
        },
    },
]
# UI languages offered to users (code, native display name).
LANGUAGES = (
    # ('bg', gettext_noop(u'български език')),
    ('ca', u'Català'),
    ('cs', u'Čeština'),
    ('de', 'Deutsch'),
    ('en', 'English'),
    ('es', 'Español'),
    ('es-ar', 'Español de Argentina'),
    ('es-mx', 'Español de México'),
    ('fr', 'Français'),
    ('it', 'Italiano'),
    ('is', 'Íslenska'),
    ('lv', 'Latvian'),
    # ('mk', 'македонски јазик'),
    ('hu', 'Magyar'),
    ('nl', 'Nederlands'),
    ('pl', 'Polski'),
    ('pt-br', 'Portuguese, Brazil'),
    ('ru', 'Русский'),
    # ('sk', 'Slovak'),
    ('sl', 'Slovenian'),
    ('fi', 'Suomi'),
    ('sv', 'Svenska'),
    ('vi', 'Tiếng Việt'),
    ('tr', 'Türkçe'),
    ('uk', 'українська мова'),
    ('he', 'עברית'),
    ('ar', 'العربية'),
    ('el', 'ελληνικά'),
    ('th', 'ไทย'),
    ('ko', '한국어'),
    ('ja', '日本語'),
    # ('lt', 'Lietuvių kalba'),
    ('zh-cn', '简体中文'),
    ('zh-tw', '繁體中文'),
)
LOCALE_PATHS = (
    os.path.join(PROJECT_ROOT, 'locale'),
    os.path.join(PROJECT_ROOT, 'seahub/trusted_ip/locale'),
)
INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # In order to override command `createsuperuser`, base app *must* come before auth app.
    # ref: https://docs.djangoproject.com/en/1.11/howto/custom-management-commands/#overriding-commands
    'seahub.base',
    'django.contrib.auth',
    'registration',
    'captcha',
    'compressor',
    'statici18n',
    'constance',
    'constance.backends.database',
    'post_office',
    'termsandconditions',
    'webpack_loader',
    'seahub.api2',
    'seahub.avatar',
    'seahub.contacts',
    'seahub.drafts',
    'seahub.institutions',
    'seahub.invitations',
    'seahub.wiki',
    'seahub.group',
    'seahub.notifications',
    'seahub.options',
    'seahub.onlyoffice',
    'seahub.profile',
    'seahub.share',
    'seahub.help',
    'seahub.thumbnail',
    'seahub.password_session',
    'seahub.admin_log',
    'seahub.wopi',
    'seahub.tags',
    'seahub.revision_tag',
    'seahub.two_factor',
    'seahub.role_permissions',
    'seahub.trusted_ip',
    'seahub.repo_tags',
    'seahub.file_tags',
    'seahub.related_files',
)
# ---- Feature toggles and limits (overridable via local/extra settings) ----
# Enable or disable multiple storage backends.
ENABLE_STORAGE_CLASSES = False
# `USER_SELECT` or `ROLE_BASED` or `REPO_ID_MAPPING`
STORAGE_CLASS_MAPPING_POLICY = 'USER_SELECT'
# Enable or disable constance(web settings).
ENABLE_SETTINGS_VIA_WEB = True
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
CONSTANCE_DATABASE_CACHE_BACKEND = 'default'
AUTHENTICATION_BACKENDS = (
    'seahub.base.accounts.AuthBackend',
    'seahub.oauth.backends.OauthRemoteUserBackend',
)
ENABLE_OAUTH = False
ENABLE_WATERMARK = False
# allow user to clean library trash
ENABLE_USER_CLEAN_TRASH = True
LOGIN_REDIRECT_URL = '/profile/'
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGOUT_REDIRECT_URL = None
ACCOUNT_ACTIVATION_DAYS = 7
# allow seafile admin to view user's repo
ENABLE_SYS_ADMIN_VIEW_REPO = False
# allow search from LDAP directly during auto-completion (not only search imported users)
ENABLE_SEARCH_FROM_LDAP_DIRECTLY = False
# show traffic on the UI
SHOW_TRAFFIC = True
# Enable or disable make group public
ENABLE_MAKE_GROUP_PUBLIC = False
# show or hide library 'download' button
SHOW_REPO_DOWNLOAD_BUTTON = False
# enable 'upload folder' or not
ENABLE_UPLOAD_FOLDER = True
# enable resumable fileupload or not
ENABLE_RESUMABLE_FILEUPLOAD = False
## maxNumberOfFiles for fileupload
MAX_NUMBER_OF_FILES_FOR_FILEUPLOAD = 1000
# enable encrypted library
ENABLE_ENCRYPTED_LIBRARY = True
# enable resetting an encrypted library's password when user forgets it
ENABLE_RESET_ENCRYPTED_REPO_PASSWORD = False
# minimum length for password of encrypted library
REPO_PASSWORD_MIN_LENGTH = 8
# token length for the share link
SHARE_LINK_TOKEN_LENGTH = 20
# if limit only authenticated user can view preview share link
SHARE_LINK_LOGIN_REQUIRED = False
# min/max expire days for a share link
SHARE_LINK_EXPIRE_DAYS_MIN = 0 # 0 means no limit
SHARE_LINK_EXPIRE_DAYS_MAX = 0 # 0 means no limit
# minimum length for the password of a share link
SHARE_LINK_PASSWORD_MIN_LENGTH = 8
# enable or disable share link audit
ENABLE_SHARE_LINK_AUDIT = False
# share link audit code timeout
SHARE_LINK_AUDIT_CODE_TIMEOUT = 60 * 60
# enable or disable limit ip
ENABLE_LIMIT_IPADDRESS = False
TRUSTED_IP_LIST = ['127.0.0.1']
# Control the language that send email. Default to user's current language.
SHARE_LINK_EMAIL_LANGUAGE = ''
# check virus for files uploaded from an upload link
ENABLE_UPLOAD_LINK_VIRUS_CHECK = False
# minimum length for user's password
USER_PASSWORD_MIN_LENGTH = 6
# LEVEL based on four types of input:
# num, upper letter, lower letter, other symbols
# '3' means password must have at least 3 types of the above.
USER_PASSWORD_STRENGTH_LEVEL = 3
# default False, only check USER_PASSWORD_MIN_LENGTH
# when True, check password strength level, STRONG(or above) is allowed
USER_STRONG_PASSWORD_REQUIRED = False
# Force user to change password when admin add/reset a user.
FORCE_PASSWORD_CHANGE = True
# Enable a user to change password in 'settings' page.
ENABLE_CHANGE_PASSWORD = True
# Enable or disable repo history setting
ENABLE_REPO_HISTORY_SETTING = True
# Enable or disable org repo creation by user
ENABLE_USER_CREATE_ORG_REPO = True
DISABLE_SYNC_WITH_ANY_FOLDER = False
ENABLE_TERMS_AND_CONDITIONS = False
# Enable or disable sharing to all groups
ENABLE_SHARE_TO_ALL_GROUPS = False
# interval for request unread notifications
UNREAD_NOTIFICATIONS_REQUEST_INTERVAL = 3 * 60 # seconds
# Enable group discussion
ENABLE_GROUP_DISCUSSION = True
# Enable file comments
ENABLE_FILE_COMMENT = True
# File preview
FILE_PREVIEW_MAX_SIZE = 30 * 1024 * 1024
FILE_ENCODING_LIST = ['auto', 'utf-8', 'gbk', 'ISO-8859-1', 'ISO-8859-5']
FILE_ENCODING_TRY_LIST = ['utf-8', 'gbk']
HIGHLIGHT_KEYWORD = False # If True, highlight the keywords in the file when the visit is via clicking a link in 'search result' page.
# extensions of previewed files
TEXT_PREVIEW_EXT = """ac, am, bat, c, cc, cmake, cpp, cs, css, diff, el, h, html, htm, java, js, json, less, make, org, php, pl, properties, py, rb, scala, script, sh, sql, txt, text, tex, vi, vim, xhtml, xml, log, csv, groovy, rst, patch, go"""
# Common settings(file extension, storage) for avatar and group avatar.
AVATAR_FILE_STORAGE = '' # Replace with 'seahub.base.database_storage.DatabaseStorage' if save avatar files to database
AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png', '.jpeg', '.gif')
# Avatar
AVATAR_STORAGE_DIR = 'avatars'
AVATAR_HASH_USERDIRNAMES = True
AVATAR_HASH_FILENAMES = True
AVATAR_GRAVATAR_BACKUP = False
AVATAR_DEFAULT_URL = '/avatars/default.png'
AVATAR_DEFAULT_NON_REGISTERED_URL = '/avatars/default-non-register.jpg'
AVATAR_MAX_AVATARS_PER_USER = 1
AVATAR_CACHE_TIMEOUT = 14 * 24 * 60 * 60
AUTO_GENERATE_AVATAR_SIZES = (16, 20, 24, 28, 32, 36, 40, 42, 48, 60, 64, 72, 80, 84, 96, 128, 160)
# Group avatar
GROUP_AVATAR_STORAGE_DIR = 'avatars/groups'
GROUP_AVATAR_DEFAULT_URL = 'avatars/groups/default.png'
AUTO_GENERATE_GROUP_AVATAR_SIZES = (20, 24, 32, 36, 48, 56)
# Runtime directories; refined below for win32 and server releases.
LOG_DIR = os.environ.get('SEAHUB_LOG_DIR', '/tmp')
CACHE_DIR = "/tmp"
install_topdir = os.path.expanduser(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
central_conf_dir = os.environ.get('SEAFILE_CENTRAL_CONF_DIR', '')
# On Windows the runtime directories are derived from CCNET_CONF_DIR, which
# therefore must be set (and non-empty) for seahub to start at all.
if 'win32' in sys.platform:
    try:
        CCNET_CONF_PATH = os.environ['CCNET_CONF_DIR']
        if not CCNET_CONF_PATH: # If it's set but is an empty string.
            raise KeyError
    except KeyError:
        raise ImportError("Settings cannot be imported, because environment variable CCNET_CONF_DIR is undefined.")
    else:
        LOG_DIR = os.environ.get('SEAHUB_LOG_DIR', os.path.join(CCNET_CONF_PATH, '..'))
        CACHE_DIR = os.path.join(CCNET_CONF_PATH, '..')
        install_topdir = os.path.join(CCNET_CONF_PATH, '..')
# File-based cache by default; cluster deployments swap in memcached via
# seahub_settings.py.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': os.path.join(CACHE_DIR, 'seahub_cache'),
        'OPTIONS': {
            'MAX_ENTRIES': 1000000
        }
    },
    # Compatible with existing `COMPRESS_CACHE_BACKEND` setting after
    # upgrading to django-compressor v2.2.
    # ref: https://manual.seafile.com/deploy_pro/deploy_in_a_cluster.html
    'django.core.cache.backends.locmem.LocMemCache': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    },
}
# rest_framework
REST_FRAMEWORK = {
    'DEFAULT_THROTTLE_RATES': {
        'ping': '3000/minute',
        'anon': '60/minute',
        'user': '3000/minute',
    },
    # https://github.com/tomchristie/django-rest-framework/issues/2891
    'UNICODE_JSON': False,
}
# NOTE(review): "THROTTING" is a typo for "THROTTLING", but the name is a
# public setting consumed elsewhere -- renaming would break deployments.
REST_FRAMEWORK_THROTTING_WHITELIST = []
# file and path
GET_FILE_HISTORY_TIMEOUT = 10 * 60 # seconds
MAX_UPLOAD_FILE_NAME_LEN = 255
MAX_FILE_NAME = MAX_UPLOAD_FILE_NAME_LEN
MAX_PATH = 4096
FILE_LOCK_EXPIRATION_DAYS = 0
# Whether or not activate user when registration complete.
# If set to ``False``, new user will be activated by admin or via activate link.
ACTIVATE_AFTER_REGISTRATION = True
# Whether or not send activation Email to user when registration complete.
# This option will be ignored if ``ACTIVATE_AFTER_REGISTRATION`` set to ``True``.
REGISTRATION_SEND_MAIL = False
# Whether or not send notify email to system admins when user registered or
# first login through Shibboleth.
NOTIFY_ADMIN_AFTER_REGISTRATION = False
# Whether or not activate inactive user on first login. Mainly used in LDAP user sync.
ACTIVATE_AFTER_FIRST_LOGIN = False
REQUIRE_DETAIL_ON_REGISTRATION = False
# Account initial password, for password resetting.
# INIT_PASSWD can either be a string, or a function (function has to be set
# without the brackets).
def genpassword():
    """Generate a random 10-character string for a freshly reset account."""
    from django.utils.crypto import get_random_string
    return get_random_string(length=10)
INIT_PASSWD = genpassword
# ---- Branding, hosts, logging and login/session policy ----
# browser tab title
SITE_TITLE = 'Private Seafile'
# Base name used in email sending
SITE_NAME = 'Seafile'
# Path to the license file(relative to the media path)
LICENSE_PATH = os.path.join(PROJECT_ROOT, '../../seafile-license.txt')
# Path to the background image file of login page(relative to the media path)
LOGIN_BG_IMAGE_PATH = 'img/login-bg.jpg'
# Path to the favicon file (relative to the media path)
# tip: use a different name when modifying it.
FAVICON_PATH = 'img/favicon.ico'
# Path to the Logo Imagefile (relative to the media path)
LOGO_PATH = 'img/seafile-logo.png'
# logo size. the unit is 'px'
LOGO_WIDTH = 128
LOGO_HEIGHT = 32
CUSTOM_LOGO_PATH = 'custom/mylogo.png'
CUSTOM_FAVICON_PATH = 'custom/favicon.ico'
# used before version 6.3: the relative path of css file under seahub-data (e.g. custom/custom.css)
BRANDING_CSS = ''
# used in 6.3+, enable setting custom css via admin web interface
ENABLE_BRANDING_CSS = False
# Using Django to serve static files. Set to `False` if deployed behind a web
# server.
SERVE_STATIC = True
# Enable or disable registration on web.
ENABLE_SIGNUP = False
# show 'log out' icon in top-bar or not.
SHOW_LOGOUT_ICON = False
# For security consideration, please set to match the host/domain of your site, e.g., ALLOWED_HOSTS = ['.example.com'].
# Please refer https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts for details.
ALLOWED_HOSTS = ['*']
# Logging
LOGGING = {
    'version': 1,
    # Enable existing loggers so that gunicorn errors will be bubbled up when
    # server side error page "Internal Server Error" occurs.
    # ref: https://www.caktusgroup.com/blog/2015/01/27/Django-Logging-Configuration-logging_config-default-settings-logger/
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s:%(lineno)s %(funcName)s %(message)s'
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
            'formatter': 'standard',
        },
        'default': {
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(LOG_DIR, 'seahub.log'),
            'maxBytes': 1024*1024*100, # 100 MB
            'backupCount': 5,
            'formatter': 'standard',
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        '': {
            'handlers': ['default'],
            'level': 'INFO',
            'propagate': True
        },
        'django.request': {
            'handlers': ['default', 'mail_admins'],
            'level': 'INFO',
            'propagate': False
        },
        'py.warnings': {
            'handlers': ['console', ],
            'level': 'INFO',
            'propagate': False
        },
    }
}
# Login Attempt
LOGIN_ATTEMPT_LIMIT = 5
LOGIN_ATTEMPT_TIMEOUT = 15 * 60 # in seconds (default: 15 minutes)
FREEZE_USER_ON_LOGIN_FAILED = False # deactivate user account when login attempts exceed limit
# Age of cookie, in seconds (default: 1 day).
SESSION_COOKIE_AGE = 24 * 60 * 60
# Days of remembered login info (default: 7 days)
LOGIN_REMEMBER_DAYS = 7
SEAFILE_VERSION = '6.3.3'
# Compress static files(css, js)
COMPRESS_ENABLED = False
COMPRESS_URL = MEDIA_URL
COMPRESS_ROOT = MEDIA_ROOT
COMPRESS_DEBUG_TOGGLE = 'nocompress'
COMPRESS_CSS_HASHING_METHOD = 'content'
COMPRESS_CSS_FILTERS = [
    'compressor.filters.css_default.CssAbsoluteFilter',
    'compressor.filters.cssmin.CSSMinFilter',
]
CAPTCHA_IMAGE_SIZE = (90, 42)
###################
# Image Thumbnail #
###################
# Enable or disable thumbnail
ENABLE_THUMBNAIL = True
# Absolute filesystem path to the directory that will hold thumbnail files.
SEAHUB_DATA_ROOT = os.path.join(PROJECT_ROOT, '../../seahub-data')
# Prefer the seahub-data location when it exists (installed deployments);
# fall back to an in-tree directory for development checkouts.
if os.path.exists(SEAHUB_DATA_ROOT):
    THUMBNAIL_ROOT = os.path.join(SEAHUB_DATA_ROOT, 'thumbnail')
else:
    THUMBNAIL_ROOT = os.path.join(PROJECT_ROOT, 'seahub/thumbnail/thumb')
THUMBNAIL_EXTENSION = 'png'
# for thumbnail: height(px) and width(px)
THUMBNAIL_DEFAULT_SIZE = 48
THUMBNAIL_SIZE_FOR_GRID = 192
THUMBNAIL_SIZE_FOR_ORIGINAL = 1024
# size(MB) limit for generating thumbnail
THUMBNAIL_IMAGE_SIZE_LIMIT = 30
THUMBNAIL_IMAGE_ORIGINAL_SIZE_LIMIT = 256
# video thumbnails
ENABLE_VIDEO_THUMBNAIL = False
THUMBNAIL_VIDEO_FRAME_TIME = 5 # use the frame at 5 second as thumbnail
# template for creating new office file
OFFICE_TEMPLATE_ROOT = os.path.join(MEDIA_ROOT, 'office-template')
#####################
# Global AddressBook #
#####################
ENABLE_GLOBAL_ADDRESSBOOK = True
ENABLE_ADDRESSBOOK_OPT_IN = False
#####################
# Folder Permission #
#####################
ENABLE_FOLDER_PERM = False
####################
# Guest Invite #
####################
ENABLE_GUEST_INVITATION = False
INVITATION_ACCEPTER_BLACKLIST = []
########################
# Security Enhancements #
########################
ENABLE_SUDO_MODE = True
FILESERVER_TOKEN_ONCE_ONLY = True
#################
# Email sending #
#################
SEND_EMAIL_ON_ADDING_SYSTEM_MEMBER = True # Whether to send email when a system staff adding new member.
SEND_EMAIL_ON_RESETTING_USER_PASSWD = True # Whether to send email when a system staff resetting user's password.
##########################
# Settings for Extra App #
##########################
ENABLE_SUB_LIBRARY = True
##########################
# Settings for frontend #
##########################
SEAFILE_COLLAB_SERVER = ''
############################
# Settings for Seahub Priv #
############################
# Replace from email to current user instead of email sender.
REPLACE_FROM_EMAIL = False
# Add ``Reply-to`` header, see RFC #822.
ADD_REPLY_TO_HEADER = False
ENABLE_DEMO_USER = False
CLOUD_DEMO_USER = 'demo@seafile.com'
ENABLE_TWO_FACTOR_AUTH = False
OTP_LOGIN_URL = '/profile/two_factor_authentication/setup/'
TWO_FACTOR_DEVICE_REMEMBER_DAYS = 90
# Enable personal wiki, group wiki
ENABLE_WIKI = False
# Enable 'repo snapshot label' feature
ENABLE_REPO_SNAPSHOT_LABEL = False
# Repo wiki mode
ENABLE_REPO_WIKI_MODE = True
#####################
# External settings #
#####################
def load_local_settings(module):
    """Merge settings from *module* into this settings module.

    Symbols named ``EXTRA_<NAME>`` are appended to the existing ``<NAME>``
    setting (or create it if absent); every other symbol beginning with an
    upper-case letter simply overrides the current value.  A legacy
    ``HTTP_SERVER_ROOT`` is renamed to ``FILE_SERVER_ROOT`` first.
    """
    if hasattr(module, 'HTTP_SERVER_ROOT'):
        if not hasattr(module, 'FILE_SERVER_ROOT'):
            module.FILE_SERVER_ROOT = module.HTTP_SERVER_ROOT
        del module.HTTP_SERVER_ROOT
    for attr in dir(module):
        # Raw strings: '\w' in a plain literal is an invalid escape sequence
        # (a DeprecationWarning since Python 3.6, an error in newer versions).
        match = re.search(r'^EXTRA_(\w+)', attr)
        if match:
            name = match.group(1)
            value = getattr(module, attr)
            try:
                # Append to the existing list/tuple setting if present...
                globals()[name] += value
            except KeyError:
                # ...otherwise create it outright.
                globals()[name] = value
        elif re.search(r'^[A-Z]', attr):
            globals()[attr] = getattr(module, attr)
# Layered override loading: seahub_extra (pro), developer local_settings,
# then the server-release seahub_settings found in the central conf dir.
# Each `except ImportError: pass` deliberately makes the override optional.
# Load seahub_extra_settings.py
try:
    from seahub_extra import seahub_extra_settings
except ImportError:
    pass
else:
    load_local_settings(seahub_extra_settings)
    del seahub_extra_settings
# Load local_settings.py
try:
    import seahub.local_settings
except ImportError:
    pass
else:
    load_local_settings(seahub.local_settings)
    del seahub.local_settings
# Load seahub_settings.py in server release
try:
    if os.path.exists(central_conf_dir):
        sys.path.insert(0, central_conf_dir)
    import seahub_settings
except ImportError:
    pass
else:
    # In server release, sqlite3 db file is <topdir>/seahub.db
    DATABASES['default']['NAME'] = os.path.join(install_topdir, 'seahub.db')
    if 'win32' not in sys.platform:
        # In server release, gunicorn is used to deploy seahub
        INSTALLED_APPS += ('gunicorn', )
    load_local_settings(seahub_settings)
    del seahub_settings
    # Remove install_topdir from path
    # NOTE(review): indentation was reconstructed from a whitespace-mangled
    # source; confirm against upstream that this pop belongs in the `else`
    # branch (popping unconditionally could drop an unrelated sys.path entry).
    sys.path.pop(0)
# On Windows, serve seahub via django_wsgiserver and record this process's
# pid so the service wrapper can manage it.
if 'win32' in sys.platform:
    INSTALLED_APPS += ('django_wsgiserver', )
    # `with` guarantees the handle is closed even if the write raises
    # (the original open/write/close sequence leaked the handle on error).
    with open(os.path.join(install_topdir, "seahub.pid"), 'w') as fp:
        fp.write("%d\n" % os.getpid())
# Following settings are private and cannot be overwritten.
INNER_FILE_SERVER_ROOT = 'http://127.0.0.1:' + FILE_SERVER_PORT
CONSTANCE_ENABLED = ENABLE_SETTINGS_VIA_WEB
# Settings editable through the admin web UI (django-constance):
# each entry maps a name to (default value, help text).
CONSTANCE_CONFIG = {
    'SERVICE_URL': (SERVICE_URL, ''),
    'FILE_SERVER_ROOT': (FILE_SERVER_ROOT, ''),
    'DISABLE_SYNC_WITH_ANY_FOLDER': (DISABLE_SYNC_WITH_ANY_FOLDER, ''),
    'ENABLE_SIGNUP': (ENABLE_SIGNUP, ''),
    'ACTIVATE_AFTER_REGISTRATION': (ACTIVATE_AFTER_REGISTRATION, ''),
    'REGISTRATION_SEND_MAIL': (REGISTRATION_SEND_MAIL, ''),
    'LOGIN_REMEMBER_DAYS': (LOGIN_REMEMBER_DAYS, ''),
    'LOGIN_ATTEMPT_LIMIT': (LOGIN_ATTEMPT_LIMIT, ''),
    'FREEZE_USER_ON_LOGIN_FAILED': (FREEZE_USER_ON_LOGIN_FAILED, ''),
    'ENABLE_USER_CREATE_ORG_REPO': (ENABLE_USER_CREATE_ORG_REPO, ''),
    'ENABLE_ENCRYPTED_LIBRARY': (ENABLE_ENCRYPTED_LIBRARY, ''),
    'REPO_PASSWORD_MIN_LENGTH': (REPO_PASSWORD_MIN_LENGTH, ''),
    'ENABLE_REPO_HISTORY_SETTING': (ENABLE_REPO_HISTORY_SETTING, ''),
    'FORCE_PASSWORD_CHANGE': (FORCE_PASSWORD_CHANGE, ''),
    'USER_STRONG_PASSWORD_REQUIRED': (USER_STRONG_PASSWORD_REQUIRED, ''),
    'USER_PASSWORD_MIN_LENGTH': (USER_PASSWORD_MIN_LENGTH, ''),
    'USER_PASSWORD_STRENGTH_LEVEL': (USER_PASSWORD_STRENGTH_LEVEL, ''),
    'SHARE_LINK_TOKEN_LENGTH': (SHARE_LINK_TOKEN_LENGTH, ''),
    'SHARE_LINK_PASSWORD_MIN_LENGTH': (SHARE_LINK_PASSWORD_MIN_LENGTH, ''),
    'ENABLE_TWO_FACTOR_AUTH': (ENABLE_TWO_FACTOR_AUTH, ''),
    'TEXT_PREVIEW_EXT': (TEXT_PREVIEW_EXT, ''),
    'ENABLE_SHARE_TO_ALL_GROUPS': (ENABLE_SHARE_TO_ALL_GROUPS, ''),
    'SITE_NAME': (SITE_NAME, ''),
    'SITE_TITLE': (SITE_TITLE, ''),
    'ENABLE_BRANDING_CSS': (ENABLE_BRANDING_CSS, ''),
    'CUSTOM_CSS': ('', ''),
    'ENABLE_TERMS_AND_CONDITIONS': (ENABLE_TERMS_AND_CONDITIONS, ''),
    'ENABLE_USER_CLEAN_TRASH': (ENABLE_USER_CLEAN_TRASH, ''),
}
| 31.184211 | 245 | 0.690487 |
70274a0a850adf9442e439d8777d0e0ace6884f6 | 147 | py | Python | eod/commands/__init__.py | YZW-explorer/EOD | f10e64de86c0f356ebf5c7e923f4042eec4207b1 | [
"Apache-2.0"
] | 1 | 2022-01-11T06:44:27.000Z | 2022-01-11T06:44:27.000Z | eod/commands/__init__.py | YZW-explorer/EOD | f10e64de86c0f356ebf5c7e923f4042eec4207b1 | [
"Apache-2.0"
] | null | null | null | eod/commands/__init__.py | YZW-explorer/EOD | f10e64de86c0f356ebf5c7e923f4042eec4207b1 | [
"Apache-2.0"
] | null | null | null | from .train import Train # noqa
from .inference import Inference # noqa
from .eval import Eval # noqa
from .quant_deploy import QuantDeploy # noqa | 36.75 | 44 | 0.782313 |
f4905cbd985203b10bd00a518ff5acaab1768ae2 | 6,083 | py | Python | python/databuffer.py | steven1003/binaryninja-api | 116d3b8d245d680c7e02c5c8cfde80a90cff1a64 | [
"MIT"
] | null | null | null | python/databuffer.py | steven1003/binaryninja-api | 116d3b8d245d680c7e02c5c8cfde80a90cff1a64 | [
"MIT"
] | null | null | null | python/databuffer.py | steven1003/binaryninja-api | 116d3b8d245d680c7e02c5c8cfde80a90cff1a64 | [
"MIT"
] | null | null | null | # Copyright (c) 2015-2022 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import ctypes
from typing import Optional, Union
# Binary Ninja components
from . import _binaryninjacore as core
DataBufferInputType = Union[str, bytes, 'DataBuffer', int]
class DataBuffer:
def __init__(self, contents: Union[str, bytes, 'DataBuffer', int] = b"", handle=None):
if handle is not None:
self.handle = core.handle_of_type(handle, core.BNDataBuffer)
elif isinstance(contents, int):
self.handle = core.BNCreateDataBuffer(None, contents)
elif isinstance(contents, DataBuffer):
self.handle = core.BNDuplicateDataBuffer(contents.handle)
elif isinstance(contents, str):
self.handle = core.BNCreateDataBuffer(contents.encode("utf-8"), len(contents.encode("utf-8")))
else:
if not isinstance(contents, bytes):
raise TypeError(f"type {type(contents)} not convertable to DataBuffer")
self.handle = core.BNCreateDataBuffer(contents, len(contents))
def __del__(self):
if core is not None:
core.BNFreeDataBuffer(self.handle)
def __len__(self):
return int(core.BNGetDataBufferLength(self.handle))
def __getitem__(self, i) -> bytes:
if isinstance(i, tuple):
result = bytes()
for s in i:
result += self.__getitem__(s)
return result
elif isinstance(i, slice):
if i.step is not None:
i = i.indices(len(self))
start = i[0]
stop = i[1]
if stop <= start:
return b""
buf = ctypes.create_string_buffer(stop - start)
data = core.BNGetDataBufferContentsAt(self.handle, start)
assert data is not None, "core.BNGetDataBufferContentsAt returned None"
ctypes.memmove(buf, data, stop - start)
return buf.raw
else:
return bytes(self)[i]
elif i < 0:
if i >= -len(self):
return core.BNGetDataBufferByte(self.handle, int(len(self) + i)).to_bytes(1, "little")
raise IndexError("index out of range")
elif i < len(self):
return core.BNGetDataBufferByte(self.handle, int(i)).to_bytes(1, "little")
else:
raise IndexError("index out of range")
def __setitem__(self, i, value):
if isinstance(i, slice):
if i.step is not None:
raise IndexError("step not supported on assignment")
i = i.indices(len(self))
start = i[0]
stop = i[1]
if stop < start:
stop = start
if len(value) != (stop - start):
data = bytes(self)
data = data[0:start] + value + data[stop:]
core.BNSetDataBufferContents(self.handle, data, len(data))
else:
value = str(value)
buf = ctypes.create_string_buffer(len(value))
data = core.BNGetDataBufferContentsAt(self.handle, start)
assert data is not None, "core.BNGetDataBufferContentsAt returned None"
ctypes.memmove(data, buf, len(value))
elif i < 0:
if i >= -len(self):
if len(value) != 1:
raise ValueError("expected single byte for assignment")
value = str(value)
buf = ctypes.create_string_buffer(len(value))
data = core.BNGetDataBufferContentsAt(self.handle, int(len(self) + i))
assert data is not None, "core.BNGetDataBufferContentsAt returned None"
ctypes.memmove(data, buf, 1)
else:
raise IndexError("index out of range")
elif i < len(self):
if len(value) != 1:
raise ValueError("expected single byte for assignment")
value = str(value)
buf = ctypes.create_string_buffer(len(value))
data = core.BNGetDataBufferContentsAt(self.handle, int(i))
assert data is not None, "core.BNGetDataBufferContentsAt returned None"
ctypes.memmove(data, buf, 1)
else:
raise IndexError("index out of range")
def __str__(self):
    """Decode the buffer contents as UTF-8 text."""
    # `bytes(self)` already copies the full contents out of the core
    # buffer, so simply decode that copy.
    return bytes(self).decode('utf8')
def __bytes__(self):
    """Return a copy of the buffer contents as a ``bytes`` object."""
    length = len(self)
    contents = core.BNGetDataBufferContents(self.handle)
    assert contents is not None, "core.BNGetDataBufferContents returned None"
    scratch = ctypes.create_string_buffer(length)
    ctypes.memmove(scratch, contents, length)
    return scratch.raw
def __eq__(self, other: 'DataBuffer') -> bool:
    """Byte-for-byte content comparison.

    Note: this is an ordinary (short-circuiting) comparison, not a
    constant-time one, so it is not suitable for comparing secrets.
    """
    return len(self) == len(other) and bytes(self) == bytes(other)
def escape(self) -> str:
    """Return the buffer contents rendered as a backslash-escaped string."""
    return core.BNDataBufferToEscapedString(self.handle)

def unescape(self) -> 'DataBuffer':
    """Build a new DataBuffer by decoding this buffer's escaped string form."""
    return DataBuffer(handle=core.BNDecodeEscapedString(str(self)))

def base64_encode(self) -> str:
    """Return the buffer contents encoded as a base64 string."""
    return core.BNDataBufferToBase64(self.handle)

def base64_decode(self) -> 'DataBuffer':
    """Build a new DataBuffer by base64-decoding this buffer's string form."""
    return DataBuffer(handle=core.BNDecodeBase64(str(self)))

def zlib_compress(self) -> Optional['DataBuffer']:
    """Return a zlib-compressed copy of the buffer, or None if the core call fails."""
    buf = core.BNZlibCompress(self.handle)
    if buf is None:
        return None
    return DataBuffer(handle=buf)

def zlib_decompress(self) -> Optional['DataBuffer']:
    """Return a zlib-decompressed copy of the buffer, or None if the core call fails."""
    buf = core.BNZlibDecompress(self.handle)
    if buf is None:
        return None
    return DataBuffer(handle=buf)
def escape_string(text: bytes) -> str:
    """Module-level convenience wrapper around ``DataBuffer.escape()``."""
    return DataBuffer(text).escape()

def unescape_string(text: bytes) -> 'DataBuffer':
    """Module-level convenience wrapper around ``DataBuffer.unescape()``."""
    return DataBuffer(text).unescape()
| 35.16185 | 97 | 0.722012 |
d56659cd9f317aee4b0e161df851838a754f985b | 8,349 | py | Python | tests/test_config.py | jparsai/cvejob | 8f9462a1ecdf1d4de877ac5f44e772239ffcb379 | [
"Apache-2.0"
] | 8 | 2019-09-25T14:45:28.000Z | 2021-11-08T10:30:03.000Z | tests/test_config.py | jparsai/cvejob | 8f9462a1ecdf1d4de877ac5f44e772239ffcb379 | [
"Apache-2.0"
] | 113 | 2018-07-10T12:58:16.000Z | 2020-12-09T22:33:15.000Z | tests/test_config.py | jparsai/cvejob | 8f9462a1ecdf1d4de877ac5f44e772239ffcb379 | [
"Apache-2.0"
] | 12 | 2018-07-10T11:00:02.000Z | 2021-01-27T12:19:56.000Z | """Test for the config.py."""
# import pytest
import os
from cvejob.config import DefaultConfig, RuntimeConfig
def test_default_config_constructor():
    """DefaultConfig can be instantiated."""
    assert DefaultConfig() is not None


def test_default_config_attributes():
    """DefaultConfig exposes every expected configuration attribute."""
    config = DefaultConfig()
    expected_attributes = ("ecosystem", "cve_age", "feed_dir", "feed_names",
                           "date_range", "cve_id", "package_name",
                           "cpe2pkg_path", "pkgfile_dir", "use_nvdtoolkit",
                           "nvdtoolkit_export_dir")
    for attribute in expected_attributes:
        assert hasattr(config, attribute)


def test_default_config_attribute_values_nil():
    """Attributes without defaults must start out as None."""
    config = DefaultConfig()
    for attribute in ("feed_names", "date_range", "cve_id", "package_name"):
        assert getattr(config, attribute) is None


def test_default_config_attribute_values_not_nil():
    """Attributes with defaults must not be None."""
    config = DefaultConfig()
    for attribute in ("ecosystem", "cve_age", "feed_dir", "cpe2pkg_path",
                      "pkgfile_dir", "use_nvdtoolkit",
                      "nvdtoolkit_export_dir"):
        assert getattr(config, attribute) is not None


def test_runtime_config():
    """RuntimeConfig can be instantiated."""
    assert RuntimeConfig() is not None


def test_runtime_config_attributes():
    """RuntimeConfig wraps its settings in the private _config attribute."""
    config = RuntimeConfig()
    assert config is not None
    assert hasattr(config, "_config")
def unset_environment_variable(name):
    """Remove *name* from the environment and return its previous value.

    Returns None when the variable was not set.
    """
    previous = os.environ.get(name)
    if name in os.environ:
        del os.environ[name]
    return previous
def _restore_environment_variable(name, old_value):
    """Put the environment back the way a test found it.

    The previous version of these tests only restored the variable when it
    had originally been set; when it started out unset, the test's own value
    (e.g. 'foobar') leaked into the environment of every subsequent test.
    """
    if old_value is not None:
        os.environ[name] = old_value
    else:
        os.environ.pop(name, None)


def test_runtime_config_attribute_ecosystem():
    """Check the attributes handling for a class RuntimeConfig."""
    old_value = unset_environment_variable('CVEJOB_ECOSYSTEM')
    try:
        config = RuntimeConfig()
        assert config._config.ecosystem == 'python'

        os.environ['CVEJOB_ECOSYSTEM'] = 'foobar'
        config = RuntimeConfig()
        assert config._config.ecosystem == 'foobar'
    finally:
        _restore_environment_variable('CVEJOB_ECOSYSTEM', old_value)


def test_runtime_config_attribute_cve_age():
    """Check the attributes handling for a class RuntimeConfig."""
    old_value = unset_environment_variable('CVEJOB_CVE_AGE')
    try:
        config = RuntimeConfig()
        assert config._config.cve_age == 0

        os.environ['CVEJOB_CVE_AGE'] = '42'
        config = RuntimeConfig()
        assert config._config.cve_age == 42

        os.environ['CVEJOB_CVE_AGE'] = '-42'
        config = RuntimeConfig()
        assert config._config.cve_age == -42
    finally:
        _restore_environment_variable('CVEJOB_CVE_AGE', old_value)


def test_runtime_config_attribute_cvejob_feed_dir():
    """Check the attributes handling for a class RuntimeConfig."""
    old_value = unset_environment_variable('CVEJOB_FEED_DIR')
    try:
        config = RuntimeConfig()
        assert config._config.feed_dir == 'nvd-data/'

        os.environ['CVEJOB_FEED_DIR'] = 'directory1'
        config = RuntimeConfig()
        assert config._config.feed_dir == 'directory1'
    finally:
        _restore_environment_variable('CVEJOB_FEED_DIR', old_value)


def test_runtime_config_attribute_cvejob_feed_names():
    """Check the attributes handling for a class RuntimeConfig."""
    old_value = unset_environment_variable('CVEJOB_FEED_NAMES')
    try:
        config = RuntimeConfig()
        assert config._config.feed_names is None

        # TODO: the following test needs to be enabled after the fix in master branch
        # os.environ['CVEJOB_FEED_NAMES'] = 'name1'
        # config = RuntimeConfig()
        # assert config._config.feed_names == ['name1']

        # os.environ['CVEJOB_FEED_NAMES'] = 'name1,name2'
        # config = RuntimeConfig()
        # assert config._config.feed_names == ['name1', 'name2']
    finally:
        _restore_environment_variable('CVEJOB_FEED_NAMES', old_value)


def test_runtime_config_attribute_cvejob_date_range():
    """Check the attributes handling for a class RuntimeConfig."""
    old_value = unset_environment_variable('CVEJOB_DATE_RANGE')
    try:
        config = RuntimeConfig()
        assert config._config.date_range is None

        os.environ['CVEJOB_DATE_RANGE'] = '2017-01-01'
        config = RuntimeConfig()
        assert config._config.date_range == '2017-01-01'
    finally:
        _restore_environment_variable('CVEJOB_DATE_RANGE', old_value)


def test_runtime_config_attribute_cvejob_cve_id():
    """Check the attributes handling for a class RuntimeConfig."""
    old_value = unset_environment_variable('CVEJOB_CVE_ID')
    try:
        config = RuntimeConfig()
        assert config._config.cve_id is None

        os.environ['CVEJOB_CVE_ID'] = 'CVE1234'
        config = RuntimeConfig()
        assert config._config.cve_id == 'CVE1234'
    finally:
        _restore_environment_variable('CVEJOB_CVE_ID', old_value)


def test_runtime_config_attribute_cvejob_package_name():
    """Check the attributes handling for a class RuntimeConfig."""
    old_value = unset_environment_variable('CVEJOB_PACKAGE_NAME')
    try:
        config = RuntimeConfig()
        assert config._config.package_name is None

        os.environ['CVEJOB_PACKAGE_NAME'] = 'test_package'
        config = RuntimeConfig()
        assert config._config.package_name == 'test_package'
    finally:
        _restore_environment_variable('CVEJOB_PACKAGE_NAME', old_value)


def test_runtime_config_attribute_cvejob_cpe2pkg_path():
    """Check the attributes handling for a class RuntimeConfig."""
    old_value = unset_environment_variable('CVEJOB_CPE2PKG_PATH')
    try:
        config = RuntimeConfig()
        assert config._config.cpe2pkg_path == 'cpe2pkg.jar'

        os.environ['CVEJOB_CPE2PKG_PATH'] = 'cpe2pkg10.jar'
        config = RuntimeConfig()
        assert config._config.cpe2pkg_path == 'cpe2pkg10.jar'
    finally:
        _restore_environment_variable('CVEJOB_CPE2PKG_PATH', old_value)


def test_runtime_config_attribute_cvejob_pkgfile_dir():
    """Check the attributes handling for a class RuntimeConfig."""
    old_value = unset_environment_variable('CVEJOB_PKGFILE_DIR')
    try:
        config = RuntimeConfig()
        assert config._config.pkgfile_dir == 'data/'

        os.environ['CVEJOB_PKGFILE_DIR'] = 'cpe2pkg10.jar'
        config = RuntimeConfig()
        assert config._config.pkgfile_dir == 'cpe2pkg10.jar'
    finally:
        _restore_environment_variable('CVEJOB_PKGFILE_DIR', old_value)


def test_runtime_config_attribute_cvejob_use_nvd_toolkit():
    """Check the attributes handling for a class RuntimeConfig."""
    old_value = unset_environment_variable('CVEJOB_USE_NVD_TOOLKIT')
    try:
        config = RuntimeConfig()
        assert not config._config.use_nvdtoolkit

        # Truthy spellings must enable the toolkit.
        for flag in ('true', '1', 'yes'):
            os.environ['CVEJOB_USE_NVD_TOOLKIT'] = flag
            config = RuntimeConfig()
            assert config._config.use_nvdtoolkit

        # Falsy spellings must disable it again.
        for flag in ('false', '0', 'no'):
            os.environ['CVEJOB_USE_NVD_TOOLKIT'] = flag
            config = RuntimeConfig()
            assert not config._config.use_nvdtoolkit
    finally:
        _restore_environment_variable('CVEJOB_USE_NVD_TOOLKIT', old_value)


def test_runtime_config_attribute_cvejob_nvd_toolkit_export_dir():
    """Check the attributes handling for a class RuntimeConfig."""
    old_value = unset_environment_variable('CVEJOB_NVD_TOOLKIT_EXPORT_DIR')
    try:
        config = RuntimeConfig()
        assert config._config.nvdtoolkit_export_dir == 'export/'

        os.environ['CVEJOB_NVD_TOOLKIT_EXPORT_DIR'] = 'export2/'
        config = RuntimeConfig()
        assert config._config.nvdtoolkit_export_dir == 'export2/'
    finally:
        _restore_environment_variable('CVEJOB_NVD_TOOLKIT_EXPORT_DIR',
                                      old_value)
| 31.625 | 91 | 0.723081 |
d22499aa0e6e4caaa85f413833f872a4202ac727 | 2,748 | py | Python | tests/test_latex_formatter.py | eerimoq/pygments | 3cd60987c27d2228ac46bfa2648e280aaaf61fc1 | [
"BSD-2-Clause"
] | 940 | 2019-08-23T13:08:46.000Z | 2022-03-31T06:40:44.000Z | tests/test_latex_formatter.py | eerimoq/pygments | 3cd60987c27d2228ac46bfa2648e280aaaf61fc1 | [
"BSD-2-Clause"
] | 1,043 | 2019-08-22T12:22:28.000Z | 2022-03-31T20:26:02.000Z | tests/test_latex_formatter.py | eerimoq/pygments | 3cd60987c27d2228ac46bfa2648e280aaaf61fc1 | [
"BSD-2-Clause"
] | 488 | 2019-09-19T14:27:19.000Z | 2022-03-31T17:02:44.000Z | """
Pygments LaTeX formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import tempfile
from os import path
from textwrap import dedent
import pytest
from pygments.formatters import LatexFormatter
from pygments.formatters.latex import LatexEmbeddedLexer
from pygments.lexers import PythonLexer, PythonConsoleLexer
from pygments.token import Token
TESTDIR = path.dirname(path.abspath(__file__))
TESTFILE = path.join(TESTDIR, 'test_latex_formatter.py')


def test_valid_output():
    """Render this source file to LaTeX and verify `latex` can compile it.

    Skips when the `latex` binary is not installed.  Unlike the previous
    version, the working directory is always restored and the temporary
    .tex file is always removed, even when the latex run fails or the test
    is skipped.
    """
    with open(TESTFILE) as fp:
        tokensource = list(PythonLexer().get_tokens(fp.read()))
    fmt = LatexFormatter(full=True, encoding='latin1')

    handle, pathname = tempfile.mkstemp('.tex')
    # place all output files in /tmp too
    old_wd = os.getcwd()
    os.chdir(os.path.dirname(pathname))
    try:
        with os.fdopen(handle, 'wb') as tfile:
            fmt.format(tokensource, tfile)
        try:
            import subprocess
            po = subprocess.Popen(['latex', '-interaction=nonstopmode',
                                   pathname], stdout=subprocess.PIPE)
            ret = po.wait()
            output = po.stdout.read()
            po.stdout.close()
        except OSError as e:
            # latex not available
            pytest.skip(str(e))
        else:
            if ret:
                print(output)
            assert not ret, 'latex run reported errors'
    finally:
        # Always restore the working directory and remove the temp file,
        # even when compilation fails or the test is skipped.
        os.chdir(old_wd)
        os.unlink(pathname)
def test_embedded_lexer():
    """LatexEmbeddedLexer must emit Token.Escape for |...| spans at the top
    level, while pipes inside ordinary tokens (e.g. comments) stay literal."""
    # Latex surrounded by '|' should be Escaped
    lexer = LatexEmbeddedLexer('|', '|', PythonConsoleLexer())

    # similar to gh-1516
    src = dedent("""\
>>> x = 1
>>> y = mul(x, |$z^2$|) # these |pipes| are untouched
>>> y
|$1 + z^2$|""")
    assert list(lexer.get_tokens(src)) == [
        (Token.Generic.Prompt, '>>> '),
        (Token.Name, 'x'),
        (Token.Text, ' '),
        (Token.Operator, '='),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '1'),
        (Token.Text, '\n'),
        (Token.Generic.Prompt, '>>> '),
        (Token.Name, 'y'),
        (Token.Text, ' '),
        (Token.Operator, '='),
        (Token.Text, ' '),
        (Token.Name, 'mul'),
        (Token.Punctuation, '('),
        (Token.Name, 'x'),
        (Token.Punctuation, ','),
        (Token.Text, ' '),
        (Token.Escape, '$z^2$'),
        (Token.Punctuation, ')'),
        (Token.Text, ' '),
        (Token.Comment.Single, '# these |pipes| are untouched'),  # note: not Token.Escape
        (Token.Text, '\n'),
        (Token.Generic.Prompt, '>>> '),
        (Token.Name, 'y'),
        (Token.Text, '\n'),
        (Token.Escape, '$1 + z^2$'),
        (Token.Generic.Output, '\n'),
    ]
402f0d42088c7e66afd043028216dd5b76186cdc | 2,684 | py | Python | tests/contrib/openstack/test_alternatives.py | AurelienLourot/charm-helpers | b5725ac546372e7d4004d15095f79cdd5e7da687 | [
"Apache-2.0"
] | 15 | 2017-09-20T13:37:10.000Z | 2021-11-03T13:31:15.000Z | tests/contrib/openstack/test_alternatives.py | AurelienLourot/charm-helpers | b5725ac546372e7d4004d15095f79cdd5e7da687 | [
"Apache-2.0"
] | 313 | 2017-09-15T13:22:58.000Z | 2022-02-25T17:55:01.000Z | tests/contrib/openstack/test_alternatives.py | AurelienLourot/charm-helpers | b5725ac546372e7d4004d15095f79cdd5e7da687 | [
"Apache-2.0"
] | 136 | 2017-09-19T13:37:33.000Z | 2022-03-29T11:08:00.000Z | from testtools import TestCase
from mock import patch
import charmhelpers.contrib.openstack.alternatives as alternatives
# Fixture values shared by every test below.
NAME = 'test'
SOURCE = '/var/lib/charm/test/test.conf'
# NOTE(review): the comma in 'test,conf' looks like a typo for 'test.conf',
# but the tests treat this value as an opaque path, so behavior is unaffected.
TARGET = '/etc/test/test,conf'


class AlternativesTestCase(TestCase):
    """Tests for charmhelpers' update-alternatives wrappers.

    The @patch decorators are applied bottom-up, so the innermost patch
    (subprocess.check_call) is the first mock argument after self.
    """

    @patch('subprocess.os.path')
    @patch('subprocess.check_call')
    def test_new_alternative(self, _check, _path):
        """A fresh target installs the alternative with default priority 50."""
        _path.exists.return_value = False
        alternatives.install_alternative(NAME,
                                         TARGET,
                                         SOURCE)
        _check.assert_called_with(
            ['update-alternatives', '--force', '--install',
             TARGET, NAME, SOURCE, '50']
        )

    @patch('subprocess.os.path')
    @patch('subprocess.check_call')
    def test_priority(self, _check, _path):
        """An explicit priority is passed through to update-alternatives."""
        _path.exists.return_value = False
        alternatives.install_alternative(NAME,
                                         TARGET,
                                         SOURCE, 100)
        _check.assert_called_with(
            ['update-alternatives', '--force', '--install',
             TARGET, NAME, SOURCE, '100']
        )

    @patch('shutil.move')
    @patch('subprocess.os.path')
    @patch('subprocess.check_call')
    def test_new_alternative_existing_file(self, _check,
                                           _path, _move):
        """An existing regular file at TARGET is backed up to TARGET.bak."""
        _path.exists.return_value = True
        _path.islink.return_value = False
        alternatives.install_alternative(NAME,
                                         TARGET,
                                         SOURCE)
        _check.assert_called_with(
            ['update-alternatives', '--force', '--install',
             TARGET, NAME, SOURCE, '50']
        )
        _move.assert_called_with(TARGET, '{}.bak'.format(TARGET))

    @patch('shutil.move')
    @patch('subprocess.os.path')
    @patch('subprocess.check_call')
    def test_new_alternative_existing_link(self, _check,
                                           _path, _move):
        """An existing symlink at TARGET must not be backed up."""
        _path.exists.return_value = True
        _path.islink.return_value = True
        alternatives.install_alternative(NAME,
                                         TARGET,
                                         SOURCE)
        _check.assert_called_with(
            ['update-alternatives', '--force', '--install',
             TARGET, NAME, SOURCE, '50']
        )
        _move.assert_not_called()

    @patch('subprocess.check_call')
    def test_remove_alternative(self, _check):
        """Removing an alternative delegates to update-alternatives --remove."""
        alternatives.remove_alternative(NAME, SOURCE)
        _check.assert_called_with(
            ['update-alternatives', '--remove',
             NAME, SOURCE]
        )
| 34.857143 | 66 | 0.545455 |
cb9377df363351dccffd1155a14bdf0a9f4abcfc | 71,903 | py | Python | tensorflow/python/ops/rnn_cell_impl.py | konsoz/tensorflow | 684cfa3ee804855e817a96fa5a31f46dc9041b5b | [
"Apache-2.0"
] | 2 | 2019-02-12T01:37:54.000Z | 2019-09-17T18:20:54.000Z | tensorflow/python/ops/rnn_cell_impl.py | illaMcbender/tensorflow | a0b0a503287d019a28ef4f670b157eb3605a12f3 | [
"Apache-2.0"
] | 1 | 2019-02-22T00:50:13.000Z | 2019-02-22T00:50:13.000Z | tensorflow/python/ops/rnn_cell_impl.py | illaMcbender/tensorflow | a0b0a503287d019a28ef4f670b157eb3605a12f3 | [
"Apache-2.0"
] | 1 | 2021-05-21T15:00:04.000Z | 2021-05-21T15:00:04.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells.
This module provides a number of basic commonly used RNN cells, such as LSTM
(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
calling the `rnn` ops several times.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import numbers
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import activations
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
# Canonical variable names shared by the RNN cell implementations below.
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"

# This can be used with self.assertRaisesRegexp for assert_like_rnncell.
ASSERT_LIKE_RNNCELL_ERROR_REGEXP = "is not an RNNCell"
def _hasattr(obj, attr_name):
try:
getattr(obj, attr_name)
except AttributeError:
return False
else:
return True
def assert_like_rnncell(cell_name, cell):
"""Raises a TypeError if cell is not like an RNNCell.
NOTE: Do not rely on the error message (in particular in tests) which can be
subject to change to increase readability. Use
ASSERT_LIKE_RNNCELL_ERROR_REGEXP.
Args:
cell_name: A string to give a meaningful error referencing to the name
of the functionargument.
cell: The object which should behave like an RNNCell.
Raises:
TypeError: A human-friendly exception.
"""
conditions = [
_hasattr(cell, "output_size"),
_hasattr(cell, "state_size"),
_hasattr(cell, "get_initial_state") or _hasattr(cell, "zero_state"),
callable(cell),
]
errors = [
"'output_size' property is missing",
"'state_size' property is missing",
"either 'zero_state' or 'get_initial_state' method is required",
"is not callable"
]
if not all(conditions):
errors = [error for error, cond in zip(errors, conditions) if not cond]
raise TypeError("The argument {!r} ({}) is not an RNNCell: {}.".format(
cell_name, cell, ", ".join(errors)))
def _concat(prefix, suffix, static=False):
  """Concat that enables int, Tensor, or TensorShape values.

  This function takes a size specification, which can be an integer, a
  TensorShape, or a Tensor, and converts it into a concatenated Tensor
  (if static = False) or a list of integers (if static = True).

  Args:
    prefix: The prefix; usually the batch size (and/or time step size).
      (TensorShape, int, or Tensor.)
    suffix: TensorShape, int, or Tensor.
    static: If `True`, return a python list with possibly unknown dimensions.
      Otherwise return a `Tensor`.

  Returns:
    shape: the concatenation of prefix and suffix.

  Raises:
    ValueError: if `suffix` is not a scalar or vector (or TensorShape).
    ValueError: if prefix or suffix was `None` and asked for dynamic
      Tensors out.
  """
  if isinstance(prefix, ops.Tensor):
    p = prefix
    p_static = tensor_util.constant_value(prefix)
    if p.shape.ndims == 0:
      # Promote a scalar prefix to rank 1 so it can be concatenated.
      p = array_ops.expand_dims(p, 0)
    elif p.shape.ndims != 1:
      raise ValueError("prefix tensor must be either a scalar or vector, "
                       "but saw tensor: %s" % p)
  else:
    p = tensor_shape.as_shape(prefix)
    p_static = p.as_list() if p.ndims is not None else None
    # Only materialize a constant tensor when the shape is fully known;
    # otherwise the dynamic path below will reject a None value.
    p = (constant_op.constant(p.as_list(), dtype=dtypes.int32)
         if p.is_fully_defined() else None)
  if isinstance(suffix, ops.Tensor):
    s = suffix
    s_static = tensor_util.constant_value(suffix)
    if s.shape.ndims == 0:
      s = array_ops.expand_dims(s, 0)
    elif s.shape.ndims != 1:
      raise ValueError("suffix tensor must be either a scalar or vector, "
                       "but saw tensor: %s" % s)
  else:
    s = tensor_shape.as_shape(suffix)
    s_static = s.as_list() if s.ndims is not None else None
    s = (constant_op.constant(s.as_list(), dtype=dtypes.int32)
         if s.is_fully_defined() else None)
  if static:
    # Static path: combine the python-level shapes (entries may be None).
    shape = tensor_shape.as_shape(p_static).concatenate(s_static)
    shape = shape.as_list() if shape.ndims is not None else None
  else:
    if p is None or s is None:
      raise ValueError("Provided a prefix or suffix of None: %s and %s"
                       % (prefix, suffix))
    shape = array_ops.concat((p, s), 0)
  return shape
def _zero_state_tensors(state_size, batch_size, dtype):
  """Build a structure of zero tensors mirroring `state_size`.

  Every size entry `s` in the (possibly nested) `state_size` structure is
  turned into a zeros tensor of shape `[batch_size] + s` with `dtype`.
  """

  def _make_zeros(size_entry):
    """Materialize one zeros tensor for a single state-size entry."""
    dynamic_shape = _concat(batch_size, size_entry)
    zeros = array_ops.zeros(dynamic_shape, dtype=dtype)
    if not context.executing_eagerly():
      # In graph mode also attach the best-known static shape.
      zeros.set_shape(_concat(batch_size, size_entry, static=True))
    return zeros

  return nest.map_structure(_make_zeros, state_size)
@tf_export("nn.rnn_cell.RNNCell")
class RNNCell(base_layer.Layer):
  """Abstract object representing an RNN cell.

  Every `RNNCell` must have the properties below and implement `call` with
  the signature `(output, next_state) = call(input, state)`. The optional
  third input argument, `scope`, is allowed for backwards compatibility
  purposes; but should be left off for new subclasses.

  This definition of cell differs from the definition used in the literature.
  In the literature, 'cell' refers to an object with a single scalar output.
  This definition refers to a horizontal array of such units.

  An RNN cell, in the most abstract setting, is anything that has
  a state and performs some operation that takes a matrix of inputs.
  This operation results in an output matrix with `self.output_size` columns.
  If `self.state_size` is an integer, this operation also results in a new
  state matrix with `self.state_size` columns. If `self.state_size` is a
  (possibly nested tuple of) TensorShape object(s), then it should return a
  matching structure of Tensors having shape `[batch_size].concatenate(s)`
  for each `s` in `self.batch_size`.
  """

  def __init__(self, trainable=True, name=None, dtype=None, **kwargs):
    super(RNNCell, self).__init__(
        trainable=trainable, name=name, dtype=dtype, **kwargs)
    # Attribute that indicates whether the cell is a TF RNN cell, due the slight
    # difference between TF and Keras RNN cell.
    self._is_tf_rnn_cell = True

  def __call__(self, inputs, state, scope=None):
    """Run this RNN cell on inputs, starting from the given state.

    Args:
      inputs: `2-D` tensor with shape `[batch_size, input_size]`.
      state: if `self.state_size` is an integer, this should be a `2-D Tensor`
        with shape `[batch_size, self.state_size]`. Otherwise, if
        `self.state_size` is a tuple of integers, this should be a tuple
        with shapes `[batch_size, s] for s in self.state_size`.
      scope: VariableScope for the created subgraph; defaults to class name.

    Returns:
      A pair containing:

      - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
      - New state: Either a single `2-D` tensor, or a tuple of tensors matching
        the arity and shapes of `state`.
    """
    if scope is not None:
      with vs.variable_scope(scope,
                             custom_getter=self._rnn_get_variable) as scope:
        return super(RNNCell, self).__call__(inputs, state, scope=scope)
    else:
      # Cache a variable scope on the instance so repeated calls reuse the
      # same variables instead of creating a fresh scope every time.
      scope_attrname = "rnncell_scope"
      scope = getattr(self, scope_attrname, None)
      if scope is None:
        scope = vs.variable_scope(vs.get_variable_scope(),
                                  custom_getter=self._rnn_get_variable)
        setattr(self, scope_attrname, scope)
      with scope:
        return super(RNNCell, self).__call__(inputs, state)

  def _rnn_get_variable(self, getter, *args, **kwargs):
    # Custom getter: record every variable created under this cell on the
    # layer's trainable / non-trainable weight lists exactly once.
    variable = getter(*args, **kwargs)
    if context.executing_eagerly():
      trainable = variable._trainable  # pylint: disable=protected-access
    else:
      trainable = (
          variable in tf_variables.trainable_variables() or
          (isinstance(variable, tf_variables.PartitionedVariable) and
           list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable

  @property
  def state_size(self):
    """size(s) of state(s) used by this cell.

    It can be represented by an Integer, a TensorShape or a tuple of Integers
    or TensorShapes.
    """
    raise NotImplementedError("Abstract method")

  @property
  def output_size(self):
    """Integer or TensorShape: size of outputs produced by this cell."""
    raise NotImplementedError("Abstract method")

  def build(self, _):
    # This tells the parent Layer object that it's OK to call
    # self.add_variable() inside the call() method.
    pass

  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    """Validate batch_size/dtype against `inputs`, then build a zero state."""
    if inputs is not None:
      # Validate the given batch_size and dtype against inputs if provided.
      inputs = ops.convert_to_tensor(inputs, name="inputs")
      if batch_size is not None:
        if tensor_util.is_tensor(batch_size):
          static_batch_size = tensor_util.constant_value(
              batch_size, partial=True)
        else:
          static_batch_size = batch_size
        if inputs.shape.dims[0].value != static_batch_size:
          raise ValueError(
              "batch size from input tensor is different from the "
              "input param. Input tensor batch: {}, batch_size: {}".format(
                  inputs.shape.dims[0].value, batch_size))

      if dtype is not None and inputs.dtype != dtype:
        raise ValueError(
            "dtype from input tensor is different from the "
            "input param. Input tensor dtype: {}, dtype: {}".format(
                inputs.dtype, dtype))

      # Prefer the static batch size; fall back to the dynamic shape op.
      batch_size = inputs.shape.dims[0].value or array_ops.shape(inputs)[0]
      dtype = inputs.dtype
    if None in [batch_size, dtype]:
      raise ValueError(
          "batch_size and dtype cannot be None while constructing initial "
          "state: batch_size={}, dtype={}".format(batch_size, dtype))
    return self.zero_state(batch_size, dtype)

  def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      If `state_size` is an int or TensorShape, then the return value is a
      `N-D` tensor of shape `[batch_size, state_size]` filled with zeros.

      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
      the shapes `[batch_size, s]` for each s in `state_size`.
    """
    # Try to use the last cached zero_state. This is done to avoid recreating
    # zeros, especially when eager execution is enabled.
    state_size = self.state_size
    is_eager = context.executing_eagerly()
    if is_eager and _hasattr(self, "_last_zero_state"):
      (last_state_size, last_batch_size, last_dtype,
       last_output) = getattr(self, "_last_zero_state")
      if (last_batch_size == batch_size and
          last_dtype == dtype and
          last_state_size == state_size):
        return last_output
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      output = _zero_state_tensors(state_size, batch_size, dtype)
    if is_eager:
      self._last_zero_state = (state_size, batch_size, dtype, output)
    return output
class LayerRNNCell(RNNCell):
  """Subclass of RNNCells that act like proper `tf.Layer` objects.

  For backwards compatibility purposes, most `RNNCell` instances allow their
  `call` methods to instantiate variables via `tf.get_variable`. The underlying
  variable scope thus keeps track of any variables, and returning cached
  versions. This is atypical of `tf.layer` objects, which separate this
  part of layer building into a `build` method that is only called once.

  Here we provide a subclass for `RNNCell` objects that act exactly as
  `Layer` objects do. They must provide a `build` method and their
  `call` methods do not access Variables `tf.get_variable`.
  """

  def __call__(self, inputs, state, scope=None, *args, **kwargs):
    """Run this RNN cell on inputs, starting from the given state.

    Args:
      inputs: `2-D` tensor with shape `[batch_size, input_size]`.
      state: if `self.state_size` is an integer, this should be a `2-D Tensor`
        with shape `[batch_size, self.state_size]`. Otherwise, if
        `self.state_size` is a tuple of integers, this should be a tuple
        with shapes `[batch_size, s] for s in self.state_size`.
      scope: optional cell scope.
      *args: Additional positional arguments.
      **kwargs: Additional keyword arguments.

    Returns:
      A pair containing:

      - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
      - New state: Either a single `2-D` tensor, or a tuple of tensors matching
        the arity and shapes of `state`.
    """
    # Bypass RNNCell's variable capturing semantics for LayerRNNCell.
    # Instead, it is up to subclasses to provide a proper build
    # method. See the class docstring for more details.
    return base_layer.Layer.__call__(self, inputs, state, scope=scope,
                                     *args, **kwargs)
@tf_export(v1=["nn.rnn_cell.BasicRNNCell"])
class BasicRNNCell(LayerRNNCell):
  """The most basic RNN cell.

  Note that this cell is not optimized for performance. Please use
  `tf.contrib.cudnn_rnn.CudnnRNNTanh` for better performance on GPU.

  Args:
    num_units: int, The number of units in the RNN cell.
    activation: Nonlinearity to use. Default: `tanh`. It could also be string
      that is within Keras activation function names.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such
      cases.
    dtype: Default dtype of the layer (default of `None` means use the type
      of the first input). Required when `build` is called before `call`.
    **kwargs: Dict, keyword named properties for common layer attributes, like
      `trainable` etc when constructing the cell from configs of get_config().
  """

  @deprecated(None, "This class is equivalent as tf.keras.layers.SimpleRNNCell,"
              " and will be replaced by that in Tensorflow 2.0.")
  def __init__(self,
               num_units,
               activation=None,
               reuse=None,
               name=None,
               dtype=None,
               **kwargs):
    super(BasicRNNCell, self).__init__(
        _reuse=reuse, name=name, dtype=dtype, **kwargs)
    _check_supported_dtypes(self.dtype)
    if context.executing_eagerly() and context.num_gpus() > 0:
      logging.warn("%s: Note that this cell is not optimized for performance. "
                   "Please use tf.contrib.cudnn_rnn.CudnnRNNTanh for better "
                   "performance on GPU.", self)

    # Inputs must be 2-dimensional.
    self.input_spec = input_spec.InputSpec(ndim=2)

    self._num_units = num_units
    if activation:
      self._activation = activations.get(activation)
    else:
      self._activation = math_ops.tanh

  @property
  def state_size(self):
    """State is a single tensor of `num_units` columns."""
    return self._num_units

  @property
  def output_size(self):
    """Output width equals the state width (`num_units`)."""
    return self._num_units

  @tf_utils.shape_type_conversion
  def build(self, inputs_shape):
    """Create the kernel and bias variables once the input depth is known."""
    if inputs_shape[-1] is None:
      raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                       % str(inputs_shape))
    _check_supported_dtypes(self.dtype)
    input_depth = inputs_shape[-1]
    # Single kernel applied to the concatenation [inputs, state].
    self._kernel = self.add_variable(
        _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + self._num_units, self._num_units])
    self._bias = self.add_variable(
        _BIAS_VARIABLE_NAME,
        shape=[self._num_units],
        initializer=init_ops.zeros_initializer(dtype=self.dtype))
    self.built = True

  def call(self, inputs, state):
    """Most basic RNN: output = new_state = act(W * input + U * state + B)."""
    _check_rnn_cell_input_dtypes([inputs, state])
    gate_inputs = math_ops.matmul(
        array_ops.concat([inputs, state], 1), self._kernel)
    gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
    output = self._activation(gate_inputs)
    # Output and new state are the same tensor for this cell.
    return output, output

  def get_config(self):
    """Return the config needed to reconstruct this cell."""
    config = {
        "num_units": self._num_units,
        "activation": activations.serialize(self._activation),
        "reuse": self._reuse,
    }
    base_config = super(BasicRNNCell, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@tf_export(v1=["nn.rnn_cell.GRUCell"])
class GRUCell(LayerRNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnGRU` for better performance on GPU, or
`tf.contrib.rnn.GRUBlockCellV2` for better performance on CPU.
Args:
num_units: int, The number of units in the GRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight and
projection matrices.
bias_initializer: (optional) The initializer to use for the bias.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the type
of the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
`trainable` etc when constructing the cell from configs of get_config().
"""
@deprecated(None, "This class is equivalent as tf.keras.layers.GRUCell,"
" and will be replaced by that in Tensorflow 2.0.")
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None,
name=None,
dtype=None,
**kwargs):
super(GRUCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
_check_supported_dtypes(self.dtype)
if context.executing_eagerly() and context.num_gpus() > 0:
logging.warn("%s: Note that this cell is not optimized for performance. "
"Please use tf.contrib.cudnn_rnn.CudnnGRU for better "
"performance on GPU.", self)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
self._kernel_initializer = initializers.get(kernel_initializer)
self._bias_initializer = initializers.get(bias_initializer)
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
% str(inputs_shape))
_check_supported_dtypes(self.dtype)
input_depth = inputs_shape[-1]
self._gate_kernel = self.add_variable(
"gates/%s" % _WEIGHTS_VARIABLE_NAME,
shape=[input_depth + self._num_units, 2 * self._num_units],
initializer=self._kernel_initializer)
self._gate_bias = self.add_variable(
"gates/%s" % _BIAS_VARIABLE_NAME,
shape=[2 * self._num_units],
initializer=(
self._bias_initializer
if self._bias_initializer is not None
else init_ops.constant_initializer(1.0, dtype=self.dtype)))
self._candidate_kernel = self.add_variable(
"candidate/%s" % _WEIGHTS_VARIABLE_NAME,
shape=[input_depth + self._num_units, self._num_units],
initializer=self._kernel_initializer)
self._candidate_bias = self.add_variable(
"candidate/%s" % _BIAS_VARIABLE_NAME,
shape=[self._num_units],
initializer=(
self._bias_initializer
if self._bias_initializer is not None
else init_ops.zeros_initializer(dtype=self.dtype)))
self.built = True
def call(self, inputs, state):
"""Gated recurrent unit (GRU) with nunits cells."""
_check_rnn_cell_input_dtypes([inputs, state])
gate_inputs = math_ops.matmul(
array_ops.concat([inputs, state], 1), self._gate_kernel)
gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias)
value = math_ops.sigmoid(gate_inputs)
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
candidate = math_ops.matmul(
array_ops.concat([inputs, r_state], 1), self._candidate_kernel)
candidate = nn_ops.bias_add(candidate, self._candidate_bias)
c = self._activation(candidate)
new_h = u * state + (1 - u) * c
return new_h, new_h
def get_config(self):
config = {
"num_units": self._num_units,
"kernel_initializer": initializers.serialize(self._kernel_initializer),
"bias_initializer": initializers.serialize(self._bias_initializer),
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(GRUCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))
@tf_export("nn.rnn_cell.LSTMStateTuple")
class LSTMStateTuple(_LSTMStateTuple):
  """Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.
  Stores two elements: `(c, h)`, in that order. Where `c` is the hidden state
  and `h` is the output.
  Only used when `state_is_tuple=True`.
  """
  __slots__ = ()
  @property
  def dtype(self):
    """Common dtype of `c` and `h`; raises `TypeError` if they disagree."""
    c_dtype = self.c.dtype
    h_dtype = self.h.dtype
    if c_dtype != h_dtype:
      raise TypeError("Inconsistent internal state: %s vs %s" %
                      (str(c_dtype), str(h_dtype)))
    return c_dtype
@tf_export(v1=["nn.rnn_cell.BasicLSTMCell"])
class BasicLSTMCell(LayerRNNCell):
"""DEPRECATED: Please use `tf.nn.rnn_cell.LSTMCell` instead.
Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
For advanced models, please use the full `tf.nn.rnn_cell.LSTMCell`
that follows.
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
`tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
better performance on CPU.
"""
@deprecated(None, "This class is equivalent as tf.keras.layers.LSTMCell,"
" and will be replaced by that in Tensorflow 2.0.")
def __init__(self,
num_units,
forget_bias=1.0,
state_is_tuple=True,
activation=None,
reuse=None,
name=None,
dtype=None,
**kwargs):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
Must set to `0.0` manually when restoring from CudnnLSTM-trained
checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`. It
could also be string that is within Keras activation function names.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the type
of the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
`trainable` etc when constructing the cell from configs of get_config().
When restoring from CudnnLSTM-trained checkpoints, must use
`CudnnCompatibleLSTMCell` instead.
"""
super(BasicLSTMCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
_check_supported_dtypes(self.dtype)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if context.executing_eagerly() and context.num_gpus() > 0:
logging.warn("%s: Note that this cell is not optimized for performance. "
"Please use tf.contrib.cudnn_rnn.CudnnLSTM for better "
"performance on GPU.", self)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
@property
def state_size(self):
return (LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
% str(inputs_shape))
_check_supported_dtypes(self.dtype)
input_depth = inputs_shape[-1]
h_depth = self._num_units
self._kernel = self.add_variable(
_WEIGHTS_VARIABLE_NAME,
shape=[input_depth + h_depth, 4 * self._num_units])
self._bias = self.add_variable(
_BIAS_VARIABLE_NAME,
shape=[4 * self._num_units],
initializer=init_ops.zeros_initializer(dtype=self.dtype))
self.built = True
def call(self, inputs, state):
"""Long short-term memory cell (LSTM).
Args:
inputs: `2-D` tensor with shape `[batch_size, input_size]`.
state: An `LSTMStateTuple` of state tensors, each shaped
`[batch_size, num_units]`, if `state_is_tuple` has been set to
`True`. Otherwise, a `Tensor` shaped
`[batch_size, 2 * num_units]`.
Returns:
A pair containing the new hidden state, and the new state (either a
`LSTMStateTuple` or a concatenated state, depending on
`state_is_tuple`).
"""
_check_rnn_cell_input_dtypes([inputs, state])
sigmoid = math_ops.sigmoid
one = constant_op.constant(1, dtype=dtypes.int32)
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one)
gate_inputs = math_ops.matmul(
array_ops.concat([inputs, h], 1), self._kernel)
gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(
value=gate_inputs, num_or_size_splits=4, axis=one)
forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)
# Note that using `add` and `multiply` instead of `+` and `*` gives a
# performance improvement. So using those at the cost of readability.
add = math_ops.add
multiply = math_ops.multiply
new_c = add(multiply(c, sigmoid(add(f, forget_bias_tensor))),
multiply(sigmoid(i), self._activation(j)))
new_h = multiply(self._activation(new_c), sigmoid(o))
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat([new_c, new_h], 1)
return new_h, new_state
def get_config(self):
config = {
"num_units": self._num_units,
"forget_bias": self._forget_bias,
"state_is_tuple": self._state_is_tuple,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(BasicLSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export(v1=["nn.rnn_cell.LSTMCell"])
class LSTMCell(LayerRNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf
Felix Gers, Jurgen Schmidhuber, and Fred Cummins.
"Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
`tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
better performance on CPU.
"""
@deprecated(None, "This class is equivalent as tf.keras.layers.LSTMCell,"
" and will be replaced by that in Tensorflow 2.0.")
def __init__(self, num_units,
use_peepholes=False, cell_clip=None,
initializer=None, num_proj=None, proj_clip=None,
num_unit_shards=None, num_proj_shards=None,
forget_bias=1.0, state_is_tuple=True,
activation=None, reuse=None, name=None, dtype=None, **kwargs):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training. Must set it manually to `0.0` when restoring from
CudnnLSTM trained checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. This latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`. It
could also be string that is within Keras activation function names.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the type
of the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
`trainable` etc when constructing the cell from configs of get_config().
When restoring from CudnnLSTM-trained checkpoints, use
`CudnnCompatibleLSTMCell` instead.
"""
super(LSTMCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
_check_supported_dtypes(self.dtype)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if num_unit_shards is not None or num_proj_shards is not None:
logging.warn(
"%s: The num_unit_shards and proj_unit_shards parameters are "
"deprecated and will be removed in Jan 2017. "
"Use a variable scope with a partitioner instead.", self)
if context.executing_eagerly() and context.num_gpus() > 0:
logging.warn("%s: Note that this cell is not optimized for performance. "
"Please use tf.contrib.cudnn_rnn.CudnnLSTM for better "
"performance on GPU.", self)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializers.get(initializer)
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
if num_proj:
self._state_size = (
LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
% str(inputs_shape))
_check_supported_dtypes(self.dtype)
input_depth = inputs_shape[-1]
h_depth = self._num_units if self._num_proj is None else self._num_proj
maybe_partitioner = (
partitioned_variables.fixed_size_partitioner(self._num_unit_shards)
if self._num_unit_shards is not None
else None)
self._kernel = self.add_variable(
_WEIGHTS_VARIABLE_NAME,
shape=[input_depth + h_depth, 4 * self._num_units],
initializer=self._initializer,
partitioner=maybe_partitioner)
if self.dtype is None:
initializer = init_ops.zeros_initializer
else:
initializer = init_ops.zeros_initializer(dtype=self.dtype)
self._bias = self.add_variable(
_BIAS_VARIABLE_NAME,
shape=[4 * self._num_units],
initializer=initializer)
if self._use_peepholes:
self._w_f_diag = self.add_variable("w_f_diag", shape=[self._num_units],
initializer=self._initializer)
self._w_i_diag = self.add_variable("w_i_diag", shape=[self._num_units],
initializer=self._initializer)
self._w_o_diag = self.add_variable("w_o_diag", shape=[self._num_units],
initializer=self._initializer)
if self._num_proj is not None:
maybe_proj_partitioner = (
partitioned_variables.fixed_size_partitioner(self._num_proj_shards)
if self._num_proj_shards is not None
else None)
self._proj_kernel = self.add_variable(
"projection/%s" % _WEIGHTS_VARIABLE_NAME,
shape=[self._num_units, self._num_proj],
initializer=self._initializer,
partitioner=maybe_proj_partitioner)
self.built = True
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, must be 2-D, `[batch, input_size]`.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, [batch, state_size]`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch, output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
_check_rnn_cell_input_dtypes([inputs, state])
num_proj = self._num_units if self._num_proj is None else self._num_proj
sigmoid = math_ops.sigmoid
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
input_size = inputs.get_shape().with_rank(2).dims[1].value
if input_size is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = math_ops.matmul(
array_ops.concat([inputs, m_prev], 1), self._kernel)
lstm_matrix = nn_ops.bias_add(lstm_matrix, self._bias)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
# Diagonal connections
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev +
sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
m = math_ops.matmul(m, self._proj_kernel)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else
array_ops.concat([c, m], 1))
return m, new_state
def get_config(self):
config = {
"num_units": self._num_units,
"use_peepholes": self._use_peepholes,
"cell_clip": self._cell_clip,
"initializer": initializers.serialize(self._initializer),
"num_proj": self._num_proj,
"proj_clip": self._proj_clip,
"num_unit_shards": self._num_unit_shards,
"num_proj_shards": self._num_proj_shards,
"forget_bias": self._forget_bias,
"state_is_tuple": self._state_is_tuple,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(LSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs):
  """Like `nest.map_structure_up_to`, but also passes a running leaf index.

  `map_fn` is invoked as `map_fn(index, *leaves)` where `index` counts the
  visited leaves in traversal order, starting at 0.
  """
  counter = [0]  # mutable cell so the closure can increment it
  def _indexed_fn(*inner_args, **inner_kwargs):
    result = map_fn(counter[0], *inner_args, **inner_kwargs)
    counter[0] += 1
    return result
  return nest.map_structure_up_to(
      shallow_structure, _indexed_fn, *args, **kwargs)
def _default_dropout_state_filter_visitor(substate):
  """Default dropout state filter.

  Returns `LSTMStateTuple(c=False, h=True)` for LSTM states (the `c` memory
  is never dropped), `False` for `TensorArray`s, and `True` otherwise.
  """
  if isinstance(substate, tensor_array_ops.TensorArray):
    return False
  if isinstance(substate, LSTMStateTuple):
    # Do not perform dropout on the memory state.
    return LSTMStateTuple(c=False, h=True)
  return True
class _RNNCellWrapperV1(RNNCell):
  """Base class for cells wrappers V1 compatibility.
  This class along with `_RNNCellWrapperV2` allows to define cells wrappers that
  are compatible with V1 and V2, and defines helper methods for this purpose.
  """
  def __init__(self, cell):
    """Stores the wrapped `cell` and registers it for checkpointing."""
    super(_RNNCellWrapperV1, self).__init__()
    self._cell = cell
    # Track the wrapped cell so its variables are saved/restored with the
    # wrapper's checkpoints.
    if isinstance(cell, trackable.Trackable):
      self._track_trackable(self._cell, name="cell")
  def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Calls the wrapped cell and performs the wrapping logic.
    This method is called from the wrapper's `call` or `__call__` methods.
    Args:
      inputs: A tensor with wrapped cell's input.
      state: A tensor or tuple of tensors with wrapped cell's state.
      cell_call_fn: Wrapped cell's method to use for step computation (cell's
        `__call__` or 'call' method).
      **kwargs: Additional arguments.
    Returns:
      A pair containing:
      - Output: A tensor with cell's output.
      - New state: A tensor or tuple of tensors with new wrapped cell's state.
    """
    # Subclasses implement the actual wrapping behavior.
    raise NotImplementedError
  def __call__(self, inputs, state, scope=None):
    """Runs the RNN cell step computation.
    We assume that the wrapped RNNCell is being built within its `__call__`
    method. We directly use the wrapped cell's `__call__` in the overridden
    wrapper `__call__` method.
    This allows to use the wrapped cell and the non-wrapped cell equivalently
    when using `__call__`.
    Args:
      inputs: A tensor with wrapped cell's input.
      state: A tensor or tuple of tensors with wrapped cell's state.
      scope: VariableScope for the subgraph created in the wrapped cells'
        `__call__`.
    Returns:
      A pair containing:
      - Output: A tensor with cell's output.
      - New state: A tensor or tuple of tensors with new wrapped cell's state.
    """
    return self._call_wrapped_cell(
        inputs, state, cell_call_fn=self._cell.__call__, scope=scope)
class _RNNCellWrapperV2(LayerRNNCell, _RNNCellWrapperV1):
  """Base class for cells wrappers V2 compatibility.
  This class along with `_RNNCellWrapperV1` allows to define cells wrappers that
  are compatible with V1 and V2, and defines helper methods for this purpose.
  """
  def __init__(self, *args, **kwargs):
    """Forwards to the V1 wrapper init and exposes the cell as a sub-layer."""
    super(_RNNCellWrapperV2, self).__init__(*args, **kwargs)
    # Expose the wrapped cell as a sub-layer of this wrapper.
    self._layers = [self._cell]
  def call(self, inputs, state, **kwargs):
    """Runs the RNN cell step computation.
    When `call` is being used, we assume that the wrapper object has been built,
    and therefore the wrapped cells has been built via its `build` method and
    its `call` method can be used directly.
    This allows to use the wrapped cell and the non-wrapped cell equivalently
    when using `call` and `build`.
    Args:
      inputs: A tensor with wrapped cell's input.
      state: A tensor or tuple of tensors with wrapped cell's state.
      **kwargs: Additional arguments passed to the wrapped cell's `call`.
    Returns:
      A pair containing:
      - Output: A tensor with cell's output.
      - New state: A tensor or tuple of tensors with new wrapped cell's state.
    """
    return self._call_wrapped_cell(
        inputs, state, cell_call_fn=self._cell.call, **kwargs)
  def build(self, inputs_shape):
    """Builds the wrapped cell."""
    self._cell.build(inputs_shape)
    self.built = True
@tf_export(v1=["nn.rnn_cell.DropoutWrapper"])
class DropoutWrapper(_RNNCellWrapperV1):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
state_keep_prob=1.0, variational_recurrent=False,
input_size=None, dtype=None, seed=None,
dropout_state_filter_visitor=None):
"""Create a cell with added input, state, and/or output dropout.
If `variational_recurrent` is set to `True` (**NOT** the default behavior),
then the same dropout mask is applied at every step, as described in:
Y. Gal, Z Ghahramani. "A Theoretically Grounded Application of Dropout in
Recurrent Neural Networks". https://arxiv.org/abs/1512.05287
Otherwise a different dropout mask is applied at every time step.
Note, by default (unless a custom `dropout_state_filter` is provided),
the memory state (`c` component of any `LSTMStateTuple`) passing through
a `DropoutWrapper` is never modified. This behavior is described in the
above article.
Args:
cell: an RNNCell, a projection to output_size is added to it.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is constant and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is constant and 1, no output dropout will be added.
state_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is constant and 1, no output dropout will be added.
State dropout is performed on the outgoing states of the cell.
**Note** the state components to which dropout is applied when
`state_keep_prob` is in `(0, 1)` are also determined by
the argument `dropout_state_filter_visitor` (e.g. by default dropout
is never applied to the `c` component of an `LSTMStateTuple`).
variational_recurrent: Python bool. If `True`, then the same
dropout pattern is applied across all time steps per run call.
If this parameter is set, `input_size` **must** be provided.
input_size: (optional) (possibly nested tuple of) `TensorShape` objects
containing the depth(s) of the input tensors expected to be passed in to
the `DropoutWrapper`. Required and used **iff**
`variational_recurrent = True` and `input_keep_prob < 1`.
dtype: (optional) The `dtype` of the input, state, and output tensors.
Required and used **iff** `variational_recurrent = True`.
seed: (optional) integer, the randomness seed.
dropout_state_filter_visitor: (optional), default: (see below). Function
that takes any hierarchical level of the state and returns
a scalar or depth=1 structure of Python booleans describing
which terms in the state should be dropped out. In addition, if the
function returns `True`, dropout is applied across this sublevel. If
the function returns `False`, dropout is not applied across this entire
sublevel.
Default behavior: perform dropout on all terms except the memory (`c`)
state of `LSTMCellState` objects, and don't try to apply dropout to
`TensorArray` objects:
```
def dropout_state_filter_visitor(s):
if isinstance(s, LSTMCellState):
# Never perform dropout on the c state.
return LSTMCellState(c=False, h=True)
elif isinstance(s, TensorArray):
return False
return True
```
Raises:
TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided
but not `callable`.
ValueError: if any of the keep_probs are not between 0 and 1.
"""
super(DropoutWrapper, self).__init__(cell)
assert_like_rnncell("cell", cell)
if (dropout_state_filter_visitor is not None
and not callable(dropout_state_filter_visitor)):
raise TypeError("dropout_state_filter_visitor must be callable")
self._dropout_state_filter = (
dropout_state_filter_visitor or _default_dropout_state_filter_visitor)
with ops.name_scope("DropoutWrapperInit"):
def tensor_and_const_value(v):
tensor_value = ops.convert_to_tensor(v)
const_value = tensor_util.constant_value(tensor_value)
return (tensor_value, const_value)
for prob, attr in [(input_keep_prob, "input_keep_prob"),
(state_keep_prob, "state_keep_prob"),
(output_keep_prob, "output_keep_prob")]:
tensor_prob, const_prob = tensor_and_const_value(prob)
if const_prob is not None:
if const_prob < 0 or const_prob > 1:
raise ValueError("Parameter %s must be between 0 and 1: %d"
% (attr, const_prob))
setattr(self, "_%s" % attr, float(const_prob))
else:
setattr(self, "_%s" % attr, tensor_prob)
# Set variational_recurrent, seed before running the code below
self._variational_recurrent = variational_recurrent
self._seed = seed
self._recurrent_input_noise = None
self._recurrent_state_noise = None
self._recurrent_output_noise = None
if variational_recurrent:
if dtype is None:
raise ValueError(
"When variational_recurrent=True, dtype must be provided")
def convert_to_batch_shape(s):
# Prepend a 1 for the batch dimension; for recurrent
# variational dropout we use the same dropout mask for all
# batch elements.
return array_ops.concat(
([1], tensor_shape.TensorShape(s).as_list()), 0)
def batch_noise(s, inner_seed):
shape = convert_to_batch_shape(s)
return random_ops.random_uniform(shape, seed=inner_seed, dtype=dtype)
if (not isinstance(self._input_keep_prob, numbers.Real) or
self._input_keep_prob < 1.0):
if input_size is None:
raise ValueError(
"When variational_recurrent=True and input_keep_prob < 1.0 or "
"is unknown, input_size must be provided")
self._recurrent_input_noise = _enumerated_map_structure_up_to(
input_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("input", i)),
input_size)
self._recurrent_state_noise = _enumerated_map_structure_up_to(
cell.state_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("state", i)),
cell.state_size)
self._recurrent_output_noise = _enumerated_map_structure_up_to(
cell.output_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("output", i)),
cell.output_size)
def _gen_seed(self, salt_prefix, index):
if self._seed is None:
return None
salt = "%s_%d" % (salt_prefix, index)
string = (str(self._seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
@property
def wrapped_cell(self):
return self._cell
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def _variational_recurrent_dropout_value(
self, index, value, noise, keep_prob):
"""Performs dropout given the pre-calculated noise tensor."""
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob + noise
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = math_ops.div(value, keep_prob) * binary_tensor
ret.set_shape(value.get_shape())
return ret
  def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob,
               shallow_filtered_substructure=None):
    """Decides whether to perform standard dropout or recurrent dropout.

    Args:
      values: tensor or nested structure of tensors to apply dropout to.
      salt_prefix: string mixed into the per-op seed ("input"/"state"/"output").
      recurrent_noise: pre-sampled noise structure; only used when
        `self._variational_recurrent` is set.
      keep_prob: scalar keep probability.
      shallow_filtered_substructure: optional shallow structure of booleans
        selecting which leaves receive dropout; `None` means all leaves.

    Returns:
      A structure matching `values` with dropout applied to selected leaves.
    """
    if shallow_filtered_substructure is None:
      # Put something so we traverse the entire structure; inside the
      # dropout function we check to see if leafs of this are bool or not.
      shallow_filtered_substructure = values
    if not self._variational_recurrent:
      # Standard dropout: fresh mask each call, seeded per leaf index.
      def dropout(i, do_dropout, v):
        if not isinstance(do_dropout, bool) or do_dropout:
          return nn_ops.dropout(
              v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))
        else:
          return v
      return _enumerated_map_structure_up_to(
          shallow_filtered_substructure, dropout,
          *[shallow_filtered_substructure, values])
    else:
      # Variational dropout: reuse the noise sampled once at construction
      # so the same mask is applied at every time step.
      def dropout(i, do_dropout, v, n):
        if not isinstance(do_dropout, bool) or do_dropout:
          return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
        else:
          return v
      return _enumerated_map_structure_up_to(
          shallow_filtered_substructure, dropout,
          *[shallow_filtered_substructure, values, recurrent_noise])
  def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Runs the wrapped cell and applies dropout.

    Args:
      inputs: A tensor with wrapped cell's input.
      state: A tensor or tuple of tensors with wrapped cell's state.
      cell_call_fn: Wrapped cell's method to use for step computation (cell's
        `__call__` or 'call' method).
      **kwargs: Additional arguments.

    Returns:
      A pair containing:

      - Output: A tensor with cell's output.
      - New state: A tensor or tuple of tensors with new wrapped cell's state.
    """
    def _should_dropout(p):
      # Only a *constant* keep probability of exactly 1 disables dropout;
      # a Tensor keep_prob always goes through the dropout path.
      return (not isinstance(p, float)) or p < 1
    if _should_dropout(self._input_keep_prob):
      inputs = self._dropout(inputs, "input",
                             self._recurrent_input_noise,
                             self._input_keep_prob)
    output, new_state = cell_call_fn(inputs, state, **kwargs)
    if _should_dropout(self._state_keep_prob):
      # Identify which subsets of the state to perform dropout on and
      # which ones to keep.
      shallow_filtered_substructure = nest.get_traverse_shallow_structure(
          self._dropout_state_filter, new_state)
      new_state = self._dropout(new_state, "state",
                                self._recurrent_state_noise,
                                self._state_keep_prob,
                                shallow_filtered_substructure)
    if _should_dropout(self._output_keep_prob):
      output = self._dropout(output, "output",
                             self._recurrent_output_noise,
                             self._output_keep_prob)
    return output, new_state
@tf_export("rnn.DropoutWrapper", v1=[])
class DropoutWrapperV2(_RNNCellWrapperV2, DropoutWrapper):
  """Operator adding dropout to inputs and outputs of the given cell."""
  def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
               state_keep_prob=1.0, variational_recurrent=False,
               input_size=None, dtype=None, seed=None,
               dropout_state_filter_visitor=None):
    """Create a cell with added input, state, and/or output dropout.

    If `variational_recurrent` is set to `True` (**NOT** the default behavior),
    then the same dropout mask is applied at every step, as described in:
    Y. Gal, Z Ghahramani. "A Theoretically Grounded Application of Dropout in
    Recurrent Neural Networks". https://arxiv.org/abs/1512.05287
    Otherwise a different dropout mask is applied at every time step.

    Note, by default (unless a custom `dropout_state_filter` is provided),
    the memory state (`c` component of any `LSTMStateTuple`) passing through
    a `DropoutWrapper` is never modified. This behavior is described in the
    above article.

    Runs initialization in Keras style scope to use Keras-style variable
    management.

    Args:
      cell: a LayerRNNCell, a projection to output_size is added to it.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is constant and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is constant and 1, no output dropout will be added.
      state_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is constant and 1, no output dropout will be added.
        State dropout is performed on the outgoing states of the cell.
        **Note** the state components to which dropout is applied when
        `state_keep_prob` is in `(0, 1)` are also determined by
        the argument `dropout_state_filter_visitor` (e.g. by default dropout
        is never applied to the `c` component of an `LSTMStateTuple`).
      variational_recurrent: Python bool. If `True`, then the same
        dropout pattern is applied across all time steps per run call.
        If this parameter is set, `input_size` **must** be provided.
      input_size: (optional) (possibly nested tuple of) `TensorShape` objects
        containing the depth(s) of the input tensors expected to be passed in to
        the `DropoutWrapper`. Required and used **iff**
        `variational_recurrent = True` and `input_keep_prob < 1`.
      dtype: (optional) The `dtype` of the input, state, and output tensors.
        Required and used **iff** `variational_recurrent = True`.
      seed: (optional) integer, the randomness seed.
      dropout_state_filter_visitor: (optional), default: (see below). Function
        that takes any hierarchical level of the state and returns
        a scalar or depth=1 structure of Python booleans describing
        which terms in the state should be dropped out. In addition, if the
        function returns `True`, dropout is applied across this sublevel. If
        the function returns `False`, dropout is not applied across this entire
        sublevel.
        Default behavior: perform dropout on all terms except the memory (`c`)
        state of `LSTMCellState` objects, and don't try to apply dropout to
        `TensorArray` objects:
        ```
        def dropout_state_filter_visitor(s):
          if isinstance(s, LSTMCellState):
            # Never perform dropout on the c state.
            return LSTMCellState(c=False, h=True)
          elif isinstance(s, TensorArray):
            return False
          return True
        ```

    Raises:
      TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided
        but not `callable`.
      ValueError: if any of the keep_probs are not between 0 and 1.
    """
    # All actual dropout logic lives in the V1 DropoutWrapper base; this
    # subclass only wraps construction in a Keras-style scope.
    with base_layer.keras_style_scope():
      super(DropoutWrapperV2, self).__init__(
          cell=cell,
          input_keep_prob=input_keep_prob,
          output_keep_prob=output_keep_prob,
          state_keep_prob=state_keep_prob,
          variational_recurrent=variational_recurrent,
          input_size=input_size,
          dtype=dtype,
          seed=seed,
          dropout_state_filter_visitor=dropout_state_filter_visitor)
@tf_export(v1=["nn.rnn_cell.ResidualWrapper"])
class ResidualWrapper(_RNNCellWrapperV1):
  """RNNCell wrapper that ensures cell inputs are added to the outputs."""
  def __init__(self, cell, residual_fn=None):
    """Constructs a `ResidualWrapper` for `cell`.

    Args:
      cell: An instance of `RNNCell`.
      residual_fn: (Optional) The function to map raw cell inputs and raw cell
        outputs to the actual cell outputs of the residual network.
        Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs
        and outputs.
    """
    super(ResidualWrapper, self).__init__(cell)
    self._residual_fn = residual_fn
  @property
  def state_size(self):
    """State size of the wrapped cell (the residual adds no state)."""
    return self._cell.state_size
  @property
  def output_size(self):
    """Output size of the wrapped cell (the residual preserves shapes)."""
    return self._cell.output_size
  def zero_state(self, batch_size, dtype):
    """Return the wrapped cell's zero state under this wrapper's name scope."""
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      return self._cell.zero_state(batch_size, dtype)
  def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Run the cell and then apply the residual_fn on its inputs to its outputs.

    Args:
      inputs: cell inputs.
      state: cell state.
      cell_call_fn: Wrapped cell's method to use for step computation (cell's
        `__call__` or 'call' method).
      **kwargs: Additional arguments passed to the wrapped cell's `call`.

    Returns:
      Tuple of cell outputs and new state.

    Raises:
      TypeError: If cell inputs and outputs have different structure (type).
      ValueError: If cell inputs and outputs have different structure (value).
    """
    outputs, new_state = cell_call_fn(inputs, state, **kwargs)
    # Ensure shapes match
    def assert_shape_match(inp, out):
      inp.get_shape().assert_is_compatible_with(out.get_shape())
    # Default residual: element-wise sum, after validating that inputs and
    # outputs share the same nested structure and compatible shapes.
    def default_residual_fn(inputs, outputs):
      nest.assert_same_structure(inputs, outputs)
      nest.map_structure(assert_shape_match, inputs, outputs)
      return nest.map_structure(lambda inp, out: inp + out, inputs, outputs)
    res_outputs = (self._residual_fn or default_residual_fn)(inputs, outputs)
    return (res_outputs, new_state)
@tf_export("rnn.ResidualWrapper", v1=[])
class ResidualWrapperV2(_RNNCellWrapperV2, ResidualWrapper):
  """RNNCell wrapper that ensures cell inputs are added to the outputs."""
  def __init__(self, cell, residual_fn=None):
    """Constructs a `ResidualWrapperV2` for `cell`.

    Runs initialization in Keras style scope to use Keras-style variable
    management.

    Args:
      cell: An instance of `LayerRNNCell`.
      residual_fn: (Optional) The function to map raw cell inputs and raw cell
        outputs to the actual cell outputs of the residual network.
        Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs
        and outputs.
    """
    # The residual logic lives in the V1 base class; only construction differs.
    with base_layer.keras_style_scope():
      super(ResidualWrapperV2, self).__init__(
          cell=cell, residual_fn=residual_fn)
@tf_export("nn.rnn_cell.DeviceWrapper")
class DeviceWrapper(RNNCell):
  """Operator that ensures an RNNCell runs on a particular device."""
  def __init__(self, cell, device):
    """Construct a `DeviceWrapper` for `cell` with device `device`.

    Ensures the wrapped `cell` is called with `tf.device(device)`.

    Args:
      cell: An instance of `RNNCell`.
      device: A device string or function, for passing to `tf.device`.
    """
    super(DeviceWrapper, self).__init__()
    self._cell = cell
    # Track the wrapped cell so its variables are saved with this object
    # under object-based checkpointing.
    if isinstance(cell, trackable.Trackable):
      self._track_trackable(self._cell, name="cell")
    self._device = device
  @property
  def state_size(self):
    """State size of the wrapped cell."""
    return self._cell.state_size
  @property
  def output_size(self):
    """Output size of the wrapped cell."""
    return self._cell.output_size
  def zero_state(self, batch_size, dtype):
    """Return the wrapped cell's zero state, created on the pinned device."""
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      with ops.device(self._device):
        return self._cell.zero_state(batch_size, dtype)
  def __call__(self, inputs, state, scope=None):
    """Run the cell on specified device."""
    with ops.device(self._device):
      return self._cell(inputs, state, scope=scope)
@tf_export(v1=["nn.rnn_cell.MultiRNNCell"])
class MultiRNNCell(RNNCell):
  """RNN cell composed sequentially of multiple simple cells.

  Example:

  ```python
  num_units = [128, 64]
  cells = [BasicLSTMCell(num_units=n) for n in num_units]
  stacked_rnn_cell = MultiRNNCell(cells)
  ```
  """
  @deprecated(None, "This class is equivalent as "
              "tf.keras.layers.StackedRNNCells, and will be replaced by "
              "that in Tensorflow 2.0.")
  def __init__(self, cells, state_is_tuple=True):
    """Create a RNN cell composed sequentially of a number of RNNCells.

    Args:
      cells: list of RNNCells that will be composed in this order.
      state_is_tuple: If True, accepted and returned states are n-tuples, where
        `n = len(cells)`. If False, the states are all
        concatenated along the column axis. This latter behavior will soon be
        deprecated.

    Raises:
      ValueError: if cells is empty (not allowed), or at least one of the cells
        returns a state tuple but the flag `state_is_tuple` is `False`.
    """
    super(MultiRNNCell, self).__init__()
    if not cells:
      raise ValueError("Must specify at least one cell for MultiRNNCell.")
    if not nest.is_sequence(cells):
      raise TypeError(
          "cells must be a list or tuple, but saw: %s." % cells)
    # Duplicate cell objects share variables; warn once rather than fail.
    if len(set([id(cell) for cell in cells])) < len(cells):
      logging.log_first_n(logging.WARN,
                          "At least two cells provided to MultiRNNCell "
                          "are the same object and will share weights.", 1)
    self._cells = cells
    for cell_number, cell in enumerate(self._cells):
      # Add Trackable dependencies on these cells so their variables get
      # saved with this object when using object-based saving.
      if isinstance(cell, trackable.Trackable):
        # TODO(allenl): Track down non-Trackable callers.
        self._track_trackable(cell, name="cell-%d" % (cell_number,))
    self._state_is_tuple = state_is_tuple
    if not state_is_tuple:
      if any(nest.is_sequence(c.state_size) for c in self._cells):
        raise ValueError("Some cells return tuples of states, but the flag "
                         "state_is_tuple is not set. State sizes are: %s"
                         % str([c.state_size for c in self._cells]))
  @property
  def state_size(self):
    """Tuple of per-cell state sizes, or their sum when states are concatenated."""
    if self._state_is_tuple:
      return tuple(cell.state_size for cell in self._cells)
    else:
      return sum(cell.state_size for cell in self._cells)
  @property
  def output_size(self):
    """Output size of the last (topmost) cell in the stack."""
    return self._cells[-1].output_size
  def zero_state(self, batch_size, dtype):
    """Return zero states for every cell (a tuple, or one concatenated tensor)."""
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      if self._state_is_tuple:
        return tuple(cell.zero_state(batch_size, dtype) for cell in self._cells)
      else:
        # We know here that state_size of each cell is not a tuple and
        # presumably does not contain TensorArrays or anything else fancy
        return super(MultiRNNCell, self).zero_state(batch_size, dtype)
  @property
  def trainable_weights(self):
    """Trainable weights of all layer-like child cells, in stack order."""
    if not self.trainable:
      return []
    weights = []
    for cell in self._cells:
      if isinstance(cell, base_layer.Layer):
        weights += cell.trainable_weights
    return weights
  @property
  def non_trainable_weights(self):
    """Non-trainable weights; includes all weights when self is frozen."""
    weights = []
    for cell in self._cells:
      if isinstance(cell, base_layer.Layer):
        weights += cell.non_trainable_weights
    if not self.trainable:
      # When the whole stack is frozen, the (otherwise trainable) weights
      # are reported here as non-trainable, ordered before the rest.
      trainable_weights = []
      for cell in self._cells:
        if isinstance(cell, base_layer.Layer):
          trainable_weights += cell.trainable_weights
      return trainable_weights + weights
    return weights
  def call(self, inputs, state):
    """Run this multi-layer cell on inputs, starting from state."""
    cur_state_pos = 0
    cur_inp = inputs
    new_states = []
    for i, cell in enumerate(self._cells):
      with vs.variable_scope("cell_%d" % i):
        if self._state_is_tuple:
          if not nest.is_sequence(state):
            raise ValueError(
                "Expected state to be a tuple of length %d, but received: %s" %
                (len(self.state_size), state))
          cur_state = state[i]
        else:
          # Slice this cell's state out of the single concatenated tensor.
          cur_state = array_ops.slice(state, [0, cur_state_pos],
                                      [-1, cell.state_size])
          cur_state_pos += cell.state_size
        # Each cell's output feeds the next cell's input.
        cur_inp, new_state = cell(cur_inp, cur_state)
        new_states.append(new_state)
    new_states = (tuple(new_states) if self._state_is_tuple else
                  array_ops.concat(new_states, 1))
    return cur_inp, new_states
def _check_rnn_cell_input_dtypes(inputs):
  """Check whether the input tensors are with supported dtypes.

  Default RNN cells only support floats and complex as its dtypes since the
  activation function (tanh and sigmoid) only allow those types. This function
  will throw a proper error message if the inputs is not in a supported type.

  Args:
    inputs: tensor or nested structure of tensors that are feed to RNN cell as
      input or state.

  Raises:
    ValueError: if any of the input tensor are not having dtypes of float or
      complex.
  """
  # Flatten the (possibly nested) structure and validate each leaf tensor.
  for tensor in nest.flatten(inputs):
    _check_supported_dtypes(tensor.dtype)
def _check_supported_dtypes(dtype):
  """Raise ValueError unless `dtype` is None, floating-point, or complex."""
  if dtype is None:
    return
  dtype = dtypes.as_dtype(dtype)
  # Accept float/complex; everything else (int, bool, string, ...) fails.
  if dtype.is_floating or dtype.is_complex:
    return
  raise ValueError("RNN cell only supports floating point inputs, "
                   "but saw dtype: %s" % dtype)
| 40.191727 | 82 | 0.682781 |
685f34c317f317cdac68d5251abf0944d84782a4 | 1,833 | py | Python | tutorial2.py | Ziggareto/pytorch_learning | 49a24d670e76ccfa89883727b7e4c1dba1c6075d | [
"MIT"
] | null | null | null | tutorial2.py | Ziggareto/pytorch_learning | 49a24d670e76ccfa89883727b7e4c1dba1c6075d | [
"MIT"
] | null | null | null | tutorial2.py | Ziggareto/pytorch_learning | 49a24d670e76ccfa89883727b7e4c1dba1c6075d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 21:58:06 2020
@author: benja
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Small LeNet-style CNN: two conv/pool stages followed by three FC layers.

    Expects single-channel 32x32 inputs and produces 10 logits per sample.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Input: 1 x 32 x 32.
        self.conv1 = nn.Conv2d(1, 6, 3)   # 3x3 conv (no padding) -> 6 x 30 x 30
        # After 2x2 max-pool: 6 x 15 x 15.
        self.conv2 = nn.Conv2d(6, 16, 3)  # 3x3 conv -> 16 x 13 x 13
        # After 2x2 max-pool (floor): 16 x 6 x 6, hence 16*6*6 flat features.
        self.fc1 = nn.Linear(16 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Run the network on a (batch, 1, 32, 32) tensor; return (batch, 10) logits."""
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # pool over 2x2 window
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)       # scalar 2 == (2, 2)
        x = x.view(-1, self.num_flat_features(x))        # flatten per sample
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

    def num_flat_features(self, x):
        """Return the number of features per sample (product of all non-batch dims)."""
        return x[0].numel()
# One forward pass, MSE loss against a random target, and a single SGD step.
net = Net()
params = list(net.parameters())
my_input = torch.randn(1, 1, 32, 32)
out = net(my_input)
print(out)
# Random regression target reshaped to the network's (1, 10) output shape.
target = torch.randn(10)
target = target.view(1, -1)
criterion = nn.MSELoss()
loss = criterion(out, target)
print(loss)
# Clear any stale gradients before backprop.
net.zero_grad()
#print('conv1.bias.grad before backward')
#print(net.conv1.bias.grad)
#
#loss.backward()
#
#print('conv1.bias.grad after backward')
#print(net.conv1.bias.grad)
#
#learning_rate = 0.01
#for f in net.parameters():
#    f.data.sub_(f.grad.data * learning_rate)
import torch.optim as optim
# Manual-update code above is superseded by the optimizer-driven step below.
optimizer = optim.SGD(net.parameters(), lr=0.01)
optimizer.zero_grad()
loss.backward()
optimizer.step()
76f65633ee8d74296d3fbb7793f4b6b9382b53cb | 2,384 | py | Python | main/migrations/0002_create-members-group-with-permissions.py | cupracer/family-tools | 95a9f4d845fca4a00e2b666afc7eb791745121e7 | [
"MIT"
] | null | null | null | main/migrations/0002_create-members-group-with-permissions.py | cupracer/family-tools | 95a9f4d845fca4a00e2b666afc7eb791745121e7 | [
"MIT"
] | null | null | null | main/migrations/0002_create-members-group-with-permissions.py | cupracer/family-tools | 95a9f4d845fca4a00e2b666afc7eb791745121e7 | [
"MIT"
] | null | null | null | from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations
from housekeeping_book.models import AccountHolder, Category, Booking, PeriodicBooking
def make_permissions(apps, schema_editor):
    """Create the 'members' group and grant it the model permissions it needs.

    Intended to run as a data migration via ``migrations.RunPython``.

    Args:
        apps: the historical app registry supplied by the migration framework.
        schema_editor: unused, part of the ``RunPython`` signature.
    """
    # Ensure the default permissions exist before we look them up (they are
    # normally created in a post_migrate handler, which has not fired yet).
    emit_post_migrate_signal(2, False, 'default')
    Group = apps.get_model("auth", "Group")
    Permission = apps.get_model("auth", "Permission")
    ContentType = apps.get_model('contenttypes', 'ContentType')

    member, created = Group.objects.get_or_create(name='members')

    # (model, permission codenames) pairs granted to the 'members' group.
    grants = [
        (AccountHolder, ('add_accountholder', 'view_accountholder',
                         'delete_accountholder')),
        (Category, ('view_category',)),
        (Booking, ('view_booking', 'add_booking')),
        (PeriodicBooking, ('view_periodicbooking', 'add_periodicbooking')),
    ]
    for model, codenames in grants:
        content_type = ContentType.objects.get_for_model(model)
        for codename in codenames:
            member.permissions.add(
                Permission.objects.get(
                    content_type=content_type,
                    codename=codename,
                )
            )
class Migration(migrations.Migration):
    # Depends on the app's initial migration and on the housekeeping_book
    # schema so the referenced models exist when permissions are assigned.
    dependencies = [
        ('main', '0001_initial'),
        ('housekeeping_book', '0002_auto_20201012_0537'),
    ]
    operations = [
        # reverse_code is a no-op lambda so this data migration can be unapplied.
        migrations.RunPython(make_permissions, reverse_code=lambda *args, **kwargs: True)
    ]
| 27.402299 | 89 | 0.656879 |
d52095cc6fe0810691d0eac6bf419824acff8941 | 205 | py | Python | venv/lib/python3.4/site-packages/osa/__init__.py | zackszhu/SE343_Architecture-of-Enterprise-Applications | eae49d0c20ae4fc345e4d2dae8c053e8410729ad | [
"MIT"
] | null | null | null | venv/lib/python3.4/site-packages/osa/__init__.py | zackszhu/SE343_Architecture-of-Enterprise-Applications | eae49d0c20ae4fc345e4d2dae8c053e8410729ad | [
"MIT"
] | null | null | null | venv/lib/python3.4/site-packages/osa/__init__.py | zackszhu/SE343_Architecture-of-Enterprise-Applications | eae49d0c20ae4fc345e4d2dae8c053e8410729ad | [
"MIT"
] | null | null | null | # __init__.py - __init__ file, part of osa.
# Copyright 2013 Sergey Bozhenkov, boz at ipp.mpg.de
# Licensed under LGPLv3 or later, see the COPYING file.
__version__ = "1.6-p4"
from .client import Client
| 25.625 | 55 | 0.741463 |
4c2c0c88d1c53650c3f00b446da640534f1b564c | 53 | py | Python | src/questions/__init__.py | saadmk11/yourquery | 5bc64f91846908803becb4e0cb6fece417bbe49a | [
"MIT"
] | 4 | 2021-09-11T14:38:11.000Z | 2022-01-27T05:46:13.000Z | src/questions/__init__.py | saadmk11/yourquery | 5bc64f91846908803becb4e0cb6fece417bbe49a | [
"MIT"
] | 62 | 2021-09-25T13:32:25.000Z | 2021-10-20T13:48:25.000Z | src/questions/__init__.py | saadmk11/yourquery | 5bc64f91846908803becb4e0cb6fece417bbe49a | [
"MIT"
] | 2 | 2021-09-18T11:31:39.000Z | 2022-02-22T17:11:58.000Z | default_app_config = 'questions.apps.QuestionsConfig' | 53 | 53 | 0.867925 |
0e429a252689874a37d41ac5ee3446d9dc3c37e9 | 103,346 | py | Python | src/sage/modular/modform_hecketriangle/abstract_space.py | swewers/mein_sage | 0e4e2d14aab0a1a2e63292939a9baa997f0e986b | [
"BSL-1.0"
] | 4 | 2020-07-17T04:49:44.000Z | 2020-07-29T06:33:51.000Z | src/sage/modular/modform_hecketriangle/abstract_space.py | Ivo-Maffei/sage | 467fbc70a08b552b3de33d9065204ee9cbfb02c7 | [
"BSL-1.0"
] | 1 | 2020-04-18T16:30:43.000Z | 2020-04-18T16:30:43.000Z | src/sage/modular/modform_hecketriangle/abstract_space.py | dimpase/sage | 468f23815ade42a2192b0a9cd378de8fdc594dcd | [
"BSL-1.0"
] | null | null | null | r"""
Modular forms for Hecke triangle groups
AUTHORS:
- Jonas Jermann (2013): initial version
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2013-2014 Jonas Jermann <jjermann2@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.symbolic.all import i
from sage.rings.all import ZZ, QQ, infinity, AlgebraicField
from sage.rings.polynomial.polynomial_ring import is_PolynomialRing
from sage.rings.power_series_ring import is_PowerSeriesRing
from sage.rings.laurent_series_ring import is_LaurentSeriesRing
from sage.modules.free_module_element import is_FreeModuleElement
from sage.matrix.constructor import matrix
from sage.modules.free_module_element import vector
from sage.rings.all import Integer
from sage.structure.all import parent
from sage.misc.cachefunc import cached_method
from .abstract_ring import FormsRing_abstract
class FormsSpace_abstract(FormsRing_abstract):
r"""
Abstract (Hecke) forms space.
This should never be called directly. Instead one should
instantiate one of the derived classes of this class.
"""
from .element import FormsElement
Element = FormsElement
def __init__(self, group, base_ring, k, ep, n):
r"""
Abstract (Hecke) forms space.
INPUT:
- ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``)
- ``k`` -- The weight (default: `0`)
- ``ep`` -- The epsilon (default: ``None``).
If ``None``, then k*(n-2) has to be divisible by `2` and
``ep=(-1)^(k*(n-2)/2)`` is used.
- ``base_ring`` -- The base_ring (default: `\Z`).
OUTPUT:
The corresponding abstract (Hecke) forms space.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.space import ModularForms
sage: MF = ModularForms(n=5, base_ring=ZZ, k=6, ep=-1)
sage: MF
ModularForms(n=5, k=6, ep=-1) over Integer Ring
sage: MF.group()
Hecke triangle group for n = 5
sage: MF.base_ring()
Integer Ring
sage: MF.weight()
6
sage: MF.ep()
-1
sage: MF.has_reduce_hom()
True
sage: MF.is_homogeneous()
True
"""
#from space import canonical_parameters
#(group, base_ring, k, ep, n) = canonical_parameters(group, base_ring, k, ep, n)
super(FormsSpace_abstract, self).__init__(group=group, base_ring=base_ring, red_hom=True, n=n)
#self.register_embedding(self.hom(lambda f: f.parent().graded_ring()(f), codomain=self.graded_ring()))
self._weight = k
self._ep = ep
(self._l1,self._l2) = self.weight_parameters()
self._module = None
self._ambient_space = self
    def _repr_(self):
        r"""
        Return the string representation of ``self``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms
            sage: QuasiModularForms(n=4, k=2, ep=-1)
            QuasiModularForms(n=4, k=2, ep=-1) over Integer Ring
        """
        # The analytic-type prefix ("Quasi", "Weak", ...) is supplied by the
        # analytic type object; the remainder encodes (n, k, ep, base ring).
        return "{}Forms(n={}, k={}, ep={}) over {}".format(self._analytic_type.analytic_space_name(), self._group.n(), self._weight, self._ep, self._base_ring)
    def _latex_(self):
        r"""
        Return the LaTeX representation of ``self``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms
            sage: latex(QuasiWeakModularForms())
            QM^!_{ n=3 }(0,\ 1)(\Bold{Z})
        """
        from sage.misc.latex import latex
        return r"{}_{{ n={} }}({},\ {})({})".format(self._analytic_type.latex_space_name(), self._group.n(), self._weight, self._ep, latex(self._base_ring))
    def _element_constructor_(self, el):
        r"""
        Return ``el`` coerced into this forms space.

        ``el`` may be a forms ring element, a (Laurent/power) q-expansion,
        a coordinate vector (ambient or subspace), or a rational function
        in the generators of the polynomial ring.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import MeromorphicModularFormsRing
            sage: from sage.modular.modform_hecketriangle.space import ModularForms, QuasiWeakModularForms
            sage: MF = ModularForms(k=12, ep=1)
            sage: (x,y,z,d) = MF.pol_ring().gens()

            sage: Delta = MeromorphicModularFormsRing().Delta()
            sage: Delta.parent()
            MeromorphicModularFormsRing(n=3) over Integer Ring
            sage: MF(Delta)
            q - 24*q^2 + 252*q^3 - 1472*q^4 + O(q^5)
            sage: MF(Delta).parent() == MF
            True

            sage: E2 = MF.E2()
            sage: e2 = QuasiWeakModularForms(n=infinity, k=2, ep=-1)(E2)
            sage: e2
            1 - 24*q^2 - 72*q^4 + O(q^5)
            sage: e2.parent()
            QuasiWeakModularForms(n=+Infinity, k=2, ep=-1) over Integer Ring
            sage: e2.as_ring_element()
            (-f_i + 3*E2)/2

            sage: MF(x^3)
            1 + 720*q + 179280*q^2 + 16954560*q^3 + 396974160*q^4 + O(q^5)
            sage: MF(x^3).parent() == MF
            True

            sage: qexp = Delta.q_expansion(prec=2)
            sage: qexp
            q + O(q^2)
            sage: qexp.parent()
            Power Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
            sage: MF(qexp)
            q - 24*q^2 + 252*q^3 - 1472*q^4 + O(q^5)
            sage: MF(qexp) == MF(Delta)
            True

            sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)
            sage: QF.default_prec(2)
            sage: el2 = QF.quasi_part_gens(min_exp=-1)[4]
            sage: el2.reduced_parent()
            QuasiWeakModularForms(n=8, k=10/3, ep=-1) over Integer Ring
            sage: prec = QF.required_laurent_prec(min_exp=-1)
            sage: qexp2 = el2.q_expansion(prec=prec)
            sage: qexp2
            q^-1 - 19/(64*d) - 7497/(262144*d^2)*q + 15889/(8388608*d^3)*q^2 + 543834047/(1649267441664*d^4)*q^3 + 711869853/(43980465111040*d^5)*q^4 + O(q^5)
            sage: qexp2.parent()
            Laurent Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
            sage: QF(qexp2)
            q^-1 - 19/(64*d) - 7497/(262144*d^2)*q + O(q^2)
            sage: QF(qexp2).reduced_parent()
            QuasiWeakModularForms(n=8, k=10/3, ep=-1) over Integer Ring
            sage: QF(qexp2) == el2
            True

            sage: QF = QuasiWeakModularForms(n=infinity, k=2, ep=-1)
            sage: el3 = QF.f_i() + QF.f_i()^3/QF.E4()
            sage: prec = QF.required_laurent_prec(order_1=-1)
            sage: qexp3 = el3.q_expansion(prec=prec)
            sage: qexp3
            2 - 7/(4*d)*q + 195/(256*d^2)*q^2 - 903/(4096*d^3)*q^3 + 41987/(1048576*d^4)*q^4 - 181269/(33554432*d^5)*q^5 + O(q^6)
            sage: QF.construct_quasi_form(qexp3, check=False) == el3
            False
            sage: QF.construct_quasi_form(qexp3, order_1=-1) == el3
            True

            sage: MF([0,1]) == MF(Delta)
            True
            sage: MF([1,0]) == MF(x^3) - 720*MF(Delta)
            True

            sage: vec = MF(Delta).coordinate_vector()
            sage: vec
            (0, 1)
            sage: vec.parent()
            Vector space of dimension 2 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
            sage: vec in MF.module()
            True
            sage: MF(vec) == MF(Delta)
            True

            sage: subspace = MF.subspace([MF(Delta)])
            sage: subspace
            Subspace of dimension 1 of ModularForms(n=3, k=12, ep=1) over Integer Ring
            sage: subspace(MF(Delta)) == subspace(d*(x^3-y^2)) == subspace(qexp) == subspace([0,1]) == subspace(vec) == subspace.gen()
            True
            sage: subspace(MF(Delta)).parent() == subspace(d*(x^3-y^2)).parent() == subspace(qexp).parent() == subspace([0,1]).parent() == subspace(vec).parent()
            True
            sage: subspace([1]) == subspace.gen()
            True

            sage: ssvec = subspace(vec).coordinate_vector()
            sage: ssvec
            (1)
            sage: ssvec.parent()
            Vector space of dimension 1 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
            sage: ambvec = subspace(vec).ambient_coordinate_vector()
            sage: ambvec
            (0, 1)
            sage: ambvec.parent()
            Vector space of degree 2 and dimension 1 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
            Basis matrix:
            [0 1]
            sage: subspace(ambvec) == subspace(vec) and subspace(ambvec).parent() == subspace(vec).parent()
            True
        """
        from .graded_ring_element import FormsRingElement
        # Case 1: a forms ring element — convert via its rational function.
        if isinstance(el, FormsRingElement):
            if (self.hecke_n() == infinity and el.hecke_n() == ZZ(3)):
                # Special case n=3 -> n=infinity: rewrite the generators of
                # the n=3 ring in terms of the n=infinity generators.
                el_f = el._reduce_d()._rat
                (x,y,z,d) = self.pol_ring().gens()
                num_sub = el_f.numerator().subs(   x=(y**2 + 3*x)/ZZ(4), y=(9*x*y - y**3)/ZZ(8), z=(3*z - y)/ZZ(2))
                denom_sub = el_f.denominator().subs( x=(y**2 + 3*x)/ZZ(4), y=(9*x*y - y**3)/ZZ(8), z=(3*z - y)/ZZ(2))
                new_num = num_sub.numerator()*denom_sub.denominator()
                new_denom = denom_sub.numerator()*num_sub.denominator()
                el = self._rat_field(new_num) / self._rat_field(new_denom)
            elif self.group() == el.group():
                el = el._rat
            else:
                raise ValueError("{} has group {} != {}".format(el, el.group(), self.group()))
            return self.element_class(self, el)
        # This assumes that the series corresponds to a _weakly
        # holomorphic_ (quasi) form. It also assumes that the form is
        # holomorphic at -1 for n=infinity (this assumption however
        # can be changed in construct_form
        # resp. construct_quasi_form))
        P = parent(el)
        # Case 2: a q-expansion (Laurent or power series).
        if is_LaurentSeriesRing(P) or is_PowerSeriesRing(P):
            if (self.is_modular()):
                return self.construct_form(el)
            else:
                return self.construct_quasi_form(el)
        # Case 3: a vector in (the ambient) coordinate module.
        if is_FreeModuleElement(el) and (self.module() is P or self.ambient_module() is P):
            return self.element_from_ambient_coordinates(el)
        # Case 4 (subspaces only): a list/tuple/vector of subspace coordinates.
        if (not self.is_ambient()) and (isinstance(el, list) or isinstance(el, tuple) or is_FreeModuleElement(el)) and len(el) == self.rank():
            try:
                return self.element_from_coordinates(el)
            except (ArithmeticError, TypeError):
                pass
        # Case 5: anything coercible into the ambient coordinate module.
        if self.ambient_module() and self.ambient_module().has_coerce_map_from(P):
            return self.element_from_ambient_coordinates(self.ambient_module()(el))
        # Case 6: a list/tuple of ambient coordinates.
        if (isinstance(el,list) or isinstance(el, tuple)) and len(el) == self.degree():
            try:
                return self.element_from_ambient_coordinates(el)
            except (ArithmeticError, TypeError):
                pass
        # Fallback: interpret `el` directly as a rational function.
        return self.element_class(self, el)
    def _coerce_map_from_(self, S):
        r"""
        Return whether or not there exists a coercion from ``S`` to ``self``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms, ModularForms, CuspForms, ZeroForm
            sage: MF1 = QuasiWeakModularForms(n=4, base_ring=CC, k=0, ep=1)
            sage: MF2 = ModularForms(n=4, k=24, ep=1)
            sage: MF3 = ModularForms(n=4, k=24, ep=-1)
            sage: MF4 = CuspForms(n=4, k=0, ep=1)
            sage: MF5 = ZeroForm(n=4, k=10, ep=-1)
            sage: MF6 = QuasiWeakModularForms(n=3, k=24, ep=1)
            sage: MF7 = QuasiWeakModularForms(n=infinity, k=24, ep=1)
            sage: subspace1 = MF3.subspace([MF3.gen(0), MF3.gen(1)])
            sage: subspace2 = MF3.subspace([MF3.gen(2)])
            sage: subspace3 = MF3.subspace([MF3.gen(0), MF3.gen(0)+MF3.gen(2)])
            sage: MF2.has_coerce_map_from(MF3)
            False
            sage: MF1.has_coerce_map_from(MF4)
            True
            sage: MF4.has_coerce_map_from(MF5)
            True
            sage: MF4.has_coerce_map_from(ZZ)
            False
            sage: MF1.has_coerce_map_from(ZZ)
            True
            sage: MF7.has_coerce_map_from(MF6)
            True
            sage: MF7.has_coerce_map_from(MF2)
            False
            sage: MF3.has_coerce_map_from(subspace1)
            True
            sage: subspace1.has_coerce_map_from(MF3)
            False
            sage: subspace3.has_coerce_map_from(subspace1)
            False
            sage: subspace3.has_coerce_map_from(subspace2)
            True
        """
        from .space import ZeroForm
        from .subspace import SubSpaceForms
        # The zero space coerces into everything.
        if   ( isinstance(S, ZeroForm)):
            return True
        # Subspace into subspace: compare modules after moving S into the
        # same ambient space as self.
        elif ( isinstance(S, SubSpaceForms)\
            and isinstance(self, SubSpaceForms) ):
            if (self.ambient_space().has_coerce_map_from(S.ambient_space())):
                S2 = S.change_ambient_space(self.ambient_space())
                return self.module().has_coerce_map_from(S2.module())
            else:
                return False
        # Forms space into (ambient) forms space: the graded rings must
        # coerce and (weight, ep) must agree.
        elif ( isinstance(S, FormsSpace_abstract)\
            and self.graded_ring().has_coerce_map_from(S.graded_ring())\
            and S.weight()    == self._weight\
            and S.ep()        == self._ep\
            and not isinstance(self, SubSpaceForms)):
            return True
        # Otherwise: constants, via the coefficient ring (only if the space
        # contains its coefficient ring, i.e. weight 0, ep 1).
        else:
            return self.contains_coeff_ring() \
               and self.coeff_ring().has_coerce_map_from(S)
# Since forms spaces are modules instead of rings
# we have to manually define one().
# one() allows to take the power 0 of an element
    @cached_method
    def one(self):
        r"""
        Return the one element from the corresponding space of constant forms.

        .. NOTE:: The one element does not lie in ``self`` in general.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.space import CuspForms
            sage: MF = CuspForms(k=12)
            sage: MF.Delta()^0 == MF.one()
            True
            sage: (MF.Delta()^0).parent()
            ModularForms(n=3, k=0, ep=1) over Integer Ring
        """
        # Build 1 in the extended (holomorphic) graded ring, then reduce it
        # to its homogeneous component (weight 0, ep 1).
        return self.extend_type("holo", ring=True)(1).reduce()
def is_ambient(self):
    r"""
    Return whether ``self`` is an ambient space
    (i.e. not a proper subspace).

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(k=12)
        sage: MF.is_ambient()
        True
        sage: MF.subspace([MF.gen(0)]).is_ambient()
        False
    """
    # A space is ambient exactly when it is its own ambient space.
    return self == self._ambient_space
def ambient_space(self):
    r"""
    Return the ambient space of ``self`` (which is ``self``
    unless ``self`` is a proper subspace).

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(k=12)
        sage: MF.ambient_space()
        ModularForms(n=3, k=12, ep=1) over Integer Ring
        sage: MF.ambient_space() == MF
        True
        sage: subspace = MF.subspace([MF.gen(0)])
        sage: subspace
        Subspace of dimension 1 of ModularForms(n=3, k=12, ep=1) over Integer Ring
        sage: subspace.ambient_space() == MF
        True
    """
    return self._ambient_space
def module(self):
    r"""
    Return the free module (coordinate vector space) associated to ``self``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(k=12)
        sage: MF.module()
        Vector space of dimension 2 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        sage: subspace = MF.subspace([MF.gen(0)])
        sage: subspace.module()
        Vector space of degree 2 and dimension 1 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        Basis matrix:
        [1 0]
    """
    return self._module
def ambient_module(self):
    r"""
    Return the free module associated to the ambient space of ``self``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(k=12)
        sage: MF.ambient_module()
        Vector space of dimension 2 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        sage: MF.ambient_module() == MF.module()
        True
        sage: subspace = MF.subspace([MF.gen(0)])
        sage: subspace.ambient_module() == MF.module()
        True
    """
    # Equivalent to ``self._ambient_space._module``, routed through
    # the public accessors.
    return self.ambient_space().module()
def subspace(self, basis):
    r"""
    Return the subspace of ``self`` generated by ``basis``.

    INPUT:

    - ``basis`` -- A list of elements of ``self`` spanning the subspace.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(k=24)
        sage: MF.dimension()
        3
        sage: subspace = MF.subspace([MF.gen(0), MF.gen(1)])
        sage: subspace
        Subspace of dimension 2 of ModularForms(n=3, k=24, ep=1) over Integer Ring
    """
    # Imported locally to avoid a circular import with ``subspace.py``.
    from .subspace import SubSpaceForms

    return SubSpaceForms(self, basis)
def change_ring(self, new_base_ring):
    r"""
    Return the same space as ``self`` but over a new base ring ``new_base_ring``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import CuspForms
        sage: CuspForms(n=5, k=24).change_ring(CC)
        CuspForms(n=5, k=24, ep=1) over Complex Field with 53 bits of precision
    """
    # ``__class__.__base__`` strips the UniqueRepresentation wrapper so
    # that a fresh space is constructed over the new base ring.
    constructor = self.__class__.__base__
    return constructor(self.group(), new_base_ring, self.weight(), self.ep())
def construction(self):
    r"""
    Return a functor that constructs ``self`` (used by the coercion machinery).

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms
        sage: QuasiModularForms(n=4, k=2, ep=1, base_ring=CC).construction()
        (QuasiModularFormsFunctor(n=4, k=2, ep=1),
        BaseFacade(Complex Field with 53 bits of precision))

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF=ModularForms(k=12)
        sage: MF.subspace([MF.gen(1)]).construction()
        (FormsSubSpaceFunctor with 1 generator for the ModularFormsFunctor(n=3, k=12, ep=1), BaseFacade(Integer Ring))
    """
    from .functors import FormsSubSpaceFunctor, FormsSpaceFunctor, BaseFacade

    functor = FormsSpaceFunctor(self._analytic_type, self._group, self._weight, self._ep)
    # A proper subspace is reconstructed by wrapping the ambient space
    # functor together with the defining basis.
    if not self.is_ambient():
        functor = FormsSubSpaceFunctor(functor, self._basis)
    return (functor, BaseFacade(self._base_ring))
@cached_method
def weight(self):
    r"""
    Return the weight of (elements of) ``self``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms
        sage: QuasiModularForms(n=16, k=16/7, ep=-1).weight()
        16/7
    """
    return self._weight
@cached_method
def ep(self):
    r"""
    Return the multiplier (epsilon character) of (elements of) ``self``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms
        sage: QuasiModularForms(n=16, k=16/7, ep=-1).ep()
        -1
    """
    return self._ep
@cached_method
def contains_coeff_ring(self):
    r"""
    Return whether ``self`` contains its coefficient ring.

    This is the case exactly for holomorphic spaces of weight ``0``
    and multiplier ``1`` (the constants).

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms
        sage: QuasiModularForms(k=0, ep=1, n=8).contains_coeff_ring()
        True
        sage: QuasiModularForms(k=0, ep=-1, n=8).contains_coeff_ring()
        False
    """
    is_holomorphic_type = (self.AT("holo") <= self._analytic_type)
    has_trivial_weight = (self.weight() == QQ(0))
    has_trivial_ep = (self.ep() == ZZ(1))
    return is_holomorphic_type and has_trivial_weight and has_trivial_ep
def element_from_coordinates(self, vec):
    r"""
    If ``self`` has an associated free module, then return the element of ``self``
    corresponding to the given coordinate vector ``vec``. Otherwise raise an exception.

    INPUT:

    - ``vec`` -- A coordinate vector with respect to ``self.gens()``.

    OUTPUT:

    An element of ``self`` corresponding to the coordinate vector ``vec``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(k=24)
        sage: MF.dimension()
        3
        sage: el = MF.element_from_coordinates([1,1,1])
        sage: el
        1 + q + q^2 + 52611612*q^3 + 39019413208*q^4 + O(q^5)
        sage: el == MF.gen(0) + MF.gen(1) + MF.gen(2)
        True
        sage: el.parent() == MF
        True
        sage: subspace = MF.subspace([MF.gen(0), MF.gen(1)])
        sage: el = subspace.element_from_coordinates([1,1])
        sage: el
        1 + q + 52611660*q^3 + 39019412128*q^4 + O(q^5)
        sage: el == subspace.gen(0) + subspace.gen(1)
        True
        sage: el.parent() == subspace
        True
    """
    if not self.module():
        raise ValueError("No free module defined for {}".format(self))
    basis = self.gens()
    # A bare ``assert`` would be stripped under ``python -O``;
    # validate the input length explicitly instead.
    if len(vec) != len(basis):
        raise ValueError("The coordinate vector {} should have length {} (the dimension of {})!".format(vec, len(basis), self))
    # The empty sum gives 0, so this also handles the trivial case
    # (dimension 0).
    return self(sum(coeff * gen for coeff, gen in zip(vec, basis)))
def element_from_ambient_coordinates(self, vec):
    r"""
    If ``self`` has an associated free module, then return the element of ``self``
    corresponding to the given ``vec``. Otherwise raise an exception.

    INPUT:

    - ``vec`` -- An element of ``self.module()`` or ``self.ambient_module()``.

    OUTPUT:

    An element of ``self`` corresponding to ``vec``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(k=24)
        sage: MF.dimension()
        3
        sage: el = MF.element_from_ambient_coordinates([1,1,1])
        sage: el == MF.element_from_coordinates([1,1,1])
        True
        sage: el.parent() == MF
        True
        sage: subspace = MF.subspace([MF.gen(0), MF.gen(1)])
        sage: el = subspace.element_from_ambient_coordinates([1,1,0])
        sage: el
        1 + q + 52611660*q^3 + 39019412128*q^4 + O(q^5)
        sage: el.parent() == subspace
        True
    """
    # Build the element inside the ambient space first, then coerce
    # the result back into ``self``.
    ambient_element = self.ambient_space().element_from_coordinates(vec)
    return self(ambient_element)
def homogeneous_part(self, k, ep):
    r"""
    Since ``self`` already is a homogeneous component return ``self``
    unless the degree differs in which case a ``ValueError`` is raised.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiMeromorphicModularForms
        sage: MF = QuasiMeromorphicModularForms(n=6, k=4)
        sage: MF == MF.homogeneous_part(4,1)
        True
        sage: MF.homogeneous_part(5,1)
        Traceback (most recent call last):
        ...
        ValueError: QuasiMeromorphicModularForms(n=6, k=4, ep=1) over Integer Ring already is homogeneous with degree (4, 1) != (5, 1)!
    """
    # Guard clause: reject any degree other than our own.
    if k != self._weight or ep != self._ep:
        raise ValueError("{} already is homogeneous with degree ({}, {}) != ({}, {})!".format(self, self._weight, self._ep, k, ep))
    return self
def weight_parameters(self):
    r"""
    Check whether ``self`` has a valid weight and multiplier.

    If not then an exception is raised. Otherwise the two weight
    parameters corresponding to the weight and multiplier of ``self``
    are returned.

    The weight parameters are e.g. used to calculate dimensions
    or precisions of Fourier expansion.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import MeromorphicModularForms
        sage: MF = MeromorphicModularForms(n=18, k=-7, ep=-1)
        sage: MF.weight_parameters()
        (-3, 17)
        sage: (MF._l1, MF._l2) == MF.weight_parameters()
        True
        sage: (k, ep) = (MF.weight(), MF.ep())
        sage: n = MF.hecke_n()
        sage: k == 4*(n*MF._l1 + MF._l2)/(n-2) + (1-ep)*n/(n-2)
        True

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(n=5, k=12, ep=1)
        sage: MF.weight_parameters()
        (1, 4)
        sage: (MF._l1, MF._l2) == MF.weight_parameters()
        True
        sage: (k, ep) = (MF.weight(), MF.ep())
        sage: n = MF.hecke_n()
        sage: k == 4*(n*MF._l1 + MF._l2)/(n-2) + (1-ep)*n/(n-2)
        True
        sage: MF.dimension() == MF._l1 + 1
        True

        sage: MF = ModularForms(n=infinity, k=8, ep=1)
        sage: MF.weight_parameters()
        (2, 0)
        sage: MF.dimension() == MF._l1 + 1
        True
    """
    n = self._group.n()
    k = self._weight
    ep = self._ep
    # Invert the weight formula: ``num`` is the combined parameter
    # n*l1 + l2 (resp. its analog for n = infinity).
    if (n == infinity):
        num = (k-(1-ep)) / ZZ(4)
    else:
        num = (k-(1-ep)*ZZ(n)/ZZ(n-2)) * ZZ(n-2) / ZZ(4)
    # Only integral ``num`` corresponds to an actually occurring
    # (weight, multiplier) combination for the given group.
    if (num.is_integral()):
        num = ZZ(num)
        if (n == infinity):
            # TODO: Figure out what to do in this case
            # (l1 and l2 are no longer defined in an analog/unique way)
            #l2 = num % ZZ(2)
            #l1 = ((num-l2)/ZZ(2)).numerator()
            ## TODO: The correct generalization seems (l1,l2) = (0,num)
            l2 = ZZ(0)
            l1 = num
        else:
            # Split ``num`` as l1*n + l2 with 0 <= l2 < n.
            l2 = num % n
            l1 = ((num-l2)/n).numerator()
    else:
        raise ValueError("Invalid or non-occuring weight k={}, ep={}!".format(k,ep))
    return (l1, l2)
# TODO: this only makes sense for modular forms,
# resp. needs a big adjustment for quasi modular forms
def aut_factor(self, gamma, t):
    r"""
    The automorphy factor of ``self``.

    INPUT:

    - ``gamma`` -- An element of the group of ``self``.

    - ``t`` -- An element of the upper half plane.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(n=8, k=4, ep=1)
        sage: full_factor = lambda mat, t: (mat[1][0]*t+mat[1][1])**4
        sage: T = MF.group().T()
        sage: S = MF.group().S()
        sage: i = AlgebraicField()(i)
        sage: z = 1 + i/2

        sage: MF.aut_factor(S, z)
        3/2*I - 7/16
        sage: MF.aut_factor(-T^(-2), z)
        1
        sage: MF.aut_factor(MF.group().V(6), z)
        173.2640595631...? + 343.8133289126...?*I
        sage: MF.aut_factor(S, z) == full_factor(S, z)
        True
        sage: MF.aut_factor(T, z) == full_factor(T, z)
        True
        sage: MF.aut_factor(MF.group().V(6), z) == full_factor(MF.group().V(6), z)
        True

        sage: MF = ModularForms(n=7, k=14/5, ep=-1)
        sage: T = MF.group().T()
        sage: S = MF.group().S()

        sage: MF.aut_factor(S, z)
        1.3655215324256...? + 0.056805991182877...?*I
        sage: MF.aut_factor(-T^(-2), z)
        1
        sage: MF.aut_factor(S, z) == MF.ep() * (z/i)^MF.weight()
        True
        sage: MF.aut_factor(MF.group().V(6), z)
        13.23058830577...? + 15.71786610686...?*I
    """
    # Base cases: translations act trivially, the reflection S
    # contributes the factor ep * (t/i)^k.
    if (gamma.is_translation()):
        return ZZ(1)
    elif (gamma.is_reflection()):
        return self._ep * (t/AlgebraicField()(i))**self._weight
    else:
        # General case: decompose gamma into a word in S and T and
        # accumulate factors via the cocycle relation
        # j(AB, t) = j(A, B.t) * j(B, t).  Note that ``t`` is updated
        # in-place after each factor, so the statement order matters.
        L = [v for v in gamma.word_S_T()[0]]
        aut_f = ZZ(1)
        while (len(L) > 0):
            M = L.pop(-1)
            aut_f *= self.aut_factor(M, t)
            t = M.acton(t)
        return aut_f
@cached_method
def F_simple(self, order_1=ZZ(0)):
    r"""
    Return a (the most) simple normalized element of ``self``
    corresponding to the weight parameters ``l1=self._l1`` and
    ``l2=self._l2``. If the element does not lie in ``self`` the
    type of its parent is extended accordingly.

    The main part of the element is given by the ``(l1 - order_1)``-th power
    of ``f_inf``, up to a small holomorphic correction factor.

    INPUT:

    - ``order_1`` -- An integer (default: 0) denoting the desired order at
      ``-1`` in the case ``n = infinity``.
      If ``n != infinity`` the parameter is ignored.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import WeakModularForms
        sage: MF = WeakModularForms(n=18, k=-7, ep=-1)
        sage: MF.disp_prec(1)
        sage: MF.F_simple()
        q^-3 + 16/(81*d)*q^-2 - 4775/(104976*d^2)*q^-1 - 14300/(531441*d^3) + O(q)
        sage: MF.F_simple() == MF.f_inf()^MF._l1 * MF.f_rho()^MF._l2 * MF.f_i()
        True

        sage: from sage.modular.modform_hecketriangle.space import CuspForms, ModularForms
        sage: MF = CuspForms(n=5, k=2, ep=-1)
        sage: MF._l1
        -1
        sage: MF.F_simple().parent()
        WeakModularForms(n=5, k=2, ep=-1) over Integer Ring

        sage: MF = ModularForms(n=infinity, k=8, ep=1)
        sage: MF.F_simple().reduced_parent()
        ModularForms(n=+Infinity, k=8, ep=1) over Integer Ring
        sage: MF.F_simple()
        q^2 - 16*q^3 + 120*q^4 + O(q^5)
        sage: MF.F_simple(order_1=2)
        1 + 32*q + 480*q^2 + 4480*q^3 + 29152*q^4 + O(q^5)
    """
    # Generators of the rational function field: x, y, z correspond to
    # f_rho, f_i, E2 and d is the (possibly transcendental) parameter.
    (x,y,z,d) = self.rat_field().gens()
    n = self.hecke_n()
    if (n == infinity):
        order_1 = ZZ(order_1)
        order_inf = self._l1 - order_1
        finf_pol = d*(x - y**2)
        rat = finf_pol**order_inf * x**order_1 * y**(ZZ(1-self._ep)/ZZ(2))
    else:
        order_inf = self._l1
        # For finite n the order at -1 plays no role; it is set equal
        # to order_inf so the classification below works uniformly.
        order_1 = order_inf
        finf_pol = d*(x**n - y**2)
        rat = finf_pol**self._l1 * x**self._l2 * y**(ZZ(1-self._ep)/ZZ(2))
    # Choose the smallest analytic type that contains the element,
    # based on the orders at infinity and at -1.
    if (order_inf > 0 and order_1 > 0):
        new_space = self.extend_type("cusp")
    elif (order_inf >=0 and order_1 >= 0):
        new_space = self.extend_type("holo")
    else:
        new_space = self.extend_type("weak")
    return new_space(rat)
def Faber_pol(self, m, order_1=ZZ(0), fix_d = False, d_num_prec = None):
    r"""
    Return the ``m``'th Faber polynomial of ``self``.

    Namely a polynomial ``P(q)`` such that ``P(J_inv)*F_simple(order_1)``
    has a Fourier expansion of the form ``q^m + O(q^(order_inf + 1))``.
    where ``order_inf = self._l1 - order_1`` and ``d^(order_inf - m)*P(q)``
    is a monic polynomial of degree ``order_inf - m``.

    If ``n=infinity`` a non-trivial order of ``-1`` can be specified through the
    parameter ``order_1`` (default: 0). Otherwise it is ignored.

    The Faber polynomials are e.g. used to construct a basis of weakly holomorphic
    forms and to recover such forms from their initial Fourier coefficients.

    INPUT:

    - ``m`` -- An integer ``m <= order_inf = self._l1 - order_1``.

    - ``order_1`` -- The order at ``-1`` of F_simple (default: 0).
      This parameter is ignored if ``n != infinity``.

    - ``fix_d`` -- If ``False`` (default) a formal parameter is used for ``d``.
      If ``True`` then the numerical value of ``d`` is used
      (resp. an exact value if the group is arithmetic).
      Otherwise the given value is used for ``d``.

    - ``d_num_prec`` -- The precision to be used if a numerical value for ``d`` is substituted.
      Default: ``None`` in which case the default
      numerical precision of ``self.parent()`` is used.

    OUTPUT:

    The corresponding Faber polynomial ``P(q)``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import WeakModularForms
        sage: MF = WeakModularForms(n=5, k=62/3, ep=-1)
        sage: MF.weight_parameters()
        (2, 3)

        sage: MF.Faber_pol(2)
        1
        sage: MF.Faber_pol(1)
        1/d*q - 19/(100*d)
        sage: MF.Faber_pol(0)
        1/d^2*q^2 - 117/(200*d^2)*q + 9113/(320000*d^2)
        sage: MF.Faber_pol(-2)
        1/d^4*q^4 - 11/(8*d^4)*q^3 + 41013/(80000*d^4)*q^2 - 2251291/(48000000*d^4)*q + 1974089431/(4915200000000*d^4)
        sage: (MF.Faber_pol(2)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2)
        q^2 - 41/(200*d)*q^3 + O(q^4)
        sage: (MF.Faber_pol(1)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1)
        q + O(q^3)
        sage: (MF.Faber_pol(0)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1)
        1 + O(q^3)
        sage: (MF.Faber_pol(-2)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1)
        q^-2 + O(q^3)

        sage: MF.Faber_pol(2, fix_d=1)
        1
        sage: MF.Faber_pol(1, fix_d=1)
        q - 19/100
        sage: MF.Faber_pol(-2, fix_d=1)
        q^4 - 11/8*q^3 + 41013/80000*q^2 - 2251291/48000000*q + 1974089431/4915200000000
        sage: (MF.Faber_pol(2, fix_d=1)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=1)
        q^2 - 41/200*q^3 + O(q^4)
        sage: (MF.Faber_pol(-2)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1, fix_d=1)
        q^-2 + O(q^3)

        sage: MF = WeakModularForms(n=4, k=-2, ep=1)
        sage: MF.weight_parameters()
        (-1, 3)

        sage: MF.Faber_pol(-1)
        1
        sage: MF.Faber_pol(-2, fix_d=True)
        256*q - 184
        sage: MF.Faber_pol(-3, fix_d=True)
        65536*q^2 - 73728*q + 14364
        sage: (MF.Faber_pol(-1, fix_d=True)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=True)
        q^-1 + 80 + O(q)
        sage: (MF.Faber_pol(-2, fix_d=True)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=True)
        q^-2 + 400 + O(q)
        sage: (MF.Faber_pol(-3)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=True)
        q^-3 + 2240 + O(q)

        sage: MF = WeakModularForms(n=infinity, k=14, ep=-1)
        sage: MF.Faber_pol(3)
        1
        sage: MF.Faber_pol(2)
        1/d*q + 3/(8*d)
        sage: MF.Faber_pol(1)
        1/d^2*q^2 + 75/(1024*d^2)
        sage: MF.Faber_pol(0)
        1/d^3*q^3 - 3/(8*d^3)*q^2 + 3/(512*d^3)*q + 41/(4096*d^3)
        sage: MF.Faber_pol(-1)
        1/d^4*q^4 - 3/(4*d^4)*q^3 + 81/(1024*d^4)*q^2 + 9075/(8388608*d^4)
        sage: (MF.Faber_pol(-1)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1 + 1)
        q^-1 + O(q^4)

        sage: MF.Faber_pol(3, order_1=-1)
        1/d*q + 3/(4*d)
        sage: MF.Faber_pol(1, order_1=2)
        1
        sage: MF.Faber_pol(0, order_1=2)
        1/d*q - 3/(8*d)
        sage: MF.Faber_pol(-1, order_1=2)
        1/d^2*q^2 - 3/(4*d^2)*q + 81/(1024*d^2)
        sage: (MF.Faber_pol(-1, order_1=2)(MF.J_inv())*MF.F_simple(order_1=2)).q_expansion(prec=MF._l1 + 1)
        q^-1 - 9075/(8388608*d^4)*q^3 + O(q^4)
    """
    m = ZZ(m)
    if (self.hecke_n() == infinity):
        order_1 = ZZ(order_1)
        order_inf = self._l1 - order_1
    else:
        order_inf = self._l1
        order_1 = order_inf
    if (m > order_inf):
        raise ValueError("Invalid basis index: m = {} > {} = order_inf!".format(m, order_inf))

    # Working precision: enough terms to eliminate all exponents
    # down from order_inf - m.
    prec = 2*order_inf - m + 1
    d = self.get_d(fix_d=fix_d, d_num_prec=d_num_prec)
    q = self.get_q(prec=prec, fix_d=fix_d, d_num_prec=d_num_prec)

    simple_qexp = self.F_simple(order_1=order_1).q_expansion(prec=prec, fix_d=fix_d, d_num_prec=d_num_prec)
    J_qexp = self.J_inv().q_expansion(prec=order_inf - m, fix_d=fix_d, d_num_prec=d_num_prec)

    # The precision could be infinity, otherwise we could do this:
    #assert(temp_reminder.prec() == 1)
    temp_reminder = (1 / simple_qexp / q**(-m)).add_bigoh(1)

    # Greedy elimination: repeatedly cancel the lowest-order term of
    # the remainder using the matching power of J_inv (scaled by d).
    fab_pol = q.parent()([])
    while (len(temp_reminder.coefficients()) > 0):
        temp_coeff = temp_reminder.coefficients()[0]
        temp_exp = -temp_reminder.exponents()[0]
        fab_pol += temp_coeff * (q/d)**temp_exp

        temp_reminder -= temp_coeff * (J_qexp/d)**temp_exp
        # The first term is zero only up to numerical errors,
        # so we manually have to remove it
        if (not d.parent().is_exact()):
            temp_reminder=temp_reminder.truncate_neg(-temp_exp+1)

    return fab_pol.polynomial()
# very similar to Faber_pol: faber_pol(q)=Faber_pol(d*q)
def faber_pol(self, m, order_1=ZZ(0), fix_d = False, d_num_prec = None):
    r"""
    Return the `m`'th Faber polynomial of ``self``
    with a different normalization based on ``j_inv``
    instead of ``J_inv``.

    Namely a polynomial ``p(q)`` such that ``p(j_inv)*F_simple()``
    has a Fourier expansion of the form ``q^m + O(q^(order_inf + 1))``.
    where ``order_inf = self._l1 - order_1`` and ``p(q)`` is a
    monic polynomial of degree ``order_inf - m``.

    If ``n=infinity`` a non-trivial order of ``-1`` can be specified through the
    parameter ``order_1`` (default: 0). Otherwise it is ignored.

    The relation to ``Faber_pol`` is: ``faber_pol(q) = Faber_pol(d*q)``.

    INPUT:

    - ``m`` -- An integer ``m <= self._l1 - order_1``.

    - ``order_1`` -- The order at ``-1`` of ``F_simple`` (default: 0).
      This parameter is ignored if ``n != infinity``.

    - ``fix_d`` -- If ``False`` (default) a formal parameter is used for ``d``.
      If ``True`` then the numerical value of ``d`` is used
      (resp. an exact value if the group is arithmetic).
      Otherwise the given value is used for ``d``.

    - ``d_num_prec`` -- The precision to be used if a numerical value for ``d`` is substituted.
      Default: ``None`` in which case the default
      numerical precision of ``self.parent()`` is used.

    OUTPUT:

    The corresponding Faber polynomial ``p(q)``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import WeakModularForms
        sage: MF = WeakModularForms(n=5, k=62/3, ep=-1)
        sage: MF.weight_parameters()
        (2, 3)

        sage: MF.faber_pol(2)
        1
        sage: MF.faber_pol(1)
        q - 19/(100*d)
        sage: MF.faber_pol(0)
        q^2 - 117/(200*d)*q + 9113/(320000*d^2)
        sage: MF.faber_pol(-2)
        q^4 - 11/(8*d)*q^3 + 41013/(80000*d^2)*q^2 - 2251291/(48000000*d^3)*q + 1974089431/(4915200000000*d^4)
        sage: (MF.faber_pol(2)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2)
        q^2 - 41/(200*d)*q^3 + O(q^4)
        sage: (MF.faber_pol(1)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1)
        q + O(q^3)
        sage: (MF.faber_pol(0)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1)
        1 + O(q^3)
        sage: (MF.faber_pol(-2)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1)
        q^-2 + O(q^3)

        sage: MF = WeakModularForms(n=4, k=-2, ep=1)
        sage: MF.weight_parameters()
        (-1, 3)

        sage: MF.faber_pol(-1)
        1
        sage: MF.faber_pol(-2, fix_d=True)
        q - 184
        sage: MF.faber_pol(-3, fix_d=True)
        q^2 - 288*q + 14364
        sage: (MF.faber_pol(-1, fix_d=True)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=True)
        q^-1 + 80 + O(q)
        sage: (MF.faber_pol(-2, fix_d=True)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=True)
        q^-2 + 400 + O(q)
        sage: (MF.faber_pol(-3)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=True)
        q^-3 + 2240 + O(q)

        sage: MF = WeakModularForms(n=infinity, k=14, ep=-1)
        sage: MF.faber_pol(3)
        1
        sage: MF.faber_pol(2)
        q + 3/(8*d)
        sage: MF.faber_pol(1)
        q^2 + 75/(1024*d^2)
        sage: MF.faber_pol(0)
        q^3 - 3/(8*d)*q^2 + 3/(512*d^2)*q + 41/(4096*d^3)
        sage: MF.faber_pol(-1)
        q^4 - 3/(4*d)*q^3 + 81/(1024*d^2)*q^2 + 9075/(8388608*d^4)
        sage: (MF.faber_pol(-1)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1 + 1)
        q^-1 + O(q^4)

        sage: MF.faber_pol(3, order_1=-1)
        q + 3/(4*d)
        sage: MF.faber_pol(1, order_1=2)
        1
        sage: MF.faber_pol(0, order_1=2)
        q - 3/(8*d)
        sage: MF.faber_pol(-1, order_1=2)
        q^2 - 3/(4*d)*q + 81/(1024*d^2)
        sage: (MF.faber_pol(-1, order_1=2)(MF.j_inv())*MF.F_simple(order_1=2)).q_expansion(prec=MF._l1 + 1)
        q^-1 - 9075/(8388608*d^4)*q^3 + O(q^4)
    """
    m = ZZ(m)
    if (self.hecke_n() == infinity):
        order_1 = ZZ(order_1)
        order_inf = self._l1 - order_1
    else:
        order_inf = self._l1
        order_1 = order_inf
    if (m > order_inf):
        raise ValueError("Invalid basis index: m = {} > {} = order_inf!".format(m, order_inf))

    # Working precision: enough terms to eliminate all exponents
    # down from order_inf - m.
    prec = 2*order_inf - m + 1
    d = self.get_d(fix_d=fix_d, d_num_prec=d_num_prec)
    q = self.get_q(prec=prec, fix_d=fix_d, d_num_prec=d_num_prec)

    simple_qexp = self.F_simple(order_1=order_1).q_expansion(prec=prec, fix_d=fix_d, d_num_prec=d_num_prec)
    j_qexp = self.j_inv().q_expansion(prec=order_inf - m, fix_d=fix_d, d_num_prec=d_num_prec)

    # The precision could be infinity, otherwise we could do this:
    #assert(temp_reminder.prec() == 1)
    temp_reminder = (1 / simple_qexp / q**(-m)).add_bigoh(1)

    # Greedy elimination as in ``Faber_pol`` but without the ``d``
    # scaling, since ``j_inv = J_inv / d`` is used here.
    fab_pol = q.parent()([])
    while (len(temp_reminder.coefficients()) > 0):
        temp_coeff = temp_reminder.coefficients()[0]
        temp_exp = -temp_reminder.exponents()[0]
        fab_pol += temp_coeff*q**temp_exp

        temp_reminder -= temp_coeff*j_qexp**temp_exp
        # The first term is zero only up to numerical errors,
        # so we manually have to remove it
        if (not d.parent().is_exact()):
            temp_reminder=temp_reminder.truncate_neg(-temp_exp+1)

    return fab_pol.polynomial()
def F_basis_pol(self, m, order_1=ZZ(0)):
    r"""
    Return a polynomial corresponding to the basis element of
    the corresponding space of weakly holomorphic forms of
    the same degree as ``self``. The basis element is determined
    by the property that the Fourier expansion is of the form
    ``q^m + O(q^(order_inf + 1))``, where ``order_inf = self._l1 - order_1``.

    If ``n=infinity`` a non-trivial order of ``-1`` can be specified through
    the parameter ``order_1`` (default: 0). Otherwise it is ignored.

    INPUT:

    - ``m`` -- An integer ``m <= self._l1``.

    - ``order_1`` -- The order at ``-1`` of ``F_simple`` (default: 0).
      This parameter is ignored if ``n != infinity``.

    OUTPUT:

    A polynomial in ``x,y,z,d``, corresponding to ``f_rho, f_i, E2``
    and the (possibly) transcendental parameter ``d``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import WeakModularForms
        sage: MF = WeakModularForms(n=5, k=62/3, ep=-1)
        sage: MF.weight_parameters()
        (2, 3)

        sage: MF.F_basis_pol(2)
        x^13*y*d^2 - 2*x^8*y^3*d^2 + x^3*y^5*d^2
        sage: MF.F_basis_pol(1)
        (-81*x^13*y*d + 62*x^8*y^3*d + 19*x^3*y^5*d)/(-100)
        sage: MF.F_basis_pol(0)
        (141913*x^13*y + 168974*x^8*y^3 + 9113*x^3*y^5)/320000

        sage: MF(MF.F_basis_pol(2)).q_expansion(prec=MF._l1+2)
        q^2 - 41/(200*d)*q^3 + O(q^4)
        sage: MF(MF.F_basis_pol(1)).q_expansion(prec=MF._l1+1)
        q + O(q^3)
        sage: MF(MF.F_basis_pol(0)).q_expansion(prec=MF._l1+1)
        1 + O(q^3)
        sage: MF(MF.F_basis_pol(-2)).q_expansion(prec=MF._l1+1)
        q^-2 + O(q^3)
        sage: MF(MF.F_basis_pol(-2)).parent()
        WeakModularForms(n=5, k=62/3, ep=-1) over Integer Ring

        sage: MF = WeakModularForms(n=4, k=-2, ep=1)
        sage: MF.weight_parameters()
        (-1, 3)

        sage: MF.F_basis_pol(-1)
        x^3/(x^4*d - y^2*d)
        sage: MF.F_basis_pol(-2)
        (9*x^7 + 23*x^3*y^2)/(32*x^8*d^2 - 64*x^4*y^2*d^2 + 32*y^4*d^2)

        sage: MF(MF.F_basis_pol(-1)).q_expansion(prec=MF._l1+2)
        q^-1 + 5/(16*d) + O(q)
        sage: MF(MF.F_basis_pol(-2)).q_expansion(prec=MF._l1+2)
        q^-2 + 25/(4096*d^2) + O(q)

        sage: MF = WeakModularForms(n=infinity, k=14, ep=-1)
        sage: MF.F_basis_pol(3)
        -y^7*d^3 + 3*x*y^5*d^3 - 3*x^2*y^3*d^3 + x^3*y*d^3
        sage: MF.F_basis_pol(2)
        (3*y^7*d^2 - 17*x*y^5*d^2 + 25*x^2*y^3*d^2 - 11*x^3*y*d^2)/(-8)
        sage: MF.F_basis_pol(1)
        (-75*y^7*d + 225*x*y^5*d - 1249*x^2*y^3*d + 1099*x^3*y*d)/1024
        sage: MF.F_basis_pol(0)
        (41*y^7 - 147*x*y^5 - 1365*x^2*y^3 - 2625*x^3*y)/(-4096)
        sage: MF.F_basis_pol(-1)
        (-9075*y^9 + 36300*x*y^7 - 718002*x^2*y^5 - 4928052*x^3*y^3 - 2769779*x^4*y)/(8388608*y^2*d - 8388608*x*d)

        sage: MF.F_basis_pol(3, order_1=-1)
        (-3*y^9*d^3 + 16*x*y^7*d^3 - 30*x^2*y^5*d^3 + 24*x^3*y^3*d^3 - 7*x^4*y*d^3)/(-4*x)
        sage: MF.F_basis_pol(1, order_1=2)
        -x^2*y^3*d + x^3*y*d
        sage: MF.F_basis_pol(0, order_1=2)
        (-3*x^2*y^3 - 5*x^3*y)/(-8)
        sage: MF.F_basis_pol(-1, order_1=2)
        (-81*x^2*y^5 - 606*x^3*y^3 - 337*x^4*y)/(1024*y^2*d - 1024*x*d)
    """
    # x, y, z, d correspond to f_rho, f_i, E2 and the parameter d.
    (x,y,z,d) = self.rat_field().gens()
    n = self._group.n()

    # Combine F_simple (written directly as a rational expression in
    # x, y, d) with the Faber polynomial evaluated at J_inv.
    if (n ==infinity):
        order_1 = ZZ(order_1)
        order_inf = self._l1 - order_1
        finf_pol = d*(x-y**2)
        jinv_pol = x/(x-y**2)
        rat = finf_pol**order_inf * x**order_1 * y**(ZZ(1-self._ep)/ZZ(2)) * self.Faber_pol(m, order_1)(jinv_pol)
    else:
        order_inf = self._l1
        order_1 = order_inf
        finf_pol = d*(x**n-y**2)
        jinv_pol = x**n/(x**n-y**2)
        rat = finf_pol**order_inf * x**self._l2 * y**(ZZ(1-self._ep)/ZZ(2)) * self.Faber_pol(m)(jinv_pol)

    return rat
def F_basis(self, m, order_1=ZZ(0)):
    r"""
    Return a weakly holomorphic element of ``self``
    (extended if necessary) determined by the property that
    the Fourier expansion is of the form
    ``q^m + O(q^(order_inf + 1))``, where ``order_inf = self._l1 - order_1``.

    In particular for all ``m <= order_inf`` these elements form
    a basis of the space of weakly holomorphic modular forms
    of the corresponding degree in case ``n!=infinity``.

    If ``n=infinity`` a non-trivial order of ``-1`` can be specified through
    the parameter ``order_1`` (default: 0). Otherwise it is ignored.

    INPUT:

    - ``m`` -- An integer ``m <= self._l1``.

    - ``order_1`` -- The order at ``-1`` of ``F_simple`` (default: 0).
      This parameter is ignored if ``n != infinity``.

    OUTPUT:

    The corresponding element in (possibly an extension of) ``self``.
    Note that the order at ``-1`` of the resulting element may be
    bigger than ``order_1`` (rare).

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import WeakModularForms, CuspForms
        sage: MF = WeakModularForms(n=5, k=62/3, ep=-1)
        sage: MF.disp_prec(MF._l1+2)
        sage: MF.weight_parameters()
        (2, 3)

        sage: MF.F_basis(2)
        q^2 - 41/(200*d)*q^3 + O(q^4)
        sage: MF.F_basis(1)
        q - 13071/(640000*d^2)*q^3 + O(q^4)
        sage: MF.F_basis(0)
        1 - 277043/(192000000*d^3)*q^3 + O(q^4)
        sage: MF.F_basis(-2)
        q^-2 - 162727620113/(40960000000000000*d^5)*q^3 + O(q^4)
        sage: MF.F_basis(-2).parent() == MF
        True

        sage: MF = CuspForms(n=4, k=-2, ep=1)
        sage: MF.weight_parameters()
        (-1, 3)

        sage: MF.F_basis(-1).parent()
        WeakModularForms(n=4, k=-2, ep=1) over Integer Ring
        sage: MF.F_basis(-1).parent().disp_prec(MF._l1+2)
        sage: MF.F_basis(-1)
        q^-1 + 80 + O(q)
        sage: MF.F_basis(-2)
        q^-2 + 400 + O(q)

        sage: MF = WeakModularForms(n=infinity, k=14, ep=-1)
        sage: MF.F_basis(3)
        q^3 - 48*q^4 + O(q^5)
        sage: MF.F_basis(2)
        q^2 - 1152*q^4 + O(q^5)
        sage: MF.F_basis(1)
        q - 18496*q^4 + O(q^5)
        sage: MF.F_basis(0)
        1 - 224280*q^4 + O(q^5)
        sage: MF.F_basis(-1)
        q^-1 - 2198304*q^4 + O(q^5)

        sage: MF.F_basis(3, order_1=-1)
        q^3 + O(q^5)
        sage: MF.F_basis(1, order_1=2)
        q - 300*q^3 - 4096*q^4 + O(q^5)
        sage: MF.F_basis(0, order_1=2)
        1 - 24*q^2 - 2048*q^3 - 98328*q^4 + O(q^5)
        sage: MF.F_basis(-1, order_1=2)
        q^-1 - 18150*q^3 - 1327104*q^4 + O(q^5)
    """
    basis_pol = self.F_basis_pol(m, order_1=order_1)

    # Classify the analytic type of the result from the rational
    # expression: a factor of x in the numerator (for n = infinity)
    # resp. the sign of m decides between cusp/holo/weak.
    if (self.hecke_n() == infinity):
        (x,y,z,d) = self.pol_ring().gens()
        if (x.divides(basis_pol.numerator()) and m > 0):
            new_space = self.extend_type("cusp")
        elif (x.divides(basis_pol.denominator()) or m < 0):
            new_space = self.extend_type("weak")
        else:
            new_space = self.extend_type("holo")
    else:
        if (m > 0):
            new_space = self.extend_type("cusp")
        elif (m >= 0):
            new_space = self.extend_type("holo")
        else:
            new_space = self.extend_type("weak")

    return new_space(basis_pol)
def _canonical_min_exp(self, min_exp, order_1):
    r"""
    Return an adjusted value of ``min_exp`` and ``order_1`` corresponding
    to the analytic type of ``self``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import CuspForms
        sage: CF = CuspForms(n=5, k=16, ep=1)
        sage: CF._canonical_min_exp(-2, 0)
        (1, 0)

        sage: CF = CuspForms(n=infinity, k=10, ep=-1)
        sage: CF._canonical_min_exp(-2, -2)
        (1, 1)
    """
    min_exp = ZZ(min_exp)
    order_1 = ZZ(order_1)
    if self.is_holomorphic():
        # Holomorphic forms need non-negative orders; cusp forms
        # additionally vanish at the cusps, so the bound is 1.
        lower_bound = 1 if self.is_cuspidal() else 0
        min_exp = max(min_exp, lower_bound)
        order_1 = max(order_1, lower_bound)

    # The order at -1 is only meaningful for n = infinity.
    if self.hecke_n() != infinity:
        order_1 = ZZ(0)

    return (min_exp, order_1)
def quasi_part_gens(self, r=None, min_exp=0, max_exp=infinity, order_1=ZZ(0)):
    r"""
    Return a basis in ``self`` of the subspace of (quasi) weakly
    holomorphic forms which satisfy the specified properties on
    the quasi parts and the initial Fourier coefficient.

    INPUT:

    - ``r`` -- An integer or ``None`` (default), indicating
      the desired power of ``E2``. If ``r=None``
      then all possible powers (``r``) are
      chosen.

    - ``min_exp`` -- An integer giving a lower bound for the
      first non-trivial Fourier coefficient of the
      generators (default: 0).

    - ``max_exp`` -- An integer or ``infinity`` (default) giving
      an upper bound for the first non-trivial
      Fourier coefficient of the generators. If
      ``max_exp==infinity`` then no upper bound is
      assumed.

    - ``order_1`` -- A lower bound for the order at ``-1`` of all
      quasi parts of the basis elements (default:
      0). If ``n!=infinity`` this parameter is
      ignored.

    OUTPUT:

    A basis in ``self`` of the subspace of forms which are modular
    after dividing by ``E2^r`` and which have a Fourier expansion
    of the form ``q^m + O(q^(m+1))`` with ``min_exp <= m <=
    max_exp`` for each quasi part (and at least the specified
    order at ``-1`` in case ``n=infinity``). Note that linear
    combinations of forms/quasi parts maybe have a higher order at
    infinity than ``max_exp``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms
        sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)
        sage: QF.default_prec(1)
        sage: QF.quasi_part_gens(min_exp=-1)
        [q^-1 + O(q), 1 + O(q), q^-1 - 9/(128*d) + O(q), 1 + O(q), q^-1 - 19/(64*d) + O(q), q^-1 + 1/(64*d) + O(q)]
        sage: QF.quasi_part_gens(min_exp=-1, max_exp=-1)
        [q^-1 + O(q), q^-1 - 9/(128*d) + O(q), q^-1 - 19/(64*d) + O(q), q^-1 + 1/(64*d) + O(q)]
        sage: QF.quasi_part_gens(min_exp=-2, r=1)
        [q^-2 - 9/(128*d)*q^-1 - 261/(131072*d^2) + O(q), q^-1 - 9/(128*d) + O(q), 1 + O(q)]

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(k=36)
        sage: MF.quasi_part_gens(min_exp=2)
        [q^2 + 194184*q^4 + O(q^5), q^3 - 72*q^4 + O(q^5)]

        sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms
        sage: MF = QuasiModularForms(n=5, k=6, ep=-1)
        sage: MF.default_prec(2)
        sage: MF.dimension()
        3
        sage: MF.quasi_part_gens(r=0)
        [1 - 37/(200*d)*q + O(q^2)]
        sage: MF.quasi_part_gens(r=0)[0] == MF.E6()
        True
        sage: MF.quasi_part_gens(r=1)
        [1 + 33/(200*d)*q + O(q^2)]
        sage: MF.quasi_part_gens(r=1)[0] == MF.E2()*MF.E4()
        True
        sage: MF.quasi_part_gens(r=2)
        []
        sage: MF.quasi_part_gens(r=3)
        [1 - 27/(200*d)*q + O(q^2)]
        sage: MF.quasi_part_gens(r=3)[0] == MF.E2()^3
        True

        sage: from sage.modular.modform_hecketriangle.space import QuasiCuspForms, CuspForms
        sage: MF = QuasiCuspForms(n=5, k=18, ep=-1)
        sage: MF.default_prec(4)
        sage: MF.dimension()
        8
        sage: MF.quasi_part_gens(r=0)
        [q - 34743/(640000*d^2)*q^3 + O(q^4), q^2 - 69/(200*d)*q^3 + O(q^4)]
        sage: MF.quasi_part_gens(r=1)
        [q - 9/(200*d)*q^2 + 37633/(640000*d^2)*q^3 + O(q^4),
        q^2 + 1/(200*d)*q^3 + O(q^4)]
        sage: MF.quasi_part_gens(r=2)
        [q - 1/(4*d)*q^2 - 24903/(640000*d^2)*q^3 + O(q^4)]
        sage: MF.quasi_part_gens(r=3)
        [q + 1/(10*d)*q^2 - 7263/(640000*d^2)*q^3 + O(q^4)]
        sage: MF.quasi_part_gens(r=4)
        [q - 11/(20*d)*q^2 + 53577/(640000*d^2)*q^3 + O(q^4)]
        sage: MF.quasi_part_gens(r=5)
        [q - 1/(5*d)*q^2 + 4017/(640000*d^2)*q^3 + O(q^4)]

        sage: MF.quasi_part_gens(r=1)[0] == MF.E2() * CuspForms(n=5, k=16, ep=1).gen(0)
        True
        sage: MF.quasi_part_gens(r=1)[1] == MF.E2() * CuspForms(n=5, k=16, ep=1).gen(1)
        True
        sage: MF.quasi_part_gens(r=3)[0] == MF.E2()^3 * MF.Delta()
        True

        sage: MF = QuasiCuspForms(n=infinity, k=18, ep=-1)
        sage: MF.quasi_part_gens(r=1, min_exp=-2) == MF.quasi_part_gens(r=1, min_exp=1)
        True
        sage: MF.quasi_part_gens(r=1)
        [q - 8*q^2 - 8*q^3 + 5952*q^4 + O(q^5),
        q^2 - 8*q^3 + 208*q^4 + O(q^5),
        q^3 - 16*q^4 + O(q^5)]

        sage: MF = QuasiWeakModularForms(n=infinity, k=4, ep=1)
        sage: MF.quasi_part_gens(r=2, min_exp=2, order_1=-2)[0] == MF.E2()^2 * MF.E4()^(-2) * MF.f_inf()^2
        True
        sage: [v.order_at(-1) for v in MF.quasi_part_gens(r=0, min_exp=2, order_1=-2)]
        [-2, -2]
    """
    if (not self.is_weakly_holomorphic()):
        from warnings import warn
        warn("This function only determines generators of (quasi) weakly modular forms!")

    (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)

    # For modular forms spaces the quasi parts are all zero except for r=0
    if (self.is_modular()):
        r = ZZ(r)
        if (r != 0):
            return []

    # The lower bounds on the powers of f_inf and E4 determine
    # how large powers of E2 we can fit in...
    n = self.hecke_n()
    if (n == infinity):
        max_numerator_weight = self._weight - 4*min_exp - 4*order_1 + 4
    else:
        max_numerator_weight = self._weight - 4*n/(n-2)*min_exp + 4

    # If r is not specified we gather all generators for all possible r's
    if (r is None):
        gens = []
        for rnew in range(ZZ(0), QQ(max_numerator_weight/ZZ(2)).floor() + 1):
            gens += self.quasi_part_gens(r=rnew, min_exp=min_exp, max_exp=max_exp, order_1=order_1)
        return gens

    r = ZZ(r)
    if (r < 0 or 2*r > max_numerator_weight):
        return []

    E2 = self.E2()
    # The ambient weak space of the modular (non-quasi) part that
    # remains after dividing by E2^r.
    ambient_weak_space = self.graded_ring().reduce_type("weak", degree=(self._weight-QQ(2*r), self._ep*(-1)**r))
    order_inf = ambient_weak_space._l1 - order_1

    if (max_exp == infinity):
        max_exp = order_inf
    elif (max_exp < min_exp):
        return []
    else:
        max_exp = min(ZZ(max_exp), order_inf)

    # Multiply each weak basis element back by E2^r and coerce
    # the product into ``self``.
    gens = []
    for m in range(min_exp, max_exp + 1):
        gens += [ self(ambient_weak_space.F_basis(m, order_1=order_1)*E2**r) ]

    return gens
def quasi_part_dimension(self, r=None, min_exp=0, max_exp=infinity, order_1=ZZ(0)):
    r"""
    Return the dimension of the subspace of ``self`` generated by
    ``self.quasi_part_gens(r, min_exp, max_exp, order_1)``.

    See :meth:`quasi_part_gens` for more details.

    INPUT:

    - ``r`` -- An integer or ``None`` (default), the power of ``E2`` of the
      quasi part. If ``None`` the dimensions over all possible ``r`` are summed.

    - ``min_exp`` -- An integer (default: 0), the lower bound for the order at
      infinity of each quasi part.

    - ``max_exp`` -- An integer or ``infinity`` (default), the upper bound for
      the order at infinity of each quasi part.

    - ``order_1`` -- A lower bound for the order at ``-1`` of all quasi parts
      (default: 0). If ``n != infinity`` this parameter is ignored.

    OUTPUT:

    The dimension (an element of ``ZZ``) of the corresponding subspace.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms, QuasiCuspForms, QuasiWeakModularForms
        sage: MF = QuasiModularForms(n=5, k=6, ep=-1)
        sage: [v.as_ring_element() for v in MF.gens()]
        [f_rho^2*f_i, f_rho^3*E2, E2^3]
        sage: MF.dimension()
        3
        sage: MF.quasi_part_dimension(r=0)
        1
        sage: MF.quasi_part_dimension(r=1)
        1
        sage: MF.quasi_part_dimension(r=2)
        0
        sage: MF.quasi_part_dimension(r=3)
        1

        sage: MF = QuasiCuspForms(n=5, k=18, ep=-1)
        sage: MF.dimension()
        8
        sage: MF.quasi_part_dimension(r=0)
        2
        sage: MF.quasi_part_dimension(r=1)
        2
        sage: MF.quasi_part_dimension(r=2)
        1
        sage: MF.quasi_part_dimension(r=3)
        1
        sage: MF.quasi_part_dimension(r=4)
        1
        sage: MF.quasi_part_dimension(r=5)
        1
        sage: MF.quasi_part_dimension(min_exp=2, max_exp=2)
        2

        sage: MF = QuasiCuspForms(n=infinity, k=18, ep=-1)
        sage: MF.quasi_part_dimension(r=1, min_exp=-2)
        3
        sage: MF.quasi_part_dimension()
        12
        sage: MF.quasi_part_dimension(order_1=3)
        2

        sage: MF = QuasiWeakModularForms(n=infinity, k=4, ep=1)
        sage: MF.quasi_part_dimension(min_exp=2, order_1=-2)
        4
        sage: [v.order_at(-1) for v in MF.quasi_part_gens(r=0, min_exp=2, order_1=-2)]
        [-2, -2]
    """
    # The computation below only makes sense for (quasi) weakly holomorphic
    # forms; warn (but still proceed) otherwise.
    if (not self.is_weakly_holomorphic()):
        from warnings import warn
        warn("This function only determines the dimension of some (quasi) weakly subspace!")

    (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)

    # For modular forms spaces the quasi parts are all zero except for r=0
    # NOTE(review): indentation reconstructed — with ``r`` forced to 0 here the
    # inner check can never trigger; confirm nesting against upstream.
    if (self.is_modular()):
        r = ZZ(0)
        if (r != 0):
            return ZZ(0)

    # The lower bounds on the powers of f_inf and E4 determine
    # how large powers of E2 we can fit in...
    n = self.hecke_n()
    if (n == infinity):
        max_numerator_weight = self._weight - 4*min_exp - 4*order_1 + 4
    else:
        max_numerator_weight = self._weight - 4*n/(n-2)*min_exp + 4

    # If r is not specified we calculate the total dimension over all possible r's
    if (r is None):
        return sum([self.quasi_part_dimension(r=rnew, min_exp=min_exp, max_exp=max_exp, order_1=order_1) for rnew in range(ZZ(0), QQ(max_numerator_weight/ZZ(2)).floor() + 1)])

    r = ZZ(r)
    # Powers of E2 outside [0, max_numerator_weight/2] leave no room for a
    # holomorphic remainder, so the quasi part is trivial.
    if (r < 0 or 2*r > max_numerator_weight):
        return ZZ(0)

    # Weight and multiplier of the (purely modular) factor after splitting
    # off E2^r.
    k = self._weight - QQ(2*r)
    ep = self._ep * (-1)**r
    if (n == infinity):
        num = (k - (1-ep)) / ZZ(4)
        l2 = order_1
        order_inf = ZZ(num) - order_1
    else:
        num = ZZ((k-(1-ep)*ZZ(n)/ZZ(n-2)) * ZZ(n-2) / ZZ(4))
        l2 = num % n
        order_inf = ((num - l2) / n).numerator()

    # Clamp max_exp to the maximal possible order at infinity.
    if (max_exp == infinity):
        max_exp = order_inf
    elif (max_exp < min_exp):
        return ZZ(0)
    else:
        max_exp = min(ZZ(max_exp), order_inf)

    # One basis element per admissible order at infinity.
    return max(ZZ(0), max_exp - min_exp + 1)
def construct_form(self, laurent_series, order_1=ZZ(0), check=True, rationalize=False):
    r"""
    Tries to construct an element of self with the given Fourier
    expansion. The assumption is made that the specified Fourier
    expansion corresponds to a weakly holomorphic modular form.

    If the precision is too low to determine the
    element an exception is raised.

    INPUT:

    - ``laurent_series`` -- A Laurent or Power series.

    - ``order_1`` -- A lower bound for the order at ``-1`` of the form (default: 0).
      If ``n!=infinity`` this parameter is ignored.

    - ``check`` -- If ``True`` (default) then the series expansion of the constructed
      form is compared against the given series.

    - ``rationalize`` -- If ``True`` (default: ``False``) then the series is
      `rationalized` beforehand. Note that in non-exact or non-arithmetic
      cases this is experimental and extremely unreliable!

    OUTPUT:

    If possible: An element of self with the same initial
    Fourier expansion as ``laurent_series``.

    Note: For modular spaces it is also possible to call
    ``self(laurent_series)`` instead.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import CuspForms
        sage: Delta = CuspForms(k=12).Delta()
        sage: qexp = Delta.q_expansion(prec=2)
        sage: qexp.parent()
        Power Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        sage: qexp
        q + O(q^2)
        sage: CuspForms(k=12).construct_form(qexp) == Delta
        True

        sage: from sage.modular.modform_hecketriangle.space import WeakModularForms
        sage: J_inv = WeakModularForms(n=7).J_inv()
        sage: qexp2 = J_inv.q_expansion(prec=1)
        sage: qexp2.parent()
        Laurent Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        sage: qexp2
        d*q^-1 + 151/392 + O(q)
        sage: WeakModularForms(n=7).construct_form(qexp2) == J_inv
        True

        sage: MF = WeakModularForms(n=5, k=62/3, ep=-1)
        sage: MF.default_prec(MF._l1+1)
        sage: d = MF.get_d()
        sage: MF.weight_parameters()
        (2, 3)
        sage: el2 = d*MF.F_basis(2) + 2*MF.F_basis(1) + MF.F_basis(-2)
        sage: qexp2 = el2.q_expansion()
        sage: qexp2.parent()
        Laurent Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        sage: qexp2
        q^-2 + 2*q + d*q^2 + O(q^3)
        sage: WeakModularForms(n=5, k=62/3, ep=-1).construct_form(qexp2) == el2
        True

        sage: MF = WeakModularForms(n=infinity, k=-2, ep=-1)
        sage: el3 = MF.f_i()/MF.f_inf() + MF.f_i()*MF.f_inf()/MF.E4()^2
        sage: MF.quasi_part_dimension(min_exp=-1, order_1=-2)
        3
        sage: prec = MF._l1 + 3
        sage: qexp3 = el3.q_expansion(prec)
        sage: qexp3
        q^-1 - 1/(4*d) + ((1024*d^2 - 33)/(1024*d^2))*q + O(q^2)
        sage: MF.construct_form(qexp3, order_1=-2) == el3
        True
        sage: MF.construct_form(el3.q_expansion(prec + 1), order_1=-3) == el3
        True

        sage: WF = WeakModularForms(n=14)
        sage: qexp = WF.J_inv().q_expansion_fixed_d(d_num_prec=1000)
        sage: qexp.parent()
        Laurent Series Ring in q over Real Field with 1000 bits of precision
        sage: WF.construct_form(qexp, rationalize=True) == WF.J_inv()
        doctest:...: UserWarning: Using an experimental rationalization of coefficients, please check the result for correctness!
        True
    """
    base_ring = laurent_series.base_ring()
    # Coefficients must live over a polynomial ring in ``d``; otherwise we
    # either rationalize them (experimental) or refuse.
    if is_PolynomialRing(base_ring.base()):
        if not (self.coeff_ring().has_coerce_map_from(base_ring)):
            raise ValueError("The Laurent coefficients don't coerce into the coefficient ring of self!")
    elif rationalize:
        laurent_series = self.rationalize_series(laurent_series)
    else:
        raise ValueError("The Laurent coefficients are not in the proper form yet. Try rationalize_series(laurent_series) beforehand (experimental).")

    order_1 = self._canonical_min_exp(0, order_1)[1]
    # Coefficients up to q^order_inf are needed to pin the form down uniquely.
    order_inf = self._l1 - order_1

    if (laurent_series.prec() < order_inf + 1):
        raise ValueError("Insufficient precision: {} < {} = order_inf!".format(laurent_series.prec(), order_inf + 1))

    new_series = laurent_series.add_bigoh(order_inf + 1)
    coefficients = new_series.coefficients()
    exponents = new_series.exponents()

    if (len(coefficients) == 0):
        return self(0)

    # Build the form as a linear combination of the F-basis elements that
    # match each occurring exponent.
    rat = sum([coefficients[j] * self.F_basis_pol(exponents[j], order_1=order_1)
               for j in range(ZZ(len(coefficients)))])
    el = self(rat)

    if (check):
        # Compare as far as the given series allows.
        prec = min(laurent_series.prec(), laurent_series.exponents()[-1] + 1)
        if (el.q_expansion(prec=prec) != laurent_series):
            raise ValueError("The Laurent series {} does not correspond to a form of {}".format(laurent_series, self.reduce_type(["weak"])))

    return el
@cached_method
def _quasi_form_matrix(self, min_exp=0, order_1=ZZ(0), incr_prec_by=0):
    r"""
    Return a base change matrix which transforms coordinate vectors
    with respect to a certain basis into a vector corresponding to
    Laurent coefficients of a series.

    This is a helper function used to construct weakly holomorphic quasi
    forms based on their initial Laurent coefficients
    (see :meth:`construct_quasi_form`).

    INPUT:

    - ``min_exp`` -- An integer (default: 0), namely the lower bound for the
      order at infinity resp. the exponent of the Laurent series.

    - ``order_1`` -- A lower bound for the order at ``-1`` of all quasi parts of the
      subspace (default: 0). If ``n!=infinity`` this parameter is ignored.

    - ``incr_prec_by`` -- An integer (default: 0) which specifies how
      much the precision should be increased compared to
      the size of the corresponding basis.

    OUTPUT:

    The corresponding base change matrix.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms, ModularForms, QuasiModularForms
        sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)
        sage: A = QF._quasi_form_matrix(min_exp=-1)
        sage: A[3]
        (-1215/(65536*d^3), -2171/(131072*d^2), 134099/(16777216*d^3), -811/(131072*d^2), 15889/(8388608*d^3), -8851/(8388608*d^3))

        sage: MF = ModularForms(k=36)
        sage: MF._quasi_form_matrix(min_exp=2)
        [1 0]
        [0 1]

        sage: QuasiModularForms(k=2)._quasi_form_matrix()
        [1]

        sage: QF = QuasiWeakModularForms(n=infinity, k=-2, ep=-1)
        sage: A = QF._quasi_form_matrix(min_exp=-1, order_1=0)
        sage: A
        [       1        1]
        [-1/(4*d)        0]
    """
    (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)
    order_inf = self._l1 - order_1
    # We have to add + 1 to get a correct upper bound in all cases
    # since corresponding weak space might have a higher l1 (+1) than
    # ``self``, even if the weight is smaller
    max_exp = order_inf + 1
    basis = self.quasi_part_gens(min_exp=min_exp, max_exp=max_exp, order_1=order_1)

    column_size = len(basis)
    # a non-trivial incr_prec_by will be added in case the resulting matrix does not have full rank
    row_size = column_size + incr_prec_by
    prec = row_size + min_exp

    coeff_ring = self.coeff_ring()
    # Assemble the matrix column by column: each column holds the Laurent
    # coefficients (from min_exp up to prec-1) of one basis element.
    A = matrix(coeff_ring, row_size, 0)

    for gen in basis:
        A = A.augment(gen.q_expansion_vector(min_exp=min_exp, max_exp=prec-1))

    # So far this case never happened but potentially A could be singular!
    # In this case we want to increase the row size until A has maximal
    # rank (i.e. column size).
    # This is done up increasing the precision of everything by about 20%
    # of the column size until A has maximal rank:
    if (A.rank() < column_size):
        if (incr_prec_by == 0):
            from sage.misc.verbose import verbose
            verbose("Encountered a base change matrix with not-yet-maximal rank (rare, please report)!")
        incr_prec_by += column_size//ZZ(5) + 1
        # Retry with a bigger precision (recursive call recomputes everything).
        return self._quasi_form_matrix(min_exp=min_exp, order_1=order_1, incr_prec_by=incr_prec_by)
    elif (incr_prec_by == 0):
        return A

    # At this point the matrix has maximal rank but might be too big.
    # Since we are interested in the (exact) required size resp. precision
    # we have to decrease the (row) size as much as possible while keeping
    # maximal rank. We cannot simply choose pivots/etc since we want to
    # keep a simple correspondence to Fourier coefficients!

    # We start by using an initial binary search to delete some unnecessary rows:
    # ``B`` always holds the last known full-rank matrix while ``A`` is the
    # candidate with rows removed.
    while (A.rank() == column_size):
        row_size = A.dimensions()[0]

        # to avoid infinite loops
        if (row_size == column_size):
            return A

        B = A
        A = A.delete_rows([r for r in range(column_size + (row_size-column_size)//2 - 1, row_size)])

    # Next we simply delete row by row. Note that A is still modified here...
    while (B.rank() == column_size):
        A = B
        row_size = B.dimensions()[0]
        B = B.delete_rows([row_size-1])

    return A
def required_laurent_prec(self, min_exp=0, order_1=ZZ(0)):
    r"""
    Return an upper bound for the number of Laurent coefficients needed to
    uniquely determine a corresponding (quasi) form in ``self``, given the
    lower bound ``min_exp`` for the order at infinity (for each quasi part).

    .. NOTE::

        For ``n=infinity`` only the holomorphic case (``min_exp >= 0``)
        is supported (in particular a non-negative order at ``-1`` is assumed).

    INPUT:

    - ``min_exp`` -- An integer (default: 0), namely the lower bound for the
      order at infinity resp. the exponent of the Laurent series.

    - ``order_1`` -- A lower bound for the order at ``-1`` for all quasi parts
      (default: 0). If ``n!=infinity`` this parameter is ignored.

    OUTPUT:

    An integer: an upper bound for the number of required Laurent
    coefficients. The bound should be precise or at least pretty sharp.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms, ModularForms, QuasiModularForms
        sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)
        sage: QF.required_laurent_prec(min_exp=-1)
        5

        sage: MF = ModularForms(k=36)
        sage: MF.required_laurent_prec(min_exp=2)
        4

        sage: QuasiModularForms(k=2).required_laurent_prec()
        1

        sage: QuasiWeakModularForms(n=infinity, k=2, ep=-1).required_laurent_prec(order_1=-1)
        6
    """
    min_exp, order_1 = self._canonical_min_exp(min_exp, order_1)

    # The number of rows of the (already minimized) base change matrix is
    # exactly the number of coefficients needed, starting at q^min_exp.
    base_change = self._quasi_form_matrix(min_exp=min_exp, order_1=order_1)
    row_count = base_change.dimensions()[0]
    return row_count + min_exp
def construct_quasi_form(self, laurent_series, order_1=ZZ(0), check=True, rationalize=False):
    r"""
    Try to construct an element of self with the given Fourier
    expansion. The assumption is made that the specified Fourier
    expansion corresponds to a weakly holomorphic quasi modular form.

    If the precision is too low to determine the
    element an exception is raised.

    INPUT:

    - ``laurent_series`` -- A Laurent or Power series.

    - ``order_1`` -- A lower bound for the order at ``-1`` for all quasi parts of the
      form (default: 0). If ``n!=infinity`` this parameter is ignored.

    - ``check`` -- If ``True`` (default) then the series expansion of the constructed
      form is compared against the given (rationalized) series.

    - ``rationalize`` -- If ``True`` (default: ``False``) then the series is
      `rationalized` beforehand. Note that in non-exact or non-arithmetic
      cases this is experimental and extremely unreliable!

    OUTPUT:

    If possible: An element of self with the same initial
    Fourier expansion as ``laurent_series``.

    Note: For non modular spaces it is also possible to call
    ``self(laurent_series)`` instead. Also note that this function works
    much faster if a corresponding (cached) ``q_basis`` is available.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms, ModularForms, QuasiModularForms, QuasiCuspForms
        sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)
        sage: el = QF.quasi_part_gens(min_exp=-1)[4]
        sage: prec = QF.required_laurent_prec(min_exp=-1)
        sage: prec
        5
        sage: qexp = el.q_expansion(prec=prec)
        sage: qexp
        q^-1 - 19/(64*d) - 7497/(262144*d^2)*q + 15889/(8388608*d^3)*q^2 + 543834047/(1649267441664*d^4)*q^3 + 711869853/(43980465111040*d^5)*q^4 + O(q^5)
        sage: qexp.parent()
        Laurent Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        sage: constructed_el = QF.construct_quasi_form(qexp)
        sage: constructed_el.parent()
        QuasiWeakModularForms(n=8, k=10/3, ep=-1) over Integer Ring
        sage: el == constructed_el
        True

    If a q_basis is available the construction uses a different algorithm which we also check::

        sage: basis = QF.q_basis(min_exp=-1)
        sage: QF(qexp) == constructed_el
        True

        sage: MF = ModularForms(k=36)
        sage: el2 = MF.quasi_part_gens(min_exp=2)[1]
        sage: prec = MF.required_laurent_prec(min_exp=2)
        sage: prec
        4
        sage: qexp2 = el2.q_expansion(prec=prec + 1)
        sage: qexp2
        q^3 - 1/(24*d)*q^4 + O(q^5)
        sage: qexp2.parent()
        Power Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        sage: constructed_el2 = MF.construct_quasi_form(qexp2)
        sage: constructed_el2.parent()
        ModularForms(n=3, k=36, ep=1) over Integer Ring
        sage: el2 == constructed_el2
        True

        sage: QF = QuasiModularForms(k=2)
        sage: q = QF.get_q()
        sage: qexp3 = 1 + O(q)
        sage: QF(qexp3)
        1 - 24*q - 72*q^2 - 96*q^3 - 168*q^4 + O(q^5)
        sage: QF(qexp3) == QF.E2()
        True

        sage: QF = QuasiWeakModularForms(n=infinity, k=2, ep=-1)
        sage: el4 = QF.f_i() + QF.f_i()^3/QF.E4()
        sage: prec = QF.required_laurent_prec(order_1=-1)
        sage: qexp4 = el4.q_expansion(prec=prec)
        sage: qexp4
        2 - 7/(4*d)*q + 195/(256*d^2)*q^2 - 903/(4096*d^3)*q^3 + 41987/(1048576*d^4)*q^4 - 181269/(33554432*d^5)*q^5 + O(q^6)
        sage: QF.construct_quasi_form(qexp4, check=False) == el4
        False
        sage: QF.construct_quasi_form(qexp4, order_1=-1) == el4
        True

        sage: QF = QuasiCuspForms(n=8, k=22/3, ep=-1)
        sage: el = QF(QF.f_inf()*QF.E2())
        sage: qexp = el.q_expansion_fixed_d(d_num_prec=1000)
        sage: qexp.parent()
        Power Series Ring in q over Real Field with 1000 bits of precision
        sage: QF.construct_quasi_form(qexp, rationalize=True) == el
        True
    """
    base_ring = laurent_series.base_ring()
    # Coefficients must live over a polynomial ring in ``d``; otherwise we
    # either rationalize them (experimental) or refuse.
    if is_PolynomialRing(base_ring.base()):
        if not (self.coeff_ring().has_coerce_map_from(base_ring)):
            raise ValueError("The Laurent coefficients don't coerce into the coefficient ring of self!")
    elif rationalize:
        laurent_series = self.rationalize_series(laurent_series)
    else:
        raise ValueError("The Laurent coefficients are not in the proper form yet. Try rationalize_series(laurent_series) beforehand (experimental).")

    # Number of meaningful coefficients in the given series.
    prec = min(laurent_series.prec(), laurent_series.exponents()[-1] + 1)
    min_exp1 = laurent_series.exponents()[0]
    (min_exp, order_1) = self._canonical_min_exp(min_exp1, order_1)

    if (min_exp != min_exp1):
        raise ValueError("Due to the behavior at infinity the given Laurent series cannot possibly be an element of {}".format(self))

    # if a q_basis is available we can construct the form much faster
    if (self.q_basis.is_in_cache(min_exp=min_exp, order_1=order_1)):
        basis = self.q_basis(min_exp=min_exp, order_1=order_1)

        size = len(basis)
        if (prec < min_exp + size):
            raise ValueError("Insufficient precision: {} < {}!".format(laurent_series.prec(), min_exp + size))

        # With an echelonized q_basis the coefficients themselves are the
        # coordinates — no linear system needs to be solved.
        b = vector(self.coeff_ring(), [laurent_series[m] for m in range(min_exp, min_exp + len(basis))])
        el = self(sum([b[k]*basis[k] for k in range(0, len(basis))]))
    else:
        A = self._quasi_form_matrix(min_exp = min_exp, order_1=order_1)
        row_size = A.dimensions()[0]
        if (prec < min_exp + row_size):
            raise ValueError("Insufficient precision: {} < {}!".format(laurent_series.prec(), min_exp + row_size))

        # Solve for the coordinates of the form with respect to the
        # quasi_part_gens basis.
        b = vector(self.coeff_ring(), [laurent_series[m] for m in range(min_exp, min_exp + row_size)])
        try:
            coord_vector = A.solve_right(b)
        except ValueError:
            raise ValueError("The Laurent series {} does not correspond to a (quasi) form of {}".format(laurent_series, self.reduce_type(["quasi", "weak"])))

        order_inf = self._l1 - order_1
        # We have to add + 1 to get a correct upper bound in all cases
        # since corresponding weak space might have a higher l1 (+1) than
        # ``self``, even if the weight is smaller
        max_exp = order_inf + 1
        basis = self.quasi_part_gens(min_exp=min_exp, max_exp=max_exp, order_1=order_1)
        el = self(sum([coord_vector[k]*basis[k] for k in range(0, len(coord_vector))]))

    if (check):
        if (el.q_expansion(prec=prec) != laurent_series):
            raise ValueError("The Laurent series {} does not correspond to a form of {}".format(laurent_series, self.reduce_type(["quasi", "weak"])))

    return el
@cached_method
def q_basis(self, m=None, min_exp=0, order_1=ZZ(0)):
    r"""
    Try to return a (basis) element of ``self`` with a Laurent series of the form
    ``q^m + O(q^N)``, where ``N=self.required_laurent_prec(min_exp)``.

    If ``m==None`` the whole basis (with varying ``m``'s) is returned if it exists.

    INPUT:

    - ``m`` -- An integer, indicating the desired initial Laurent exponent of the element.
      If ``m==None`` (default) then the whole basis is returned.

    - ``min_exp`` -- An integer, indicating the minimal Laurent exponent (for each quasi part)
      of the subspace of ``self`` which should be considered (default: 0).

    - ``order_1`` -- A lower bound for the order at ``-1`` of all quasi parts of the subspace
      (default: 0). If ``n!=infinity`` this parameter is ignored.

    OUTPUT:

    The corresponding basis (if ``m==None``) resp. the corresponding basis vector (if ``m!=None``).
    If the basis resp. element doesn't exist an exception is raised.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms, ModularForms, QuasiModularForms
        sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)
        sage: QF.default_prec(QF.required_laurent_prec(min_exp=-1))
        sage: q_basis = QF.q_basis(min_exp=-1)
        sage: q_basis
        [q^-1 + O(q^5), 1 + O(q^5), q + O(q^5), q^2 + O(q^5), q^3 + O(q^5), q^4 + O(q^5)]
        sage: QF.q_basis(m=-1, min_exp=-1)
        q^-1 + O(q^5)

        sage: MF = ModularForms(k=36)
        sage: MF.q_basis() == MF.gens()
        True

        sage: QF = QuasiModularForms(k=6)
        sage: QF.required_laurent_prec()
        3
        sage: QF.q_basis()
        [1 - 20160*q^3 - 158760*q^4 + O(q^5), q - 60*q^3 - 248*q^4 + O(q^5), q^2 + 8*q^3 + 30*q^4 + O(q^5)]

        sage: QF = QuasiWeakModularForms(n=infinity, k=-2, ep=-1)
        sage: QF.q_basis(order_1=-1)
        [1 - 168*q^2 + 2304*q^3 - 19320*q^4 + O(q^5),
         q - 18*q^2 + 180*q^3 - 1316*q^4 + O(q^5)]
    """
    # The construction below only makes sense for (quasi) weakly
    # holomorphic forms; warn (but still proceed) otherwise.
    if (not self.is_weakly_holomorphic()):
        from warnings import warn
        warn("This function only determines elements / a basis of (quasi) weakly modular forms!")

    (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)
    order_inf = self._l1 - order_1

    if (m is None):
        A = self._quasi_form_matrix(min_exp=min_exp, order_1=order_1)
        # If A is square it should automatically be invertible (by the previous procedures)
        if (A.is_square()):
            B = A.inverse()

            max_exp = order_inf + 1
            basis = self.quasi_part_gens(min_exp=min_exp, max_exp=max_exp, order_1=order_1)

            # The columns of the inverse base change matrix give the
            # coordinates of the echelonized (q^m + O(q^N)) basis.
            column_len = A.dimensions()[1]
            q_basis = []
            for k in range(0, column_len):
                el = self(sum([B[l][k] * basis[l] for l in range(0, column_len)]))
                q_basis += [el]

            return q_basis
        else:
            raise ValueError("Unfortunately a q_basis doesn't exist in this case (this is rare/interesting, please report)")
    else:
        if (m < min_exp):
            raise ValueError("Index out of range: m={} < {}=min_exp".format(m, min_exp))

        # If the whole basis is available, then use it
        if (self.q_basis.is_in_cache(min_exp=min_exp, order_1=order_1)):
            q_basis = self.q_basis(min_exp=min_exp, order_1=order_1)

            column_len = len(q_basis)
            if (m >= column_len + min_exp):
                raise ValueError("Index out of range: m={} >= {}=dimension + min_exp".format(m, column_len + min_exp))

            return q_basis[m - min_exp]
        else:
            row_len = self.required_laurent_prec(min_exp=min_exp, order_1=order_1) - min_exp
            if (m >= row_len + min_exp):
                raise ValueError("Index out of range: m={} >= {}=required_precision + min_exp".format(m, row_len + min_exp))

            A = self._quasi_form_matrix(min_exp = min_exp, order_1=order_1)
            # Solve for the coordinate vector of the single element whose
            # expansion starts with q^m (unit vector on the right hand side).
            b = vector(self.coeff_ring(), row_len)
            b[m - min_exp] = 1
            try:
                coord_vector = A.solve_right(b)
            except ValueError:
                raise ValueError("Unfortunately the q_basis vector (m={}, min_exp={}) doesn't exist in this case (this is rare/interesting, please report)".format(m, min_exp))

            max_exp = order_inf + 1
            basis = self.quasi_part_gens(min_exp=min_exp, max_exp=max_exp, order_1=order_1)

            column_len = A.dimensions()[1]
            el = self(sum([coord_vector[l] * basis[l] for l in range(0, column_len)]))

            return el
def rationalize_series(self, laurent_series, coeff_bound = 1e-10, denom_factor = ZZ(1)):
    r"""
    Try to return a Laurent series with coefficients in ``self.coeff_ring()``
    that matches the given Laurent series.

    We give our best but there is absolutely no guarantee that it will work!

    INPUT:

    - ``laurent_series`` -- A Laurent series. If the Laurent coefficients already
      coerce into ``self.coeff_ring()`` with a formal parameter
      then the Laurent series is returned as is.

      Otherwise it is assumed that the series is normalized
      in the sense that the first non-trivial coefficient
      is a power of ``d`` (e.g. ``1``).

    - ``coeff_bound`` -- Either ``None`` resp. ``0`` or a positive real number
      (default: ``1e-10``). If specified ``coeff_bound``
      gives a lower bound for the size of the initial Laurent
      coefficients. If a coefficient is smaller it is
      assumed to be zero.

      For calculations with very small coefficients (less than
      ``1e-10``) ``coeff_bound`` should be set to something
      even smaller or just ``0``.

      Non-exact calculations often produce non-zero
      coefficients which are supposed to be zero. In those
      cases this parameter helps a lot.

    - ``denom_factor`` -- An integer (default: 1) whose factor might occur in
      the denominator of the given Laurent coefficients
      (in addition to naturally occurring factors).

    OUTPUT:

    A Laurent series over ``self.coeff_ring()`` corresponding to the given Laurent series.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import WeakModularForms, ModularForms, QuasiCuspForms
        sage: WF = WeakModularForms(n=14)
        sage: qexp = WF.J_inv().q_expansion_fixed_d(d_num_prec=1000)
        sage: qexp.parent()
        Laurent Series Ring in q over Real Field with 1000 bits of precision
        sage: qexp_int = WF.rationalize_series(qexp)
        sage: qexp_int.add_bigoh(3)
        d*q^-1 + 37/98 + 2587/(38416*d)*q + 899/(117649*d^2)*q^2 + O(q^3)
        sage: qexp_int == WF.J_inv().q_expansion()
        True
        sage: WF.rationalize_series(qexp_int) == qexp_int
        True
        sage: WF(qexp_int) == WF.J_inv()
        True
        sage: WF.rationalize_series(qexp.parent()(1))
        1
        sage: WF.rationalize_series(qexp_int.parent()(1)).parent()
        Laurent Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring

        sage: MF = ModularForms(n=infinity, k=4)
        sage: qexp = MF.E4().q_expansion_fixed_d()
        sage: qexp.parent()
        Power Series Ring in q over Rational Field
        sage: qexp_int = MF.rationalize_series(qexp)
        sage: qexp_int.parent()
        Power Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        sage: qexp_int == MF.E4().q_expansion()
        True
        sage: MF.rationalize_series(qexp_int) == qexp_int
        True
        sage: MF(qexp_int) == MF.E4()
        True

        sage: QF = QuasiCuspForms(n=8, k=22/3, ep=-1)
        sage: el = QF(QF.f_inf()*QF.E2())
        sage: qexp = el.q_expansion_fixed_d(d_num_prec=1000)
        sage: qexp.parent()
        Power Series Ring in q over Real Field with 1000 bits of precision
        sage: qexp_int = QF.rationalize_series(qexp)
        sage: qexp_int.parent()
        Power Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        sage: qexp_int == el.q_expansion()
        True
        sage: QF.rationalize_series(qexp_int) == qexp_int
        True
        sage: QF(qexp_int) == el
        True
    """
    from sage.rings.all import prime_range
    from sage.misc.all import prod
    from warnings import warn

    denom_factor = ZZ(denom_factor)
    base_ring = laurent_series.base_ring()
    series_prec = laurent_series.prec()

    # If the coefficients already coerce to our coefficient ring
    # and are in polynomial form we simply return the Laurent series
    if (is_PolynomialRing(base_ring.base())):
        if (self.coeff_ring().has_coerce_map_from(base_ring)):
            return laurent_series
        else:
            raise ValueError("The Laurent coefficients don't coerce into the coefficient ring of self!")
    # Else the case that the Laurent series is exact but the group is non-arithmetic
    # shouldn't occur (except for trivial cases)
    elif (base_ring.is_exact() and not self.group().is_arithmetic()):
        prec = self.default_num_prec()
        dvalue = self.group().dvalue().n(prec)
    # For arithmetic groups the coefficients are exact though (so is d)
    elif (base_ring.is_exact()):
        prec = self.default_num_prec()
        dvalue = self.group().dvalue()
    else:
        prec = laurent_series.base_ring().prec()
        dvalue = self.group().dvalue().n(prec)

    # This messes up doctests! :-(
    # NOTE(review): indentation reconstructed — this warning is emitted for
    # every non-coercing input; confirm against upstream whether it belongs
    # inside the non-exact branch only.
    warn("Using an experimental rationalization of coefficients, please check the result for correctness!")

    d = self.get_d()
    q = self.get_q()

    # For non-exact coefficients: drop everything below coeff_bound, since
    # numerical noise otherwise produces spurious non-zero coefficients.
    if (not base_ring.is_exact() and coeff_bound):
        coeff_bound = base_ring(coeff_bound)
        num_q = laurent_series.parent().gen()
        laurent_series = sum([laurent_series[i]*num_q**i for i in range(laurent_series.exponents()[0], laurent_series.exponents()[-1]+1) if laurent_series[i].abs() > coeff_bound]).add_bigoh(series_prec)

    first_exp = laurent_series.exponents()[0]
    first_coeff = laurent_series[first_exp]
    # Guess which power of d the leading coefficient is (numerically).
    d_power = (first_coeff.abs().n(prec).log()/dvalue.n(prec).log()).round()

    # Normalize the series (sign resp. leading power of d) and recurse,
    # so that below the leading coefficient can be assumed to be 1.
    if (first_coeff < 0):
        return -self.rationalize_series(-laurent_series, coeff_bound=coeff_bound)
    elif (first_exp + d_power != 0):
        cor_factor = dvalue**(-(first_exp + d_power))
        return d**(first_exp + d_power) * self.rationalize_series(cor_factor * laurent_series, coeff_bound=coeff_bound)
    else:
        if (base_ring.is_exact() and self.group().is_arithmetic()):
            tolerance = 0
        else:
            tolerance = 10*ZZ(1).n(prec).ulp()

        if (first_coeff * dvalue**first_exp - ZZ(1)) > tolerance:
            raise ValueError("The Laurent series is not normalized correctly!")

    # TODO: This is not a good enough estimate, see e.g. E12
    # (however for exact base rings + arithmetic groups we don't need it)
    def denominator_estimate(m):
        cor_exp = max(-first_exp, 0)
        m += cor_exp

        if self.group().is_arithmetic():
            return ZZ(1/dvalue)**m

        hecke_n = self.hecke_n()
        bad_factors = [fac for fac in Integer(m).factorial().factor() if (fac[0] % hecke_n) not in [1, hecke_n-1] and fac[0] > 2]
        bad_factorial = prod([fac[0]**fac[1] for fac in bad_factors])

        return ZZ(2**(6*m) * hecke_n**(2*m) * prod([ p**m for p in prime_range(m+1) if hecke_n % p == 0 and p > 2 ]) * bad_factorial)**(cor_exp + 1)

    # Round each numeric coefficient to a nearby rational with a
    # denominator bounded by denominator_estimate(m) * denom_factor.
    def rationalize_coefficient(coeff, m):
        # TODO: figure out a correct bound for the required precision
        if (not self.group().is_arithmetic() and denominator_estimate(m).log(2).n().ceil() > prec):
            warn("The precision from coefficient m={} on is too low!".format(m))

        rational_coeff = coeff * dvalue**m

        if (base_ring.is_exact() and self.group().is_arithmetic() and rational_coeff in QQ):
            rational_coeff = QQ(rational_coeff)
        else:
            int_estimate = denominator_estimate(m) * denom_factor * rational_coeff
            rational_coeff = int_estimate.round() / denominator_estimate(m) / denom_factor

        return rational_coeff / d**m

    laurent_series = sum([rationalize_coefficient(laurent_series[m], m) * q**m for m in range(first_exp, laurent_series.exponents()[-1] + 1)]).add_bigoh(series_prec)

    return laurent_series
# DEFAULT METHODS (should be overwritten in concrete classes)
def _an_element_(self):
    r"""
    Return an element of ``self``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiMeromorphicModularForms
        sage: el = QuasiMeromorphicModularForms(k=2, ep=-1).an_element()
        sage: el.parent()
        QuasiMeromorphicModularForms(n=3, k=2, ep=-1) over Integer Ring
        sage: el.is_zero()
        True
        sage: el
        O(q^5)
    """
    # The zero form belongs to every forms space, so it is a safe
    # universal choice for all subclasses.
    return self(ZZ(0))
@cached_method
def dimension(self):
    r"""
    Return the dimension of ``self``.

    .. NOTE::

        This default implementation returns ``infinity`` and should be
        overloaded by subclasses with finite-dimensional spaces.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import QuasiMeromorphicModularForms
        sage: QuasiMeromorphicModularForms(k=2, ep=-1).dimension()
        +Infinity
    """
    return infinity
def rank(self):
    r"""
    Return the rank of ``self``.

    For these spaces the rank simply agrees with the dimension.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(n=4, k=24, ep=-1)
        sage: MF.rank()
        3
        sage: MF.subspace([MF.gen(0), MF.gen(2)]).rank()
        2
    """
    # By definition the rank of a forms space is its dimension.
    return self.dimension()
def degree(self):
    r"""
    Return the degree of ``self``.

    For an ambient space this coincides with the dimension
    (subspaces overload it, see ``subspace.py``).

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(n=4, k=24, ep=-1)
        sage: MF.degree()
        3
        sage: MF.subspace([MF.gen(0), MF.gen(2)]).degree()  # defined in subspace.py
        3
    """
    # The degree of the ambient space equals its dimension.
    return self.dimension()
def coordinate_vector(self, v):
    r"""
    Return the coordinate vector of the element ``v``
    with respect to ``self.gens()``.

    This default implementation always raises a ``NotImplementedError``
    and should be overloaded by subclasses.

    NOTE:

    Elements use this method (from their parent)
    to calculate their coordinates.

    INPUT:

    - ``v`` -- An element of ``self``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(n=4, k=24, ep=-1)
        sage: MF.coordinate_vector(MF.gen(0)).parent()  # defined in space.py
        Vector space of dimension 3 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        sage: MF.coordinate_vector(MF.gen(0))  # defined in space.py
        (1, 0, 0)

        sage: subspace = MF.subspace([MF.gen(0), MF.gen(2)])
        sage: subspace.coordinate_vector(subspace.gen(0)).parent()  # defined in subspace.py
        Vector space of dimension 2 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        sage: subspace.coordinate_vector(subspace.gen(0))  # defined in subspace.py
        (1, 0)
    """
    message = "No coordinate vector is implemented yet for {}!".format(self)
    raise NotImplementedError(message)
@cached_method
def ambient_coordinate_vector(self, v):
    r"""
    Return the coordinate vector of the element ``v``
    in ``self.module()`` with respect to the basis
    from ``self.ambient_space``.

    .. NOTE::

        Elements use this method (from their parent)
        to calculate their coordinates.

    INPUT:

    - ``v`` -- An element of ``self``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: MF = ModularForms(n=4, k=24, ep=-1)
        sage: MF.ambient_coordinate_vector(MF.gen(0)).parent()
        Vector space of dimension 3 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        sage: MF.ambient_coordinate_vector(MF.gen(0))
        (1, 0, 0)
        sage: subspace = MF.subspace([MF.gen(0), MF.gen(2)])
        sage: subspace.ambient_coordinate_vector(subspace.gen(0)).parent()
        Vector space of degree 3 and dimension 2 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        Basis matrix:
        [1 0 0]
        [0 0 1]
        sage: subspace.ambient_coordinate_vector(subspace.gen(0))
        (1, 0, 0)
    """
    # Express v in the ambient basis first, then coerce the result
    # into this space's module.
    ambient_coords = self.ambient_space().coordinate_vector(v)
    return self.module()(ambient_coords)
def gens(self):
    r"""
    Return a basis of ``self``.

    .. NOTE::

        This method should be overloaded by subclasses.
        The coordinate vectors of elements of ``self``
        are taken with respect to this basis.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: ModularForms(k=12).gens()    # defined in space.py
        [1 + 196560*q^2 + 16773120*q^3 + 398034000*q^4 + O(q^5),
        q - 24*q^2 + 252*q^3 - 1472*q^4 + O(q^5)]
    """
    # The generic base class does not know a basis.
    raise NotImplementedError("No generators are implemented yet for {}!".format(self))
def gen(self, k=0):
    r"""
    Return the ``k``'th basis element of ``self``
    if possible (default: ``k=0``).

    INPUT:

    - ``k`` -- A non-negative index of a basis element
      (default: ``0``). Must satisfy ``0 <= k < self.dimension()``.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.space import ModularForms
        sage: ModularForms(k=12).gen(1).parent()
        ModularForms(n=3, k=12, ep=1) over Integer Ring
        sage: ModularForms(k=12).gen(1)
        q - 24*q^2 + 252*q^3 - 1472*q^4 + O(q^5)
    """
    k = ZZ(k)
    if k >= 0 and k < self.dimension():
        return self.gens()[k]
    else:
        # BUG FIX: the message used to claim the valid range was
        # "0 <= k <= dimension", but the check above is strict
        # (k < dimension), so the upper bound is exclusive.
        raise ValueError("Invalid index: k={} does not satisfy 0 <= k < {}!".format(k, self.dimension()))
| 40.511956 | 206 | 0.551323 |
b14595332008db0e918118a0aefb55ae66bed666 | 9,854 | py | Python | pydiscord/__main__.py | AryamanSrii/PyDiscord | 3366d20e2725672ae7e6b29335119cac1aee76f9 | [
"MIT"
] | null | null | null | pydiscord/__main__.py | AryamanSrii/PyDiscord | 3366d20e2725672ae7e6b29335119cac1aee76f9 | [
"MIT"
] | null | null | null | pydiscord/__main__.py | AryamanSrii/PyDiscord | 3366d20e2725672ae7e6b29335119cac1aee76f9 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import argparse
import sys
from pathlib import Path
import pydiscord
import pkg_resources
import aiohttp
import platform
def show_version():
    """Print Python, pydiscord.py and aiohttp version info plus system details."""
    lines = []
    lines.append('- Python v{0.major}.{0.minor}.{0.micro}-{0.releaselevel}'.format(sys.version_info))
    version_info = pydiscord.version_info
    lines.append('- pydiscord.py v{0.major}.{0.minor}.{0.micro}-{0.releaselevel}'.format(version_info))
    # Non-final releases also report the exact installed package version.
    if version_info.releaselevel != 'final':
        pkg = pkg_resources.get_distribution('pydiscord.py')
        if pkg:
            lines.append(f' - pydiscord.py pkg_resources: v{pkg.version}')
    lines.append(f'- aiohttp v{aiohttp.__version__}')
    uname = platform.uname()
    lines.append('- system info: {0.system} {0.release} {0.version}'.format(uname))
    print('\n'.join(lines))
def core(parser, args):
    # Default handler for a bare `pydiscord` invocation: the only
    # supported top-level option is the version flag; subcommands
    # install their own handlers via set_defaults(func=...).
    if args.version:
        show_version()
_bot_template = """#!/usr/bin/env python3
from pydiscord.ext import commands
import pydiscord
import config
class Bot(commands.{base}):
def __init__(self, **kwargs):
super().__init__(command_prefix=commands.when_mentioned_or('{prefix}'), **kwargs)
for cog in config.cogs:
try:
self.load_extension(cog)
except Exception as exc:
print(f'Could not load extension {{cog}} due to {{exc.__class__.__name__}}: {{exc}}')
async def on_ready(self):
print(f'Logged on as {{self.user}} (ID: {{self.user.id}})')
bot = Bot()
# write general commands here
bot.run(config.token)
"""
_gitignore_template = """# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# Our configuration files
config.py
"""
_cog_template = '''from pydiscord.ext import commands
import pydiscord
class {name}(commands.Cog{attrs}):
"""The description for {name} goes here."""
def __init__(self, bot):
self.bot = bot
{extra}
def setup(bot):
bot.add_cog({name}(bot))
'''
_cog_extras = '''
def cog_unload(self):
# clean up logic goes here
pass
async def cog_check(self, ctx):
# checks that apply to every command in here
return True
async def bot_check(self, ctx):
# checks that apply to every command to the bot
return True
async def bot_check_once(self, ctx):
# check that apply to every command but is guaranteed to be called only once
return True
async def cog_command_error(self, ctx, error):
# error handling to every command in here
pass
async def cog_before_invoke(self, ctx):
# called before a command is called here
pass
async def cog_after_invoke(self, ctx):
# called after a command is called here
pass
'''
# certain file names and directory names are forbidden
# see: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247%28v=vs.85%29.aspx
# although some of this doesn't apply to Linux, we might as well be consistent
# Characters Windows forbids in file names are mapped to '-'.
_base_table = {
    '<': '-',
    '>': '-',
    ':': '-',
    '"': '-',
    # '/': '-', these are fine
    # '\\': '-',
    '|': '-',
    '?': '-',
    '*': '-',
}

# NUL (0) and 1-31 are disallowed
# Mapping a character to None in str.translate deletes it entirely.
_base_table.update((chr(i), None) for i in range(32))

# Precomputed table consumed by to_path() via str.translate().
_translation_table = str.maketrans(_base_table)
def to_path(parser, name, *, replace_spaces=False):
    """Sanitize *name* into a filesystem-safe Path, erroring out via
    *parser* on Windows-reserved device names. Path inputs pass through
    unchanged."""
    if isinstance(name, Path):
        return name

    if sys.platform == 'win32':
        # Reserved DOS device names cannot be used as file/dir names.
        forbidden = ('CON', 'PRN', 'AUX', 'NUL', 'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', \
                     'COM8', 'COM9', 'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9')
        if len(name) <= 4 and name.upper() in forbidden:
            parser.error('invalid directory name given, use a different one')

    # Strip/replace characters Windows disallows (see _translation_table).
    sanitized = name.translate(_translation_table)
    if replace_spaces:
        sanitized = sanitized.replace(' ', '-')
    return Path(sanitized)
def newbot(parser, args):
    """Handler for the `newbot` subcommand: scaffold a bot project
    (bot.py, config.py, a cogs/ package and optionally a .gitignore)
    inside args.directory/args.name."""
    new_directory = to_path(parser, args.directory) / to_path(parser, args.name)

    # as a note exist_ok for Path is a 3.5+ only feature
    # since we already checked above that we're >3.5
    try:
        new_directory.mkdir(exist_ok=True, parents=True)
    except OSError as exc:
        parser.error(f'could not create our bot directory ({exc})')

    # The cogs package is optional scaffolding: failure here only warns.
    cogs = new_directory / 'cogs'
    try:
        cogs.mkdir(exist_ok=True)
        init = cogs / '__init__.py'
        init.touch()
    except OSError as exc:
        print(f'warning: could not create cogs directory ({exc})')

    # config.py holds the token and cog list; the bot cannot run without it.
    try:
        with open(str(new_directory / 'config.py'), 'w', encoding='utf-8') as fp:
            fp.write('token = "place your token here"\ncogs = []\n')
    except OSError as exc:
        parser.error(f'could not create config file ({exc})')

    # Render the bot entry point from the module-level template.
    try:
        with open(str(new_directory / 'bot.py'), 'w', encoding='utf-8') as fp:
            base = 'Bot' if not args.sharded else 'AutoShardedBot'
            fp.write(_bot_template.format(base=base, prefix=args.prefix))
    except OSError as exc:
        parser.error(f'could not create bot file ({exc})')

    if not args.no_git:
        try:
            with open(str(new_directory / '.gitignore'), 'w', encoding='utf-8') as fp:
                fp.write(_gitignore_template)
        except OSError as exc:
            print(f'warning: could not create .gitignore file ({exc})')

    print('successfully made bot at', new_directory)
def newcog(parser, args):
    """Handler for the `newcog` subcommand: render a cog template file
    named after args.name inside args.directory (default: cogs/)."""
    cog_dir = to_path(parser, args.directory)
    try:
        cog_dir.mkdir(exist_ok=True)
    except OSError as exc:
        print(f'warning: could not create cogs directory ({exc})')

    directory = cog_dir / to_path(parser, args.name)
    directory = directory.with_suffix('.py')
    try:
        with open(str(directory), 'w', encoding='utf-8') as fp:
            attrs = ''
            # --full adds the optional cog lifecycle/check method stubs.
            extra = _cog_extras if args.full else ''
            if args.class_name:
                name = args.class_name
            else:
                # Derive a CamelCase class name from the file name,
                # treating '-' and '_' as word separators.
                name = str(directory.stem)
                if '-' in name or '_' in name:
                    translation = str.maketrans('-_', '  ')
                    name = name.translate(translation).title().replace(' ', '')
                else:
                    name = name.title()

            if args.display_name:
                attrs += f', name="{args.display_name}"'
            if args.hide_commands:
                attrs += ', command_attrs=dict(hidden=True)'
            fp.write(_cog_template.format(name=name, extra=extra, attrs=attrs))
    except OSError as exc:
        parser.error(f'could not create cog file ({exc})')
    else:
        print('successfully made cog at', directory)
def add_newbot_args(subparser):
    """Register the `newbot` subcommand and its CLI options."""
    bot_parser = subparser.add_parser('newbot', help='creates a command bot project quickly')
    bot_parser.set_defaults(func=newbot)

    # Positional arguments.
    bot_parser.add_argument('name', help='the bot project name')
    bot_parser.add_argument('directory', help='the directory to place it in (default: .)', nargs='?', default=Path.cwd())

    # Optional flags.
    bot_parser.add_argument('--prefix', help='the bot prefix (default: $)', default='$', metavar='<prefix>')
    bot_parser.add_argument('--sharded', help='whether to use AutoShardedBot', action='store_true')
    bot_parser.add_argument('--no-git', help='do not create a .gitignore file', action='store_true', dest='no_git')
def add_newcog_args(subparser):
    """Register the `newcog` subcommand and its CLI options."""
    cog_parser = subparser.add_parser('newcog', help='creates a new cog template quickly')
    cog_parser.set_defaults(func=newcog)

    # Positional arguments.
    cog_parser.add_argument('name', help='the cog name')
    cog_parser.add_argument('directory', help='the directory to place it in (default: cogs)', nargs='?', default=Path('cogs'))

    # Optional flags.
    cog_parser.add_argument('--class-name', help='the class name of the cog (default: <name>)', dest='class_name')
    cog_parser.add_argument('--display-name', help='the cog name (default: <name>)')
    cog_parser.add_argument('--hide-commands', help='whether to hide all commands in the cog', action='store_true')
    cog_parser.add_argument('--full', help='add all special methods as well', action='store_true')
def parse_args():
    """Build the CLI parser and return (parser, parsed_args)."""
    parser = argparse.ArgumentParser(prog='pydiscord', description='Tools for helping with pydiscord.py')
    parser.add_argument('-v', '--version', action='store_true', help='shows the library version')
    # `core` handles a bare invocation (no subcommand).
    parser.set_defaults(func=core)

    subparser = parser.add_subparsers(dest='subcommand', title='subcommands')
    for register in (add_newbot_args, add_newcog_args):
        register(subparser)
    return parser, parser.parse_args()
def main():
    """CLI entry point: parse arguments and dispatch to the selected handler."""
    cli_parser, cli_args = parse_args()
    cli_args.func(cli_parser, cli_args)
# Allow both `python -m pydiscord` and direct script execution.
if __name__ == '__main__':
    main()
| 31.684887 | 122 | 0.649279 |
6bfdcb8a93cb6519c4f397dea31b607f6bfa8a28 | 3,244 | py | Python | dataset/hcp/loader.py | cassianobecker/dnn | bb2ea04f77733de9df10f795bb049ac3b9d30478 | [
"MIT"
] | 3 | 2020-02-21T21:35:07.000Z | 2020-09-29T15:20:00.000Z | dataset/hcp/loader.py | cassianobecker/dnn | bb2ea04f77733de9df10f795bb049ac3b9d30478 | [
"MIT"
] | 27 | 2020-02-20T21:00:23.000Z | 2020-05-22T15:23:25.000Z | dataset/hcp/loader.py | cassianobecker/dnn | bb2ea04f77733de9df10f795bb049ac3b9d30478 | [
"MIT"
] | null | null | null | import os
import torch.utils.data
from dataset.hcp.reader import HcpReader, SkipSubjectException
from util.logging import get_logger, set_logger
from util.lang import to_bool
from fwk.config import Config
import numpy.random as npr
import numpy as np
class HcpDataset(torch.utils.data.Dataset):
    """
    A PyTorch Dataset serving HCP diffusion (DWI) tensor images and their
    covariate targets, one subject per item.
    """

    def __init__(self, device, subjects, half_precision=False, max_img_channels=None, perturb=False, regression=False):
        # Set up a dedicated file logger for the data-loading pipeline.
        results_path = os.path.expanduser(Config.config['EXPERIMENT']['results_path'])
        if not os.path.exists(os.path.join(results_path, 'log')):
            os.mkdir(os.path.join(results_path, 'log'))
        log_furl = os.path.join(results_path, 'log', 'dataloader.log')
        set_logger('HcpDataset', Config.config['LOGGING']['dataloader_level'], log_furl)
        self.logger = get_logger('HcpDataset')
        self.device = device
        self.half_precision = half_precision
        self.max_img_channels = max_img_channels
        self.perturb = perturb
        self.regression = regression
        self.reader = HcpReader()
        # Optional spatial region restriction, parsed from the config.
        if Config.config.has_option('TRANSFORMS', 'region'):
            region_str = Config.config['TRANSFORMS']['region']
            self.region = self.reader.parse_region(region_str)
        else:
            self.region = None
        self.scale = int(Config.get_option('TRANSFORMS', 'scale', 1))
        self.subjects = subjects

    def __len__(self):
        return len(self.subjects)

    def __getitem__(self, idx):
        subject = self.subjects[idx]
        return self.data_for_subject(
            subject,
            region=self.region,
            max_img_channels=self.max_img_channels,
            perturb=self.perturb
        )

    def data_for_subject(self, subject, region=None, max_img_channels=None, perturb=False):
        """Load the DWI tensor and covariate target for one subject.

        Returns a (dwi_tensor, target, subject) tuple; the first two are
        ``None`` when the reader decides to skip the subject.
        """
        # BUG FIX: this previously initialized `dti_tensor`, leaving the
        # name actually returned below (`dwi_tensor`) unbound whenever
        # SkipSubjectException fired before assignment -> NameError
        # instead of the intended (None, None, subject) skip result.
        dwi_tensor, target = None, None
        try:
            self.reader.logger.info("feeding subject {:}".format(subject))
            dwi_tensor = self.reader.load_dwi_tensor_image(
                subject,
                region=region,
                max_img_channels=max_img_channels,
                scale=self.scale,
                perturb=perturb
            )
            target = self.reader.load_covariate(subject, regression=self.regression)
            if to_bool(Config.get_option('DATABASE', 'randomize', 'False')):
                dwi_tensor = self._randomize_dwi_tensor(dwi_tensor, target)
        except SkipSubjectException:
            self.reader.logger.warning("skipping subject {:}".format(subject))

        return dwi_tensor, target, subject

    def _randomize_dwi_tensor(self, dwi_tensor, target):
        # Multiply positive-class tensors by uniform noise in [-1, 1)
        # (a label-randomization control enabled via the config).
        if target.argmax() == 1:
            dwi_tensor = dwi_tensor * (2 * npr.rand(*dwi_tensor.shape) - 1)
        return dwi_tensor.astype(np.double)

    def tensor_size(self):
        # Shape of the first subject's DWI tensor.
        tensor_shape = self.__getitem__(0)[0].shape
        return tensor_shape

    def number_of_classes(self):
        # Size of the first subject's target vector.
        num_classes = self.__getitem__(0)[1].size
        return num_classes
class HcpDataLoader(torch.utils.data.DataLoader):
    """Thin DataLoader subclass reserved for HCP-specific extensions.

    Currently adds no behavior of its own; everything is forwarded to
    torch's DataLoader.
    """

    def __init__(self, *args, **kwargs):
        super(HcpDataLoader, self).__init__(*args, **kwargs)
| 31.803922 | 119 | 0.647041 |
2ac73ebf7bc2b5e305be33c2be635b3a5ead9c44 | 1,077 | py | Python | pyvisdk/do/disallowed_migration_device_attached.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/disallowed_migration_device_attached.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/disallowed_migration_device_attached.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def DisallowedMigrationDeviceAttached(vim, *args, **kwargs):
    '''The virtual machine is using a type of device that prevents migration.'''

    obj = vim.client.factory.create('{urn:vim25}DisallowedMigrationDeviceAttached')

    # do some validation checking...
    # BUG FIX: the message used to claim "at least 6 arguments" even though
    # the check (and the `required` list below) expects 5, and it reported
    # only len(args) while validating len(args) + len(kwargs).
    if (len(args) + len(kwargs)) < 5:
        raise IndexError('Expected at least 5 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'fault', 'dynamicProperty', 'dynamicType', 'faultCause', 'faultMessage' ]
    optional = [ ]

    # Positional arguments fill the required (then optional) slots in order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword arguments must name a known attribute.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| 32.636364 | 124 | 0.618384 |
ed9cdcbf7ce6ed853e4232e552d1e9f0671ddfb7 | 604 | py | Python | capstone2/users/models.py | onghbaophuoc118/Capstone2_Final | 1694848250d11617fb3bfa82bd61583e60b8422e | [
"MIT"
] | null | null | null | capstone2/users/models.py | onghbaophuoc118/Capstone2_Final | 1694848250d11617fb3bfa82bd61583e60b8422e | [
"MIT"
] | 3 | 2021-05-12T05:25:43.000Z | 2022-03-30T20:05:07.000Z | capstone2/users/models.py | onghbaophuoc118/Capstone2_Final | 1694848250d11617fb3bfa82bd61583e60b8422e | [
"MIT"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
    """Default user model for Capstone2."""

    # A single free-form display name is stored because a first/last
    # name split does not cover name patterns around the globe.
    name = CharField(_("Name of User"), blank=True, max_length=255)

    def get_absolute_url(self):
        """Return the URL of this user's detail view as a string."""
        url_kwargs = {"username": self.username}
        return reverse("users:detail", kwargs=url_kwargs)
| 28.761905 | 74 | 0.690397 |
21aeec8787692f947efe46bc56e1b8a6dc2f2941 | 1,509 | py | Python | bsp/realview-a8/rtconfig.py | Davidfind/rt-thread | 56f1a8af4f9e8bad0a0fdc5cea7112767267b243 | [
"Apache-2.0"
] | 10 | 2019-12-23T07:18:27.000Z | 2020-12-19T04:35:43.000Z | bsp/realview-a8/rtconfig.py | zlzerg/rt-thread | c0a400ccbee720fc0e9ee904298f09bd07a21382 | [
"Apache-2.0"
] | 5 | 2019-02-28T10:07:03.000Z | 2019-03-11T10:40:20.000Z | bsp/realview-a8/rtconfig.py | zlzerg/rt-thread | c0a400ccbee720fc0e9ee904298f09bd07a21382 | [
"Apache-2.0"
] | 7 | 2019-07-01T02:50:47.000Z | 2020-12-11T10:01:07.000Z | import os
# toolchains options
ARCH='arm'
CPU='realview-a8-vmm'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
PLATFORM = 'gcc'
# EXEC_PATH = r'/opt/arm-2012.09/bin'
EXEC_PATH = r'C:\Program Files (x86)\CodeSourcery\Sourcery_CodeBench_Lite_for_ARM_EABI\bin'
EXEC_PATH = '/opt/gcc-arm-none-eabi-4_8-2014q1_gri/bin'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -march=armv7-a -mtune=cortex-a8 -mfpu=vfpv3-d16 -ftree-vectorize -ffast-math -mfloat-abi=softfp'
CFLAGS = DEVICE + ' -Wall'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__'
LINK_SCRIPT = 'realview_vmm.lds'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=realview.map,-cref,-u,system_vectors'+\
' -T %s' % LINK_SCRIPT
CPATH = ''
LPATH = ''
# generate debug info in all cases
AFLAGS += ' -gdwarf-2'
CFLAGS += ' -g -gdwarf-2'
if BUILD == 'debug':
CFLAGS += ' -O0'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' +\
SIZE + ' $TARGET \n' +\
OBJDUMP + ' -S $TARGET > rtt.S\n'
| 26.946429 | 111 | 0.585156 |
10d249fb37bfd9c1e26b5c25728cd24bc75dfbb8 | 18 | py | Python | go-engine-generator/lib/gosgf/__init__.py | antkaynak/GoGame | 421efb59bee7edb1c3eb640a301d6c12f1788322 | [
"MIT"
] | 6 | 2019-02-26T18:21:25.000Z | 2021-07-04T23:23:56.000Z | go-engine-generator/lib/gosgf/__init__.py | antkaynak/GoGame | 421efb59bee7edb1c3eb640a301d6c12f1788322 | [
"MIT"
] | null | null | null | go-engine-generator/lib/gosgf/__init__.py | antkaynak/GoGame | 421efb59bee7edb1c3eb640a301d6c12f1788322 | [
"MIT"
] | null | null | null | from .sgf import * | 18 | 18 | 0.722222 |
f87a56a89dcde62c5ceed11835830ff5d34addfa | 11,619 | py | Python | src/livestreamer/stream/hls.py | jaccarmac/livestreamer | ab80dbd6560f6f9835865b2fc9f9c6015aee5658 | [
"BSD-2-Clause",
"MIT"
] | 3,614 | 2015-01-01T08:07:27.000Z | 2022-03-20T00:31:07.000Z | src/livestreamer/stream/hls.py | kviktor/livestreamer | ab80dbd6560f6f9835865b2fc9f9c6015aee5658 | [
"BSD-2-Clause",
"MIT"
] | 1,028 | 2015-01-02T03:38:38.000Z | 2021-08-06T16:17:48.000Z | src/livestreamer/stream/hls.py | kviktor/livestreamer | ab80dbd6560f6f9835865b2fc9f9c6015aee5658 | [
"BSD-2-Clause",
"MIT"
] | 795 | 2015-01-02T06:12:04.000Z | 2022-03-27T23:41:53.000Z | from collections import defaultdict, namedtuple
# Optional decryption support: AES-128 segment decryption is only
# available when pyCrypto can be imported; CAN_DECRYPT records that.
try:
    from Crypto.Cipher import AES
    import struct

    def num_to_iv(n):
        # 16-byte IV: 8 zero bytes followed by the big-endian 64-bit
        # media sequence number (">8xq" pads 8 bytes, then packs a long).
        return struct.pack(">8xq", n)

    CAN_DECRYPT = True
except ImportError:
    CAN_DECRYPT = False
from . import hls_playlist
from .http import HTTPStream
from .segmented import (SegmentedStreamReader,
SegmentedStreamWriter,
SegmentedStreamWorker)
from ..exceptions import StreamError
Sequence = namedtuple("Sequence", "num segment")
class HLSStreamWriter(SegmentedStreamWriter):
    """Downloads, optionally decrypts, and writes HLS segments to the buffer."""

    def __init__(self, reader, *args, **kwargs):
        # Segment download behavior is driven by session options.
        options = reader.stream.session.options
        kwargs["retries"] = options.get("hls-segment-attempts")
        kwargs["threads"] = options.get("hls-segment-threads")
        kwargs["timeout"] = options.get("hls-segment-timeout")
        SegmentedStreamWriter.__init__(self, reader, *args, **kwargs)

        # Tracks the next byte offset per URI for EXT-X-BYTERANGE segments.
        self.byterange_offsets = defaultdict(int)
        # Cached decryption key bytes and the URI they were fetched from.
        self.key_data = None
        self.key_uri = None

    def create_decryptor(self, key, sequence):
        """Return an AES-CBC decryptor for a segment; only AES-128 is supported.

        The key is fetched over HTTP and cached until the key URI changes.
        """
        if key.method != "AES-128":
            raise StreamError("Unable to decrypt cipher {0}", key.method)

        if not key.uri:
            raise StreamError("Missing URI to decryption key")

        # Re-fetch the key only when the playlist points at a new key URI.
        if self.key_uri != key.uri:
            res = self.session.http.get(key.uri, exception=StreamError,
                                        **self.reader.request_params)
            self.key_data = res.content
            self.key_uri = key.uri

        # Explicit IV from the playlist, or derived from the sequence number.
        iv = key.iv or num_to_iv(sequence)

        # Pad IV if needed
        iv = b"\x00" * (16 - len(iv)) + iv

        return AES.new(self.key_data, AES.MODE_CBC, iv)

    def create_request_params(self, sequence):
        """Build request kwargs for a segment, adding a Range header for
        byterange segments and advancing the per-URI offset."""
        request_params = dict(self.reader.request_params)
        headers = request_params.pop("headers", {})

        if sequence.segment.byterange:
            bytes_start = self.byterange_offsets[sequence.segment.uri]
            # An explicit offset in the playlist overrides the running one.
            if sequence.segment.byterange.offset is not None:
                bytes_start = sequence.segment.byterange.offset

            # HTTP Range is inclusive, hence the -1 on the length.
            bytes_len = max(sequence.segment.byterange.range - 1, 0)
            bytes_end = bytes_start + bytes_len
            headers["Range"] = "bytes={0}-{1}".format(bytes_start, bytes_end)
            self.byterange_offsets[sequence.segment.uri] = bytes_end + 1

        request_params["headers"] = headers

        return request_params

    def fetch(self, sequence, retries=None):
        """Download one segment, retrying recursively on StreamError."""
        if self.closed or not retries:
            return

        try:
            request_params = self.create_request_params(sequence)
            return self.session.http.get(sequence.segment.uri,
                                         timeout=self.timeout,
                                         exception=StreamError,
                                         **request_params)
        except StreamError as err:
            self.logger.error("Failed to open segment {0}: {1}", sequence.num, err)
            return self.fetch(sequence, retries - 1)

    def write(self, sequence, res, chunk_size=8192):
        """Decrypt (if needed) and append a downloaded segment to the buffer."""
        if sequence.segment.key and sequence.segment.key.method != "NONE":
            try:
                decryptor = self.create_decryptor(sequence.segment.key,
                                                  sequence.num)
            except StreamError as err:
                self.logger.error("Failed to create decryptor: {0}", err)
                # Decryption is unrecoverable; abort the whole writer.
                self.close()
                return

            # If the input data is not a multiple of 16, cut off any garbage
            garbage_len = len(res.content) % 16
            if garbage_len:
                self.logger.debug("Cutting off {0} bytes of garbage "
                                  "before decrypting", garbage_len)
                content = decryptor.decrypt(res.content[:-(garbage_len)])
            else:
                content = decryptor.decrypt(res.content)
        else:
            content = res.content

        self.reader.buffer.write(content)
        self.logger.debug("Download of segment {0} complete", sequence.num)
class HLSStreamWorker(SegmentedStreamWorker):
    """Polls the media playlist and yields segment sequences to download."""

    def __init__(self, *args, **kwargs):
        SegmentedStreamWorker.__init__(self, *args, **kwargs)

        self.playlist_changed = False
        # Media sequence number of the final segment (set for VOD playlists).
        self.playlist_end = None
        # Next sequence number to hand out; -1 means "not initialized yet".
        self.playlist_sequence = -1
        self.playlist_sequences = []
        # Seconds between playlist refreshes; refined from the playlist itself.
        self.playlist_reload_time = 15
        self.live_edge = self.session.options.get("hls-live-edge")

        self.reload_playlist()

    def reload_playlist(self):
        """Fetch and parse the media playlist, then update segment state."""
        if self.closed:
            return

        # Back off until the output buffer has room for more data.
        self.reader.buffer.wait_free()
        self.logger.debug("Reloading playlist")
        res = self.session.http.get(self.stream.url,
                                    exception=StreamError,
                                    **self.reader.request_params)

        try:
            playlist = hls_playlist.load(res.text, res.url)
        except ValueError as err:
            raise StreamError(err)

        if playlist.is_master:
            raise StreamError("Attempted to play a variant playlist, use "
                              "'hlsvariant://{0}' instead".format(self.stream.url))

        if playlist.iframes_only:
            raise StreamError("Streams containing I-frames only is not playable")

        media_sequence = playlist.media_sequence or 0
        # Pair each segment with its absolute media sequence number.
        sequences = [Sequence(media_sequence + i, s)
                     for i, s in enumerate(playlist.segments)]

        if sequences:
            self.process_sequences(playlist, sequences)

    def process_sequences(self, playlist, sequences):
        """Update reload timing, end-of-stream marker and the start position."""
        first_sequence, last_sequence = sequences[0], sequences[-1]

        if first_sequence.segment.key and first_sequence.segment.key.method != "NONE":
            self.logger.debug("Segments in this playlist are encrypted")

            if not CAN_DECRYPT:
                raise StreamError("Need pyCrypto installed to decrypt this stream")

        self.playlist_changed = ([s.num for s in self.playlist_sequences] !=
                                 [s.num for s in sequences])
        self.playlist_reload_time = (playlist.target_duration or
                                     last_sequence.segment.duration)
        self.playlist_sequences = sequences

        # Poll faster (but at least every second) when the playlist is stale.
        if not self.playlist_changed:
            self.playlist_reload_time = max(self.playlist_reload_time / 2, 1)

        if playlist.is_endlist:
            self.playlist_end = last_sequence.num

        if self.playlist_sequence < 0:
            if self.playlist_end is None:
                # Live stream: start a few segments back from the live edge.
                edge_index = -(min(len(sequences), max(int(self.live_edge), 1)))
                edge_sequence = sequences[edge_index]
                self.playlist_sequence = edge_sequence.num
            else:
                # VOD: start from the first segment.
                self.playlist_sequence = first_sequence.num

    def valid_sequence(self, sequence):
        # Only sequences at or past the current position are downloadable.
        return sequence.num >= self.playlist_sequence

    def iter_segments(self):
        """Generator of segment sequences; reloads the playlist between passes."""
        while not self.closed:
            for sequence in filter(self.valid_sequence, self.playlist_sequences):
                self.logger.debug("Adding segment {0} to queue", sequence.num)
                yield sequence

                # End of stream
                stream_end = self.playlist_end and sequence.num >= self.playlist_end
                if self.closed or stream_end:
                    return

                self.playlist_sequence = sequence.num + 1

            if self.wait(self.playlist_reload_time):
                try:
                    self.reload_playlist()
                except StreamError as err:
                    self.logger.warning("Failed to reload playlist: {0}", err)
class HLSStreamReader(SegmentedStreamReader):
    """Reader tying together the HLS playlist worker and segment writer."""

    __worker__ = HLSStreamWorker
    __writer__ = HLSStreamWriter

    def __init__(self, stream, *args, **kwargs):
        SegmentedStreamReader.__init__(self, stream, *args, **kwargs)
        self.logger = stream.session.logger.new_module("stream.hls")
        self.request_params = dict(stream.args)
        self.timeout = stream.session.options.get("hls-timeout")

        # These params are reserved for internal use
        for reserved_key in ("exception", "stream", "timeout", "url"):
            self.request_params.pop(reserved_key, None)
class HLSStream(HTTPStream):
    """Implementation of the Apple HTTP Live Streaming protocol

    *Attributes:*

    - :attr:`url` The URL to the HLS playlist.
    - :attr:`args` A :class:`dict` containing keyword arguments passed
      to :meth:`requests.request`, such as headers and cookies.

    .. versionchanged:: 1.7.0
       Added *args* attribute.

    """

    __shortname__ = "hls"

    def __init__(self, session_, url, **args):
        HTTPStream.__init__(self, session_, url, **args)

    def __repr__(self):
        return "<HLSStream({0!r})>".format(self.url)

    def __json__(self):
        """Serialize like an HTTPStream but drop HTTP-method/body fields."""
        json = HTTPStream.__json__(self)

        # Pretty sure HLS is GET only.
        del json["method"]
        del json["body"]

        return json

    def open(self):
        """Open the stream and return a started :class:`HLSStreamReader`."""
        reader = HLSStreamReader(self)
        reader.open()

        return reader

    @classmethod
    def parse_variant_playlist(cls, session_, url, name_key="name",
                               name_prefix="", check_streams=False,
                               **request_params):
        """Attempts to parse a variant playlist and return its streams.

        :param url: The URL of the variant playlist.
        :param name_key: Prefer to use this key as stream name, valid keys are:
                         name, pixels, bitrate.
        :param name_prefix: Add this prefix to the stream names.
        :param check_streams: Only allow streams that are accessible.
        """
        # Backwards compatibility with "namekey" and "nameprefix" params.
        name_key = request_params.pop("namekey", name_key)
        name_prefix = request_params.pop("nameprefix", name_prefix)

        res = session_.http.get(url, exception=IOError, **request_params)

        try:
            parser = hls_playlist.load(res.text, base_uri=res.url)
        except ValueError as err:
            raise IOError("Failed to parse playlist: {0}".format(err))

        streams = {}
        # I-frame-only playlists are not playable; skip them.
        for playlist in filter(lambda p: not p.is_iframe, parser.playlists):
            # Collect candidate names: explicit media name, resolution, bitrate.
            names = dict(name=None, pixels=None, bitrate=None)

            for media in playlist.media:
                if media.type == "VIDEO" and media.name:
                    names["name"] = media.name

            if playlist.stream_info.resolution:
                width, height = playlist.stream_info.resolution
                names["pixels"] = "{0}p".format(height)

            if playlist.stream_info.bandwidth:
                bw = playlist.stream_info.bandwidth

                if bw >= 1000:
                    names["bitrate"] = "{0}k".format(int(bw / 1000.0))
                else:
                    names["bitrate"] = "{0}k".format(bw / 1000.0)

            # Prefer the requested key, then fall back in a fixed order.
            stream_name = (names.get(name_key) or names.get("name") or
                           names.get("pixels") or names.get("bitrate"))

            # First playlist with a given name wins; duplicates are ignored.
            if not stream_name or stream_name in streams:
                continue

            if check_streams:
                # Best-effort reachability probe; unreachable variants are dropped.
                try:
                    session_.http.get(playlist.uri, **request_params)
                except Exception:
                    continue

            stream = HLSStream(session_, playlist.uri, **request_params)
            streams[name_prefix + stream_name] = stream

        return streams
| 36.083851 | 86 | 0.59308 |
479a862fc57618ecb3d3f6d2037ee3fd84f9ee43 | 9,274 | py | Python | test/azure/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/aio/operations/_header_operations.py | qwordy/autorest.python | 6b12df51c2a39a1285546b5a771b69f5896e794f | [
"MIT"
] | null | null | null | test/azure/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/aio/operations/_header_operations.py | qwordy/autorest.python | 6b12df51c2a39a1285546b5a771b69f5896e794f | [
"MIT"
] | null | null | null | test/azure/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/aio/operations/_header_operations.py | qwordy/autorest.python | 6b12df51c2a39a1285546b5a771b69f5896e794f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class HeaderOperations:
"""HeaderOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azurespecialproperties.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to issue HTTP requests.
        self._client = client
        # (De)serializers for request headers/bodies and responses.
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace_async
    async def custom_named_request_id(self, foo_client_request_id: str, **kwargs: Any) -> None:
        """Send foo-client-request-id = 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0 in the header of the request.

        :param foo_client_request_id: The fooRequestId.
        :type foo_client_request_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        # Map common HTTP error codes to azure-core exception types;
        # callers may extend/override via the "error_map" kwarg.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        # Construct URL
        url = self.custom_named_request_id.metadata["url"]  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters["foo-client-request-id"] = self._serialize.header(
            "foo_client_request_id", foo_client_request_id, "str"
        )
        header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 is a success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # Echoed request id returned by the service.
        response_headers = {}
        response_headers["foo-request-id"] = self._deserialize("str", response.headers.get("foo-request-id"))

        if cls:
            return cls(pipeline_response, None, response_headers)

    custom_named_request_id.metadata = {"url": "/azurespecials/customNamedRequestId"}  # type: ignore
@distributed_trace_async
async def custom_named_request_id_param_grouping(
self,
header_custom_named_request_id_param_grouping_parameters: "_models.HeaderCustomNamedRequestIdParamGroupingParameters",
**kwargs: Any
) -> None:
"""Send foo-client-request-id = 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0 in the header of the request,
via a parameter group.
:param header_custom_named_request_id_param_grouping_parameters: Parameter group.
:type header_custom_named_request_id_param_grouping_parameters: ~azurespecialproperties.models.HeaderCustomNamedRequestIdParamGroupingParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
_foo_client_request_id = None
if header_custom_named_request_id_param_grouping_parameters is not None:
_foo_client_request_id = header_custom_named_request_id_param_grouping_parameters.foo_client_request_id
accept = "application/json"
# Construct URL
url = self.custom_named_request_id_param_grouping.metadata["url"] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters["foo-client-request-id"] = self._serialize.header(
"foo_client_request_id", _foo_client_request_id, "str"
)
header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers["foo-request-id"] = self._deserialize("str", response.headers.get("foo-request-id"))
if cls:
return cls(pipeline_response, None, response_headers)
custom_named_request_id_param_grouping.metadata = {"url": "/azurespecials/customNamedRequestIdParamGrouping"} # type: ignore
@distributed_trace_async
async def custom_named_request_id_head(self, foo_client_request_id: str, **kwargs: Any) -> bool:
"""Send foo-client-request-id = 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0 in the header of the request.
:param foo_client_request_id: The fooRequestId.
:type foo_client_request_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
accept = "application/json"
# Construct URL
url = self.custom_named_request_id_head.metadata["url"] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters["foo-client-request-id"] = self._serialize.header(
"foo_client_request_id", foo_client_request_id, "str"
)
header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers["foo-request-id"] = self._deserialize("str", response.headers.get("foo-request-id"))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
custom_named_request_id_head.metadata = {"url": "/azurespecials/customNamedRequestIdHead"} # type: ignore
| 46.37 | 152 | 0.695061 |
997e310057a53faf6e579c5a14c509229d8e2431 | 10,050 | py | Python | agsconfig/services/extension_base.py | DavidWhittingham/agsconfig | c0ac6c37e5e49f87d2812220d756aef118c08024 | [
"BSD-3-Clause"
] | 1 | 2019-05-17T01:44:41.000Z | 2019-05-17T01:44:41.000Z | agsconfig/services/extension_base.py | DavidWhittingham/agsconfig | c0ac6c37e5e49f87d2812220d756aef118c08024 | [
"BSD-3-Clause"
] | 2 | 2019-04-09T02:01:26.000Z | 2019-06-25T05:27:11.000Z | agsconfig/services/extension_base.py | DavidWhittingham/agsconfig | c0ac6c37e5e49f87d2812220d756aef118c08024 | [
"BSD-3-Clause"
] | 2 | 2019-03-21T04:58:18.000Z | 2019-09-09T23:00:48.000Z | # coding=utf-8
"""This module contains the ExtensionBase abstract base class for implementing ArcGIS Server service extensions."""
# Python 2/3 compatibility
# pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-order,wrong-import-position
from __future__ import (absolute_import, division, print_function, unicode_literals)
from future.builtins.disabled import *
from future.builtins import *
from future.standard_library import install_aliases
install_aliases()
# pylint: enable=wildcard-import,unused-wildcard-import,wrong-import-order,wrong-import-position
# Python lib imports
import logging as _logging
from abc import ABCMeta
# Local package imports
from .._enum import StrEnum as Enum
from ..editing.edit_prop import EditorProperty
from ..model_base import ModelBase
class ExtensionBase(ModelBase):
    """Contains base settings/configuration that are common across ArcGIS Service extensions."""
    # NOTE(review): __metaclass__ assignment is a Python 2 idiom and has no
    # effect under Python 3; presumably kept for the py2/py3 compat layer above.
    __metaclass__ = ABCMeta
    _editor = None
    _extension_name = None
    _logger = _logging.getLogger(__name__)
    # Key of the capabilities entry in the sddraft property array.
    _web_capabilities_key = "WebCapabilities"
    # Template describing how to create the per-extension object in the
    # "agsJson" document when it does not already exist; shared by the
    # EditorProperty declarations below.
    _AGSJSON_EXTENSION_STRUCTURE = {"children": [{"value": lambda extension_name: {"typeName": extension_name}}]}
    class Capability(Enum):
        """Must be overridden by sub-classes if any capabilities are supported."""
        pass
    def __init__(self, editor, extension_name):
        """Initilises the class.
        Args:
            editor: An editor object that will receive metadata about each property
            extension_name: Used to find xpaths in sddrafts where there is more than one extension
        """
        self._editor = editor
        self._extension_name = extension_name
    @property
    def extension_name(self):
        # Read-only name of the extension; baked into the lambdas below to
        # locate this extension's nodes in each document format.
        return self._extension_name
    # Declarative mapping for the extension's capability list.  For each
    # supported format the "paths" entries locate (or, via the nested
    # "parent" templates, create) the target node, and "conversions"
    # translate between the Capability enum and the stored CSV string.
    # The structure is consumed verbatim by EditorProperty — do not reshape.
    capabilities = EditorProperty(
        {
            "formats": {
                "agsJson": {
                    "paths": [
                        {# yapf: disable
                            "document": "main",
                            "path": lambda extension_name: "$.extensions[?(@.typeName = '{0}')].capabilities".format(extension_name),
                            "parent": {
                                "children": [
                                    {
                                        "key": "capabilities"
                                    }
                                ],
                                "parent": _AGSJSON_EXTENSION_STRUCTURE
                            }
                        }# yapf: enable
                    ],
                    "conversions": [{
                        "id": "enumToString",
                        "enum": "Capability"
                    }, {
                        "id": "stringToCsv"
                    }],
                },
                "sddraft": {
                    "paths": [
                        {# yapf: disable
                            "path": lambda extension_name, _web_capabilities_key: "./Configurations/SVCConfiguration/Definition/Extensions/SVCExtension[TypeName='{}']/Info/PropertyArray/PropertySetProperty[Key='{}']/Value".format(extension_name, _web_capabilities_key),
                            "parent": {
                                "children": [
                                    {
                                        "tag": "Value",
                                        "attributes": {
                                            "{http://www.w3.org/2001/XMLSchema-instance}type": "xs:string"
                                        },
                                    }
                                ],
                                "parent": {
                                    "children": [
                                        {
                                            "tag": "PropertySetProperty",
                                            "attributes": {
                                                "{http://www.w3.org/2001/XMLSchema-instance}type": "typens:PropertySetProperty"
                                            },
                                            "children": [
                                                {
                                                    "tag": "Key",
                                                    "value": lambda _web_capabilities_key: "{}".format(_web_capabilities_key)
                                                }
                                            ]
                                        }
                                    ],
                                    "parent":{
                                        "children": [
                                            {
                                                "tag": "PropertyArray",
                                                "attributes": {
                                                    "{http://www.w3.org/2001/XMLSchema-instance}type": "typens:ArrayOfPropertySetProperty"
                                                }
                                            }
                                        ],
                                        "parent": {
                                            "children": [
                                                {
                                                    "tag": "Info",
                                                    "attributes": {
                                                        "{http://www.w3.org/2001/XMLSchema-instance}type": "typens:PropertySet"
                                                    }
                                                }
                                            ],
                                            "parent": {
                                                "children": [
                                                    {
                                                        "tag": "SVCExtension",
                                                        "attributes": {
                                                            "{http://www.w3.org/2001/XMLSchema-instance}type": "typens:SVCExtension"
                                                        },
                                                        "children": [
                                                            {
                                                                "tag": "TypeName",
                                                                "value": lambda extension_name: "{}".format(extension_name)
                                                            }
                                                        ]
                                                    }
                                                ]
                                            }
                                        }
                                    }
                                }
                            }
                        }# yapf: enable
                    ],
                    "conversions": [{
                        "id": "enumToString",
                        "enum": "Capability"
                    }, {
                        "id": "stringToCsv"
                    }]
                }
            }
        }
    )
    # Declarative mapping for the extension's enabled flag; stored as a
    # string ("true"/"false") in both formats, hence the boolToString
    # conversion with None treated as False.
    enabled = EditorProperty(
        {
            "formats": {
                "agsJson": {
                    "conversions": [{
                        "id": "boolToString",
                        "allowNone": False,
                        "noneAsFalse": True
                    }],
                    "paths": [
                        {# yapf: disable
                            "document": "main",
                            "path": lambda extension_name: "$.extensions[?(@.typeName = '{0}')].enabled".format(extension_name),
                            "parent": {
                                "children": [
                                    {
                                        "key": "enabled"
                                    }
                                ],
                                "parent": _AGSJSON_EXTENSION_STRUCTURE
                            }
                        }
                    ]
                },
                "sddraft": {
                    "conversions": [{
                        "id": "boolToString",
                        "allowNone": False,
                        "noneAsFalse": True
                    }],
                    "paths": [
                        { # yapf: disable
                            "path": lambda extension_name: "./Configurations/SVCConfiguration/Definition/Extensions/SVCExtension[TypeName='{}']/Enabled".format(extension_name),
                            "parent": {
                                "children": [
                                    {
                                        "tag": "Enabled"
                                    }
                                ],
                                "parent": {
                                    "children": [
                                        {
                                            "tag": "SVCExtension",
                                            "attributes": {
                                                "{http://www.w3.org/2001/XMLSchema-instance}type": "typens:SVCExtension"
                                            },
                                            "children": [
                                                {
                                                    "tag": "TypeName",
                                                    "value": lambda extension_name: "{}".format(extension_name)
                                                }
                                            ]
                                        }
                                    ]
                                }
                            }
                        } # yapf: enable
                    ]
                }
            }
        }
    )
| 45.475113 | 269 | 0.311343 |
1f0bd770bdd17344ea15fdff9c5ed0b0acab9b22 | 9,387 | py | Python | src/ebay_rest/api/buy_deal/api/event_item_api.py | gbm001/ebay_rest | 077d3478423ccd80ff35e0361821d6a11180bc54 | [
"MIT"
] | 3 | 2021-12-12T04:28:03.000Z | 2022-03-10T03:29:18.000Z | src/ebay_rest/api/buy_deal/api/event_item_api.py | jdavv/ebay_rest | 20fc88c6aefdae9ab90f9c1330e79abddcd750cd | [
"MIT"
] | 33 | 2021-06-16T20:44:36.000Z | 2022-03-30T14:55:06.000Z | src/ebay_rest/api/buy_deal/api/event_item_api.py | jdavv/ebay_rest | 20fc88c6aefdae9ab90f9c1330e79abddcd750cd | [
"MIT"
] | 7 | 2021-06-03T09:30:23.000Z | 2022-03-08T19:51:33.000Z | # coding: utf-8
"""
Deal API
<span class=\"tablenote\"><b>Note:</b> This is a <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#limited\" target=\"_blank\"> <img src=\"/cms/img/docs/partners-api.svg\" class=\"legend-icon partners-icon\" title=\"Limited Release\" alt=\"Limited Release\" />(Limited Release)</a> API available only to select developers approved by business units.</span><br /><br />This API allows third-party developers to search for and retrieve details about eBay deals and events, as well as the items associated with those deals and events. # noqa: E501
OpenAPI spec version: v1.3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ...buy_deal.api_client import ApiClient
class EventItemApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def get_event_items(self, event_ids, x_ebay_c_marketplace_id, **kwargs):  # noqa: E501
        """get_event_items  # noqa: E501
        This method returns a paginated set of event items. The result set contains all event items associated with the specified search criteria and marketplace ID. Request headers This method uses the X-EBAY-C-ENDUSERCTX request header to support revenue sharing for eBay Partner Networks and to improve the accuracy of shipping and delivery time estimations. For details see, Request headers in the Buying Integration Guide. Restrictions This method can return a maximum of 10,000 items. For a list of supported sites and other restrictions, see API Restrictions. eBay Partner Network: In order to receive a commission for your sales, you must use the URL returned in the itemAffiliateWebUrl field to forward your buyer to the ebay.com site.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_event_items(event_ids, x_ebay_c_marketplace_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str event_ids: The unique identifiers for the eBay events. Maximum Value: 1 (required)
        :param str x_ebay_c_marketplace_id: A header used to specify the eBay marketplace ID. (required)
        :param str category_ids: The unique identifier of the eBay category for the search. Maximum Value: 1
        :param str delivery_country: A filter for items that can be shipped to the specified country.
        :param str limit: The maximum number of items, from the current result set, returned on a single page. Default: 20
        :param str offset: The number of items that will be skipped in the result set. This is used with the limit field to control the pagination of the output. For example, if the offset is set to 0 and the limit is set to 10, the method will retrieve items 1 through 10 from the list of items returned. If the offset is set to 10 and the limit is set to 10, the method will retrieve items 11 through 20 from the list of items returned. Default: 0
        :return: EventItemSearchResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: always delegate to the *_with_http_info variant and
        # unwrap the payload unless the caller requested async execution.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_event_items_with_http_info(event_ids, x_ebay_c_marketplace_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_event_items_with_http_info(event_ids, x_ebay_c_marketplace_id, **kwargs)  # noqa: E501
            return data
    def get_event_items_with_http_info(self, event_ids, x_ebay_c_marketplace_id, **kwargs):  # noqa: E501
        """get_event_items  # noqa: E501
        This method returns a paginated set of event items. The result set contains all event items associated with the specified search criteria and marketplace ID. Request headers This method uses the X-EBAY-C-ENDUSERCTX request header to support revenue sharing for eBay Partner Networks and to improve the accuracy of shipping and delivery time estimations. For details see, Request headers in the Buying Integration Guide. Restrictions This method can return a maximum of 10,000 items. For a list of supported sites and other restrictions, see API Restrictions. eBay Partner Network: In order to receive a commission for your sales, you must use the URL returned in the itemAffiliateWebUrl field to forward your buyer to the ebay.com site.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_event_items_with_http_info(event_ids, x_ebay_c_marketplace_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str event_ids: The unique identifiers for the eBay events. Maximum Value: 1 (required)
        :param str x_ebay_c_marketplace_id: A header used to specify the eBay marketplace ID. (required)
        :param str category_ids: The unique identifier of the eBay category for the search. Maximum Value: 1
        :param str delivery_country: A filter for items that can be shipped to the specified country.
        :param str limit: The maximum number of items, from the current result set, returned on a single page. Default: 20
        :param str offset: The number of items that will be skipped in the result set. This is used with the limit field to control the pagination of the output. For example, if the offset is set to 0 and the limit is set to 10, the method will retrieve items 1 through 10 from the list of items returned. If the offset is set to 10 and the limit is set to 10, the method will retrieve items 11 through 20 from the list of items returned. Default: 0
        :return: EventItemSearchResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['event_ids', 'x_ebay_c_marketplace_id', 'category_ids', 'delivery_country', 'limit', 'offset']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument that is not a documented parameter.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_event_items" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'event_ids' is set
        if ('event_ids' not in params or
                params['event_ids'] is None):
            raise ValueError("Missing the required parameter `event_ids` when calling `get_event_items`")  # noqa: E501
        # verify the required parameter 'x_ebay_c_marketplace_id' is set
        if ('x_ebay_c_marketplace_id' not in params or
                params['x_ebay_c_marketplace_id'] is None):
            raise ValueError("Missing the required parameter `x_ebay_c_marketplace_id` when calling `get_event_items`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        # Optional filters are only sent when the caller supplied them.
        query_params = []
        if 'category_ids' in params:
            query_params.append(('category_ids', params['category_ids']))  # noqa: E501
        if 'delivery_country' in params:
            query_params.append(('delivery_country', params['delivery_country']))  # noqa: E501
        if 'event_ids' in params:
            query_params.append(('event_ids', params['event_ids']))  # noqa: E501
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'offset' in params:
            query_params.append(('offset', params['offset']))  # noqa: E501
        header_params = {}
        if 'x_ebay_c_marketplace_id' in params:
            header_params['X-EBAY-C-MARKETPLACE-ID'] = params['x_ebay_c_marketplace_id']  # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['api_auth']  # noqa: E501
        return self.api_client.call_api(
            '/event_item', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EventItemSearchResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| 61.352941 | 758 | 0.691808 |
9a583c6845e0ecbc7b9142fc21561cb769ca27eb | 1,191 | py | Python | tools/genesis_builder.py | anmolshl/raiden | f1cecb68cb43a2c00b2f719eadbe83137611a92a | [
"MIT"
] | null | null | null | tools/genesis_builder.py | anmolshl/raiden | f1cecb68cb43a2c00b2f719eadbe83137611a92a | [
"MIT"
] | null | null | null | tools/genesis_builder.py | anmolshl/raiden | f1cecb68cb43a2c00b2f719eadbe83137611a92a | [
"MIT"
] | null | null | null | from binascii import hexlify
from eth_utils import denoms, encode_hex
from raiden.utils import privatekey_to_address, sha3
from raiden.tests.utils.blockchain import GENESIS_STUB
CLUSTER_NAME = 'raiden'
def generate_accounts(seeds):
    """Derive a deterministic keypair for every seed.

    Each seed is hashed with ``sha3`` to obtain the private key, from which
    the matching address is computed.

    :param seeds: iterable of seed strings
    :return: dict mapping each seed to a dict with hex-encoded
        ``privatekey`` and ``address`` entries
    """
    accounts = {}
    for seed in seeds:
        private_key = sha3(seed)
        accounts[seed] = {
            'privatekey': encode_hex(private_key),
            'address': encode_hex(privatekey_to_address(private_key)),
        }
    return accounts
def mk_genesis(accounts, initial_alloc=denoms.ether * 100000000):
    """
    Create a genesis-block dict with allocation for all `accounts`.

    :param accounts: list of account addresses (hex)
    :param initial_alloc: the amount to allocate for the `accounts`
    :return: genesis dict
    """
    genesis = GENESIS_STUB.copy()
    # dict.copy() is shallow, so without this line every call would mutate the
    # shared GENESIS_STUB['alloc'] dict; give this genesis its own alloc map.
    genesis['alloc'] = dict(genesis['alloc'])
    # str.encode().hex() instead of binascii.hexlify(str): on Python 3
    # hexlify() rejects text input and returns bytes, which cannot be
    # concatenated to the '0x' str prefix.
    genesis['extraData'] = '0x' + CLUSTER_NAME.encode().hex()
    genesis['alloc'].update({
        account: {
            'balance': str(initial_alloc),
        }
        for account in accounts
    })
    # add the one-privatekey account ("1" * 64) for convenience
    genesis['alloc']['19e7e376e7c213b7e7e7e46cc70a5dd086daff2a'] = dict(balance=str(initial_alloc))
    return genesis
| 29.775 | 99 | 0.68262 |
75a2597adcdcae122cb7a9e4d78b3707b95ae319 | 889 | py | Python | get_data.py | fromdatavistodatascience/Boston-Airpot-Traffic-Visualisation | 9f30e89e68c25e6fbcf13d84fee561b53ff70d84 | [
"MIT"
] | null | null | null | get_data.py | fromdatavistodatascience/Boston-Airpot-Traffic-Visualisation | 9f30e89e68c25e6fbcf13d84fee561b53ff70d84 | [
"MIT"
] | null | null | null | get_data.py | fromdatavistodatascience/Boston-Airpot-Traffic-Visualisation | 9f30e89e68c25e6fbcf13d84fee561b53ff70d84 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import json
import requests
#Retrieving my api keys information to access the Google API.
def get_keys(path):
    """Read the JSON secrets file at *path* and return its parsed contents."""
    with open(path) as secrets_file:
        raw = secrets_file.read()
    return json.loads(raw)
# Load the Google Directions API key from a local secrets file (kept outside
# version control).
keys = get_keys("/Users/jjherranzsarrion/.secret/google_blog2_api.json")
api_key = keys['api_key']
# Build a Directions API request from Sheepfold Dog Park to Logan Terminal C.
# Origin/destination are pre-encoded with '+' in place of spaces.
url = 'https://maps.googleapis.com/maps/api/directions/json?'
origin = 'Sheepfold+Dog+Park+Fells+Path+Stoneham+MA'
destination = 'Terminal+C+Boston+Logan+International+Airport+Boston+MA+02128'
departure_time = '1566819000' # Unix timestamp (seconds since 1970-01-01) for Monday 19th August at 07:30 AM.
url_params = f"origin={origin}&destination={destination}&departure_time={departure_time}&key={api_key}"
request_url = url + url_params
# Fetch the directions and persist the raw JSON response for later analysis.
response = requests.get(request_url)
with open('response.json', 'w') as f:
    json.dump(response.json(), f)
12205cdcb4a42b874a3acf884eca74f8a5b13c1c | 14,334 | py | Python | test/functional/p2p_unrequested_blocks.py | ESIRInvestments/ESIRInvestments | f457e4bf68f2749e061ee2b9dd20bc3d7aa62944 | [
"MIT"
] | null | null | null | test/functional/p2p_unrequested_blocks.py | ESIRInvestments/ESIRInvestments | f457e4bf68f2749e061ee2b9dd20bc3d7aa62944 | [
"MIT"
] | null | null | null | test/functional/p2p_unrequested_blocks.py | ESIRInvestments/ESIRInvestments | f457e4bf68f2749e061ee2b9dd20bc3d7aa62944 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c.Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import earn_save_invest_repeatTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(earn_save_invest_repeatTestFramework):
    def add_options(self, parser):
        """Register the --testbinary option (defaults to $BITCOIND, else the bundled daemon name)."""
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "earn_save_invest_repeatd"),
                          help="earn_save_invest_repeatd binary to test")
    def set_test_params(self):
        """Use two fresh-chain nodes; node1 gets -minimumchainwork=0x10 so it
        skips processing of low-work unrequested blocks (see module docstring)."""
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [[], ["-minimumchainwork=0x10"]]
    def setup_network(self):
        """Start the nodes without connecting them to each other (unlike the
        default setup_network, which would link them)."""
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used for
        # the whitelisted case.
        # Node2 will be used for non-whitelisted peers to test the interaction
        # with nMinimumChainWork.
        # NOTE(review): only two nodes are configured in set_test_params; the
        # Node2 remark above appears stale — confirm against test history.
        self.setup_nodes()
def run_test(self):
# Setup the p2p connections and start up the network thread.
# test_node connects to node0 (not whitelisted)
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
# min_work_node connects to node1 (whitelisted)
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
network_thread_start()
# Test logic begins here
test_node.wait_for_verack()
min_work_node.wait_for_verack()
# 1. Have nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
min_work_node.send_message(msg_block(blocks_h2[1]))
for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
# 4. Send another two block that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_message(msg_block(block_h2f))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.solve()
test_node.send_message(msg_block(block_h3))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
self.nodes[0].getblock(block_h3.hash)
# But this block should be accepted by node since it has more work.
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node (as long as its not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
network_thread_join()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_message(headers_message)
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_message(msg_block(block_290f))
test_node.sync_with_ping()
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], 1)
self.sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
# Standard functional-test entry point: run the test directly as a script.
if __name__ == '__main__':
    AcceptBlockTest().main()
| 44.240741 | 113 | 0.677271 |
23c3661df25eb969a40ac78374a677f0b6b6148d | 2,452 | py | Python | 5e/app.py | fredsonchaves07/flask-course | 4e9a3a94c3c49595c1d810794ba7533499811b58 | [
"MIT"
] | 1 | 2021-01-05T01:29:22.000Z | 2021-01-05T01:29:22.000Z | 5e/app.py | fredsonchaves07/flask-course | 4e9a3a94c3c49595c1d810794ba7533499811b58 | [
"MIT"
] | null | null | null | 5e/app.py | fredsonchaves07/flask-course | 4e9a3a94c3c49595c1d810794ba7533499811b58 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, session, redirect, url_for, flash
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from datetime import datetime
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
import os
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# Absolute directory of this file; anchors the SQLite database path below.
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
# NOTE(review): hard-coded secret key is fine for a tutorial app, but should
# come from the environment in production.
app.config['SECRET_KEY'] = 'Desenvolvedor Python'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
# Turn off SQLAlchemy modification tracking (unused here).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
bootstrap = Bootstrap(app)  # Bootstrap templating helpers
moment = Moment(app)  # client-side timestamp rendering (Flask-Moment)
db = SQLAlchemy(app)  # ORM handle
migrate = Migrate(app, db)  # database migration support (Flask-Migrate)
# Models
class Role(db.Model):
    """ORM model for the ``roles`` table; one role groups many users."""
    __tablename__ = 'roles'
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Unique role name.
    name = db.Column(db.String(64), unique=True)
    # One-to-many link; each User gains a ``role`` back-reference.
    users = db.relationship('User', backref='role')
class User(db.Model):
    """ORM model for the ``users`` table."""
    __tablename__ = 'users'
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Unique display name collected by NameForm on the index page.
    name = db.Column(db.String(64), unique=True)
    # Foreign key into roles.id (see Role.users relationship).
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
class NameForm(FlaskForm):
    """Single-field WTForms form asking the visitor for their name."""
    # Field labels are user-facing text (Portuguese) and must stay unchanged.
    name = StringField('Informe o seu nome: ', validators=[DataRequired()])
    submit = SubmitField('Enviar')
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: ask for the visitor's name and remember it.

    On a valid POST the name is looked up in the ``users`` table; unknown
    names are inserted and ``session['known']`` is set accordingly, then the
    request is redirected back here (POST/redirect/GET pattern).  On GET the
    template is rendered with whatever the session remembers.
    """
    form = NameForm()
    if form.validate_on_submit():
        user = User.query.filter_by(name=form.name.data).first()
        if user is None:
            # First time we see this name: persist a new user.
            user = User(name=form.name.data)
            db.session.add(user)
            db.session.commit()
            session['known'] = False
        else:
            session['known'] = True
        session['name'] = form.name.data
        form.name.data = ''
        return redirect(url_for('index'))
    # The original function had a second, unreachable return that also passed
    # ``current_time``; the dead code is removed and current_time merged into
    # the single live return so the template can still use it.
    return render_template('index.html',
                           current_time=datetime.utcnow(),
                           form=form,
                           name=session.get('name'),
                           known=session.get('known', False))
@app.route('/user/<name>')
def user(name):
    """Render the personalised greeting page for ``name``."""
    page = render_template('user.html', name=name)
    return page
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page for unknown URLs."""
    body = render_template('404.html')
    return body, 404
@app.errorhandler(500)
def internal_error_serve(e):
    """Render the custom 500 page when an unhandled error occurs."""
    body = render_template('500.html')
    return body, 500
| 28.183908 | 116 | 0.662724 |
c271bd066f7ecebd73074ce773dc6ba3ca7e9022 | 626 | py | Python | takeTrainPics.py | raghavgupta0296/ASL | 5012e1f2fa66b7f75b22f576003c8be50c59286e | [
"MIT"
] | null | null | null | takeTrainPics.py | raghavgupta0296/ASL | 5012e1f2fa66b7f75b22f576003c8be50c59286e | [
"MIT"
] | null | null | null | takeTrainPics.py | raghavgupta0296/ASL | 5012e1f2fa66b7f75b22f576003c8be50c59286e | [
"MIT"
] | null | null | null | import cv2
cam = cv2.VideoCapture(0)
i=0
num = 19
while cam.isOpened:
key = cv2.waitKey(10)
if key==27:
break
r, frame = cam.read()
frame = cv2.flip(frame,1,0)
frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
if(i==0):
x = int(frame.shape[1]/2)
y = int(frame.shape[0]/2)
cv2.rectangle(frame,(x-150,y-150),(x+150,y+150),2)
cv2.imshow("store pic",frame)
if key == 32:
# cv2.imwrite("./TrainingImages/A/%s.png"%num,frame[y-150:y+150,x-150:x+150])
cv2.imwrite("%s.png"%num,frame[y-150:y+150,x-150:x+150])
num+=1
i+=1
| 25.04 | 86 | 0.549521 |
f13c7372d165781d559dde0d22ff940f61a0743b | 3,943 | py | Python | stonesoup/simulator/tests/test_detections.py | Isaac-JenkinsRA/Stone-Soup | 54c9c7dca8162dadaa58e85933cf10a0f86ce1e1 | [
"MIT"
] | 1 | 2020-07-21T15:20:20.000Z | 2020-07-21T15:20:20.000Z | stonesoup/simulator/tests/test_detections.py | Isaac-JenkinsRA/Stone-Soup | 54c9c7dca8162dadaa58e85933cf10a0f86ce1e1 | [
"MIT"
] | null | null | null | stonesoup/simulator/tests/test_detections.py | Isaac-JenkinsRA/Stone-Soup | 54c9c7dca8162dadaa58e85933cf10a0f86ce1e1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import pytest
import numpy as np
from ...types.state import State
from ..simple import SimpleDetectionSimulator, SwitchDetectionSimulator,\
SingleTargetGroundTruthSimulator, SwitchOneTargetGroundTruthSimulator
@pytest.fixture(params=[datetime.timedelta(seconds=1),
                        datetime.timedelta(seconds=10),
                        datetime.timedelta(minutes=1)])
def timestep(request):
    """Parametrised fixture: simulator step size (1 s, 10 s or 1 min)."""
    return request.param
def test_simple_detection_simulator(
        transition_model1, measurement_model, timestep):
    """SimpleDetectionSimulator produces timed target + clutter detections."""
    initial_state = State(
        np.array([[0], [0], [0], [0]]), timestamp=datetime.datetime.now())
    groundtruth = SingleTargetGroundTruthSimulator(
        transition_model1, initial_state, timestep)
    # Square surveillance region, +/-5000 in each dimension.
    meas_range = np.array([[-1, 1], [-1, 1]]) * 5000
    simulate_detections = SimpleDetectionSimulator(
        groundtruth, measurement_model, meas_range, clutter_rate=3)
    total_detections = set()
    clutter_detections = set()
    for step, (time, detections) in enumerate(simulate_detections):
        total_detections |= detections
        clutter_detections |= simulate_detections.clutter_detections
        # Check time increments correctly
        assert time == initial_state.timestamp + step * timestep
    # Check both real and clutter detections are generated
    assert len(total_detections) > len(clutter_detections)
    # Check clutter is generated within specified bounds
    for clutter in clutter_detections:
        assert (meas_range[:, 0] <= clutter.state_vector.ravel()).all()
        assert (meas_range[:, 1] >= clutter.state_vector.ravel()).all()
    # rate 3 over a 10000 x 10000 region -> 3 / 1e8 expected density
    assert simulate_detections.clutter_spatial_density == 3e-8
def test_switch_detection_simulator(
        transition_model1, transition_model2, measurement_model, timestep):
    """Switching detection probabilities yield fewer detections than P_d=1.

    Runs a SwitchDetectionSimulator that alternates between detection
    probabilities 0 and 1, and a SimpleDetectionSimulator reference with
    probability 1 on the same ground truth, then compares their outputs.
    """
    initial_state = State(
        np.array([[0], [0], [0], [0]]), timestamp=datetime.datetime.now())
    # Symmetric 2x2 Markov switching matrix between the two motion models.
    model_probs = [[0.5, 0.5], [0.5, 0.5]]
    groundtruth = SwitchOneTargetGroundTruthSimulator(
        transition_models=[transition_model1, transition_model2],
        model_probs=model_probs,
        initial_state=initial_state,
        timestep=timestep)
    meas_range = np.array([[-1, 1], [-1, 1]]) * 5000
    # Create detector with 0 and 1 detection probability.
    detector = SwitchDetectionSimulator(
        groundtruth, measurement_model, meas_range, clutter_rate=3,
        detection_probabilities=[0, 1])
    # Create reference detector with 1 detection probability.
    test_detector = SimpleDetectionSimulator(
        groundtruth, measurement_model, meas_range, clutter_rate=3,
        detection_probability=1
    )
    # Run both detectors
    total_detections = set()
    clutter_detections = set()
    for step, (time, detections) in enumerate(detector):
        total_detections |= detections
        clutter_detections |= detector.clutter_detections
        # Check time increments correctly
        assert time == initial_state.timestamp + step * timestep
    test_detections = set()
    test_clutter_detections = set()
    for step, (time, detections) in enumerate(test_detector):
        test_detections |= detections
        test_clutter_detections |= test_detector.clutter_detections
    # Check both real and clutter detections are generated
    assert len(total_detections)
    assert len(clutter_detections)
    # Check clutter is generated within specified bounds
    for clutter in clutter_detections | test_clutter_detections:
        assert (meas_range[:, 0] <= clutter.state_vector.ravel()).all()
        assert (meas_range[:, 1] >= clutter.state_vector.ravel()).all()
    # Ensure switching probability detector has less detections than 100%
    # detection probability detector i.e. it switched to zero probability
    # of detection at some point.
    assert len(total_detections - clutter_detections) \
        < len(test_detections - test_clutter_detections)
| 38.656863 | 75 | 0.70809 |
2094c8319f0d595c0c356a5025ad07df20d7f6bc | 13,353 | py | Python | ultracart/models/order_payment_e_check.py | gstingy/uc_python_api | 9a0bd3f6e63f616586681518e44fe37c6bae2bba | [
"Apache-2.0"
] | null | null | null | ultracart/models/order_payment_e_check.py | gstingy/uc_python_api | 9a0bd3f6e63f616586681518e44fe37c6bae2bba | [
"Apache-2.0"
] | null | null | null | ultracart/models/order_payment_e_check.py | gstingy/uc_python_api | 9a0bd3f6e63f616586681518e44fe37c6bae2bba | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
def _length_limited(attr, max_length, doc):
    """Build a property backed by ``attr`` that rejects strings longer than
    ``max_length`` characters (``None`` is always accepted)."""
    name = attr.lstrip('_')

    def _get(self):
        return getattr(self, attr)

    def _set(self, value):
        if value is not None and len(value) > max_length:
            raise ValueError(
                "Invalid value for `%s`, length must be less than or equal to `%d`"
                % (name, max_length))
        setattr(self, attr, value)

    return property(_get, _set, doc=doc)


def _choice_of(attr, allowed_values, doc):
    """Build a property backed by ``attr`` that only accepts one of
    ``allowed_values``.

    ``None`` is also accepted: the original generated setters raised on
    ``None`` even though ``__init__`` guards with ``is not None``; accepting
    it makes these setters consistent with the length-limited ones.
    """
    name = attr.lstrip('_')

    def _get(self):
        return getattr(self, attr)

    def _set(self, value):
        if value is not None and value not in allowed_values:
            raise ValueError(
                "Invalid value for `%s` (%s), must be one of %s"
                % (name, value, allowed_values))
        setattr(self, attr, value)

    return property(_get, _set, doc=doc)


class OrderPaymentECheck(object):
    """Electronic-check (ACH) payment details attached to an order.

    Swagger-generated model, rewritten to drop the Python-2-only
    ``six.iteritems`` call and the repeated per-property boilerplate.
    All attributes are optional strings; per the API docs, sensitive values
    (account number, tax id, license number) are masked to their last four
    characters.
    """

    # Maps attribute name -> swagger type (used for (de)serialisation).
    swagger_types = {
        'bank_aba_code': 'str',
        'bank_account_name': 'str',
        'bank_account_number': 'str',
        'bank_account_type': 'str',
        'bank_name': 'str',
        'bank_owner_type': 'str',
        'customer_tax_id': 'str',
        'drivers_license_dob': 'str',
        'drivers_license_number': 'str',
        'drivers_license_state': 'str'
    }

    # Maps attribute name -> JSON key in the API payload.
    attribute_map = {
        'bank_aba_code': 'bank_aba_code',
        'bank_account_name': 'bank_account_name',
        'bank_account_number': 'bank_account_number',
        'bank_account_type': 'bank_account_type',
        'bank_name': 'bank_name',
        'bank_owner_type': 'bank_owner_type',
        'customer_tax_id': 'customer_tax_id',
        'drivers_license_dob': 'drivers_license_dob',
        'drivers_license_number': 'drivers_license_number',
        'drivers_license_state': 'drivers_license_state'
    }

    # Validated properties (same names, getters, setters and error messages
    # as the original hand-expanded ones).
    bank_aba_code = _length_limited(
        '_bank_aba_code', 9, "Bank routing code")
    bank_account_name = _length_limited(
        '_bank_account_name', 50, "Bank account name")
    bank_account_number = _length_limited(
        '_bank_account_number', 50, "Bank account number (masked to last 4)")
    bank_account_type = _choice_of(
        '_bank_account_type', ["Checking", "Savings"], "Bank account type")
    bank_name = _length_limited(
        '_bank_name', 50, "Bank name")
    bank_owner_type = _choice_of(
        '_bank_owner_type', ["Personal", "Business"], "Bank owner type")
    customer_tax_id = _length_limited(
        '_customer_tax_id', 9, "Customer tax id (masked to last 4)")
    drivers_license_dob = _length_limited(
        '_drivers_license_dob', 10, "Driver license date of birth")
    drivers_license_number = _length_limited(
        '_drivers_license_number', 50, "Driver license number (masked to last 4)")
    drivers_license_state = _length_limited(
        '_drivers_license_state', 2, "Driver license state")

    def __init__(self, bank_aba_code=None, bank_account_name=None,
                 bank_account_number=None, bank_account_type=None,
                 bank_name=None, bank_owner_type=None, customer_tax_id=None,
                 drivers_license_dob=None, drivers_license_number=None,
                 drivers_license_state=None):
        """All parameters are optional; provided (non-``None``) values are
        routed through the validating property setters above."""
        self._bank_aba_code = None
        self._bank_account_name = None
        self._bank_account_number = None
        self._bank_account_type = None
        self._bank_name = None
        self._bank_owner_type = None
        self._customer_tax_id = None
        self._drivers_license_dob = None
        self._drivers_license_number = None
        self._drivers_license_state = None
        self.discriminator = None

        if bank_aba_code is not None:
            self.bank_aba_code = bank_aba_code
        if bank_account_name is not None:
            self.bank_account_name = bank_account_name
        if bank_account_number is not None:
            self.bank_account_number = bank_account_number
        if bank_account_type is not None:
            self.bank_account_type = bank_account_type
        if bank_name is not None:
            self.bank_name = bank_name
        if bank_owner_type is not None:
            self.bank_owner_type = bank_owner_type
        if customer_tax_id is not None:
            self.customer_tax_id = customer_tax_id
        if drivers_license_dob is not None:
            self.drivers_license_dob = drivers_license_dob
        if drivers_license_number is not None:
            self.drivers_license_number = drivers_license_number
        if drivers_license_state is not None:
            self.drivers_license_state = drivers_license_state

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models, lists and dicts (anything exposing ``to_dict``)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when they are the same type with equal state."""
        if not isinstance(other, OrderPaymentECheck):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 32.808354 | 260 | 0.638358 |
c70190c57acf3927ee46833cc4f535e55bbc78b9 | 273 | py | Python | my-r-lang/script/show.py | crux-plus/hello-world | 87aed8e58399d2434adbd18d84ee6cde05ae285b | [
"MIT"
] | null | null | null | my-r-lang/script/show.py | crux-plus/hello-world | 87aed8e58399d2434adbd18d84ee6cde05ae285b | [
"MIT"
] | null | null | null | my-r-lang/script/show.py | crux-plus/hello-world | 87aed8e58399d2434adbd18d84ee6cde05ae285b | [
"MIT"
] | null | null | null | import os;
import cv2.cv2 as cv;
for root, dirs, files in os.walk(os.path.join('.', 'dist')):
for file in files:
if os.path.splitext(file)[1] == '.png':
image = cv.imread(os.path.join('.', 'dist', file));
cv.imshow('image', image);
cv.waitKey(0);
| 24.818182 | 60 | 0.582418 |
0bb35d7074b91f70bea7af67317fb49e6fd87b8e | 4,674 | py | Python | python/ray/tune/suggest/bayesopt.py | hershg/ray | a1744f67fe954d8408c5b84e28ecccc130157f8e | [
"Apache-2.0"
] | 2 | 2019-06-17T12:38:24.000Z | 2020-11-11T07:52:26.000Z | python/ray/tune/suggest/bayesopt.py | hershg/ray | a1744f67fe954d8408c5b84e28ecccc130157f8e | [
"Apache-2.0"
] | 3 | 2018-08-15T19:19:25.000Z | 2021-06-30T01:54:46.000Z | python/ray/tune/suggest/bayesopt.py | hershg/ray | a1744f67fe954d8408c5b84e28ecccc130157f8e | [
"Apache-2.0"
] | 2 | 2017-10-31T23:20:07.000Z | 2019-11-13T20:16:03.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import pickle
try: # Python 3 only -- needed for lint test.
import bayes_opt as byo
except ImportError:
byo = None
from ray.tune.suggest.suggestion import SuggestionAlgorithm
logger = logging.getLogger(__name__)
class BayesOptSearch(SuggestionAlgorithm):
    """A wrapper around BayesOpt to provide trial suggestions.

    Requires BayesOpt to be installed. You can install BayesOpt with the
    command: `pip install bayesian-optimization`.

    Parameters:
        space (dict): Continuous search space. Parameters will be sampled from
            this space which will be used to run trials.
        max_concurrent (int): Number of maximum concurrent trials. Defaults
            to 10.
        metric (str): The training result objective value attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        utility_kwargs (dict): Parameters to define the utility function. Must
            provide values for the keys `kind`, `kappa`, and `xi`.
        random_state (int): Used to initialize BayesOpt.
        verbose (int): Sets verbosity level for BayesOpt packages.
        use_early_stopped_trials (bool): Whether to use early terminated
            trial results in the optimization process.

    Example:
        >>> space = {
        >>>     'width': (0, 20),
        >>>     'height': (-100, 100),
        >>> }
        >>> algo = BayesOptSearch(
        >>>     space, max_concurrent=4, metric="mean_loss", mode="min")
    """

    def __init__(self,
                 space,
                 max_concurrent=10,
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 utility_kwargs=None,
                 random_state=1,
                 verbose=0,
                 **kwargs):
        assert byo is not None, (
            "BayesOpt must be installed!. You can install BayesOpt with"
            " the command: `pip install bayesian-optimization`.")
        assert type(max_concurrent) is int and max_concurrent > 0
        assert utility_kwargs is not None, (
            "Must define arguments for the utiliy function!")
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        # Legacy parameter: `reward_attr` overrides metric and forces "max".
        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        self._max_concurrent = max_concurrent
        self._metric = metric
        # BayesOpt always maximizes its target; minimization is handled by
        # flipping the sign of the reported metric (see _process_result).
        if mode == "max":
            self._metric_op = 1.
        elif mode == "min":
            self._metric_op = -1.
        # Maps trial_id -> the parameter dict suggested for that trial.
        self._live_trial_mapping = {}
        self.optimizer = byo.BayesianOptimization(
            f=None, pbounds=space, verbose=verbose, random_state=random_state)
        self.utility = byo.UtilityFunction(**utility_kwargs)
        super(BayesOptSearch, self).__init__(**kwargs)

    def _suggest(self, trial_id):
        """Return a new parameter dict for `trial_id`, or None when the
        concurrency cap is reached."""
        if self._num_live_trials() >= self._max_concurrent:
            return None
        new_trial = self.optimizer.suggest(self.utility)
        self._live_trial_mapping[trial_id] = new_trial
        # Deep-copy so callers cannot mutate the mapping kept for register().
        return copy.deepcopy(new_trial)

    def on_trial_result(self, trial_id, result):
        """Intermediate results are ignored; only final results are used."""
        pass

    def on_trial_complete(self,
                          trial_id,
                          result=None,
                          error=False,
                          early_terminated=False):
        """Notification for the completion of trial."""
        if result:
            self._process_result(trial_id, result, early_terminated)
        del self._live_trial_mapping[trial_id]

    def _process_result(self, trial_id, result, early_terminated=False):
        """Feed a finished trial's (sign-adjusted) metric back to BayesOpt.

        Early-terminated trials are skipped unless ``self._use_early_stopped``
        is set (presumably by the base class from `use_early_stopped_trials`;
        not visible here).
        """
        if early_terminated and self._use_early_stopped is False:
            return
        self.optimizer.register(
            params=self._live_trial_mapping[trial_id],
            target=self._metric_op * result[self._metric])

    def _num_live_trials(self):
        """Number of trials suggested but not yet completed."""
        return len(self._live_trial_mapping)

    def save(self, checkpoint_dir):
        """Pickle the BayesOpt optimizer state to the path `checkpoint_dir`
        (despite the name, this is used as a file path)."""
        trials_object = self.optimizer
        with open(checkpoint_dir, "wb") as output:
            pickle.dump(trials_object, output)

    def restore(self, checkpoint_dir):
        """Restore optimizer state previously written by :meth:`save`."""
        with open(checkpoint_dir, "rb") as input:
            trials_object = pickle.load(input)
        self.optimizer = trials_object
| 35.679389 | 78 | 0.615533 |
70a6075c21d82659eef49b53e4ecd0521f500f3f | 2,396 | py | Python | controller/Product.py | leandro-matos/flask-application-alura | 33bc0c84e0e67535cbb48dfda3aae36b76256ac8 | [
"MIT"
] | null | null | null | controller/Product.py | leandro-matos/flask-application-alura | 33bc0c84e0e67535cbb48dfda3aae36b76256ac8 | [
"MIT"
] | null | null | null | controller/Product.py | leandro-matos/flask-application-alura | 33bc0c84e0e67535cbb48dfda3aae36b76256ac8 | [
"MIT"
] | null | null | null | from datetime import datetime
from model.Product import Product
class ProductController():
    """Controller mediating between the web layer and the ``Product`` model."""

    def __init__(self):
        # A single model instance is reused for every operation.
        self.product_model = Product()

    def save_product(self, obj):
        """Create a new product from the mapping ``obj`` and persist it.

        ``obj`` must provide ``name``, ``description``, ``qtd``, ``price``,
        ``category`` and ``user_created``.  ``date_created`` is stamped with
        the current time and ``status`` starts as 1.
        """
        self.product_model.name = obj['name']
        self.product_model.description = obj['description']
        self.product_model.qtd = obj['qtd']
        self.product_model.price = obj['price']
        self.product_model.date_created = datetime.now()
        self.product_model.status = 1
        self.product_model.category = obj['category']
        self.product_model.user_created = obj['user_created']
        return self.product_model.save()

    def update_product(self, obj):
        """Update the product identified by ``obj['id']`` with ``obj``'s fields."""
        self.product_model.id = obj['id']
        return self.product_model.update(obj)

    def delete_product(self, obj):
        """Delete the product identified by ``obj['id']``."""
        self.product_model.id = obj['id']
        return self.product_model.delete(obj)

    def get_products(self, limit):
        """Return up to ``limit`` products as ``{'result': [...], 'status': code}``.

        ``status`` is 200 on success; on a model error the exception is
        printed and ``{'result': [], 'status': 400}`` is returned.
        """
        result = []
        try:
            res = self.product_model.get_all(limit=limit)
            for r in res:
                result.append({
                    'id': r.id,
                    'name': r.name,
                    'description': r.description,
                    # Bug fix: this previously serialised ``r.price`` as the
                    # quantity; use the real quantity, matching
                    # get_product_by_id below.
                    'qtd': str(r.qtd),
                    'price': str(r.price),
                    'image': r.image,
                    'date_created': r.date_created
                })
            status = 200
        except Exception as e:
            print(e)
            result = []
            status = 400
        finally:
            # NOTE: return inside finally also swallows any re-raised error;
            # kept for backward compatibility with existing callers.
            return {
                'result': result,
                'status': status
            }

    def get_product_by_id(self, product_id):
        """Return one product as ``{'result': {...}, 'status': code}``.

        On a model error the exception is printed and
        ``{'result': [], 'status': 400}`` is returned (list kept for
        backward compatibility with the original error payload).
        """
        result = {}
        try:
            self.product_model.id = product_id
            res = self.product_model.get_product_by_id()
            result = {
                'id': res.id,
                'name': res.name,
                'description': res.description,
                'qtd': str(res.qtd),
                'price': str(res.price),
                'image': res.image,
                'date_created': res.date_created,
            }
            status = 200
        except Exception as e:
            print(e)
            result = []
            status = 400
        finally:
            return {
                'result': result,
                'status': status
            }
| 29.95 | 61 | 0.492905 |
86f1c76ec1ff39c13dad65ef32c1b1359fe8f260 | 55,394 | py | Python | swig/python/osgeo/osr.py | miurahr/gdal | d547204c3338fee0096c18df37744bcb8a4499c7 | [
"Apache-2.0"
] | 3 | 2017-05-06T11:43:08.000Z | 2017-07-19T15:27:06.000Z | swig/python/osgeo/osr.py | miurahr/gdal | d547204c3338fee0096c18df37744bcb8a4499c7 | [
"Apache-2.0"
] | 29 | 2017-03-17T23:55:49.000Z | 2018-03-13T09:27:01.000Z | swig/python/osgeo/osr.py | miurahr/gdal | d547204c3338fee0096c18df37744bcb8a4499c7 | [
"Apache-2.0"
] | 1 | 2017-10-12T05:49:01.000Z | 2017-10-12T05:49:01.000Z | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
# SWIG boilerplate: locate and import the compiled extension module ``_osr``,
# choosing the mechanism by interpreter version.
if _swig_python_version_info >= (2, 7, 0):
    def swig_import_helper():
        # Try a package-relative import first, then fall back to top level.
        import importlib
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, '_osr')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            return importlib.import_module('_osr')
    _osr = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    def swig_import_helper():
        # Legacy (Python 2.6) path using the deprecated ``imp`` module.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_osr', [dirname(__file__)])
        except ImportError:
            import _osr
            return _osr
        try:
            _mod = imp.load_module('_osr', fp, pathname, description)
        finally:
            # find_module opened the file; always close it.
            if fp is not None:
                fp.close()
        return _mod
    _osr = swig_import_helper()
    del swig_import_helper
else:
    import _osr
del _swig_python_version_info
# Alias ``property`` for SWIG-generated code (guard kept for ancient Pythons).
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
# Uniform access to the builtins module under both Python 3 and Python 2.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Dynamic-attribute variant of the SWIG setter: unknown names are
    stored on the instance instead of raising AttributeError."""
    return _swig_setattr_nondynamic(self, class_type, name, value, static=0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Detect whether new-style classes are available: '_object' becomes the base
# class for the generated proxy classes and '_newclass' records the result.
try:
    _object = object
    _newclass = 1
except __builtin__.Exception:
    # Ancient Pythons without new-style classes fall back to a classic class.
    class _object:
        pass
    _newclass = 0
# ---------------------------------------------------------------------------
# Constants re-exported from the native '_osr' extension so Python callers
# can reference them as osgeo.osr.<NAME>.  Values mirror ogr_srs_api.h.
# ---------------------------------------------------------------------------

# Well-known WKT definitions.
SRS_WKT_WGS84_LAT_LONG = _osr.SRS_WKT_WGS84_LAT_LONG

# SRS_PT_*: projection method (PROJECTION node) names.
SRS_PT_ALBERS_CONIC_EQUAL_AREA = _osr.SRS_PT_ALBERS_CONIC_EQUAL_AREA
SRS_PT_AZIMUTHAL_EQUIDISTANT = _osr.SRS_PT_AZIMUTHAL_EQUIDISTANT
SRS_PT_CASSINI_SOLDNER = _osr.SRS_PT_CASSINI_SOLDNER
SRS_PT_CYLINDRICAL_EQUAL_AREA = _osr.SRS_PT_CYLINDRICAL_EQUAL_AREA
SRS_PT_BONNE = _osr.SRS_PT_BONNE
SRS_PT_ECKERT_I = _osr.SRS_PT_ECKERT_I
SRS_PT_ECKERT_II = _osr.SRS_PT_ECKERT_II
SRS_PT_ECKERT_III = _osr.SRS_PT_ECKERT_III
SRS_PT_ECKERT_IV = _osr.SRS_PT_ECKERT_IV
SRS_PT_ECKERT_V = _osr.SRS_PT_ECKERT_V
SRS_PT_ECKERT_VI = _osr.SRS_PT_ECKERT_VI
SRS_PT_EQUIDISTANT_CONIC = _osr.SRS_PT_EQUIDISTANT_CONIC
SRS_PT_EQUIRECTANGULAR = _osr.SRS_PT_EQUIRECTANGULAR
SRS_PT_GALL_STEREOGRAPHIC = _osr.SRS_PT_GALL_STEREOGRAPHIC
SRS_PT_GAUSSSCHREIBERTMERCATOR = _osr.SRS_PT_GAUSSSCHREIBERTMERCATOR
SRS_PT_GEOSTATIONARY_SATELLITE = _osr.SRS_PT_GEOSTATIONARY_SATELLITE
SRS_PT_GOODE_HOMOLOSINE = _osr.SRS_PT_GOODE_HOMOLOSINE
SRS_PT_IGH = _osr.SRS_PT_IGH
SRS_PT_GNOMONIC = _osr.SRS_PT_GNOMONIC
SRS_PT_HOTINE_OBLIQUE_MERCATOR_AZIMUTH_CENTER = _osr.SRS_PT_HOTINE_OBLIQUE_MERCATOR_AZIMUTH_CENTER
SRS_PT_HOTINE_OBLIQUE_MERCATOR = _osr.SRS_PT_HOTINE_OBLIQUE_MERCATOR
SRS_PT_HOTINE_OBLIQUE_MERCATOR_TWO_POINT_NATURAL_ORIGIN = _osr.SRS_PT_HOTINE_OBLIQUE_MERCATOR_TWO_POINT_NATURAL_ORIGIN
SRS_PT_LABORDE_OBLIQUE_MERCATOR = _osr.SRS_PT_LABORDE_OBLIQUE_MERCATOR
SRS_PT_LAMBERT_CONFORMAL_CONIC_1SP = _osr.SRS_PT_LAMBERT_CONFORMAL_CONIC_1SP
SRS_PT_LAMBERT_CONFORMAL_CONIC_2SP = _osr.SRS_PT_LAMBERT_CONFORMAL_CONIC_2SP
SRS_PT_LAMBERT_CONFORMAL_CONIC_2SP_BELGIUM = _osr.SRS_PT_LAMBERT_CONFORMAL_CONIC_2SP_BELGIUM
SRS_PT_LAMBERT_AZIMUTHAL_EQUAL_AREA = _osr.SRS_PT_LAMBERT_AZIMUTHAL_EQUAL_AREA
SRS_PT_MERCATOR_1SP = _osr.SRS_PT_MERCATOR_1SP
SRS_PT_MERCATOR_2SP = _osr.SRS_PT_MERCATOR_2SP
SRS_PT_MERCATOR_AUXILIARY_SPHERE = _osr.SRS_PT_MERCATOR_AUXILIARY_SPHERE
SRS_PT_MILLER_CYLINDRICAL = _osr.SRS_PT_MILLER_CYLINDRICAL
SRS_PT_MOLLWEIDE = _osr.SRS_PT_MOLLWEIDE
SRS_PT_NEW_ZEALAND_MAP_GRID = _osr.SRS_PT_NEW_ZEALAND_MAP_GRID
SRS_PT_OBLIQUE_STEREOGRAPHIC = _osr.SRS_PT_OBLIQUE_STEREOGRAPHIC
SRS_PT_ORTHOGRAPHIC = _osr.SRS_PT_ORTHOGRAPHIC
SRS_PT_POLAR_STEREOGRAPHIC = _osr.SRS_PT_POLAR_STEREOGRAPHIC
SRS_PT_POLYCONIC = _osr.SRS_PT_POLYCONIC
SRS_PT_ROBINSON = _osr.SRS_PT_ROBINSON
SRS_PT_SINUSOIDAL = _osr.SRS_PT_SINUSOIDAL
SRS_PT_STEREOGRAPHIC = _osr.SRS_PT_STEREOGRAPHIC
SRS_PT_SWISS_OBLIQUE_CYLINDRICAL = _osr.SRS_PT_SWISS_OBLIQUE_CYLINDRICAL
SRS_PT_TRANSVERSE_MERCATOR = _osr.SRS_PT_TRANSVERSE_MERCATOR
SRS_PT_TRANSVERSE_MERCATOR_SOUTH_ORIENTED = _osr.SRS_PT_TRANSVERSE_MERCATOR_SOUTH_ORIENTED
SRS_PT_TRANSVERSE_MERCATOR_MI_21 = _osr.SRS_PT_TRANSVERSE_MERCATOR_MI_21
SRS_PT_TRANSVERSE_MERCATOR_MI_22 = _osr.SRS_PT_TRANSVERSE_MERCATOR_MI_22
SRS_PT_TRANSVERSE_MERCATOR_MI_23 = _osr.SRS_PT_TRANSVERSE_MERCATOR_MI_23
SRS_PT_TRANSVERSE_MERCATOR_MI_24 = _osr.SRS_PT_TRANSVERSE_MERCATOR_MI_24
SRS_PT_TRANSVERSE_MERCATOR_MI_25 = _osr.SRS_PT_TRANSVERSE_MERCATOR_MI_25
SRS_PT_TUNISIA_MINING_GRID = _osr.SRS_PT_TUNISIA_MINING_GRID
SRS_PT_TWO_POINT_EQUIDISTANT = _osr.SRS_PT_TWO_POINT_EQUIDISTANT
SRS_PT_VANDERGRINTEN = _osr.SRS_PT_VANDERGRINTEN
SRS_PT_KROVAK = _osr.SRS_PT_KROVAK
SRS_PT_IMW_POLYCONIC = _osr.SRS_PT_IMW_POLYCONIC
SRS_PT_WAGNER_I = _osr.SRS_PT_WAGNER_I
SRS_PT_WAGNER_II = _osr.SRS_PT_WAGNER_II
SRS_PT_WAGNER_III = _osr.SRS_PT_WAGNER_III
SRS_PT_WAGNER_IV = _osr.SRS_PT_WAGNER_IV
SRS_PT_WAGNER_V = _osr.SRS_PT_WAGNER_V
SRS_PT_WAGNER_VI = _osr.SRS_PT_WAGNER_VI
SRS_PT_WAGNER_VII = _osr.SRS_PT_WAGNER_VII
SRS_PT_QSC = _osr.SRS_PT_QSC
SRS_PT_AITOFF = _osr.SRS_PT_AITOFF
SRS_PT_WINKEL_I = _osr.SRS_PT_WINKEL_I
SRS_PT_WINKEL_II = _osr.SRS_PT_WINKEL_II
SRS_PT_WINKEL_TRIPEL = _osr.SRS_PT_WINKEL_TRIPEL
SRS_PT_CRASTER_PARABOLIC = _osr.SRS_PT_CRASTER_PARABOLIC
SRS_PT_LOXIMUTHAL = _osr.SRS_PT_LOXIMUTHAL
SRS_PT_QUARTIC_AUTHALIC = _osr.SRS_PT_QUARTIC_AUTHALIC
SRS_PT_SCH = _osr.SRS_PT_SCH

# SRS_PP_*: projection parameter (PARAMETER node) names.
SRS_PP_CENTRAL_MERIDIAN = _osr.SRS_PP_CENTRAL_MERIDIAN
SRS_PP_SCALE_FACTOR = _osr.SRS_PP_SCALE_FACTOR
SRS_PP_STANDARD_PARALLEL_1 = _osr.SRS_PP_STANDARD_PARALLEL_1
SRS_PP_STANDARD_PARALLEL_2 = _osr.SRS_PP_STANDARD_PARALLEL_2
SRS_PP_PSEUDO_STD_PARALLEL_1 = _osr.SRS_PP_PSEUDO_STD_PARALLEL_1
SRS_PP_LONGITUDE_OF_CENTER = _osr.SRS_PP_LONGITUDE_OF_CENTER
SRS_PP_LATITUDE_OF_CENTER = _osr.SRS_PP_LATITUDE_OF_CENTER
SRS_PP_LONGITUDE_OF_ORIGIN = _osr.SRS_PP_LONGITUDE_OF_ORIGIN
SRS_PP_LATITUDE_OF_ORIGIN = _osr.SRS_PP_LATITUDE_OF_ORIGIN
SRS_PP_FALSE_EASTING = _osr.SRS_PP_FALSE_EASTING
SRS_PP_FALSE_NORTHING = _osr.SRS_PP_FALSE_NORTHING
SRS_PP_AZIMUTH = _osr.SRS_PP_AZIMUTH
SRS_PP_LONGITUDE_OF_POINT_1 = _osr.SRS_PP_LONGITUDE_OF_POINT_1
SRS_PP_LATITUDE_OF_POINT_1 = _osr.SRS_PP_LATITUDE_OF_POINT_1
SRS_PP_LONGITUDE_OF_POINT_2 = _osr.SRS_PP_LONGITUDE_OF_POINT_2
SRS_PP_LATITUDE_OF_POINT_2 = _osr.SRS_PP_LATITUDE_OF_POINT_2
SRS_PP_LONGITUDE_OF_POINT_3 = _osr.SRS_PP_LONGITUDE_OF_POINT_3
SRS_PP_LATITUDE_OF_POINT_3 = _osr.SRS_PP_LATITUDE_OF_POINT_3
SRS_PP_RECTIFIED_GRID_ANGLE = _osr.SRS_PP_RECTIFIED_GRID_ANGLE
SRS_PP_LANDSAT_NUMBER = _osr.SRS_PP_LANDSAT_NUMBER
SRS_PP_PATH_NUMBER = _osr.SRS_PP_PATH_NUMBER
SRS_PP_PERSPECTIVE_POINT_HEIGHT = _osr.SRS_PP_PERSPECTIVE_POINT_HEIGHT
SRS_PP_SATELLITE_HEIGHT = _osr.SRS_PP_SATELLITE_HEIGHT
SRS_PP_FIPSZONE = _osr.SRS_PP_FIPSZONE
SRS_PP_ZONE = _osr.SRS_PP_ZONE
SRS_PP_LATITUDE_OF_1ST_POINT = _osr.SRS_PP_LATITUDE_OF_1ST_POINT
SRS_PP_LONGITUDE_OF_1ST_POINT = _osr.SRS_PP_LONGITUDE_OF_1ST_POINT
SRS_PP_LATITUDE_OF_2ND_POINT = _osr.SRS_PP_LATITUDE_OF_2ND_POINT
SRS_PP_LONGITUDE_OF_2ND_POINT = _osr.SRS_PP_LONGITUDE_OF_2ND_POINT
SRS_PP_PEG_POINT_LATITUDE = _osr.SRS_PP_PEG_POINT_LATITUDE
SRS_PP_PEG_POINT_LONGITUDE = _osr.SRS_PP_PEG_POINT_LONGITUDE
SRS_PP_PEG_POINT_HEADING = _osr.SRS_PP_PEG_POINT_HEADING
SRS_PP_PEG_POINT_HEIGHT = _osr.SRS_PP_PEG_POINT_HEIGHT

# SRS_UL_*: linear unit names and their to-meter conversion factors
# (the *_CONV constants are string-encoded factors used in WKT).
SRS_UL_METER = _osr.SRS_UL_METER
SRS_UL_FOOT = _osr.SRS_UL_FOOT
SRS_UL_FOOT_CONV = _osr.SRS_UL_FOOT_CONV
SRS_UL_US_FOOT = _osr.SRS_UL_US_FOOT
SRS_UL_US_FOOT_CONV = _osr.SRS_UL_US_FOOT_CONV
SRS_UL_NAUTICAL_MILE = _osr.SRS_UL_NAUTICAL_MILE
SRS_UL_NAUTICAL_MILE_CONV = _osr.SRS_UL_NAUTICAL_MILE_CONV
SRS_UL_LINK = _osr.SRS_UL_LINK
SRS_UL_LINK_CONV = _osr.SRS_UL_LINK_CONV
SRS_UL_CHAIN = _osr.SRS_UL_CHAIN
SRS_UL_CHAIN_CONV = _osr.SRS_UL_CHAIN_CONV
SRS_UL_ROD = _osr.SRS_UL_ROD
SRS_UL_ROD_CONV = _osr.SRS_UL_ROD_CONV
SRS_UL_LINK_Clarke = _osr.SRS_UL_LINK_Clarke
SRS_UL_LINK_Clarke_CONV = _osr.SRS_UL_LINK_Clarke_CONV
SRS_UL_KILOMETER = _osr.SRS_UL_KILOMETER
SRS_UL_KILOMETER_CONV = _osr.SRS_UL_KILOMETER_CONV
SRS_UL_DECIMETER = _osr.SRS_UL_DECIMETER
SRS_UL_DECIMETER_CONV = _osr.SRS_UL_DECIMETER_CONV
SRS_UL_CENTIMETER = _osr.SRS_UL_CENTIMETER
SRS_UL_CENTIMETER_CONV = _osr.SRS_UL_CENTIMETER_CONV
SRS_UL_MILLIMETER = _osr.SRS_UL_MILLIMETER
SRS_UL_MILLIMETER_CONV = _osr.SRS_UL_MILLIMETER_CONV
SRS_UL_INTL_NAUT_MILE = _osr.SRS_UL_INTL_NAUT_MILE
SRS_UL_INTL_NAUT_MILE_CONV = _osr.SRS_UL_INTL_NAUT_MILE_CONV
SRS_UL_INTL_INCH = _osr.SRS_UL_INTL_INCH
SRS_UL_INTL_INCH_CONV = _osr.SRS_UL_INTL_INCH_CONV
SRS_UL_INTL_FOOT = _osr.SRS_UL_INTL_FOOT
SRS_UL_INTL_FOOT_CONV = _osr.SRS_UL_INTL_FOOT_CONV
SRS_UL_INTL_YARD = _osr.SRS_UL_INTL_YARD
SRS_UL_INTL_YARD_CONV = _osr.SRS_UL_INTL_YARD_CONV
SRS_UL_INTL_STAT_MILE = _osr.SRS_UL_INTL_STAT_MILE
SRS_UL_INTL_STAT_MILE_CONV = _osr.SRS_UL_INTL_STAT_MILE_CONV
SRS_UL_INTL_FATHOM = _osr.SRS_UL_INTL_FATHOM
SRS_UL_INTL_FATHOM_CONV = _osr.SRS_UL_INTL_FATHOM_CONV
SRS_UL_INTL_CHAIN = _osr.SRS_UL_INTL_CHAIN
SRS_UL_INTL_CHAIN_CONV = _osr.SRS_UL_INTL_CHAIN_CONV
SRS_UL_INTL_LINK = _osr.SRS_UL_INTL_LINK
SRS_UL_INTL_LINK_CONV = _osr.SRS_UL_INTL_LINK_CONV
SRS_UL_US_INCH = _osr.SRS_UL_US_INCH
SRS_UL_US_INCH_CONV = _osr.SRS_UL_US_INCH_CONV
SRS_UL_US_YARD = _osr.SRS_UL_US_YARD
SRS_UL_US_YARD_CONV = _osr.SRS_UL_US_YARD_CONV
SRS_UL_US_CHAIN = _osr.SRS_UL_US_CHAIN
SRS_UL_US_CHAIN_CONV = _osr.SRS_UL_US_CHAIN_CONV
SRS_UL_US_STAT_MILE = _osr.SRS_UL_US_STAT_MILE
SRS_UL_US_STAT_MILE_CONV = _osr.SRS_UL_US_STAT_MILE_CONV
SRS_UL_INDIAN_YARD = _osr.SRS_UL_INDIAN_YARD
SRS_UL_INDIAN_YARD_CONV = _osr.SRS_UL_INDIAN_YARD_CONV
SRS_UL_INDIAN_FOOT = _osr.SRS_UL_INDIAN_FOOT
SRS_UL_INDIAN_FOOT_CONV = _osr.SRS_UL_INDIAN_FOOT_CONV
SRS_UL_INDIAN_CHAIN = _osr.SRS_UL_INDIAN_CHAIN
SRS_UL_INDIAN_CHAIN_CONV = _osr.SRS_UL_INDIAN_CHAIN_CONV

# SRS_UA_*: angular unit names / conversion; SRS_PM_*: prime meridian.
SRS_UA_DEGREE = _osr.SRS_UA_DEGREE
SRS_UA_DEGREE_CONV = _osr.SRS_UA_DEGREE_CONV
SRS_UA_RADIAN = _osr.SRS_UA_RADIAN
SRS_PM_GREENWICH = _osr.SRS_PM_GREENWICH

# SRS_DN_*: well-known datum names; WGS84 ellipsoid parameters.
SRS_DN_NAD27 = _osr.SRS_DN_NAD27
SRS_DN_NAD83 = _osr.SRS_DN_NAD83
SRS_DN_WGS72 = _osr.SRS_DN_WGS72
SRS_DN_WGS84 = _osr.SRS_DN_WGS84
SRS_WGS84_SEMIMAJOR = _osr.SRS_WGS84_SEMIMAJOR
SRS_WGS84_INVFLATTENING = _osr.SRS_WGS84_INVFLATTENING

# OAO_*: axis orientation values returned by GetAxisOrientation().
OAO_Other = _osr.OAO_Other
OAO_North = _osr.OAO_North
OAO_South = _osr.OAO_South
OAO_East = _osr.OAO_East
OAO_West = _osr.OAO_West
OAO_Up = _osr.OAO_Up
OAO_Down = _osr.OAO_Down

# OAMS_*: axis mapping strategies for SetAxisMappingStrategy().
OAMS_TRADITIONAL_GIS_ORDER = _osr.OAMS_TRADITIONAL_GIS_ORDER
OAMS_AUTHORITY_COMPLIANT = _osr.OAMS_AUTHORITY_COMPLIANT
OAMS_CUSTOM = _osr.OAMS_CUSTOM

# PROJ_ERR_*: error codes surfaced from the underlying PROJ library.
PROJ_ERR_INVALID_OP = _osr.PROJ_ERR_INVALID_OP
PROJ_ERR_INVALID_OP_WRONG_SYNTAX = _osr.PROJ_ERR_INVALID_OP_WRONG_SYNTAX
PROJ_ERR_INVALID_OP_MISSING_ARG = _osr.PROJ_ERR_INVALID_OP_MISSING_ARG
PROJ_ERR_INVALID_OP_ILLEGAL_ARG_VALUE = _osr.PROJ_ERR_INVALID_OP_ILLEGAL_ARG_VALUE
PROJ_ERR_INVALID_OP_MUTUALLY_EXCLUSIVE_ARGS = _osr.PROJ_ERR_INVALID_OP_MUTUALLY_EXCLUSIVE_ARGS
PROJ_ERR_INVALID_OP_FILE_NOT_FOUND_OR_INVALID = _osr.PROJ_ERR_INVALID_OP_FILE_NOT_FOUND_OR_INVALID
PROJ_ERR_COORD_TRANSFM = _osr.PROJ_ERR_COORD_TRANSFM
PROJ_ERR_COORD_TRANSFM_INVALID_COORD = _osr.PROJ_ERR_COORD_TRANSFM_INVALID_COORD
PROJ_ERR_COORD_TRANSFM_OUTSIDE_PROJECTION_DOMAIN = _osr.PROJ_ERR_COORD_TRANSFM_OUTSIDE_PROJECTION_DOMAIN
PROJ_ERR_COORD_TRANSFM_NO_OPERATION = _osr.PROJ_ERR_COORD_TRANSFM_NO_OPERATION
PROJ_ERR_COORD_TRANSFM_OUTSIDE_GRID = _osr.PROJ_ERR_COORD_TRANSFM_OUTSIDE_GRID
PROJ_ERR_COORD_TRANSFM_GRID_AT_NODATA = _osr.PROJ_ERR_COORD_TRANSFM_GRID_AT_NODATA
PROJ_ERR_OTHER = _osr.PROJ_ERR_OTHER
PROJ_ERR_OTHER_API_MISUSE = _osr.PROJ_ERR_OTHER_API_MISUSE
PROJ_ERR_OTHER_NO_INVERSE_OP = _osr.PROJ_ERR_OTHER_NO_INVERSE_OP
PROJ_ERR_OTHER_NETWORK_ERROR = _osr.PROJ_ERR_OTHER_NETWORK_ERROR
def GetUseExceptions(*args):
    """GetUseExceptions() -> int

    Report whether OSR errors currently raise Python exceptions.
    """
    mode = _osr.GetUseExceptions(*args)
    return mode
def UseExceptions(*args):
    """UseExceptions()

    Enable raising Python exceptions for OSR errors instead of relying
    on returned error codes.
    """
    result = _osr.UseExceptions(*args)
    return result
def DontUseExceptions(*args):
    """DontUseExceptions()

    Disable Python exception raising for OSR errors (error-code mode).
    """
    result = _osr.DontUseExceptions(*args)
    return result
def GetWellKnownGeogCSAsWKT(*args):
    """GetWellKnownGeogCSAsWKT(char const * name) -> OGRErr

    Translate a well-known geographic CS name (e.g. "WGS84") into WKT.
    """
    wkt = _osr.GetWellKnownGeogCSAsWKT(*args)
    return wkt
def GetUserInputAsWKT(*args):
    """GetUserInputAsWKT(char const * name) -> OGRErr

    Interpret a free-form SRS description (EPSG code, PROJ string, WKT,
    well-known name, ...) and return its WKT representation.
    """
    wkt = _osr.GetUserInputAsWKT(*args)
    return wkt
class AreaOfUse(_object):
    """Proxy of C++ OSRAreaOfUse class."""
    # SWIG attribute plumbing: reads/writes are routed through the
    # registered getter/setter tables via _swig_getattr/_swig_setattr.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, AreaOfUse, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, AreaOfUse, name)
    __repr__ = _swig_repr
    # Read-only bounding-box fields, exposed both through the getter table
    # and (on new-style classes) as plain properties.
    __swig_getmethods__["west_lon_degree"] = _osr.AreaOfUse_west_lon_degree_get
    if _newclass:
        west_lon_degree = _swig_property(_osr.AreaOfUse_west_lon_degree_get)
    __swig_getmethods__["south_lat_degree"] = _osr.AreaOfUse_south_lat_degree_get
    if _newclass:
        south_lat_degree = _swig_property(_osr.AreaOfUse_south_lat_degree_get)
    __swig_getmethods__["east_lon_degree"] = _osr.AreaOfUse_east_lon_degree_get
    if _newclass:
        east_lon_degree = _swig_property(_osr.AreaOfUse_east_lon_degree_get)
    __swig_getmethods__["north_lat_degree"] = _osr.AreaOfUse_north_lat_degree_get
    if _newclass:
        north_lat_degree = _swig_property(_osr.AreaOfUse_north_lat_degree_get)
    # Human-readable description of the area of use.
    __swig_getmethods__["name"] = _osr.AreaOfUse_name_get
    if _newclass:
        name = _swig_property(_osr.AreaOfUse_name_get)

    def __init__(self, *args):
        """__init__(OSRAreaOfUse self, double west_lon_degree, double south_lat_degree, double east_lon_degree, double north_lat_degree, char * name) -> AreaOfUse"""
        # Construct the underlying C object and bind it as this proxy's
        # 'this' pointer (append vs. assign depends on the SWIG runtime).
        this = _osr.new_AreaOfUse(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _osr.delete_AreaOfUse
    __del__ = lambda self: None
# Register the Python proxy with the SWIG runtime so that C-created
# OSRAreaOfUse objects are wrapped as AreaOfUse instances.
AreaOfUse_swigregister = _osr.AreaOfUse_swigregister
AreaOfUse_swigregister(AreaOfUse)
def OSRAreaOfUse_west_lon_degree_get(*args):
    """OSRAreaOfUse_west_lon_degree_get(AreaOfUse area) -> double"""
    # Western longitude bound (degrees) of the area of use.
    value = _osr.OSRAreaOfUse_west_lon_degree_get(*args)
    return value
def OSRAreaOfUse_south_lat_degree_get(*args):
    """OSRAreaOfUse_south_lat_degree_get(AreaOfUse area) -> double"""
    # Southern latitude bound (degrees) of the area of use.
    value = _osr.OSRAreaOfUse_south_lat_degree_get(*args)
    return value
def OSRAreaOfUse_east_lon_degree_get(*args):
    """OSRAreaOfUse_east_lon_degree_get(AreaOfUse area) -> double"""
    # Eastern longitude bound (degrees) of the area of use.
    value = _osr.OSRAreaOfUse_east_lon_degree_get(*args)
    return value
def OSRAreaOfUse_north_lat_degree_get(*args):
    """OSRAreaOfUse_north_lat_degree_get(AreaOfUse area) -> double"""
    # Northern latitude bound (degrees) of the area of use.
    value = _osr.OSRAreaOfUse_north_lat_degree_get(*args)
    return value
def OSRAreaOfUse_name_get(*args):
    """OSRAreaOfUse_name_get(AreaOfUse area) -> char const *"""
    # Human-readable description of the area of use.
    value = _osr.OSRAreaOfUse_name_get(*args)
    return value
class SpatialReference(_object):
"""Proxy of C++ OSRSpatialReferenceShadow class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SpatialReference, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SpatialReference, name)
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""__init__(OSRSpatialReferenceShadow self, char const * wkt) -> SpatialReference"""
this = _osr.new_SpatialReference(*args, **kwargs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _osr.delete_SpatialReference
__del__ = lambda self: None
def __str__(self, *args):
"""__str__(SpatialReference self) -> retStringAndCPLFree *"""
return _osr.SpatialReference___str__(self, *args)
def GetName(self, *args):
"""GetName(SpatialReference self) -> char const *"""
return _osr.SpatialReference_GetName(self, *args)
def IsSame(self, *args, **kwargs):
"""IsSame(SpatialReference self, SpatialReference rhs, char ** options=None) -> int"""
return _osr.SpatialReference_IsSame(self, *args, **kwargs)
def IsSameGeogCS(self, *args):
"""IsSameGeogCS(SpatialReference self, SpatialReference rhs) -> int"""
return _osr.SpatialReference_IsSameGeogCS(self, *args)
def IsSameVertCS(self, *args):
"""IsSameVertCS(SpatialReference self, SpatialReference rhs) -> int"""
return _osr.SpatialReference_IsSameVertCS(self, *args)
def IsGeographic(self, *args):
"""IsGeographic(SpatialReference self) -> int"""
return _osr.SpatialReference_IsGeographic(self, *args)
def IsDerivedGeographic(self, *args):
"""IsDerivedGeographic(SpatialReference self) -> int"""
return _osr.SpatialReference_IsDerivedGeographic(self, *args)
def IsProjected(self, *args):
"""IsProjected(SpatialReference self) -> int"""
return _osr.SpatialReference_IsProjected(self, *args)
def IsCompound(self, *args):
"""IsCompound(SpatialReference self) -> int"""
return _osr.SpatialReference_IsCompound(self, *args)
def IsGeocentric(self, *args):
"""IsGeocentric(SpatialReference self) -> int"""
return _osr.SpatialReference_IsGeocentric(self, *args)
def IsLocal(self, *args):
"""IsLocal(SpatialReference self) -> int"""
return _osr.SpatialReference_IsLocal(self, *args)
def IsVertical(self, *args):
"""IsVertical(SpatialReference self) -> int"""
return _osr.SpatialReference_IsVertical(self, *args)
def IsDynamic(self, *args):
"""IsDynamic(SpatialReference self) -> bool"""
return _osr.SpatialReference_IsDynamic(self, *args)
def GetCoordinateEpoch(self, *args):
"""GetCoordinateEpoch(SpatialReference self) -> double"""
return _osr.SpatialReference_GetCoordinateEpoch(self, *args)
def SetCoordinateEpoch(self, *args):
"""SetCoordinateEpoch(SpatialReference self, double coordinateEpoch)"""
return _osr.SpatialReference_SetCoordinateEpoch(self, *args)
def EPSGTreatsAsLatLong(self, *args):
"""EPSGTreatsAsLatLong(SpatialReference self) -> int"""
return _osr.SpatialReference_EPSGTreatsAsLatLong(self, *args)
def EPSGTreatsAsNorthingEasting(self, *args):
"""EPSGTreatsAsNorthingEasting(SpatialReference self) -> int"""
return _osr.SpatialReference_EPSGTreatsAsNorthingEasting(self, *args)
def SetAuthority(self, *args):
"""SetAuthority(SpatialReference self, char const * pszTargetKey, char const * pszAuthority, int nCode) -> OGRErr"""
return _osr.SpatialReference_SetAuthority(self, *args)
def GetAttrValue(self, *args):
"""GetAttrValue(SpatialReference self, char const * name, int child=0) -> char const *"""
return _osr.SpatialReference_GetAttrValue(self, *args)
def SetAttrValue(self, *args):
"""SetAttrValue(SpatialReference self, char const * name, char const * value) -> OGRErr"""
return _osr.SpatialReference_SetAttrValue(self, *args)
def SetAngularUnits(self, *args):
"""SetAngularUnits(SpatialReference self, char const * name, double to_radians) -> OGRErr"""
return _osr.SpatialReference_SetAngularUnits(self, *args)
def GetAngularUnits(self, *args):
"""GetAngularUnits(SpatialReference self) -> double"""
return _osr.SpatialReference_GetAngularUnits(self, *args)
def GetAngularUnitsName(self, *args):
"""GetAngularUnitsName(SpatialReference self) -> char const *"""
return _osr.SpatialReference_GetAngularUnitsName(self, *args)
def SetTargetLinearUnits(self, *args):
"""SetTargetLinearUnits(SpatialReference self, char const * target, char const * name, double to_meters) -> OGRErr"""
return _osr.SpatialReference_SetTargetLinearUnits(self, *args)
def SetLinearUnits(self, *args):
"""SetLinearUnits(SpatialReference self, char const * name, double to_meters) -> OGRErr"""
return _osr.SpatialReference_SetLinearUnits(self, *args)
def SetLinearUnitsAndUpdateParameters(self, *args):
"""SetLinearUnitsAndUpdateParameters(SpatialReference self, char const * name, double to_meters) -> OGRErr"""
return _osr.SpatialReference_SetLinearUnitsAndUpdateParameters(self, *args)
def GetTargetLinearUnits(self, *args):
"""GetTargetLinearUnits(SpatialReference self, char const * target_key) -> double"""
return _osr.SpatialReference_GetTargetLinearUnits(self, *args)
def GetLinearUnits(self, *args):
"""GetLinearUnits(SpatialReference self) -> double"""
return _osr.SpatialReference_GetLinearUnits(self, *args)
def GetLinearUnitsName(self, *args):
"""GetLinearUnitsName(SpatialReference self) -> char const *"""
return _osr.SpatialReference_GetLinearUnitsName(self, *args)
def GetAuthorityCode(self, *args):
"""GetAuthorityCode(SpatialReference self, char const * target_key) -> char const *"""
return _osr.SpatialReference_GetAuthorityCode(self, *args)
def GetAuthorityName(self, *args):
"""GetAuthorityName(SpatialReference self, char const * target_key) -> char const *"""
return _osr.SpatialReference_GetAuthorityName(self, *args)
def GetAreaOfUse(self, *args):
"""GetAreaOfUse(SpatialReference self) -> AreaOfUse"""
return _osr.SpatialReference_GetAreaOfUse(self, *args)
def GetAxisName(self, *args):
"""GetAxisName(SpatialReference self, char const * target_key, int iAxis) -> char const *"""
return _osr.SpatialReference_GetAxisName(self, *args)
def GetAxesCount(self, *args):
"""GetAxesCount(SpatialReference self) -> int"""
return _osr.SpatialReference_GetAxesCount(self, *args)
def GetAxisOrientation(self, *args):
"""GetAxisOrientation(SpatialReference self, char const * target_key, int iAxis) -> OGRAxisOrientation"""
return _osr.SpatialReference_GetAxisOrientation(self, *args)
def GetAxisMappingStrategy(self, *args):
"""GetAxisMappingStrategy(SpatialReference self) -> OSRAxisMappingStrategy"""
return _osr.SpatialReference_GetAxisMappingStrategy(self, *args)
def SetAxisMappingStrategy(self, *args):
"""SetAxisMappingStrategy(SpatialReference self, OSRAxisMappingStrategy strategy)"""
return _osr.SpatialReference_SetAxisMappingStrategy(self, *args)
def GetDataAxisToSRSAxisMapping(self, *args):
"""GetDataAxisToSRSAxisMapping(SpatialReference self)"""
return _osr.SpatialReference_GetDataAxisToSRSAxisMapping(self, *args)
def SetDataAxisToSRSAxisMapping(self, *args):
"""SetDataAxisToSRSAxisMapping(SpatialReference self, int nList) -> OGRErr"""
return _osr.SpatialReference_SetDataAxisToSRSAxisMapping(self, *args)
def SetUTM(self, *args):
"""SetUTM(SpatialReference self, int zone, int north=1) -> OGRErr"""
return _osr.SpatialReference_SetUTM(self, *args)
def GetUTMZone(self, *args):
"""GetUTMZone(SpatialReference self) -> int"""
return _osr.SpatialReference_GetUTMZone(self, *args)
def SetStatePlane(self, *args):
"""SetStatePlane(SpatialReference self, int zone, int is_nad83=1, char const * unitsname, double units=0.0) -> OGRErr"""
return _osr.SpatialReference_SetStatePlane(self, *args)
def AutoIdentifyEPSG(self, *args):
"""AutoIdentifyEPSG(SpatialReference self) -> OGRErr"""
return _osr.SpatialReference_AutoIdentifyEPSG(self, *args)
def FindMatches(self, *args):
"""FindMatches(SpatialReference self, char ** options=None)"""
return _osr.SpatialReference_FindMatches(self, *args)
def SetProjection(self, *args):
"""SetProjection(SpatialReference self, char const * arg) -> OGRErr"""
return _osr.SpatialReference_SetProjection(self, *args)
def SetProjParm(self, *args):
"""SetProjParm(SpatialReference self, char const * name, double val) -> OGRErr"""
return _osr.SpatialReference_SetProjParm(self, *args)
def GetProjParm(self, *args):
"""GetProjParm(SpatialReference self, char const * name, double default_val=0.0) -> double"""
return _osr.SpatialReference_GetProjParm(self, *args)
def SetNormProjParm(self, *args):
"""SetNormProjParm(SpatialReference self, char const * name, double val) -> OGRErr"""
return _osr.SpatialReference_SetNormProjParm(self, *args)
def GetNormProjParm(self, *args):
"""GetNormProjParm(SpatialReference self, char const * name, double default_val=0.0) -> double"""
return _osr.SpatialReference_GetNormProjParm(self, *args)
def GetSemiMajor(self, *args):
"""GetSemiMajor(SpatialReference self) -> double"""
return _osr.SpatialReference_GetSemiMajor(self, *args)
def GetSemiMinor(self, *args):
"""GetSemiMinor(SpatialReference self) -> double"""
return _osr.SpatialReference_GetSemiMinor(self, *args)
def GetInvFlattening(self, *args):
"""GetInvFlattening(SpatialReference self) -> double"""
return _osr.SpatialReference_GetInvFlattening(self, *args)
def SetACEA(self, *args, **kwargs):
"""SetACEA(SpatialReference self, double stdp1, double stdp2, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetACEA(self, *args, **kwargs)
def SetAE(self, *args, **kwargs):
"""SetAE(SpatialReference self, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetAE(self, *args, **kwargs)
def SetBonne(self, *args, **kwargs):
"""SetBonne(SpatialReference self, double stdp, double cm, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetBonne(self, *args, **kwargs)
def SetCEA(self, *args, **kwargs):
"""SetCEA(SpatialReference self, double stdp1, double cm, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetCEA(self, *args, **kwargs)
def SetCS(self, *args, **kwargs):
"""SetCS(SpatialReference self, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetCS(self, *args, **kwargs)
def SetEC(self, *args, **kwargs):
"""SetEC(SpatialReference self, double stdp1, double stdp2, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetEC(self, *args, **kwargs)
def SetEckertIV(self, *args, **kwargs):
"""SetEckertIV(SpatialReference self, double cm, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetEckertIV(self, *args, **kwargs)
def SetEckertVI(self, *args, **kwargs):
"""SetEckertVI(SpatialReference self, double cm, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetEckertVI(self, *args, **kwargs)
def SetEquirectangular(self, *args, **kwargs):
"""SetEquirectangular(SpatialReference self, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetEquirectangular(self, *args, **kwargs)
def SetEquirectangular2(self, *args, **kwargs):
"""SetEquirectangular2(SpatialReference self, double clat, double clong, double pseudostdparallellat, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetEquirectangular2(self, *args, **kwargs)
def SetGaussSchreiberTMercator(self, *args, **kwargs):
"""SetGaussSchreiberTMercator(SpatialReference self, double clat, double clong, double sc, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetGaussSchreiberTMercator(self, *args, **kwargs)
def SetGS(self, *args, **kwargs):
"""SetGS(SpatialReference self, double cm, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetGS(self, *args, **kwargs)
def SetGH(self, *args, **kwargs):
"""SetGH(SpatialReference self, double cm, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetGH(self, *args, **kwargs)
def SetIGH(self, *args):
"""SetIGH(SpatialReference self) -> OGRErr"""
return _osr.SpatialReference_SetIGH(self, *args)
def SetGEOS(self, *args, **kwargs):
"""SetGEOS(SpatialReference self, double cm, double satelliteheight, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetGEOS(self, *args, **kwargs)
def SetGnomonic(self, *args, **kwargs):
"""SetGnomonic(SpatialReference self, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetGnomonic(self, *args, **kwargs)
def SetHOM(self, *args, **kwargs):
"""SetHOM(SpatialReference self, double clat, double clong, double azimuth, double recttoskew, double scale, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetHOM(self, *args, **kwargs)
def SetHOM2PNO(self, *args, **kwargs):
"""SetHOM2PNO(SpatialReference self, double clat, double dfLat1, double dfLong1, double dfLat2, double dfLong2, double scale, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetHOM2PNO(self, *args, **kwargs)
def SetKrovak(self, *args, **kwargs):
"""SetKrovak(SpatialReference self, double clat, double clong, double azimuth, double pseudostdparallellat, double scale, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetKrovak(self, *args, **kwargs)
def SetLAEA(self, *args, **kwargs):
"""SetLAEA(SpatialReference self, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetLAEA(self, *args, **kwargs)
def SetLCC(self, *args, **kwargs):
"""SetLCC(SpatialReference self, double stdp1, double stdp2, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetLCC(self, *args, **kwargs)
def SetLCC1SP(self, *args, **kwargs):
"""SetLCC1SP(SpatialReference self, double clat, double clong, double scale, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetLCC1SP(self, *args, **kwargs)
def SetLCCB(self, *args, **kwargs):
"""SetLCCB(SpatialReference self, double stdp1, double stdp2, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetLCCB(self, *args, **kwargs)
def SetMC(self, *args, **kwargs):
"""SetMC(SpatialReference self, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetMC(self, *args, **kwargs)
def SetMercator(self, *args, **kwargs):
"""SetMercator(SpatialReference self, double clat, double clong, double scale, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetMercator(self, *args, **kwargs)
def SetMercator2SP(self, *args, **kwargs):
"""SetMercator2SP(SpatialReference self, double stdp1, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetMercator2SP(self, *args, **kwargs)
def SetMollweide(self, *args, **kwargs):
"""SetMollweide(SpatialReference self, double cm, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetMollweide(self, *args, **kwargs)
def SetNZMG(self, *args, **kwargs):
"""SetNZMG(SpatialReference self, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetNZMG(self, *args, **kwargs)
def SetOS(self, *args, **kwargs):
"""SetOS(SpatialReference self, double dfOriginLat, double dfCMeridian, double scale, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetOS(self, *args, **kwargs)
def SetOrthographic(self, *args, **kwargs):
"""SetOrthographic(SpatialReference self, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetOrthographic(self, *args, **kwargs)
def SetPolyconic(self, *args, **kwargs):
"""SetPolyconic(SpatialReference self, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetPolyconic(self, *args, **kwargs)
def SetPS(self, *args, **kwargs):
"""SetPS(SpatialReference self, double clat, double clong, double scale, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetPS(self, *args, **kwargs)
def SetRobinson(self, *args, **kwargs):
"""SetRobinson(SpatialReference self, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetRobinson(self, *args, **kwargs)
def SetSinusoidal(self, *args, **kwargs):
"""SetSinusoidal(SpatialReference self, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetSinusoidal(self, *args, **kwargs)
def SetStereographic(self, *args, **kwargs):
"""SetStereographic(SpatialReference self, double clat, double clong, double scale, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetStereographic(self, *args, **kwargs)
def SetSOC(self, *args, **kwargs):
"""SetSOC(SpatialReference self, double latitudeoforigin, double cm, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetSOC(self, *args, **kwargs)
def SetTM(self, *args, **kwargs):
"""SetTM(SpatialReference self, double clat, double clong, double scale, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetTM(self, *args, **kwargs)
def SetTMVariant(self, *args, **kwargs):
"""SetTMVariant(SpatialReference self, char const * pszVariantName, double clat, double clong, double scale, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetTMVariant(self, *args, **kwargs)
def SetTMG(self, *args, **kwargs):
"""SetTMG(SpatialReference self, double clat, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetTMG(self, *args, **kwargs)
def SetTMSO(self, *args, **kwargs):
"""SetTMSO(SpatialReference self, double clat, double clong, double scale, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetTMSO(self, *args, **kwargs)
def SetVDG(self, *args, **kwargs):
"""SetVDG(SpatialReference self, double clong, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetVDG(self, *args, **kwargs)
def SetVerticalPerspective(self, *args, **kwargs):
"""SetVerticalPerspective(SpatialReference self, double topoOriginLat, double topoOriginLon, double topoOriginHeight, double viewPointHeight, double fe, double fn) -> OGRErr"""
return _osr.SpatialReference_SetVerticalPerspective(self, *args, **kwargs)
def SetWellKnownGeogCS(self, *args):
"""SetWellKnownGeogCS(SpatialReference self, char const * name) -> OGRErr"""
return _osr.SpatialReference_SetWellKnownGeogCS(self, *args)
def SetFromUserInput(self, *args):
"""SetFromUserInput(SpatialReference self, char const * name) -> OGRErr"""
return _osr.SpatialReference_SetFromUserInput(self, *args)
def CopyGeogCSFrom(self, *args):
"""CopyGeogCSFrom(SpatialReference self, SpatialReference rhs) -> OGRErr"""
return _osr.SpatialReference_CopyGeogCSFrom(self, *args)
def SetTOWGS84(self, *args):
"""SetTOWGS84(SpatialReference self, double p1, double p2, double p3, double p4=0.0, double p5=0.0, double p6=0.0, double p7=0.0) -> OGRErr"""
return _osr.SpatialReference_SetTOWGS84(self, *args)
def HasTOWGS84(self, *args):
"""HasTOWGS84(SpatialReference self) -> bool"""
return _osr.SpatialReference_HasTOWGS84(self, *args)
def GetTOWGS84(self, *args):
"""GetTOWGS84(SpatialReference self) -> OGRErr"""
return _osr.SpatialReference_GetTOWGS84(self, *args)
def AddGuessedTOWGS84(self, *args):
"""AddGuessedTOWGS84(SpatialReference self) -> OGRErr"""
return _osr.SpatialReference_AddGuessedTOWGS84(self, *args)
def SetLocalCS(self, *args):
"""SetLocalCS(SpatialReference self, char const * pszName) -> OGRErr"""
return _osr.SpatialReference_SetLocalCS(self, *args)
def SetGeogCS(self, *args):
"""SetGeogCS(SpatialReference self, char const * pszGeogName, char const * pszDatumName, char const * pszEllipsoidName, double dfSemiMajor, double dfInvFlattening, char const * pszPMName, double dfPMOffset=0.0, char const * pszUnits, double dfConvertToRadians=0.0174532925199433) -> OGRErr"""
return _osr.SpatialReference_SetGeogCS(self, *args)
def SetProjCS(self, *args):
"""SetProjCS(SpatialReference self, char const * name) -> OGRErr"""
return _osr.SpatialReference_SetProjCS(self, *args)
def SetGeocCS(self, *args):
"""SetGeocCS(SpatialReference self, char const * name) -> OGRErr"""
return _osr.SpatialReference_SetGeocCS(self, *args)
def SetVertCS(self, *args):
"""SetVertCS(SpatialReference self, char const * VertCSName, char const * VertDatumName, int VertDatumType=0) -> OGRErr"""
return _osr.SpatialReference_SetVertCS(self, *args)
def SetCompoundCS(self, *args):
"""SetCompoundCS(SpatialReference self, char const * name, SpatialReference horizcs, SpatialReference vertcs) -> OGRErr"""
return _osr.SpatialReference_SetCompoundCS(self, *args)
def ImportFromWkt(self, *args):
"""ImportFromWkt(SpatialReference self, char ** ppszInput) -> OGRErr"""
return _osr.SpatialReference_ImportFromWkt(self, *args)
def ImportFromProj4(self, *args):
"""ImportFromProj4(SpatialReference self, char * ppszInput) -> OGRErr"""
return _osr.SpatialReference_ImportFromProj4(self, *args)
def ImportFromUrl(self, *args):
"""ImportFromUrl(SpatialReference self, char * url) -> OGRErr"""
return _osr.SpatialReference_ImportFromUrl(self, *args)
def ImportFromESRI(self, *args):
"""ImportFromESRI(SpatialReference self, char ** ppszInput) -> OGRErr"""
return _osr.SpatialReference_ImportFromESRI(self, *args)
def ImportFromEPSG(self, *args):
"""ImportFromEPSG(SpatialReference self, int arg) -> OGRErr"""
return _osr.SpatialReference_ImportFromEPSG(self, *args)
def ImportFromEPSGA(self, *args):
"""ImportFromEPSGA(SpatialReference self, int arg) -> OGRErr"""
return _osr.SpatialReference_ImportFromEPSGA(self, *args)
def ImportFromPCI(self, *args):
"""ImportFromPCI(SpatialReference self, char const * proj, char const * units, double [17] argin=0) -> OGRErr"""
return _osr.SpatialReference_ImportFromPCI(self, *args)
def ImportFromUSGS(self, *args):
"""ImportFromUSGS(SpatialReference self, long proj_code, long zone=0, double [15] argin=0, long datum_code=0) -> OGRErr"""
return _osr.SpatialReference_ImportFromUSGS(self, *args)
def ImportFromXML(self, *args):
"""ImportFromXML(SpatialReference self, char const * xmlString) -> OGRErr"""
return _osr.SpatialReference_ImportFromXML(self, *args)
def ImportFromERM(self, *args):
"""ImportFromERM(SpatialReference self, char const * proj, char const * datum, char const * units) -> OGRErr"""
return _osr.SpatialReference_ImportFromERM(self, *args)
def ImportFromMICoordSys(self, *args):
"""ImportFromMICoordSys(SpatialReference self, char const * pszCoordSys) -> OGRErr"""
return _osr.SpatialReference_ImportFromMICoordSys(self, *args)
def ImportFromOzi(self, *args):
"""ImportFromOzi(SpatialReference self, char const *const * papszLines) -> OGRErr"""
return _osr.SpatialReference_ImportFromOzi(self, *args)
def ExportToWkt(self, *args):
"""ExportToWkt(SpatialReference self, char ** options=None) -> OGRErr"""
return _osr.SpatialReference_ExportToWkt(self, *args)
def ExportToPrettyWkt(self, *args):
"""ExportToPrettyWkt(SpatialReference self, int simplify=0) -> OGRErr"""
return _osr.SpatialReference_ExportToPrettyWkt(self, *args)
def ExportToPROJJSON(self, *args):
"""ExportToPROJJSON(SpatialReference self, char ** options=None) -> OGRErr"""
return _osr.SpatialReference_ExportToPROJJSON(self, *args)
def ExportToProj4(self, *args):
"""ExportToProj4(SpatialReference self) -> OGRErr"""
return _osr.SpatialReference_ExportToProj4(self, *args)
def ExportToPCI(self, *args):
"""ExportToPCI(SpatialReference self) -> OGRErr"""
return _osr.SpatialReference_ExportToPCI(self, *args)
def ExportToUSGS(self, *args):
"""ExportToUSGS(SpatialReference self) -> OGRErr"""
return _osr.SpatialReference_ExportToUSGS(self, *args)
def ExportToXML(self, *args):
"""ExportToXML(SpatialReference self, char const * dialect) -> OGRErr"""
return _osr.SpatialReference_ExportToXML(self, *args)
def ExportToMICoordSys(self, *args):
"""ExportToMICoordSys(SpatialReference self) -> OGRErr"""
return _osr.SpatialReference_ExportToMICoordSys(self, *args)
def CloneGeogCS(self, *args):
"""CloneGeogCS(SpatialReference self) -> SpatialReference"""
return _osr.SpatialReference_CloneGeogCS(self, *args)
def Clone(self, *args):
"""Clone(SpatialReference self) -> SpatialReference"""
return _osr.SpatialReference_Clone(self, *args)
def Validate(self, *args):
"""Validate(SpatialReference self) -> OGRErr"""
return _osr.SpatialReference_Validate(self, *args)
def MorphToESRI(self, *args):
"""MorphToESRI(SpatialReference self) -> OGRErr"""
return _osr.SpatialReference_MorphToESRI(self, *args)
def MorphFromESRI(self, *args):
"""MorphFromESRI(SpatialReference self) -> OGRErr"""
return _osr.SpatialReference_MorphFromESRI(self, *args)
def ConvertToOtherProjection(self, *args):
"""ConvertToOtherProjection(SpatialReference self, char const * other_projection, char ** options=None) -> SpatialReference"""
return _osr.SpatialReference_ConvertToOtherProjection(self, *args)
def PromoteTo3D(self, *args):
"""PromoteTo3D(SpatialReference self, char const * name=None) -> OGRErr"""
return _osr.SpatialReference_PromoteTo3D(self, *args)
def DemoteTo2D(self, *args):
"""DemoteTo2D(SpatialReference self, char const * name=None) -> OGRErr"""
return _osr.SpatialReference_DemoteTo2D(self, *args)
    def __init__(self, *args, **kwargs):
        """__init__(OSRSpatialReferenceShadow self, char const * wkt) -> SpatialReference"""
        # Temporarily switch OSR into exception mode (if it is not already)
        # so a failure in the C constructor raises instead of being silent;
        # the previous mode is restored in the `finally` block.
        oldval = _osr.GetUseExceptions()
        if not oldval:
            _osr.UseExceptions()
        try:
            this = _osr.new_SpatialReference(*args, **kwargs)
        finally:
            if not oldval:
                _osr.DontUseExceptions()
        # Standard SWIG idiom: attach the new C pointer to this proxy,
        # falling back to direct assignment when `self.this` is absent.
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
SpatialReference_swigregister = _osr.SpatialReference_swigregister
SpatialReference_swigregister(SpatialReference)
class CoordinateTransformationOptions(_object):
    """Proxy of C++ OGRCoordinateTransformationOptions class.
    Holds optional settings (area of interest, pinned operation string,
    desired accuracy, ballpark policy) that can be passed when creating
    a CoordinateTransformation.
    """
    # NOTE: SWIG-generated proxy -- edits here are lost when the bindings
    # are regenerated from the interface files.
    # SWIG dispatch tables: attribute reads/writes are routed through the
    # generated _swig_getattr/_swig_setattr helpers to the C object.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, CoordinateTransformationOptions, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, CoordinateTransformationOptions, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """__init__(OGRCoordinateTransformationOptions self) -> CoordinateTransformationOptions"""
        this = _osr.new_CoordinateTransformationOptions(*args)
        # Standard SWIG idiom: attach the new C pointer to this proxy,
        # falling back to direct assignment when `self.this` is absent.
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _osr.delete_CoordinateTransformationOptions
    __del__ = lambda self: None
    def SetAreaOfInterest(self, *args):
        """SetAreaOfInterest(CoordinateTransformationOptions self, double westLongitudeDeg, double southLatitudeDeg, double eastLongitudeDeg, double northLatitudeDeg) -> bool"""
        return _osr.CoordinateTransformationOptions_SetAreaOfInterest(self, *args)
    def SetOperation(self, *args):
        """SetOperation(CoordinateTransformationOptions self, char const * operation) -> bool"""
        return _osr.CoordinateTransformationOptions_SetOperation(self, *args)
    def SetDesiredAccuracy(self, *args):
        """SetDesiredAccuracy(CoordinateTransformationOptions self, double accuracy) -> bool"""
        return _osr.CoordinateTransformationOptions_SetDesiredAccuracy(self, *args)
    def SetBallparkAllowed(self, *args):
        """SetBallparkAllowed(CoordinateTransformationOptions self, bool allowBallpark) -> bool"""
        return _osr.CoordinateTransformationOptions_SetBallparkAllowed(self, *args)
CoordinateTransformationOptions_swigregister = _osr.CoordinateTransformationOptions_swigregister
CoordinateTransformationOptions_swigregister(CoordinateTransformationOptions)
class CoordinateTransformation(_object):
    """Proxy of C++ OSRCoordinateTransformationShadow class.
    Transforms coordinates from a source SpatialReference to a
    destination SpatialReference (see the __init__ overloads).
    """
    # NOTE: SWIG-generated proxy -- edits here are lost when the bindings
    # are regenerated from the interface files.
    # SWIG dispatch tables: attribute reads/writes are routed through the
    # generated _swig_getattr/_swig_setattr helpers to the C object.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, CoordinateTransformation, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, CoordinateTransformation, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(OSRCoordinateTransformationShadow self, SpatialReference src, SpatialReference dst) -> CoordinateTransformation
        __init__(OSRCoordinateTransformationShadow self, SpatialReference src, SpatialReference dst, CoordinateTransformationOptions options) -> CoordinateTransformation
        """
        this = _osr.new_CoordinateTransformation(*args)
        # Standard SWIG idiom: attach the new C pointer to this proxy,
        # falling back to direct assignment when `self.this` is absent.
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _osr.delete_CoordinateTransformation
    __del__ = lambda self: None
    def TransformPoint(self, *args):
        """
        TransformPoint(CoordinateTransformation self, double [3] inout)
        TransformPoint(CoordinateTransformation self, double [4] inout)
        TransformPoint(CoordinateTransformation self, double x, double y, double z=0.0)
        TransformPoint(CoordinateTransformation self, double x, double y, double z, double t)
        """
        return _osr.CoordinateTransformation_TransformPoint(self, *args)
    def TransformPointWithErrorCode(self, *args):
        """TransformPointWithErrorCode(CoordinateTransformation self, double x, double y, double z, double t)"""
        return _osr.CoordinateTransformation_TransformPointWithErrorCode(self, *args)
    def TransformPoints(self, *args):
        """TransformPoints(CoordinateTransformation self, int nCount)"""
        return _osr.CoordinateTransformation_TransformPoints(self, *args)
CoordinateTransformation_swigregister = _osr.CoordinateTransformation_swigregister
CoordinateTransformation_swigregister(CoordinateTransformation)
def CreateCoordinateTransformation(*args):
"""CreateCoordinateTransformation(SpatialReference src, SpatialReference dst, CoordinateTransformationOptions options=None) -> CoordinateTransformation"""
return _osr.CreateCoordinateTransformation(*args)
OSR_CRS_TYPE_GEOGRAPHIC_2D = _osr.OSR_CRS_TYPE_GEOGRAPHIC_2D
OSR_CRS_TYPE_GEOGRAPHIC_3D = _osr.OSR_CRS_TYPE_GEOGRAPHIC_3D
OSR_CRS_TYPE_GEOCENTRIC = _osr.OSR_CRS_TYPE_GEOCENTRIC
OSR_CRS_TYPE_PROJECTED = _osr.OSR_CRS_TYPE_PROJECTED
OSR_CRS_TYPE_VERTICAL = _osr.OSR_CRS_TYPE_VERTICAL
OSR_CRS_TYPE_COMPOUND = _osr.OSR_CRS_TYPE_COMPOUND
OSR_CRS_TYPE_OTHER = _osr.OSR_CRS_TYPE_OTHER
class CRSInfo(_object):
    """Proxy of C++ OSRCRSInfo class.
    Read-only record describing one coordinate reference system:
    authority/code, name, type, deprecation flag, bounding box
    (when bbox_valid), area name and projection method.
    """
    # NOTE: SWIG-generated proxy -- edits here are lost when the bindings
    # are regenerated from the interface files.
    # SWIG dispatch tables: attribute reads/writes are routed through the
    # generated _swig_getattr/_swig_setattr helpers to the C object.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, CRSInfo, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, CRSInfo, name)
    __repr__ = _swig_repr
    # Each struct field is wired to its C getter; with new-style classes
    # (_newclass) it is additionally exposed as a read-only property.
    __swig_getmethods__["auth_name"] = _osr.CRSInfo_auth_name_get
    if _newclass:
        auth_name = _swig_property(_osr.CRSInfo_auth_name_get)
    __swig_getmethods__["code"] = _osr.CRSInfo_code_get
    if _newclass:
        code = _swig_property(_osr.CRSInfo_code_get)
    __swig_getmethods__["name"] = _osr.CRSInfo_name_get
    if _newclass:
        name = _swig_property(_osr.CRSInfo_name_get)
    __swig_getmethods__["type"] = _osr.CRSInfo_type_get
    if _newclass:
        type = _swig_property(_osr.CRSInfo_type_get)
    __swig_getmethods__["deprecated"] = _osr.CRSInfo_deprecated_get
    if _newclass:
        deprecated = _swig_property(_osr.CRSInfo_deprecated_get)
    __swig_getmethods__["bbox_valid"] = _osr.CRSInfo_bbox_valid_get
    if _newclass:
        bbox_valid = _swig_property(_osr.CRSInfo_bbox_valid_get)
    __swig_getmethods__["west_lon_degree"] = _osr.CRSInfo_west_lon_degree_get
    if _newclass:
        west_lon_degree = _swig_property(_osr.CRSInfo_west_lon_degree_get)
    __swig_getmethods__["south_lat_degree"] = _osr.CRSInfo_south_lat_degree_get
    if _newclass:
        south_lat_degree = _swig_property(_osr.CRSInfo_south_lat_degree_get)
    __swig_getmethods__["east_lon_degree"] = _osr.CRSInfo_east_lon_degree_get
    if _newclass:
        east_lon_degree = _swig_property(_osr.CRSInfo_east_lon_degree_get)
    __swig_getmethods__["north_lat_degree"] = _osr.CRSInfo_north_lat_degree_get
    if _newclass:
        north_lat_degree = _swig_property(_osr.CRSInfo_north_lat_degree_get)
    __swig_getmethods__["area_name"] = _osr.CRSInfo_area_name_get
    if _newclass:
        area_name = _swig_property(_osr.CRSInfo_area_name_get)
    __swig_getmethods__["projection_method"] = _osr.CRSInfo_projection_method_get
    if _newclass:
        projection_method = _swig_property(_osr.CRSInfo_projection_method_get)
    def __init__(self, *args):
        """__init__(OSRCRSInfo self, char const * auth_name, char const * code, char const * name, OSRCRSType type, bool deprecated, bool bbox_valid, double west_lon_degree, double south_lat_degree, double east_lon_degree, double north_lat_degree, char const * area_name, char const * projection_method) -> CRSInfo"""
        this = _osr.new_CRSInfo(*args)
        # Standard SWIG idiom: attach the new C pointer to this proxy,
        # falling back to direct assignment when `self.this` is absent.
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _osr.delete_CRSInfo
    __del__ = lambda self: None
CRSInfo_swigregister = _osr.CRSInfo_swigregister
CRSInfo_swigregister(CRSInfo)
def OSRCRSInfo_auth_name_get(*args):
"""OSRCRSInfo_auth_name_get(CRSInfo crsInfo) -> char const *"""
return _osr.OSRCRSInfo_auth_name_get(*args)
def OSRCRSInfo_code_get(*args):
"""OSRCRSInfo_code_get(CRSInfo crsInfo) -> char const *"""
return _osr.OSRCRSInfo_code_get(*args)
def OSRCRSInfo_name_get(*args):
"""OSRCRSInfo_name_get(CRSInfo crsInfo) -> char const *"""
return _osr.OSRCRSInfo_name_get(*args)
def OSRCRSInfo_type_get(*args):
"""OSRCRSInfo_type_get(CRSInfo crsInfo) -> OSRCRSType"""
return _osr.OSRCRSInfo_type_get(*args)
def OSRCRSInfo_deprecated_get(*args):
"""OSRCRSInfo_deprecated_get(CRSInfo crsInfo) -> bool"""
return _osr.OSRCRSInfo_deprecated_get(*args)
def OSRCRSInfo_bbox_valid_get(*args):
"""OSRCRSInfo_bbox_valid_get(CRSInfo crsInfo) -> bool"""
return _osr.OSRCRSInfo_bbox_valid_get(*args)
def OSRCRSInfo_west_lon_degree_get(*args):
"""OSRCRSInfo_west_lon_degree_get(CRSInfo crsInfo) -> double"""
return _osr.OSRCRSInfo_west_lon_degree_get(*args)
def OSRCRSInfo_south_lat_degree_get(*args):
"""OSRCRSInfo_south_lat_degree_get(CRSInfo crsInfo) -> double"""
return _osr.OSRCRSInfo_south_lat_degree_get(*args)
def OSRCRSInfo_east_lon_degree_get(*args):
"""OSRCRSInfo_east_lon_degree_get(CRSInfo crsInfo) -> double"""
return _osr.OSRCRSInfo_east_lon_degree_get(*args)
def OSRCRSInfo_north_lat_degree_get(*args):
"""OSRCRSInfo_north_lat_degree_get(CRSInfo crsInfo) -> double"""
return _osr.OSRCRSInfo_north_lat_degree_get(*args)
def OSRCRSInfo_area_name_get(*args):
"""OSRCRSInfo_area_name_get(CRSInfo crsInfo) -> char const *"""
return _osr.OSRCRSInfo_area_name_get(*args)
def OSRCRSInfo_projection_method_get(*args):
"""OSRCRSInfo_projection_method_get(CRSInfo crsInfo) -> char const *"""
return _osr.OSRCRSInfo_projection_method_get(*args)
def GetCRSInfoListFromDatabase(*args):
"""GetCRSInfoListFromDatabase(char const * authName)"""
return _osr.GetCRSInfoListFromDatabase(*args)
def SetPROJSearchPath(*args):
"""SetPROJSearchPath(char const * utf8_path)"""
return _osr.SetPROJSearchPath(*args)
def SetPROJSearchPaths(*args):
"""SetPROJSearchPaths(char ** paths)"""
return _osr.SetPROJSearchPaths(*args)
def GetPROJSearchPaths(*args):
"""GetPROJSearchPaths() -> char **"""
return _osr.GetPROJSearchPaths(*args)
def GetPROJVersionMajor(*args):
"""GetPROJVersionMajor() -> int"""
return _osr.GetPROJVersionMajor(*args)
def GetPROJVersionMinor(*args):
"""GetPROJVersionMinor() -> int"""
return _osr.GetPROJVersionMinor(*args)
def GetPROJVersionMicro(*args):
"""GetPROJVersionMicro() -> int"""
return _osr.GetPROJVersionMicro(*args)
def GetPROJEnableNetwork(*args):
"""GetPROJEnableNetwork() -> bool"""
return _osr.GetPROJEnableNetwork(*args)
def SetPROJEnableNetwork(*args):
"""SetPROJEnableNetwork(bool enabled)"""
return _osr.SetPROJEnableNetwork(*args)
def SetPROJAuxDbPath(*args):
"""SetPROJAuxDbPath(char const * utf8_path)"""
return _osr.SetPROJAuxDbPath(*args)
def SetPROJAuxDbPaths(*args):
"""SetPROJAuxDbPaths(char ** paths)"""
return _osr.SetPROJAuxDbPaths(*args)
def GetPROJAuxDbPaths(*args):
"""GetPROJAuxDbPaths() -> char **"""
return _osr.GetPROJAuxDbPaths(*args)
# This file is compatible with both classic and new-style classes.
| 41.587087 | 317 | 0.741723 |
574cd53e6c04fac3dfa822868300388ffc64584f | 2,873 | py | Python | scripts/new_session_adder.py | wangsongiam/leetcode | 96ff21bca1871816ae51fccb1fa13587b378dc50 | [
"MIT"
] | 3 | 2018-11-25T15:19:57.000Z | 2019-09-28T03:01:11.000Z | scripts/new_session_adder.py | casprwang/leetcode | 96ff21bca1871816ae51fccb1fa13587b378dc50 | [
"MIT"
] | null | null | null | scripts/new_session_adder.py | casprwang/leetcode | 96ff21bca1871816ae51fccb1fa13587b378dc50 | [
"MIT"
] | 3 | 2018-02-11T20:23:44.000Z | 2020-06-05T15:39:56.000Z | import os
import re
OLD_DIR = os.path.expanduser('~/dev/leetcode/solutions')
NEW_DIR = os.path.expanduser('~/temp/lll/solutions/')
OLD_README = os.path.expanduser('~/dev/leetcode/README.md')
NEW_README = os.path.expanduser('~/dev/leetcode/README_NEW.md')
COM_README = os.path.expanduser('~/temp/lll/README.md')
def get_sol_path_from_parent_path(prefix, parent_dir):
    """Resolve the expected ``.py`` solution path inside a solution folder.

    ``parent_dir`` is expected to look like ``"<number>.<slug>"``
    (e.g. ``"1.two-sum"``); the solution file is then ``<slug>.py``
    inside ``prefix/parent_dir``.

    Returns a ``(solution_file_path, slug)`` tuple, or ``(None, None)``
    when ``prefix/parent_dir`` is not a directory or its name contains
    no dot.
    """
    solution_path = os.path.join(prefix, parent_dir)
    # Only real directories can hold a solution file.
    if not os.path.isdir(solution_path):
        return None, None
    # Everything after the first dot is the solution slug; use a raw
    # string so the escaped dot is not an invalid escape sequence.
    match = re.search(r'\.(.*)', parent_dir)
    if not match:
        return None, None
    solution_basename = match.group(1)
    return os.path.join(solution_path, solution_basename + '.py'), solution_basename
# DIR -> child.py
# Copy every solution present in NEW_DIR but missing from OLD_DIR,
# creating the destination folder (with its question.md) when needed.
for solution_dir in os.listdir(NEW_DIR):
    # Skip entries that are not "<number>.<slug>" solution directories.
    new_solution_path, solutinon_basename = get_sol_path_from_parent_path(NEW_DIR, solution_dir)
    if not new_solution_path:
        continue
    try:
        new_solution_dir = os.path.join(NEW_DIR, solution_dir)
        old_solution_dir = os.path.join(OLD_DIR, solution_dir)
        new_file_path = os.path.join(
            old_solution_dir, solutinon_basename + '.py')
        # Already synced: nothing to do for this solution.
        if os.path.exists(new_file_path):
            continue
        # create folders with question.md for non existing
        if not os.path.exists(old_solution_dir):
            os.makedirs(old_solution_dir)
            # BUG FIX: the original read from the undefined name
            # `solution_path` (a local variable of the helper function),
            # raising a NameError that the broad `except` below swallowed,
            # so question.md was never copied.  Read it from the new
            # solution's own folder instead.
            with open(os.path.join(new_solution_dir, 'question.md'), 'r') as source, open(os.path.join(old_solution_dir, 'question.md'), 'w') as target:
                target.write(source.read())
        # write python solutions
        with open(new_solution_path, 'r') as new_sol_f, open(new_file_path, 'w') as target_f:
            target_f.write(new_sol_f.read())
    except Exception as e:
        # Best-effort sync: report the failure and continue with the
        # next solution directory.
        print(e)
# readme
# if .py in old_solution_dir and not python in readme line
# Rewrite the README table: for each row whose problem number now has a
# python solution on disk, append a [python](...) link to the fourth
# '|'-separated column.
with open(OLD_README, 'r') as f_old, open(NEW_README, 'w') as f_new, open(COM_README, 'r') as f_com:
    # NOTE(review): f_com is opened but never read -- dead handle?
    for line in f_old.readlines():
        new_line = line
        for old_solution_dir in os.listdir(OLD_DIR):
            # filtering none solution lines
            match = re.search(r'\|([0-9]*?)\|', line)
            if not match:
                break
            # filtering python solutions to add
            n = match.groups()[0]
            py_solution_path, _ = get_sol_path_from_parent_path(OLD_DIR, old_solution_dir)
            if py_solution_path:
                # NOTE(review): substring test -- "1" also matches "11.x"
                # and "21.x"; an exact "{}." prefix match looks intended.
                if n in old_solution_dir:
                    # update
                    li = line.split('|')
                    # NOTE(review): `solutinon_basename` here is whatever
                    # value leaked from the last iteration of the copy loop
                    # above, not the basename of `old_solution_dir`, so the
                    # generated link is likely wrong for most rows.
                    new_item = ' [python](./solutions/{}/{}.py)'.format(old_solution_dir, solutinon_basename)
                    li[3] = li[3] + new_item
                    new_idex = '|'.join(li)
                    new_line = new_idex
                    break
            else:
                pass
        f_new.write(new_line)
| 32.280899 | 149 | 0.611904 |
9a17f1cfeb1bee799a3fb6a43151c6d0720f6b07 | 990 | py | Python | tests/models/ultralytics/yolov5/lightning/test_train.py | Toucan-Systems/icevision | 7dc8d1f0557b24899f3823f1370f28496b73fac7 | [
"Apache-2.0"
] | null | null | null | tests/models/ultralytics/yolov5/lightning/test_train.py | Toucan-Systems/icevision | 7dc8d1f0557b24899f3823f1370f28496b73fac7 | [
"Apache-2.0"
] | null | null | null | tests/models/ultralytics/yolov5/lightning/test_train.py | Toucan-Systems/icevision | 7dc8d1f0557b24899f3823f1370f28496b73fac7 | [
"Apache-2.0"
] | null | null | null | import pytest
from icevision.all import *
from icevision.models.ultralytics.yolov5.backbones import *
@pytest.mark.parametrize(
    "backbone",
    [small, medium, large],
)
def test_lightning_yolo_train(fridge_ds, backbone):
    # Smoke test: one epoch of YOLOv5 training through the PyTorch
    # Lightning adapter must run end-to-end for each backbone size.
    train_ds, valid_ds = fridge_ds
    # shuffle=False keeps batch composition deterministic across runs.
    train_dl = models.ultralytics.yolov5.train_dl(
        train_ds, batch_size=3, num_workers=0, shuffle=False
    )
    valid_dl = models.ultralytics.yolov5.valid_dl(
        valid_ds, batch_size=3, num_workers=0, shuffle=False
    )
    # NOTE(review): num_classes=5 and img_size=384 appear tied to the
    # `fridge_ds` fixture -- confirm against its definition.
    model = models.ultralytics.yolov5.model(
        num_classes=5, img_size=384, backbone=backbone(pretrained=True)
    )
    metrics = [COCOMetric(metric_type=COCOMetricType.bbox)]
    # Minimal adapter subclass: only the optimizer needs to be supplied.
    class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter):
        def configure_optimizers(self):
            return SGD(self.parameters(), lr=1e-4)
    light_model = LightModel(model, metrics=metrics)
    trainer = pl.Trainer(max_epochs=1)
    trainer.fit(light_model, train_dl, valid_dl)
| 30.9375 | 71 | 0.721212 |
960856d683c82d8da51927a2b5835b008ccecc1e | 1,672 | py | Python | efficientnet/utils/gen_csf_dataset.py | DoriHp/efficientnet-pytorch | a7f51480c40b389d54fc5c1aa4e30fedcca99adf | [
"MIT"
] | null | null | null | efficientnet/utils/gen_csf_dataset.py | DoriHp/efficientnet-pytorch | a7f51480c40b389d54fc5c1aa4e30fedcca99adf | [
"MIT"
] | null | null | null | efficientnet/utils/gen_csf_dataset.py | DoriHp/efficientnet-pytorch | a7f51480c40b389d54fc5c1aa4e30fedcca99adf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: bao
# @Date: 2021-03-01 15:02:26
# @Last Modified by: bao
# @Last Modified time: 2021-03-01 15:54:21
# Read label from original csv file, then divide dataset in to 2 classes dataset (normal and abnormal)
# Output file format
# filepath | label |
# ... | ... |
# ... | ... |
# ... | ... |
import os
from tqdm import tqdm
def generator(root_folder):
    """Generate binary-classification CSVs from a YOLOv5-format dataset.

    Args:
        - root_folder: folder whose `train/` and `valid/` sub-folders
          contain the image & label files (YOLOv5 format)
    Output:
        - ../../data/train.csv and ../../data/valid.csv, each with a
          `filename,label` header and one row per label file
    """
    train_dir = os.path.join(root_folder, "train")
    valid_dir = os.path.join(root_folder, "valid")
    train_list = [os.path.join(train_dir, f) for f in os.listdir(train_dir) if f.endswith(".txt")]
    valid_list = [os.path.join(valid_dir, f) for f in os.listdir(valid_dir) if f.endswith(".txt")]
    # The train/valid bodies were previously copy-pasted; write both
    # splits through a single helper instead.
    _write_split(train_list, "../../data/train.csv", "Generating train set")
    _write_split(valid_list, "../../data/valid.csv", "Generating valid set")


def _write_split(label_files, out_csv, desc):
    """Write one `filename,label` row per label file to `out_csv`.

    An empty YOLO label file means the image carries no annotation
    ("normal"); a non-empty one means it does ("abnormal").
    """
    with open(out_csv, "w") as writer:
        write_string = "filename,label\n"
        for f in tqdm(label_files, total=len(label_files), desc=desc):
            label = "normal" if os.stat(f).st_size == 0 else "abnormal"
            # NOTE(review): str.replace swaps every ".txt" occurrence in
            # the path, not just the extension -- fine as long as no
            # directory name contains ".txt".
            write_string += "%s,%s\n" % (f.replace(".txt", ".jpg"), label)
        writer.write(write_string)
if __name__ == '__main__':
generator("F:/Dev/yolov5/dataset") | 32.784314 | 102 | 0.642943 |
f3f04abc81da8ad1307da0aa0e80fb5e0d0c109e | 3,837 | py | Python | spotzurnal/main.py | oskar456/spotzurnal | 6d52bcb8b9a28381ebe81c398c04b95e3ea9453d | [
"MIT"
] | null | null | null | spotzurnal/main.py | oskar456/spotzurnal | 6d52bcb8b9a28381ebe81c398c04b95e3ea9453d | [
"MIT"
] | null | null | null | spotzurnal/main.py | oskar456/spotzurnal | 6d52bcb8b9a28381ebe81c398c04b95e3ea9453d | [
"MIT"
] | null | null | null | import datetime
from pathlib import Path
import click
from yaml import safe_load
from . import croapi
from . import matcher
from .spotify import Spotify
from .cache import Cache
from .clickdate import ClickDate
from .aggregator import parse_plname
@click.command()
@click.option(
    "--credentials", "-c",
    metavar="<credentials_json_file>",
    show_default=True,
    type=click.Path(dir_okay=False),
    default=str(Path(click.get_app_dir("spotzurnal")) / "credentials.json"),
    help="Path where to store credentials.",
)
@click.option(
    "--username", "-u",
    metavar="USER",
    help="Spotify user name",
)
@click.option(
    "--date", "-d",
    type=ClickDate(),
    default=["today", ],
    show_default=True,
    help="Date of the playlist (can be used multiple times)",
    multiple=True,
)
@click.option(
    "--station", "-s",
    # NOTE(review): the choice list is built at import time -- verify
    # croapi.get_cro_stations() does not hit the network here.
    type=click.Choice(croapi.get_cro_stations()),
    default=["radiozurnal", ],
    show_default=True,
    help="The station to grab (can be used multiple times)",
    multiple=True,
)
@click.option(
    "--replace/--no-replace", "-r",
    help="Replace existing playlist instead of appending",
)
@click.option(
    "--cache",
    metavar="<cache_sqlite_file>",
    show_default=True,
    type=click.Path(dir_okay=False),
    default=str(Path(click.get_app_dir("spotzurnal")) / "cache.sqlite"),
    help="Path to SQLite cache. (Created if necessary)",
)
@click.option(
    "--quirks", "-q",
    metavar="<quirks_yaml_file>",
    show_default=True,
    type=click.File(),
    help="Path to hand-kept quirks file",
)
def main(credentials, username, date, station, replace, cache, quirks):
    """
    Generate a Spotify playlist from a playlist published
    by the Czech Radio.
    """
    sp = Spotify(username=username, credfile=credentials)
    c = Cache(cache)
    # Quirks are optional hand-maintained overrides loaded from YAML.
    if quirks:
        q = safe_load(quirks)
    else:
        q = None
    # Process the cartesian product of all requested stations and dates
    # (dates vary in the outer position, stations in the inner one).
    for st, d in ((st, d) for d in date for st in station):
        matcher.match_cro_playlist(sp, d, st, replace, c, q)
        print()
@click.command()
@click.option(
    "--credentials", "-c",
    metavar="<credentials_json_file>",
    show_default=True,
    type=click.Path(dir_okay=False),
    default=str(Path(click.get_app_dir("spotzurnal")) / "credentials.json"),
    help="Path where to store credentials.",
)
@click.option(
    "--username", "-u",
    metavar="USER",
    help="Spotify user name",
)
@click.option(
    "--month", "-m",
    type=ClickDate(),
    # NOTE(review): default is evaluated once at import time; fine for a
    # one-shot CLI, stale in any long-lived process.
    default=datetime.date.today(),
    show_default=True,
    help="Month of the playlist",
)
@click.option(
    "--station", "-s",
    type=click.Choice(croapi.get_cro_stations()),
)
@click.option(
    "--cache",
    metavar="<cache_sqlite_file>",
    show_default=True,
    type=click.Path(dir_okay=False),
    default=str(Path(click.get_app_dir("spotzurnal")) / "cache.sqlite"),
    help="Path to SQLite cache. (Created if necessary)",
)
@click.option(
    "--quirks", "-q",
    metavar="<quirks_yaml_file>",
    show_default=True,
    type=click.File(),
    help="Path to hand-kept quirks file",
)
def rematch(credentials, username, month, station, cache, quirks):
    """
    Regenerate Spotify playlists from a playlist published
    by the Czech Radio -- possibly using new quirks and cache contents.
    """
    sp = Spotify(username=username, credfile=credentials)
    c = Cache(cache)
    # Quirks are optional hand-maintained overrides loaded from YAML.
    if quirks:
        q = safe_load(quirks)
    else:
        q = None
    # Parse every playlist name on the account; non-matching names are
    # filtered out below (falsy parse result).
    playlists = [
        parse_plname(p)
        for p in sp.get_all_data(sp.current_user_playlists, limit=50)
    ]
    # Keep playlists for the requested station (or any station when
    # --station is omitted) that fall inside the requested month.
    playlists = [
        p for p in playlists
        if p
        and ((station is None) or p.station == station)
        and p.date.year == month.year
        and p.date.month == month.month
    ]
    for p in playlists:
        # replace=True: rebuild each matched playlist from scratch.
        matcher.match_cro_playlist(sp, p.date, p.station, True, c, q)
| 26.645833 | 76 | 0.643211 |
eb6ad5cd5f552f34136a057e98dd9b047d8f4ec0 | 90,957 | py | Python | keras/layers/recurrent.py | ekholabs/keras | 4a28dc5debcd0bd790ef29d202342184fde5a1f4 | [
"MIT"
] | 7 | 2017-06-02T19:07:36.000Z | 2021-07-23T21:01:44.000Z | keras/layers/recurrent.py | ekholabs/keras | 4a28dc5debcd0bd790ef29d202342184fde5a1f4 | [
"MIT"
] | null | null | null | keras/layers/recurrent.py | ekholabs/keras | 4a28dc5debcd0bd790ef29d202342184fde5a1f4 | [
"MIT"
] | 4 | 2017-05-27T02:37:54.000Z | 2017-08-05T16:01:31.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
import warnings
from .. import backend as K
from .. import activations
from .. import initializers
from .. import regularizers
from .. import constraints
from ..engine import Layer
from ..engine import InputSpec
from ..utils.generic_utils import has_arg
# Legacy support.
from ..legacy.layers import Recurrent
from ..legacy import interfaces
class StackedRNNCells(Layer):
    """Wrapper allowing a stack of RNN cells to behave as a single cell.
    Used to implement efficient stacked RNNs.
    # Arguments
        cells: List of RNN cell instances.
    # Examples
    ```python
        cells = [
            keras.layers.LSTMCell(output_dim),
            keras.layers.LSTMCell(output_dim),
            keras.layers.LSTMCell(output_dim),
        ]
        inputs = keras.Input((timesteps, input_dim))
        x = keras.layers.RNN(cells)(inputs)
    ```
    """

    def __init__(self, cells, **kwargs):
        # Fail fast if any element does not satisfy the minimal cell
        # contract (a `call` method and a `state_size` attribute).
        for cell in cells:
            if not hasattr(cell, 'call'):
                raise ValueError('All cells must have a `call` method. '
                                 'received cells:', cells)
            if not hasattr(cell, 'state_size'):
                raise ValueError('All cells must have a '
                                 '`state_size` attribute. '
                                 'received cells:', cells)
        self.cells = cells
        super(StackedRNNCells, self).__init__(**kwargs)

    @property
    def state_size(self):
        # States are a flat list
        # in reverse order of the cell stack.
        # This allows to preserve the requirement
        # `stack.state_size[0] == output_dim`.
        # e.g. states of a 2-layer LSTM would be
        # `[h2, c2, h1, c1]`
        # (assuming one LSTM has states [h, c])
        state_size = []
        for cell in self.cells[::-1]:
            if hasattr(cell.state_size, '__len__'):
                state_size += list(cell.state_size)
            else:
                state_size.append(cell.state_size)
        return tuple(state_size)

    def call(self, inputs, states, **kwargs):
        """Run the whole cell stack for one timestep.
        `states` is the flat state list described by `state_size`
        (last cell's states first); the returned states use the same
        flat, reversed ordering.
        """
        # Recover per-cell states from the flat reversed list.
        nested_states = []
        for cell in self.cells[::-1]:
            if hasattr(cell.state_size, '__len__'):
                nested_states.append(states[:len(cell.state_size)])
                states = states[len(cell.state_size):]
            else:
                nested_states.append([states[0]])
                states = states[1:]
        nested_states = nested_states[::-1]

        # Call the cells in order, feeding each cell's output to the
        # next, and store the returned states.
        new_nested_states = []
        for cell, states in zip(self.cells, nested_states):
            inputs, states = cell.call(inputs, states, **kwargs)
            new_nested_states.append(states)

        # Format the new states as a flat list in reverse cell order.
        states = []
        for cell_states in new_nested_states[::-1]:
            states += cell_states
        return inputs, states

    def build(self, input_shape):
        # Build each cell in sequence; every subsequent cell consumes the
        # previous cell's output, whose size is `state_size[0]` (or the
        # scalar `state_size`).
        for cell in self.cells:
            if isinstance(cell, Layer):
                cell.build(input_shape)
            if hasattr(cell.state_size, '__len__'):
                output_dim = cell.state_size[0]
            else:
                output_dim = cell.state_size
            input_shape = (input_shape[0], input_shape[1], output_dim)
        self.built = True

    def get_config(self):
        cells = []
        for cell in self.cells:
            cells.append({'class_name': cell.__class__.__name__,
                          'config': cell.get_config()})
        config = {'cells': cells}
        base_config = super(StackedRNNCells, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config, custom_objects=None):
        from . import deserialize as deserialize_layer
        cells = []
        for cell_config in config.pop('cells'):
            cells.append(deserialize_layer(cell_config,
                                           custom_objects=custom_objects))
        return cls(cells, **config)

    @property
    def trainable_weights(self):
        if not self.trainable:
            return []
        weights = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                weights += cell.trainable_weights
        return weights

    @property
    def non_trainable_weights(self):
        weights = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                weights += cell.non_trainable_weights
        if not self.trainable:
            # When the wrapper itself is frozen, the per-cell trainable
            # weights are reported as non-trainable as well.
            trainable_weights = []
            for cell in self.cells:
                if isinstance(cell, Layer):
                    trainable_weights += cell.trainable_weights
            return trainable_weights + weights
        return weights

    def get_weights(self):
        """Retrieves the weights of the model.
        # Returns
            A flat list of Numpy arrays.
        """
        weights = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                weights += cell.weights
        return K.batch_get_value(weights)

    def set_weights(self, weights):
        """Sets the weights of the model.
        # Arguments
            weights: A list of Numpy arrays with shapes and types matching
                the output of `model.get_weights()`.
        """
        tuples = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                num_param = len(cell.weights)
                # BUG FIX: the previous code rebound `weights` to the head
                # slice (`weights = weights[:num_param]`), which made the
                # trailing `weights[num_param:]` empty, so every cell after
                # the first silently received no weights.  Slice into a
                # separate variable instead.
                cell_weights = weights[:num_param]
                for sw, w in zip(cell.weights, cell_weights):
                    tuples.append((sw, w))
                weights = weights[num_param:]
        K.batch_set_value(tuples)

    @property
    def losses(self):
        losses = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                losses += cell.losses
        return losses

    def get_losses_for(self, inputs=None):
        losses = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                losses += cell.get_losses_for(inputs)
        return losses
class RNN(Layer):
"""Base class for recurrent layers.
# Arguments
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the size of the recurrent state
(which should be the same as the size of the cell output).
This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
            It is also possible for `cell` to be a list of RNN cell instances,
            in which case the cells get stacked one after the other in the RNN,
            implementing an efficient stacked RNN.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
input_dim: dimensionality of the input (integer).
This argument (or alternatively,
the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
# Input shape
3D tensor with shape `(batch_size, timesteps, input_dim)`.
# Output shape
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
# Note on specifying the initial state of RNNs
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
# Note on passing external constants to RNNs
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
# Examples
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
"""
    def __init__(self, cell,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):
        """Validates the cell and stores the recurrence flags.

        A list/tuple of cells is wrapped into a single `StackedRNNCells`
        so the rest of the layer can treat `self.cell` uniformly.
        """
        if isinstance(cell, (list, tuple)):
            cell = StackedRNNCells(cell)
        if not hasattr(cell, 'call'):
            raise ValueError('`cell` should have a `call` method. '
                             'The RNN was passed:', cell)
        if not hasattr(cell, 'state_size'):
            raise ValueError('The RNN cell should have '
                             'an attribute `state_size` '
                             '(tuple of integers, '
                             'one integer per RNN state).')
        super(RNN, self).__init__(**kwargs)
        self.cell = cell
        self.return_sequences = return_sequences
        self.return_state = return_state
        self.go_backwards = go_backwards
        self.stateful = stateful
        self.unroll = unroll
        self.supports_masking = True
        # Expect 3D input (batch, timesteps, features); refined in build().
        self.input_spec = [InputSpec(ndim=3)]
        self.state_spec = None
        self._states = None
        # Populated in __call__ when external constants are passed.
        self.constants_spec = None
        self._num_constants = None
@property
def states(self):
if self._states is None:
if isinstance(self.cell.state_size, int):
num_states = 1
else:
num_states = len(self.cell.state_size)
return [None for _ in range(num_states)]
return self._states
    @states.setter
    def states(self, states):
        # Replaces the lazily-built list of `None` placeholders that the
        # `states` property returns before any state is assigned.
        self._states = states
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
if hasattr(self.cell.state_size, '__len__'):
output_dim = self.cell.state_size[0]
else:
output_dim = self.cell.state_size
if self.return_sequences:
output_shape = (input_shape[0], input_shape[1], output_dim)
else:
output_shape = (input_shape[0], output_dim)
if self.return_state:
state_shape = [(input_shape[0], output_dim) for _ in self.states]
return [output_shape] + state_shape
else:
return output_shape
def compute_mask(self, inputs, mask):
if isinstance(mask, list):
mask = mask[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
else:
return output_mask
    def build(self, input_shape):
        """Builds the wrapped cell and sets/validates `state_spec`."""
        # Note input_shape will be list of shapes of initial states and
        # constants if these are passed in __call__.
        if self._num_constants is not None:
            constants_shape = input_shape[-self._num_constants:]
        else:
            constants_shape = None
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        # Batch size is only pinned down for stateful layers.
        batch_size = input_shape[0] if self.stateful else None
        input_dim = input_shape[-1]
        self.input_spec[0] = InputSpec(shape=(batch_size, None, input_dim))
        # allow cell (if layer) to build before we set or validate state_spec
        if isinstance(self.cell, Layer):
            # The cell sees one timestep at a time: drop the time axis.
            step_input_shape = (input_shape[0],) + input_shape[2:]
            if constants_shape is not None:
                self.cell.build([step_input_shape] + constants_shape)
            else:
                self.cell.build(step_input_shape)
        # set or validate state_spec
        if hasattr(self.cell.state_size, '__len__'):
            state_size = list(self.cell.state_size)
        else:
            state_size = [self.cell.state_size]
        if self.state_spec is not None:
            # initial_state was passed in call, check compatibility
            if not [spec.shape[-1] for spec in self.state_spec] == state_size:
                raise ValueError(
                    'An initial_state was passed that is not compatible with '
                    '`cell.state_size`. Received `state_spec`={}; '
                    'However `cell.state_size` is '
                    '{}'.format(self.state_spec, self.cell.state_size))
        else:
            self.state_spec = [InputSpec(shape=(None, dim))
                               for dim in state_size]
        if self.stateful:
            self.reset_states()
def get_initial_state(self, inputs):
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(inputs) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
if hasattr(self.cell.state_size, '__len__'):
return [K.tile(initial_state, [1, dim])
for dim in self.cell.state_size]
else:
return [K.tile(initial_state, [1, self.cell.state_size])]
    def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
        """Wraps `Layer.__call__` to support `initial_state` and `constants`
        given as extra Keras tensors (appended to the layer's inputs)."""
        inputs, initial_state, constants = self._standardize_args(
            inputs, initial_state, constants)
        if initial_state is None and constants is None:
            return super(RNN, self).__call__(inputs, **kwargs)
        # If any of `initial_state` or `constants` are specified and are Keras
        # tensors, then add them to the inputs and temporarily modify the
        # input_spec to include them.
        additional_inputs = []
        additional_specs = []
        if initial_state is not None:
            kwargs['initial_state'] = initial_state
            additional_inputs += initial_state
            self.state_spec = [InputSpec(shape=K.int_shape(state))
                               for state in initial_state]
            additional_specs += self.state_spec
        if constants is not None:
            kwargs['constants'] = constants
            additional_inputs += constants
            self.constants_spec = [InputSpec(shape=K.int_shape(constant))
                                   for constant in constants]
            self._num_constants = len(constants)
            additional_specs += self.constants_spec
        # at this point additional_inputs cannot be empty
        # All extra inputs must be of the same nature: mixing symbolic Keras
        # tensors with plain tensors would break graph construction.
        is_keras_tensor = hasattr(additional_inputs[0], '_keras_history')
        for tensor in additional_inputs:
            if hasattr(tensor, '_keras_history') != is_keras_tensor:
                raise ValueError('The initial state or constants of an RNN'
                                 ' layer cannot be specified with a mix of'
                                 ' Keras tensors and non-Keras tensors')
        if is_keras_tensor:
            # Compute the full input spec, including state and constants
            full_input = [inputs] + additional_inputs
            full_input_spec = self.input_spec + additional_specs
            # Perform the call with temporarily replaced input_spec
            original_input_spec = self.input_spec
            self.input_spec = full_input_spec
            output = super(RNN, self).__call__(full_input, **kwargs)
            self.input_spec = original_input_spec
            return output
        else:
            return super(RNN, self).__call__(inputs, **kwargs)
    def call(self,
             inputs,
             mask=None,
             training=None,
             initial_state=None,
             constants=None):
        """Runs the recurrence over the time dimension of `inputs`.

        # Arguments
            inputs: 3D tensor `(samples, timesteps, input_dim)`; may arrive
                as a list when states/constants were appended in `__call__`.
            mask: optional mask for padded timesteps.
            training: learning-phase flag, forwarded to the cell if its
                `call` accepts a `training` argument.
            initial_state: optional list of initial state tensors; defaults
                to the stored states (stateful) or all-zeros.
            constants: optional list of static tensors passed to the cell.
        """
        # input shape: `(samples, time (padded with zeros), input_dim)`
        # note that the .build() method of subclasses MUST define
        # self.input_spec and self.state_spec with complete input shapes.
        if isinstance(inputs, list):
            inputs = inputs[0]
        if initial_state is not None:
            pass
        elif self.stateful:
            initial_state = self.states
        else:
            initial_state = self.get_initial_state(inputs)
        if isinstance(mask, list):
            mask = mask[0]
        if len(initial_state) != len(self.states):
            raise ValueError('Layer has ' + str(len(self.states)) +
                             ' states but was passed ' +
                             str(len(initial_state)) +
                             ' initial states.')
        input_shape = K.int_shape(inputs)
        timesteps = input_shape[1]
        if self.unroll and timesteps in [None, 1]:
            raise ValueError('Cannot unroll a RNN if the '
                             'time dimension is undefined or equal to 1. \n'
                             '- If using a Sequential model, '
                             'specify the time dimension by passing '
                             'an `input_shape` or `batch_input_shape` '
                             'argument to your first layer. If your '
                             'first layer is an Embedding, you can '
                             'also use the `input_length` argument.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a `shape` '
                             'or `batch_shape` argument to your Input layer.')
        kwargs = {}
        if has_arg(self.cell.call, 'training'):
            kwargs['training'] = training
        if constants:
            if not has_arg(self.cell.call, 'constants'):
                raise ValueError('RNN cell does not support constants')
            def step(inputs, states):
                # Constants travel appended to the state list; split them
                # back off before calling the cell.
                constants = states[-self._num_constants:]
                states = states[:-self._num_constants]
                return self.cell.call(inputs, states, constants=constants,
                                      **kwargs)
        else:
            def step(inputs, states):
                return self.cell.call(inputs, states, **kwargs)
        last_output, outputs, states = K.rnn(step,
                                             inputs,
                                             initial_state,
                                             constants=constants,
                                             go_backwards=self.go_backwards,
                                             mask=mask,
                                             unroll=self.unroll,
                                             input_length=timesteps)
        if self.stateful:
            # Persist final states so the next batch resumes from them.
            updates = []
            for i in range(len(states)):
                updates.append((self.states[i], states[i]))
            self.add_update(updates, inputs)
        if self.return_sequences:
            output = outputs
        else:
            output = last_output
        # Properly set learning phase
        if getattr(last_output, '_uses_learning_phase', False):
            output._uses_learning_phase = True
        if self.return_state:
            if not isinstance(states, (list, tuple)):
                states = [states]
            else:
                states = list(states)
            return [output] + states
        else:
            return output
    def _standardize_args(self, inputs, initial_state, constants):
        """Brings the arguments of `__call__` that can contain input tensors to
        standard format.
        When running a model loaded from file, the input tensors
        `initial_state` and `constants` can be passed to `RNN.__call__` as part
        of `inputs` instead of by the dedicated keyword arguments. This method
        makes sure the arguments are separated and that `initial_state` and
        `constants` are lists of tensors (or None).
        # Arguments
            inputs: tensor or list/tuple of tensors
            initial_state: tensor or list of tensors or None
            constants: tensor or list of tensors or None
        # Returns
            inputs: tensor
            initial_state: list of tensors or None
            constants: list of tensors or None
        """
        if isinstance(inputs, list):
            # Extras may come in only one way: either appended to `inputs`
            # or via the keyword arguments, never both.
            assert initial_state is None and constants is None
            # Constants occupy the tail of the list; states sit between the
            # actual input and the constants.
            if self._num_constants is not None:
                constants = inputs[-self._num_constants:]
                inputs = inputs[:-self._num_constants]
            if len(inputs) > 1:
                initial_state = inputs[1:]
            inputs = inputs[0]
        def to_list_or_none(x):
            # Normalize a single tensor / tuple to a list; pass None through.
            if x is None or isinstance(x, list):
                return x
            if isinstance(x, tuple):
                return list(x)
            return [x]
        initial_state = to_list_or_none(initial_state)
        constants = to_list_or_none(constants)
        return inputs, initial_state, constants
    def reset_states(self, states=None):
        """Resets recorded states to zeros, or to the given numpy values.

        # Arguments
            states: optional numpy array or list of numpy arrays, one per
                cell state, each of shape `(batch_size, dim)`; when omitted,
                existing state variables are zeroed.

        # Raises
            AttributeError: if the layer is not stateful.
            ValueError: if the batch size is unknown, or `states` does not
                match the number or shapes of the layer's states.
        """
        if not self.stateful:
            raise AttributeError('Layer must be stateful.')
        batch_size = self.input_spec[0].shape[0]
        if not batch_size:
            raise ValueError('If a RNN is stateful, it needs to know '
                             'its batch size. Specify the batch size '
                             'of your input tensors: \n'
                             '- If using a Sequential model, '
                             'specify the batch size by passing '
                             'a `batch_input_shape` '
                             'argument to your first layer.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a '
                             '`batch_shape` argument to your Input layer.')
        # initialize state if None
        if self.states[0] is None:
            if hasattr(self.cell.state_size, '__len__'):
                self.states = [K.zeros((batch_size, dim))
                               for dim in self.cell.state_size]
            else:
                self.states = [K.zeros((batch_size, self.cell.state_size))]
        elif states is None:
            if hasattr(self.cell.state_size, '__len__'):
                for state, dim in zip(self.states, self.cell.state_size):
                    K.set_value(state, np.zeros((batch_size, dim)))
            else:
                K.set_value(self.states[0],
                            np.zeros((batch_size, self.cell.state_size)))
        else:
            if not isinstance(states, (list, tuple)):
                states = [states]
            if len(states) != len(self.states):
                raise ValueError('Layer ' + self.name + ' expects ' +
                                 str(len(self.states)) + ' states, '
                                 'but it received ' + str(len(states)) +
                                 ' state values. Input received: ' +
                                 str(states))
            for index, (value, state) in enumerate(zip(states, self.states)):
                if hasattr(self.cell.state_size, '__len__'):
                    dim = self.cell.state_size[index]
                else:
                    dim = self.cell.state_size
                if value.shape != (batch_size, dim):
                    raise ValueError('State ' + str(index) +
                                     ' is incompatible with layer ' +
                                     self.name + ': expected shape=' +
                                     str((batch_size, dim)) +
                                     ', found shape=' + str(value.shape))
                # TODO: consider batch calls to `set_value`.
                K.set_value(state, value)
def get_config(self):
config = {'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll}
if self._num_constants is not None:
config['num_constants'] = self._num_constants
cell_config = self.cell.get_config()
config['cell'] = {'class_name': self.cell.__class__.__name__,
'config': cell_config}
base_config = super(RNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from . import deserialize as deserialize_layer
cell = deserialize_layer(config.pop('cell'),
custom_objects=custom_objects)
num_constants = config.pop('num_constants', None)
layer = cls(cell, **config)
layer._num_constants = num_constants
return layer
@property
def trainable_weights(self):
if not self.trainable:
return []
if isinstance(self.cell, Layer):
return self.cell.trainable_weights
return []
@property
def non_trainable_weights(self):
if isinstance(self.cell, Layer):
if not self.trainable:
return self.cell.weights
return self.cell.non_trainable_weights
return []
@property
def losses(self):
if isinstance(self.cell, Layer):
return self.cell.losses
return []
def get_losses_for(self, inputs=None):
if isinstance(self.cell, Layer):
cell_losses = self.cell.get_losses_for(inputs)
return cell_losses + super(RNN, self).get_losses_for(inputs)
return super(RNN, self).get_losses_for(inputs)
class SimpleRNNCell(Layer):
"""Cell class for SimpleRNN.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
"""
    def __init__(self, units,
                 activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        """Resolves all hyperparameters via the Keras getter functions and
        stores them; weights are created later in `build`."""
        super(SimpleRNNCell, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Clamp dropout fractions into [0, 1].
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        # Single state whose size equals the output size.
        self.state_size = self.units
        # Dropout masks are generated lazily in call() and cached per batch.
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
    def build(self, input_shape):
        """Creates the input kernel, recurrent kernel and optional bias."""
        # kernel: maps the input at time t into the output space.
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        # recurrent_kernel: maps the previous output back into the cell.
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units,),
                                        name='bias',
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True
    def call(self, inputs, states, training=None):
        """One recurrence step: `output = act(W x + U h_prev + b)`.

        Returns `(output, [output])` — the single state is the output itself.
        """
        prev_output = states[0]
        # Lazily create (and cache) dropout masks for this batch.
        if 0 < self.dropout < 1 and self._dropout_mask is None:
            self._dropout_mask = _generate_dropout_mask(K.shape(inputs),
                                                        self.dropout,
                                                        training=training)
        if (0 < self.recurrent_dropout < 1 and
                self._recurrent_dropout_mask is None):
            self._recurrent_dropout_mask = _generate_dropout_mask(
                [K.shape(inputs)[0], self.units],
                self.recurrent_dropout,
                training=training)
        dp_mask = self._dropout_mask
        rec_dp_mask = self._recurrent_dropout_mask
        if dp_mask is not None:
            h = K.dot(inputs * dp_mask, self.kernel)
        else:
            h = K.dot(inputs, self.kernel)
        if self.bias is not None:
            h = K.bias_add(h, self.bias)
        if rec_dp_mask is not None:
            prev_output *= rec_dp_mask
        output = h + K.dot(prev_output, self.recurrent_kernel)
        if self.activation is not None:
            output = self.activation(output)
        # Properly set learning phase on output tensor.
        if 0 < self.dropout + self.recurrent_dropout:
            if training is None:
                output._uses_learning_phase = True
        return output, [output]
class SimpleRNN(RNN):
"""Fully-connected RNN where the output is to be fed back to input.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
"""
    @interfaces.legacy_recurrent_support
    def __init__(self, units,
                 activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):
        """Builds a `SimpleRNNCell` from the hyperparameters and wraps it in
        the base `RNN` layer; dropout is disabled on unsupported backends."""
        # `implementation` is a deprecated legacy argument; accept and drop it.
        if 'implementation' in kwargs:
            kwargs.pop('implementation')
            warnings.warn('The `implementation` argument '
                          'in `SimpleRNN` has been deprecated. '
                          'Please remove it from your layer call.')
        if K.backend() == 'cntk':
            if not kwargs.get('unroll') and (dropout > 0 or recurrent_dropout > 0):
                warnings.warn(
                    'RNN dropout is not supported with the CNTK backend '
                    'when using dynamic RNNs (i.e. non-unrolled). '
                    'You can either set `unroll=True`, '
                    'set `dropout` and `recurrent_dropout` to 0, '
                    'or use the TensorFlow backend.')
                dropout = 0.
                recurrent_dropout = 0.
        if K.backend() == 'theano':
            warnings.warn(
                'RNN dropout is no longer supported with the Theano backend '
                'due to technical limitations. '
                'You can either set `dropout` and `recurrent_dropout` to 0, '
                'or use the TensorFlow backend.')
            dropout = 0.
            recurrent_dropout = 0.
        cell = SimpleRNNCell(units,
                             activation=activation,
                             use_bias=use_bias,
                             kernel_initializer=kernel_initializer,
                             recurrent_initializer=recurrent_initializer,
                             bias_initializer=bias_initializer,
                             kernel_regularizer=kernel_regularizer,
                             recurrent_regularizer=recurrent_regularizer,
                             bias_regularizer=bias_regularizer,
                             kernel_constraint=kernel_constraint,
                             recurrent_constraint=recurrent_constraint,
                             bias_constraint=bias_constraint,
                             dropout=dropout,
                             recurrent_dropout=recurrent_dropout)
        super(SimpleRNN, self).__init__(cell,
                                        return_sequences=return_sequences,
                                        return_state=return_state,
                                        go_backwards=go_backwards,
                                        stateful=stateful,
                                        unroll=unroll,
                                        **kwargs)
        # Activity regularization applies to the layer output, not the cell.
        self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(SimpleRNN, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
    # Read-only accessors delegating each hyperparameter to the wrapped cell.
    @property
    def units(self):
        return self.cell.units
    @property
    def activation(self):
        return self.cell.activation
    @property
    def use_bias(self):
        return self.cell.use_bias
    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer
    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer
    @property
    def bias_initializer(self):
        return self.cell.bias_initializer
    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer
    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer
    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer
    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint
    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint
    @property
    def bias_constraint(self):
        return self.cell.bias_constraint
    @property
    def dropout(self):
        return self.cell.dropout
    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout
    def get_config(self):
        """Serializes the layer with the cell's hyperparameters flattened in
        (the nested 'cell' entry from the base class is removed)."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(SimpleRNN, self).get_config()
        # Drop the nested cell config: __init__ rebuilds the cell itself.
        del base_config['cell']
        return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config:
config.pop('implementation')
return cls(**config)
class GRUCell(Layer):
"""Cell class for the GRU layer.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
"""
def __init__(self, units,
             activation='tanh',
             recurrent_activation='hard_sigmoid',
             use_bias=True,
             kernel_initializer='glorot_uniform',
             recurrent_initializer='orthogonal',
             bias_initializer='zeros',
             kernel_regularizer=None,
             recurrent_regularizer=None,
             bias_regularizer=None,
             kernel_constraint=None,
             recurrent_constraint=None,
             bias_constraint=None,
             dropout=0.,
             recurrent_dropout=0.,
             implementation=1,
             **kwargs):
    # See the class docstring for the meaning of each argument.
    super(GRUCell, self).__init__(**kwargs)
    self.units = units
    # Resolve string identifiers (or callables) into concrete objects via
    # the `get` helpers of the corresponding Keras modules.
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Clamp dropout fractions into the valid [0, 1] range.
    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    self.implementation = implementation
    # A GRU cell carries a single hidden state vector of size `units`.
    self.state_size = self.units
    # Dropout masks are built lazily on the first `call` and cached so the
    # same mask is reused at every timestep of a sequence.
    self._dropout_mask = None
    self._recurrent_dropout_mask = None
def build(self, input_shape):
    """Create the cell's weights.

    The input kernel and the recurrent kernel each pack the three GRU
    gates side by side along the last axis, in the order:
    update gate (z) | reset gate (r) | candidate state (h).
    """
    input_dim = input_shape[-1]
    self.kernel = self.add_weight(shape=(input_dim, self.units * 3),
                                  name='kernel',
                                  initializer=self.kernel_initializer,
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units * 3),
        name='recurrent_kernel',
        initializer=self.recurrent_initializer,
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint)
    if self.use_bias:
        self.bias = self.add_weight(shape=(self.units * 3,),
                                    name='bias',
                                    initializer=self.bias_initializer,
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.bias = None
    # Per-gate views into the packed weight tensors.
    self.kernel_z = self.kernel[:, :self.units]
    self.recurrent_kernel_z = self.recurrent_kernel[:, :self.units]
    self.kernel_r = self.kernel[:, self.units: self.units * 2]
    self.recurrent_kernel_r = self.recurrent_kernel[:,
                                                    self.units:
                                                    self.units * 2]
    self.kernel_h = self.kernel[:, self.units * 2:]
    self.recurrent_kernel_h = self.recurrent_kernel[:, self.units * 2:]
    if self.use_bias:
        self.bias_z = self.bias[:self.units]
        self.bias_r = self.bias[self.units: self.units * 2]
        self.bias_h = self.bias[self.units * 2:]
    else:
        self.bias_z = None
        self.bias_r = None
        self.bias_h = None
    self.built = True
def call(self, inputs, states, training=None):
    """Advance the GRU by one timestep.

    # Arguments
        inputs: Input tensor for this timestep.
        states: List with one entry: the previous hidden state.
        training: Python boolean or None; selects the train/inference
            behaviour of dropout.

    # Returns
        A `(output, [new_state])` pair; for a GRU both are the same tensor.
    """
    h_tm1 = states[0]  # previous memory
    # Build the dropout masks once and cache them so that every timestep
    # of a sequence sees the same mask; one mask per gate (z, r, h).
    if 0 < self.dropout < 1 and self._dropout_mask is None:
        self._dropout_mask = _generate_dropout_mask(K.shape(inputs),
                                                    self.dropout,
                                                    training=training,
                                                    count=3)
    if (0 < self.recurrent_dropout < 1 and
            self._recurrent_dropout_mask is None):
        self._recurrent_dropout_mask = _generate_dropout_mask(
            [K.shape(inputs)[0], self.units],
            self.recurrent_dropout,
            training=training,
            count=3)

    # dropout matrices for input units
    dp_mask = self._dropout_mask
    # dropout matrices for recurrent units
    rec_dp_mask = self._recurrent_dropout_mask

    if self.implementation == 1:
        # Implementation 1: one small matmul per gate, allowing an
        # independent dropout mask for each gate's input.
        if 0. < self.dropout < 1.:
            inputs_z = inputs * dp_mask[0]
            inputs_r = inputs * dp_mask[1]
            inputs_h = inputs * dp_mask[2]
        else:
            inputs_z = inputs
            inputs_r = inputs
            inputs_h = inputs
        x_z = K.dot(inputs_z, self.kernel_z)
        x_r = K.dot(inputs_r, self.kernel_r)
        x_h = K.dot(inputs_h, self.kernel_h)
        if self.use_bias:
            x_z = K.bias_add(x_z, self.bias_z)
            x_r = K.bias_add(x_r, self.bias_r)
            x_h = K.bias_add(x_h, self.bias_h)

        if 0. < self.recurrent_dropout < 1.:
            h_tm1_z = h_tm1 * rec_dp_mask[0]
            h_tm1_r = h_tm1 * rec_dp_mask[1]
            h_tm1_h = h_tm1 * rec_dp_mask[2]
        else:
            h_tm1_z = h_tm1
            h_tm1_r = h_tm1
            h_tm1_h = h_tm1
        # Gate equations (Cho et al. 2014): update z, reset r, candidate hh.
        z = self.recurrent_activation(x_z + K.dot(h_tm1_z,
                                                  self.recurrent_kernel_z))
        r = self.recurrent_activation(x_r + K.dot(h_tm1_r,
                                                  self.recurrent_kernel_r))
        hh = self.activation(x_h + K.dot(r * h_tm1_h,
                                         self.recurrent_kernel_h))
    else:
        # Implementation 2: batch the gate projections into fewer, larger
        # matmuls; only a single dropout mask is applied per tensor.
        if 0. < self.dropout < 1.:
            inputs *= dp_mask[0]
        matrix_x = K.dot(inputs, self.kernel)
        if self.use_bias:
            matrix_x = K.bias_add(matrix_x, self.bias)
        if 0. < self.recurrent_dropout < 1.:
            h_tm1 *= rec_dp_mask[0]
        # z and r use the plain previous state; hh needs `r * h_tm1`, so
        # the last third of the recurrent kernel is applied separately.
        matrix_inner = K.dot(h_tm1,
                             self.recurrent_kernel[:, :2 * self.units])

        x_z = matrix_x[:, :self.units]
        x_r = matrix_x[:, self.units: 2 * self.units]
        recurrent_z = matrix_inner[:, :self.units]
        recurrent_r = matrix_inner[:, self.units: 2 * self.units]

        z = self.recurrent_activation(x_z + recurrent_z)
        r = self.recurrent_activation(x_r + recurrent_r)

        x_h = matrix_x[:, 2 * self.units:]
        recurrent_h = K.dot(r * h_tm1,
                            self.recurrent_kernel[:, 2 * self.units:])
        hh = self.activation(x_h + recurrent_h)
    # Leaky integration between the previous state and the candidate.
    h = z * h_tm1 + (1 - z) * hh
    if 0 < self.dropout + self.recurrent_dropout:
        if training is None:
            # Mark the output as learning-phase dependent so downstream
            # layers know dropout behaviour differs at train/test time.
            h._uses_learning_phase = True
    return h, [h]
class GRU(RNN):
    """Gated Recurrent Unit - Cho et al. 2014.

    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you pass None, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        recurrent_activation: Activation function to use
            for the recurrent step
            (see [activations](../activations.md)).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs.
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state.
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.
        implementation: Implementation mode, either 1 or 2.
            Mode 1 will structure its operations as a larger number of
            smaller dot products and additions, whereas mode 2 will
            batch them into fewer, larger operations. These modes will
            have different performance profiles on different hardware and
            for different applications.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
        return_state: Boolean. Whether to return the last state
            in addition to the output.
        go_backwards: Boolean (default False).
            If True, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default False). If True, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
        unroll: Boolean (default False).
            If True, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed-up a RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.

    # References
        - [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
        - [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/abs/1412.3555v1)
        - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
    """

    @interfaces.legacy_recurrent_support
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):
        if implementation == 0:
            # Fix: the original message ran the two sentences together
            # ("...`implementation=1`.Please update..."); a separating
            # space has been added.
            warnings.warn('`implementation=0` has been deprecated, '
                          'and now defaults to `implementation=1`. '
                          'Please update your layer call.')
        # Recurrent dropout is not supported on these backends; silently
        # degrading to wrong results would be worse, so warn and disable.
        if K.backend() == 'cntk':
            if not kwargs.get('unroll') and (dropout > 0 or recurrent_dropout > 0):
                warnings.warn(
                    'RNN dropout is not supported with the CNTK backend '
                    'when using dynamic RNNs (i.e. non-unrolled). '
                    'You can either set `unroll=True`, '
                    'set `dropout` and `recurrent_dropout` to 0, '
                    'or use a different backend.')
                dropout = 0.
                recurrent_dropout = 0.
        if K.backend() == 'theano':
            warnings.warn(
                'RNN dropout is no longer supported with the Theano backend '
                'due to technical limitations. '
                'You can either set `dropout` and `recurrent_dropout` to 0, '
                'or use the TensorFlow backend.')
            dropout = 0.
            recurrent_dropout = 0.
        # The layer itself is a thin RNN wrapper around a single GRUCell;
        # all per-timestep configuration lives on the cell.
        cell = GRUCell(units,
                       activation=activation,
                       recurrent_activation=recurrent_activation,
                       use_bias=use_bias,
                       kernel_initializer=kernel_initializer,
                       recurrent_initializer=recurrent_initializer,
                       bias_initializer=bias_initializer,
                       kernel_regularizer=kernel_regularizer,
                       recurrent_regularizer=recurrent_regularizer,
                       bias_regularizer=bias_regularizer,
                       kernel_constraint=kernel_constraint,
                       recurrent_constraint=recurrent_constraint,
                       bias_constraint=bias_constraint,
                       dropout=dropout,
                       recurrent_dropout=recurrent_dropout,
                       implementation=implementation)
        super(GRU, self).__init__(cell,
                                  return_sequences=return_sequences,
                                  return_state=return_state,
                                  go_backwards=go_backwards,
                                  stateful=stateful,
                                  unroll=unroll,
                                  **kwargs)
        self.activity_regularizer = regularizers.get(activity_regularizer)

    def call(self, inputs, mask=None, training=None, initial_state=None):
        return super(GRU, self).call(inputs,
                                     mask=mask,
                                     training=training,
                                     initial_state=initial_state)

    # Read-only accessors mirroring the underlying cell's configuration.
    @property
    def units(self):
        return self.cell.units

    @property
    def activation(self):
        return self.cell.activation

    @property
    def recurrent_activation(self):
        return self.cell.recurrent_activation

    @property
    def use_bias(self):
        return self.cell.use_bias

    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer

    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer

    @property
    def bias_initializer(self):
        return self.cell.bias_initializer

    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer

    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer

    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer

    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint

    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint

    @property
    def bias_constraint(self):
        return self.cell.bias_constraint

    @property
    def dropout(self):
        return self.cell.dropout

    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout

    @property
    def implementation(self):
        return self.cell.implementation

    def get_config(self):
        """Return the layer config with the cell's settings flattened in."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout,
                  'implementation': self.implementation}
        base_config = super(GRU, self).get_config()
        # The cell is rebuilt from the flat config in `__init__`, so it
        # must not be serialized alongside it.
        del base_config['cell']
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config):
        # Legacy configs may still carry the removed `implementation=0`.
        if 'implementation' in config and config['implementation'] == 0:
            config['implementation'] = 1
        return cls(**config)
class LSTMCell(Layer):
    """Cell class for the LSTM layer.

    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you pass None, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        recurrent_activation: Activation function to use
            for the recurrent step
            (see [activations](../activations.md)).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs.
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state.
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        unit_forget_bias: Boolean.
            If True, add 1 to the bias of the forget gate at initialization.
            Setting it to true will also force `bias_initializer="zeros"`.
            This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.
        implementation: Implementation mode, either 1 or 2.
            Mode 1 will structure its operations as a larger number of
            smaller dot products and additions, whereas mode 2 will
            batch them into fewer, larger operations. These modes will
            have different performance profiles on different hardware and
            for different applications.
    """

    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 **kwargs):
        # See the class docstring for the meaning of each argument.
        super(LSTMCell, self).__init__(**kwargs)
        self.units = units
        # Resolve string identifiers (or callables) into concrete objects
        # via the `get` helpers of the corresponding Keras modules.
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Clamp dropout fractions into the valid [0, 1] range.
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.implementation = implementation
        # An LSTM cell carries two states: (hidden state h, carry state c).
        self.state_size = (self.units, self.units)
        # Dropout masks are built lazily in `call` and cached so the same
        # mask is reused at every timestep of a sequence.
        self._dropout_mask = None
        self._recurrent_dropout_mask = None

    def build(self, input_shape):
        """Create the cell's weights.

        The kernels pack the four LSTM gates along the last axis, in the
        order: input (i) | forget (f) | candidate (c) | output (o).
        """
        input_dim = input_shape[-1]
        self.kernel = self.add_weight(shape=(input_dim, self.units * 4),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 4),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)

        if self.use_bias:
            if self.unit_forget_bias:
                # Initialize the forget-gate slice of the bias to ones
                # (Jozefowicz et al., 2015) and the other slices with the
                # user-provided initializer.
                def bias_initializer(shape, *args, **kwargs):
                    return K.concatenate([
                        self.bias_initializer((self.units,), *args, **kwargs),
                        initializers.Ones()((self.units,), *args, **kwargs),
                        self.bias_initializer((self.units * 2,), *args, **kwargs),
                    ])
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(shape=(self.units * 4,),
                                        name='bias',
                                        initializer=bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        # Per-gate views into the packed weight tensors.
        self.kernel_i = self.kernel[:, :self.units]
        self.kernel_f = self.kernel[:, self.units: self.units * 2]
        self.kernel_c = self.kernel[:, self.units * 2: self.units * 3]
        self.kernel_o = self.kernel[:, self.units * 3:]

        self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
        self.recurrent_kernel_f = self.recurrent_kernel[:, self.units: self.units * 2]
        self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2: self.units * 3]
        self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]

        if self.use_bias:
            self.bias_i = self.bias[:self.units]
            self.bias_f = self.bias[self.units: self.units * 2]
            self.bias_c = self.bias[self.units * 2: self.units * 3]
            self.bias_o = self.bias[self.units * 3:]
        else:
            self.bias_i = None
            self.bias_f = None
            self.bias_c = None
            self.bias_o = None
        self.built = True

    def call(self, inputs, states, training=None):
        """Advance the LSTM by one timestep.

        # Arguments
            inputs: Input tensor for this timestep.
            states: List of two tensors: previous hidden state h and
                previous carry state c.
            training: Python boolean or None; selects the train/inference
                behaviour of dropout.

        # Returns
            A `(output, [h, c])` pair, where `output` equals the new
            hidden state h.
        """
        # Build the dropout masks once and cache them so every timestep
        # sees the same mask; one mask per gate (i, f, c, o).
        if 0 < self.dropout < 1 and self._dropout_mask is None:
            self._dropout_mask = _generate_dropout_mask(K.shape(inputs),
                                                        self.dropout,
                                                        training=training,
                                                        count=4)
        if (0 < self.recurrent_dropout < 1 and
                self._recurrent_dropout_mask is None):
            self._recurrent_dropout_mask = _generate_dropout_mask(
                [K.shape(inputs)[0], self.units],
                self.recurrent_dropout,
                training=training,
                count=4)

        # dropout matrices for input units
        dp_mask = self._dropout_mask
        # dropout matrices for recurrent units
        rec_dp_mask = self._recurrent_dropout_mask

        h_tm1 = states[0]  # previous memory state
        c_tm1 = states[1]  # previous carry state

        if self.implementation == 1:
            # Implementation 1: one small matmul per gate, allowing an
            # independent dropout mask for each gate's input.
            if 0 < self.dropout < 1.:
                inputs_i = inputs * dp_mask[0]
                inputs_f = inputs * dp_mask[1]
                inputs_c = inputs * dp_mask[2]
                inputs_o = inputs * dp_mask[3]
            else:
                inputs_i = inputs
                inputs_f = inputs
                inputs_c = inputs
                inputs_o = inputs
            x_i = K.dot(inputs_i, self.kernel_i)
            x_f = K.dot(inputs_f, self.kernel_f)
            x_c = K.dot(inputs_c, self.kernel_c)
            x_o = K.dot(inputs_o, self.kernel_o)
            if self.use_bias:
                x_i = K.bias_add(x_i, self.bias_i)
                x_f = K.bias_add(x_f, self.bias_f)
                x_c = K.bias_add(x_c, self.bias_c)
                x_o = K.bias_add(x_o, self.bias_o)

            if 0 < self.recurrent_dropout < 1.:
                h_tm1_i = h_tm1 * rec_dp_mask[0]
                h_tm1_f = h_tm1 * rec_dp_mask[1]
                h_tm1_c = h_tm1 * rec_dp_mask[2]
                h_tm1_o = h_tm1 * rec_dp_mask[3]
            else:
                h_tm1_i = h_tm1
                h_tm1_f = h_tm1
                h_tm1_c = h_tm1
                h_tm1_o = h_tm1
            # Standard LSTM gate equations: input i, forget f, new carry c,
            # output o.
            i = self.recurrent_activation(x_i + K.dot(h_tm1_i,
                                                      self.recurrent_kernel_i))
            f = self.recurrent_activation(x_f + K.dot(h_tm1_f,
                                                      self.recurrent_kernel_f))
            c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1_c,
                                                            self.recurrent_kernel_c))
            o = self.recurrent_activation(x_o + K.dot(h_tm1_o,
                                                      self.recurrent_kernel_o))
        else:
            # Implementation 2: a single fused matmul for all four gates;
            # only one dropout mask can be applied per tensor.
            if 0. < self.dropout < 1.:
                inputs *= dp_mask[0]
            z = K.dot(inputs, self.kernel)
            if 0. < self.recurrent_dropout < 1.:
                h_tm1 *= rec_dp_mask[0]
            z += K.dot(h_tm1, self.recurrent_kernel)
            if self.use_bias:
                z = K.bias_add(z, self.bias)

            # Split the fused projection back into the four gate slices.
            z0 = z[:, :self.units]
            z1 = z[:, self.units: 2 * self.units]
            z2 = z[:, 2 * self.units: 3 * self.units]
            z3 = z[:, 3 * self.units:]

            i = self.recurrent_activation(z0)
            f = self.recurrent_activation(z1)
            c = f * c_tm1 + i * self.activation(z2)
            o = self.recurrent_activation(z3)

        # New hidden state: gated view of the (activated) carry.
        h = o * self.activation(c)
        if 0 < self.dropout + self.recurrent_dropout:
            if training is None:
                # Mark the output as learning-phase dependent so downstream
                # layers know dropout behaviour differs at train/test time.
                h._uses_learning_phase = True
        return h, [h, c]
class LSTM(RNN):
    """Long Short-Term Memory layer - Hochreiter 1997.

    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you pass None, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        recurrent_activation: Activation function to use
            for the recurrent step
            (see [activations](../activations.md)).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs.
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state.
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        unit_forget_bias: Boolean.
            If True, add 1 to the bias of the forget gate at initialization.
            Setting it to true will also force `bias_initializer="zeros"`.
            This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.
        implementation: Implementation mode, either 1 or 2.
            Mode 1 will structure its operations as a larger number of
            smaller dot products and additions, whereas mode 2 will
            batch them into fewer, larger operations. These modes will
            have different performance profiles on different hardware and
            for different applications.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
        return_state: Boolean. Whether to return the last state
            in addition to the output.
        go_backwards: Boolean (default False).
            If True, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default False). If True, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
        unroll: Boolean (default False).
            If True, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed-up a RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.

    # References
        - [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper)
        - [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)
        - [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
        - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
    """

    @interfaces.legacy_recurrent_support
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):
        if implementation == 0:
            # Fix: the original message ran the two sentences together
            # ("...`implementation=1`.Please update..."); a separating
            # space has been added.
            warnings.warn('`implementation=0` has been deprecated, '
                          'and now defaults to `implementation=1`. '
                          'Please update your layer call.')
        # Recurrent dropout is not supported on these backends; silently
        # degrading to wrong results would be worse, so warn and disable.
        if K.backend() == 'cntk':
            if not kwargs.get('unroll') and (dropout > 0 or recurrent_dropout > 0):
                warnings.warn(
                    'RNN dropout is not supported with the CNTK backend '
                    'when using dynamic RNNs (i.e. non-unrolled). '
                    'You can either set `unroll=True`, '
                    'set `dropout` and `recurrent_dropout` to 0, '
                    'or use a different backend.')
                dropout = 0.
                recurrent_dropout = 0.
        if K.backend() == 'theano':
            warnings.warn(
                'RNN dropout is no longer supported with the Theano backend '
                'due to technical limitations. '
                'You can either set `dropout` and `recurrent_dropout` to 0, '
                'or use the TensorFlow backend.')
            dropout = 0.
            recurrent_dropout = 0.
        # The layer itself is a thin RNN wrapper around a single LSTMCell;
        # all per-timestep configuration lives on the cell.
        cell = LSTMCell(units,
                        activation=activation,
                        recurrent_activation=recurrent_activation,
                        use_bias=use_bias,
                        kernel_initializer=kernel_initializer,
                        recurrent_initializer=recurrent_initializer,
                        unit_forget_bias=unit_forget_bias,
                        bias_initializer=bias_initializer,
                        kernel_regularizer=kernel_regularizer,
                        recurrent_regularizer=recurrent_regularizer,
                        bias_regularizer=bias_regularizer,
                        kernel_constraint=kernel_constraint,
                        recurrent_constraint=recurrent_constraint,
                        bias_constraint=bias_constraint,
                        dropout=dropout,
                        recurrent_dropout=recurrent_dropout,
                        implementation=implementation)
        super(LSTM, self).__init__(cell,
                                   return_sequences=return_sequences,
                                   return_state=return_state,
                                   go_backwards=go_backwards,
                                   stateful=stateful,
                                   unroll=unroll,
                                   **kwargs)
        self.activity_regularizer = regularizers.get(activity_regularizer)

    def call(self, inputs, mask=None, training=None, initial_state=None):
        return super(LSTM, self).call(inputs,
                                      mask=mask,
                                      training=training,
                                      initial_state=initial_state)

    # Read-only accessors mirroring the underlying cell's configuration.
    @property
    def units(self):
        return self.cell.units

    @property
    def activation(self):
        return self.cell.activation

    @property
    def recurrent_activation(self):
        return self.cell.recurrent_activation

    @property
    def use_bias(self):
        return self.cell.use_bias

    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer

    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer

    @property
    def bias_initializer(self):
        return self.cell.bias_initializer

    @property
    def unit_forget_bias(self):
        return self.cell.unit_forget_bias

    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer

    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer

    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer

    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint

    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint

    @property
    def bias_constraint(self):
        return self.cell.bias_constraint

    @property
    def dropout(self):
        return self.cell.dropout

    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout

    @property
    def implementation(self):
        return self.cell.implementation

    def get_config(self):
        """Return the layer config with the cell's settings flattened in."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'unit_forget_bias': self.unit_forget_bias,
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout,
                  'implementation': self.implementation}
        base_config = super(LSTM, self).get_config()
        # The cell is rebuilt from the flat config in `__init__`, so it
        # must not be serialized alongside it.
        del base_config['cell']
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config):
        # Legacy configs may still carry the removed `implementation=0`.
        if 'implementation' in config and config['implementation'] == 0:
            config['implementation'] = 1
        return cls(**config)
def _generate_dropout_mask(shape, rate, training=None, count=1):
    """Create dropout mask tensor(s) of the given shape.

    Each mask is a dropout-scaled tensor of ones during training and a plain
    ones tensor at inference time. When `count > 1`, a list of `count`
    independently sampled masks is returned (one per gate); otherwise a
    single mask tensor is returned.
    """
    base = K.ones(shape)

    def make_mask():
        # `K.in_train_phase` picks the dropped variant while training and
        # the all-ones tensor otherwise.
        return K.in_train_phase(lambda: K.dropout(base, rate),
                                base,
                                training=training)

    if count > 1:
        return [make_mask() for _ in range(count)]
    return make_mask()
| 43.271646 | 130 | 0.576822 |
07847e471090ec4246cc6f8240feefa8dca006bd | 28,333 | py | Python | tests/test_generics.py | ofek/pydantic | bd9c5723c676395363689549268738153e45a7e5 | [
"MIT"
] | null | null | null | tests/test_generics.py | ofek/pydantic | bd9c5723c676395363689549268738153e45a7e5 | [
"MIT"
] | 93 | 2021-06-14T06:17:35.000Z | 2022-03-28T10:04:47.000Z | tests/test_generics.py | Hultner/pydantic | 37e0e39c643e49e6a744a54c70d130c4f921f3c9 | [
"MIT"
] | null | null | null | import sys
from enum import Enum
from typing import Any, Callable, ClassVar, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union
import pytest
from pydantic import BaseModel, Field, ValidationError, root_validator, validator
from pydantic.generics import GenericModel, _generic_types_cache, iter_contained_typevars, replace_types
# Marker applied to every test below: pydantic's GenericModel requires
# Python >= 3.7, so these tests are skipped on 3.6.
skip_36 = pytest.mark.skipif(sys.version_info < (3, 7), reason='generics only supported for python 3.7 and above')
@skip_36
def test_generic_name():
    """Concrete parametrizations get a readable, parameter-qualified __name__."""
    data_type = TypeVar('data_type')

    class Result(GenericModel, Generic[data_type]):
        data: data_type

    if sys.version_info >= (3, 9):
        # PEP 585 builtin generics are rendered with the builtin spelling.
        assert Result[list[int]].__name__ == 'Result[list[int]]'
    for parametrization, expected_name in [
        (List[int], 'Result[List[int]]'),
        (int, 'Result[int]'),
    ]:
        assert Result[parametrization].__name__ == expected_name
@skip_36
def test_double_parameterize_error():
    """Parametrizing an already-concrete generic model raises TypeError."""
    data_type = TypeVar('data_type')

    class Result(GenericModel, Generic[data_type]):
        data: data_type

    concrete = Result[int]
    with pytest.raises(TypeError) as err:
        concrete[int]
    assert str(err.value) == 'Cannot parameterize a concrete instantiation of a generic model'
@skip_36
def test_value_validation():
    """Field and root validators run on concrete generic parametrizations."""
    T = TypeVar('T')

    class Response(GenericModel, Generic[T]):
        data: T

        @validator('data', each_item=True)
        def validate_value_nonzero(cls, v):
            if v == 0:
                raise ValueError('value is zero')
            return v

        @root_validator()
        def validate_sum(cls, values):
            if sum(values.get('data', {}).values()) > 5:
                raise ValueError('sum too large')
            return values

    IntMapResponse = Response[Dict[int, int]]

    # Happy path: the string value is coerced to int.
    assert IntMapResponse(data={1: '4'}).dict() == {'data': {1: 4}}

    # A non-coercible value fails the per-item type check.
    with pytest.raises(ValidationError) as err:
        IntMapResponse(data={1: 'a'})
    assert err.value.errors() == [
        {'loc': ('data', 1), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]

    # The per-item validator rejects zeros.
    with pytest.raises(ValidationError) as err:
        IntMapResponse(data={1: 0})
    assert err.value.errors() == [{'loc': ('data', 1), 'msg': 'value is zero', 'type': 'value_error'}]

    # The root validator rejects sums greater than 5.
    with pytest.raises(ValidationError) as err:
        IntMapResponse(data={1: 3, 2: 6})
    assert err.value.errors() == [{'loc': ('__root__',), 'msg': 'sum too large', 'type': 'value_error'}]
@skip_36
def test_methods_are_inherited():
class CustomGenericModel(GenericModel):
def method(self):
return self.data
T = TypeVar('T')
class Model(CustomGenericModel, Generic[T]):
data: T
instance = Model[int](data=1)
assert instance.method() == 1
@skip_36
def test_config_is_inherited():
class CustomGenericModel(GenericModel):
class Config:
allow_mutation = False
T = TypeVar('T')
class Model(CustomGenericModel, Generic[T]):
data: T
instance = Model[int](data=1)
with pytest.raises(TypeError) as exc_info:
instance.data = 2
assert str(exc_info.value) == '"Model[int]" is immutable and does not support item assignment'
@skip_36
def test_default_argument():
T = TypeVar('T')
class Result(GenericModel, Generic[T]):
data: T
other: bool = True
result = Result[int](data=1)
assert result.other is True
@skip_36
def test_default_argument_for_typevar():
T = TypeVar('T')
class Result(GenericModel, Generic[T]):
data: T = 4
result = Result[int]()
assert result.data == 4
result = Result[float]()
assert result.data == 4
result = Result[int](data=1)
assert result.data == 1
@skip_36
def test_classvar():
T = TypeVar('T')
class Result(GenericModel, Generic[T]):
data: T
other: ClassVar[int] = 1
assert Result.other == 1
assert Result[int].other == 1
assert Result[int](data=1).other == 1
assert 'other' not in Result.__fields__
@skip_36
def test_non_annotated_field():
T = TypeVar('T')
class Result(GenericModel, Generic[T]):
data: T
other = True
assert 'other' in Result.__fields__
assert 'other' in Result[int].__fields__
result = Result[int](data=1)
assert result.other is True
@skip_36
def test_must_inherit_from_generic():
with pytest.raises(TypeError) as exc_info:
class Result(GenericModel):
pass
Result[int]
assert str(exc_info.value) == 'Type Result must inherit from typing.Generic before being parameterized'
@skip_36
def test_parameters_placed_on_generic():
T = TypeVar('T')
with pytest.raises(TypeError, match='Type parameters should be placed on typing.Generic, not GenericModel'):
class Result(GenericModel[T]):
pass
@skip_36
def test_parameters_must_be_typevar():
with pytest.raises(TypeError, match='Type GenericModel must inherit from typing.Generic before being '):
class Result(GenericModel[int]):
pass
@skip_36
def test_subclass_can_be_genericized():
T = TypeVar('T')
class Result(GenericModel, Generic[T]):
pass
Result[T]
@skip_36
def test_parameter_count():
T = TypeVar('T')
S = TypeVar('S')
class Model(GenericModel, Generic[T, S]):
x: T
y: S
with pytest.raises(TypeError) as exc_info:
Model[int, int, int]
assert str(exc_info.value) == 'Too many parameters for Model; actual 3, expected 2'
with pytest.raises(TypeError) as exc_info:
Model[int]
assert str(exc_info.value) == 'Too few parameters for Model; actual 1, expected 2'
@skip_36
def test_cover_cache():
cache_size = len(_generic_types_cache)
T = TypeVar('T')
class Model(GenericModel, Generic[T]):
x: T
Model[int] # adds both with-tuple and without-tuple version to cache
assert len(_generic_types_cache) == cache_size + 2
Model[int] # uses the cache
assert len(_generic_types_cache) == cache_size + 2
@skip_36
def test_generic_config():
data_type = TypeVar('data_type')
class Result(GenericModel, Generic[data_type]):
data: data_type
class Config:
allow_mutation = False
result = Result[int](data=1)
assert result.data == 1
with pytest.raises(TypeError):
result.data = 2
@skip_36
def test_enum_generic():
T = TypeVar('T')
class MyEnum(Enum):
x = 1
y = 2
class Model(GenericModel, Generic[T]):
enum: T
Model[MyEnum](enum=MyEnum.x)
Model[MyEnum](enum=2)
@skip_36
def test_generic():
    """End-to-end check of a two-parameter generic model with field and cross-field validators."""
    data_type = TypeVar('data_type')
    error_type = TypeVar('error_type')

    class Result(GenericModel, Generic[data_type, error_type]):
        data: Optional[List[data_type]]
        error: Optional[error_type]
        positive_number: int

        @validator('error', always=True)
        def validate_error(cls, v: Optional[error_type], values: Dict[str, Any]) -> Optional[error_type]:
            # Exactly one of `data` / `error` must be provided.
            if values.get('data', None) is None and v is None:
                raise ValueError('Must provide data or error')
            if values.get('data', None) is not None and v is not None:
                raise ValueError('Must not provide both data and error')
            return v

        @validator('positive_number')
        def validate_positive_number(cls, v: int) -> int:
            if v < 0:
                raise ValueError
            return v

    class Error(BaseModel):
        message: str

    class Data(BaseModel):
        number: int
        text: str

    success1 = Result[Data, Error](data=[Data(number=1, text='a')], positive_number=1)
    assert success1.dict() == {'data': [{'number': 1, 'text': 'a'}], 'error': None, 'positive_number': 1}
    assert repr(success1) == "Result[Data, Error](data=[Data(number=1, text='a')], error=None, positive_number=1)"

    success2 = Result[Data, Error](error=Error(message='error'), positive_number=1)
    assert success2.dict() == {'data': None, 'error': {'message': 'error'}, 'positive_number': 1}
    assert repr(success2) == "Result[Data, Error](data=None, error=Error(message='error'), positive_number=1)"

    with pytest.raises(ValidationError) as exc_info:
        Result[Data, Error](error=Error(message='error'), positive_number=-1)
    # Bare `raise ValueError` produces an empty message.
    assert exc_info.value.errors() == [{'loc': ('positive_number',), 'msg': '', 'type': 'value_error'}]

    # The original repeated this exact block twice; once is enough.
    with pytest.raises(ValidationError) as exc_info:
        Result[Data, Error](data=[Data(number=1, text='a')], error=Error(message='error'), positive_number=1)
    assert exc_info.value.errors() == [
        {'loc': ('error',), 'msg': 'Must not provide both data and error', 'type': 'value_error'}
    ]
@skip_36
def test_alongside_concrete_generics():
from pydantic.generics import GenericModel
T = TypeVar('T')
class MyModel(GenericModel, Generic[T]):
item: T
metadata: Dict[str, Any]
model = MyModel[int](item=1, metadata={})
assert model.item == 1
assert model.metadata == {}
@skip_36
def test_complex_nesting():
from pydantic.generics import GenericModel
T = TypeVar('T')
class MyModel(GenericModel, Generic[T]):
item: List[Dict[Union[int, T], str]]
item = [{1: 'a', 'a': 'a'}]
model = MyModel[str](item=item)
assert model.item == item
@skip_36
def test_required_value():
T = TypeVar('T')
class MyModel(GenericModel, Generic[T]):
a: int
with pytest.raises(ValidationError) as exc_info:
MyModel[int]()
assert exc_info.value.errors() == [{'loc': ('a',), 'msg': 'field required', 'type': 'value_error.missing'}]
@skip_36
def test_optional_value():
T = TypeVar('T')
class MyModel(GenericModel, Generic[T]):
a: Optional[int] = 1
model = MyModel[int]()
assert model.dict() == {'a': 1}
@skip_36
def test_custom_schema():
T = TypeVar('T')
class MyModel(GenericModel, Generic[T]):
a: int = Field(1, description='Custom')
schema = MyModel[int].schema()
assert schema['properties']['a'].get('description') == 'Custom'
@skip_36
def test_child_schema():
T = TypeVar('T')
class Model(GenericModel, Generic[T]):
a: T
class Child(Model[T], Generic[T]):
pass
schema = Child[int].schema()
assert schema == {
'title': 'Child[int]',
'type': 'object',
'properties': {'a': {'title': 'A', 'type': 'integer'}},
'required': ['a'],
}
@skip_36
def test_custom_generic_naming():
T = TypeVar('T')
class MyModel(GenericModel, Generic[T]):
value: Optional[T]
@classmethod
def __concrete_name__(cls: Type[Any], params: Tuple[Type[Any], ...]) -> str:
param_names = [param.__name__ if hasattr(param, '__name__') else str(param) for param in params]
title = param_names[0].title()
return f'Optional{title}Wrapper'
assert repr(MyModel[int](value=1)) == 'OptionalIntWrapper(value=1)'
assert repr(MyModel[str](value=None)) == 'OptionalStrWrapper(value=None)'
@skip_36
def test_nested():
AT = TypeVar('AT')
class InnerT(GenericModel, Generic[AT]):
a: AT
inner_int = InnerT[int](a=8)
inner_str = InnerT[str](a='ate')
inner_dict_any = InnerT[Any](a={})
inner_int_any = InnerT[Any](a=7)
class OuterT_SameType(GenericModel, Generic[AT]):
i: InnerT[AT]
OuterT_SameType[int](i=inner_int)
OuterT_SameType[str](i=inner_str)
OuterT_SameType[int](i=inner_int_any) # ensure parsing the broader inner type works
with pytest.raises(ValidationError) as exc_info:
OuterT_SameType[int](i=inner_str)
assert exc_info.value.errors() == [
{'loc': ('i', 'a'), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
]
with pytest.raises(ValidationError) as exc_info:
OuterT_SameType[int](i=inner_dict_any)
assert exc_info.value.errors() == [
{'loc': ('i', 'a'), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
]
@skip_36
def test_partial_specification():
AT = TypeVar('AT')
BT = TypeVar('BT')
class Model(GenericModel, Generic[AT, BT]):
a: AT
b: BT
partial_model = Model[int, BT]
concrete_model = partial_model[str]
concrete_model(a=1, b='abc')
with pytest.raises(ValidationError) as exc_info:
concrete_model(a='abc', b=None)
assert exc_info.value.errors() == [
{'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ('b',), 'msg': 'none is not an allowed value', 'type': 'type_error.none.not_allowed'},
]
@skip_36
def test_partial_specification_with_inner_typevar():
AT = TypeVar('AT')
BT = TypeVar('BT')
class Model(GenericModel, Generic[AT, BT]):
a: List[AT]
b: List[BT]
partial_model = Model[str, BT]
assert partial_model.__concrete__ is False
concrete_model = partial_model[int]
assert concrete_model.__concrete__ is True
# nested resolution of partial models should work as expected
nested_resolved = concrete_model(a=[123], b=['456'])
assert nested_resolved.a == ['123']
assert nested_resolved.b == [456]
@skip_36
def test_partial_specification_name():
AT = TypeVar('AT')
BT = TypeVar('BT')
class Model(GenericModel, Generic[AT, BT]):
a: AT
b: BT
partial_model = Model[int, BT]
assert partial_model.__name__ == 'Model[int, BT]'
concrete_model = partial_model[str]
assert concrete_model.__name__ == 'Model[int, BT][str]'
@skip_36
def test_partial_specification_instantiation():
AT = TypeVar('AT')
BT = TypeVar('BT')
class Model(GenericModel, Generic[AT, BT]):
a: AT
b: BT
partial_model = Model[int, BT]
partial_model(a=1, b=2)
partial_model(a=1, b='a')
with pytest.raises(ValidationError) as exc_info:
partial_model(a='a', b=2)
assert exc_info.value.errors() == [
{'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
]
@skip_36
def test_partial_specification_instantiation_bounded():
AT = TypeVar('AT')
BT = TypeVar('BT', bound=int)
class Model(GenericModel, Generic[AT, BT]):
a: AT
b: BT
Model(a=1, b=1)
with pytest.raises(ValidationError) as exc_info:
Model(a=1, b='a')
assert exc_info.value.errors() == [
{'loc': ('b',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
]
partial_model = Model[int, BT]
partial_model(a=1, b=1)
with pytest.raises(ValidationError) as exc_info:
partial_model(a=1, b='a')
assert exc_info.value.errors() == [
{'loc': ('b',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
]
@skip_36
def test_typevar_parametrization():
AT = TypeVar('AT')
BT = TypeVar('BT')
class Model(GenericModel, Generic[AT, BT]):
a: AT
b: BT
CT = TypeVar('CT', bound=int)
DT = TypeVar('DT', bound=int)
with pytest.raises(ValidationError) as exc_info:
Model[CT, DT](a='a', b='b')
assert exc_info.value.errors() == [
{'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ('b',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
]
@skip_36
def test_multiple_specification():
AT = TypeVar('AT')
BT = TypeVar('BT')
class Model(GenericModel, Generic[AT, BT]):
a: AT
b: BT
CT = TypeVar('CT')
partial_model = Model[CT, CT]
concrete_model = partial_model[str]
with pytest.raises(ValidationError) as exc_info:
concrete_model(a=None, b=None)
assert exc_info.value.errors() == [
{'loc': ('a',), 'msg': 'none is not an allowed value', 'type': 'type_error.none.not_allowed'},
{'loc': ('b',), 'msg': 'none is not an allowed value', 'type': 'type_error.none.not_allowed'},
]
@skip_36
def test_generic_subclass_of_concrete_generic():
T = TypeVar('T')
U = TypeVar('U')
class GenericBaseModel(GenericModel, Generic[T]):
data: T
class GenericSub(GenericBaseModel[int], Generic[U]):
extra: U
ConcreteSub = GenericSub[int]
with pytest.raises(ValidationError):
ConcreteSub(data=2, extra='wrong')
with pytest.raises(ValidationError):
ConcreteSub(data='wrong', extra=2)
ConcreteSub(data=2, extra=3)
@skip_36
def test_generic_model_pickle(create_module):
# Using create_module because pickle doesn't support
# objects with <locals> in their __qualname__ (e. g. defined in function)
@create_module
def module():
import pickle
from typing import Generic, TypeVar
from pydantic import BaseModel
from pydantic.generics import GenericModel
t = TypeVar('t')
class Model(BaseModel):
a: float
b: int = 10
class MyGeneric(GenericModel, Generic[t]):
value: t
original = MyGeneric[Model](value=Model(a='24'))
dumped = pickle.dumps(original)
loaded = pickle.loads(dumped)
assert loaded.value.a == original.value.a == 24
assert loaded.value.b == original.value.b == 10
assert loaded == original
@skip_36
def test_generic_model_from_function_pickle_fail(create_module):
@create_module
def module():
import pickle
from typing import Generic, TypeVar
import pytest
from pydantic import BaseModel
from pydantic.generics import GenericModel
t = TypeVar('t')
class Model(BaseModel):
a: float
b: int = 10
class MyGeneric(GenericModel, Generic[t]):
value: t
def get_generic(t):
return MyGeneric[t]
original = get_generic(Model)(value=Model(a='24'))
with pytest.raises(pickle.PicklingError):
pickle.dumps(original)
@skip_36
def test_generic_model_redefined_without_cache_fail(create_module, monkeypatch):
# match identity checker otherwise we never get to the redefinition check
monkeypatch.setattr('pydantic.generics.all_identical', lambda left, right: False)
@create_module
def module():
from typing import Generic, TypeVar
from pydantic import BaseModel
from pydantic.generics import GenericModel, _generic_types_cache
t = TypeVar('t')
class MyGeneric(GenericModel, Generic[t]):
value: t
class Model(BaseModel):
...
concrete = MyGeneric[Model]
_generic_types_cache.clear()
second_concrete = MyGeneric[Model]
class Model(BaseModel): # same name, but type different, so it's not in cache
...
third_concrete = MyGeneric[Model]
assert concrete is not second_concrete
assert concrete is not third_concrete
assert second_concrete is not third_concrete
assert globals()['MyGeneric[Model]'] is concrete
assert globals()['MyGeneric[Model]_'] is second_concrete
assert globals()['MyGeneric[Model]__'] is third_concrete
def test_get_caller_frame_info(create_module):
@create_module
def module():
from pydantic.generics import get_caller_frame_info
def function():
assert get_caller_frame_info() == (__name__, True)
another_function()
def another_function():
assert get_caller_frame_info() == (__name__, False)
third_function()
def third_function():
assert get_caller_frame_info() == (__name__, False)
function()
def test_get_caller_frame_info_called_from_module(create_module):
@create_module
def module():
from unittest.mock import patch
import pytest
from pydantic.generics import get_caller_frame_info
with pytest.raises(RuntimeError, match='This function must be used inside another function'):
with patch('sys._getframe', side_effect=ValueError('getframe_exc')):
get_caller_frame_info()
def test_get_caller_frame_info_when_sys_getframe_undefined():
from pydantic.generics import get_caller_frame_info
getframe = sys._getframe
del sys._getframe
try:
assert get_caller_frame_info() == (None, False)
finally: # just to make sure we always setting original attribute back
sys._getframe = getframe
@skip_36
def test_iter_contained_typevars():
T = TypeVar('T')
T2 = TypeVar('T2')
class Model(GenericModel, Generic[T]):
a: T
assert list(iter_contained_typevars(Model[T])) == [T]
assert list(iter_contained_typevars(Optional[List[Union[str, Model[T]]]])) == [T]
assert list(iter_contained_typevars(Optional[List[Union[str, Model[int]]]])) == []
assert list(iter_contained_typevars(Optional[List[Union[str, Model[T], Callable[[T2, T], str]]]])) == [T, T2, T]
@skip_36
def test_nested_identity_parameterization():
T = TypeVar('T')
T2 = TypeVar('T2')
class Model(GenericModel, Generic[T]):
a: T
assert Model[T][T][T] is Model
assert Model[T] is Model
assert Model[T2] is not Model
@skip_36
def test_replace_types():
T = TypeVar('T')
class Model(GenericModel, Generic[T]):
a: T
assert replace_types(T, {T: int}) is int
assert replace_types(List[Union[str, list, T]], {T: int}) == List[Union[str, list, int]]
assert replace_types(Callable, {T: int}) == Callable
assert replace_types(Callable[[int, str, T], T], {T: int}) == Callable[[int, str, int], int]
assert replace_types(T, {}) is T
assert replace_types(Model[List[T]], {T: int}) == Model[List[T]][int]
assert replace_types(T, {}) is T
assert replace_types(Type[T], {T: int}) == Type[int]
assert replace_types(Model[T], {T: T}) == Model[T]
if sys.version_info >= (3, 9):
# Check generic aliases (subscripted builtin types) to make sure they
# resolve correctly (don't get translated to typing versions for
# example)
assert replace_types(list[Union[str, list, T]], {T: int}) == list[Union[str, list, int]]
@skip_36
def test_replace_types_identity_on_unchanged():
T = TypeVar('T')
U = TypeVar('U')
type_ = List[Union[str, Callable[[list], Optional[str]], U]]
assert replace_types(type_, {T: int}) is type_
@skip_36
def test_deep_generic():
T = TypeVar('T')
S = TypeVar('S')
R = TypeVar('R')
class OuterModel(GenericModel, Generic[T, S, R]):
a: Dict[R, Optional[List[T]]]
b: Optional[Union[S, R]]
c: R
d: float
class InnerModel(GenericModel, Generic[T, R]):
c: T
d: R
class NormalModel(BaseModel):
e: int
f: str
inner_model = InnerModel[int, str]
generic_model = OuterModel[inner_model, NormalModel, int]
inner_models = [inner_model(c=1, d='a')]
generic_model(a={1: inner_models, 2: None}, b=None, c=1, d=1.5)
generic_model(a={}, b=NormalModel(e=1, f='a'), c=1, d=1.5)
generic_model(a={}, b=1, c=1, d=1.5)
assert InnerModel.__concrete__ is False
assert inner_model.__concrete__ is True
@skip_36
def test_deep_generic_with_inner_typevar():
T = TypeVar('T')
class OuterModel(GenericModel, Generic[T]):
a: List[T]
class InnerModel(OuterModel[T], Generic[T]):
pass
assert InnerModel[int].__concrete__ is True
assert InnerModel.__concrete__ is False
with pytest.raises(ValidationError):
InnerModel[int](a=['wrong'])
assert InnerModel[int](a=['1']).a == [1]
@skip_36
def test_deep_generic_with_referenced_generic():
T = TypeVar('T')
R = TypeVar('R')
class ReferencedModel(GenericModel, Generic[R]):
a: R
class OuterModel(GenericModel, Generic[T]):
a: ReferencedModel[T]
class InnerModel(OuterModel[T], Generic[T]):
pass
assert InnerModel[int].__concrete__ is True
assert InnerModel.__concrete__ is False
with pytest.raises(ValidationError):
InnerModel[int](a={'a': 'wrong'})
assert InnerModel[int](a={'a': 1}).a.a == 1
@skip_36
def test_deep_generic_with_referenced_inner_generic():
T = TypeVar('T')
class ReferencedModel(GenericModel, Generic[T]):
a: T
class OuterModel(GenericModel, Generic[T]):
a: Optional[List[Union[ReferencedModel[T], str]]]
class InnerModel(OuterModel[T], Generic[T]):
pass
assert InnerModel[int].__concrete__ is True
assert InnerModel.__concrete__ is False
with pytest.raises(ValidationError):
InnerModel[int](a=['s', {'a': 'wrong'}])
assert InnerModel[int](a=['s', {'a': 1}]).a[1].a == 1
assert InnerModel[int].__fields__['a'].outer_type_ == List[Union[ReferencedModel[int], str]]
assert (InnerModel[int].__fields__['a'].sub_fields[0].sub_fields[0].outer_type_.__fields__['a'].outer_type_) == int
@skip_36
def test_deep_generic_with_multiple_typevars():
T = TypeVar('T')
U = TypeVar('U')
class OuterModel(GenericModel, Generic[T]):
data: List[T]
class InnerModel(OuterModel[T], Generic[U, T]):
extra: U
ConcreteInnerModel = InnerModel[int, float]
assert ConcreteInnerModel.__fields__['data'].outer_type_ == List[float]
assert ConcreteInnerModel.__fields__['extra'].outer_type_ == int
assert ConcreteInnerModel(data=['1'], extra='2').dict() == {'data': [1.0], 'extra': 2}
@skip_36
def test_deep_generic_with_multiple_inheritance():
    """Generic bases with disjoint typevars must each be resolved on the concrete subclass."""
    K = TypeVar('K')
    V = TypeVar('V')
    T = TypeVar('T')

    class OuterModelA(GenericModel, Generic[K, V]):
        data: Dict[K, V]

    class OuterModelB(GenericModel, Generic[T]):
        stuff: List[T]

    class InnerModel(OuterModelA[K, V], OuterModelB[T], Generic[K, V, T]):
        extra: int

    ConcreteInnerModel = InnerModel[int, float, str]
    assert ConcreteInnerModel.__fields__['data'].outer_type_ == Dict[int, float]
    assert ConcreteInnerModel.__fields__['stuff'].outer_type_ == List[str]
    assert ConcreteInnerModel.__fields__['extra'].outer_type_ == int
    # The original compared without `assert`, so the expression was a no-op
    # and coercion of the inherited fields was never actually verified.
    assert ConcreteInnerModel(data={1.1: '5'}, stuff=[123], extra=5).dict() == {
        'data': {1: 5},
        'stuff': ['123'],
        'extra': 5,
    }
@skip_36
def test_generic_with_referenced_generic_type_1():
T = TypeVar('T')
class ModelWithType(GenericModel, Generic[T]):
# Type resolves to type origin of "type" which is non-subscriptible for
# python < 3.9 so we want to make sure it works for other versions
some_type: Type[T]
class ReferenceModel(GenericModel, Generic[T]):
abstract_base_with_type: ModelWithType[T]
ReferenceModel[int]
@skip_36
def test_generic_with_referenced_nested_typevar():
T = TypeVar('T')
class ModelWithType(GenericModel, Generic[T]):
# Type resolves to type origin of "collections.abc.Sequence" which is
# non-subscriptible for
# python < 3.9 so we want to make sure it works for other versions
some_type: Sequence[T]
class ReferenceModel(GenericModel, Generic[T]):
abstract_base_with_type: ModelWithType[T]
ReferenceModel[int]
@skip_36
def test_generic_with_callable():
    """A Callable-typed field must not prevent concrete parameterization."""
    T = TypeVar('T')

    class Model(GenericModel, Generic[T]):
        # Callable is a test for any type that accepts a list as an argument
        some_callable: Callable[[Optional[int], T], None]

    # These were bare expressions in the original, so nothing was checked.
    assert Model[str].__concrete__ is True
    assert Model.__concrete__ is False
@skip_36
def test_generic_with_partial_callable():
    """Partial parameterization alongside a Callable field keeps the remaining typevar."""
    T = TypeVar('T')
    U = TypeVar('U')

    class Model(GenericModel, Generic[T, U]):
        t: T
        u: U
        # Callable is a test for any type that accepts a list as an argument
        some_callable: Callable[[Optional[int], str], None]

    # These were bare expressions in the original, so nothing was checked;
    # the last one also claimed False for a fully parameterized model, which
    # contradicts the concrete/partial behavior asserted elsewhere in this file.
    assert Model[str, U].__concrete__ is False
    assert list(Model[str, U].__parameters__) == [U]
    assert Model[str, int].__concrete__ is True
| 27.832024 | 119 | 0.638337 |
0029ad751b4c7904f152bcc5666a2c3b07310cd3 | 139 | py | Python | estimates_scraper/__main__.py | OpenGovAus/estimates-scraper | be3434b07a658436d095b7fce04a888124350881 | [
"MIT"
] | null | null | null | estimates_scraper/__main__.py | OpenGovAus/estimates-scraper | be3434b07a658436d095b7fce04a888124350881 | [
"MIT"
] | null | null | null | estimates_scraper/__main__.py | OpenGovAus/estimates-scraper | be3434b07a658436d095b7fce04a888124350881 | [
"MIT"
] | null | null | null | import json
if __name__ == '__main__':
import estimates_scraper
print(json.dumps(estimates_scraper.scrape_committees(), indent=2)) | 27.8 | 70 | 0.76259 |
4b14c4bf1669fba39b874a7eaa022ff522f3a5c9 | 2,208 | gyp | Python | client/client.gyp | LaudateCorpus1/crashpad | d7798a4e284456702ce154e00e8adc2846f3a4f3 | [
"Apache-2.0"
] | 37 | 2016-04-08T15:13:05.000Z | 2022-02-14T18:59:51.000Z | client/client.gyp | LaudateCorpus1/crashpad | d7798a4e284456702ce154e00e8adc2846f3a4f3 | [
"Apache-2.0"
] | 3 | 2017-01-17T08:42:34.000Z | 2017-08-28T22:53:36.000Z | client/client.gyp | atom/crashpad | d7798a4e284456702ce154e00e8adc2846f3a4f3 | [
"Apache-2.0"
] | 22 | 2016-04-27T21:07:06.000Z | 2022-02-02T08:04:06.000Z | # Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'includes': [
'../build/crashpad.gypi',
],
'targets': [
{
'target_name': 'crashpad_client',
'type': 'static_library',
'dependencies': [
'../compat/compat.gyp:crashpad_compat',
'../third_party/mini_chromium/mini_chromium.gyp:base',
'../util/util.gyp:crashpad_util',
],
'include_dirs': [
'..',
],
'sources': [
'annotation.cc',
'annotation.h',
'annotation_list.cc',
'annotation_list.h',
'capture_context_mac.S',
'capture_context_mac.h',
'crash_report_database.cc',
'crash_report_database.h',
'crash_report_database_mac.mm',
'crash_report_database_win.cc',
'crashpad_client.h',
'crashpad_client_mac.cc',
'crashpad_client_win.cc',
'crashpad_info.cc',
'crashpad_info.h',
'prune_crash_reports.cc',
'prune_crash_reports.h',
'settings.cc',
'settings.h',
'simple_string_dictionary.h',
'simple_address_range_bag.h',
'simulate_crash.h',
'simulate_crash_mac.cc',
'simulate_crash_mac.h',
'simulate_crash_win.h',
],
'conditions': [
['OS=="win"', {
'link_settings': {
'libraries': [
'-lrpcrt4.lib',
],
},
}],
['OS!="mac"', {
'sources!': [
'capture_context_mac.S',
],
}],
],
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
},
],
}
| 27.6 | 74 | 0.566123 |
dc3eb78a73f9f930cf1db967622e846ee244cc99 | 558 | py | Python | src/player/Logger.py | cybekRT/DJPajton2 | e461070269a5b39bac6c8df1eb739ea79c7099b5 | [
"MIT"
] | null | null | null | src/player/Logger.py | cybekRT/DJPajton2 | e461070269a5b39bac6c8df1eb739ea79c7099b5 | [
"MIT"
] | null | null | null | src/player/Logger.py | cybekRT/DJPajton2 | e461070269a5b39bac6c8df1eb739ea79c7099b5 | [
"MIT"
] | null | null | null | _logger = None
class Logger():
@staticmethod
def instance():
global _logger
if _logger is None:
_logger = Logger()
return _logger
def __init__(self):
global _logger
if _logger is not None:
raise "Logger was initialized!"
self.messages = []
def Latest(self):
if len(self.messages) == 0:
return -1
return len(self.messages) - 1
def GetSince(self, index):
if index < 0:
return self.messages
return self.messages[index+1:]
def Log(self, msg):
print("Logged: {}".format(msg))
self.messages.append(msg) | 16.909091 | 34 | 0.655914 |
63f2e15c0066b57e1f71572de1ac3862fffcaa94 | 7,381 | py | Python | python/archive/repeater_autotime_090118.py | hmallen/xbee_garage | a3ff4a0fd596b0c7549ccf84737d47076459cd58 | [
"MIT"
] | null | null | null | python/archive/repeater_autotime_090118.py | hmallen/xbee_garage | a3ff4a0fd596b0c7549ccf84737d47076459cd58 | [
"MIT"
] | null | null | null | python/archive/repeater_autotime_090118.py | hmallen/xbee_garage | a3ff4a0fd596b0c7549ccf84737d47076459cd58 | [
"MIT"
] | null | null | null | import configparser
import datetime
import logging
import serial
# import sys
import time
import cayenne.client
from pymongo import MongoClient
# Module-wide logger; everything is emitted at DEBUG level.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Seconds between mqtt_client.loop() calls in the main poll loop.
mqtt_loop_interval = 10
# Credentials and endpoints are read from an INI file outside the repo tree.
config_path = '../config/config.ini'
config = configparser.ConfigParser()
config.read(config_path)
mqtt_username = config['mqtt']['username']
mqtt_password = config['mqtt']['password']
mqtt_client_id = config['mqtt']['client_id']
mqtt_client = cayenne.client.CayenneMQTTClient()
mongo_uri = config['mongodb']['uri']
mongo_db = config['mongodb']['database']
# Collection names for event logging and state snapshots.
collections = {
    'log': config['mongodb']['collection_log'],
    'state': config['mongodb']['collection_state']
}
db = MongoClient(mongo_uri)[mongo_db]
# XBee radio attached over USB serial; 9600 baud presumably matches the
# radio/controller configuration -- confirm before changing.
ser = serial.Serial(
    port='/dev/ttyUSB0',
    baudrate=9600,
    # stimeout=0.5
)
# Trigger actions from received MQTT messages
def trigger_action(target, action=None):
    """Send a command to the garage controller over the serial link.

    target -- only 'door' is currently supported.
    action -- 'open', 'close', or None; None sends the generic toggle
              command (encoded as 'F' in the wire protocol).
    """
    if target == 'door':
        logger.debug('Constructing door ' + str(action) + ' command.')
        # Wire protocol: '@D<O|C|F>^' -- start marker, door opcode,
        # action code, end marker. (Original used `action == None`.)
        action_codes = {'open': 'O', 'close': 'C', None: 'F'}
        if action in action_codes:
            door_command = ('@D' + action_codes[action] + '^').encode('utf-8')
            logger.debug('door_command: ' + str(door_command))
            logger.info('Sending door ' + str(action) + ' command.')
            ser.write(door_command)
        else:
            # Preserve both of the original failure-path log messages.
            logger.error('Unrecognized action variable passed to trigger_action().')
            logger.error('Error while constructing command. No command sent.')
    else:
        logger.error('Unknown target variable passed to trigger_action().')
# Function for updating dashboard values via MQTT
def mqtt_update(variable, value):
    """Push one state variable to its Cayenne dashboard channel.

    Variables without a channel mapping ('heartbeatLast' and any unknown
    name) are skipped. The original fell through for unknown names with
    update_ready still True and called virtualWrite with channel=None;
    they are now skipped like 'heartbeatLast'.
    """
    # Dashboard channel per state variable; all use digital ('d') units.
    channels = {
        'doorOpen': 1,
        'lockStateDoor': 2,
        'lockStateButton': 3,
        'doorAlarmActive': 4,
    }
    data_type = 'null'
    data_unit = 'd'
    channel = channels.get(variable)
    if channel is not None:
        logger.debug('Updating "' + variable + '" via MQTT.')
        mqtt_client.virtualWrite(channel, value, data_type, data_unit)
    else:
        logger.debug('Skipping "' + variable + '" update.')
# Callback for messages received from Cayenne
def on_message(msg):
    """Handle an incoming Cayenne MQTT message.

    Channel 5 is wired to the dashboard's door button; any message on it
    triggers the door via the serial link. Messages on other channels are
    only logged.
    """
    logger.info('msg [on_message]: ' + str(msg))
    logger.debug('msg.client_id: ' + msg.client_id)
    logger.debug('msg.topic: ' + msg.topic)
    logger.debug('msg.channel: ' + str(msg.channel))
    logger.debug('msg.msg_id: ' + msg.msg_id)
    logger.debug('msg.value: ' + msg.value)
    # If door button channel, trigger door open/close via serial
    if msg.channel == 5:
        logger.info('Received door trigger command via MQTT.')
        trigger_action('door')
def process_message(msg):
    """Classify a framed serial message and decide how to handle it.

    Framing, by first/last byte of the frame:
      '@'...'^'  command remote --> controller: rebroadcast unchanged
      '^'...'@'  command controller --> remote: rebroadcast unchanged
      '&'...     state-update batch: fields separated by '%', each field
                 'name$value'; pushed to the dashboard via mqtt_update()
      '#TS#'     time-sync request: reply with the current local time

    Returns a dict with keys 'success' (bool), 'rebroadcast' (bool) and
    'message' (the raw bytes to rebroadcast, or None).
    """
    process_return = {'success': True, 'rebroadcast': False, 'message': None}
    try:
        msg_decoded = msg.decode()
        logger.debug('msg_decoded: ' + msg_decoded)
        start_char = msg_decoded[0]
        logger.debug('start_char: ' + start_char)
        end_char = msg_decoded[-1]
        logger.debug('end_char: ' + end_char)
        if start_char == '@':
            if end_char == '^':
                process_return['message'] = msg
                process_return['rebroadcast'] = True
            else:
                logger.error('Unrecognized end character in command from remote --> controller.')
        elif start_char == '^':
            if end_char == '@':
                process_return['message'] = msg
                process_return['rebroadcast'] = True
            else:
                logger.error('Unrecognized end character in command from controller --> remote')
        elif start_char == '&':
            # Slicing [3:-1] assumes a fixed 3-byte header before the first
            # field -- TODO confirm against the controller firmware.
            updates = [(var.split('$')[0], var.split('$')[1]) for var in msg_decoded[3:-1].split('%')]
            logger.debug('updates: ' + str(updates))
            # List comprehension used purely for its side effects.
            [mqtt_update(update[0], update[1]) for update in updates]
        elif start_char == '#':
            if msg_decoded == '#TS#':
                # Reply with local time as '#m<mm>d<dd>y<YYYY>H<HH>M<MM>S<SS>#'.
                dt_current = datetime.datetime.now()
                time_message = dt_current.strftime('#m%md%dy%YH%HM%MS%S#')
                bytes_written = ser.write(time_message.encode('utf-8'))
                logger.debug('bytes_written: ' + str(bytes_written))
            else:
                logger.error('Unrecognized time sync message received from controller.')
    except Exception as e:
        logger.exception(e)
        process_return['success'] = False
    finally:
        # NOTE(review): `return` inside `finally` would also swallow any
        # exception not handled above; harmless today because the except
        # clause already catches Exception, but keep in mind when refactoring.
        return process_return
def update_state(key, value):
    """Store one field in the module-wide garage_state mapping."""
    garage_state.update({key: value})
def update_log():
    """Placeholder for logging state changes; intentionally not implemented yet."""
    pass
if __name__ == '__main__':
    # Flush serial receive buffer to start fresh
    if ser.in_waiting > 0:
        logger.info('Flushing serial buffer.')
        while ser.in_waiting > 0:
            c = ser.read()
            # Brief pause so trailing bytes in flight are also drained.
            time.sleep(0.1)

    # Last-known controller state; None until the controller reports values.
    garage_state = {
        'doorOpen': None,
        'lastOpened': None,
        'lockStateDoor': None,
        'lockStateButton': None,
        'doorAlarmActive': None
    }

    # Connect to Cayenne MQTT and register the inbound-message callback.
    mqtt_client.on_message = on_message
    mqtt_client.begin(mqtt_username, mqtt_password, mqtt_client_id)
    mqtt_client.loop()
    mqtt_loop_last = time.time()

    new_msg = False

    # Request starting values from controller and update
    logger.info('Requesting full data update from controller.')
    # update_request = '@UF^'
    update_request = '@UA^'
    logger.debug('update_request: ' + str(update_request))
    bytes_written = ser.write(update_request.encode('utf-8'))
    logger.debug('bytes_written: ' + str(bytes_written))

    # Main loop: assemble framed serial messages byte-by-byte, process each
    # complete frame, and service the MQTT connection periodically.
    while (True):
        if ser.in_waiting > 0:
            c = ser.read()
            # '@', '^', '&', '#' are frame delimiters (see process_message).
            if c == b'@' or c == b'^' or c == b'&' or c == b'#':
                if new_msg is False:
                    # Delimiter while idle: start collecting a new message.
                    new_msg = True
                    msg = c
                else:
                    # Delimiter while collecting: frame is complete.
                    msg += c
                    process_result = process_message(msg)
                    new_msg = False
                    if process_result['success'] is True:
                        if process_result['rebroadcast'] is True:
                            bytes_written = ser.write(process_result['message'])
                            logger.debug('bytes_written: ' + str(bytes_written))
                            # time.sleep(0.05)
                    else:
                        logger.error('Error while processing message.')
            elif new_msg is True:
                msg += c
            else:
                # Byte outside any frame: drain and discard the buffer.
                logger.warning('Orphaned character(s) in serial buffer. Flushing buffer.')
                while ser.in_waiting > 0:
                    orph_char = ser.read()
                    logger.debug('orph_char: ' + str(orph_char))
        # Keep the MQTT connection alive at the configured interval.
        if (time.time() - mqtt_loop_last) > mqtt_loop_interval:
            mqtt_client.loop()
            mqtt_loop_last = time.time()
        time.sleep(0.1)
| 30.374486 | 102 | 0.583796 |
cb9ffca95d97c0638404968a2c900121327f037e | 5,865 | py | Python | cortex/surfinfo.py | pulkitag/pycortex | f329dccbe3f04dc4556b930f284bcddd760fc76b | [
"BSD-2-Clause"
] | 1 | 2021-03-08T22:52:52.000Z | 2021-03-08T22:52:52.000Z | cortex/surfinfo.py | candleinwindsteve/pycortex | 988dc996a33f16208537a2e8ffe14e2ae3ea6647 | [
"BSD-2-Clause"
] | null | null | null | cortex/surfinfo.py | candleinwindsteve/pycortex | 988dc996a33f16208537a2e8ffe14e2ae3ea6647 | [
"BSD-2-Clause"
] | null | null | null | import os
import shlex
import shutil
import tempfile
import subprocess as sp
import numpy as np
from . import utils
from . import polyutils
from .database import db
from .xfm import Transform
def curvature(outfile, subject, smooth=20, **kwargs):
    """Compute smoothed mean curvature for both hemispheres of a subject's
    fiducial surface and save it to `outfile` as an npz with 'left' and
    'right' arrays.
    """
    surfaces = [polyutils.Surface(pts, polys)
                for pts, polys in db.get_surf(subject, "fiducial")]
    smoothed = [surf.smooth(surf.mean_curvature(), smooth) for surf in surfaces]
    np.savez(outfile, left=smoothed[0], right=smoothed[1])
def distortion(outfile, subject, type='areal', smooth=20):
    """Compute and save distortion of the flatmap relative to the fiducial
    surface. Two measures are available via `type`:

    'areal'  : per-triangle log ratio of fiducial area to flat area,
               averaged onto vertices.
               See: http://brainvis.wustl.edu/wiki/index.php/Caret:Operations/Morphing
    'metric' : per-vertex mean squared difference between fiducial and flat
               distances over neighboring vertex pairs
               (Fischl, Sereno, and Dale, 1999).

    Values are smoothed on the fiducial surface and written to `outfile`
    as an npz with 'left' and 'right' arrays.
    """
    per_hemi = []
    for hemi in ["lh", "rh"]:
        fid_pts, fid_polys = db.get_surf(subject, "fiducial", hemi)
        flat_pts, flat_polys = db.get_surf(subject, "flat", hemi)

        raw = getattr(polyutils.Distortion(flat_pts, fid_pts, flat_polys), type)
        smoothed = polyutils.Surface(fid_pts, fid_polys).smooth(raw, smooth)
        per_hemi.append(smoothed)

    np.savez(outfile, left=per_hemi[0], right=per_hemi[1])
def thickness(outfile, subject):
    """Save per-vertex cortical thickness: the Euclidean distance between
    corresponding pial and white-matter surface vertices, per hemisphere.
    """
    pia_l, pia_r = db.get_surf(subject, "pia")
    wm_l, wm_r = db.get_surf(subject, "wm")
    thick_l = np.sqrt(np.sum((pia_l[0] - wm_l[0]) ** 2, axis=1))
    thick_r = np.sqrt(np.sum((pia_r[0] - wm_r[0]) ** 2, axis=1))
    np.savez(outfile, left=thick_l, right=thick_r)
def tissots_indicatrix(outfile, sub, radius=10, spacing=50, maxfails=100):
    """Mark geodesic discs ("Tissot's indicatrices") on each hemisphere.

    Randomly picks center vertices at least `spacing` (geodesic) apart until
    no candidate remains, and flags every vertex within `radius` of a center.
    Saves per-hemisphere 0/1 vertex masks and the chosen center indices.

    NOTE(review): `maxfails` is currently unused — confirm whether it was
    meant to bound the random-selection retries.
    """
    tissots = []
    allcenters = []
    for hem in ["lh", "rh"]:
        fidpts, fidpolys = db.get_surf(sub, "fiducial", hem)
        #G = make_surface_graph(fidtri)
        surf = polyutils.Surface(fidpts, fidpolys)
        nvert = fidpts.shape[0]
        tissot_array = np.zeros((nvert,))

        # Seed with one random center; cdists caches per-center distances.
        centers = [np.random.randint(nvert)]
        cdists = [surf.geodesic_distance(centers)]
        while True:
            ## Find possible vertices: farther than `spacing` from all centers.
            mcdist = np.vstack(cdists).min(0)
            possverts = np.nonzero(mcdist > spacing)[0]
            #possverts = np.nonzero(surf.geodesic_distance(centers) > spacing)[0]
            if not len(possverts):
                break
            ## Pick random vertex
            centervert = possverts[np.random.randint(len(possverts))]
            centers.append(centervert)
            print("Adding vertex %d.." % centervert)
            dists = surf.geodesic_distance([centervert])
            cdists.append(dists)

            ## Find appropriate set of vertices within the disc radius.
            selverts = dists < radius
            tissot_array[selverts] = 1

        tissots.append(tissot_array)
        allcenters.append(np.array(centers))

    np.savez(outfile, left=tissots[0], right=tissots[1], centers=allcenters)
def flat_border(outfile, subject, height=1024):
    """Trace the flatmap boundary of a subject and save the border polylines.

    Classifies each boundary segment as medial wall (adjacent to fiducial-only
    triangles) or cut, scales the flatmap so its y-extent equals `height`
    pixels, and saves 2D polylines plus their medial-wall flags to `outfile`
    (npz keys: 'lines', 'ismwalls').

    Parameters
    ----------
    outfile : str
        Path for the output .npz file.
    subject : str
        Subject identifier in the pycortex database.
    height : int, optional
        Pixel height used to scale flatmap coordinates. Previously this was
        read from an undefined global (NameError); it is now a parameter.
    """
    import networkx as nx

    flatpts, flatpolys = db.get_surf(subject, "flat", merge=True, nudge=True)
    flatpolyset = set(map(tuple, flatpolys))

    fidpts, fidpolys = db.get_surf(subject, "fiducial", merge=True, nudge=True)
    fidpolyset = set(map(tuple, fidpolys))
    # Triangles present in the fiducial mesh but cut away from the flatmap.
    fidonlypolys = fidpolyset - flatpolyset
    fidonlyverts = np.setdiff1d(fidpolys.ravel(), flatpolys.ravel())

    def iter_surfedges(tris):
        # Yield the three undirected edges of every triangle.
        for a, b, c in tris:
            yield a, b
            yield b, c
            yield a, c

    def make_surface_graph(tris):
        graph = nx.Graph()
        graph.add_edges_from(iter_surfedges(tris))
        return graph

    bounds = [p for p in polyutils.trace_poly(polyutils.boundary_edges(flatpolys))]
    allbounds = np.hstack(bounds)

    g = make_surface_graph(fidonlypolys)
    fog = g.subgraph(fidonlyverts)
    # dict(...) works with both networkx 1.x (dict) and 2.x (DegreeView);
    # the original used .iteritems(), which is Python-2 only.
    badverts = np.array([v for v, d in dict(fog.degree()).items() if d < 2])
    g.remove_nodes_from(badverts)
    fog.remove_nodes_from(badverts)
    # Boundary vertices adjacent to fiducial-only vertices = medial wall.
    mwallset = set.union(*(set(g[v]) for v in fog.nodes())) & set(allbounds)

    mwallbounds = [np.in1d(b, mwallset) for b in bounds]
    # Indices where a boundary switches between medial wall and cut.
    changes = [np.nonzero(np.diff(b.astype(float)) != 0)[0] + 1 for b in mwallbounds]

    # Split each closed boundary into runs of constant medial-wall-ness,
    # wrapping the last vertex around to the front to close the loop.
    splitbounds = []
    for b, c in zip(bounds, changes):
        sb = []
        rb = [b[-1]] + b
        rc = [1] + (c + 1).tolist() + [len(b)]
        for ii in range(len(rc) - 1):
            sb.append(rb[rc[ii] - 1:rc[ii + 1]])
        splitbounds.append(sb)

    ismwall = [[s.mean() > 0.5 for s in np.split(mwb, c)] for mwb, c in zip(mwallbounds, changes)]

    # Scale flatmap coordinates so the y-extent equals `height` pixels.
    # (The original computed identical `lpts` and `rpts`; one array suffices.)
    aspect = height / (flatpts.max(0) - flatpts.min(0))[1]
    pts = (flatpts - flatpts.min(0)) * aspect

    ismwalls = []
    lines = []
    # zip against a 2-tuple to preserve the original behavior of only
    # processing the first two boundaries (left/right hemispheres).
    for bnds, mw, p in zip(splitbounds, ismwall, (pts, pts)):
        for pbnd, pmw in zip(bnds, mw):
            ismwalls.append(pmw)
            lines.append(p[pbnd, :2])

    np.savez(outfile, lines=lines, ismwalls=ismwalls)
| 37.596154 | 95 | 0.629156 |
7b01bb7810a6d5b34cb98a636cc9754bb4fe16be | 13,395 | py | Python | objectModel/Python/cdm/storage/syms.py | rt112000/CDM | 34bd34f9260140a8f8aa02bd87c23033f3daad4c | [
"CC-BY-4.0",
"MIT"
] | 884 | 2019-05-10T02:09:10.000Z | 2022-03-31T14:02:00.000Z | objectModel/Python/cdm/storage/syms.py | rt112000/CDM | 34bd34f9260140a8f8aa02bd87c23033f3daad4c | [
"CC-BY-4.0",
"MIT"
] | 171 | 2019-06-10T11:34:37.000Z | 2022-03-31T22:50:12.000Z | objectModel/Python/cdm/storage/syms.py | rt112000/CDM | 34bd34f9260140a8f8aa02bd87c23033f3daad4c | [
"CC-BY-4.0",
"MIT"
] | 340 | 2019-05-07T18:00:16.000Z | 2022-03-31T12:00:15.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from http import HTTPStatus
from typing import List, Optional
import json, urllib, urllib.parse
import msal
from cdm.utilities import StorageUtils
from cdm.utilities.string_utils import StringUtils
from cdm.utilities.network.cdm_http_client import CdmHttpClient
from cdm.storage.network import NetworkAdapter
from cdm.enums.azure_cloud_endpoint import AzureCloudEndpoint
from .base import StorageAdapterBase
class SymsAdapter(NetworkAdapter, StorageAdapterBase):
    """Storage adapter backed by Azure Synapse SyMS (metadata service).

    Translates CDM corpus paths (database manifests, entity documents and
    relationship documents) to and from SyMS REST API URLs of the form
    https://<endpoint>/databases/... and performs the HTTP calls, using
    either AAD client-credential auth (tenant/clientId/secret) or a
    caller-supplied token provider.
    """

    SYMS_DEFAULT_TIMEOUT = 30000           # default request timeout (ms)
    HTTP_DEFAULT_MAX_RESULTS = 100000      # default page size for listings
    API_VERSION = 'api-version=2021-04-01'
    DATABASE_MANIFEST = 'databases.manifest.cdm.json'

    def __init__(self, endpoint: Optional[str] = None, **kwargs) -> None:
        super().__init__()
        super(NetworkAdapter, self).__init__()
        super(StorageAdapterBase, self).__init__()

        # --- internal ---
        self._formatted_endpoint = None  # type: Optional[str]
        self._http_authorization = 'Authorization'
        self._http_client = CdmHttpClient()  # type: CdmHttpClient
        self._http_xms_continuation = 'x-ms-continuation'
        self._http_xms_date = 'x-ms-date'
        self._http_xms_version = 'x-ms-version'
        self._scope = ['https://dev.azuresynapse.net/.default']  # type: Optional[List[str]]
        self._type = 'Syms'

        self.http_max_results = self.HTTP_DEFAULT_MAX_RESULTS  # type: int
        self.timeout = self.SYMS_DEFAULT_TIMEOUT  # type: int

        if endpoint:
            self._endpoint = self._format_endpoint(endpoint)  # type: Optional[str]
            self._base_uri = 'https://{}/databases'.format(self._endpoint)

        self.client_id = kwargs.get('client_id', None)  # type: Optional[str]
        self.secret = kwargs.get('secret', None)  # type: Optional[str]
        self.token_provider = kwargs.get('token_provider', None)  # type: Optional[TokenProvider]
        self.azure_endpoint = kwargs.get('azure_endpoint', AzureCloudEndpoint.AZURE_PUBLIC)  # type: AzureCloudEndpoint

        # --- internal ---
        self._tenant = kwargs.get('tenant', None)  # type: Optional[str]
        self._auth_context = None  # lazily created msal app (see _build_context)

    @property
    def endpoint(self) -> str:
        return self._endpoint

    @property
    def base_uri(self) -> str:
        return self._base_uri

    @endpoint.setter
    def endpoint(self, value: str):
        self._endpoint = self._format_endpoint(value)

    @property
    def tenant(self) -> str:
        return self._tenant

    def can_read(self) -> bool:
        return True

    def can_write(self) -> bool:
        return True

    def create_adapter_path(self, corpus_path: str) -> str:
        """Map a CDM corpus path onto the corresponding SyMS REST URL."""
        formatted_corpus_path = self._format_corpus_path(corpus_path)
        if formatted_corpus_path is not None:
            # Root (or the synthetic databases manifest) maps to the databases listing.
            if formatted_corpus_path == '/':
                return '{}?{}'.format(self._base_uri, self.API_VERSION)
            if formatted_corpus_path == '/' + self.DATABASE_MANIFEST or formatted_corpus_path == self.DATABASE_MANIFEST:
                return '{}?{}'.format(self._base_uri, self.API_VERSION)

            formatted_corpus_path = StringUtils.trim_start(formatted_corpus_path, '/')
            paths = formatted_corpus_path.split('/')
            if len(paths) == 2:  # 2 level is supported currently
                # paths[0]: databasename
                # paths[1]: filename
                if paths[1].endswith('.manifest.cdm.json'):
                    return '{}/{}/?{}'.format(self._base_uri, paths[0], self.API_VERSION)
                if paths[1].endswith('.cdm.json'):
                    return '{}/{}/tables/{}?{}'.format(self._base_uri, paths[0], paths[1].replace('.cdm.json', ''), self.API_VERSION)
                else:
                    raise Exception('Syms adapter: Failed to convert to adapter path from corpus path. Invalid corpus path :' + corpus_path + '. Supported file format are manifest.cdm.json and .cdm.json')
            elif len(paths) == 3:  # 3 level is supported for relationship and entitydefinitions
                # paths[0]: database name
                # paths[1]: filename
                if paths[1].endswith('.manifest.cdm.json') and paths[2] == 'relationships':
                    return '{}/{}/relationships?{}'.format(self._base_uri, paths[0], self.API_VERSION)
                elif paths[1].endswith('.manifest.cdm.json') and paths[2] == 'entitydefinition':
                    return '{}/{}/tables?{}'.format(self._base_uri, paths[0], self.API_VERSION)
                else:
                    raise Exception('Syms adapter: Failed to convert to adapter path from corpus path' + corpus_path + '. corpus path must be in following form: /<databasename>/<filename>.manifest.cdm.json/relationships or /<databasename>/<filename>.manifest.cdm.json/entitydefinition.')
            elif len(paths) == 4:  # 4 level is supported for relationship
                # paths[0]: databasename
                # paths[1]: filename
                if paths[1].endswith('.manifest.cdm.json') and paths[2] == 'relationships':
                    return '{}/{}/relationships/{}?{}'.format(self._base_uri, paths[0], paths[3], self.API_VERSION)
                else:
                    raise Exception('Syms adapter: Failed to convert to adapter path from corpus path' + corpus_path + '. + Corpus path must be in following form: /<databasename>/<filename>.manifest.cdm.json/relationships/<relationshipname>.')
            else:
                raise Exception('Syms adapter: Failed to convert to adapter path from corpus path' + corpus_path + '. Corpus path must be in following form: /<databasename>/<filename>.manifest.cdm.json/relationships/<relationshipname>, /<databasename>/<filename>.manifest.cdm.json/relationships or /<databasename>/<filename>.manifest.cdm.json/entitydefinition>.')
        return None

    def create_corpus_path(self, adapter_path: str) -> Optional[str]:
        """Map a SyMS REST URL back to a corpus path, or None if unrecognized."""
        if adapter_path:
            start_index = len('https://')
            if not adapter_path.endswith('/'):
                adapter_path = adapter_path + '/'
            end_index = adapter_path.find('/', start_index + 1)
            if end_index < start_index:
                raise Exception('Unexpected adapter path:', adapter_path)
            endpoint = self._format_endpoint(adapter_path[start_index:end_index])
            if endpoint == self._endpoint:
                corpus_path = self.convert_to_corpus_path(adapter_path[end_index + 1:])
                return corpus_path
        # Signal that we did not recognize path as one for this adapter.
        return None

    def convert_to_corpus_path(self, adapter_sub_path: str) -> Optional[str]:
        """Convert 'databases/<db>/tables/<table>?<api-version>' to '<table>.cdm.json'."""
        unescaped_path = urllib.parse.unquote(adapter_sub_path)
        # The path is of the format databases/<db>/tables/<tablename>?<api-version>
        parts = unescaped_path.split('/')
        # Fix: the original indexed the string with a tuple
        # (parts[3][0, ...rindex('?') + 1]), which raises TypeError at runtime.
        # Slice off everything from the query string onward instead.
        entity_name = parts[3][:parts[3].rindex('?')]
        return entity_name + '.cdm.json'

    async def fetch_all_files_async(self, folder_corpus_path: str) -> List[str]:
        """List all table documents under '<databasename>/' (paged via x-ms-continuation)."""
        formatted_corpus_path = self._format_corpus_path(folder_corpus_path)
        if formatted_corpus_path is None:
            return None

        result = []
        if formatted_corpus_path == '/':
            result.append(self.DATABASE_MANIFEST)
            return result

        if not formatted_corpus_path.endswith('/'):
            formatted_corpus_path = formatted_corpus_path + '/'

        formatted_corpus_path = StringUtils.trim_start(formatted_corpus_path, '/')
        paths = formatted_corpus_path.split('/')
        # paths[0]: databasename
        # paths[1]: empty as path ends with /
        if len(paths) != 2:
            # Fix: the original raised a message with a literal, un-interpolated
            # '{folderCorpusPath}' placeholder; interpolate the actual path.
            raise Exception('Syms adapter: Conversion from corpus path {} to adapter failed. Path must be in format : <databasename>/.'.format(folder_corpus_path))

        url = '{}/{}/tables?{}'.format(self._base_uri, paths[0], self.API_VERSION)

        continuation_token = None
        results = []
        while True:
            if continuation_token is None:
                request = self._build_request('{}'.format(url), 'GET')
            else:
                request = self._build_request('{}?continuation={}'.format(url, continuation_token), 'GET')

            cdm_response = await self._http_client._send_async(request, self.wait_time_callback, self.ctx)
            if cdm_response.status_code == HTTPStatus.OK:
                continuation_token = cdm_response.response_headers.get(self._http_xms_continuation)
                data = json.loads(cdm_response.content)
                for path in data['items']:
                    results.append(path['name'] + '.cdm.json')
            if continuation_token is None:
                break
        return results

    def fetch_config(self) -> str:
        """Serialize the adapter configuration to JSON (secrets are never written)."""
        result_config = {'type': self._type}
        config_object = {
            'endpoint': self.endpoint
        }

        # Check for clientId auth, we won't write secrets to JSON.
        if self.client_id and self.tenant:
            config_object['tenant'] = self.tenant
            config_object['clientId'] = self.client_id

        # Try constructing network configs.
        config_object.update(self.fetch_network_config())

        result_config['config'] = config_object
        return json.dumps(result_config)

    async def read_async(self, corpus_path: str) -> str:
        """GET the document at `corpus_path` and return its body."""
        url = self.create_adapter_path(corpus_path)
        request = self._build_request(url, 'GET')
        return await super()._read(request)

    def update_config(self, config: str):
        """Apply a JSON configuration string; 'endpoint' is required."""
        configs_json = json.loads(config)

        if configs_json.get('endpoint'):
            self._endpoint = configs_json['endpoint']
        else:
            raise Exception('Endpoint has to be set for Syms adapter.')

        self.update_network_config(config)

        if configs_json.get('tenant') and configs_json.get('clientId'):
            self._tenant = configs_json['tenant']
            self.client_id = configs_json['clientId']

    async def write_async(self, corpus_path: str, data: str) -> None:
        """PUT `data` at `corpus_path`; a None payload issues a DELETE instead."""
        url = self.create_adapter_path(corpus_path)
        if data is None:
            request = self._build_request(url, 'DELETE', data, 'application/json')
        else:
            request = self._build_request(url, 'PUT', data, 'application/json')
        await self._http_client._send_async(request, self.wait_time_callback, self.ctx)

    def _build_request(self, url: str, method: str = 'GET', content: Optional[str] = None,
                       content_type: Optional[str] = None):
        """Create an authorized CDM HTTP request using whichever auth method is configured."""
        if self.tenant is not None and self.client_id is not None and self.secret is not None:
            token = self._generate_bearer_token()
            headers = {'Authorization': token['token_type'] + ' ' + token['access_token']}
            request = self._set_up_cdm_request(url, headers, method)
        elif self.token_provider is not None:
            headers = {'Authorization': self.token_provider.get_token()}
            request = self._set_up_cdm_request(url, headers, method)
        else:
            raise Exception('Syms adapter is not configured with any auth method')

        if content is not None:
            request.content = content
            request.content_type = content_type

        return request

    def _format_corpus_path(self, corpus_path: str) -> Optional[str]:
        """Strip any namespace prefix and ensure the path starts with '/'."""
        path_tuple = StorageUtils.split_namespace_path(corpus_path)
        if not path_tuple:
            return None

        corpus_path = path_tuple[1]
        if corpus_path and corpus_path[0] != '/':
            corpus_path = '/' + corpus_path
        return corpus_path

    def _format_endpoint(self, endpoint: str) -> str:
        """Normalize an endpoint: drop the https:// scheme and any trailing slash."""
        if endpoint.startswith('https://'):
            endpoint = endpoint.replace('https://', '')
        return '{}'.format(StringUtils.trim_end(endpoint, '/'))

    def _generate_bearer_token(self) -> Optional[dict]:
        """Acquire an AAD bearer token via msal client-credential flow."""
        self._build_context()
        result = self._auth_context.acquire_token_for_client(scopes=self._scope)
        if result and 'error' in result:
            error_description = result['error'] + ' error_description: ' + result['error_description'] \
                if 'error_description' in result else result['error']
            raise Exception('There was an error while acquiring Syms Adapter\'s Token with '
                            'client ID/secret authentication. Exception: ' + error_description)
        if result is None or 'access_token' not in result or 'token_type' not in result:
            raise Exception('Received invalid Syms Adapter\'s authentication result. The result may be None, or missing'
                            ' access_toke and/or token_type authorization header from the authentication result.')
        return result

    def _build_context(self):
        """Build context when users make the first call. Also need to ensure client Id, tenant and secret are not null."""
        if self._auth_context is None:
            self._auth_context = msal.ConfidentialClientApplication(
                self.client_id, authority=self.azure_endpoint.value + self.tenant, client_credential=self.secret)
| 45.716724 | 363 | 0.637402 |
a90f8d43e063b429547c8b3840f578e7e5c9d694 | 5,306 | py | Python | selfdrive/car/car_helpers.py | matthewklinko/openpilot | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | [
"MIT"
] | 3 | 2019-06-29T08:32:58.000Z | 2019-09-06T15:58:03.000Z | selfdrive/car/car_helpers.py | matthewklinko/openpilot | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | [
"MIT"
] | 1 | 2019-09-22T06:44:10.000Z | 2019-09-22T06:44:10.000Z | selfdrive/car/car_helpers.py | matthewklinko/openpilot | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | [
"MIT"
] | 2 | 2020-03-18T02:56:23.000Z | 2020-05-12T16:22:31.000Z | import os
import zmq
from cereal import car
from common.params import Params
from common.vin import get_vin, VIN_UNKNOWN
from common.basedir import BASEDIR
from common.fingerprints import eliminate_incompatible_cars, all_known_cars
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
def get_one_can(logcan):
    """Block until a CAN packet containing at least one frame arrives on logcan.

    zmq EAGAIN (no message ready) is retried; empty packets are discarded.
    """
    while True:
        try:
            packet = messaging.recv_one(logcan)
            if len(packet.can) > 0:
                return packet
        except zmq.error.Again:
            pass
def get_startup_alert(car_recognized, controller_available):
    """Pick the startup alert name from fingerprint/controller availability.

    Unrecognized car takes precedence over a missing controller.
    """
    if not car_recognized:
        return 'startupNoCar'
    if not controller_available:
        return 'startupNoControl'
    return 'startup'
def load_interfaces(brand_names):
    """Build {model_name: (CarInterface, CarController)} for every model.

    `brand_names` maps a brand folder name under selfdrive/car/ to its list
    of model names. CarController is None when the brand ships no
    carcontroller.py.
    """
    mapping = {}
    for brand, models in brand_names.items():
        module_path = 'selfdrive.car.%s' % brand
        interface_cls = __import__(module_path + '.interface', fromlist=['CarInterface']).CarInterface

        controller_file = BASEDIR + '/' + module_path.replace('.', '/') + '/carcontroller.py'
        if os.path.exists(controller_file):
            controller_cls = __import__(module_path + '.carcontroller', fromlist=['CarController']).CarController
        else:
            controller_cls = None

        for model in models:
            mapping[model] = (interface_cls, controller_cls)
    return mapping
def _get_interface_names():
    # Read all the folders in selfdrive/car and return a dict where:
    # - keys are all the car brand names for which we have an interface
    # - values are lists of specific car models for a given brand
    brand_names = {}
    for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]:
        try:
            brand_name = car_folder.split('/')[-1]
            # Each brand folder exposes a CAR class in values.py whose
            # non-dunder attributes are the model-name strings.
            model_names = __import__('selfdrive.car.%s.values' % brand_name, fromlist=['CAR']).CAR
            model_names = [getattr(model_names, c) for c in model_names.__dict__.keys() if not c.startswith("__")]
            brand_names[brand_name] = model_names
        except (ImportError, IOError):
            # Folders without an importable values.py are skipped silently.
            pass

    return brand_names
# Build the model-name -> (CarInterface, CarController) lookup once at import
# time from the brand folders under selfdrive/car/.
interfaces = load_interfaces(_get_interface_names())
def only_toyota_left(candidate_cars):
    """Return True when the candidate list is non-empty and every remaining
    fingerprint candidate is a Toyota or Lexus model."""
    if not candidate_cars:
        return False
    return all("TOYOTA" in name or "LEXUS" in name for name in candidate_cars)
# BOUNTY: every added fingerprint in selfdrive/car/*/values.py is a $100 coupon code on shop.comma.ai

# **** for use live only ****
def fingerprint(logcan, sendcan, is_panda_black):
    """Identify the car model from live CAN traffic.

    Returns (car_fingerprint, finger, vin): the matched model name (or None
    when fingerprinting failed), a bus -> {address: payload length} map of
    every observed message, and the VIN (VIN_UNKNOWN when unavailable).
    """
    if os.getenv("SIMULATOR2") is not None:
        return ("simulator2", None, "")
    elif os.getenv("SIMULATOR") is not None:
        return ("simulator", None, "")

    params = Params()
    car_params = params.get("CarParams")

    if car_params is not None:
        # use already stored VIN: a new VIN query cannot be done, since panda isn't in ELM327 mode
        car_params = car.CarParams.from_bytes(car_params)
        vin = VIN_UNKNOWN if car_params.carVin == "" else car_params.carVin
    elif is_panda_black:
        # VIN query only reliably works through OBDII
        vin = get_vin(logcan, sendcan, 1)
    else:
        vin = VIN_UNKNOWN

    cloudlog.warning("VIN %s", vin)
    Params().put("CarVin", vin)

    finger = {i: {} for i in range(0, 4)}  # collect on all buses
    candidate_cars = {i: all_known_cars() for i in [0, 1]}  # attempt fingerprint on both bus 0 and 1
    frame = 0
    frame_fingerprint = 10  # 0.1s
    car_fingerprint = None
    done = False

    while not done:
        a = get_one_can(logcan)

        for can in a.can:
            # need to independently try to fingerprint both bus 0 and 1 to work
            # for the combo black_panda and honda_bosch. Ignore extended messages
            # and VIN query response.
            # Include bus 2 for toyotas to disambiguate cars using camera messages
            # (ideally should be done for all cars but we can't for Honda Bosch)
            for b in candidate_cars:
                if (can.src == b or (only_toyota_left(candidate_cars[b]) and can.src == 2)) and \
                        can.address < 0x800 and can.address not in [0x7df, 0x7e0, 0x7e8]:
                    finger[can.src][can.address] = len(can.dat)
                    candidate_cars[b] = eliminate_incompatible_cars(can, candidate_cars[b])

        # if we only have one car choice and the time since we got our first
        # message has elapsed, exit
        for b in candidate_cars:
            # Toyota needs higher time to fingerprint, since DSU does not broadcast immediately
            if only_toyota_left(candidate_cars[b]):
                frame_fingerprint = 100  # 1s
            if len(candidate_cars[b]) == 1:
                if frame > frame_fingerprint:
                    # fingerprint done
                    car_fingerprint = candidate_cars[b][0]

        # bail if no cars left or we've been waiting for more than 2s
        # NOTE: dict.itervalues() is Python-2 only; this file targets py2.
        failed = all(len(cc) == 0 for cc in candidate_cars.itervalues()) or frame > 200
        succeeded = car_fingerprint is not None
        done = failed or succeeded

        frame += 1

    cloudlog.warning("fingerprinted %s", car_fingerprint)
    return car_fingerprint, finger, vin
def get_car(logcan, sendcan, is_panda_black=False):
    """Fingerprint the car and instantiate its interface.

    Falls back to the 'mock' interface when no fingerprint matches.
    Returns (car_interface_instance, car_params).
    """
    candidate, fingerprints, vin = fingerprint(logcan, sendcan, is_panda_black)

    if candidate is None:
        cloudlog.warning("car doesn't match any fingerprints: %r", fingerprints)
        candidate = "mock"

    interface_cls, controller_cls = interfaces[candidate]
    params = interface_cls.get_params(candidate, fingerprints[0], vin, is_panda_black)
    return interface_cls(params, controller_cls), params
| 35.851351 | 108 | 0.70392 |
ec973e264a484640ced2159055b29b420440c60e | 424 | py | Python | stdplugins/F.py | kaalhoonme/PepeBot | d1678f3c5e57adb8c9d2e1bc5a54568ad2938258 | [
"Apache-2.0"
] | 1 | 2020-08-12T21:36:58.000Z | 2020-08-12T21:36:58.000Z | stdplugins/F.py | shn999/PepeBot | 912b97dc89c4c7581cbaee337d4fcc05c98d79c0 | [
"Apache-2.0"
] | null | null | null | stdplugins/F.py | shn999/PepeBot | 912b97dc89c4c7581cbaee337d4fcc05c98d79c0 | [
"Apache-2.0"
] | null | null | null | from telethon import events
import asyncio
import os
import sys
from uniborg import util
@borg.on(util.admin_cmd(pattern="ft ?(.*)"))
async def payf(event):
    """Admin command '.ft <text>': replace the message with a 12-row
    block-letter 'F' drawn out of repetitions of <text>."""
    # Text captured after the command.
    paytext = event.pattern_match.group(1)
    # Each format slot is one row of the 'F' glyph; wider rows repeat the
    # text more times (8x for the top bar, 6x for the middle bar, 2x stem).
    pay = "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}".format(paytext*8, paytext*8, paytext*2, paytext*2, paytext*2, paytext*6, paytext*6, paytext*2, paytext*2, paytext*2, paytext*2, paytext*2)
    await event.edit(pay)
| 32.615385 | 197 | 0.669811 |
095a0b2b94c409a2f229227b446abcf01315978b | 1,837 | py | Python | tests/nlp/utils_test.py | sudharkj/unibrowser | 7f3b407d3469ee2dffa72d3917af1e42100b71de | [
"Apache-2.0"
] | null | null | null | tests/nlp/utils_test.py | sudharkj/unibrowser | 7f3b407d3469ee2dffa72d3917af1e42100b71de | [
"Apache-2.0"
] | null | null | null | tests/nlp/utils_test.py | sudharkj/unibrowser | 7f3b407d3469ee2dffa72d3917af1e42100b71de | [
"Apache-2.0"
] | null | null | null | # system path
import sys
import os
# unit testing
import unittest
# Sets the execution path
sys.path.insert(0, os.path.realpath('./'))
# internal modulesunit
from nlp.utils import get_faq_data, get_lemmatize_dict, get_word_clusters, save_faq_slots
class TestUtils(unittest.TestCase):
    """Unit tests for the nlp.utils helper functions."""

    def test_get_faq_data(self):
        """Fetching FAQ documents from the database yields a non-empty list."""
        self.assertTrue(len(get_faq_data('faq')) > 0)

    def test_lemmatize_text(self):
        """get_lemmatize_dict builds a non-empty lemma mapping from sentences."""
        corpus = [
            "testing funny dogs",
            "doing natural language processing",
            "working on computer processing",
        ]
        mapping = get_lemmatize_dict(corpus)
        self.assertTrue(len(mapping.items()) > 0)

    def test_get_word_clusters(self):
        """get_word_clusters returns one slot per lemma and at least one cluster."""
        threshold = 0.5
        lemmas = {
            'cat': ['cats', 'cat'],
            'dog': ['dogs', 'dog'],
        }
        slots, groups = get_word_clusters(lemmas, threshold)
        self.assertTrue(len(slots.items()) == len(lemmas.items()))
        self.assertTrue(len(groups) > 0)

    def test_save_results(self):
        """save_faq_slots reports success (1) for a valid slot/cluster pair."""
        threshold = 0.5
        lemmas = {
            'cat': ['cats', 'cat'],
            'dog': ['dogs', 'dog'],
        }
        slots, groups = get_word_clusters(lemmas, threshold)
        self.assertTrue(save_faq_slots(slots, groups) == 1)
# Entry point: run the TestUtils suite via unittest's CLI runner when this
# module is executed as a script.
if __name__ == '__main__':
    unittest.main()
| 28.261538 | 113 | 0.597714 |
5e94b4b379acda1631cc214c340c6ace9dbf7633 | 14,715 | py | Python | venv/Lib/site-packages/snowballstemmer/lithuanian_stemmer.py | ChristaPatton/django_cityloc_pkg_ChristaPatton | 22986c062de627b64506732fc9b74374600f00fa | [
"MIT"
] | 6 | 2022-01-04T02:51:33.000Z | 2022-01-25T11:36:39.000Z | venv/Lib/site-packages/snowballstemmer/lithuanian_stemmer.py | ChristaPatton/django_cityloc_pkg_ChristaPatton | 22986c062de627b64506732fc9b74374600f00fa | [
"MIT"
] | 1 | 2022-01-13T23:27:10.000Z | 2022-01-15T05:03:24.000Z | venv/Lib/site-packages/snowballstemmer/lithuanian_stemmer.py | ChristaPatton/django_cityloc_pkg_ChristaPatton | 22986c062de627b64506732fc9b74374600f00fa | [
"MIT"
] | null | null | null | # Generated by Snowball 2.2.0 - https://snowballstem.org/
from .basestemmer import BaseStemmer
from .among import Among
class LithuanianStemmer(BaseStemmer):
'''
This class implements the stemming algorithm defined by a snowball script.
Generated by Snowball 2.2.0 - https://snowballstem.org/
'''
a_0 = [
Among(u"a", -1, -1),
Among(u"ia", 0, -1),
Among(u"eria", 1, -1),
Among(u"osna", 0, -1),
Among(u"iosna", 3, -1),
Among(u"uosna", 3, -1),
Among(u"iuosna", 5, -1),
Among(u"ysna", 0, -1),
Among(u"\u0117sna", 0, -1),
Among(u"e", -1, -1),
Among(u"ie", 9, -1),
Among(u"enie", 10, -1),
Among(u"erie", 10, -1),
Among(u"oje", 9, -1),
Among(u"ioje", 13, -1),
Among(u"uje", 9, -1),
Among(u"iuje", 15, -1),
Among(u"yje", 9, -1),
Among(u"enyje", 17, -1),
Among(u"eryje", 17, -1),
Among(u"\u0117je", 9, -1),
Among(u"ame", 9, -1),
Among(u"iame", 21, -1),
Among(u"sime", 9, -1),
Among(u"ome", 9, -1),
Among(u"\u0117me", 9, -1),
Among(u"tum\u0117me", 25, -1),
Among(u"ose", 9, -1),
Among(u"iose", 27, -1),
Among(u"uose", 27, -1),
Among(u"iuose", 29, -1),
Among(u"yse", 9, -1),
Among(u"enyse", 31, -1),
Among(u"eryse", 31, -1),
Among(u"\u0117se", 9, -1),
Among(u"ate", 9, -1),
Among(u"iate", 35, -1),
Among(u"ite", 9, -1),
Among(u"kite", 37, -1),
Among(u"site", 37, -1),
Among(u"ote", 9, -1),
Among(u"tute", 9, -1),
Among(u"\u0117te", 9, -1),
Among(u"tum\u0117te", 42, -1),
Among(u"i", -1, -1),
Among(u"ai", 44, -1),
Among(u"iai", 45, -1),
Among(u"eriai", 46, -1),
Among(u"ei", 44, -1),
Among(u"tumei", 48, -1),
Among(u"ki", 44, -1),
Among(u"imi", 44, -1),
Among(u"erimi", 51, -1),
Among(u"umi", 44, -1),
Among(u"iumi", 53, -1),
Among(u"si", 44, -1),
Among(u"asi", 55, -1),
Among(u"iasi", 56, -1),
Among(u"esi", 55, -1),
Among(u"iesi", 58, -1),
Among(u"siesi", 59, -1),
Among(u"isi", 55, -1),
Among(u"aisi", 61, -1),
Among(u"eisi", 61, -1),
Among(u"tumeisi", 63, -1),
Among(u"uisi", 61, -1),
Among(u"osi", 55, -1),
Among(u"\u0117josi", 66, -1),
Among(u"uosi", 66, -1),
Among(u"iuosi", 68, -1),
Among(u"siuosi", 69, -1),
Among(u"usi", 55, -1),
Among(u"ausi", 71, -1),
Among(u"\u010Diausi", 72, -1),
Among(u"\u0105si", 55, -1),
Among(u"\u0117si", 55, -1),
Among(u"\u0173si", 55, -1),
Among(u"t\u0173si", 76, -1),
Among(u"ti", 44, -1),
Among(u"enti", 78, -1),
Among(u"inti", 78, -1),
Among(u"oti", 78, -1),
Among(u"ioti", 81, -1),
Among(u"uoti", 81, -1),
Among(u"iuoti", 83, -1),
Among(u"auti", 78, -1),
Among(u"iauti", 85, -1),
Among(u"yti", 78, -1),
Among(u"\u0117ti", 78, -1),
Among(u"tel\u0117ti", 88, -1),
Among(u"in\u0117ti", 88, -1),
Among(u"ter\u0117ti", 88, -1),
Among(u"ui", 44, -1),
Among(u"iui", 92, -1),
Among(u"eniui", 93, -1),
Among(u"oj", -1, -1),
Among(u"\u0117j", -1, -1),
Among(u"k", -1, -1),
Among(u"am", -1, -1),
Among(u"iam", 98, -1),
Among(u"iem", -1, -1),
Among(u"im", -1, -1),
Among(u"sim", 101, -1),
Among(u"om", -1, -1),
Among(u"tum", -1, -1),
Among(u"\u0117m", -1, -1),
Among(u"tum\u0117m", 105, -1),
Among(u"an", -1, -1),
Among(u"on", -1, -1),
Among(u"ion", 108, -1),
Among(u"un", -1, -1),
Among(u"iun", 110, -1),
Among(u"\u0117n", -1, -1),
Among(u"o", -1, -1),
Among(u"io", 113, -1),
Among(u"enio", 114, -1),
Among(u"\u0117jo", 113, -1),
Among(u"uo", 113, -1),
Among(u"s", -1, -1),
Among(u"as", 118, -1),
Among(u"ias", 119, -1),
Among(u"es", 118, -1),
Among(u"ies", 121, -1),
Among(u"is", 118, -1),
Among(u"ais", 123, -1),
Among(u"iais", 124, -1),
Among(u"tumeis", 123, -1),
Among(u"imis", 123, -1),
Among(u"enimis", 127, -1),
Among(u"omis", 123, -1),
Among(u"iomis", 129, -1),
Among(u"umis", 123, -1),
Among(u"\u0117mis", 123, -1),
Among(u"enis", 123, -1),
Among(u"asis", 123, -1),
Among(u"ysis", 123, -1),
Among(u"ams", 118, -1),
Among(u"iams", 136, -1),
Among(u"iems", 118, -1),
Among(u"ims", 118, -1),
Among(u"enims", 139, -1),
Among(u"erims", 139, -1),
Among(u"oms", 118, -1),
Among(u"ioms", 142, -1),
Among(u"ums", 118, -1),
Among(u"\u0117ms", 118, -1),
Among(u"ens", 118, -1),
Among(u"os", 118, -1),
Among(u"ios", 147, -1),
Among(u"uos", 147, -1),
Among(u"iuos", 149, -1),
Among(u"ers", 118, -1),
Among(u"us", 118, -1),
Among(u"aus", 152, -1),
Among(u"iaus", 153, -1),
Among(u"ius", 152, -1),
Among(u"ys", 118, -1),
Among(u"enys", 156, -1),
Among(u"erys", 156, -1),
Among(u"\u0105s", 118, -1),
Among(u"i\u0105s", 159, -1),
Among(u"\u0117s", 118, -1),
Among(u"am\u0117s", 161, -1),
Among(u"iam\u0117s", 162, -1),
Among(u"im\u0117s", 161, -1),
Among(u"kim\u0117s", 164, -1),
Among(u"sim\u0117s", 164, -1),
Among(u"om\u0117s", 161, -1),
Among(u"\u0117m\u0117s", 161, -1),
Among(u"tum\u0117m\u0117s", 168, -1),
Among(u"at\u0117s", 161, -1),
Among(u"iat\u0117s", 170, -1),
Among(u"sit\u0117s", 161, -1),
Among(u"ot\u0117s", 161, -1),
Among(u"\u0117t\u0117s", 161, -1),
Among(u"tum\u0117t\u0117s", 174, -1),
Among(u"\u012Fs", 118, -1),
Among(u"\u016Bs", 118, -1),
Among(u"t\u0173s", 118, -1),
Among(u"at", -1, -1),
Among(u"iat", 179, -1),
Among(u"it", -1, -1),
Among(u"sit", 181, -1),
Among(u"ot", -1, -1),
Among(u"\u0117t", -1, -1),
Among(u"tum\u0117t", 184, -1),
Among(u"u", -1, -1),
Among(u"au", 186, -1),
Among(u"iau", 187, -1),
Among(u"\u010Diau", 188, -1),
Among(u"iu", 186, -1),
Among(u"eniu", 190, -1),
Among(u"siu", 190, -1),
Among(u"y", -1, -1),
Among(u"\u0105", -1, -1),
Among(u"i\u0105", 194, -1),
Among(u"\u0117", -1, -1),
Among(u"\u0119", -1, -1),
Among(u"\u012F", -1, -1),
Among(u"en\u012F", 198, -1),
Among(u"er\u012F", 198, -1),
Among(u"\u0173", -1, -1),
Among(u"i\u0173", 201, -1),
Among(u"er\u0173", 201, -1)
]
a_1 = [
Among(u"ing", -1, -1),
Among(u"aj", -1, -1),
Among(u"iaj", 1, -1),
Among(u"iej", -1, -1),
Among(u"oj", -1, -1),
Among(u"ioj", 4, -1),
Among(u"uoj", 4, -1),
Among(u"iuoj", 6, -1),
Among(u"auj", -1, -1),
Among(u"\u0105j", -1, -1),
Among(u"i\u0105j", 9, -1),
Among(u"\u0117j", -1, -1),
Among(u"\u0173j", -1, -1),
Among(u"i\u0173j", 12, -1),
Among(u"ok", -1, -1),
Among(u"iok", 14, -1),
Among(u"iuk", -1, -1),
Among(u"uliuk", 16, -1),
Among(u"u\u010Diuk", 16, -1),
Among(u"i\u0161k", -1, -1),
Among(u"iul", -1, -1),
Among(u"yl", -1, -1),
Among(u"\u0117l", -1, -1),
Among(u"am", -1, -1),
Among(u"dam", 23, -1),
Among(u"jam", 23, -1),
Among(u"zgan", -1, -1),
Among(u"ain", -1, -1),
Among(u"esn", -1, -1),
Among(u"op", -1, -1),
Among(u"iop", 29, -1),
Among(u"ias", -1, -1),
Among(u"ies", -1, -1),
Among(u"ais", -1, -1),
Among(u"iais", 33, -1),
Among(u"os", -1, -1),
Among(u"ios", 35, -1),
Among(u"uos", 35, -1),
Among(u"iuos", 37, -1),
Among(u"aus", -1, -1),
Among(u"iaus", 39, -1),
Among(u"\u0105s", -1, -1),
Among(u"i\u0105s", 41, -1),
Among(u"\u0119s", -1, -1),
Among(u"ut\u0117ait", -1, -1),
Among(u"ant", -1, -1),
Among(u"iant", 45, -1),
Among(u"siant", 46, -1),
Among(u"int", -1, -1),
Among(u"ot", -1, -1),
Among(u"uot", 49, -1),
Among(u"iuot", 50, -1),
Among(u"yt", -1, -1),
Among(u"\u0117t", -1, -1),
Among(u"yk\u0161t", -1, -1),
Among(u"iau", -1, -1),
Among(u"dav", -1, -1),
Among(u"sv", -1, -1),
Among(u"\u0161v", -1, -1),
Among(u"yk\u0161\u010D", -1, -1),
Among(u"\u0119", -1, -1),
Among(u"\u0117j\u0119", 60, -1)
]
a_2 = [
Among(u"ojime", -1, 7),
Among(u"\u0117jime", -1, 3),
Among(u"avime", -1, 6),
Among(u"okate", -1, 8),
Among(u"aite", -1, 1),
Among(u"uote", -1, 2),
Among(u"asius", -1, 5),
Among(u"okat\u0117s", -1, 8),
Among(u"ait\u0117s", -1, 1),
Among(u"uot\u0117s", -1, 2),
Among(u"esiu", -1, 4)
]
a_3 = [
Among(u"\u010D", -1, 1),
Among(u"d\u017E", -1, 2)
]
a_4 = [
Among(u"gd", -1, 1)
]
g_v = [17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 64, 1, 0, 64, 0, 0, 0, 0, 0, 0, 0, 4, 4]
I_p1 = 0
def __r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
    def __r_step1(self):
        # Strip one inflectional ending (table a_0) from the end of the word.
        # The backward search is temporarily clamped to region R1 by swapping
        # limit_backward, and restored (v_2) on every exit path after the match.
        if self.cursor < self.I_p1:
            return False
        v_2 = self.limit_backward
        self.limit_backward = self.I_p1
        self.ket = self.cursor
        if self.find_among_b(LithuanianStemmer.a_0) == 0:
            self.limit_backward = v_2
            return False
        self.bra = self.cursor
        self.limit_backward = v_2
        # Double-check the match lies in R1 before deleting the [bra, ket) span.
        if not self.__r_R1():
            return False
        if not self.slice_del():
            return False
        return True
    def __r_step2(self):
        # Repeatedly strip derivational suffixes (table a_1) found inside R1.
        # This is Snowball-generated code: the lab0 exception emulates a
        # labelled break -- when no further suffix matches, the cursor is
        # restored from the v_1 snapshot and the loop ends.
        while True:
            v_1 = self.limit - self.cursor
            try:
                if self.cursor < self.I_p1:
                    raise lab0()
                # Clamp backward scanning to R1 (restored via v_3).
                v_3 = self.limit_backward
                self.limit_backward = self.I_p1
                self.ket = self.cursor
                if self.find_among_b(LithuanianStemmer.a_1) == 0:
                    self.limit_backward = v_3
                    raise lab0()
                self.bra = self.cursor
                self.limit_backward = v_3
                if not self.slice_del():
                    return False
                # A suffix was removed; try to strip another one.
                continue
            except lab0: pass
            self.cursor = self.limit - v_1
            break
        return True
def __r_fix_conflicts(self):
self.ket = self.cursor
among_var = self.find_among_b(LithuanianStemmer.a_2)
if among_var == 0:
return False
self.bra = self.cursor
if among_var == 1:
if not self.slice_from(u"ait\u0117"):
return False
elif among_var == 2:
if not self.slice_from(u"uot\u0117"):
return False
elif among_var == 3:
if not self.slice_from(u"\u0117jimas"):
return False
elif among_var == 4:
if not self.slice_from(u"esys"):
return False
elif among_var == 5:
if not self.slice_from(u"asys"):
return False
elif among_var == 6:
if not self.slice_from(u"avimas"):
return False
elif among_var == 7:
if not self.slice_from(u"ojimas"):
return False
else:
if not self.slice_from(u"okat\u0117"):
return False
return True
def __r_fix_chdz(self):
self.ket = self.cursor
among_var = self.find_among_b(LithuanianStemmer.a_3)
if among_var == 0:
return False
self.bra = self.cursor
if among_var == 1:
if not self.slice_from(u"t"):
return False
else:
if not self.slice_from(u"d"):
return False
return True
def __r_fix_gd(self):
self.ket = self.cursor
if self.find_among_b(LithuanianStemmer.a_4) == 0:
return False
self.bra = self.cursor
if not self.slice_from(u"g"):
return False
return True
    def _stem(self):
        # Entry point invoked by the Snowball runtime for each word.
        #
        # Phase 1 (forward scan): compute I_p1, the start of region R1 --
        # the position just after the first vowel-run followed by a
        # non-vowel-run.  Defaults to the word end if no such point exists.
        self.I_p1 = self.limit
        v_1 = self.cursor
        try:
            v_2 = self.cursor
            try:
                # Special case: words starting with "a" and longer than six
                # characters skip their first letter before the R1 search.
                # lab1 acts as a labelled break out of this optional step.
                v_3 = self.cursor
                if not self.eq_s(u"a"):
                    self.cursor = v_2
                    raise lab1()
                self.cursor = v_3
                if not len(self.current) > 6:
                    self.cursor = v_2
                    raise lab1()
                c = self.cursor + 1
                if c > self.limit:
                    self.cursor = v_2
                    raise lab1()
                self.cursor = c
            except lab1: pass
            # Advance past non-vowels to the first vowel (grouping g_v,
            # code points 97..371), then past vowels to the next non-vowel.
            if not self.go_out_grouping(LithuanianStemmer.g_v, 97, 371):
                raise lab0()
            self.cursor += 1
            if not self.go_in_grouping(LithuanianStemmer.g_v, 97, 371):
                raise lab0()
            self.cursor += 1
            self.I_p1 = self.cursor
        except lab0: pass
        self.cursor = v_1
        # Phase 2 (backward mode): apply the rules from the end of the word.
        # Each v_i snapshot restores the cursor between rules, so every rule
        # sees the word end left by the previous successful deletions.
        self.limit_backward = self.cursor
        self.cursor = self.limit
        v_4 = self.limit - self.cursor
        self.__r_fix_conflicts()
        self.cursor = self.limit - v_4
        v_5 = self.limit - self.cursor
        self.__r_step1()
        self.cursor = self.limit - v_5
        v_6 = self.limit - self.cursor
        self.__r_fix_chdz()
        self.cursor = self.limit - v_6
        v_7 = self.limit - self.cursor
        self.__r_step2()
        self.cursor = self.limit - v_7
        v_8 = self.limit - self.cursor
        self.__r_fix_chdz()
        self.cursor = self.limit - v_8
        v_9 = self.limit - self.cursor
        self.__r_fix_gd()
        self.cursor = self.limit - v_9
        self.cursor = self.limit_backward
        return True
# Marker exceptions used by the generated stemmer code to emulate Snowball's
# labelled jumps: `raise labN()` is a goto, `except labN:` is the label.
# They derive from BaseException so ordinary `except Exception` handlers in
# user code cannot swallow them accidentally.
class lab0(BaseException): pass
class lab1(BaseException): pass
| 31.308511 | 121 | 0.445668 |
31586b0e8440ac0fb7f47895645a135f6339d512 | 15,014 | py | Python | data/external/repositories_2to3/267667/kaggle-heart-master/generate_roi_pkl.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/267667/kaggle-heart-master/generate_roi_pkl.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/267667/kaggle-heart-master/generate_roi_pkl.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | import argparse
import numpy as np
import glob
import re
from log import print_to_file
from scipy.fftpack import fftn, ifftn
from skimage.feature import peak_local_max, canny
from skimage.transform import hough_circle
import pickle as pickle
from paths import TRAIN_DATA_PATH, LOGS_PATH, PKL_TRAIN_DATA_PATH, PKL_TEST_DATA_PATH
from paths import TEST_DATA_PATH
def orthogonal_projection_on_slice(percentual_coordinate, source_metadata, target_metadata):
point = np.array([[percentual_coordinate[0]],
[percentual_coordinate[1]],
[0],
[1]])
image_size = [source_metadata["Rows"], source_metadata["Columns"]]
point = np.dot(np.array( [[image_size[0],0,0,0],
[0,image_size[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
pixel_spacing = source_metadata["PixelSpacing"]
point = np.dot(np.array( [[pixel_spacing[0],0,0,0],
[0,pixel_spacing[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
Fa = np.array(source_metadata["ImageOrientationPatient"]).reshape( (2,3) )[::-1,:]
posa = source_metadata["ImagePositionPatient"]
point = np.dot(np.array( [[Fa[0,0],Fa[1,0],0,posa[0]],
[Fa[0,1],Fa[1,1],0,posa[1]],
[Fa[0,2],Fa[1,2],0,posa[2]],
[0,0,0,1]]), point)
posb = target_metadata["ImagePositionPatient"]
point = np.dot(np.array( [[1,0,0,-posb[0]],
[0,1,0,-posb[1]],
[0,0,1,-posb[2]],
[0,0,0,1]]), point)
Fb = np.array(target_metadata["ImageOrientationPatient"]).reshape( (2,3) )[::-1,:]
ff0 = np.sqrt(np.sum(Fb[0,:]*Fb[0,:]))
ff1 = np.sqrt(np.sum(Fb[1,:]*Fb[1,:]))
point = np.dot(np.array( [[Fb[0,0]/ff0,Fb[0,1]/ff0,Fb[0,2]/ff0,0],
[Fb[1,0]/ff1,Fb[1,1]/ff1,Fb[1,2]/ff1,0],
[0,0,0,0],
[0,0,0,1]]), point)
pixel_spacing = target_metadata["PixelSpacing"]
point = np.dot(np.array( [[1./pixel_spacing[0],0,0,0],
[0,1./pixel_spacing[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
image_size = [target_metadata["Rows"], target_metadata["Columns"]]
point = np.dot(np.array( [[1./image_size[0],0,0,0],
[0,1./image_size[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
return point[:2,0] # percentual coordinate as well
# --- Hough-circle search hyperparameters --------------------------------
# First parameter set ("joni"): radii expressed directly in pixels.
minradius = 15
maxradius = 65
kernel_width = 5
center_margin = 8
num_peaks = 10
num_circles = 10  # 20
radstep = 2
# Second parameter set ("ira"): radii expressed in millimetres.
# NOTE(review): these assignments shadow kernel_width/center_margin/
# num_peaks/num_circles/radstep above, so only this group is effective at
# module level; extract_roi() carries its own defaults anyway, making both
# groups effectively dead configuration -- confirm before removing.
minradius_mm=25
maxradius_mm=45
kernel_width=5
center_margin=8
num_peaks=10
num_circles=20
radstep=2
def extract_roi(data, pixel_spacing, minradius_mm=15, maxradius_mm=65, kernel_width=5, center_margin=8, num_peaks=10,
                num_circles=10, radstep=2):
    """Locate a region of interest in a stack of cine MRI slices.

    Each slice's first temporal Fourier harmonic highlights periodically
    moving structures; Hough circles found on its edges vote (weighted by
    their accumulator value) onto a likelihood surface whose maximum is
    taken as the ROI center.

    Returns (roi_center, roi_radii) in (i, j) pixel coordinates;
    roi_radii is None when no circle cluster lies near the chosen center.
    """
    # Convert the radius search range from millimetres to pixels.
    minradius = int(minradius_mm / pixel_spacing)
    maxradius = int(maxradius_mm / pixel_spacing)

    ximagesize = data[0]['data'].shape[1]
    yimagesize = data[0]['data'].shape[2]

    xsurface = np.tile(list(range(ximagesize)), (yimagesize, 1)).T
    ysurface = np.tile(list(range(yimagesize)), (ximagesize, 1))
    lsurface = np.zeros((ximagesize, yimagesize))

    allcenters = []
    allaccums = []
    allradii = []
    for dslice in data:
        # First temporal Fourier harmonic: emphasizes the beating heart,
        # suppresses static anatomy.
        ff1 = fftn(dslice['data'])
        fh = np.absolute(ifftn(ff1[1, :, :]))
        fh[fh < 0.1 * np.max(fh)] = 0.0
        image = 1. * fh / np.max(fh)

        # Detect candidate circles over the allowed radius range.
        edges = canny(image, sigma=3)
        hough_radii = np.arange(minradius, maxradius, radstep)
        hough_res = hough_circle(edges, hough_radii)
        if not hough_res.any():
            continue
        centers = []
        accums = []
        radii = []
        for radius, h in zip(hough_radii, hough_res):
            # For each radius, keep up to num_peaks local maxima.
            peaks = peak_local_max(h, num_peaks=num_peaks)
            centers.extend(peaks)
            accums.extend(h[peaks[:, 0], peaks[:, 1]])
            # BUG FIX: peak_local_max may return fewer than num_peaks peaks;
            # extend radii by the actual peak count so centers/accums/radii
            # stay index-aligned (the old `* num_peaks` silently misaligned
            # radii with their circles).
            radii.extend([radius] * len(peaks))

        # Keep this slice's num_circles most prominent circles and splat a
        # Gaussian bump (weighted by the accumulator value) onto the
        # likelihood surface for each.
        sorted_circles_idxs = np.argsort(accums)[::-1][:num_circles]
        for idx in sorted_circles_idxs:
            center_x, center_y = centers[idx]
            allcenters.append(centers[idx])
            allradii.append(radii[idx])
            allaccums.append(accums[idx])
            brightness = accums[idx]
            lsurface = lsurface + brightness * np.exp(
                -((xsurface - center_x) ** 2 + (ysurface - center_y) ** 2) / kernel_width ** 2)

    # BUG FIX: guard against division by zero when no circle was found in
    # any slice (lsurface stays all-zero and argmax falls back to (0, 0)).
    peak_value = lsurface.max()
    if peak_value > 0:
        lsurface = lsurface / peak_value

    # Most likely ROI center = global maximum of the likelihood surface.
    roi_center = np.unravel_index(lsurface.argmax(), lsurface.shape)

    # ROI radii: largest reach of any circle whose center lies within
    # center_margin of the chosen center, per axis.
    roi_x_radius = 0
    roi_y_radius = 0
    for idx in range(len(allcenters)):
        xshift = np.abs(allcenters[idx][0] - roi_center[0])
        yshift = np.abs(allcenters[idx][1] - roi_center[1])
        if (xshift <= center_margin) & (yshift <= center_margin):
            roi_x_radius = np.max((roi_x_radius, allradii[idx] + xshift))
            roi_y_radius = np.max((roi_y_radius, allradii[idx] + yshift))

    if roi_x_radius > 0 and roi_y_radius > 0:
        roi_radii = roi_x_radius, roi_y_radius
    else:
        roi_radii = None

    return roi_center, roi_radii
def read_slice(path):
    """Return the pixel array stored under 'data' in a pickled slice file.

    :param path: path to a .pkl file containing a dict with a 'data' key
    """
    # BUG FIX: pickle files must be opened in binary mode under Python 3;
    # the context manager also closes the handle deterministically instead
    # of leaking it.
    with open(path, 'rb') as f:
        return pickle.load(f)['data']
def read_metadata(path):
    """Load and normalize the DICOM-derived metadata of a pickled slice.

    Keeps only the geometry/patient fields the ROI pipeline needs, converts
    the numeric ones to numpy float32, encodes sex as 1 (female) / 0 (male)
    and parses the age digits out of the DICOM age string (e.g. '042Y' -> 42).
    """
    # BUG FIX: pickle files must be opened in binary mode under Python 3;
    # the context manager also closes the handle instead of leaking it.
    with open(path, 'rb') as f:
        d = pickle.load(f)['metadata'][0]
    metadata = {k: d[k] for k in ['PixelSpacing', 'ImageOrientationPatient', 'ImagePositionPatient', 'SliceLocation',
                                  'PatientSex', 'PatientAge', 'Rows', 'Columns']}
    metadata['PixelSpacing'] = np.float32(metadata['PixelSpacing'])
    metadata['ImageOrientationPatient'] = np.float32(metadata['ImageOrientationPatient'])
    metadata['SliceLocation'] = np.float32(metadata['SliceLocation'])
    metadata['ImagePositionPatient'] = np.float32(metadata['ImagePositionPatient'])
    metadata['PatientSex'] = 1 if metadata['PatientSex'] == 'F' else 0
    # DICOM age strings look like '042Y'; [1:3] keeps the last two digits.
    metadata['PatientAge'] = int(metadata['PatientAge'][1:3])
    metadata['Rows'] = int(metadata['Rows'])
    metadata['Columns'] = int(metadata['Columns'])
    return metadata
def get_patient_data(patient_data_path):
    """Load every short-axis slice (sax_*.pkl) of one patient.

    Returns a list of dicts with keys 'data', 'metadata', 'slice_id' and
    'patient_id', ordered by the numeric suffix of the file name.
    """
    sax_paths = sorted(
        glob.glob(patient_data_path + '/sax_*.pkl'),
        key=lambda path: int(re.search(r'/\w*_(\d+)*\.pkl$', path).group(1)))
    pid = re.search(r'/(\d+)/study$', patient_data_path).group(1)
    return [{'data': read_slice(path),
             'metadata': read_metadata(path),
             'slice_id': re.search(r'/(sax_\d+\.pkl)$', path).group(1),
             'patient_id': pid}
            for path in sax_paths]
def get_patient_ch_data(patient_data_path):
    """Load every long-axis channel slice (*ch_*.pkl) of one patient.

    Returns a list of dicts with keys 'data', 'metadata', 'slice_id' and
    'patient_id', ordered by the numeric suffix of the file name.
    """
    ch_paths = sorted(
        glob.glob(patient_data_path + '/*ch_*.pkl'),
        key=lambda path: int(re.search(r'/\w*_(\d+)*\.pkl$', path).group(1)))
    pid = re.search(r'/(\d+)/study$', patient_data_path).group(1)
    return [{'data': read_slice(path),
             'metadata': read_metadata(path),
             'slice_id': re.search(r'/(\d+ch_\d+\.pkl)$', path).group(1),
             'patient_id': pid}
            for path in ch_paths]
def sort_slices(slices):
    """Return *slices* ordered by descending SliceLocation.

    The sort is stable, so slices sharing a location keep their relative
    input order (matching the previous zip-and-sort implementation).
    """
    return sorted(slices,
                  key=lambda s: float(s['metadata']['SliceLocation']),
                  reverse=True)
def group_slices(slice_stack):
    """Split *slice_stack* into stacks sharing one image orientation.

    :param slice_stack: list of slice dicts
    :return: list of slice stacks; when only one orientation is present the
        original list itself is returned as the single group
    """
    orientations = list({tuple(s['metadata']['ImageOrientationPatient'])
                         for s in slice_stack})
    if len(orientations) == 1:
        return [slice_stack]
    groups = [[] for _ in orientations]
    for s in slice_stack:
        key = tuple(s['metadata']['ImageOrientationPatient'])
        groups[orientations.index(key)].append(s)
    return groups
def plot_roi(slice_group, roi_center, roi_radii):
    """Animate the middle slice of *slice_group* with the ROI box dimmed.

    Debug helper only; it blocks on plt.show().

    NOTE(review): depends on matplotlib names `plt` and `animation`, which
    are not imported anywhere in the visible module -- confirm the caller's
    environment provides them before re-enabling this helper.
    """
    x_roi_center, y_roi_center = roi_center[0], roi_center[1]
    x_roi_radius, y_roi_radius = roi_radii[0], roi_radii[1]
    print('nslices', len(slice_group))

    # BUG FIX: len(...) / 2 is a float under Python 3 and raises TypeError
    # when used as a list index; use floor division to pick the middle slice.
    for dslice in [slice_group[len(slice_group) // 2]]:
        outdata = dslice['data']
        roi_mask = np.zeros_like(outdata[0])
        roi_mask[x_roi_center - x_roi_radius:x_roi_center + x_roi_radius,
                 y_roi_center - y_roi_radius:y_roi_center + y_roi_radius] = 1
        # The ROI is dimmed twice (0.4 * 0.4 = 0.16 of original intensity);
        # the duplicated line looks accidental but is kept so the visual
        # output stays identical.
        outdata[:, roi_mask > 0.5] = 0.4 * outdata[:, roi_mask > 0.5]
        outdata[:, roi_mask > 0.5] = 0.4 * outdata[:, roi_mask > 0.5]

        fig = plt.figure(1)
        fig.canvas.set_window_title(dslice['patient_id'] + dslice['slice_id'])

        def init_out():
            im.set_data(outdata[0])

        def animate_out(i):
            im.set_data(outdata[i])
            return im

        im = fig.gca().imshow(outdata[0], cmap='gist_gray_r', vmin=0, vmax=255)
        # Keep a reference so the animation is not garbage-collected before
        # plt.show() runs.
        anim = animation.FuncAnimation(fig, animate_out, init_func=init_out, frames=30, interval=50)
        plt.show()
def get_slice2roi(data_path, plot=False):
    """Estimate a ROI for every slice of every patient under *data_path*.

    For each patient the short-axis stacks get their ROI from the
    Hough-circle search (extract_roi); the resulting centers are then
    projected onto the 4ch/2ch slices to derive their ROI.  The mapping

        {patient_id: {slice_id: {'roi_center': (i, j),
                                 'roi_radii': (ri, rj)}}}

    is pickled to '<dataset>_slice2roi_joni.pkl' and returned.
    """
    patient_paths = sorted(glob.glob(data_path + '*/study'))
    slice2roi = {}
    for p in patient_paths:
        patient_data = get_patient_data(p)
        sorted_slices = sort_slices(patient_data)
        grouped_slices = group_slices(sorted_slices)

        # Pick out the 4-chamber and 2-chamber views, if present.
        ch_data = get_patient_ch_data(p)
        ch4, ch2 = None, None
        for data in ch_data:
            if data['slice_id'].startswith("4"):
                ch4 = data
            elif data['slice_id'].startswith("2"):
                ch2 = data

        # init patient dict
        pid = sorted_slices[0]['patient_id']
        print("processing patient %s" % pid)
        slice2roi[pid] = {}

        # pixel spacing doesn't change within one patient
        pixel_spacing = sorted_slices[0]['metadata']['PixelSpacing'][0]

        for slice_group in grouped_slices:
            try:
                roi_center, roi_radii = extract_roi(slice_group, pixel_spacing)
            # BUG FIX: the bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; catch ordinary errors only.
            except Exception:
                print('Could not find ROI')
                roi_center, roi_radii = None, None
            print(roi_center, roi_radii)

            if plot and roi_center and roi_radii:
                pass
                # plot_roi(slice_group, roi_center, roi_radii)

            for s in slice_group:
                sid = s['slice_id']
                slice2roi[pid][sid] = {'roi_center': roi_center, 'roi_radii': roi_radii}

        # Project the SAX roi_centers onto the 4ch and 2ch slices.
        # NOTE(review): when extract_roi failed above, roi_center is None and
        # the float() calls below would raise -- same as the original code;
        # confirm whether failed groups should be skipped here.
        ch4_centers = []
        ch2_centers = []
        for sax_slice in sorted_slices:  # renamed from `slice` (shadowed builtin)
            sid = sax_slice['slice_id']
            roi_center = slice2roi[pid][sid]['roi_center']
            metadata_source = sax_slice['metadata']
            hough_roi_center = (float(roi_center[0]) / metadata_source['Rows'],
                                float(roi_center[1]) / metadata_source['Columns'])
            if ch4 is not None:
                metadata_target = ch4['metadata']
                result = orthogonal_projection_on_slice(hough_roi_center, metadata_source, metadata_target)
                ch_roi_center = [float(result[0]) * metadata_target['Rows'],
                                 float(result[1]) * metadata_target['Columns']]
                ch4_centers.append(ch_roi_center)
            if ch2 is not None:
                metadata_target = ch2['metadata']
                result = orthogonal_projection_on_slice(hough_roi_center, metadata_source, metadata_target)
                ch_roi_center = [float(result[0]) * metadata_target['Rows'],
                                 float(result[1]) * metadata_target['Columns']]
                ch2_centers.append(ch_roi_center)

        # The channel ROI is the mean of the projected centers; its radius is
        # the largest per-component deviation from that mean.
        if ch4 is not None:
            centers = np.array(ch4_centers)
            ch4_result_center = np.mean(centers, axis=0)
            ch4_result_radius = np.max(np.sqrt((centers - ch4_result_center) ** 2))
            sid = ch4['slice_id']
            slice2roi[pid][sid] = {'roi_center': tuple(ch4_result_center), 'roi_radii': (ch4_result_radius, ch4_result_radius)}
        if ch2 is not None:
            centers = np.array(ch2_centers)
            ch2_result_center = np.mean(centers, axis=0)
            ch2_result_radius = np.max(np.sqrt((centers - ch2_result_center) ** 2))
            sid = ch2['slice_id']
            slice2roi[pid][sid] = {'roi_center': tuple(ch2_result_center), 'roi_radii': (ch2_result_radius, ch2_result_radius)}

    filename = data_path.split('/')[-1] + '_slice2roi_joni.pkl'
    # BUG FIX: pickle output must be written to a binary-mode file under
    # Python 3; the context manager also flushes/closes deterministically.
    with open(filename, 'wb') as f:
        pickle.dump(slice2roi, f)
    print('saved to ', filename)
    return slice2roi
if __name__ == '__main__':
    # Build a CLI parser; the --config option is currently disabled and the
    # parsed args are unused, so this mainly rejects unexpected arguments.
    parser = argparse.ArgumentParser(description=__doc__)
    required = parser.add_argument_group('required arguments')
    #required.add_argument('-c', '--config',
    #                      help='configuration to run',
    #                      required=True)
    args = parser.parse_args()

    # Compute ROIs for both the train and test pickle sets, teeing stdout
    # to a log file for the whole run.
    data_paths = [PKL_TRAIN_DATA_PATH, PKL_TEST_DATA_PATH]
    log_path = LOGS_PATH + "generate_roi.log"
    with print_to_file(log_path):
        for d in data_paths:
            get_slice2roi(d, plot=True)
        print("log saved to '%s'" % log_path)
| 39.719577 | 128 | 0.566405 |
203e47ffad8ccd032e218b753cb803f605a63148 | 1,346 | py | Python | third_party/android_deps/libs/android_arch_lifecycle_viewmodel/3pp/fetch.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575 | 2015-06-18T23:58:20.000Z | 2022-03-23T09:32:39.000Z | third_party/android_deps/libs/android_arch_lifecycle_viewmodel/3pp/fetch.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | third_party/android_deps/libs/android_arch_lifecycle_viewmodel/3pp/fetch.py | DamieFC/chromium | 54ce2d3c77723697efd22cfdb02aea38f9dfa25c | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52 | 2015-07-14T10:40:50.000Z | 2022-03-15T01:11:49.000Z | #!/usr/bin/env python
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
from __future__ import print_function
import argparse
import json
import os
# Pinned Maven artifact mirrored by this 3pp package; all three constants
# must stay in sync when the dependency is rolled.
_FILE_URL = 'https://maven.google.com/android/arch/lifecycle/viewmodel/1.1.1/viewmodel-1.1.1.aar'
_FILE_NAME = 'viewmodel-1.1.1.aar'
_FILE_VERSION = '1.1.1'
def do_latest():
    """Handle the 3pp 'latest' action: print the pinned version string."""
    print(_FILE_VERSION)
def get_download_url(version):
    """Handle the 3pp 'get_url' action: print a JSON partial manifest.

    *version* is accepted for interface compatibility with the 3pp runner,
    but the download location is fully pinned by the module constants.
    """
    # Derive the archive extension from the pinned URL.
    for ext in ('.jar', '.aar'):
        if _FILE_URL.endswith(ext):
            break
    else:
        raise Exception('Unsupported extension for %s' % _FILE_URL)
    manifest = {
        'url': [_FILE_URL],
        'name': [_FILE_NAME],
        'ext': ext,
    }
    print(json.dumps(manifest))
def main():
    """CLI entry point: dispatch the 3pp 'latest' / 'get_url' subcommands."""
    parser = argparse.ArgumentParser()
    subcommands = parser.add_subparsers()

    latest_cmd = subcommands.add_parser("latest")
    latest_cmd.set_defaults(func=lambda _opts: do_latest())

    get_url_cmd = subcommands.add_parser("get_url")
    get_url_cmd.set_defaults(
        func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))

    options = parser.parse_args()
    options.func(options)


if __name__ == '__main__':
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.