hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c30c09f1bd3070f07f121e14a73ab704dad99b4 | 106 | py | Python | achievements/admin.py | peterkrauz/rpg-achievements-django | c65ec12237b2bee9f12d259fedd5f18934ff6c96 | [
"Apache-2.0"
] | 1 | 2021-08-31T10:52:55.000Z | 2021-08-31T10:52:55.000Z | achievements/admin.py | peterkrauz/rpg-achievements-django | c65ec12237b2bee9f12d259fedd5f18934ff6c96 | [
"Apache-2.0"
] | null | null | null | achievements/admin.py | peterkrauz/rpg-achievements-django | c65ec12237b2bee9f12d259fedd5f18934ff6c96 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from achievements import models
admin.site.register(models.Achievement)
| 21.2 | 39 | 0.849057 | from django.contrib import admin
from achievements import models
admin.site.register(models.Achievement)
| true | true |
1c30c1d5178a357b7d1909bcc019a7c6fe827b55 | 1,929 | py | Python | Exareme-Docker/src/exareme/exareme-tools/madis/src/functions/vtable/coltypes.py | tchamabe1979/exareme | 462983e4feec7808e1fd447d02901502588a8879 | [
"MIT"
] | null | null | null | Exareme-Docker/src/exareme/exareme-tools/madis/src/functions/vtable/coltypes.py | tchamabe1979/exareme | 462983e4feec7808e1fd447d02901502588a8879 | [
"MIT"
] | null | null | null | Exareme-Docker/src/exareme/exareme-tools/madis/src/functions/vtable/coltypes.py | tchamabe1979/exareme | 462983e4feec7808e1fd447d02901502588a8879 | [
"MIT"
] | null | null | null | """
.. function:: coltypes(query:None)
Returns the input query results column names and types.
:Returned table schema:
- *column* text
Column name of input query *schema*
- *type* text
Type of column
Examples:
>>> sql("coltypes select 5 as vt")
column | type
-------------
vt | None
Applying coltypes in the result of virtual table func:`typing` function in the same query
>>> sql("coltypes typing 'vt:int' select 5 as vt")
column | type
-------------
vt | int
.. doctest::
:hide:
>>> sql("select * from (coltypes typing 'text' select '10' ) as a, (coltypes typing 'int' select '10' ) as b where a.column=b.column")
column | type | column | type
-----------------------------
'10' | text | '10' | int
"""
import functions
import vtbase
registered = True
class ColTypes(vtbase.VT):
def VTiter(self, *parsedArgs, **envars):
largs, dictargs = self.full_parse(parsedArgs)
if 'query' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1], "No query argument ")
query = dictargs['query']
connection = envars['db']
yield (('column', 'text'), ('type', 'text'))
cur = connection.cursor()
execit = cur.execute(query, parse=False)
try:
samplerow = execit.next()
except StopIteration:
pass
vals = cur.getdescriptionsafe()
cur.close()
for i in vals:
yield i
def Source():
return vtbase.VTGenerator(ColTypes)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
| 22.172414 | 138 | 0.572836 |
import functions
import vtbase
registered = True
class ColTypes(vtbase.VT):
def VTiter(self, *parsedArgs, **envars):
largs, dictargs = self.full_parse(parsedArgs)
if 'query' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1], "No query argument ")
query = dictargs['query']
connection = envars['db']
yield (('column', 'text'), ('type', 'text'))
cur = connection.cursor()
execit = cur.execute(query, parse=False)
try:
samplerow = execit.next()
except StopIteration:
pass
vals = cur.getdescriptionsafe()
cur.close()
for i in vals:
yield i
def Source():
return vtbase.VTGenerator(ColTypes)
if not ('.' in __name__):
import sys
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
| true | true |
1c30c39d0a2412a67147274fef0892a00df998f4 | 215 | py | Python | codes/prob_distribution/__init__.py | NCEPU-Sunrise/2021-MachineLearningGroup | d47a73fa1627f0452ed9e39aacf72e925d25ee73 | [
"MIT"
] | 3 | 2021-11-02T06:07:24.000Z | 2022-03-14T07:44:24.000Z | codes/prob_distribution/__init__.py | NCEPU-Sunrise/2021-MachineLearningGroup | d47a73fa1627f0452ed9e39aacf72e925d25ee73 | [
"MIT"
] | null | null | null | codes/prob_distribution/__init__.py | NCEPU-Sunrise/2021-MachineLearningGroup | d47a73fa1627f0452ed9e39aacf72e925d25ee73 | [
"MIT"
] | 1 | 2022-01-29T09:09:58.000Z | 2022-01-29T09:09:58.000Z | from prob_distribution.random_variable import RandomVariable
from prob_distribution.gaussian import Gaussian
from prob_distribution.gamma import Gamma
__all__ = [
"RandomVariable",
"Gaussian",
"Gamma"
] | 23.888889 | 60 | 0.790698 | from prob_distribution.random_variable import RandomVariable
from prob_distribution.gaussian import Gaussian
from prob_distribution.gamma import Gamma
__all__ = [
"RandomVariable",
"Gaussian",
"Gamma"
] | true | true |
1c30c40d48654013e2d57634b5b6cb49869d591b | 5,343 | py | Python | Core/third_party/JavaScriptCore/inspector/scripts/codegen/objc_generator_templates.py | InfiniteSynthesis/lynx-native | 022e277ee6767f5b668269a17b1679072cf7c3d6 | [
"MIT"
] | 677 | 2017-09-23T16:03:12.000Z | 2022-03-26T08:32:10.000Z | Core/third_party/JavaScriptCore/inspector/scripts/codegen/objc_generator_templates.py | InfiniteSynthesis/lynx-native | 022e277ee6767f5b668269a17b1679072cf7c3d6 | [
"MIT"
] | 9 | 2020-04-18T18:47:18.000Z | 2020-04-18T18:52:41.000Z | Core/third_party/JavaScriptCore/inspector/scripts/codegen/objc_generator_templates.py | InfiniteSynthesis/lynx-native | 022e277ee6767f5b668269a17b1679072cf7c3d6 | [
"MIT"
] | 92 | 2017-09-21T14:21:27.000Z | 2022-03-25T13:29:42.000Z | #!/usr/bin/env python
#
# Copyright (c) 2014 Apple Inc. All rights reserved.
# Copyright (c) 2014 University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# Generator templates, which can be filled with string.Template.
# Following are classes that fill the templates from the typechecked model.
class ObjCGeneratorTemplates:
HeaderPrelude = (
"""#import <Foundation/Foundation.h>
${includes}
""")
HeaderPostlude = (
"""""")
TypeConversionsHeaderPrelude = (
"""${includes}
namespace Inspector {""")
TypeConversionsHeaderPostlude = (
"""} // namespace Inspector
""")
GenericHeaderPrelude = (
"""${includes}""")
GenericHeaderPostlude = (
"""""")
TypeConversionsHeaderStandard = (
"""template<typename ObjCEnumType>
std::optional<ObjCEnumType> fromProtocolString(const String& value);""")
BackendDispatcherHeaderPrelude = (
"""${includes}
${forwardDeclarations}
namespace Inspector {
""")
BackendDispatcherHeaderPostlude = (
"""} // namespace Inspector
""")
BackendDispatcherImplementationPrelude = (
"""#import "config.h"
#import ${primaryInclude}
${secondaryIncludes}
namespace Inspector {""")
BackendDispatcherImplementationPostlude = (
"""} // namespace Inspector
""")
ImplementationPrelude = (
"""#import ${primaryInclude}
${secondaryIncludes}
using namespace Inspector;""")
ImplementationPostlude = (
"""""")
BackendDispatcherHeaderDomainHandlerInterfaceDeclaration = (
"""class Alternate${domainName}BackendDispatcher : public AlternateBackendDispatcher {
public:
virtual ~Alternate${domainName}BackendDispatcher() { }
${commandDeclarations}
};""")
BackendDispatcherHeaderDomainHandlerObjCDeclaration = (
"""class ObjCInspector${domainName}BackendDispatcher final : public Alternate${domainName}BackendDispatcher {
public:
ObjCInspector${domainName}BackendDispatcher(id<${objcPrefix}${domainName}DomainHandler> handler) { m_delegate = handler; }
${commandDeclarations}
private:
RetainPtr<id<${objcPrefix}${domainName}DomainHandler>> m_delegate;
};""")
BackendDispatcherHeaderDomainHandlerImplementation = (
"""void ObjCInspector${domainName}BackendDispatcher::${commandName}(${parameters})
{
id errorCallback = ^(NSString *error) {
backendDispatcher()->reportProtocolError(requestId, BackendDispatcher::ServerError, error);
backendDispatcher()->sendPendingErrors();
};
${successCallback}
${conversions}
${invocation}
}
""")
ConfigurationCommandProperty = (
"""@property (nonatomic, retain, setter=set${domainName}Handler:) id<${objcPrefix}${domainName}DomainHandler> ${variableNamePrefix}Handler;""")
ConfigurationEventProperty = (
"""@property (nonatomic, readonly) ${objcPrefix}${domainName}DomainEventDispatcher *${variableNamePrefix}EventDispatcher;""")
ConfigurationCommandPropertyImplementation = (
"""- (void)set${domainName}Handler:(id<${objcPrefix}${domainName}DomainHandler>)handler
{
if (handler == _${variableNamePrefix}Handler)
return;
[_${variableNamePrefix}Handler release];
_${variableNamePrefix}Handler = [handler retain];
auto alternateDispatcher = std::make_unique<ObjCInspector${domainName}BackendDispatcher>(handler);
auto alternateAgent = std::make_unique<AlternateDispatchableAgent<${domainName}BackendDispatcher, Alternate${domainName}BackendDispatcher>>(ASCIILiteral("${domainName}"), *_controller, WTFMove(alternateDispatcher));
_controller->appendExtraAgent(WTFMove(alternateAgent));
}
- (id<${objcPrefix}${domainName}DomainHandler>)${variableNamePrefix}Handler
{
return _${variableNamePrefix}Handler;
}""")
ConfigurationGetterImplementation = (
"""- (${objcPrefix}${domainName}DomainEventDispatcher *)${variableNamePrefix}EventDispatcher
{
if (!_${variableNamePrefix}EventDispatcher)
_${variableNamePrefix}EventDispatcher = [[${objcPrefix}${domainName}DomainEventDispatcher alloc] initWithController:_controller];
return _${variableNamePrefix}EventDispatcher;
}""")
| 34.25 | 219 | 0.740408 |
class ObjCGeneratorTemplates:
HeaderPrelude = (
"""#import <Foundation/Foundation.h>
${includes}
""")
HeaderPostlude = (
"""""")
TypeConversionsHeaderPrelude = (
"""${includes}
namespace Inspector {""")
TypeConversionsHeaderPostlude = (
"""} // namespace Inspector
""")
GenericHeaderPrelude = (
"""${includes}""")
GenericHeaderPostlude = (
"""""")
TypeConversionsHeaderStandard = (
"""template<typename ObjCEnumType>
std::optional<ObjCEnumType> fromProtocolString(const String& value);""")
BackendDispatcherHeaderPrelude = (
"""${includes}
${forwardDeclarations}
namespace Inspector {
""")
BackendDispatcherHeaderPostlude = (
"""} // namespace Inspector
""")
BackendDispatcherImplementationPrelude = (
"""#import "config.h"
#import ${primaryInclude}
${secondaryIncludes}
namespace Inspector {""")
BackendDispatcherImplementationPostlude = (
"""} // namespace Inspector
""")
ImplementationPrelude = (
"""#import ${primaryInclude}
${secondaryIncludes}
using namespace Inspector;""")
ImplementationPostlude = (
"""""")
BackendDispatcherHeaderDomainHandlerInterfaceDeclaration = (
"""class Alternate${domainName}BackendDispatcher : public AlternateBackendDispatcher {
public:
virtual ~Alternate${domainName}BackendDispatcher() { }
${commandDeclarations}
};""")
BackendDispatcherHeaderDomainHandlerObjCDeclaration = (
"""class ObjCInspector${domainName}BackendDispatcher final : public Alternate${domainName}BackendDispatcher {
public:
ObjCInspector${domainName}BackendDispatcher(id<${objcPrefix}${domainName}DomainHandler> handler) { m_delegate = handler; }
${commandDeclarations}
private:
RetainPtr<id<${objcPrefix}${domainName}DomainHandler>> m_delegate;
};""")
BackendDispatcherHeaderDomainHandlerImplementation = (
"""void ObjCInspector${domainName}BackendDispatcher::${commandName}(${parameters})
{
id errorCallback = ^(NSString *error) {
backendDispatcher()->reportProtocolError(requestId, BackendDispatcher::ServerError, error);
backendDispatcher()->sendPendingErrors();
};
${successCallback}
${conversions}
${invocation}
}
""")
ConfigurationCommandProperty = (
"""@property (nonatomic, retain, setter=set${domainName}Handler:) id<${objcPrefix}${domainName}DomainHandler> ${variableNamePrefix}Handler;""")
ConfigurationEventProperty = (
"""@property (nonatomic, readonly) ${objcPrefix}${domainName}DomainEventDispatcher *${variableNamePrefix}EventDispatcher;""")
ConfigurationCommandPropertyImplementation = (
"""- (void)set${domainName}Handler:(id<${objcPrefix}${domainName}DomainHandler>)handler
{
if (handler == _${variableNamePrefix}Handler)
return;
[_${variableNamePrefix}Handler release];
_${variableNamePrefix}Handler = [handler retain];
auto alternateDispatcher = std::make_unique<ObjCInspector${domainName}BackendDispatcher>(handler);
auto alternateAgent = std::make_unique<AlternateDispatchableAgent<${domainName}BackendDispatcher, Alternate${domainName}BackendDispatcher>>(ASCIILiteral("${domainName}"), *_controller, WTFMove(alternateDispatcher));
_controller->appendExtraAgent(WTFMove(alternateAgent));
}
- (id<${objcPrefix}${domainName}DomainHandler>)${variableNamePrefix}Handler
{
return _${variableNamePrefix}Handler;
}""")
ConfigurationGetterImplementation = (
"""- (${objcPrefix}${domainName}DomainEventDispatcher *)${variableNamePrefix}EventDispatcher
{
if (!_${variableNamePrefix}EventDispatcher)
_${variableNamePrefix}EventDispatcher = [[${objcPrefix}${domainName}DomainEventDispatcher alloc] initWithController:_controller];
return _${variableNamePrefix}EventDispatcher;
}""")
| true | true |
1c30c4259da911f2e2560a7b7b56346dcdfdf1da | 137 | py | Python | venv/Lib/site-packages/pybrain3/optimization/distributionbased/__init__.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pybrain3/optimization/distributionbased/__init__.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pybrain3/optimization/distributionbased/__init__.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | [
"MIT"
] | null | null | null | from .cmaes import CMAES
from .fem import FEM
from .nes import ExactNES, OriginalNES
from .ves import VanillaGradientEvolutionStrategies
| 27.4 | 51 | 0.839416 | from .cmaes import CMAES
from .fem import FEM
from .nes import ExactNES, OriginalNES
from .ves import VanillaGradientEvolutionStrategies
| true | true |
1c30c42e1fe0ce6adebca7ac36e11be2a614f315 | 749 | py | Python | src/my_blog/urls.py | lahhrachmoh/blog-django-ar | 07f3716a742ced2b30bc2bc64a316e57eabf3322 | [
"bzip2-1.0.6"
] | 1 | 2020-01-03T07:27:11.000Z | 2020-01-03T07:27:11.000Z | src/my_blog/urls.py | notme20n/blog-django-ar | 6b84b6d2d7e7ade2b55b4cff89d9685740c05696 | [
"bzip2-1.0.6"
] | null | null | null | src/my_blog/urls.py | notme20n/blog-django-ar | 6b84b6d2d7e7ade2b55b4cff89d9685740c05696 | [
"bzip2-1.0.6"
] | null | null | null | """my_blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| 34.045455 | 77 | 0.708945 | from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| true | true |
1c30c4c66c173dd9ac779d41c7e082d5889ea704 | 2,264 | py | Python | bcs-ui/backend/templatesets/legacy_apps/configuration/migrations/0023_auto_20180312_1623.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 599 | 2019-06-25T03:20:46.000Z | 2022-03-31T12:14:33.000Z | bcs-ui/backend/templatesets/legacy_apps/configuration/migrations/0023_auto_20180312_1623.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 537 | 2019-06-27T06:03:44.000Z | 2022-03-31T12:10:01.000Z | bcs-ui/backend/templatesets/legacy_apps/configuration/migrations/0023_auto_20180312_1623.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 214 | 2019-06-25T03:26:05.000Z | 2022-03-31T07:52:03.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 1.11.5 on 2018-03-12 08:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('configuration', '0023_auto_20180313_1614'),
]
operations = [
migrations.AddField(
model_name='k8sdaemonset',
name='deploy_tag',
field=models.CharField(
default='', help_text='每次保存时会生成新的应用记录,用deploy_tag来记录与其他资源的关联关系', max_length=32, verbose_name='pod 标识'),
),
migrations.AddField(
model_name='k8sjob',
name='deploy_tag',
field=models.CharField(
default='', help_text='每次保存时会生成新的应用记录,用deploy_tag来记录与其他资源的关联关系', max_length=32, verbose_name='pod 标识'),
),
migrations.AddField(
model_name='k8sstatefulset',
name='deploy_tag',
field=models.CharField(
default='', help_text='每次保存时会生成新的应用记录,用deploy_tag来记录与其他资源的关联关系', max_length=32, verbose_name='pod 标识'),
),
migrations.AlterField(
model_name='k8sservice',
name='deploy_tag_list',
field=models.TextField(
help_text='可以关联多个Pod,json格式存储,选填', verbose_name='关联的Deployment ID'),
),
migrations.AlterField(
model_name='k8sstatefulset',
name='service_tag',
field=models.CharField(
max_length=32, verbose_name='关联的K8sService 标识'),
),
]
| 38.372881 | 119 | 0.659011 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('configuration', '0023_auto_20180313_1614'),
]
operations = [
migrations.AddField(
model_name='k8sdaemonset',
name='deploy_tag',
field=models.CharField(
default='', help_text='每次保存时会生成新的应用记录,用deploy_tag来记录与其他资源的关联关系', max_length=32, verbose_name='pod 标识'),
),
migrations.AddField(
model_name='k8sjob',
name='deploy_tag',
field=models.CharField(
default='', help_text='每次保存时会生成新的应用记录,用deploy_tag来记录与其他资源的关联关系', max_length=32, verbose_name='pod 标识'),
),
migrations.AddField(
model_name='k8sstatefulset',
name='deploy_tag',
field=models.CharField(
default='', help_text='每次保存时会生成新的应用记录,用deploy_tag来记录与其他资源的关联关系', max_length=32, verbose_name='pod 标识'),
),
migrations.AlterField(
model_name='k8sservice',
name='deploy_tag_list',
field=models.TextField(
help_text='可以关联多个Pod,json格式存储,选填', verbose_name='关联的Deployment ID'),
),
migrations.AlterField(
model_name='k8sstatefulset',
name='service_tag',
field=models.CharField(
max_length=32, verbose_name='关联的K8sService 标识'),
),
]
| true | true |
1c30c533c03ae5855c6d2ffabbad7757bb3636a2 | 3,817 | py | Python | backend/apps/volontulo/urls.py | ponycalypsenow/volontulo | 8f7886aa3c8ea5ec0ca84711a089bea60fb69598 | [
"MIT"
] | null | null | null | backend/apps/volontulo/urls.py | ponycalypsenow/volontulo | 8f7886aa3c8ea5ec0ca84711a089bea60fb69598 | [
"MIT"
] | null | null | null | backend/apps/volontulo/urls.py | ponycalypsenow/volontulo | 8f7886aa3c8ea5ec0ca84711a089bea60fb69598 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
.. module:: urls
"""
from django.conf.urls import include
from django.conf.urls import url
from rest_framework.routers import DefaultRouter
from apps.volontulo import views
from apps.volontulo.views import api as api_views
from apps.volontulo.views import auth as auth_views
from apps.volontulo.views import offers as offers_views
from apps.volontulo.views import organizations as orgs_views
router = DefaultRouter()
router.register(r'offers', api_views.OfferViewSet, base_name='offer')
router.register(r'organizations', api_views.OrganizationViewSet)
handler404 = 'apps.volontulo.views.page_not_found'
handler500 = 'apps.volontulo.views.server_error'
urlpatterns = [
url(r'^$', views.homepage_redirect, name='homepage_redirect'),
# api:
url(r'^api/', include(router.urls)),
url(
r'^api/login',
api_views.login_view,
name='api_login'
),
url(
r'^api/logout',
api_views.logout_view,
name='api_logout'
),
url(
r'^api/current-user',
api_views.current_user,
name='current_user'
),
# homepage:
url(r'^o$', views.homepage, name='homepage'),
# login and loggged user space:
url(r'^o/login$', auth_views.login, name='login'),
url(r'^o/logout$', auth_views.logout, name='logout'),
url(r'^o/register$', auth_views.Register.as_view(), name='register'),
url(
r'^o/activate/(?P<uuid>[-0-9A-Za-z]+)$',
auth_views.activate,
name='activate'
),
url(
r'^o/password-reset$',
auth_views.password_reset,
name='password_reset'
),
url(
r'^o/password-reset/(?P<uidb64>[0-9A-Za-z]+)/(?P<token>.+)$',
auth_views.password_reset_confirm,
name='password_reset_confirm'
),
url(r'^o/me$', views.logged_user_profile, name='logged_user_profile'),
# me/edit
# me/settings
# offers' namesapce:
url(r'^o/offers$', offers_views.OffersList.as_view(), name='offers_list'),
url(
r'^o/offers/delete/(?P<pk>[0-9]+)$',
offers_views.OffersDelete.as_view(),
name='offers_delete'
),
url(
r'^o/offers/accept/(?P<pk>[0-9]+)$',
offers_views.OffersAccept.as_view(),
name='offers_accept'
),
url(
r'^o/offers/create$',
offers_views.OffersCreate.as_view(),
name='offers_create'
),
url(
r'^o/offers/reorder/(?P<id_>[0-9]+)?$',
offers_views.OffersReorder.as_view(),
name='offers_reorder'
),
url(
r'^o/offers/archived$',
offers_views.OffersArchived.as_view(),
name='offers_archived'
),
url(
r'^o/offers/(?P<slug>[\w-]+)/(?P<id_>[0-9]+)/edit$',
offers_views.OffersEdit.as_view(),
name='offers_edit'
),
url(
r'^o/offers/(?P<slug>[\w-]+)/(?P<id_>[0-9]+)/join$',
offers_views.OffersJoin.as_view(),
name='offers_join'
),
# offers/filter
# users' namesapce:
# users
# users/filter
# users/slug-id
# users/slug-id/contact
# organizations' namespace:
url(
r'^o/organizations$',
orgs_views.organizations_list,
name='organizations_list'
),
url(
r'^o/organizations/create$',
orgs_views.OrganizationsCreate.as_view(),
name='organizations_create',
),
url(
r'^o/organizations/(?P<slug>[\w-]+)/(?P<id_>[0-9]+)$',
orgs_views.organization_view,
name='organization_view'
),
url(
r'^o/organizations/(?P<slug>[\w-]+)/(?P<id_>[0-9]+)/edit$',
orgs_views.organization_form,
name='organization_form'
),
# organizations/filter
url(
r'^o/contact$',
views.contact_form,
name='contact_form'
),
]
| 26.143836 | 78 | 0.595756 |
from django.conf.urls import include
from django.conf.urls import url
from rest_framework.routers import DefaultRouter
from apps.volontulo import views
from apps.volontulo.views import api as api_views
from apps.volontulo.views import auth as auth_views
from apps.volontulo.views import offers as offers_views
from apps.volontulo.views import organizations as orgs_views
router = DefaultRouter()
router.register(r'offers', api_views.OfferViewSet, base_name='offer')
router.register(r'organizations', api_views.OrganizationViewSet)
handler404 = 'apps.volontulo.views.page_not_found'
handler500 = 'apps.volontulo.views.server_error'
urlpatterns = [
url(r'^$', views.homepage_redirect, name='homepage_redirect'),
url(r'^api/', include(router.urls)),
url(
r'^api/login',
api_views.login_view,
name='api_login'
),
url(
r'^api/logout',
api_views.logout_view,
name='api_logout'
),
url(
r'^api/current-user',
api_views.current_user,
name='current_user'
),
url(r'^o$', views.homepage, name='homepage'),
url(r'^o/login$', auth_views.login, name='login'),
url(r'^o/logout$', auth_views.logout, name='logout'),
url(r'^o/register$', auth_views.Register.as_view(), name='register'),
url(
r'^o/activate/(?P<uuid>[-0-9A-Za-z]+)$',
auth_views.activate,
name='activate'
),
url(
r'^o/password-reset$',
auth_views.password_reset,
name='password_reset'
),
url(
r'^o/password-reset/(?P<uidb64>[0-9A-Za-z]+)/(?P<token>.+)$',
auth_views.password_reset_confirm,
name='password_reset_confirm'
),
url(r'^o/me$', views.logged_user_profile, name='logged_user_profile'),
url(r'^o/offers$', offers_views.OffersList.as_view(), name='offers_list'),
url(
r'^o/offers/delete/(?P<pk>[0-9]+)$',
offers_views.OffersDelete.as_view(),
name='offers_delete'
),
url(
r'^o/offers/accept/(?P<pk>[0-9]+)$',
offers_views.OffersAccept.as_view(),
name='offers_accept'
),
url(
r'^o/offers/create$',
offers_views.OffersCreate.as_view(),
name='offers_create'
),
url(
r'^o/offers/reorder/(?P<id_>[0-9]+)?$',
offers_views.OffersReorder.as_view(),
name='offers_reorder'
),
url(
r'^o/offers/archived$',
offers_views.OffersArchived.as_view(),
name='offers_archived'
),
url(
r'^o/offers/(?P<slug>[\w-]+)/(?P<id_>[0-9]+)/edit$',
offers_views.OffersEdit.as_view(),
name='offers_edit'
),
url(
r'^o/offers/(?P<slug>[\w-]+)/(?P<id_>[0-9]+)/join$',
offers_views.OffersJoin.as_view(),
name='offers_join'
),
# offers/filter
# users' namesapce:
url(
r'^o/organizations$',
orgs_views.organizations_list,
name='organizations_list'
),
url(
r'^o/organizations/create$',
orgs_views.OrganizationsCreate.as_view(),
name='organizations_create',
),
url(
r'^o/organizations/(?P<slug>[\w-]+)/(?P<id_>[0-9]+)$',
orgs_views.organization_view,
name='organization_view'
),
url(
r'^o/organizations/(?P<slug>[\w-]+)/(?P<id_>[0-9]+)/edit$',
orgs_views.organization_form,
name='organization_form'
),
# organizations/filter
url(
r'^o/contact$',
views.contact_form,
name='contact_form'
),
]
| true | true |
1c30c5701d062fcba7906b54785f7821211aeeed | 4,450 | py | Python | via_httplib.py | arunskurian/delphixpy-examples | c4716edbd22fb238ceed23e989b6e6abd82ac8fc | [
"Apache-2.0"
] | null | null | null | via_httplib.py | arunskurian/delphixpy-examples | c4716edbd22fb238ceed23e989b6e6abd82ac8fc | [
"Apache-2.0"
] | null | null | null | via_httplib.py | arunskurian/delphixpy-examples | c4716edbd22fb238ceed23e989b6e6abd82ac8fc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
from __future__ import print_function
import argparse
import httplib
import json
import os
import sys
import urllib
from argparse import RawTextHelpFormatter
SCRIPT_DESCRIPTION = """
Connect to Delphix engine to run some queries using the http lib library
"""
# globals used by helper functions
dlpx_host = ""
dlpx_user = ""
dlpx_password = ""
dlpx_cookie = None
major = 1 # API Major version number
minor = 6 # API Minor version number
micro = 0 # API micro version number
def main():
global dlpx_host
global dlpx_user
global dlpx_password
global dlpx_cookie
# parse args and print usage message if necessary
parser = argparse.ArgumentParser(
description=SCRIPT_DESCRIPTION, formatter_class=RawTextHelpFormatter
)
parser.add_argument("dlpxHost", help="The target Delphix Engine.", type=str)
parser.add_argument(
"dlpxUser",
help="The username to use to log into the Delphix Engine.",
type=str,
nargs="?",
default="delphix_admin",
)
parser.add_argument(
"dlpxPassword",
help="The password to use to log into the Delphix Engine.",
type=str,
nargs="?",
default="delphix",
)
args = parser.parse_args()
# save args to variables with shorter names
dlpx_host = args.dlpxHost
dlpx_user = args.dlpxUser
dlpx_password = args.dlpxPassword
api_version = {"type": "APIVersion", "major": major, "minor": minor, "micro": micro}
# log into the Delphix Engine in order to set cookie
print("Logging into " + dlpx_host + "...")
log_into_dlpx_engine(api_version)
print("SUCCESS - Logged in as " + dlpx_user)
response = dlpx_get("delphix/user")
for item in response["result"]:
print (item["name"])
# exit with success
sys.exit(0)
def check_response(response):
if response.status is not 200:
sys.stderr.write(
"ERROR: Expected a response of HTTP status 200 (Success) but received something different.\n"
)
sys.stderr.write("Response status: " + str(response.status) + "\n")
sys.stderr.write("Response reason: " + response.reason + "\n")
sys.exit(1)
def dlpx_post_json(resource, payload):
global dlpx_host
global dlpx_user
global dlpx_password
global dlpx_cookie
# encode payload for request
data = json.dumps(payload)
# form http header, add cookie if one has been set
headers = {"Content-type": "application/json"}
if dlpx_cookie is not None:
headers["Cookie"] = dlpx_cookie
# issue request
h = httplib.HTTPConnection(dlpx_host)
h.request("POST", "/resources/json/" + resource, data, headers)
r = h.getresponse()
check_response(r)
# save cookie if one was received
if r.getheader("set-cookie", None) is not None:
dlpx_cookie = r.getheader("set-cookie")
# return response as parsed json
r_payload = r.read()
return json.loads(r_payload)
def dlpx_get(resource, payload=None):
global dlpx_host
global dlpx_user
global dlpx_password
global dlpx_cookie
if payload:
# encode payload for request
data = json.dumps(payload)
else:
data = None
# form http header, add cookie if one has been set
headers = {"Content-type": "application/json"}
if dlpx_cookie is not None:
headers["Cookie"] = dlpx_cookie
# issue request
h = httplib.HTTPConnection(dlpx_host)
h.request("GET", "/resources/json/" + resource, data, headers)
r = h.getresponse()
check_response(r)
# save cookie if one was received
if r.getheader("set-cookie", None) is not None:
dlpx_cookie = r.getheader("set-cookie")
# return response as parsed json
r_payload = r.read()
return json.loads(r_payload)
def log_into_dlpx_engine(api_version):
    """Open an API session at `api_version`, then authenticate.

    Credentials come from the module-level dlpx_user / dlpx_password
    globals; the session cookie is captured as a side effect of
    dlpx_post_json().
    """
    version_payload = {
        "type": "APIVersion",
        "major": api_version["major"],
        "minor": api_version["minor"],
        "micro": api_version["micro"],
    }
    session_payload = {"type": "APISession", "version": version_payload}
    dlpx_post_json("delphix/session", session_payload)
    login_payload = {
        "type": "LoginRequest",
        "username": dlpx_user,
        "password": dlpx_password,
    }
    dlpx_post_json("delphix/login", login_payload)
# Run the CLI entry point only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 26.331361 | 105 | 0.642022 |
from __future__ import print_function
import argparse
import httplib
import json
import os
import sys
import urllib
from argparse import RawTextHelpFormatter
SCRIPT_DESCRIPTION = """
Connect to Delphix engine to run some queries using the http lib library
"""
# Connection settings and session cookie. Filled in by main() and
# dlpx_post_json()/dlpx_get(); shared across functions as module globals.
dlpx_host = ""
dlpx_user = ""
dlpx_password = ""
dlpx_cookie = None
# Delphix API version requested when opening a session.
major = 1
minor = 6
micro = 0
def main():
    """Parse CLI arguments, log into the Delphix Engine, and list user names."""
    global dlpx_host
    global dlpx_user
    global dlpx_password
    global dlpx_cookie
    parser = argparse.ArgumentParser(
        description=SCRIPT_DESCRIPTION, formatter_class=RawTextHelpFormatter
    )
    parser.add_argument("dlpxHost", help="The target Delphix Engine.", type=str)
    parser.add_argument(
        "dlpxUser",
        help="The username to use to log into the Delphix Engine.",
        type=str,
        nargs="?",
        default="delphix_admin",
    )
    parser.add_argument(
        "dlpxPassword",
        help="The password to use to log into the Delphix Engine.",
        type=str,
        nargs="?",
        default="delphix",
    )
    args = parser.parse_args()
    dlpx_host = args.dlpxHost
    dlpx_user = args.dlpxUser
    dlpx_password = args.dlpxPassword
    # API version to request when opening the session (module globals).
    api_version = {"type": "APIVersion", "major": major, "minor": minor, "micro": micro}
    # log into the Delphix Engine in order to set the session cookie
    print("Logging into " + dlpx_host + "...")
    log_into_dlpx_engine(api_version)
    print("SUCCESS - Logged in as " + dlpx_user)
    # list the name of every user known to the engine
    response = dlpx_get("delphix/user")
    for item in response["result"]:
        print (item["name"])
    # exit with success
    sys.exit(0)
def check_response(response):
    """Exit the process with status 1 unless `response` has HTTP status 200.

    Args:
        response: an httplib response object (anything with `.status` and
            `.reason` attributes).
    """
    # Use `!=`, not `is not`: identity comparison against an int literal only
    # works by accident of CPython's small-int caching and is not guaranteed.
    if response.status != 200:
        sys.stderr.write(
            "ERROR: Expected a response of HTTP status 200 (Success) but received something different.\n"
        )
        sys.stderr.write("Response status: " + str(response.status) + "\n")
        sys.stderr.write("Response reason: " + response.reason + "\n")
        sys.exit(1)
def dlpx_post_json(resource, payload):
    """POST `payload` as JSON to /resources/json/<resource>; return parsed reply.

    Saves any session cookie returned by the engine into the global
    `dlpx_cookie`.
    """
    global dlpx_host
    global dlpx_user
    global dlpx_password
    global dlpx_cookie
    body = json.dumps(payload)
    request_headers = {"Content-type": "application/json"}
    if dlpx_cookie is not None:
        request_headers["Cookie"] = dlpx_cookie
    connection = httplib.HTTPConnection(dlpx_host)
    connection.request("POST", "/resources/json/" + resource, body, request_headers)
    reply = connection.getresponse()
    check_response(reply)
    # Remember the session cookie, if the engine handed one back.
    new_cookie = reply.getheader("set-cookie", None)
    if new_cookie is not None:
        dlpx_cookie = new_cookie
    return json.loads(reply.read())
def dlpx_get(resource, payload=None):
    """Issue a GET against /resources/json/<resource>; return parsed JSON.

    Saves any session cookie returned by the engine into the global
    `dlpx_cookie`.

    Args:
        resource: resource path relative to /resources/json/.
        payload: optional JSON-serializable body sent with the GET.

    Returns:
        The response body parsed from JSON.
    """
    global dlpx_host
    global dlpx_user
    global dlpx_password
    global dlpx_cookie
    if payload:
        # encode payload for the request body
        data = json.dumps(payload)
    else:
        data = None
    # form http headers; attach the session cookie if one has been set
    headers = {"Content-type": "application/json"}
    if dlpx_cookie is not None:
        headers["Cookie"] = dlpx_cookie
    # issue the request
    h = httplib.HTTPConnection(dlpx_host)
    h.request("GET", "/resources/json/" + resource, data, headers)
    r = h.getresponse()
    check_response(r)
    # save the cookie if one was received
    if r.getheader("set-cookie", None) is not None:
        dlpx_cookie = r.getheader("set-cookie")
    # return the response as parsed json
    r_payload = r.read()
    return json.loads(r_payload)
def log_into_dlpx_engine(api_version):
    """Open an API session at `api_version` and authenticate.

    Credentials come from the module-level dlpx_user / dlpx_password
    globals; the session cookie is captured by dlpx_post_json().
    """
    requested_version = {
        "type": "APIVersion",
        "major": api_version["major"],
        "minor": api_version["minor"],
        "micro": api_version["micro"],
    }
    dlpx_post_json(
        "delphix/session",
        {"type": "APISession", "version": requested_version},
    )
    credentials = {
        "type": "LoginRequest",
        "username": dlpx_user,
        "password": dlpx_password,
    }
    dlpx_post_json("delphix/login", credentials)
# Run the CLI entry point only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| true | true |
1c30c6b3fbfdced0506206ae79b1ef597bfa332b | 74,059 | py | Python | tensorflow/python/keras/engine/network.py | ajweiss/tensorflow | 2f4d4da52f0c488417d7e917edaf1b7569b5e408 | [
"Apache-2.0"
] | 1 | 2019-06-20T05:02:56.000Z | 2019-06-20T05:02:56.000Z | tensorflow/python/keras/engine/network.py | ajweiss/tensorflow | 2f4d4da52f0c488417d7e917edaf1b7569b5e408 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/engine/network.py | ajweiss/tensorflow | 2f4d4da52f0c488417d7e917edaf1b7569b5e408 | [
"Apache-2.0"
] | 1 | 2018-12-20T02:55:55.000Z | 2018-12-20T02:55:55.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""A `Network` is way to compose layers: the topological form of a `Model`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import os
import weakref
import numpy as np
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import saving
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.io_utils import ask_to_proceed_with_overwrite
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.training.checkpointable import layer_utils as checkpointable_layer_utils
from tensorflow.python.training.checkpointable import util as checkpointable_utils
from tensorflow.python.util import tf_inspect
# pylint: disable=g-import-not-at-top
try:
import h5py
except ImportError:
h5py = None
try:
import yaml
except ImportError:
yaml = None
# pylint: enable=g-import-not-at-top
class Network(base_layer.Layer):
"""A `Network` is a composition of layers.
It is the topological form of a "model". A `Model`
is simply a `Network` with added training routines.
"""
  def __init__(self, *args, **kwargs):  # pylint: disable=super-init-not-called
    """Dispatches to graph-network or subclassed-network initialization.

    A call shaped like `Network(inputs, outputs)` — two positional args, or
    the `inputs`/`outputs` keywords — builds a graph network; any other
    signature initializes a subclassed network.
    """
    # Signature detection
    if (len(args) == 2 or
        len(args) == 1 and 'outputs' in kwargs or
        'inputs' in kwargs and 'outputs' in kwargs):
      # Graph network
      self._init_graph_network(*args, **kwargs)
    else:
      # Subclassed network
      self._init_subclassed_network(**kwargs)
# Several Network methods have "no_automatic_dependency_tracking"
# annotations. Since Network does automatic dependency tracking on attribute
# assignment, including for common data structures such as lists, by default
# we'd have quite a few empty dependencies which users don't care about (or
# would need some way to ignore dependencies automatically, which is confusing
# when applied to user code). Some attributes, such as _layers, would cause
# structural issues (_layers being the place where Layers assigned to tracked
# attributes are stored).
#
# Aside from these aesthetic and structural issues, useless dependencies on
# empty lists shouldn't cause issues; adding or removing them will not break
# checkpoints, but may cause "all Python objects matched" assertions to fail
# (in which case less strict assertions may be substituted if necessary).
  @checkpointable.no_automatic_dependency_tracking
  def _base_init(self, name=None):
    """Initializes state shared by graph and subclassed networks.

    Sets up naming, trainability flags, the weight/update/loss/metric
    containers and the checkpointable saver. Creates no weights itself.

    Args:
      name: Optional network name; auto-generated when None.
    """
    # The following are implemented as property functions:
    # self.trainable_weights
    # self.non_trainable_weights
    # self.input_spec
    # self.losses
    # self.updates
    self._init_set_name(name, zero_based=True)
    self._activity_regularizer = None
    # This acts just like the `trainable` attribute of any layer instance.
    # It does not affect users of the underlying layers, only users of the
    # Network instance.
    self.trainable = True
    self._is_compiled = False
    self._expects_training_arg = False
    # In many internal cases one needs to compute both the model's output
    # and its output mask without relying on `__call__` (which would do both and
    # set mask metadata), but for models, computing the mask requires to
    # recompute the output.
    # Hence the pattern `output = model.call(); mask = model.compute_mask()`
    # would be redundant, and internal logic
    # (susceptible to use `call` directly) should prefer using the
    # internal method `output, mask = _call_and_compute_mask()`.
    # This is True for Sequential networks and graph networks.
    self._compute_output_and_mask_jointly = False
    self.supports_masking = False
    if not hasattr(self, 'optimizer'):
      # Don't reset optimizer if already set.
      self.optimizer = None
    # Private attributes to implement compatibility with Layer.
    self._trainable_weights = []
    self._non_trainable_weights = []
    self._updates = []  # Used in symbolic mode only.
    self._losses = []
    self._eager_losses = []
    # A list of metric instances corresponding to the symbolic metric tensors
    # added using the `add_metric` API.
    self._metrics = []
    # A dictionary that maps metric names to metric result tensors.
    self._metrics_tensors = {}
    self._scope = None  # Never used.
    self._reuse = None  # Never used.
    if context.executing_eagerly():
      self._graph = None
    else:
      self._graph = ops.get_default_graph()  # Used in symbolic mode only.
    # A Network does not create weights of its own, thus has no dtype.
    self._dtype = None
    # All layers in order of horizontal graph traversal.
    # Entries are unique. Includes input and output layers.
    self._layers = []
    # Used in symbolic mode only, only in conjunction with graph-networks
    self._outbound_nodes = []
    self._inbound_nodes = []
    # Weak reference avoids a reference cycle between the saver and `self`.
    self._checkpointable_saver = checkpointable_utils.CheckpointableSaver(
        weakref.ref(self))
  @checkpointable.no_automatic_dependency_tracking
  def _init_graph_network(self, inputs, outputs, name=None):
    """Builds a graph network's topology from `inputs` and `outputs` tensors.

    Validates and normalizes the input/output tensors, maps the full graph of
    layers between them, and records per-layer bookkeeping (nodes by depth,
    feed inputs, call argspecs) used when the network is later called.

    Args:
      inputs: Input tensor or list/tuple of input tensors.
      outputs: Output tensor or list/tuple of output tensors.
      name: Optional network name.
    """
    self._call_convention = (base_layer_utils
                             .CallConvention.EXPLICIT_INPUTS_ARGUMENT)
    # Normalize and set self.inputs, self.outputs.
    if isinstance(inputs, (list, tuple)):
      self.inputs = list(inputs)  # Tensor or list of tensors.
    else:
      self.inputs = [inputs]
    if isinstance(outputs, (list, tuple)):
      self.outputs = list(outputs)
    else:
      self.outputs = [outputs]
    self._validate_graph_inputs_and_outputs()
    self._base_init(name=name)
    self._compute_previous_mask = (
        'mask' in tf_inspect.getfullargspec(self.call).args or
        hasattr(self, 'compute_mask'))
    # A Network does not create weights of its own, thus it is already
    # built.
    self.built = True
    self._compute_output_and_mask_jointly = True
    self._is_graph_network = True
    self._dynamic = False
    self._input_layers = []
    self._output_layers = []
    self._input_coordinates = []
    self._output_coordinates = []
    # This is for performance optimization when calling the Network on new
    # inputs. Every time the Network is called on a set on input tensors,
    # we compute the output tensors, output masks and output shapes in one pass,
    # then cache them here. When any of these outputs is queried later, we
    # retrieve it from there instead of recomputing it.
    self._output_mask_cache = {}
    self._output_tensor_cache = {}
    self._output_shape_cache = {}
    # Build self._output_layers:
    for x in self.outputs:
      layer, node_index, tensor_index = x._keras_history  # pylint: disable=protected-access
      self._output_layers.append(layer)
      self._output_coordinates.append((layer, node_index, tensor_index))
    # Build self._input_layers:
    for x in self.inputs:
      layer, node_index, tensor_index = x._keras_history  # pylint: disable=protected-access
      # It's supposed to be an input layer, so only one node
      # and one tensor output.
      assert node_index == 0
      assert tensor_index == 0
      self._input_layers.append(layer)
      self._input_coordinates.append((layer, node_index, tensor_index))
    # Keep track of the network's nodes and layers.
    nodes, nodes_by_depth, layers, layers_by_depth = _map_graph_network(
        self.inputs, self.outputs)
    self._network_nodes = nodes
    self._nodes_by_depth = nodes_by_depth
    self._layers = layers
    self._layers_by_depth = layers_by_depth
    # Cache each layer's call argspec so it isn't re-inspected on every call.
    self._layer_call_argspecs = {}
    for layer in self._layers:
      self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
    self._track_layers(layers)
    # Create the node linking internal inputs to internal outputs.
    base_layer.Node(
        outbound_layer=self,
        inbound_layers=[],
        node_indices=[],
        tensor_indices=[],
        input_tensors=self.inputs,
        output_tensors=self.outputs)
    # Build self.input_names and self.output_names.
    self.input_names = []
    self.output_names = []
    self._feed_input_names = []
    self._feed_inputs = []
    self._feed_input_shapes = []
    for i, layer in enumerate(self._input_layers):
      self.input_names.append(layer.name)
      if layer.is_placeholder:
        # Placeholder inputs are the ones actually fed at execution time.
        self._feed_input_names.append(layer.name)
        self._feed_input_shapes.append(backend.int_shape(self.inputs[i]))
        self._feed_inputs.append(layer.input)
    for layer in self._output_layers:
      self.output_names.append(layer.name)
@checkpointable.no_automatic_dependency_tracking
def _init_subclassed_network(self, name=None, dynamic=False):
self._base_init(name=name)
self._is_graph_network = False
self._dynamic = dynamic
call_argspec = tf_inspect.getfullargspec(self.call)
if 'training' in call_argspec.args:
self._expects_training_arg = True
else:
self._expects_training_arg = False
self._call_convention = self._determine_call_convention(call_argspec)
self.outputs = []
self.inputs = []
self.built = False
@property
def dynamic(self):
if self._is_graph_network:
return any(layer.dynamic for layer in self.layers)
return self._dynamic or any(layer.dynamic for layer in self.layers)
  def _determine_call_convention(self, call_argspec):
    """Decides how `self.call()` is invoked. See `CallConvention`.

    Args:
      call_argspec: Result of `tf_inspect.getfullargspec(self.call)`.

    Returns:
      One of the `base_layer_utils.CallConvention` values.

    Raises:
      TypeError: If `call` takes both a single positional input argument and
        a separate `inputs` argument, making the intended inputs ambiguous.
    """
    if call_argspec.varargs:
      may_take_single_argument = False
    else:
      try:
        # Note: tf_inspect doesn't raise a TypeError when regular inspect would,
        # so we need to keep in mind that "getcallargs" may have returned
        # something even though we under-specified positional arguments.
        all_args = tf_inspect.getcallargs(self.call, None)
        self_args = set()
        for arg_name, obj in all_args.items():
          if obj is self:
            self_args.add(arg_name)
        may_take_single_argument = True
      except TypeError:
        may_take_single_argument = False
    if may_take_single_argument:
      # A single positional argument (plus "self") is considered equivalent to
      # an "inputs" argument.
      all_positional_args = len(call_argspec.args)
      if call_argspec.defaults is not None:
        all_positional_args -= len(call_argspec.defaults)
      non_self_positional_args = all_positional_args
      for positional_arg_name in call_argspec.args[:all_positional_args]:
        if positional_arg_name in self_args:
          non_self_positional_args -= 1
      if non_self_positional_args == 1:
        if 'inputs' in call_argspec.args[all_positional_args:]:
          raise TypeError(
              "Model.call() takes a single positional argument (to which "
              "inputs are passed by convention) and a separate 'inputs' "
              "argument. Unable to determine which arguments are inputs.")
        return base_layer_utils.CallConvention.SINGLE_POSITIONAL_ARGUMENT
    if 'inputs' in call_argspec.args:
      return base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT
    else:
      return base_layer_utils.CallConvention.POSITIONAL_ARGUMENTS_ARE_INPUTS
def _track_layers(self, layers):
"""Add Checkpointable dependencies on a list of Layers."""
weight_layer_index = 0
for layer_index, layer in enumerate(layers):
if layer.weights:
# Keep a separate index for layers which have weights. This allows users
# to insert Layers without weights anywhere in the network without
# breaking checkpoints.
self._track_checkpointable(
layer, name='layer_with_weights-%d' % weight_layer_index,
overwrite=True)
weight_layer_index += 1
# Even if it doesn't have weights, we should still track everything in
# case it has/will have Checkpointable dependencies.
self._track_checkpointable(
layer, name='layer-%d' % layer_index, overwrite=True)
  def __setattr__(self, name, value):
    """Attribute assignment with automatic checkpointable dependency tracking."""
    if not getattr(self, '_setattr_tracking', True):
      # Tracking explicitly disabled (e.g. inside methods decorated with
      # no_automatic_dependency_tracking): plain attribute assignment.
      super(Network, self).__setattr__(name, value)
      return

    if (isinstance(value, (base_layer.Layer,
                           data_structures.CheckpointableDataStructure))
        or checkpointable_layer_utils.has_weights(value)):
      try:
        # Accessing self._is_graph_network proves __init__ already ran;
        # assigning layers before that would corrupt tracking state.
        self._is_graph_network
      except AttributeError:
        raise RuntimeError('It looks like you are subclassing `Model` and you '
                           'forgot to call `super(YourClass, self).__init__()`.'
                           ' Always start with this line.')

    # Keep track of checkpointable objects,
    # for the needs of `self.save/save_weights`.
    value = data_structures.sticky_attribute_assignment(
        checkpointable=self, value=value, name=name)
    super(Network, self).__setattr__(name, value)

    # Keep track of metric instance created in subclassed model/layer.
    # We do this so that we can maintain the correct order of metrics by adding
    # the instance to the `metrics` list as soon as it is created.
    from tensorflow.python.keras import metrics as metrics_module  # pylint: disable=g-import-not-at-top
    if isinstance(value, metrics_module.Metric):
      self._metrics.append(value)
@property
def stateful(self):
return any((hasattr(layer, 'stateful') and layer.stateful)
for layer in self.layers)
def reset_states(self):
for layer in self.layers:
if hasattr(layer, 'reset_states') and getattr(layer, 'stateful', False):
layer.reset_states()
@property
def state_updates(self):
"""Returns the `updates` from all layers that are stateful.
This is useful for separating training updates and
state updates, e.g. when we need to update a layer's internal state
during prediction.
Returns:
A list of update ops.
"""
state_updates = []
for layer in self.layers:
if getattr(layer, 'stateful', False):
if hasattr(layer, 'updates'):
state_updates += layer.updates
return state_updates
def get_weights(self):
"""Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays.
"""
weights = []
for layer in self.layers:
weights += layer.weights
return backend.batch_get_value(weights)
def set_weights(self, weights):
"""Sets the weights of the model.
Arguments:
weights: A list of Numpy arrays with shapes and types matching
the output of `model.get_weights()`.
"""
tuples = []
for layer in self.layers:
num_param = len(layer.weights)
layer_weights = weights[:num_param]
for sw, w in zip(layer.weights, layer_weights):
tuples.append((sw, w))
weights = weights[num_param:]
backend.batch_set_value(tuples)
def compute_mask(self, inputs, mask):
if not self._is_graph_network:
return None
inputs = generic_utils.to_list(inputs)
if mask is None:
masks = [None for _ in range(len(inputs))]
else:
masks = generic_utils.to_list(mask)
_, output_masks = self._run_internal_graph(inputs, mask=masks)
return output_masks
  @property
  def layers(self):
    """The network's layers, with empty layer containers filtered out."""
    return checkpointable_layer_utils.filter_empty_layer_containers(
        self._layers)
def get_layer(self, name=None, index=None):
"""Retrieves a layer based on either its name (unique) or index.
If `name` and `index` are both provided, `index` will take precedence.
Indices are based on order of horizontal graph traversal (bottom-up).
Arguments:
name: String, name of layer.
index: Integer, index of layer.
Returns:
A layer instance.
Raises:
ValueError: In case of invalid layer name or index.
"""
# TODO(fchollet): We could build a dictionary based on layer names
# since they are constant, but we have not done that yet.
if index is not None:
if len(self.layers) <= index:
raise ValueError('Was asked to retrieve layer at index ' + str(index) +
' but model only has ' + str(len(self.layers)) +
' layers.')
else:
return self.layers[index]
else:
if not name:
raise ValueError('Provide either a layer name or layer index.')
for layer in self.layers:
if layer.name == name:
return layer
raise ValueError('No such layer: ' + name)
@property
def _unfiltered_updates(self):
updates = []
for layer in self.layers:
if isinstance(layer, Network):
updates += layer._unfiltered_updates
else:
updates += layer.updates
updates += self._updates
return updates
@property
def _unfiltered_losses(self):
losses = []
if context.executing_eagerly():
losses.extend(self._eager_losses)
else:
losses.extend(self._losses)
for layer in self.layers:
if isinstance(layer, Network):
losses += layer._unfiltered_losses
else:
losses += layer.losses
return losses
  @checkpointable.no_automatic_dependency_tracking
  def _clear_losses(self):
    """Used every step in eager to reset losses."""
    self._eager_losses = []
    for layer in self.layers:
      # Recurse into nested networks; plain layers just have their
      # eager-loss list replaced.
      if isinstance(layer, Network):
        layer._clear_losses()
      else:
        layer._eager_losses = []
  @property
  def updates(self):
    """Retrieves the network's updates.

    Will only include updates that are either
    unconditional, or conditional on inputs to this model
    (e.g. will not include updates that were created by layers of this model
    outside of the model).

    When the network has no registered inputs, all updates are returned.

    Effectively, `network.updates` behaves like `layer.updates`.

    Concrete example:

    ```python
      bn = keras.layers.BatchNormalization()
      x1 = keras.layers.Input(shape=(10,))
      _ = bn(x1)  # This creates 2 updates.

      x2 = keras.layers.Input(shape=(10,))
      y2 = bn(x2)  # This creates 2 more updates.

      # The BN layer has now 4 updates.
      self.assertEqual(len(bn.updates), 4)

      # Let's create a model from x2 to y2.
      model = keras.models.Model(x2, y2)

      # The model does not list all updates from its underlying layers,
      # but only the updates that are relevant to it. Updates created by layers
      # outside of the model are discarded.
      self.assertEqual(len(model.updates), 2)

      # If you keep calling the model, you append to its updates, just like
      # what happens for a layer.
      x3 = keras.layers.Input(shape=(10,))
      y3 = model(x3)
      self.assertEqual(len(model.updates), 4)

      # But if you call the inner BN layer independently, you don't affect
      # the model's updates.
      x4 = keras.layers.Input(shape=(10,))
      _ = bn(x4)
      self.assertEqual(len(model.updates), 4)
    ```

    Returns:
        A list of update ops.
    """
    # A frozen, stateless network has nothing to update.
    if not self.trainable and not self.stateful:
      return []

    updates = self._unfiltered_updates

    # `updates` might contain irrelevant updates, so it needs to be filtered
    # with respect to inputs the model has been called on.
    relevant_inputs = []
    for i in range(0, len(self._inbound_nodes)):
      inputs = self.get_input_at(i)
      if isinstance(inputs, list):
        relevant_inputs += inputs
      else:
        relevant_inputs.append(inputs)
    if not relevant_inputs:
      # No registered inputs: return everything (de-duplicated).
      return list(set(updates))

    reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, updates)
    relevant_conditional_updates = [x for x in updates if x in reachable]
    unconditional_updates = [
        x for x in updates if x._unconditional_update]  # pylint: disable=protected-access
    # A layer could be used multiple times in a nested structure,
    # so the updates list must be de-duped.
    return list(set(relevant_conditional_updates + unconditional_updates))
  @property
  def losses(self):
    """Retrieves the network's losses.

    Will only include losses that are either
    unconditional, or conditional on inputs to this model
    (e.g. will not include losses that depend on tensors
    that aren't inputs to this model).

    When the network has no registered inputs, all losses are returned.

    Returns:
        A list of loss tensors.
    """
    losses = self._unfiltered_losses

    if context.executing_eagerly():
      return losses

    # TODO(kaftan/fchollet): Clean this up / make it obsolete.
    # This is a super ugly, confusing check necessary to
    # handle the case where we are executing in a function graph in eager mode
    # but the model was constructed symbolically in a separate graph scope.
    # We need to capture the losses created in the current graph function,
    # and filter out the incorrect loss tensors created when symbolically
    # building the graph.
    # We have to use this check because the code after it that checks
    # for reachable inputs only captures the part of the model that was
    # built symbolically, and captures the wrong tensors from a different
    # func graph (causing a crash later on when trying to execute the
    # graph function)
    with ops.init_scope():
      if context.executing_eagerly():
        return [loss for loss in losses
                if loss.graph == ops.get_default_graph()]

    # Filter the losses with respect to inputs the model was called on, the
    # same way `updates` does.
    relevant_inputs = []
    for i in range(0, len(self._inbound_nodes)):
      inputs = self.get_input_at(i)
      if isinstance(inputs, list):
        relevant_inputs += inputs
      else:
        relevant_inputs.append(inputs)
    if not relevant_inputs:
      return losses

    reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, losses)
    relevant_conditional_losses = [x for x in losses if x in reachable]
    unconditional_losses = [
        x for x in losses if x._unconditional_loss]  # pylint: disable=protected-access
    return list(set(
        relevant_conditional_losses + unconditional_losses + self._losses))
  @property
  def trainable_weights(self):
    """Trainable variables gathered from this network and its sublayers."""
    return checkpointable_layer_utils.gather_trainable_weights(
        trainable=self.trainable,
        sub_layers=self._layers,
        extra_variables=self._trainable_weights)
  @property
  def non_trainable_weights(self):
    """Non-trainable variables gathered from this network and its sublayers."""
    # NOTE(review): the trainable variables are passed along as well —
    # presumably the helper reports them as non-trainable when
    # `self.trainable` is False; confirm in gather_non_trainable_weights.
    return checkpointable_layer_utils.gather_non_trainable_weights(
        trainable=self.trainable,
        sub_layers=self._layers,
        extra_variables=self._non_trainable_weights + self._trainable_weights)
@property
def metrics(self):
"""Returns the network's symbolic metrics.
Model overrides this function to include the metrics from `compile` API.
"""
metrics = []
for layer in self.layers:
metrics += layer._metrics # pylint: disable=protected-access
return metrics + self._metrics
@property
def _all_metrics_tensors(self):
"""Returns the network's symbolic metric tensors."""
# TODO(psv): Remove this property.
metrics_tensors = {}
for layer in self.layers:
if isinstance(layer, Network):
metrics_tensors.update(layer._all_metrics_tensors)
else:
metrics_tensors.update(layer._metrics_tensors)
metrics_tensors.update(self._metrics_tensors)
return metrics_tensors
  @property
  def input_spec(self):
    """Gets the network's input specs.

    Returns:
        A list of `InputSpec` instances (one per input to the model)
            or a single instance if the model has only one input.

    Raises:
        TypeError: If an input layer's `input_spec` is not a list.
    """
    # If not a graph network, can't assume anything.
    if not self._is_graph_network:
      return None

    specs = []
    for layer in self._input_layers:
      if layer.input_spec is None:
        specs.append(None)
      else:
        if not isinstance(layer.input_spec, list):
          raise TypeError('Layer ' + layer.name +
                          ' has an input_spec attribute that '
                          'is not a list. We expect a list. '
                          'Found input_spec = ' + str(layer.input_spec))
        specs += layer.input_spec
    if len(specs) == 1:
      # Single-input model: return the bare spec rather than a one-item list.
      return specs[0]
    return specs
  @base_layer.default
  def build(self, input_shape):
    """Builds the model based on input shapes received.

    This is to be used for subclassed models, which do not know at instantiation
    time what their inputs look like.

    This method only exists for users who want to call `model.build()` in a
    standalone way (as a substitute for calling the model on real data to
    build it). It will never be called by the framework (and thus it will
    never throw unexpected errors in an unrelated workflow).

    Args:
     input_shape: Single tuple, TensorShape, or list of shapes, where shapes
         are tuples, integers, or TensorShapes.

    Raises:
      ValueError:
        1. In case of invalid user-provided data (not of type tuple,
           list, or TensorShape).
        2. If the model requires call arguments that are agnostic
           to the input shapes (positional or kwarg in call signature).
        3. If not all layers were properly built.
        4. If float type inputs are not supported within the layers.

      In each of these cases, the user should build their model by calling it
      on real tensor data.
    """
    if self._is_graph_network:
      # Graph networks are built at construction time; nothing to do.
      self.built = True
      return

    # If subclass network
    if input_shape is None:
      raise ValueError('Input shape must be defined when calling build on a '
                       'model subclass network.')
    valid_types = (tuple, list, tensor_shape.TensorShape)
    if not isinstance(input_shape, valid_types):
      raise ValueError('Specified input shape is not one of the valid types. '
                       'Please specify a batch input shape of type tuple or '
                       'list of input shapes. User provided '
                       'input type: {}'.format(type(input_shape)))

    if input_shape and not self.inputs:
      # We create placeholders for the `None`s in the shape and build the model
      # in a Graph. Since tf.Variable is compatible with both eager execution
      # and graph building, the variables created after building the model in
      # a Graph are still valid when executing eagerly.
      if context.executing_eagerly():
        graph = func_graph.FuncGraph('build_graph')
      else:
        graph = backend.get_graph()
      with graph.as_default():
        if isinstance(input_shape, list):
          x = [base_layer_utils.generate_placeholders_from_shape(shape)
               for shape in input_shape]
        else:
          x = base_layer_utils.generate_placeholders_from_shape(input_shape)

        kwargs = {}
        call_signature = tf_inspect.getfullargspec(self.call)
        call_args = call_signature.args
        # Exclude `self`, `inputs`, and any argument with a default value.
        if len(call_args) > 2:
          if call_signature.defaults:
            call_args = call_args[2:-len(call_signature.defaults)]
          else:
            call_args = call_args[2:]
          for arg in call_args:
            if arg == 'training':
              # Case where `training` is a positional arg with no default.
              kwargs['training'] = False
            else:
              # Has invalid call signature with unknown positional arguments.
              raise ValueError(
                  'Currently, you cannot build your model if it has '
                  'positional or keyword arguments that are not '
                  'inputs to the model, but are required for its '
                  '`call` method. Instead, in order to instantiate '
                  'and build your model, `call` your model on real '
                  'tensor data with all expected call arguments.')
        elif len(call_args) < 2:
          # Signature without `inputs`.
          raise ValueError('You can only call `build` on a model if its `call` '
                           'method accepts an `inputs` argument.')
        try:
          # Calling `call` on placeholder data builds all sublayers.
          self.call(x, **kwargs)
        except (errors.InvalidArgumentError, TypeError):
          raise ValueError('You cannot build your model by calling `build` '
                           'if your layers do not support float type inputs. '
                           'Instead, in order to instantiate and build your '
                           'model, `call` your model on real tensor data (of '
                           'the correct dtype).')

    if self._layers:
      self._track_layers(self._layers)
    self.built = True
  def call(self, inputs, training=None, mask=None):
    """Calls the model on new inputs.

    In this case `call` just reapplies
    all ops in the graph to the new inputs
    (e.g. build a new computational graph from the provided inputs).

    Arguments:
        inputs: A tensor or list of tensors.
        training: Boolean or boolean scalar tensor, indicating whether to run
          the `Network` in training mode or inference mode.
        mask: A mask or list of masks. A mask can be
            either a tensor or None (no mask).

    Returns:
        A tensor if there is a single output, or
        a list of tensors if there are more than one outputs.

    Raises:
        NotImplementedError: If called on a subclassed model that has not
          overridden `call`.
    """
    if not self._is_graph_network:
      # Subclassed networks must provide their own `call` implementation.
      raise NotImplementedError('When subclassing the `Model` class, you should'
                                ' implement a `call` method.')

    inputs = generic_utils.to_list(inputs)
    if mask is None:
      masks = [None for _ in range(len(inputs))]
    else:
      masks = generic_utils.to_list(mask)
    outputs, _ = self._run_internal_graph(inputs,
                                          training=training,
                                          mask=masks)
    return outputs
def _call_and_compute_mask(self, inputs, training=None, mask=None):
inputs = generic_utils.to_list(inputs)
if mask is None:
masks = [None for _ in range(len(inputs))]
else:
masks = generic_utils.to_list(mask)
return self._run_internal_graph(inputs,
training=training,
mask=masks)
  def compute_output_shape(self, input_shape):
    """Computes the output shape(s) of the network given input shape(s).

    Only graph networks are handled here (subclassed networks fall back to
    the base `Layer` implementation). Shapes are propagated node by node
    from the input layers through the graph, and the result is memoized in
    `self._output_shape_cache`, keyed on the input shapes.

    Arguments:
      input_shape: A shape (tuple or `TensorShape`) or a list of shapes,
        one per model input.

    Returns:
      A `TensorShape` if the model has a single output, otherwise a list of
      `TensorShape`s.

    Raises:
      ValueError: if the number of provided shapes does not match the number
        of model inputs.
    """
    if not self._is_graph_network:
      return super(Network, self).compute_output_shape(input_shape)
    # Normalize to a list of plain tuples (or None), one entry per input.
    if isinstance(input_shape, list):
      input_shapes = []
      for shape in input_shape:
        if shape is not None:
          input_shapes.append(tuple(tensor_shape.TensorShape(shape).as_list()))
        else:
          input_shapes.append(None)
    else:
      if input_shape is not None:
        input_shapes = [tuple(tensor_shape.TensorShape(input_shape).as_list())]
      else:
        input_shapes = [None]
    if len(input_shapes) != len(self._input_layers):
      raise ValueError('Invalid input_shape argument ' + str(input_shape) +
                       ': model has ' + str(len(self._input_layers)) +
                       ' tensor inputs.')
    cache_key = generic_utils.object_list_uid(input_shapes)
    if cache_key in self._output_shape_cache:
      # Cache hit.
      output_shapes = self._output_shape_cache[cache_key]
    else:
      # Maps '<layer_name>_<node_index>_<tensor_index>' keys to shapes.
      layers_to_output_shapes = {}
      for i in range(len(input_shapes)):
        layer = self._input_layers[i]
        input_shape = input_shapes[i]
        # It's an input layer: then `compute_output_shape` is identity,
        # and there is only one node and one tensor output.
        shape_key = layer.name + '_0_0'
        layers_to_output_shapes[shape_key] = input_shape
      depth_keys = list(self._nodes_by_depth.keys())
      depth_keys.sort(reverse=True)
      # Iterate over nodes, by depth level.
      # A single depth level means there is nothing beyond the input layers
      # to propagate through.
      if len(depth_keys) > 1:
        for depth in depth_keys:
          nodes = self._nodes_by_depth[depth]
          for node in nodes:
            # This is always a single layer, never a list.
            layer = node.outbound_layer
            if layer in self._input_layers:
              # We've already covered the input layers
              # a few lines above.
              continue
            # Potentially redundant list,
            # same size as node.input_tensors.
            # NOTE: `input_shapes` is deliberately rebound here; the
            # top-level value is no longer needed at this point.
            input_shapes = []
            for j in range(len(node.inbound_layers)):
              inbound_layer = node.inbound_layers[j]
              node_index = node.node_indices[j]
              tensor_index = node.tensor_indices[j]
              shape_key = inbound_layer.name + '_%s_%s' % (node_index,
                                                           tensor_index)
              input_shape = layers_to_output_shapes[shape_key]
              input_shapes.append(input_shape)
            if len(input_shapes) == 1:
              output_shape = layer.compute_output_shape(input_shapes[0])
            else:
              output_shape = layer.compute_output_shape(input_shapes)
            if isinstance(output_shape, list):
              output_shapes = [
                  tuple(tensor_shape.TensorShape(shape).as_list())
                  for shape in output_shape
              ]
            else:
              output_shapes = [
                  tuple(tensor_shape.TensorShape(output_shape).as_list())
              ]
            node_index = layer._inbound_nodes.index(node)  # pylint: disable=protected-access
            for j in range(len(output_shapes)):
              shape_key = layer.name + '_%s_%s' % (node_index, j)
              layers_to_output_shapes[shape_key] = output_shapes[j]
      # Read final output shapes from layers_to_output_shapes.
      output_shapes = []
      for i in range(len(self._output_layers)):
        layer, node_index, tensor_index = self._output_coordinates[i]
        shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)
        output_shapes.append(layers_to_output_shapes[shape_key])
      # Store in cache.
      self._output_shape_cache[cache_key] = output_shapes
    if isinstance(output_shapes, list):
      if len(output_shapes) == 1:
        return tensor_shape.TensorShape(output_shapes[0])
      else:
        return [tensor_shape.TensorShape(shape) for shape in output_shapes]
    else:
      return tensor_shape.TensorShape(output_shapes)
  def _run_internal_graph(self, inputs, training=None, mask=None):
    """Computes output tensors for new inputs.
    # Note:
    - Expects `inputs` to be a list (potentially with 1 element).
    - Can be run on non-Keras tensors.
    Arguments:
      inputs: List of tensors
      training: Boolean learning phase.
      mask: List of masks (tensors or None).
    Returns:
      Two lists: output_tensors, output_masks
    """
    # Note: masking support is relevant mainly for Keras.
    # It cannot be factored out without having the fully reimplement the network
    # calling logic on the Keras side. We choose to incorporate it in
    # Network because 1) it may be useful to fully support in tf.layers in
    # the future and 2) Keras is a major user of Network. If you don't
    # use masking, it does not interfere with regular behavior at all and you
    # can ignore it.
    if mask is None:
      masks = [None for _ in range(len(inputs))]
    else:
      masks = mask
    # Dictionary mapping reference tensors to tuples
    # (computed tensor, compute mask)
    # we assume a 1:1 mapping from tensor to mask
    tensor_map = {}
    for x, y, mask in zip(self.inputs, inputs, masks):
      tensor_map[str(id(x))] = (y, mask)
    depth_keys = list(self._nodes_by_depth.keys())
    depth_keys.sort(reverse=True)
    for depth in depth_keys:
      nodes = self._nodes_by_depth[depth]
      for node in nodes:
        # This is always a single layer, never a list.
        layer = node.outbound_layer
        reference_input_tensors = node.input_tensors
        reference_output_tensors = node.output_tensors
        # If all previous input tensors are available in tensor_map,
        # then call node.inbound_layer on them.
        computed_data = []  # List of tuples (input, mask).
        for x in reference_input_tensors:
          if str(id(x)) in tensor_map:
            computed_data.append(tensor_map[str(id(x))])
        # Only fire this node once every one of its inputs has been computed.
        if len(computed_data) == len(reference_input_tensors):
          # Call layer (reapplying ops to new inputs).
          with ops.name_scope(layer.name):
            if node.arguments:
              kwargs = node.arguments
            else:
              kwargs = {}
            # Ensure `training` arg propagation if applicable.
            argspec = self._layer_call_argspecs[layer].args
            if 'training' in argspec:
              kwargs.setdefault('training', training)
            if len(computed_data) == 1:
              computed_tensor, computed_mask = computed_data[0]
              # Ensure mask propagation if applicable.
              if 'mask' in argspec:
                kwargs.setdefault('mask', computed_mask)
              # Compute outputs and masks.
              if (isinstance(layer, Network) and
                  layer._compute_output_and_mask_jointly):
                output_tensors, output_masks = layer._call_and_compute_mask(
                    computed_tensor, **kwargs)
              else:
                if context.executing_eagerly():
                  output_tensors = layer(computed_tensor, **kwargs)
                elif layer.dynamic:
                  output_tensors = layer._symbolic_call(computed_tensor)  # pylint: disable=protected-access
                else:
                  output_tensors = layer.call(computed_tensor, **kwargs)
                if hasattr(layer, 'compute_mask'):
                  output_masks = layer.compute_mask(computed_tensor,
                                                    computed_mask)
                else:
                  output_masks = [None for _ in output_tensors]
              computed_tensors = [computed_tensor]
            else:
              # Multi-input node: layers are called on the full list.
              computed_tensors = [x[0] for x in computed_data]
              computed_masks = [x[1] for x in computed_data]
              # Ensure mask propagation if applicable.
              if 'mask' in argspec:
                kwargs.setdefault('mask', computed_masks)
              # Compute outputs and masks.
              if (isinstance(layer, Network) and
                  layer._compute_output_and_mask_jointly):
                output_tensors, output_masks = layer._call_and_compute_mask(
                    computed_tensors, **kwargs)
              else:
                if context.executing_eagerly():
                  output_tensors = layer(computed_tensors, **kwargs)
                elif layer.dynamic:
                  output_tensors = layer._symbolic_call(computed_tensors)  # pylint: disable=protected-access
                else:
                  output_tensors = layer.call(computed_tensors, **kwargs)
                if hasattr(layer, 'compute_mask'):
                  output_masks = layer.compute_mask(computed_tensors,
                                                    computed_masks)
                else:
                  output_masks = [None for _ in output_tensors]
            output_tensors = generic_utils.to_list(output_tensors)
            if output_masks is None:
              output_masks = [None for _ in output_tensors]
            else:
              output_masks = generic_utils.to_list(output_masks)
            if not context.executing_eagerly():
              # Set mask metadata.
              for x, m in zip(output_tensors, output_masks):
                try:
                  x._keras_mask = m
                except AttributeError:
                  pass
              # Apply activity regularizer if any.
              layer._handle_activity_regularization(computed_tensors,
                                                    output_tensors)
          # Update tensor_map.
          for x, y, mask in zip(reference_output_tensors, output_tensors,
                                output_masks):
            tensor_map[str(id(x))] = (y, mask)
    output_tensors = []
    output_masks = []
    output_shapes = []
    for x in self.outputs:
      assert str(id(x)) in tensor_map, 'Could not compute output ' + str(x)
      tensor, mask = tensor_map[str(id(x))]
      output_shapes.append(backend.int_shape(x))
      output_tensors.append(tensor)
      output_masks.append(mask)
    # Single-output models return bare values rather than 1-element lists.
    if len(output_tensors) == 1:
      output_tensors = output_tensors[0]
      if output_shapes is not None:
        output_shapes = output_shapes[0]
      if output_masks is not None:
        output_masks = output_masks[0]
    if output_shapes is not None:
      input_shapes = [backend.int_shape(x) for x in inputs]
      cache_key = generic_utils.object_list_uid(input_shapes)
      self._output_shape_cache[cache_key] = output_shapes
    return output_tensors, output_masks
  def get_config(self):
    """Returns the network config as a JSON-serializable dict.

    Only supported for graph networks. The config contains the layer
    configs, the inbound-node wiring between them, and the model's
    input/output coordinates -- everything `from_config` needs to rebuild
    the network.

    Raises:
      NotImplementedError: if the network is not a graph network.
    """
    if not self._is_graph_network:
      raise NotImplementedError
    config = {
        'name': self.name,
    }
    # Maps original node keys to their index among the nodes kept in this
    # network, so that serialized node references stay consistent after
    # filtering out irrelevant nodes.
    node_conversion_map = {}
    for layer in self.layers:
      if issubclass(layer.__class__, Network):
        # Networks start with a pre-existing node
        # linking their input to output.
        kept_nodes = 1
      else:
        kept_nodes = 0
      for original_node_index, node in enumerate(layer._inbound_nodes):
        node_key = _make_node_key(layer.name, original_node_index)
        if node_key in self._network_nodes:
          node_conversion_map[node_key] = kept_nodes
          kept_nodes += 1
    layer_configs = []
    for layer in self.layers:  # From the earliest layers on.
      layer_class_name = layer.__class__.__name__
      layer_config = layer.get_config()
      filtered_inbound_nodes = []
      for original_node_index, node in enumerate(layer._inbound_nodes):
        node_key = _make_node_key(layer.name, original_node_index)
        if node_key in self._network_nodes:
          # The node is relevant to the model:
          # add to filtered_inbound_nodes.
          if node.arguments:
            try:
              # Probe serializability before keeping the call kwargs.
              json.dumps(node.arguments)
              kwargs = node.arguments
            except TypeError:
              logging.warning(
                  'Layer ' + layer.name +
                  ' was passed non-serializable keyword arguments: ' +
                  str(node.arguments) + '. They will not be included '
                  'in the serialized model (and thus will be missing '
                  'at deserialization time).')
              kwargs = {}
          else:
            kwargs = {}
          if node.inbound_layers:
            node_data = []
            for i in range(len(node.inbound_layers)):
              inbound_layer = node.inbound_layers[i]
              node_index = node.node_indices[i]
              tensor_index = node.tensor_indices[i]
              node_key = _make_node_key(inbound_layer.name, node_index)
              new_node_index = node_conversion_map.get(node_key, 0)
              node_data.append(
                  [inbound_layer.name, new_node_index, tensor_index, kwargs])
            filtered_inbound_nodes.append(node_data)
      layer_configs.append({
          'name': layer.name,
          'class_name': layer_class_name,
          'config': layer_config,
          'inbound_nodes': filtered_inbound_nodes,
      })
    config['layers'] = layer_configs
    # Gather info about inputs and outputs.
    model_inputs = []
    for i in range(len(self._input_layers)):
      layer, node_index, tensor_index = self._input_coordinates[i]
      node_key = _make_node_key(layer.name, node_index)
      if node_key not in self._network_nodes:
        continue
      new_node_index = node_conversion_map[node_key]
      model_inputs.append([layer.name, new_node_index, tensor_index])
    config['input_layers'] = model_inputs
    model_outputs = []
    for i in range(len(self._output_layers)):
      layer, node_index, tensor_index = self._output_coordinates[i]
      node_key = _make_node_key(layer.name, node_index)
      if node_key not in self._network_nodes:
        continue
      new_node_index = node_conversion_map[node_key]
      model_outputs.append([layer.name, new_node_index, tensor_index])
    config['output_layers'] = model_outputs
    # Deep-copy so callers cannot mutate internal state through the config.
    return copy.deepcopy(config)
  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Instantiates a Model from its config (output of `get_config()`).
    Arguments:
      config: Model config dictionary.
      custom_objects: Optional dictionary mapping names
        (strings) to custom classes or functions to be
        considered during deserialization.
    Returns:
      A model instance.
    Raises:
      ValueError: In case of improperly formatted config dict.
    """
    # Layer instances created during
    # the graph reconstruction process
    created_layers = {}
    # Dictionary mapping layer instances to
    # node data that specifies a layer call.
    # It acts as a queue that maintains any unprocessed
    # layer call until it becomes possible to process it
    # (i.e. until the input tensors to the call all exist).
    unprocessed_nodes = {}
    def add_unprocessed_node(layer, node_data):
      """Queues `node_data` to be retried once its input tensors exist."""
      if layer not in unprocessed_nodes:
        unprocessed_nodes[layer] = [node_data]
      else:
        unprocessed_nodes[layer].append(node_data)
    def process_node(layer, node_data):
      """Deserialize a node.
      Arguments:
        layer: layer instance.
        node_data: node config dict.
      Raises:
        ValueError: In case of improperly formatted `node_data` dict.
      """
      input_tensors = []
      for input_data in node_data:
        inbound_layer_name = input_data[0]
        inbound_node_index = input_data[1]
        inbound_tensor_index = input_data[2]
        if len(input_data) == 3:
          kwargs = {}
        elif len(input_data) == 4:
          kwargs = input_data[3]
        else:
          raise ValueError('Improperly formatted model config.')
        # Re-enqueue if any input of this call is not available yet.
        if inbound_layer_name not in created_layers:
          add_unprocessed_node(layer, node_data)
          return
        inbound_layer = created_layers[inbound_layer_name]
        if len(inbound_layer._inbound_nodes) <= inbound_node_index:
          add_unprocessed_node(layer, node_data)
          return
        inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
        input_tensors.append(inbound_node.output_tensors[inbound_tensor_index])
      # Call layer on its inputs, thus creating the node
      # and building the layer if needed.
      if input_tensors:
        if len(input_tensors) == 1:
          layer(input_tensors[0], **kwargs)
        else:
          layer(input_tensors, **kwargs)
    def process_layer(layer_data):
      """Deserializes a layer, then call it on appropriate inputs.
      Arguments:
        layer_data: layer config dict.
      Raises:
        ValueError: In case of improperly formatted `layer_data` dict.
      """
      layer_name = layer_data['name']
      # Instantiate layer.
      from tensorflow.python.keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
      layer = deserialize_layer(layer_data, custom_objects=custom_objects)
      created_layers[layer_name] = layer
      # Gather layer inputs.
      inbound_nodes_data = layer_data['inbound_nodes']
      for node_data in inbound_nodes_data:
        # We don't process nodes (i.e. make layer calls)
        # on the fly because the inbound node may not yet exist,
        # in case of layer shared at different topological depths
        # (e.g. a model such as A(B(A(B(x)))))
        add_unprocessed_node(layer, node_data)
    # First, we create all layers and enqueue nodes to be processed
    for layer_data in config['layers']:
      process_layer(layer_data)
    # Then we process nodes in order of layer depth.
    # Nodes that cannot yet be processed (if the inbound node
    # does not yet exist) are re-enqueued, and the process
    # is repeated until all nodes are processed.
    while unprocessed_nodes:
      for layer_data in config['layers']:
        layer = created_layers[layer_data['name']]
        if layer in unprocessed_nodes:
          for node_data in unprocessed_nodes.pop(layer):
            process_node(layer, node_data)
    name = config.get('name')
    input_tensors = []
    output_tensors = []
    for layer_data in config['input_layers']:
      layer_name, node_index, tensor_index = layer_data
      assert layer_name in created_layers
      layer = created_layers[layer_name]
      layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
      input_tensors.append(layer_output_tensors[tensor_index])
    for layer_data in config['output_layers']:
      layer_name, node_index, tensor_index = layer_data
      assert layer_name in created_layers
      layer = created_layers[layer_name]
      layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
      output_tensors.append(layer_output_tensors[tensor_index])
    return cls(inputs=input_tensors, outputs=output_tensors, name=name)
  def save(self, filepath, overwrite=True, include_optimizer=True):
    """Saves the model to a single HDF5 file.
    The savefile includes:
        - The model architecture, allowing to re-instantiate the model.
        - The model weights.
        - The state of the optimizer, allowing to resume training
            exactly where you left off.
    This allows you to save the entirety of the state of a model
    in a single file.
    Saved models can be reinstantiated via `keras.models.load_model`.
    The model returned by `load_model`
    is a compiled model ready to be used (unless the saved model
    was never compiled in the first place).
    Arguments:
        filepath: String, path to the file to save the weights to.
        overwrite: Whether to silently overwrite any existing file at the
            target location, or provide the user with a manual prompt.
        include_optimizer: If True, save optimizer's state together.
    Example:
    ```python
    from keras.models import load_model
    model.save('my_model.h5')  # creates a HDF5 file 'my_model.h5'
    del model  # deletes the existing model
    # returns a compiled model
    # identical to the previous one
    model = load_model('my_model.h5')
    ```
    """
    if not self._is_graph_network:
      raise NotImplementedError(
          'Currently `save` requires model to be a graph network. Consider '
          'using `save_weights`, in order to save the weights of the model.')
    # NOTE(review): deferred import -- presumably avoids a circular dependency
    # between this module and `keras.models`; confirm.
    from tensorflow.python.keras.models import save_model  # pylint: disable=g-import-not-at-top
    save_model(self, filepath, overwrite, include_optimizer)
  def save_weights(self, filepath, overwrite=True, save_format=None):
    """Saves all layer weights.
    Either saves in HDF5 or in TensorFlow format based on the `save_format`
    argument.
    When saving in HDF5 format, the weight file has:
      - `layer_names` (attribute), a list of strings
          (ordered names of model layers).
      - For every layer, a `group` named `layer.name`
          - For every such layer group, a group attribute `weight_names`,
              a list of strings
              (ordered names of weights tensor of the layer).
          - For every weight in the layer, a dataset
              storing the weight value, named after the weight tensor.
    When saving in TensorFlow format, all objects referenced by the network are
    saved in the same format as `tf.train.Checkpoint`, including any `Layer`
    instances or `Optimizer` instances assigned to object attributes. For
    networks constructed from inputs and outputs using `tf.keras.Model(inputs,
    outputs)`, `Layer` instances used by the network are tracked/saved
    automatically. For user-defined classes which inherit from `tf.keras.Model`,
    `Layer` instances must be assigned to object attributes, typically in the
    constructor. See the documentation of `tf.train.Checkpoint` and
    `tf.keras.Model` for details.
    Arguments:
        filepath: String, path to the file to save the weights to. When saving
            in TensorFlow format, this is the prefix used for checkpoint files
            (multiple files are generated). Note that the '.h5' suffix causes
            weights to be saved in HDF5 format.
        overwrite: Whether to silently overwrite any existing file at the
            target location, or provide the user with a manual prompt.
        save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or
            '.keras' will default to HDF5 if `save_format` is `None`. Otherwise
            `None` defaults to 'tf'.
    Raises:
        ImportError: If h5py is not available when attempting to save in HDF5
            format.
        ValueError: For invalid/unknown format arguments.
    """
    # Infer the save format from the filename extension when not given.
    filepath_is_h5 = _is_hdf5_filepath(filepath)
    if save_format is None:
      if filepath_is_h5:
        save_format = 'h5'
      else:
        save_format = 'tf'
    else:
      # Normalize user-supplied aliases to 'tf' or 'h5'.
      user_format = save_format.lower().strip()
      if user_format in ('tensorflow', 'tf'):
        save_format = 'tf'
      elif user_format in ('hdf5', 'h5', 'keras'):
        save_format = 'h5'
      else:
        raise ValueError(
            'Unknown format "%s". Was expecting one of {"tf", "h5"}.' % (
                save_format,))
    if save_format == 'tf' and filepath_is_h5:
      raise ValueError(
          ('save_weights got save_format="tf"/"tensorflow", but the '
           'filepath ("%s") looks like an HDF5 file. Omit the ".h5"/".keras" '
           'when saving in TensorFlow format.')
          % filepath)
    if save_format == 'h5' and h5py is None:
      raise ImportError(
          '`save_weights` requires h5py when saving in hdf5.')
    if save_format == 'tf':
      # TF checkpoints are written as multiple files; probe the '.index' file.
      check_filepath = filepath + '.index'
    else:
      check_filepath = filepath
    # If file exists and should not be overwritten:
    if not overwrite and os.path.isfile(check_filepath):
      proceed = ask_to_proceed_with_overwrite(check_filepath)
      if not proceed:
        return
    if save_format == 'h5':
      with h5py.File(filepath, 'w') as f:
        saving.save_weights_to_hdf5_group(f, self.layers)
    else:
      if context.executing_eagerly():
        session = None
      else:
        session = backend.get_session()
      optimizer = getattr(self, 'optimizer', None)
      if (optimizer
          and not isinstance(optimizer, checkpointable.CheckpointableBase)):
        logging.warning(
            ('This model was compiled with a Keras optimizer (%s) but is being '
             'saved in TensorFlow format with `save_weights`. The model\'s '
             'weights will be saved, but unlike with TensorFlow optimizers in '
             'the TensorFlow format the optimizer\'s state will not be '
             'saved.\n\nConsider using a TensorFlow optimizer from `tf.train`.')
            % (optimizer,))
      self._checkpointable_saver.save(filepath, session=session)
      # Record this checkpoint so it's visible from tf.train.latest_checkpoint.
      checkpoint_management.update_checkpoint_state(
          save_dir=os.path.dirname(filepath),
          model_checkpoint_path=filepath,
          all_model_checkpoint_paths=[filepath])
  def load_weights(self, filepath, by_name=False):
    """Loads all layer weights, either from a TensorFlow or an HDF5 weight file.
    If `by_name` is False weights are loaded based on the network's
    topology. This means the architecture should be the same as when the weights
    were saved.  Note that layers that don't have weights are not taken into
    account in the topological ordering, so adding or removing layers is fine as
    long as they don't have weights.
    If `by_name` is True, weights are loaded into layers only if they share the
    same name. This is useful for fine-tuning or transfer-learning models where
    some of the layers have changed.
    Only topological loading (`by_name=False`) is supported when loading weights
    from the TensorFlow format. Note that topological loading differs slightly
    between TensorFlow and HDF5 formats for user-defined classes inheriting from
    `tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the
    TensorFlow format loads based on the object-local names of attributes to
    which layers are assigned in the `Model`'s constructor.
    Arguments:
        filepath: String, path to the weights file to load. For weight files in
            TensorFlow format, this is the file prefix (the same as was passed
            to `save_weights`).
        by_name: Boolean, whether to load weights by name or by topological
            order. Only topological loading is supported for weight files in
            TensorFlow format.
    Returns:
        When loading a weight file in TensorFlow format, returns the same status
        object as `tf.train.Checkpoint.restore`. When graph building, restore
        ops are run automatically as soon as the network is built (on first call
        for user-defined classes inheriting from `Model`, immediately if it is
        already built).
        When loading weights in HDF5 format, returns `None`.
    Raises:
        ImportError: If h5py is not available and the weight file is in HDF5
            format.
    """
    # Detect the on-disk format: extension first, then probe the file as a
    # TensorFlow checkpoint.
    if _is_hdf5_filepath(filepath):
      save_format = 'h5'
    else:
      try:
        pywrap_tensorflow.NewCheckpointReader(filepath)
        save_format = 'tf'
      except errors_impl.DataLossError:
        # The checkpoint is not readable in TensorFlow format. Try HDF5.
        save_format = 'h5'
    if save_format == 'tf':
      status = self._checkpointable_saver.restore(filepath)
      if by_name:
        raise NotImplementedError(
            'Weights may only be loaded based on topology into Models when '
            'loading TensorFlow-formatted weights (got by_name=True to '
            'load_weights).')
      if not context.executing_eagerly():
        session = backend.get_session()
        # Restore existing variables (if any) immediately, and set up a
        # streaming restore for any variables created in the future.
        checkpointable_utils.streaming_restore(status=status, session=session)
      status.assert_nontrivial_match()
      return status
    if h5py is None:
      raise ImportError(
          '`load_weights` requires h5py when loading weights from HDF5.')
    if self._is_graph_network and not self.built:
      raise NotImplementedError(
          'Unable to load weights saved in HDF5 format into a subclassed '
          'Model which has not created its variables yet. Call the Model '
          'first, then load the weights.')
    with h5py.File(filepath, 'r') as f:
      # NOTE(review): presumably a whole-model HDF5 file, where the weights
      # live under the 'model_weights' group -- confirm against `save_model`.
      if 'layer_names' not in f.attrs and 'model_weights' in f:
        f = f['model_weights']
      if by_name:
        saving.load_weights_from_hdf5_group_by_name(f, self.layers)
      else:
        saving.load_weights_from_hdf5_group(f, self.layers)
def _updated_config(self):
"""Util shared between different serialization methods.
Returns:
Model config with Keras version information added.
"""
from tensorflow.python.keras import __version__ as keras_version # pylint: disable=g-import-not-at-top
config = self.get_config()
model_config = {
'class_name': self.__class__.__name__,
'config': config,
'keras_version': keras_version,
'backend': backend.backend()
}
return model_config
def to_json(self, **kwargs):
"""Returns a JSON string containing the network configuration.
To load a network from a JSON save file, use
`keras.models.model_from_json(json_string, custom_objects={})`.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `json.dumps()`.
Returns:
A JSON string.
"""
def get_json_type(obj):
# If obj is any numpy type
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
# If obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError('Not JSON Serializable:', obj)
model_config = self._updated_config()
return json.dumps(model_config, default=get_json_type, **kwargs)
def to_yaml(self, **kwargs):
"""Returns a yaml string containing the network configuration.
To load a network from a yaml save file, use
`keras.models.model_from_yaml(yaml_string, custom_objects={})`.
`custom_objects` should be a dictionary mapping
the names of custom losses / layers / etc to the corresponding
functions / classes.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `yaml.dump()`.
Returns:
A YAML string.
Raises:
ImportError: if yaml module is not found.
"""
if yaml is None:
raise ImportError(
'Requires yaml module installed (`pip install pyyaml`).')
return yaml.dump(self._updated_config(), **kwargs)
def summary(self, line_length=None, positions=None, print_fn=None):
"""Prints a string summary of the network.
Arguments:
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements
in each line. If not provided,
defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use. Defaults to `print`.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
Raises:
ValueError: if `summary()` is called before the model is built.
"""
if not self.built:
raise ValueError('This model has not yet been built. '
'Build the model first by calling `build()` or calling '
'`fit()` with some data, or specify '
'an `input_shape` argument in the first layer(s) for '
'automatic build.')
layer_utils.print_summary(self,
line_length=line_length,
positions=positions,
print_fn=print_fn)
  def _validate_graph_inputs_and_outputs(self):
    """Validates the inputs and outputs of a Graph Network."""
    # Check for redundancy in inputs.
    if len(set(self.inputs)) != len(self.inputs):
      raise ValueError('The list of inputs passed to the model '
                       'is redundant. '
                       'All inputs should only appear once.'
                       ' Found: ' + str(self.inputs))
    for x in self.inputs:
      # Check that x has appropriate `_keras_history` metadata.
      if not hasattr(x, '_keras_history'):
        cls_name = self.__class__.__name__
        raise ValueError('Input tensors to a ' + cls_name + ' ' +
                         'must come from `tf.keras.Input`. '
                         'Received: ' + str(x) +
                         ' (missing previous layer metadata).')
      # Check that x is an input tensor.
      # pylint: disable=protected-access
      layer, _, _ = x._keras_history
      # A true Input tensor comes from a layer with exactly one inbound node
      # and no inbound layers; anything else is only warned about, not
      # rejected.
      if len(layer._inbound_nodes) > 1 or (
          layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers):
        cls_name = self.__class__.__name__
        logging.warning(cls_name + ' inputs must come from '
                        '`tf.keras.Input` (thus holding past layer metadata), '
                        'they cannot be the output of '
                        'a previous non-Input layer. '
                        'Here, a tensor specified as '
                        'input to "' + self.name + '" was not an Input tensor, '
                        'it was generated by layer ' + layer.name + '.\n'
                        'Note that input tensors are '
                        'instantiated via `tensor = tf.keras.Input(shape)`.\n'
                        'The tensor that caused the issue was: ' + str(x.name))
    # Check compatibility of batch sizes of Input Layers.
    input_batch_sizes = [
        training_utils.get_static_batch_size(x._keras_history[0])
        for x in self.inputs
    ]
    consistent_batch_size = None
    for batch_size in input_batch_sizes:
      if batch_size is not None:
        if (consistent_batch_size is not None and
            batch_size != consistent_batch_size):
          raise ValueError('The specified batch sizes of the Input Layers'
                           ' are incompatible. Found batch sizes: {}'.format(
                               input_batch_sizes))
        consistent_batch_size = batch_size
    # Check that every output tensor carries Keras layer metadata.
    for x in self.outputs:
      if not hasattr(x, '_keras_history'):
        cls_name = self.__class__.__name__
        raise ValueError('Output tensors to a ' + cls_name + ' must be '
                         'the output of a TensorFlow `Layer` '
                         '(thus holding past layer metadata). Found: ' + str(x))
def _is_hdf5_filepath(filepath):
return (filepath.endswith('.h5') or filepath.endswith('.keras') or
filepath.endswith('.hdf5'))
def _make_node_key(layer_name, node_index):
return layer_name + '_ib-' + str(node_index)
def _map_graph_network(inputs, outputs):
  """Validates a network's topology and gather its layers and nodes.
  Arguments:
    inputs: List of input tensors.
    outputs: List of outputs tensors.
  Returns:
    A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.
    - nodes: list of Node instances.
    - nodes_by_depth: dict mapping ints (depth) to lists of node instances.
    - layers: list of Layer instances.
    - layers_by_depth: dict mapping ints (depth) to lists of layer instances.
  Raises:
    ValueError: In case the network is not valid (e.g. disconnected graph).
  """
  # Network_nodes: set of nodes included in the graph of layers
  # (not all nodes included in the layers are relevant to the current graph).
  network_nodes = set()  # ids of all nodes relevant to the Network
  nodes_depths = {}  # dict {node: depth value}
  layers_depths = {}  # dict {layer: depth value}
  layer_indices = {}  # dict {layer: index in traversal}
  nodes_in_decreasing_depth = []
  def build_map(tensor,
                finished_nodes,
                nodes_in_progress,
                layer,
                node_index,
                tensor_index):
    """Builds a map of the graph of layers.
    This recursively updates the map `layer_indices`,
    the list `nodes_in_decreasing_depth` and the set `network_nodes`.
    Arguments:
      tensor: Some tensor in a graph.
      finished_nodes: Set of nodes whose subgraphs have been traversed
        completely. Useful to prevent duplicated work.
      nodes_in_progress: Set of nodes that are currently active on the
        recursion stack. Useful to detect cycles.
      layer: Layer from which `tensor` comes from. If not provided,
        will be obtained from `tensor._keras_history`.
      node_index: Node index from which `tensor` comes from.
      tensor_index: Tensor_index from which `tensor` comes from.
    Raises:
      ValueError: if a cycle is detected.
    """
    node = layer._inbound_nodes[node_index]  # pylint: disable=protected-access
    # Prevent cycles.
    if node in nodes_in_progress:
      raise ValueError('The tensor ' + str(tensor) + ' at layer "' +
                       layer.name + '" is part of a cycle.')
    # Don't repeat work for shared subgraphs
    if node in finished_nodes:
      return
    node_key = _make_node_key(layer.name, node_index)
    # Update network_nodes.
    network_nodes.add(node_key)
    # Store the traversal order for layer sorting.
    if layer not in layer_indices:
      layer_indices[layer] = len(layer_indices)
    nodes_in_progress.add(node)
    # Propagate to all previous tensors connected to this node.
    for i in range(len(node.inbound_layers)):
      x = node.input_tensors[i]
      layer = node.inbound_layers[i]
      node_index = node.node_indices[i]
      tensor_index = node.tensor_indices[i]
      build_map(x, finished_nodes, nodes_in_progress, layer,
                node_index, tensor_index)
    finished_nodes.add(node)
    nodes_in_progress.remove(node)
    # Post-order append: producers are appended before their consumers.
    nodes_in_decreasing_depth.append(node)
  finished_nodes = set()
  nodes_in_progress = set()
  # Start the traversal from each output tensor's producing node.
  for x in outputs:
    layer, node_index, tensor_index = x._keras_history  # pylint: disable=protected-access
    build_map(x, finished_nodes, nodes_in_progress,
              layer=layer,
              node_index=node_index,
              tensor_index=tensor_index)
  # Nodes were appended in post-order from the outputs; walk them in reverse
  # (consumers before producers) to compute depths.
  for node in reversed(nodes_in_decreasing_depth):
    # If the depth is not set, the node has no outbound nodes (depth 0).
    depth = nodes_depths.setdefault(node, 0)
    # Update the depth of the corresponding layer
    previous_depth = layers_depths.get(node.outbound_layer, 0)
    # If we've seen this layer before at a higher depth,
    # we should use that depth instead of the node depth.
    # This is necessary for shared layers that have inputs at different
    # depth levels in the graph.
    depth = max(depth, previous_depth)
    layers_depths[node.outbound_layer] = depth
    nodes_depths[node] = depth
    # Update the depth of inbound nodes.
    # The "depth" of a node is the max of the depths
    # of all layers it is connected to.
    for i in range(len(node.inbound_layers)):
      inbound_layer = node.inbound_layers[i]
      node_index = node.node_indices[i]
      inbound_node = inbound_layer._inbound_nodes[node_index]  # pylint: disable=protected-access
      previous_depth = nodes_depths.get(inbound_node, 0)
      nodes_depths[inbound_node] = max(depth + 1, previous_depth)
  # Build a dict {depth: list of nodes with this depth}
  nodes_by_depth = {}
  for node, depth in nodes_depths.items():
    if depth not in nodes_by_depth:
      nodes_by_depth[depth] = []
    nodes_by_depth[depth].append(node)
  # Build a dict {depth: list of layers with this depth}
  layers_by_depth = {}
  for layer, depth in layers_depths.items():
    if depth not in layers_by_depth:
      layers_by_depth[depth] = []
    layers_by_depth[depth].append(layer)
  # Get sorted list of layer depths.
  depth_keys = list(layers_by_depth.keys())
  depth_keys.sort(reverse=True)
  # Set self.layers and self._layers_by_depth.
  layers = []
  for depth in depth_keys:
    layers_for_depth = layers_by_depth[depth]
    # Network.layers needs to have a deterministic order:
    # here we order them by traversal order.
    layers_for_depth.sort(key=lambda x: layer_indices[x])
    layers.extend(layers_for_depth)
  # Get sorted list of node depths.
  depth_keys = list(nodes_by_depth.keys())
  depth_keys.sort(reverse=True)
  # Check that all tensors required are computable.
  # computable_tensors: all tensors in the graph
  # that can be computed from the inputs provided.
  computable_tensors = []
  for x in inputs:
    computable_tensors.append(x)
  layers_with_complete_input = []  # To provide a better error msg.
  for depth in depth_keys:
    for node in nodes_by_depth[depth]:
      layer = node.outbound_layer
      if layer:
        for x in node.input_tensors:
          if x not in computable_tensors:
            raise ValueError('Graph disconnected: '
                             'cannot obtain value for tensor ' + str(x) +
                             ' at layer "' + layer.name + '". '
                             'The following previous layers '
                             'were accessed without issue: ' +
                             str(layers_with_complete_input))
        for x in node.output_tensors:
          computable_tensors.append(x)
        layers_with_complete_input.append(layer.name)
  # Ensure name unicity, which will be crucial for serialization
  # (since serialized nodes refer to layers by their name).
  all_names = [layer.name for layer in layers]
  for name in all_names:
    if all_names.count(name) != 1:
      raise ValueError('The name "' + name + '" is used ' +
                       str(all_names.count(name)) + ' times in the model. '
                       'All layer names should be unique.')
  return network_nodes, nodes_by_depth, layers, layers_by_depth
| 39.498133 | 120 | 0.657003 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import os
import weakref
import numpy as np
from six.moves import zip
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import saving
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.io_utils import ask_to_proceed_with_overwrite
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.training.checkpointable import layer_utils as checkpointable_layer_utils
from tensorflow.python.training.checkpointable import util as checkpointable_utils
from tensorflow.python.util import tf_inspect
try:
import h5py
except ImportError:
h5py = None
try:
import yaml
except ImportError:
yaml = None
class Network(base_layer.Layer):
def __init__(self, *args, **kwargs):
if (len(args) == 2 or
len(args) == 1 and 'outputs' in kwargs or
'inputs' in kwargs and 'outputs' in kwargs):
self._init_graph_network(*args, **kwargs)
else:
self._init_subclassed_network(**kwargs)
# checkpoints, but may cause "all Python objects matched" assertions to fail
# (in which case less strict assertions may be substituted if necessary).
  @checkpointable.no_automatic_dependency_tracking
  def _base_init(self, name=None):
    """Initializes attributes shared by graph and subclassed networks.

    Only plain attribute state is set up here; a Network creates no weights
    of its own and has no dtype. Dependency tracking is disabled via the
    decorator so these assignments are not recorded as checkpoint deps.

    Arguments:
      name: Optional string name for the network; auto-generated if None.
    """
    # The following are implemented as property functions:
    # self.trainable_weights
    # self.non_trainable_weights
    # self.input_spec
    # self.losses
    # self.updates
    self._init_set_name(name, zero_based=True)
    self._activity_regularizer = None
    # This acts just like the `trainable` attribute of any layer instance.
    # It does not affect users of the underlying layers, only users of the
    # Network instance.
    self.trainable = True
    self._is_compiled = False
    self._expects_training_arg = False
    # In many internal cases one needs to compute both the model's output
    # and its output mask jointly; this flag is flipped on for graph networks.
    self._compute_output_and_mask_jointly = False
    self.supports_masking = False
    # Don't clobber an optimizer a subclass constructor may already have set.
    if not hasattr(self, 'optimizer'):
      self.optimizer = None
    # Private attributes to implement compatibility with Layer.
    self._trainable_weights = []
    self._non_trainable_weights = []
    self._updates = []  # Used in symbolic mode only.
    self._losses = []
    self._eager_losses = []
    # A list of metric instances corresponding to the symbolic metric tensors
    # added using the `add_metric` API.
    self._metrics = []
    # A dictionary that maps metric names to metric result tensors.
    self._metrics_tensors = {}
    self._scope = None  # Never used.
    self._reuse = None  # Never used.
    if context.executing_eagerly():
      self._graph = None
    else:
      self._graph = ops.get_default_graph()  # Used in symbolic mode only.
    # A Network does not create weights of its own, thus has no dtype.
    self._dtype = None
    # All layers in order of horizontal graph traversal.
    # Entries are unique. Includes input and output layers.
    self._layers = []
    # Used in symbolic mode only, only in conjunction with graph-networks
    self._outbound_nodes = []
    self._inbound_nodes = []
    # Saver used by `save_weights` in TensorFlow checkpoint format; a weak
    # ref avoids a reference cycle between the network and its saver.
    self._checkpointable_saver = checkpointable_utils.CheckpointableSaver(
        weakref.ref(self))
  @checkpointable.no_automatic_dependency_tracking
  def _init_graph_network(self, inputs, outputs, name=None):
    """Builds the network from graph-traceable `inputs`/`outputs` tensors.

    Maps the graph of layer calls connecting `inputs` to `outputs` via
    `_map_graph_network`, caches nodes/layers by depth for later traversal,
    registers sublayers for checkpoint tracking, and records feedable input
    metadata used by training/feeding code.

    Arguments:
      inputs: Input tensor or list/tuple of input tensors.
      outputs: Output tensor or list/tuple of output tensors.
      name: Optional string name for the network.
    """
    self._call_convention = (base_layer_utils
                             .CallConvention.EXPLICIT_INPUTS_ARGUMENT)
    # Normalize and set self.inputs, self.outputs.
    if isinstance(inputs, (list, tuple)):
      self.inputs = list(inputs)  # Tensor or list of tensors.
    else:
      self.inputs = [inputs]
    if isinstance(outputs, (list, tuple)):
      self.outputs = list(outputs)
    else:
      self.outputs = [outputs]
    self._validate_graph_inputs_and_outputs()
    self._base_init(name=name)
    # Propagate masks through `call` only if the user's `call` can accept
    # them or a `compute_mask` override exists.
    self._compute_previous_mask = (
        'mask' in tf_inspect.getfullargspec(self.call).args or
        hasattr(self, 'compute_mask'))
    # A Network does not create weights of its own, thus it is already
    # built.
    self.built = True
    self._compute_output_and_mask_jointly = True
    self._is_graph_network = True
    self._dynamic = False
    self._input_layers = []
    self._output_layers = []
    self._input_coordinates = []
    self._output_coordinates = []
    # This is for performance optimization when calling the Network on new
    # inputs. Every time the Network is called on a set on input tensors,
    # we compute the output tensors, output masks and output shapes in one pass,
    # then cache them here. When any of these outputs is queried later, we
    # retrieve it from there instead of recomputing it.
    self._output_mask_cache = {}
    self._output_tensor_cache = {}
    self._output_shape_cache = {}
    # Build self._output_layers:
    for x in self.outputs:
      layer, node_index, tensor_index = x._keras_history  # pylint: disable=protected-access
      self._output_layers.append(layer)
      self._output_coordinates.append((layer, node_index, tensor_index))
    # Build self._input_layers:
    for x in self.inputs:
      layer, node_index, tensor_index = x._keras_history  # pylint: disable=protected-access
      # It's supposed to be an input layer, so only one node
      # (node 0) and one output tensor (tensor 0) are expected.
      assert node_index == 0
      assert tensor_index == 0
      self._input_layers.append(layer)
      self._input_coordinates.append((layer, node_index, tensor_index))
    # Trace the full graph between inputs and outputs once, up front.
    nodes, nodes_by_depth, layers, layers_by_depth = _map_graph_network(
        self.inputs, self.outputs)
    self._network_nodes = nodes
    self._nodes_by_depth = nodes_by_depth
    self._layers = layers
    self._layers_by_depth = layers_by_depth
    # Cache each sublayer's call argspec so `_run_internal_graph` does not
    # re-inspect on every call.
    self._layer_call_argspecs = {}
    for layer in self._layers:
      self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
    self._track_layers(layers)
    # Create the node linking internal inputs to internal outputs.
    base_layer.Node(
        outbound_layer=self,
        inbound_layers=[],
        node_indices=[],
        tensor_indices=[],
        input_tensors=self.inputs,
        output_tensors=self.outputs)
    # Build self.input_names and self.output_names.
    self.input_names = []
    self.output_names = []
    self._feed_input_names = []
    self._feed_inputs = []
    self._feed_input_shapes = []
    for i, layer in enumerate(self._input_layers):
      self.input_names.append(layer.name)
      # Only placeholder inputs are feedable at run time.
      if layer.is_placeholder:
        self._feed_input_names.append(layer.name)
        self._feed_input_shapes.append(backend.int_shape(self.inputs[i]))
        self._feed_inputs.append(layer.input)
    for layer in self._output_layers:
      self.output_names.append(layer.name)
@checkpointable.no_automatic_dependency_tracking
def _init_subclassed_network(self, name=None, dynamic=False):
self._base_init(name=name)
self._is_graph_network = False
self._dynamic = dynamic
call_argspec = tf_inspect.getfullargspec(self.call)
if 'training' in call_argspec.args:
self._expects_training_arg = True
else:
self._expects_training_arg = False
self._call_convention = self._determine_call_convention(call_argspec)
self.outputs = []
self.inputs = []
self.built = False
@property
def dynamic(self):
if self._is_graph_network:
return any(layer.dynamic for layer in self.layers)
return self._dynamic or any(layer.dynamic for layer in self.layers)
  def _determine_call_convention(self, call_argspec):
    """Decides how inputs are passed to `self.call` (see CallConvention).

    Arguments:
      call_argspec: Result of `tf_inspect.getfullargspec(self.call)`.

    Returns:
      A `base_layer_utils.CallConvention` member.

    Raises:
      TypeError: if `call` takes one positional argument and also a separate
        `inputs` keyword argument, making the inputs ambiguous.
    """
    if call_argspec.varargs:
      may_take_single_argument = False
    else:
      try:
        # Note: tf_inspect doesn't raise a TypeError when regular inspect would,
        # so we collect which argument names are bound to `self` to exclude
        # them from the positional-argument count below.
        all_args = tf_inspect.getcallargs(self.call, None)
        self_args = set()
        for arg_name, obj in all_args.items():
          if obj is self:
            self_args.add(arg_name)
        may_take_single_argument = True
      except TypeError:
        may_take_single_argument = False
    if may_take_single_argument:
      # Arguments with defaults are not counted as positional.
      all_positional_args = len(call_argspec.args)
      if call_argspec.defaults is not None:
        all_positional_args -= len(call_argspec.defaults)
      non_self_positional_args = all_positional_args
      for positional_arg_name in call_argspec.args[:all_positional_args]:
        if positional_arg_name in self_args:
          non_self_positional_args -= 1
      if non_self_positional_args == 1:
        if 'inputs' in call_argspec.args[all_positional_args:]:
          raise TypeError(
              "Model.call() takes a single positional argument (to which "
              "inputs are passed by convention) and a separate 'inputs' "
              "argument. Unable to determine which arguments are inputs.")
        return base_layer_utils.CallConvention.SINGLE_POSITIONAL_ARGUMENT
    if 'inputs' in call_argspec.args:
      return base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT
    else:
      return base_layer_utils.CallConvention.POSITIONAL_ARGUMENTS_ARE_INPUTS
def _track_layers(self, layers):
weight_layer_index = 0
for layer_index, layer in enumerate(layers):
if layer.weights:
self._track_checkpointable(
layer, name='layer_with_weights-%d' % weight_layer_index,
overwrite=True)
weight_layer_index += 1
# case it has/will have Checkpointable dependencies.
self._track_checkpointable(
layer, name='layer-%d' % layer_index, overwrite=True)
  def __setattr__(self, name, value):
    """Tracks layers and checkpointable structures assigned as attributes."""
    if not getattr(self, '_setattr_tracking', True):
      # Tracking has been explicitly disabled (e.g. inside
      # `no_automatic_dependency_tracking`); fall through to plain setattr.
      super(Network, self).__setattr__(name, value)
      return
    if (isinstance(value, (base_layer.Layer,
                           data_structures.CheckpointableDataStructure))
        or checkpointable_layer_utils.has_weights(value)):
      try:
        # Probing `_is_graph_network` verifies that __init__ already ran;
        # assigning layers before super().__init__() would silently break
        # tracking otherwise.
        self._is_graph_network
      except AttributeError:
        raise RuntimeError('It looks like you are subclassing `Model` and you '
                           'forgot to call `super(YourClass, self).__init__()`.'
                           ' Always start with this line.')
      # Keep track of checkpointable objects,
      # for the needs of `self.save/save_weights`.
      value = data_structures.sticky_attribute_assignment(
          checkpointable=self, value=value, name=name)
    super(Network, self).__setattr__(name, value)
    # Keep track of metric instance created in subclassed model/layer.
    # We do this so that we can maintain the correct order of metrics by adding
    # the instance to the `metrics` list as soon as it is created.
    from tensorflow.python.keras import metrics as metrics_module  # pylint: disable=g-import-not-at-top
    if isinstance(value, metrics_module.Metric):
      self._metrics.append(value)
@property
def stateful(self):
return any((hasattr(layer, 'stateful') and layer.stateful)
for layer in self.layers)
def reset_states(self):
for layer in self.layers:
if hasattr(layer, 'reset_states') and getattr(layer, 'stateful', False):
layer.reset_states()
@property
def state_updates(self):
state_updates = []
for layer in self.layers:
if getattr(layer, 'stateful', False):
if hasattr(layer, 'updates'):
state_updates += layer.updates
return state_updates
def get_weights(self):
weights = []
for layer in self.layers:
weights += layer.weights
return backend.batch_get_value(weights)
def set_weights(self, weights):
tuples = []
for layer in self.layers:
num_param = len(layer.weights)
layer_weights = weights[:num_param]
for sw, w in zip(layer.weights, layer_weights):
tuples.append((sw, w))
weights = weights[num_param:]
backend.batch_set_value(tuples)
def compute_mask(self, inputs, mask):
if not self._is_graph_network:
return None
inputs = generic_utils.to_list(inputs)
if mask is None:
masks = [None for _ in range(len(inputs))]
else:
masks = generic_utils.to_list(mask)
_, output_masks = self._run_internal_graph(inputs, mask=masks)
return output_masks
@property
def layers(self):
return checkpointable_layer_utils.filter_empty_layer_containers(
self._layers)
def get_layer(self, name=None, index=None):
# TODO(fchollet): We could build a dictionary based on layer names
# since they are constant, but we have not done that yet.
if index is not None:
if len(self.layers) <= index:
raise ValueError('Was asked to retrieve layer at index ' + str(index) +
' but model only has ' + str(len(self.layers)) +
' layers.')
else:
return self.layers[index]
else:
if not name:
raise ValueError('Provide either a layer name or layer index.')
for layer in self.layers:
if layer.name == name:
return layer
raise ValueError('No such layer: ' + name)
@property
def _unfiltered_updates(self):
updates = []
for layer in self.layers:
if isinstance(layer, Network):
updates += layer._unfiltered_updates
else:
updates += layer.updates
updates += self._updates
return updates
@property
def _unfiltered_losses(self):
losses = []
if context.executing_eagerly():
losses.extend(self._eager_losses)
else:
losses.extend(self._losses)
for layer in self.layers:
if isinstance(layer, Network):
losses += layer._unfiltered_losses
else:
losses += layer.losses
return losses
@checkpointable.no_automatic_dependency_tracking
def _clear_losses(self):
self._eager_losses = []
for layer in self.layers:
if isinstance(layer, Network):
layer._clear_losses()
else:
layer._eager_losses = []
  @property
  def updates(self):
    """Retrieves the network's updates, filtered for relevance.

    Only includes updates that are either unconditional, or conditional on
    inputs this model has actually been called on — updates created by this
    model's layers outside of the model are excluded.

    Returns:
      A deduplicated list of update ops.
    """
    # A frozen, stateless network has no updates by definition.
    if not self.trainable and not self.stateful:
      return []
    updates = self._unfiltered_updates
    # `updates` might contain irrelevant updates, so it needs to be filtered
    # with respect to inputs the model has been called on.
    relevant_inputs = []
    for i in range(0, len(self._inbound_nodes)):
      inputs = self.get_input_at(i)
      if isinstance(inputs, list):
        relevant_inputs += inputs
      else:
        relevant_inputs.append(inputs)
    if not relevant_inputs:
      # Never called: no conditionality information, return everything.
      return list(set(updates))
    reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, updates)
    relevant_conditional_updates = [x for x in updates if x in reachable]
    unconditional_updates = [
        x for x in updates if x._unconditional_update]  # pylint: disable=protected-access
    # A layer could be used multiple times in a nested structure,
    # so the updates list must be de-duped.
    return list(set(relevant_conditional_updates + unconditional_updates))
  @property
  def losses(self):
    """Retrieves the network's losses, filtered for relevance.

    Only includes losses that are unconditional, or conditional on inputs
    this model has been called on, plus this network's own `_losses`.

    Returns:
      A list of loss tensors (deduplicated in graph mode).
    """
    losses = self._unfiltered_losses
    if context.executing_eagerly():
      return losses
    # TODO(kaftan/fchollet): Clean this up / make it obsolete.
    # This is a super ugly, confusing check necessary to
    # handle the case where we are executing in a function graph in eager mode
    # but the model was constructed symbolically in a separate graph scope.
    # We need to capture the losses created in the current graph function,
    # and filter out the incorrect loss tensors created when symbolically
    # building the graph.
    # We have to use this check because the code after it that checks
    # for reachable inputs only captures the part of the model that was
    # built symbolically, and captures the wrong tensors from a different
    # func graph (causing a crash later on when trying to execute the
    # graph function)
    with ops.init_scope():
      if context.executing_eagerly():
        return [loss for loss in losses
                if loss.graph == ops.get_default_graph()]
    # Gather all inputs this model has ever been called on, to decide which
    # conditional losses are actually reachable (relevant).
    relevant_inputs = []
    for i in range(0, len(self._inbound_nodes)):
      inputs = self.get_input_at(i)
      if isinstance(inputs, list):
        relevant_inputs += inputs
      else:
        relevant_inputs.append(inputs)
    if not relevant_inputs:
      return losses
    reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, losses)
    relevant_conditional_losses = [x for x in losses if x in reachable]
    unconditional_losses = [
        x for x in losses if x._unconditional_loss]  # pylint: disable=protected-access
    return list(set(
        relevant_conditional_losses + unconditional_losses + self._losses))
@property
def trainable_weights(self):
return checkpointable_layer_utils.gather_trainable_weights(
trainable=self.trainable,
sub_layers=self._layers,
extra_variables=self._trainable_weights)
@property
def non_trainable_weights(self):
return checkpointable_layer_utils.gather_non_trainable_weights(
trainable=self.trainable,
sub_layers=self._layers,
extra_variables=self._non_trainable_weights + self._trainable_weights)
@property
def metrics(self):
metrics = []
for layer in self.layers:
metrics += layer._metrics # pylint: disable=protected-access
return metrics + self._metrics
@property
def _all_metrics_tensors(self):
# TODO(psv): Remove this property.
metrics_tensors = {}
for layer in self.layers:
if isinstance(layer, Network):
metrics_tensors.update(layer._all_metrics_tensors)
else:
metrics_tensors.update(layer._metrics_tensors)
metrics_tensors.update(self._metrics_tensors)
return metrics_tensors
@property
def input_spec(self):
# If not a graph network, can't assume anything.
if not self._is_graph_network:
return None
specs = []
for layer in self._input_layers:
if layer.input_spec is None:
specs.append(None)
else:
if not isinstance(layer.input_spec, list):
raise TypeError('Layer ' + layer.name +
' has an input_spec attribute that '
'is not a list. We expect a list. '
'Found input_spec = ' + str(layer.input_spec))
specs += layer.input_spec
if len(specs) == 1:
return specs[0]
return specs
  @base_layer.default
  def build(self, input_shape):
    """Builds the model based on input shapes received.

    For graph networks this just marks the model built. For subclassed
    models it creates symbolic placeholders matching `input_shape` and calls
    the model once so that all variables get created.

    Arguments:
      input_shape: A tuple, list of shapes, or TensorShape.

    Raises:
      ValueError: if the shape is missing/invalid, if `call` has extra
        required arguments that cannot be driven with placeholders, or if
        the model cannot be traced with float placeholder inputs.
    """
    if self._is_graph_network:
      self.built = True
      return
    if input_shape is None:
      raise ValueError('Input shape must be defined when calling build on a '
                       'model subclass network.')
    valid_types = (tuple, list, tensor_shape.TensorShape)
    if not isinstance(input_shape, valid_types):
      raise ValueError('Specified input shape is not one of the valid types. '
                       'Please specify a batch input shape of type tuple or '
                       'list of input shapes. User provided '
                       'input type: {}'.format(type(input_shape)))
    if input_shape and not self.inputs:
      # Pick a graph to create the placeholders in: a throwaway FuncGraph in
      # eager mode, the global Keras graph otherwise.
      if context.executing_eagerly():
        graph = func_graph.FuncGraph('build_graph')
      else:
        graph = backend.get_graph()
      with graph.as_default():
        if isinstance(input_shape, list):
          x = [base_layer_utils.generate_placeholders_from_shape(shape)
               for shape in input_shape]
        else:
          x = base_layer_utils.generate_placeholders_from_shape(input_shape)
        kwargs = {}
        call_signature = tf_inspect.getfullargspec(self.call)
        call_args = call_signature.args
        # Inspect arguments past `self` and the inputs argument; any other
        # required positional argument makes `build` impossible.
        if len(call_args) > 2:
          if call_signature.defaults:
            call_args = call_args[2:-len(call_signature.defaults)]
          else:
            call_args = call_args[2:]
          for arg in call_args:
            if arg == 'training':
              # Case where `training` is a positional arg with no default.
              kwargs['training'] = False
            else:
              # Has invalid call signature with unknown positional arguments.
              raise ValueError(
                  'Currently, you cannot build your model if it has '
                  'positional or keyword arguments that are not '
                  'inputs to the model, but are required for its '
                  '`call` method. Instead, in order to instantiate '
                  'and build your model, `call` your model on real '
                  'tensor data with all expected call arguments.')
        elif len(call_args) < 2:
          # Signature without `inputs`.
          raise ValueError('You can only call `build` on a model if its `call` '
                           'method accepts an `inputs` argument.')
        try:
          self.call(x, **kwargs)
        except (errors.InvalidArgumentError, TypeError):
          raise ValueError('You cannot build your model by calling `build` '
                           'if your layers do not support float type inputs. '
                           'Instead, in order to instantiate and build your '
                           'model, `call` your model on real tensor data (of '
                           'the correct dtype).')
    if self._layers:
      self._track_layers(self._layers)
    self.built = True
def call(self, inputs, training=None, mask=None):
if not self._is_graph_network:
raise NotImplementedError('When subclassing the `Model` class, you should'
' implement a `call` method.')
inputs = generic_utils.to_list(inputs)
if mask is None:
masks = [None for _ in range(len(inputs))]
else:
masks = generic_utils.to_list(mask)
outputs, _ = self._run_internal_graph(inputs,
training=training,
mask=masks)
return outputs
def _call_and_compute_mask(self, inputs, training=None, mask=None):
inputs = generic_utils.to_list(inputs)
if mask is None:
masks = [None for _ in range(len(inputs))]
else:
masks = generic_utils.to_list(mask)
return self._run_internal_graph(inputs,
training=training,
mask=masks)
  def compute_output_shape(self, input_shape):
    """Computes output shape(s) by propagating shapes through the graph.

    Results are cached in `self._output_shape_cache` keyed on the
    (normalized) input shapes.

    Arguments:
      input_shape: Shape tuple/TensorShape or list thereof, one per input.

    Returns:
      A TensorShape (single output) or list of TensorShapes.

    Raises:
      ValueError: if the number of provided shapes does not match the
        number of model inputs.
    """
    if not self._is_graph_network:
      return super(Network, self).compute_output_shape(input_shape)
    # Normalize shapes to plain tuples (hashable, cache-friendly).
    if isinstance(input_shape, list):
      input_shapes = []
      for shape in input_shape:
        if shape is not None:
          input_shapes.append(tuple(tensor_shape.TensorShape(shape).as_list()))
        else:
          input_shapes.append(None)
    else:
      if input_shape is not None:
        input_shapes = [tuple(tensor_shape.TensorShape(input_shape).as_list())]
      else:
        input_shapes = [None]
    if len(input_shapes) != len(self._input_layers):
      raise ValueError('Invalid input_shape argument ' + str(input_shape) +
                       ': model has ' + str(len(self._input_layers)) +
                       ' tensor inputs.')
    cache_key = generic_utils.object_list_uid(input_shapes)
    if cache_key in self._output_shape_cache:
      output_shapes = self._output_shape_cache[cache_key]
    else:
      # Maps '<layer.name>_<node_index>_<tensor_index>' -> shape tuple.
      layers_to_output_shapes = {}
      for i in range(len(input_shapes)):
        layer = self._input_layers[i]
        input_shape = input_shapes[i]
        # It's an input layer: there is only one node
        # and one tensor output, so indices are fixed at 0.
        shape_key = layer.name + '_0_0'
        layers_to_output_shapes[shape_key] = input_shape
      depth_keys = list(self._nodes_by_depth.keys())
      depth_keys.sort(reverse=True)
      # Iterate over nodes, by depth level.
      if len(depth_keys) > 1:
        for depth in depth_keys:
          nodes = self._nodes_by_depth[depth]
          for node in nodes:
            # This is always a single layer, never a list.
            layer = node.outbound_layer
            if layer in self._input_layers:
              # We've already covered the input layers
              continue
            # Get the accumulated input shapes for this node's inputs.
            input_shapes = []
            for j in range(len(node.inbound_layers)):
              inbound_layer = node.inbound_layers[j]
              node_index = node.node_indices[j]
              tensor_index = node.tensor_indices[j]
              shape_key = inbound_layer.name + '_%s_%s' % (node_index,
                                                           tensor_index)
              input_shape = layers_to_output_shapes[shape_key]
              input_shapes.append(input_shape)
            if len(input_shapes) == 1:
              output_shape = layer.compute_output_shape(input_shapes[0])
            else:
              output_shape = layer.compute_output_shape(input_shapes)
            # Normalize the layer's answer back to tuples.
            if isinstance(output_shape, list):
              output_shapes = [
                  tuple(tensor_shape.TensorShape(shape).as_list())
                  for shape in output_shape
              ]
            else:
              output_shapes = [
                  tuple(tensor_shape.TensorShape(output_shape).as_list())
              ]
            # Record this node's output shapes under its own keys.
            node_index = layer._inbound_nodes.index(node)
            for j in range(len(output_shapes)):
              shape_key = layer.name + '_%s_%s' % (node_index, j)
              layers_to_output_shapes[shape_key] = output_shapes[j]
      # Read off the shapes at the model's output coordinates.
      output_shapes = []
      for i in range(len(self._output_layers)):
        layer, node_index, tensor_index = self._output_coordinates[i]
        shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)
        output_shapes.append(layers_to_output_shapes[shape_key])
      self._output_shape_cache[cache_key] = output_shapes
    if isinstance(output_shapes, list):
      if len(output_shapes) == 1:
        return tensor_shape.TensorShape(output_shapes[0])
      else:
        return [tensor_shape.TensorShape(shape) for shape in output_shapes]
    else:
      return tensor_shape.TensorShape(output_shapes)
  def _run_internal_graph(self, inputs, training=None, mask=None):
    """Computes output tensors and masks for new inputs.

    Walks the cached node graph depth-by-depth, reapplying each layer's ops
    to the freshly supplied input tensors, while propagating `training` and
    masks where each layer's call signature accepts them.

    Arguments:
      inputs: List of input tensors (already normalized to a list).
      training: Boolean or scalar bool tensor, or None.
      mask: List of masks (one per input), or None. Note: masks do not
        interfere with regular behavior if your model does not
        use masking; they can be ignored.

    Returns:
      A tuple `(output_tensors, output_masks)`; each is a single value when
      the model has a single output, a list otherwise.
    """
    if mask is None:
      masks = [None for _ in range(len(inputs))]
    else:
      masks = mask
    # Dictionary mapping reference tensors to tuples
    # (computed tensor, compute mask)
    # we assume a 1:1 mapping from tensor to mask
    tensor_map = {}
    for x, y, mask in zip(self.inputs, inputs, masks):
      tensor_map[str(id(x))] = (y, mask)
    depth_keys = list(self._nodes_by_depth.keys())
    depth_keys.sort(reverse=True)
    for depth in depth_keys:
      nodes = self._nodes_by_depth[depth]
      for node in nodes:
        # This is always a single layer, never a list.
        layer = node.outbound_layer
        reference_input_tensors = node.input_tensors
        reference_output_tensors = node.output_tensors
        # If all previous input tensors are available in tensor_map,
        # then call node.inbound_layer on them.
        computed_data = []  # List of tuples (input, mask).
        for x in reference_input_tensors:
          if str(id(x)) in tensor_map:
            computed_data.append(tensor_map[str(id(x))])
        if len(computed_data) == len(reference_input_tensors):
          # Call layer (reapplying ops to new inputs).
          with ops.name_scope(layer.name):
            if node.arguments:
              kwargs = node.arguments
            else:
              kwargs = {}
            # Ensure `training` arg propagation if applicable.
            argspec = self._layer_call_argspecs[layer].args
            if 'training' in argspec:
              kwargs.setdefault('training', training)
            if len(computed_data) == 1:
              # Single-input case.
              computed_tensor, computed_mask = computed_data[0]
              # Ensure mask propagation if applicable.
              if 'mask' in argspec:
                kwargs.setdefault('mask', computed_mask)
              # Compute outputs and masks.
              if (isinstance(layer, Network) and
                  layer._compute_output_and_mask_jointly):
                output_tensors, output_masks = layer._call_and_compute_mask(
                    computed_tensor, **kwargs)
              else:
                if context.executing_eagerly():
                  output_tensors = layer(computed_tensor, **kwargs)
                elif layer.dynamic:
                  output_tensors = layer._symbolic_call(computed_tensor)  # pylint: disable=protected-access
                else:
                  output_tensors = layer.call(computed_tensor, **kwargs)
                if hasattr(layer, 'compute_mask'):
                  output_masks = layer.compute_mask(computed_tensor,
                                                    computed_mask)
                else:
                  output_masks = [None for _ in output_tensors]
              computed_tensors = [computed_tensor]
            else:
              # Multi-input case: layers receive lists of tensors/masks.
              computed_tensors = [x[0] for x in computed_data]
              computed_masks = [x[1] for x in computed_data]
              # Ensure mask propagation if applicable.
              if 'mask' in argspec:
                kwargs.setdefault('mask', computed_masks)
              # Compute outputs and masks.
              if (isinstance(layer, Network) and
                  layer._compute_output_and_mask_jointly):
                output_tensors, output_masks = layer._call_and_compute_mask(
                    computed_tensors, **kwargs)
              else:
                if context.executing_eagerly():
                  output_tensors = layer(computed_tensors, **kwargs)
                elif layer.dynamic:
                  output_tensors = layer._symbolic_call(computed_tensors)  # pylint: disable=protected-access
                else:
                  output_tensors = layer.call(computed_tensors, **kwargs)
                if hasattr(layer, 'compute_mask'):
                  output_masks = layer.compute_mask(computed_tensors,
                                                    computed_masks)
                else:
                  output_masks = [None for _ in output_tensors]
            output_tensors = generic_utils.to_list(output_tensors)
            if output_masks is None:
              output_masks = [None for _ in output_tensors]
            else:
              output_masks = generic_utils.to_list(output_masks)
            if not context.executing_eagerly():
              # Set mask metadata.
              for x, m in zip(output_tensors, output_masks):
                try:
                  x._keras_mask = m
                except AttributeError:
                  pass
              # Apply activity regularizer if any.
              layer._handle_activity_regularization(computed_tensors,
                                                    output_tensors)
          # Update tensor_map.
          for x, y, mask in zip(reference_output_tensors, output_tensors,
                                output_masks):
            tensor_map[str(id(x))] = (y, mask)
    # Read results off the model's output tensors.
    output_tensors = []
    output_masks = []
    output_shapes = []
    for x in self.outputs:
      assert str(id(x)) in tensor_map, 'Could not compute output ' + str(x)
      tensor, mask = tensor_map[str(id(x))]
      output_shapes.append(backend.int_shape(x))
      output_tensors.append(tensor)
      output_masks.append(mask)
    # Unwrap single-output models to bare values.
    if len(output_tensors) == 1:
      output_tensors = output_tensors[0]
      if output_shapes is not None:
        output_shapes = output_shapes[0]
      if output_masks is not None:
        output_masks = output_masks[0]
    if output_shapes is not None:
      # Cache the computed shapes for compute_output_shape.
      input_shapes = [backend.int_shape(x) for x in inputs]
      cache_key = generic_utils.object_list_uid(input_shapes)
      self._output_shape_cache[cache_key] = output_shapes
    return output_tensors, output_masks
  def get_config(self):
    """Returns a serializable config for a graph network.

    The config contains layer configs plus the inter-layer connectivity
    (`inbound_nodes`), with node indices remapped to exclude nodes that do
    not belong to this network.

    Returns:
      A deep-copied dict suitable for `from_config` / JSON serialization.

    Raises:
      NotImplementedError: for subclassed (non-graph) networks.
    """
    if not self._is_graph_network:
      raise NotImplementedError
    config = {
        'name': self.name,
    }
    # Map '<layer.name>_ib-<index>' node keys to their new indices once
    # nodes not belonging to this network have been dropped.
    node_conversion_map = {}
    for layer in self.layers:
      if issubclass(layer.__class__, Network):
        # Networks start with a pre-existing node
        # linking their input to output.
        kept_nodes = 1
      else:
        kept_nodes = 0
      for original_node_index, node in enumerate(layer._inbound_nodes):
        node_key = _make_node_key(layer.name, original_node_index)
        if node_key in self._network_nodes:
          node_conversion_map[node_key] = kept_nodes
          kept_nodes += 1
    # Serialize each layer along with its (filtered, remapped) inbound nodes.
    layer_configs = []
    for layer in self.layers:  # From the earliest layers on.
      layer_class_name = layer.__class__.__name__
      layer_config = layer.get_config()
      filtered_inbound_nodes = []
      for original_node_index, node in enumerate(layer._inbound_nodes):
        node_key = _make_node_key(layer.name, original_node_index)
        if node_key in self._network_nodes:
          # The node is relevant to the model:
          # add to filtered_inbound_nodes.
          if node.arguments:
            try:
              # Only keep call kwargs that survive a JSON round-trip.
              json.dumps(node.arguments)
              kwargs = node.arguments
            except TypeError:
              logging.warning(
                  'Layer ' + layer.name +
                  ' was passed non-serializable keyword arguments: ' +
                  str(node.arguments) + '. They will not be included '
                  'in the serialized model (and thus will be missing '
                  'at deserialization time).')
              kwargs = {}
          else:
            kwargs = {}
          if node.inbound_layers:
            node_data = []
            for i in range(len(node.inbound_layers)):
              inbound_layer = node.inbound_layers[i]
              node_index = node.node_indices[i]
              tensor_index = node.tensor_indices[i]
              node_key = _make_node_key(inbound_layer.name, node_index)
              new_node_index = node_conversion_map.get(node_key, 0)
              node_data.append(
                  [inbound_layer.name, new_node_index, tensor_index, kwargs])
            filtered_inbound_nodes.append(node_data)
      layer_configs.append({
          'name': layer.name,
          'class_name': layer_class_name,
          'config': layer_config,
          'inbound_nodes': filtered_inbound_nodes,
      })
    config['layers'] = layer_configs
    # Gather info about inputs and outputs.
    model_inputs = []
    for i in range(len(self._input_layers)):
      layer, node_index, tensor_index = self._input_coordinates[i]
      node_key = _make_node_key(layer.name, node_index)
      if node_key not in self._network_nodes:
        continue
      new_node_index = node_conversion_map[node_key]
      model_inputs.append([layer.name, new_node_index, tensor_index])
    config['input_layers'] = model_inputs
    model_outputs = []
    for i in range(len(self._output_layers)):
      layer, node_index, tensor_index = self._output_coordinates[i]
      node_key = _make_node_key(layer.name, node_index)
      if node_key not in self._network_nodes:
        continue
      new_node_index = node_conversion_map[node_key]
      model_outputs.append([layer.name, new_node_index, tensor_index])
    config['output_layers'] = model_outputs
    # Deep copy so callers cannot mutate internal state via the config.
    return copy.deepcopy(config)
  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Instantiates a graph network from its config (output of `get_config`).

    Arguments:
      config: Model config dictionary.
      custom_objects: Optional dict mapping names (strings) to custom
        classes or functions to consider during deserialization.

    Returns:
      A model instance.

    Raises:
      ValueError: in case of improperly formatted config dict.
    """
    # Layer instances created during
    # the graph reconstruction process
    created_layers = {}
    # Dictionary mapping layer instances to
    # node data that specifies a layer call.
    # It acts as a queue that maintains any unprocessed
    # layer call until it becomes possible to process it
    # (i.e. until the input tensors to the call all exist).
    unprocessed_nodes = {}
    def add_unprocessed_node(layer, node_data):
      """Queues `node_data` (a pending call) for `layer`."""
      if layer not in unprocessed_nodes:
        unprocessed_nodes[layer] = [node_data]
      else:
        unprocessed_nodes[layer].append(node_data)
    def process_node(layer, node_data):
      """Replays one layer call once all of its input tensors exist.

      Re-queues the node if an inbound layer or node isn't built yet.
      """
      input_tensors = []
      for input_data in node_data:
        inbound_layer_name = input_data[0]
        inbound_node_index = input_data[1]
        inbound_tensor_index = input_data[2]
        if len(input_data) == 3:
          kwargs = {}
        elif len(input_data) == 4:
          kwargs = input_data[3]
        else:
          raise ValueError('Improperly formatted model config.')
        if inbound_layer_name not in created_layers:
          add_unprocessed_node(layer, node_data)
          return
        inbound_layer = created_layers[inbound_layer_name]
        if len(inbound_layer._inbound_nodes) <= inbound_node_index:
          add_unprocessed_node(layer, node_data)
          return
        inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
        input_tensors.append(inbound_node.output_tensors[inbound_tensor_index])
      # Call layer on its inputs, thus creating the node
      # and building the layer if needed.
      if input_tensors:
        if len(input_tensors) == 1:
          layer(input_tensors[0], **kwargs)
        else:
          layer(input_tensors, **kwargs)
    def process_layer(layer_data):
      """Deserializes one layer and queues its calls for later replay."""
      layer_name = layer_data['name']
      # Instantiate layer.
      from tensorflow.python.keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
      layer = deserialize_layer(layer_data, custom_objects=custom_objects)
      created_layers[layer_name] = layer
      # Gather layer inputs.
      inbound_nodes_data = layer_data['inbound_nodes']
      for node_data in inbound_nodes_data:
        # We don't process nodes (i.e. make layer calls)
        # yet; defer until all layers have been instantiated.
        add_unprocessed_node(layer, node_data)
    for layer_data in config['layers']:
      process_layer(layer_data)
    # Drain the queue; each pass processes every node whose inputs exist,
    # so eventually the whole graph gets replayed.
    while unprocessed_nodes:
      for layer_data in config['layers']:
        layer = created_layers[layer_data['name']]
        if layer in unprocessed_nodes:
          for node_data in unprocessed_nodes.pop(layer):
            process_node(layer, node_data)
    name = config.get('name')
    input_tensors = []
    output_tensors = []
    for layer_data in config['input_layers']:
      layer_name, node_index, tensor_index = layer_data
      assert layer_name in created_layers
      layer = created_layers[layer_name]
      layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
      input_tensors.append(layer_output_tensors[tensor_index])
    for layer_data in config['output_layers']:
      layer_name, node_index, tensor_index = layer_data
      assert layer_name in created_layers
      layer = created_layers[layer_name]
      layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
      output_tensors.append(layer_output_tensors[tensor_index])
    return cls(inputs=input_tensors, outputs=output_tensors, name=name)
def save(self, filepath, overwrite=True, include_optimizer=True):
  """Saves the whole model (architecture + weights) to a single file.

  Delegates to `tensorflow.python.keras.models.save_model`.

  Args:
    filepath: String, path of the file to save the model to.
    overwrite: Whether to silently overwrite any existing file at the
      target location.
    include_optimizer: If True, the optimizer's state is saved as well.

  Raises:
    NotImplementedError: If the model is not a graph network (e.g. a
      subclassed model); only `save_weights` is supported in that case.
  """
  if not self._is_graph_network:
    raise NotImplementedError(
        'Currently `save` requires model to be a graph network. Consider '
        'using `save_weights`, in order to save the weights of the model.')
  # Deferred import — presumably to avoid a circular dependency between
  # this module and `keras.models` at import time (TODO confirm).
  from tensorflow.python.keras.models import save_model
  save_model(self, filepath, overwrite, include_optimizer)
def save_weights(self, filepath, overwrite=True, save_format=None):
  """Saves all layer weights, either as an HDF5 file or a TF checkpoint.

  Args:
    filepath: String, path to the file to save the weights to. In
      TensorFlow format, this is the checkpoint prefix (an `.index` file
      is created alongside).
    overwrite: Whether to silently overwrite any existing file at the
      target location, or ask the user interactively.
    save_format: Either `'tf'`/`'tensorflow'` or `'h5'`/`'hdf5'`/`'keras'`.
      If None, the format is inferred from the filename: an `.h5`,
      `.keras` or `.hdf5` extension means HDF5, anything else means TF.

  Raises:
    ValueError: If `save_format` is unrecognized, or if `'tf'` was
      requested but `filepath` has an HDF5-style extension.
    ImportError: If saving in HDF5 format and h5py is not available.
  """
  filepath_is_h5 = _is_hdf5_filepath(filepath)
  # Resolve the effective save format: explicit argument wins, otherwise
  # infer from the file extension.
  if save_format is None:
    if filepath_is_h5:
      save_format = 'h5'
    else:
      save_format = 'tf'
  else:
    user_format = save_format.lower().strip()
    if user_format in ('tensorflow', 'tf'):
      save_format = 'tf'
    elif user_format in ('hdf5', 'h5', 'keras'):
      save_format = 'h5'
    else:
      raise ValueError(
          'Unknown format "%s". Was expecting one of {"tf", "h5"}.' % (
              save_format,))
  if save_format == 'tf' and filepath_is_h5:
    raise ValueError(
        ('save_weights got save_format="tf"/"tensorflow", but the '
         'filepath ("%s") looks like an HDF5 file. Omit the ".h5"/".keras" '
         'when saving in TensorFlow format.')
        % filepath)
  if save_format == 'h5' and h5py is None:
    raise ImportError(
        '`save_weights` requires h5py when saving in hdf5.')
  # For the overwrite prompt, check the file that will actually exist on
  # disk: TF format writes `<filepath>.index` (plus data shards).
  if save_format == 'tf':
    check_filepath = filepath + '.index'
  else:
    check_filepath = filepath
  if not overwrite and os.path.isfile(check_filepath):
    proceed = ask_to_proceed_with_overwrite(check_filepath)
    if not proceed:
      return
  if save_format == 'h5':
    with h5py.File(filepath, 'w') as f:
      saving.save_weights_to_hdf5_group(f, self.layers)
  else:
    # TF-format path: a session is only needed in graph mode.
    if context.executing_eagerly():
      session = None
    else:
      session = backend.get_session()
    optimizer = getattr(self, 'optimizer', None)
    # Keras (non-checkpointable) optimizers cannot be captured by the
    # checkpoint machinery, so warn that their state will be dropped.
    if (optimizer
        and not isinstance(optimizer, checkpointable.CheckpointableBase)):
      logging.warning(
          ('This model was compiled with a Keras optimizer (%s) but is being '
           'saved in TensorFlow format with `save_weights`. The model\'s '
           'weights will be saved, but unlike with TensorFlow optimizers in '
           'the TensorFlow format the optimizer\'s state will not be '
           'saved.\n\nConsider using a TensorFlow optimizer from `tf.train`.')
          % (optimizer,))
    self._checkpointable_saver.save(filepath, session=session)
    # Record this file as the latest checkpoint so tf.train checkpoint
    # utilities can find it.
    checkpoint_management.update_checkpoint_state(
        save_dir=os.path.dirname(filepath),
        model_checkpoint_path=filepath,
        all_model_checkpoint_paths=[filepath])
def load_weights(self, filepath, by_name=False):
  """Loads all layer weights from a TensorFlow or HDF5 weight file.

  The format is detected from the file: an HDF5-style extension means
  HDF5; otherwise the file is probed as a TF checkpoint, falling back to
  HDF5 if the checkpoint reader raises `DataLossError`.

  Args:
    filepath: String, path to the weights file (or TF checkpoint prefix).
    by_name: Boolean, whether to load weights by layer name instead of by
      topological order. Only supported for HDF5 files.

  Returns:
    For TF-format files, the load status object returned by the
    checkpointable saver; None for HDF5 files.

  Raises:
    NotImplementedError: If `by_name=True` with a TF-format file, or when
      loading HDF5 weights into a model that has not built its variables.
    ImportError: If h5py is unavailable when loading an HDF5 file.
  """
  if _is_hdf5_filepath(filepath):
    save_format = 'h5'
  else:
    try:
      pywrap_tensorflow.NewCheckpointReader(filepath)
      save_format = 'tf'
    except errors_impl.DataLossError:
      # The checkpoint is not readable in TensorFlow format. Try HDF5.
      save_format = 'h5'
  if save_format == 'tf':
    status = self._checkpointable_saver.restore(filepath)
    if by_name:
      raise NotImplementedError(
          'Weights may only be loaded based on topology into Models when '
          'loading TensorFlow-formatted weights (got by_name=True to '
          'load_weights).')
    if not context.executing_eagerly():
      session = backend.get_session()
      # Restore existing variables (if any) immediately, and set up a
      # streaming restore for any variables created in the future.
      checkpointable_utils.streaming_restore(status=status, session=session)
    status.assert_nontrivial_match()
    return status
  if h5py is None:
    raise ImportError(
        '`load_weights` requires h5py when loading weights from HDF5.')
  # NOTE(review): the guard below fires for graph networks, but the error
  # message talks about subclassed Models — confirm the condition is the
  # intended one.
  if self._is_graph_network and not self.built:
    raise NotImplementedError(
        'Unable to load weights saved in HDF5 format into a subclassed '
        'Model which has not created its variables yet. Call the Model '
        'first, then load the weights.')
  with h5py.File(filepath, 'r') as f:
    # Whole-model HDF5 files nest the weights under 'model_weights'.
    if 'layer_names' not in f.attrs and 'model_weights' in f:
      f = f['model_weights']
    if by_name:
      saving.load_weights_from_hdf5_group_by_name(f, self.layers)
    else:
      saving.load_weights_from_hdf5_group(f, self.layers)
def _updated_config(self):
  """Returns the model config wrapped with serialization metadata.

  The wrapper records the class name, the Keras version, and the active
  backend alongside the result of `get_config()`; this is the structure
  consumed by `to_json` / `to_yaml`.
  """
  from tensorflow.python.keras import __version__ as keras_version  # pylint: disable=g-import-not-at-top
  return {
      'class_name': self.__class__.__name__,
      'config': self.get_config(),
      'keras_version': keras_version,
      'backend': backend.backend(),
  }
def to_json(self, **kwargs):
  """Returns a JSON string containing the network configuration.

  Args:
    **kwargs: Additional keyword arguments forwarded to `json.dumps`
      (e.g. `indent`, `sort_keys`).

  Raises:
    TypeError: If the configuration contains a value that cannot be
      serialized to JSON.
  """

  def get_json_type(obj):
    # Numpy values: arrays become nested lists, scalars become the
    # corresponding native Python scalar.
    if type(obj).__module__ == np.__name__:
      return obj.tolist() if isinstance(obj, np.ndarray) else obj.item()
    # Python classes are serialized by name.
    if type(obj).__name__ == type.__name__:
      return obj.__name__
    raise TypeError('Not JSON Serializable:', obj)

  return json.dumps(self._updated_config(), default=get_json_type, **kwargs)
def to_yaml(self, **kwargs):
  """Returns a YAML string containing the network configuration.

  Args:
    **kwargs: Additional keyword arguments forwarded to `yaml.dump`.

  Raises:
    ImportError: If the `yaml` module is not installed.
  """
  if yaml is None:
    raise ImportError(
        'Requires yaml module installed (`pip install pyyaml`).')
  return yaml.dump(self._updated_config(), **kwargs)
def summary(self, line_length=None, positions=None, print_fn=None):
  """Prints a string summary of the network.

  Args:
    line_length: Total length of printed lines (passed through to
      `layer_utils.print_summary`).
    positions: Relative or absolute positions of log elements in each
      line (passed through).
    print_fn: Print function to use; defaults inside `print_summary`.

  Raises:
    ValueError: If the model has never been built, so there is nothing
      to summarize.
  """
  if not self.built:
    raise ValueError('This model has not yet been built. '
                     'Build the model first by calling `build()` or calling '
                     '`fit()` with some data, or specify '
                     'an `input_shape` argument in the first layer(s) for '
                     'automatic build.')
  layer_utils.print_summary(self,
                            line_length=line_length,
                            positions=positions,
                            print_fn=print_fn)
def _validate_graph_inputs_and_outputs(self):
  """Validates the inputs and outputs of a graph Network.

  Checks, in order: no duplicate input tensors; every input carries
  `_keras_history` metadata and truly originates from an Input layer
  (warning only); all statically-known input batch sizes agree; every
  output carries `_keras_history` metadata.

  Raises:
    ValueError: On duplicate inputs, inputs/outputs missing Keras
      metadata, or incompatible static batch sizes.
  """
  # Check for redundancy in inputs.
  if len(set(self.inputs)) != len(self.inputs):
    raise ValueError('The list of inputs passed to the model '
                     'is redundant. '
                     'All inputs should only appear once.'
                     ' Found: ' + str(self.inputs))
  for x in self.inputs:
    # Check that x has appropriate `_keras_history` metadata.
    if not hasattr(x, '_keras_history'):
      cls_name = self.__class__.__name__
      raise ValueError('Input tensors to a ' + cls_name + ' ' +
                       'must come from `tf.keras.Input`. '
                       'Received: ' + str(x) +
                       ' (missing previous layer metadata).')
    # Check that x is an input tensor: its originating layer must have a
    # single node with no inbound layers. Violations only warn.
    # pylint: disable=protected-access
    layer, _, _ = x._keras_history
    if len(layer._inbound_nodes) > 1 or (
        layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers):
      cls_name = self.__class__.__name__
      logging.warning(cls_name + ' inputs must come from '
                      '`tf.keras.Input` (thus holding past layer metadata), '
                      'they cannot be the output of '
                      'a previous non-Input layer. '
                      'Here, a tensor specified as '
                      'input to "' + self.name + '" was not an Input tensor, '
                      'it was generated by layer ' + layer.name + '.\n'
                      'Note that input tensors are '
                      'instantiated via `tensor = tf.keras.Input(shape)`.\n'
                      'The tensor that caused the issue was: ' + str(x.name))
  # Check compatibility of batch sizes of Input Layers.
  input_batch_sizes = [
      training_utils.get_static_batch_size(x._keras_history[0])
      for x in self.inputs
  ]
  consistent_batch_size = None
  for batch_size in input_batch_sizes:
    if batch_size is not None:
      # None means "unknown statically" and is compatible with anything.
      if (consistent_batch_size is not None and
          batch_size != consistent_batch_size):
        raise ValueError('The specified batch sizes of the Input Layers'
                         ' are incompatible. Found batch sizes: {}'.format(
                             input_batch_sizes))
      consistent_batch_size = batch_size

  for x in self.outputs:
    if not hasattr(x, '_keras_history'):
      cls_name = self.__class__.__name__
      raise ValueError('Output tensors to a ' + cls_name + ' must be '
                       'the output of a TensorFlow `Layer` '
                       '(thus holding past layer metadata). Found: ' + str(x))
def _is_hdf5_filepath(filepath):
return (filepath.endswith('.h5') or filepath.endswith('.keras') or
filepath.endswith('.hdf5'))
def _make_node_key(layer_name, node_index):
return layer_name + '_ib-' + str(node_index)
def _map_graph_network(inputs, outputs):
  """Validates a network's topology and gathers its layers and nodes.

  Walks the graph backwards from `outputs`, assigns a depth to every node
  and layer, and verifies that every tensor needed by the graph is
  computable from `inputs` and that layer names are unique.

  Args:
    inputs: List of input tensors.
    outputs: List of output tensors.

  Returns:
    A tuple `(network_nodes, nodes_by_depth, layers, layers_by_depth)`:
    the set of relevant node keys, dicts mapping depth to nodes/layers,
    and the deterministically-ordered list of layers.

  Raises:
    ValueError: If the graph contains a cycle, a required tensor cannot
      be computed from `inputs`, or a layer name is used more than once.
  """
  # Network_nodes: set of nodes included in the graph of layers
  # (not all nodes included in the layers are relevant to the current graph).
  network_nodes = set()  # ids of all nodes relevant to the Network
  nodes_depths = {}  # dict {node: depth value}
  layers_depths = {}  # dict {layer: depth value}
  layer_indices = {}  # dict {layer: index in traversal}
  nodes_in_decreasing_depth = []

  def build_map(tensor,
                finished_nodes,
                nodes_in_progress,
                layer,
                node_index,
                tensor_index):
    """Recursively visits the node producing `tensor` and its ancestors.

    Appends visited nodes to `nodes_in_decreasing_depth` in post-order,
    so ancestors appear after descendants.
    """
    node = layer._inbound_nodes[node_index]  # pylint: disable=protected-access

    # Prevent cycles: a node seen again before it is finished means the
    # graph loops back on itself.
    if node in nodes_in_progress:
      raise ValueError('The tensor ' + str(tensor) + ' at layer "' +
                       layer.name + '" is part of a cycle.')

    # Don't repeat work for shared subgraphs
    if node in finished_nodes:
      return

    node_key = _make_node_key(layer.name, node_index)
    network_nodes.add(node_key)

    # Record the traversal order of each layer (used later to sort layers
    # deterministically within a depth level).
    if layer not in layer_indices:
      layer_indices[layer] = len(layer_indices)

    nodes_in_progress.add(node)

    # Recurse into each inbound tensor of this node. Note: the loop
    # parameters rebind `layer`/`node_index`/`tensor_index` locally.
    for i in range(len(node.inbound_layers)):
      x = node.input_tensors[i]
      layer = node.inbound_layers[i]
      node_index = node.node_indices[i]
      tensor_index = node.tensor_indices[i]
      build_map(x, finished_nodes, nodes_in_progress, layer,
                node_index, tensor_index)

    finished_nodes.add(node)
    nodes_in_progress.remove(node)

    nodes_in_decreasing_depth.append(node)

  finished_nodes = set()
  nodes_in_progress = set()
  for x in outputs:
    layer, node_index, tensor_index = x._keras_history  # pylint: disable=protected-access
    build_map(x, finished_nodes, nodes_in_progress,
              layer=layer,
              node_index=node_index,
              tensor_index=tensor_index)

  # Assign depths: outputs have depth 0; walking the post-order list in
  # reverse guarantees a node is processed before its ancestors.
  for node in reversed(nodes_in_decreasing_depth):
    # If the depth is not set, the node has no outbound nodes (depth 0).
    depth = nodes_depths.setdefault(node, 0)

    # If we've seen this layer before at a higher depth,
    # we should use that depth instead of the node depth.
    # This is necessary for shared layers that have inputs at different
    # depth levels in the graph.
    previous_depth = layers_depths.get(node.outbound_layer, 0)
    depth = max(depth, previous_depth)
    layers_depths[node.outbound_layer] = depth
    nodes_depths[node] = depth

    # Update the depth of inbound nodes.
    # The "depth" of a node is the max of the depths
    # of all layers it is connected to.
    for i in range(len(node.inbound_layers)):
      inbound_layer = node.inbound_layers[i]
      node_index = node.node_indices[i]
      inbound_node = inbound_layer._inbound_nodes[node_index]  # pylint: disable=protected-access
      previous_depth = nodes_depths.get(inbound_node, 0)
      nodes_depths[inbound_node] = max(depth + 1, previous_depth)

  # Build a dict {depth: list of nodes with this depth}
  nodes_by_depth = {}
  for node, depth in nodes_depths.items():
    if depth not in nodes_by_depth:
      nodes_by_depth[depth] = []
    nodes_by_depth[depth].append(node)

  # Build a dict {depth: list of layers with this depth}
  layers_by_depth = {}
  for layer, depth in layers_depths.items():
    if depth not in layers_by_depth:
      layers_by_depth[depth] = []
    layers_by_depth[depth].append(layer)

  # Get sorted list of layer depths.
  depth_keys = list(layers_by_depth.keys())
  depth_keys.sort(reverse=True)

  # Set self.layers and self._layers_by_depth.
  layers = []
  for depth in depth_keys:
    layers_for_depth = layers_by_depth[depth]
    # Network.layers needs to have a deterministic order:
    # here we order them by traversal order.
    layers_for_depth.sort(key=lambda x: layer_indices[x])
    layers.extend(layers_for_depth)

  # Get sorted list of node depths.
  depth_keys = list(nodes_by_depth.keys())
  depth_keys.sort(reverse=True)

  # Check that all tensors required are computable.
  # computable_tensors: all tensors in the graph
  # that can be computed from the inputs provided.
  computable_tensors = []
  for x in inputs:
    computable_tensors.append(x)

  layers_with_complete_input = []  # To provide a better error msg.
  for depth in depth_keys:
    for node in nodes_by_depth[depth]:
      layer = node.outbound_layer
      if layer:
        for x in node.input_tensors:
          if x not in computable_tensors:
            raise ValueError('Graph disconnected: '
                             'cannot obtain value for tensor ' + str(x) +
                             ' at layer "' + layer.name + '". '
                             'The following previous layers '
                             'were accessed without issue: ' +
                             str(layers_with_complete_input))
        for x in node.output_tensors:
          computable_tensors.append(x)
        layers_with_complete_input.append(layer.name)

  # Ensure name unicity, which will be crucial for serialization
  # (since serialized nodes refer to layers by their name).
  all_names = [layer.name for layer in layers]
  for name in all_names:
    if all_names.count(name) != 1:
      raise ValueError('The name "' + name + '" is used ' +
                       str(all_names.count(name)) + ' times in the model. '
                       'All layer names should be unique.')
  return network_nodes, nodes_by_depth, layers, layers_by_depth
| true | true |
1c30c6c00be1a292b934adcc7875e2235faffb9a | 3,284 | py | Python | datasets/opinosis/opinosis.py | TheophileBlard/nlp | 2e0a8639a79b1abc848cff5c669094d40bba0f63 | [
"Apache-2.0"
] | 3 | 2020-05-19T05:15:12.000Z | 2020-10-03T11:44:42.000Z | datasets/opinosis/opinosis.py | TheophileBlard/nlp | 2e0a8639a79b1abc848cff5c669094d40bba0f63 | [
"Apache-2.0"
] | null | null | null | datasets/opinosis/opinosis.py | TheophileBlard/nlp | 2e0a8639a79b1abc848cff5c669094d40bba0f63 | [
"Apache-2.0"
] | 1 | 2020-12-08T10:36:30.000Z | 2020-12-08T10:36:30.000Z | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Opinosis Opinion Dataset."""
from __future__ import absolute_import, division, print_function
import os
import nlp
_CITATION = """
@inproceedings{ganesan2010opinosis,
title={Opinosis: a graph-based approach to abstractive summarization of highly redundant opinions},
author={Ganesan, Kavita and Zhai, ChengXiang and Han, Jiawei},
booktitle={Proceedings of the 23rd International Conference on Computational Linguistics},
pages={340--348},
year={2010},
organization={Association for Computational Linguistics}
}
"""
_DESCRIPTION = """
The Opinosis Opinion Dataset consists of sentences extracted from reviews for 51 topics.
Topics and opinions are obtained from Tripadvisor, Edmunds.com and Amazon.com.
"""
_URL = "https://github.com/kavgan/opinosis-summarization/raw/master/OpinosisDataset1.0_0.zip"
_REVIEW_SENTS = "review_sents"
_SUMMARIES = "summaries"
class Opinosis(nlp.GeneratorBasedBuilder):
    """Opinosis Opinion Dataset: review sentences plus gold summaries per topic."""

    VERSION = nlp.Version("1.0.0")

    def _info(self):
        """Returns dataset metadata (features, supervised keys, citation)."""
        features = nlp.Features({
            _REVIEW_SENTS: nlp.Value("string"),
            _SUMMARIES: nlp.features.Sequence(nlp.Value("string")),
        })
        return nlp.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=(_REVIEW_SENTS, _SUMMARIES),
            homepage="http://kavita-ganesan.com/opinosis/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Downloads/extracts the archive and returns a single TRAIN split."""
        extract_path = dl_manager.download_and_extract(_URL)
        return [
            nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"path": extract_path}),
        ]

    def _generate_examples(self, path=None):
        """Yields one (filename, example) pair per topic file under `path`."""
        topics_path = os.path.join(path, "topics")
        for filename in os.listdir(topics_path):
            topic_name = filename.split(".txt")[0]
            # Review sentences for the topic; files are latin-1 encoded.
            with open(os.path.join(topics_path, filename), "rb") as src_f:
                input_data = src_f.read().decode("latin-1")
            # Each topic has several gold summaries in its own directory.
            summaries_path = os.path.join(path, "summaries-gold", topic_name)
            summary_lst = []
            for summ_filename in os.listdir(summaries_path):
                with open(os.path.join(summaries_path, summ_filename), "rb") as tgt_f:
                    summary_lst.append(tgt_f.read().strip().decode("latin-1"))
            yield filename, {_REVIEW_SENTS: input_data, _SUMMARIES: summary_lst}
| 36.898876 | 108 | 0.680268 |
from __future__ import absolute_import, division, print_function
import os
import nlp
_CITATION = """
@inproceedings{ganesan2010opinosis,
title={Opinosis: a graph-based approach to abstractive summarization of highly redundant opinions},
author={Ganesan, Kavita and Zhai, ChengXiang and Han, Jiawei},
booktitle={Proceedings of the 23rd International Conference on Computational Linguistics},
pages={340--348},
year={2010},
organization={Association for Computational Linguistics}
}
"""
_DESCRIPTION = """
The Opinosis Opinion Dataset consists of sentences extracted from reviews for 51 topics.
Topics and opinions are obtained from Tripadvisor, Edmunds.com and Amazon.com.
"""
_URL = "https://github.com/kavgan/opinosis-summarization/raw/master/OpinosisDataset1.0_0.zip"
_REVIEW_SENTS = "review_sents"
_SUMMARIES = "summaries"
class Opinosis(nlp.GeneratorBasedBuilder):
VERSION = nlp.Version("1.0.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{_REVIEW_SENTS: nlp.Value("string"), _SUMMARIES: nlp.features.Sequence(nlp.Value("string"))}
),
supervised_keys=(_REVIEW_SENTS, _SUMMARIES),
homepage="http://kavita-ganesan.com/opinosis/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
extract_path = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"path": extract_path},),
]
def _generate_examples(self, path=None):
topics_path = os.path.join(path, "topics")
filenames = os.listdir(topics_path)
for filename in filenames:
file_path = os.path.join(topics_path, filename)
topic_name = filename.split(".txt")[0]
with open(file_path, "rb") as src_f:
input_data = src_f.read().decode("latin-1")
summaries_path = os.path.join(path, "summaries-gold", topic_name)
summary_lst = []
for summ_filename in os.listdir(summaries_path):
file_path = os.path.join(summaries_path, summ_filename)
with open(file_path, "rb") as tgt_f:
data = tgt_f.read().strip().decode("latin-1")
summary_lst.append(data)
summary_data = summary_lst
yield filename, {_REVIEW_SENTS: input_data, _SUMMARIES: summary_data}
| true | true |
1c30c7d369896fdc6c8fc398774ad3c385a319f1 | 7,060 | py | Python | bokeh_app/scripts/functions/timeseries_stats.py | goodteamname/spino | aa8c6cfa9f94a639c306d85ca6df2483108fda37 | [
"MIT"
] | null | null | null | bokeh_app/scripts/functions/timeseries_stats.py | goodteamname/spino | aa8c6cfa9f94a639c306d85ca6df2483108fda37 | [
"MIT"
] | 9 | 2020-10-26T10:57:00.000Z | 2020-11-01T14:48:21.000Z | bokeh_app/scripts/functions/timeseries_stats.py | goodteamname/spino | aa8c6cfa9f94a639c306d85ca6df2483108fda37 | [
"MIT"
] | 1 | 2020-10-26T10:41:31.000Z | 2020-10-26T10:41:31.000Z | import pandas as pd
import numpy as np
def remove_trend(ts, N):
    """Remove a best-fitting polynomial of degree N from time series data.

    For every data column (every column after the leading 'time' column), a
    degree-N polynomial is fitted with numpy (least squares) and evaluated
    over the time axis.

    :param ts: Time series data as a pandas DataFrame; first column is 'time'.
    :param N: Degree of the polynomial trend to remove.
    :return: DataFrame with the original 'time' column plus, per input data
        column <col>, 'detrended_<col>' (data minus fit) and 'fit_<col>'
        (polynomial evaluated at each time).
    """
    headers = ['time']
    columns = [ts.time]
    # Every column except the leading 'time' column is treated as data.
    for name in ts.columns[1:]:
        coeffs = np.polyfit(ts.time, ts[name], deg=N)
        trend = np.polyval(coeffs, ts.time)
        headers += ['detrended_' + name, 'fit_' + name]
        columns += [pd.Series(ts[name] - trend), pd.Series(trend)]
    return pd.concat(columns, axis=1, keys=headers)
# ts_detrended = remove_trend(ts, 1)
# plt.figure()
# plt.plot(ts.time, ts.y2, label='data2')
# plt.plot(ts_detrended.time, ts_detrended.detrended_y2, label='detrended2')
# plt.plot(ts_detrended.time, ts_detrended.fit_y2, label='fit2')
# plt.legend()
# plt.show()
def remove_seasonality(ts, T):
    """Remove a periodic repetition of period T via differencing.

    Each sample is differenced with the sample one period earlier
    (signal[i] - signal[i - T]), which shortens the series by one period.

    :param ts: Time series data as a pandas DataFrame; first column is 'time'.
    :param T: Period of the seasonality to remove (in time units).
    :return: DataFrame of differenced columns (including a differenced
        'time' column) plus a 'time_diff' column holding the surviving
        original time stamps.
    """
    # Index of the sample whose time stamp is closest to one period T.
    T_ind = np.argmin(abs(ts.time - T))
    # Difference each sample with the sample one period earlier.
    shifted = ts.truncate(before=T_ind).reset_index(drop=True)
    base = ts.truncate(after=ts.shape[0] - 1 - T_ind).reset_index(drop=True)
    ts_diff = shifted - base
    # Samples before the first full period are lost; keep the surviving
    # time stamps, re-indexed from 0.
    ts_diff['time_diff'] = ts['time'][T_ind:].reset_index(drop=True)
    return ts_diff
# ts_diff = remove_seasonality(ts, 2*np.pi)
# plt.figure()
# plt.plot(ts.time, ts.y2, label='data2')
# plt.plot(ts_diff.time, ts_diff.y2, label='de seasoned2')
# plt.legend()
# plt.show()
def rolling_std(ts, window):
    """Calculate the rolling standard deviation of a time series.

    Computed as the square root of the rolling variance over the given
    window. Entries before the first full window are NaN.

    :param ts: Time series data as a pandas DataFrame; first column 'time'.
    :param window: Window size (int) over which to compute the std dev.
    :return: DataFrame with the same columns, data columns replaced by
        their rolling std dev and the 'time' column left untouched.
    """
    result = np.sqrt(ts.rolling(window).var())
    # Restore the raw time stamps: a std dev of the time axis is meaningless.
    result["time"] = ts["time"]
    return result
def rolling_mean(ts, window):
    """Calculate the rolling mean of a time series.

    Entries before the first full window are NaN.

    :param ts: Time series data as a pandas DataFrame; first column 'time'.
    :param window: Window size (int) over which to compute the mean.
    :return: DataFrame with the same columns, data columns replaced by
        their rolling mean and the 'time' column left untouched.
    """
    result = ts.rolling(window).mean()
    # Restore the raw time stamps: averaging the time axis is meaningless.
    result["time"] = ts["time"]
    return result
# ts_mean = rolling_mean(ts, 20)
# plt.figure()
# plt.plot(ts.time, ts.y1, label='data1')
# plt.plot(ts.time, ts.y2, label='data2')
# plt.plot(ts.time, ts.y3, label='data3')
# plt.plot(ts_mean.time, ts_mean.y1, label='rolling mean 1')
# plt.plot(ts_mean.time, ts_mean.y2, label='rolling mean 2')
# plt.plot(ts_mean.time, ts_mean.y3, label='rolling mean 3')
# plt.legend()
# plt.show()
# ts_std = rolling_std(ts, 20)
# plt.figure()
# plt.plot(ts.time, ts.y2, label='data2')
# plt.plot(ts_std.time, ts_std.y2, label='rolling std 2')
# plt.legend()
# plt.show()
def auto_corr(data, max_lag):
    """Calculate the autocorrelation of a series for lags 0..max_lag-1.

    Uses pandas.Series.autocorr() for each lag value.

    :param data: Time series data as a pandas Series (or array-like).
    :param max_lag: Number of lags to evaluate (exclusive upper bound).
    :return: DataFrame with columns 'lags' and 'auto_corrs'.
    """
    series = pd.Series(data)
    lags = range(max_lag)
    values = [series.autocorr(lag) for lag in lags]
    return pd.concat([pd.Series(lags), pd.Series(values)],
                     axis=1, keys=['lags', 'auto_corrs'])
# auto = auto_corr(ts.y1, 600)
# plt.figure()
# plt.plot(auto.lags, auto.auto_corrs, label='autocorrelation')
# plt.legend()
# plt.show()
def corr(data1, data2, max_lag):
    """Calculate the correlation of two series over a range of lags.

    For each lag in 0..max_lag-1, `data2` is shifted forward by that lag
    and correlated against `data1` via pandas.Series.corr().

    :param data1: Time series data as a pandas Series.
    :param data2: Time series data as a pandas Series; this one is shifted.
    :param max_lag: Number of lags to evaluate (exclusive upper bound).
    :return: DataFrame with columns 'lags' and 'corrs'.
    """
    lags = range(max_lag)
    values = [data1.corr(pd.Series(data2).shift(periods=lag)) for lag in lags]
    return pd.concat([pd.Series(lags), pd.Series(values)],
                     axis=1, keys=['lags', 'corrs'])
# correlations = corr(ts.y1, ts.y3, 600)
# plt.figure()
# plt.plot(correlations.lags, correlations.corrs, label='correlation')
# plt.legend()
# plt.show()
| 33.779904 | 78 | 0.686261 | import pandas as pd
import numpy as np
def remove_trend(ts, N):
headers = ['time']
data = [ts.time]
for col in np.delete(ts.columns.values, 0):
fit = np.polyval(np.polyfit(ts.time, ts[col], deg=N), ts.time)
detrended = ts[col]-fit
headers.append('detrended_' + col)
headers.append('fit_' + col)
data.append(pd.Series(detrended))
data.append(pd.Series(fit))
ts_detrended = pd.concat(data, axis=1, keys=headers)
return ts_detrended
def remove_seasonality(ts, T):
T_ind = np.argmin(abs(ts.time-T))
forward = ts.truncate(before=T_ind)
backward = ts.truncate(after=ts.shape[0]-1-T_ind)
forward = forward.reset_index(drop=True)
backward = backward.reset_index(drop=True)
ts_diff = forward-backward
times = ts['time'][T_ind:].reset_index(drop=True)
ts_diff['time_diff'] = times
return ts_diff
def rolling_std(ts, window):
ts_std = ts.rolling(window).var()
ts_std = np.sqrt(ts_std)
ts_std["time"] = ts["time"]
return ts_std
def rolling_mean(ts, window):
ts_mean = ts.rolling(window).mean()
ts_mean["time"] = ts["time"] # don't want mean of time!
return ts_mean
def auto_corr(data, max_lag):
auto_corrs = []
lags = range(max_lag)
for lag in lags:
auto_corrs.append(pd.Series(data).autocorr(lag))
headers = ['lags', 'auto_corrs']
array = [pd.Series(lags), pd.Series(auto_corrs)]
return pd.concat(array, axis=1, keys=headers)
def corr(data1, data2, max_lag):
corrs = []
lags = range(max_lag)
for lag in lags:
corr = data1.corr(pd.Series(data2).shift(periods=lag))
corrs.append(corr)
headers = ['lags', 'corrs']
array = [pd.Series(lags), pd.Series(corrs)]
return pd.concat(array, axis=1, keys=headers)
| true | true |
1c30c929aee456110941c26a22c61ae409b4009f | 1,222 | py | Python | components/collector/src/model/responses.py | kargaranamir/quality-time | 1c427c61bee9d31c3526f0a01be2218a7e167c23 | [
"Apache-2.0"
] | 33 | 2016-01-20T07:35:48.000Z | 2022-03-14T09:20:51.000Z | components/collector/src/model/responses.py | kargaranamir/quality-time | 1c427c61bee9d31c3526f0a01be2218a7e167c23 | [
"Apache-2.0"
] | 2,410 | 2016-01-22T18:13:01.000Z | 2022-03-31T16:57:34.000Z | components/collector/src/model/responses.py | kargaranamir/quality-time | 1c427c61bee9d31c3526f0a01be2218a7e167c23 | [
"Apache-2.0"
] | 21 | 2016-01-16T11:49:23.000Z | 2022-01-14T21:53:22.000Z | """Source responses model class."""
from collector_utilities.type import URL, ErrorMessage, Response, Responses
class SourceResponses:
    """Container for the responses of one source.

    Also carries associated information: the api_url that was queried and
    the connection error, if any occurred.
    """

    def __init__(
        self, *, responses: Responses = None, api_url: URL = None, connection_error: ErrorMessage = None
    ) -> None:
        # Note: a non-empty `responses` list is stored as-is (not copied).
        self.__responses: Responses = responses or []
        self.api_url = api_url
        self.connection_error = connection_error

    def __iter__(self):
        """Iterate over the individual responses."""
        yield from self.__responses

    def __len__(self) -> int:
        """Return the number of responses collected."""
        return len(self.__responses)

    def __getitem__(self, key):
        """Return the response at `key` (index or slice)."""
        return self.__responses[key]

    def __setitem__(self, key, value):
        """Replace the response at `key`."""
        self.__responses[key] = value

    def append(self, response: Response) -> None:
        """Append a single response."""
        self.__responses.append(response)

    def insert(self, index, response: Response) -> None:
        """Insert a response at the given index."""
        self.__responses.insert(index, response)

    def extend(self, responses: "SourceResponses") -> None:
        """Append all responses from another SourceResponses instance."""
        self.__responses.extend(list(responses))
| 31.333333 | 119 | 0.661211 |
from collector_utilities.type import URL, ErrorMessage, Response, Responses
class SourceResponses:
def __init__(
self, *, responses: Responses = None, api_url: URL = None, connection_error: ErrorMessage = None
) -> None:
self.__responses: Responses = responses or []
self.api_url = api_url
self.connection_error = connection_error
def __iter__(self):
return iter(self.__responses)
def __len__(self) -> int:
return len(self.__responses)
def __getitem__(self, key):
return self.__responses[key]
def __setitem__(self, key, value):
self.__responses[key] = value
def append(self, response: Response) -> None:
self.__responses.append(response)
def insert(self, index, response: Response) -> None:
self.__responses.insert(index, response)
def extend(self, responses: "SourceResponses") -> None:
self.__responses.extend(list(responses))
| true | true |
1c30c930d3da81291cc51c6f2ac44d96eab4f155 | 2,745 | py | Python | tests/agent_test.py | sld/dp-agent | 02729887f8db3c99ac2c6a3e5e7be7fa6849a1ba | [
"Apache-2.0"
] | null | null | null | tests/agent_test.py | sld/dp-agent | 02729887f8db3c99ac2c6a3e5e7be7fa6849a1ba | [
"Apache-2.0"
] | null | null | null | tests/agent_test.py | sld/dp-agent | 02729887f8db3c99ac2c6a3e5e7be7fa6849a1ba | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
import uuid
from core.agent import Agent
from core.state_manager import StateManager
from core.skill_manager import SkillManager
from core.rest_caller import RestCaller
from core.service import Service
from core.postprocessor import DefaultPostprocessor
from core.response_selector import ConfidenceResponseSelector
from core.config import MAX_WORKERS, ANNOTATORS, SKILL_SELECTORS
from core.skill_selector import ChitchatQASelector
from core.state_schema import Human
# from deeppavlov import configs, build_model
# ner = build_model(configs.ner.ner_rus, download=True)
# faq = build_model(configs.faq.tfidf_autofaq, download=True)
# sentiment = build_model(configs.classifiers.rusentiment_elmo_twitter_rnn, download=True)
# utterances = ['Привет!', 'Когда началась Вторая Мировая?',
# 'Привет, я бот!', '1939', 'Как дела?', 'Спасибо, бот!',
# 'Хорошо, а у тебя как?', 'И у меня нормально. Когда родился Петр Первый?',
# 'в 1672 году', 'спасибо', ]
# print("DeepPavlov configs output:")
# print(ner(utterances))
# print(faq(utterances))
# print(sentiment(utterances))
# Manual integration test: wire up a full Agent pipeline and run it on a
# batch of three utterances.
state_manager = StateManager()

# Annotator services (names + URLs from config) feed the preprocessor.
anno_names, anno_urls = zip(*[(annotator['name'], annotator['url']) for annotator in ANNOTATORS])
preprocessor = Service(
    rest_caller=RestCaller(max_workers=MAX_WORKERS, names=anno_names, urls=anno_urls))
postprocessor = DefaultPostprocessor()
skill_caller = RestCaller(max_workers=MAX_WORKERS)
response_selector = ConfidenceResponseSelector()
# Skill-selector services (names + URLs from config) drive skill routing.
ss_names, ss_urls = zip(*[(selector['name'], selector['url']) for selector in SKILL_SELECTORS])
skill_selector = ChitchatQASelector(RestCaller(max_workers=MAX_WORKERS, names=ss_names, urls=ss_urls))
skill_manager = SkillManager(skill_selector=skill_selector, response_selector=response_selector,
                             skill_caller=skill_caller)
agent = Agent(state_manager, preprocessor, postprocessor, skill_manager)

# TEST predict_annotations()
# annotations = agent.predict_annotations(utterances, should_reset=[False]*len(utterances))
# print("Agent output:")
# print(annotations)

# TEST __call__(): two existing users from the DB plus one fresh random id.
exist_humans = Human.objects
u_tg_ids = [exist_humans[0].user_telegram_id, exist_humans[1].user_telegram_id, str(uuid.uuid4())]
utts = ['Что еще скажешь интересного?', 'Бот, ты тупой', '/start']
u_d_types = ['iphone', 'android', 'iphone']
date_times = [datetime.utcnow(), datetime.utcnow(), datetime.utcnow()]
locations = ['moscow', 'novosibirsk', 'novokuznetsk']
ch_types = ['telegram', 'telegram', 'telegram']
responses = agent(utterances=utts, user_telegram_ids=u_tg_ids, user_device_types=u_d_types,
                  date_times=date_times, locations=locations, channel_types=ch_types)
print(responses)
| 45 | 102 | 0.764663 | from datetime import datetime
import uuid
from core.agent import Agent
from core.state_manager import StateManager
from core.skill_manager import SkillManager
from core.rest_caller import RestCaller
from core.service import Service
from core.postprocessor import DefaultPostprocessor
from core.response_selector import ConfidenceResponseSelector
from core.config import MAX_WORKERS, ANNOTATORS, SKILL_SELECTORS
from core.skill_selector import ChitchatQASelector
from core.state_schema import Human
state_manager = StateManager()
anno_names, anno_urls = zip(*[(annotator['name'], annotator['url']) for annotator in ANNOTATORS])
preprocessor = Service(
rest_caller=RestCaller(max_workers=MAX_WORKERS, names=anno_names, urls=anno_urls))
postprocessor = DefaultPostprocessor()
skill_caller = RestCaller(max_workers=MAX_WORKERS)
response_selector = ConfidenceResponseSelector()
ss_names, ss_urls = zip(*[(selector['name'], selector['url']) for selector in SKILL_SELECTORS])
skill_selector = ChitchatQASelector(RestCaller(max_workers=MAX_WORKERS, names=ss_names, urls=ss_urls))
skill_manager = SkillManager(skill_selector=skill_selector, response_selector=response_selector,
skill_caller=skill_caller)
agent = Agent(state_manager, preprocessor, postprocessor, skill_manager)
exist_humans = Human.objects
u_tg_ids = [exist_humans[0].user_telegram_id, exist_humans[1].user_telegram_id, str(uuid.uuid4())]
utts = ['Что еще скажешь интересного?', 'Бот, ты тупой', '/start']
u_d_types = ['iphone', 'android', 'iphone']
date_times = [datetime.utcnow(), datetime.utcnow(), datetime.utcnow()]
locations = ['moscow', 'novosibirsk', 'novokuznetsk']
ch_types = ['telegram', 'telegram', 'telegram']
responses = agent(utterances=utts, user_telegram_ids=u_tg_ids, user_device_types=u_d_types,
date_times=date_times, locations=locations, channel_types=ch_types)
print(responses)
| true | true |
1c30cafa647ace6204c6ba2ef558a148026b503f | 314 | py | Python | osipkd/views/ak/__init__.py | aagusti/o-sipkd | 6c61fddb87fa6f4be18cac851bb44949019b8f3e | [
"MIT"
] | null | null | null | osipkd/views/ak/__init__.py | aagusti/o-sipkd | 6c61fddb87fa6f4be18cac851bb44949019b8f3e | [
"MIT"
] | null | null | null | osipkd/views/ak/__init__.py | aagusti/o-sipkd | 6c61fddb87fa6f4be18cac851bb44949019b8f3e | [
"MIT"
] | null | null | null | from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import (
HTTPFound,
)
from osipkd.models import App
########
# APP Home #
########
@view_config(route_name='ak', renderer='templates/home.pt', permission='read')
def view_app(request):
    """Render the AK home page template with the project name."""
    return {'project': 'o-SIPKD'}
| 22.428571 | 78 | 0.652866 | from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import (
HTTPFound,
)
from osipkd.models import App
.pt', permission='read')
def view_app(request):
return dict(project='o-SIPKD')
| true | true |
1c30cbc30ba9e277f19e4cb4f681d179c47f1969 | 9,464 | py | Python | doc/generate_logos.py | iamabhishek0/sympy | c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd | [
"BSD-3-Clause"
] | 2 | 2019-02-05T19:20:24.000Z | 2019-04-23T13:24:38.000Z | doc/generate_logos.py | iamabhishek0/sympy | c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd | [
"BSD-3-Clause"
] | 2 | 2017-06-29T14:11:05.000Z | 2022-01-24T09:28:04.000Z | doc/generate_logos.py | iamabhishek0/sympy | c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd | [
"BSD-3-Clause"
] | 1 | 2016-11-25T13:40:28.000Z | 2016-11-25T13:40:28.000Z | #!/usr/bin/env python
"""
This script creates logos of different formats from the source "sympy.svg"
Requirements:
rsvg-convert - for converting to *.png format
(librsvg2-bin deb package)
imagemagick - for converting to *.ico favicon format
"""
from argparse import ArgumentParser
import xml.dom.minidom
import os.path
import logging
import subprocess
import sys
default_source_dir = os.path.join(os.path.dirname(__file__), "src/logo")
default_source_svg = "sympy.svg"
default_output_dir = os.path.join(os.path.dirname(__file__), "_build/logo")
# those are the options for resizing versions without tail or text
svg_sizes = {}
svg_sizes['notail'] = {
"prefix":"notail", "dx":-70, "dy":-20, "size":690,
"title":"SymPy Logo, with no tail"}
svg_sizes['notail-notext'] = {
"prefix":"notailtext", "dx":-70, "dy":60, "size":690,
"title":"SymPy Logo, with no tail, no text"}
svg_sizes['notext'] = {
"prefix":"notext", "dx":-7, "dy":90, "size":750,
"title":"SymPy Logo, with no text"}
# The list of identifiers of various versions
versions = ['notail', 'notail-notext', 'notext']
parser = ArgumentParser(usage="%(prog)s [options ...]")
parser.add_argument("--source-dir", type=str, dest="source_dir",
help="Directory of the source *.svg file [default: %(default)s]",
default=default_source_dir)
parser.add_argument("--source-svg", type=str, dest="source_svg",
help="File name of the source *.svg file [default: %(default)s]",
default=default_source_svg)
parser.add_argument("--svg", action="store_true", dest="generate_svg",
help="Generate *.svg versions without tails " \
"and without text 'SymPy' [default: %(default)s]",
default=False)
parser.add_argument("--png", action="store_true", dest="generate_png",
help="Generate *.png versions [default: %(default)s]",
default=False)
parser.add_argument("--ico", action="store_true", dest="generate_ico",
help="Generate *.ico versions [default: %(default)s]",
default=False)
parser.add_argument("--clear", action="store_true", dest="clear",
help="Remove temporary files [default: %(default)s]",
default=False)
parser.add_argument("-a", "--all", action="store_true", dest="generate_all",
help="Shorthand for '--svg --png --ico --clear' options " \
"[default: %(default)s]",
default=True)
parser.add_argument("-s", "--sizes", type=str, dest="sizes",
help="Sizes of png pictures [default: %(default)s]",
default="160,500")
parser.add_argument("--icon-sizes", type=str, dest="icon_sizes",
help="Sizes of icons embedded in favicon file [default: %(default)s]",
default="16,32,48,64")
parser.add_argument("--output-dir", type=str, dest="output_dir",
help="Output dir [default: %(default)s]",
default=default_output_dir)
parser.add_argument("-d", "--debug", action="store_true", dest="debug",
help="Print debug log [default: %(default)s]",
default=False)
def main():
    """Parse command-line options and generate the requested logo formats."""
    options, args = parser.parse_known_args()
    if options.debug:
        logging.basicConfig(level=logging.DEBUG)
    fn_source = os.path.join(options.source_dir, options.source_svg)
    # NOTE(review): --all defaults to True, so every branch below runs
    # unless that parser default is changed; the per-format flags only
    # matter with a different default. Confirm this is intentional.
    if options.generate_svg or options.generate_all:
        generate_notail_notext_versions(fn_source, options.output_dir)
    if options.generate_png or options.generate_all:
        # Sizes arrive as a comma-separated string, e.g. "160,500".
        sizes = options.sizes.split(",")
        sizes = [int(s) for s in sizes]
        convert_to_png(fn_source, options.output_dir, sizes)
    if options.generate_ico or options.generate_all:
        sizes = options.icon_sizes.split(",")
        sizes = [int(s) for s in sizes]
        convert_to_ico(fn_source, options.output_dir, sizes)
def generate_notail_notext_versions(fn_source, output_dir):
    """Produce the no-tail / no-text *.svg variants of the source logo.

    For each key in ``versions``, re-parses *fn_source*, hides the tail
    and/or text groups, recenters and resizes the logo per ``svg_sizes``,
    updates the title/desc metadata, and writes the result to *output_dir*.
    """
    for ver in versions:
        properties = svg_sizes[ver]
        # Re-parse for every variant so edits never leak between versions.
        doc = load_svg(fn_source)
        (notail, notext) = versionkey_to_boolean_tuple(ver)
        g_tail = searchElementById(doc, "SnakeTail", "g")
        if notail:
            g_tail.setAttribute("display", "none")
        g_text = searchElementById(doc, "SymPy_text", "g")
        if notext:
            g_text.setAttribute("display", "none")
        # Shift the remaining artwork so it stays centered after hiding parts.
        g_logo = searchElementById(doc, "SympyLogo", "g")
        dx = properties["dx"]
        dy = properties["dy"]
        transform = "translate(%d,%d)" % (dx, dy)
        g_logo.setAttribute("transform", transform)
        svg = searchElementById(doc, "svg_SympyLogo", "svg")
        newsize = properties["size"]
        svg.setAttribute("width", "%d" % newsize)
        svg.setAttribute("height", "%d" % newsize)
        title = svg.getElementsByTagName("title")[0]
        title.firstChild.data = properties["title"]
        # Record provenance in the <desc> element of the generated file.
        desc = svg.getElementsByTagName("desc")[0]
        desc.appendChild(
            doc.createTextNode(
                "\n\nThis file is generated from %s !" % fn_source))
        fn_out = get_svg_filename_from_versionkey(fn_source, ver)
        fn_out = os.path.join(output_dir, fn_out)
        save_svg(fn_out, doc)
def convert_to_png(fn_source, output_dir, sizes):
    """Render the source SVG and each generated variant to *.png files.

    For every variant ('' meaning the unmodified source, plus each key in
    ``versions``) and every pixel size in *sizes*, writes
    ``<name>-<size>px.png`` into *output_dir* using ``rsvg-convert``.
    Exits the process if ``rsvg-convert`` is missing or reports failure.
    """
    svgs = [''] + list(versions)
    # Probe for the rsvg-convert binary up front so a missing dependency
    # yields one clear error instead of a failure per file. Arguments are
    # passed as a list (no shell) so paths with spaces or shell
    # metacharacters are handled safely.
    try:
        subprocess.run(["rsvg-convert", "--version"],
                       stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    except FileNotFoundError:
        logging.error("rsvg-convert: command not found. Install librsvg")
        sys.exit(127)
    for ver in svgs:
        if ver == '':
            fn_svg = fn_source
        else:
            fn_svg = get_svg_filename_from_versionkey(fn_source, ver)
            fn_svg = os.path.join(output_dir, fn_svg)
        name, _ext = os.path.splitext(os.path.basename(fn_svg))
        for size in sizes:
            fn_out = os.path.join(output_dir, "%s-%dpx.png" % (name, size))
            cmd = ["rsvg-convert", fn_svg, "-f", "png", "-o", fn_out,
                   "-h", str(size), "-w", str(size)]
            proc = subprocess.run(cmd, stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT)
            if proc.returncode != 0:
                logging.error("Return code is not 0: Command: %s" % cmd)
                logging.error("return code: %s" % proc.returncode)
                sys.exit(proc.returncode)
            else:
                logging.debug("command: %s" % cmd)
                logging.debug("return code: %s" % proc.returncode)
def convert_to_ico(fn_source, output_dir, sizes):
    """Build multi-resolution favicon *.ico files with ImageMagick.

    First renders the required *.png files (one per size in *sizes*), then
    embeds them into ``<name>-favicon.ico`` for the source SVG and each
    variant in ``versions``. Exits the process if ImageMagick's ``convert``
    is missing or reports failure.
    """
    # The *.ico files embed the *.png renderings, so produce those first.
    convert_to_png(fn_source, output_dir, sizes)
    svgs = [''] + list(versions)
    # Probe for the convert binary up front; list-form arguments (no shell)
    # keep paths with spaces or shell metacharacters safe.
    try:
        subprocess.run(["convert", "-version"],
                       stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    except FileNotFoundError:
        logging.error("convert: command not found. Install imagemagick")
        sys.exit(127)
    for ver in svgs:
        if ver == '':
            fn_svg = fn_source
        else:
            fn_svg = get_svg_filename_from_versionkey(fn_source, ver)
            fn_svg = os.path.join(output_dir, fn_svg)
        name, _ext = os.path.splitext(os.path.basename(fn_svg))
        # Collect the per-size png paths produced by convert_to_png above.
        pngs = [os.path.join(output_dir, "%s-%dpx.png" % (name, size))
                for size in sizes]
        fn_out = os.path.join(output_dir, "%s-favicon.ico" % name)
        cmd = ["convert"] + pngs + [fn_out]
        proc = subprocess.run(cmd, stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT)
        if proc.returncode != 0:
            logging.error("Return code is not 0: Command: %s" % cmd)
            logging.error("return code: %s" % proc.returncode)
            sys.exit(proc.returncode)
        else:
            logging.debug("command: %s" % cmd)
            logging.debug("return code: %s" % proc.returncode)
def versionkey_to_boolean_tuple(ver):
    """Map a version key like ``'notail-notext'`` to a (notail, notext) pair.

    The key is split on '-' and each flag is True iff the corresponding
    token is present. The original initial False assignments were dead
    code (immediately overwritten) and have been removed.
    """
    vers = ver.split("-")
    return ('notail' in vers, 'notext' in vers)
def get_svg_filename_from_versionkey(fn_source, ver):
    """Return the output *.svg file name for version key *ver*.

    An empty key means the unmodified source, so its basename is returned
    as-is; otherwise the per-version prefix from ``svg_sizes`` is appended.
    """
    base = os.path.basename(fn_source)
    if ver == '':
        return base
    stem, _ext = os.path.splitext(base)
    return "%s-%s.svg" % (stem, svg_sizes[ver]["prefix"])
def searchElementById(node, Id, tagname):
    """Return the first descendant <tagname> element whose ``id`` attribute
    equals *Id*, or None when no such element exists.

    Matches the lower-case ``id`` attribute, not the DOM ``ID`` concept
    that ``getElementById`` relies on.
    """
    for candidate in node.getElementsByTagName(tagname):
        attr = candidate.getAttributeNode('id')
        if attr is not None and attr.nodeValue == Id:
            return candidate
    return None
def load_svg(fn):
    """Parse the SVG file *fn* and return its DOM document."""
    return xml.dom.minidom.parse(fn)
def save_svg(fn, doc):
    """Serialize DOM document *doc* as UTF-8 XML into file *fn*."""
    with open(fn, "wb") as out:
        out.write(doc.toxml("utf-8"))
    logging.info(" File saved: %s" % fn)
if __name__ == "__main__":
    # Guard the CLI entry point so importing this module (e.g. from a
    # build script or tests) does not immediately generate the logos.
    main()
| 33.921147 | 79 | 0.614434 |
from argparse import ArgumentParser
import xml.dom.minidom
import os.path
import logging
import subprocess
import sys
default_source_dir = os.path.join(os.path.dirname(__file__), "src/logo")
default_source_svg = "sympy.svg"
default_output_dir = os.path.join(os.path.dirname(__file__), "_build/logo")
svg_sizes = {}
svg_sizes['notail'] = {
"prefix":"notail", "dx":-70, "dy":-20, "size":690,
"title":"SymPy Logo, with no tail"}
svg_sizes['notail-notext'] = {
"prefix":"notailtext", "dx":-70, "dy":60, "size":690,
"title":"SymPy Logo, with no tail, no text"}
svg_sizes['notext'] = {
"prefix":"notext", "dx":-7, "dy":90, "size":750,
"title":"SymPy Logo, with no text"}
versions = ['notail', 'notail-notext', 'notext']
parser = ArgumentParser(usage="%(prog)s [options ...]")
parser.add_argument("--source-dir", type=str, dest="source_dir",
help="Directory of the source *.svg file [default: %(default)s]",
default=default_source_dir)
parser.add_argument("--source-svg", type=str, dest="source_svg",
help="File name of the source *.svg file [default: %(default)s]",
default=default_source_svg)
parser.add_argument("--svg", action="store_true", dest="generate_svg",
help="Generate *.svg versions without tails " \
"and without text 'SymPy' [default: %(default)s]",
default=False)
parser.add_argument("--png", action="store_true", dest="generate_png",
help="Generate *.png versions [default: %(default)s]",
default=False)
parser.add_argument("--ico", action="store_true", dest="generate_ico",
help="Generate *.ico versions [default: %(default)s]",
default=False)
parser.add_argument("--clear", action="store_true", dest="clear",
help="Remove temporary files [default: %(default)s]",
default=False)
parser.add_argument("-a", "--all", action="store_true", dest="generate_all",
help="Shorthand for '--svg --png --ico --clear' options " \
"[default: %(default)s]",
default=True)
parser.add_argument("-s", "--sizes", type=str, dest="sizes",
help="Sizes of png pictures [default: %(default)s]",
default="160,500")
parser.add_argument("--icon-sizes", type=str, dest="icon_sizes",
help="Sizes of icons embedded in favicon file [default: %(default)s]",
default="16,32,48,64")
parser.add_argument("--output-dir", type=str, dest="output_dir",
help="Output dir [default: %(default)s]",
default=default_output_dir)
parser.add_argument("-d", "--debug", action="store_true", dest="debug",
help="Print debug log [default: %(default)s]",
default=False)
def main():
options, args = parser.parse_known_args()
if options.debug:
logging.basicConfig(level=logging.DEBUG)
fn_source = os.path.join(options.source_dir, options.source_svg)
if options.generate_svg or options.generate_all:
generate_notail_notext_versions(fn_source, options.output_dir)
if options.generate_png or options.generate_all:
sizes = options.sizes.split(",")
sizes = [int(s) for s in sizes]
convert_to_png(fn_source, options.output_dir, sizes)
if options.generate_ico or options.generate_all:
sizes = options.icon_sizes.split(",")
sizes = [int(s) for s in sizes]
convert_to_ico(fn_source, options.output_dir, sizes)
def generate_notail_notext_versions(fn_source, output_dir):
for ver in versions:
properties = svg_sizes[ver]
doc = load_svg(fn_source)
(notail, notext) = versionkey_to_boolean_tuple(ver)
g_tail = searchElementById(doc, "SnakeTail", "g")
if notail:
g_tail.setAttribute("display", "none")
g_text = searchElementById(doc, "SymPy_text", "g")
if notext:
g_text.setAttribute("display", "none")
g_logo = searchElementById(doc, "SympyLogo", "g")
dx = properties["dx"]
dy = properties["dy"]
transform = "translate(%d,%d)" % (dx, dy)
g_logo.setAttribute("transform", transform)
svg = searchElementById(doc, "svg_SympyLogo", "svg")
newsize = properties["size"]
svg.setAttribute("width", "%d" % newsize)
svg.setAttribute("height", "%d" % newsize)
title = svg.getElementsByTagName("title")[0]
title.firstChild.data = properties["title"]
desc = svg.getElementsByTagName("desc")[0]
desc.appendChild(
doc.createTextNode(
"\n\nThis file is generated from %s !" % fn_source))
fn_out = get_svg_filename_from_versionkey(fn_source, ver)
fn_out = os.path.join(output_dir, fn_out)
save_svg(fn_out, doc)
def convert_to_png(fn_source, output_dir, sizes):
svgs = list(versions)
svgs.insert(0, '')
cmd = "rsvg-convert"
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode == 127:
logging.error(
"%s: command not found. Install librsvg" % cmd)
sys.exit(p.returncode)
for ver in svgs:
if ver == '':
fn_svg = fn_source
else:
fn_svg = get_svg_filename_from_versionkey(fn_source, ver)
fn_svg = os.path.join(output_dir, fn_svg)
basename = os.path.basename(fn_svg)
name, ext = os.path.splitext(basename)
for size in sizes:
fn_out = "%s-%dpx.png" % (name, size)
fn_out = os.path.join(output_dir, fn_out)
cmd = "rsvg-convert %s -f png -o %s -h %d -w %d" % (fn_svg, fn_out,
size, size)
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode != 0:
logging.error("Return code is not 0: Command: %s" % cmd)
logging.error("return code: %s" % p.returncode)
sys.exit(p.returncode)
else:
logging.debug("command: %s" % cmd)
logging.debug("return code: %s" % p.returncode)
def convert_to_ico(fn_source, output_dir, sizes):
convert_to_png(fn_source, output_dir, sizes)
svgs = list(versions)
svgs.insert(0, '')
cmd = "convert"
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode == 127:
logging.error("%s: command not found. Install imagemagick" % cmd)
sys.exit(p.returncode)
for ver in svgs:
if ver == '':
fn_svg = fn_source
else:
fn_svg = get_svg_filename_from_versionkey(fn_source, ver)
fn_svg = os.path.join(output_dir, fn_svg)
basename = os.path.basename(fn_svg)
name, ext = os.path.splitext(basename)
pngs = []
for size in sizes:
fn_png= "%s-%dpx.png" % (name, size)
fn_png = os.path.join(output_dir, fn_png)
pngs.append(fn_png)
fn_out = "%s-favicon.ico" % name
fn_out = os.path.join(output_dir, fn_out)
cmd = "convert %s %s" % (" ".join(pngs), fn_out)
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode != 0:
logging.error("Return code is not 0: Command: %s" % cmd)
logging.error("return code: %s" % p.returncode)
sys.exit(p.returncode)
else:
logging.debug("command: %s" % cmd)
logging.debug("return code: %s" % p.returncode)
def versionkey_to_boolean_tuple(ver):
notail = False
notext = False
vers = ver.split("-")
notail = 'notail' in vers
notext = 'notext' in vers
return (notail, notext)
def get_svg_filename_from_versionkey(fn_source, ver):
basename = os.path.basename(fn_source)
if ver == '':
return basename
name, ext = os.path.splitext(basename)
prefix = svg_sizes[ver]["prefix"]
fn_out = "%s-%s.svg" % (name, prefix)
return fn_out
def searchElementById(node, Id, tagname):
nodes = node.getElementsByTagName(tagname)
for node in nodes:
an = node.getAttributeNode('id')
if an and an.nodeValue == Id:
return node
def load_svg(fn):
doc = xml.dom.minidom.parse(fn)
return doc
def save_svg(fn, doc):
with open(fn, "wb") as f:
xmlstr = doc.toxml("utf-8")
f.write(xmlstr)
logging.info(" File saved: %s" % fn)
main()
| true | true |
1c30cc41e761b0f6e63c07e7049e39088af4064f | 7,920 | py | Python | QNetwork/tests/test_channels.py | SwamyDev/q_network | 4f1866f8d06e4f206b4ada5e86396a4da26f28f7 | [
"MIT"
] | null | null | null | QNetwork/tests/test_channels.py | SwamyDev/q_network | 4f1866f8d06e4f206b4ada5e86396a4da26f28f7 | [
"MIT"
] | null | null | null | QNetwork/tests/test_channels.py | SwamyDev/q_network | 4f1866f8d06e4f206b4ada5e86396a4da26f28f7 | [
"MIT"
] | 2 | 2019-12-04T08:47:40.000Z | 2021-07-22T16:22:27.000Z | import unittest
from collections import deque
from QNetwork.q_network_channels import QState, QChannel, CAChannel
class QConnectionSpy:
    """Test double that records qubits/EPR halves "sent" over a quantum link."""

    def __init__(self, qubit_factory):
        self.qubit_factory = qubit_factory
        self.receiver = ''
        self.qubits = []
        # Values popped (FIFO) onto fabricated EPR halves, if preloaded.
        self.epr_values = deque()

    def sendQubit(self, qubit, receiver, print_info=True):
        """Record the qubit and its receiver instead of transmitting."""
        self.receiver = receiver
        self.qubits.append(qubit)

    def createEPR(self, receiver, print_info=True):
        """Fabricate one EPR half via the factory, seeding its value if queued."""
        self.receiver = receiver
        qubit = self.qubit_factory(self)
        if self.epr_values:
            qubit.value = self.epr_values.popleft()
        self.qubits.append(qubit)
        return qubit
class CACConnectionSpy:
    """Test double recording classical-channel calls (send/get/ack/clear/close)."""

    def __init__(self):
        self.receiver = ''
        self.sender = ''
        self.sent_data = None
        # Flags flipped when the corresponding call is observed.
        self.received_get_call = False
        self.received_clear_call = False
        self.received_close_call = False
        self.received_send_ack_call = False
        self.received_get_ack_call = False

    def sendValueList(self, receiver, data):
        self.receiver = receiver
        self.sent_data = data

    def getValueList(self, sender):
        self.received_get_call = True
        self.sender = sender

    def sendAck(self, receiver):
        self.received_send_ack_call = True
        self.receiver = receiver

    def getAck(self, sender):
        self.received_get_ack_call = True
        self.sender = sender

    def clearServer(self):
        self.received_clear_call = True

    def closeChannel(self):
        self.received_close_call = True
class QubitSpy:
    """Test double qubit: records gate applications, measures to a preset value."""

    def __init__(self, value=0):
        self.operations = []
        self.value = value

    def _record(self, gate):
        # Remember the gate name so tests can assert the applied sequence.
        self.operations.append(gate)

    def X(self, print_info=True):
        self._record('X')

    def Y(self, print_info=True):
        self._record('Y')

    def Z(self, print_info=True):
        self._record('Z')

    def H(self, print_info=True):
        self._record('H')

    def measure(self, print_info=True):
        return self.value
def make_qubit_spy(connection):
    """Qubit factory matching QChannel's expected signature; *connection* is ignored."""
    return QubitSpy()
class ConnectionStub:
    """Stub connection that replays a preloaded list of qubits in order."""

    def __init__(self):
        self.qubits = None
        self.received_qubits = None
        self.idx = 0

    @property
    def received_qubits(self):
        # Alias for self.qubits so tests can load "received" qubits directly.
        return self.qubits

    @received_qubits.setter
    def received_qubits(self, value):
        self.qubits = value

    def _next_qubit(self):
        qubit = self.qubits[self.idx]
        self.idx += 1
        return qubit

    def recvQubit(self, print_info=True):
        return self._next_qubit()

    def recvEPR(self, print_info=True):
        return self._next_qubit()
class TestQChannelBase(unittest.TestCase):
    """Shared fixture: builds a QChannel around a connection test double.

    Subclasses supply the double by overriding make_connection_double().
    """
    def setUp(self):
        self.con = self.make_connection_double()
        self.qc = QChannel(self.con, make_qubit_spy, 'Bob')
    def make_connection_double(self):
        # Subclass responsibility; the base class has no usable connection.
        raise NotImplementedError("QChannel test require a connection object")
    def assert_qubit_operations(self, *expected_operations):
        # Compare, qubit by qubit, the gate sequence each QubitSpy recorded.
        for i, e in enumerate(expected_operations):
            self.assertEqual(e, self.con.qubits[i].operations, "Unexpected operations on qubit {}".format(i))
class TestQChannelSending(TestQChannelBase):
    """Tests for QChannel's sending side (send_qubits / send_epr)."""
    def make_connection_double(self):
        return QConnectionSpy(make_qubit_spy)
    def test_send_qubits_to_receiver(self):
        self.qc.send_qubits([QState(0, 0)])
        self.assertEqual('Bob', self.con.receiver)
    def test_sending_qubits_with_default_bases(self):
        # Default mapping: basis 0 -> identity, basis 1 -> H (after X for value 1).
        self.qc.send_qubits([QState(0, 0), QState(0, 1), QState(1, 0), QState(1, 1)])
        self.assert_qubit_operations([], ['H'], ['X'], ['X', 'H'])
    def test_sending_qubits_with_specified_bases(self):
        self.qc.bases_mapping = [lambda q: q.Z(), lambda q: q.Y()]
        self.qc.send_qubits([QState(0, 0), QState(0, 1)])
        self.assert_qubit_operations(['Z'], ['Y'])
    def test_send_epr_pair_to_receiver(self):
        self.qc.send_epr([0])
        self.assertEqual('Bob', self.con.receiver)
    def test_measuring_sent_epr_pair(self):
        # Preloaded EPR values become the measured qubit values.
        self.con.epr_values = deque([1, 0])
        self.assertEqual([QState(1, 0), QState(0, 1)], self.qc.send_epr([0, 1]))
    def test_measuring_sent_epr_pair_in_default_bases(self):
        self.qc.send_epr([0, 1])
        self.assert_qubit_operations([], ['H'])
    def test_measuring_sent_epr_pair_in_specified_bases(self):
        self.qc.bases_mapping = [lambda q: q.Z(), lambda q: q.Y()]
        self.qc.send_epr([0, 1])
        self.assert_qubit_operations(['Z'], ['Y'])
class TestQChannelReceiving(TestQChannelBase):
    """Tests for QChannel's receiving side (receive_qubits_in / receive_epr_in)."""
    def make_connection_double(self):
        return ConnectionStub()
    def test_receiving_qubits(self):
        # Each received QState pairs the qubit's measured value with the basis.
        self.con.received_qubits = [QubitSpy(0), QubitSpy(0), QubitSpy(1), QubitSpy(1)]
        self.assertSequenceEqual([QState(0, 0), QState(0, 1), QState(1, 0), QState(1, 1)]
                                 , self.qc.receive_qubits_in([0, 1, 0, 1]))
    def test_measure_qubits_in_default_bases(self):
        self.con.received_qubits = [QubitSpy(), QubitSpy()]
        self.qc.receive_qubits_in([0, 1])
        self.assert_qubit_operations([], ['H'])
    def test_measure_qubits_in_specified_bases(self):
        self.con.received_qubits = [QubitSpy(), QubitSpy(), QubitSpy()]
        self.qc.bases_mapping = [lambda q: q.Y(), lambda q: q.Z(), lambda q: q.H()]
        self.qc.receive_qubits_in([0, 1, 2])
        self.assert_qubit_operations(['Y'], ['Z'], ['H'])
    def test_receiving_epr_pair(self):
        self.con.received_qubits = [QubitSpy(0), QubitSpy(0), QubitSpy(1), QubitSpy(1)]
        self.assertSequenceEqual([QState(0, 0), QState(0, 1), QState(1, 0), QState(1, 1)]
                                 , self.qc.receive_epr_in([0, 1, 0, 1]))
    def test_measure_epr_in_default_bases(self):
        self.con.received_qubits = [QubitSpy(), QubitSpy()]
        self.qc.receive_epr_in([0, 1])
        self.assert_qubit_operations([], ['H'])
    def test_measure_epr_in_specified_bases(self):
        self.con.received_qubits = [QubitSpy(), QubitSpy(), QubitSpy()]
        self.qc.bases_mapping = [lambda q: q.Y(), lambda q: q.Z(), lambda q: q.H()]
        self.qc.receive_epr_in([0, 1, 2])
        self.assert_qubit_operations(['Y'], ['Z'], ['H'])
class TestCAC(unittest.TestCase):
    """Tests that CAChannel forwards each operation to the classical connection.

    The per-test `connection = CACConnectionSpy()` boilerplate is hoisted
    into setUp; test method names and assertions are unchanged.
    """

    def setUp(self):
        # One fresh spy per test; tests wrap it in a CAChannel addressed
        # to whichever peer name they exercise.
        self.connection = CACConnectionSpy()

    def _channel(self, peer):
        """Build a CAChannel to *peer* over the shared spy connection."""
        return CAChannel(self.connection, peer)

    def test_sending_list_data(self):
        self._channel('Bob').send([1, 2])
        self.assertSequenceEqual([1, 2], self.connection.sent_data)
        self.assertEqual('Bob', self.connection.receiver)

    def test_sending_single_int(self):
        # A bare int is wrapped into a one-element list before sending.
        self._channel('Bob').send(42)
        self.assertSequenceEqual([42], self.connection.sent_data)
        self.assertEqual('Bob', self.connection.receiver)

    def test_receiving_data(self):
        self._channel('Alice').receive()
        self.assertTrue(self.connection.received_get_call)
        self.assertEqual('Alice', self.connection.sender)

    def test_send_acknowledgement(self):
        self._channel('Alice').send_ack()
        self.assertTrue(self.connection.received_send_ack_call)
        self.assertEqual('Alice', self.connection.receiver)

    def test_receive_acknowledgement(self):
        self._channel('Bob').receive_ack()
        self.assertTrue(self.connection.received_get_ack_call)
        self.assertEqual('Bob', self.connection.sender)

    def test_clear(self):
        self._channel('Alice').clear()
        self.assertTrue(self.connection.received_clear_call)

    def test_close(self):
        self._channel('Alice').close()
        self.assertTrue(self.connection.received_close_call)
| 32.863071 | 109 | 0.644571 | import unittest
from collections import deque
from QNetwork.q_network_channels import QState, QChannel, CAChannel
class QConnectionSpy:
def __init__(self, qubit_factory):
self.qubit_factory = qubit_factory
self.receiver = ''
self.qubits = []
self.epr_values = deque()
def sendQubit(self, qubit, receiver, print_info=True):
self.receiver = receiver
self.qubits.append(qubit)
def createEPR(self, receiver, print_info=True):
self.receiver = receiver
q = self.qubit_factory(self)
if len(self.epr_values) != 0:
q.value = self.epr_values.popleft()
self.qubits.append(q)
return q
class CACConnectionSpy:
def __init__(self):
self.receiver = ''
self.sender = ''
self.sent_data = None
self.received_get_call = False
self.received_clear_call = False
self.received_close_call = False
self.received_send_ack_call = False
self.received_get_ack_call = False
def sendValueList(self, receiver, data):
self.receiver = receiver
self.sent_data = data
def getValueList(self, sender):
self.sender = sender
self.received_get_call = True
def sendAck(self, receiver):
self.receiver = receiver
self.received_send_ack_call = True
def getAck(self, sender):
self.sender = sender
self.received_get_ack_call = True
def clearServer(self):
self.received_clear_call = True
def closeChannel(self):
self.received_close_call = True
class QubitSpy:
def __init__(self, value=0):
self.operations = []
self.value = value
def X(self, print_info=True):
self.operations.append('X')
def Y(self, print_info=True):
self.operations.append('Y')
def Z(self, print_info=True):
self.operations.append('Z')
def H(self, print_info=True):
self.operations.append('H')
def measure(self, print_info=True):
return self.value
def make_qubit_spy(connection):
return QubitSpy()
class ConnectionStub:
def __init__(self):
self.qubits = None
self.received_qubits = None
self.idx = 0
@property
def received_qubits(self):
return self.qubits
@received_qubits.setter
def received_qubits(self, value):
self.qubits = value
def recvQubit(self, print_info=True):
self.idx += 1
return self.qubits[self.idx - 1]
def recvEPR(self, print_info=True):
self.idx += 1
return self.qubits[self.idx - 1]
class TestQChannelBase(unittest.TestCase):
def setUp(self):
self.con = self.make_connection_double()
self.qc = QChannel(self.con, make_qubit_spy, 'Bob')
def make_connection_double(self):
raise NotImplementedError("QChannel test require a connection object")
def assert_qubit_operations(self, *expected_operations):
for i, e in enumerate(expected_operations):
self.assertEqual(e, self.con.qubits[i].operations, "Unexpected operations on qubit {}".format(i))
class TestQChannelSending(TestQChannelBase):
def make_connection_double(self):
return QConnectionSpy(make_qubit_spy)
def test_send_qubits_to_receiver(self):
self.qc.send_qubits([QState(0, 0)])
self.assertEqual('Bob', self.con.receiver)
def test_sending_qubits_with_default_bases(self):
self.qc.send_qubits([QState(0, 0), QState(0, 1), QState(1, 0), QState(1, 1)])
self.assert_qubit_operations([], ['H'], ['X'], ['X', 'H'])
def test_sending_qubits_with_specified_bases(self):
self.qc.bases_mapping = [lambda q: q.Z(), lambda q: q.Y()]
self.qc.send_qubits([QState(0, 0), QState(0, 1)])
self.assert_qubit_operations(['Z'], ['Y'])
def test_send_epr_pair_to_receiver(self):
self.qc.send_epr([0])
self.assertEqual('Bob', self.con.receiver)
def test_measuring_sent_epr_pair(self):
self.con.epr_values = deque([1, 0])
self.assertEqual([QState(1, 0), QState(0, 1)], self.qc.send_epr([0, 1]))
def test_measuring_sent_epr_pair_in_default_bases(self):
self.qc.send_epr([0, 1])
self.assert_qubit_operations([], ['H'])
def test_measuring_sent_epr_pair_in_specified_bases(self):
self.qc.bases_mapping = [lambda q: q.Z(), lambda q: q.Y()]
self.qc.send_epr([0, 1])
self.assert_qubit_operations(['Z'], ['Y'])
class TestQChannelReceiving(TestQChannelBase):
def make_connection_double(self):
return ConnectionStub()
def test_receiving_qubits(self):
self.con.received_qubits = [QubitSpy(0), QubitSpy(0), QubitSpy(1), QubitSpy(1)]
self.assertSequenceEqual([QState(0, 0), QState(0, 1), QState(1, 0), QState(1, 1)]
, self.qc.receive_qubits_in([0, 1, 0, 1]))
def test_measure_qubits_in_default_bases(self):
self.con.received_qubits = [QubitSpy(), QubitSpy()]
self.qc.receive_qubits_in([0, 1])
self.assert_qubit_operations([], ['H'])
def test_measure_qubits_in_specified_bases(self):
self.con.received_qubits = [QubitSpy(), QubitSpy(), QubitSpy()]
self.qc.bases_mapping = [lambda q: q.Y(), lambda q: q.Z(), lambda q: q.H()]
self.qc.receive_qubits_in([0, 1, 2])
self.assert_qubit_operations(['Y'], ['Z'], ['H'])
def test_receiving_epr_pair(self):
self.con.received_qubits = [QubitSpy(0), QubitSpy(0), QubitSpy(1), QubitSpy(1)]
self.assertSequenceEqual([QState(0, 0), QState(0, 1), QState(1, 0), QState(1, 1)]
, self.qc.receive_epr_in([0, 1, 0, 1]))
def test_measure_epr_in_default_bases(self):
self.con.received_qubits = [QubitSpy(), QubitSpy()]
self.qc.receive_epr_in([0, 1])
self.assert_qubit_operations([], ['H'])
def test_measure_epr_in_specified_bases(self):
self.con.received_qubits = [QubitSpy(), QubitSpy(), QubitSpy()]
self.qc.bases_mapping = [lambda q: q.Y(), lambda q: q.Z(), lambda q: q.H()]
self.qc.receive_epr_in([0, 1, 2])
self.assert_qubit_operations(['Y'], ['Z'], ['H'])
class TestCAC(unittest.TestCase):
    """Tests for CAChannel, observing its calls through a CACConnectionSpy."""

    @staticmethod
    def _make_channel(peer):
        # Build a spy connection plus a channel bound to the given peer name.
        spy = CACConnectionSpy()
        return spy, CAChannel(spy, peer)

    def test_sending_list_data(self):
        spy, channel = self._make_channel('Bob')
        channel.send([1, 2])
        self.assertSequenceEqual([1, 2], spy.sent_data)
        self.assertEqual('Bob', spy.receiver)

    def test_sending_single_int(self):
        # A scalar payload ends up as a one-element sequence on the wire.
        spy, channel = self._make_channel('Bob')
        channel.send(42)
        self.assertSequenceEqual([42], spy.sent_data)
        self.assertEqual('Bob', spy.receiver)

    def test_receiving_data(self):
        spy, channel = self._make_channel('Alice')
        channel.receive()
        self.assertTrue(spy.received_get_call)
        self.assertEqual('Alice', spy.sender)

    def test_send_acknowledgement(self):
        spy, channel = self._make_channel('Alice')
        channel.send_ack()
        self.assertTrue(spy.received_send_ack_call)
        self.assertEqual('Alice', spy.receiver)

    def test_receive_acknowledgement(self):
        spy, channel = self._make_channel('Bob')
        channel.receive_ack()
        self.assertTrue(spy.received_get_ack_call)
        self.assertEqual('Bob', spy.sender)

    def test_clear(self):
        spy, channel = self._make_channel('Alice')
        channel.clear()
        self.assertTrue(spy.received_clear_call)

    def test_close(self):
        spy, channel = self._make_channel('Alice')
        channel.close()
        self.assertTrue(spy.received_close_call)
| true | true |
1c30cc794aaf76d3edfc94d11fca65f4b979f80a | 2,010 | py | Python | aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/GetEngineNamepaceRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/GetEngineNamepaceRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/GetEngineNamepaceRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class GetEngineNamepaceRequest(RpcRequest):
    """RPC request for the MSE ``GetEngineNamepace`` API (version 2019-05-31).

    Each get_*/set_* pair reads or writes one string query parameter.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'mse', '2019-05-31', 'GetEngineNamepace', 'mse')
        self.set_method('POST')
        # Endpoint resolution tables are only attached when the installed
        # core library exposes these attributes.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_ClusterId(self):
        """Return the ClusterId query parameter (None if unset)."""
        return self.get_query_params().get('ClusterId')

    def set_ClusterId(self, ClusterId):
        """Set the ClusterId query parameter."""
        self.add_query_param('ClusterId', ClusterId)

    def get_InstanceId(self):
        """Return the InstanceId query parameter (None if unset)."""
        return self.get_query_params().get('InstanceId')

    def set_InstanceId(self, InstanceId):
        """Set the InstanceId query parameter."""
        self.add_query_param('InstanceId', InstanceId)

    def get_AcceptLanguage(self):
        """Return the AcceptLanguage query parameter (None if unset)."""
        return self.get_query_params().get('AcceptLanguage')

    def set_AcceptLanguage(self, AcceptLanguage):
        """Set the AcceptLanguage query parameter."""
        self.add_query_param('AcceptLanguage', AcceptLanguage)

    def get_Id(self):
        """Return the Id query parameter (None if unset)."""
        return self.get_query_params().get('Id')

    def set_Id(self, Id):
        """Set the Id query parameter."""
        self.add_query_param('Id', Id)
| 37.222222 | 76 | 0.755224 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class GetEngineNamepaceRequest(RpcRequest):
    """RPC request wrapper for the MSE ``GetEngineNamepace`` API (2019-05-31)."""

    def __init__(self):
        RpcRequest.__init__(self, 'mse', '2019-05-31', 'GetEngineNamepace','mse')
        self.set_method('POST')
        # Attach endpoint resolution tables when the core library supports them.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ClusterId(self):  # String
        """Return the ClusterId query parameter (None if unset)."""
        return self.get_query_params().get('ClusterId')

    def set_ClusterId(self, ClusterId):  # String
        """Set the ClusterId query parameter."""
        self.add_query_param('ClusterId', ClusterId)

    def get_InstanceId(self):  # String
        """Return the InstanceId query parameter (None if unset)."""
        return self.get_query_params().get('InstanceId')

    def set_InstanceId(self, InstanceId):  # String
        """Set the InstanceId query parameter."""
        self.add_query_param('InstanceId', InstanceId)

    def get_AcceptLanguage(self):  # String
        """Return the AcceptLanguage query parameter (None if unset)."""
        return self.get_query_params().get('AcceptLanguage')

    def set_AcceptLanguage(self, AcceptLanguage):  # String
        """Set the AcceptLanguage query parameter."""
        self.add_query_param('AcceptLanguage', AcceptLanguage)

    def get_Id(self):  # String
        """Return the Id query parameter (None if unset)."""
        return self.get_query_params().get('Id')

    def set_Id(self, Id):  # String
        """Set the Id query parameter."""
        self.add_query_param('Id', Id)
| true | true |
1c30ccc97252a100898b0b1f1d747dd1cc917e59 | 1,026 | py | Python | samples/tutorial-5.py | Richard-L-Johnson/pyalgotrader | ad2bcc6b25c06c66eee4a8d522ce844504d8ec62 | [
"Apache-2.0"
] | 3,719 | 2015-01-06T09:00:02.000Z | 2022-03-31T20:55:01.000Z | samples/tutorial-5.py | Richard-L-Johnson/pyalgotrader | ad2bcc6b25c06c66eee4a8d522ce844504d8ec62 | [
"Apache-2.0"
] | 122 | 2015-01-01T17:06:22.000Z | 2022-03-22T13:33:38.000Z | samples/tutorial-5.py | Richard-L-Johnson/pyalgotrader | ad2bcc6b25c06c66eee4a8d522ce844504d8ec62 | [
"Apache-2.0"
] | 1,428 | 2015-01-01T17:07:38.000Z | 2022-03-31T10:02:37.000Z | from pyalgotrade import plotter
from pyalgotrade.barfeed import quandlfeed
from pyalgotrade.stratanalyzer import returns
import sma_crossover
# Load the Quandl bar feed for Oracle from the bundled CSV file.
bar_feed = quandlfeed.Feed()
bar_feed.addBarsFromCSV("orcl", "WIKI-ORCL-2000-quandl.csv")

# Build the SMA cross-over strategy (20-period SMA) over those bars.
strategy = sma_crossover.SMACrossOver(bar_feed, "orcl", 20)

# Track simple per-bar returns while the strategy runs.
returns_analyzer = returns.Returns()
strategy.attachAnalyzer(returns_analyzer)

# Plot closing prices + SMA on the instrument subplot, and the simple
# returns on their own subplot.
chart = plotter.StrategyPlotter(strategy)
chart.getInstrumentSubplot("orcl").addDataSeries("SMA", strategy.getSMA())
chart.getOrCreateSubplot("returns").addDataSeries("Simple returns", returns_analyzer.getReturns())

# Backtest, report the final portfolio value, then show the chart.
strategy.run()
strategy.info("Final portfolio value: $%.2f" % strategy.getResult())
chart.plot()
| 34.2 | 96 | 0.789474 | from pyalgotrade import plotter
from pyalgotrade.barfeed import quandlfeed
from pyalgotrade.stratanalyzer import returns
import sma_crossover
# Load the Quandl bar feed for Oracle from the bundled CSV file.
feed = quandlfeed.Feed()
feed.addBarsFromCSV("orcl", "WIKI-ORCL-2000-quandl.csv")
# Build the SMA cross-over strategy (20-period SMA) over those bars.
myStrategy = sma_crossover.SMACrossOver(feed, "orcl", 20)
# Attach a returns analyzer to the strategy.
returnsAnalyzer = returns.Returns()
myStrategy.attachAnalyzer(returnsAnalyzer)
# Attach the plotter to the strategy.
plt = plotter.StrategyPlotter(myStrategy)
# Include the SMA in the instrument's subplot to get it displayed along with the closing prices.
plt.getInstrumentSubplot("orcl").addDataSeries("SMA", myStrategy.getSMA())
# Plot the simple returns on each bar in their own subplot.
plt.getOrCreateSubplot("returns").addDataSeries("Simple returns", returnsAnalyzer.getReturns())
# Backtest and report the final portfolio value.
myStrategy.run()
myStrategy.info("Final portfolio value: $%.2f" % myStrategy.getResult())
# Show the chart.
plt.plot()
| true | true |
1c30ccfd8fc32263190dc25308ca7b6c7621657c | 4,928 | py | Python | src/oci/apigateway/models/header_validation_request_policy.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/apigateway/models/header_validation_request_policy.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/apigateway/models/header_validation_request_policy.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class HeaderValidationRequestPolicy(object):
    """Policy that validates the HTTP headers of incoming API requests on a route."""

    # Allowed values for the validation_mode property.
    VALIDATION_MODE_ENFORCING = "ENFORCING"
    VALIDATION_MODE_PERMISSIVE = "PERMISSIVE"
    VALIDATION_MODE_DISABLED = "DISABLED"

    def __init__(self, **kwargs):
        """
        Initializes a new HeaderValidationRequestPolicy object with values from
        keyword arguments. Supported keywords (matching this class's properties):

        :param headers: list[oci.apigateway.models.HeaderValidationItem]
        :param validation_mode: str, one of "ENFORCING", "PERMISSIVE", "DISABLED"
        """
        self.swagger_types = {
            'headers': 'list[HeaderValidationItem]',
            'validation_mode': 'str'
        }
        self.attribute_map = {
            'headers': 'headers',
            'validation_mode': 'validationMode'
        }
        self._headers = None
        self._validation_mode = None

    @property
    def headers(self):
        """The HeaderValidationItem entries of this policy."""
        return self._headers

    @headers.setter
    def headers(self, headers):
        self._headers = headers

    @property
    def validation_mode(self):
        """
        Validation behavior mode; one of "ENFORCING", "PERMISSIVE", "DISABLED".

        ENFORCING rejects failing requests with a 4xx before the backend;
        PERMISSIVE only reports validation results as metrics; DISABLED turns
        validation off.
        """
        return self._validation_mode

    @validation_mode.setter
    def validation_mode(self, validation_mode):
        # Reject anything outside the allowed set (None is accepted).
        allowed = ["ENFORCING", "PERMISSIVE", "DISABLED"]
        if not value_allowed_none_or_none_sentinel(validation_mode, allowed):
            raise ValueError(
                "Invalid value for `validation_mode`, must be None or one of {0}"
                .format(allowed)
            )
        self._validation_mode = validation_mode

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 36.503704 | 245 | 0.683442 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class HeaderValidationRequestPolicy(object):
    """Validate the HTTP headers on the incoming API requests on a specific route."""

    # Allowed values for the validation_mode property.
    VALIDATION_MODE_ENFORCING = "ENFORCING"
    VALIDATION_MODE_PERMISSIVE = "PERMISSIVE"
    VALIDATION_MODE_DISABLED = "DISABLED"

    def __init__(self, **kwargs):
        """Initialize from keyword arguments ``headers`` and ``validation_mode``."""
        # Swagger type and JSON attribute-name metadata used by the SDK.
        self.swagger_types = {
            'headers': 'list[HeaderValidationItem]',
            'validation_mode': 'str'
        }
        self.attribute_map = {
            'headers': 'headers',
            'validation_mode': 'validationMode'
        }
        self._headers = None
        self._validation_mode = None

    @property
    def headers(self):
        """The HeaderValidationItem entries of this policy."""
        return self._headers

    @headers.setter
    def headers(self, headers):
        self._headers = headers

    @property
    def validation_mode(self):
        """Validation behavior mode; one of "ENFORCING", "PERMISSIVE", "DISABLED"."""
        return self._validation_mode

    @validation_mode.setter
    def validation_mode(self, validation_mode):
        # None is accepted; any other value must be in the allowed set.
        allowed_values = ["ENFORCING", "PERMISSIVE", "DISABLED"]
        if not value_allowed_none_or_none_sentinel(validation_mode, allowed_values):
            raise ValueError(
                "Invalid value for `validation_mode`, must be None or one of {0}"
                .format(allowed_values)
            )
        self._validation_mode = validation_mode

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Structural equality over all instance attributes.
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| true | true |
1c30cdb56ec9c8a721b9245384ea415dc675cf89 | 794 | py | Python | networking-calico/networking_calico/tests/base.py | mikestephen/calico | 6c512191c05097dbfacbd18fb23d1ebff18961fd | [
"Apache-2.0"
] | 3,973 | 2015-07-29T21:13:46.000Z | 2022-03-31T09:27:38.000Z | networking-calico/networking_calico/tests/base.py | mikestephen/calico | 6c512191c05097dbfacbd18fb23d1ebff18961fd | [
"Apache-2.0"
] | 4,584 | 2015-07-29T08:47:22.000Z | 2022-03-31T22:54:26.000Z | networking-calico/networking_calico/tests/base.py | mikestephen/calico | 6c512191c05097dbfacbd18fb23d1ebff18961fd | [
"Apache-2.0"
] | 1,066 | 2015-07-30T06:29:18.000Z | 2022-03-31T20:01:47.000Z | # -*- coding: utf-8 -*-
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base
class TestCase(base.BaseTestCase):
    """Test case base class for all unit tests.

    Extends oslotest's BaseTestCase; project-wide test fixtures belong here.
    """
| 33.083333 | 75 | 0.746851 |
from oslotest import base
class TestCase(base.BaseTestCase):
    """Test case base class for all unit tests."""
    # The class previously had no body at all (its docstring had been
    # stripped), which is a SyntaxError; the docstring restores a valid body.
| true | true |
1c30ceb82379c701d0aaa19e1746cd120433bc66 | 443 | py | Python | src/utils/common_utils.py | ralfeger/language-identification | 80c79423389207f197911d7b0eb78143f25f44b6 | [
"BSD-2-Clause"
] | 16 | 2021-06-23T15:24:04.000Z | 2022-03-23T21:13:31.000Z | src/utils/common_utils.py | ralfeger/language-identification | 80c79423389207f197911d7b0eb78143f25f44b6 | [
"BSD-2-Clause"
] | null | null | null | src/utils/common_utils.py | ralfeger/language-identification | 80c79423389207f197911d7b0eb78143f25f44b6 | [
"BSD-2-Clause"
] | 2 | 2021-06-24T08:49:26.000Z | 2022-03-31T12:13:58.000Z | """
:author:
Paul Bethge (bethge@zkm.de)
2021
:License:
This package is published under Simplified BSD License.
"""
class Range(object):
    """Closed interval [start, end] that compares equal to any value inside it.

    Both ``Range(a, b) == x`` and ``x in Range(a, b)`` test containment, and
    iterating yields the range itself.
    """

    def __init__(self, start, end):
        self.start = start
        self.end = end

    def __eq__(self, other):
        # "Equality" means the value lies within the closed bounds.
        return self.start <= other <= self.end

    def __contains__(self, item):
        # Delegate membership to the containment-style equality above.
        return self == item

    def __iter__(self):
        # Yield the range itself so a Range can stand in for an iterable of
        # acceptable values.
        yield self

    def __str__(self):
        return '{0} - {1}'.format(self.start, self.end)
| 17.038462 | 55 | 0.68623 |
class Range(object):
    """Closed interval [start, end] that compares equal to any value inside it."""

    def __init__(self, start, end):
        self.start = start
        self.end = end

    def __eq__(self, other):
        # "Equality" means the value lies within the closed bounds.
        return self.start <= other <= self.end

    def __contains__(self, item):
        # Membership delegates to the containment-style equality above.
        return self.__eq__(item)

    def __iter__(self):
        # Yield the range itself so it can stand in for an iterable of values.
        yield self

    def __str__(self):
        return '{0} - {1}'.format(self.start, self.end)
| true | true |
1c30d12919e16a90c13fc069273b0a89cf0622d4 | 4,363 | py | Python | sympy/physics/quantum/anticommutator.py | JDTrujillo18/sympy | ef47677483b2f29d0b8e6a0eb45de72b2e34477d | [
"BSD-3-Clause"
] | 4 | 2018-07-04T17:20:12.000Z | 2019-07-14T18:07:25.000Z | sympy/physics/quantum/anticommutator.py | JDTrujillo18/sympy | ef47677483b2f29d0b8e6a0eb45de72b2e34477d | [
"BSD-3-Clause"
] | null | null | null | sympy/physics/quantum/anticommutator.py | JDTrujillo18/sympy | ef47677483b2f29d0b8e6a0eb45de72b2e34477d | [
"BSD-3-Clause"
] | 1 | 2018-09-03T03:02:06.000Z | 2018-09-03T03:02:06.000Z | """The anti-commutator: ``{A,B} = A*B + B*A``."""
from __future__ import print_function, division
from sympy import S, Expr, Mul, Integer
from sympy.core.compatibility import u
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.operator import Operator
from sympy.physics.quantum.dagger import Dagger
__all__ = [
'AntiCommutator'
]
#-----------------------------------------------------------------------------
# Anti-commutator
#-----------------------------------------------------------------------------
class AntiCommutator(Expr):
    """The standard anticommutator, in an unevaluated state.

    Evaluating an anticommutator is defined [1]_ as: ``{A, B} = A*B + B*A``.
    This class returns the anticommutator in an unevaluated form. To evaluate
    the anticommutator, use the ``.doit()`` method.

    Canonical ordering of an anticommutator is ``{A, B}`` for ``A < B``. The
    arguments of the anticommutator are put into canonical order using
    ``__cmp__``. If ``B < A``, then ``{A, B}`` is returned as ``{B, A}``.

    Parameters
    ==========

    A : Expr
        The first argument of the anticommutator {A,B}.
    B : Expr
        The second argument of the anticommutator {A,B}.

    Examples
    ========

    >>> from sympy import symbols
    >>> from sympy.physics.quantum import AntiCommutator
    >>> from sympy.physics.quantum import Operator, Dagger
    >>> x, y = symbols('x,y')
    >>> A = Operator('A')
    >>> B = Operator('B')

    Create an anticommutator and use ``doit()`` to multiply them out.

    >>> ac = AntiCommutator(A,B); ac
    {A,B}
    >>> ac.doit()
    A*B + B*A

    The commutator orders its arguments in canonical order:

    >>> ac = AntiCommutator(B,A); ac
    {A,B}

    Commutative constants are factored out:

    >>> AntiCommutator(3*x*A,x*y*B)
    3*x**2*y*{A,B}

    Adjoint operations applied to the anticommutator are properly applied to
    the arguments:

    >>> Dagger(AntiCommutator(A,B))
    {Dagger(A),Dagger(B)}

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Commutator
    """
    is_commutative = False

    def __new__(cls, A, B):
        # Return the simplified form when eval succeeds; otherwise build the
        # unevaluated Expr node.
        r = cls.eval(A, B)
        if r is not None:
            return r
        obj = Expr.__new__(cls, A, B)
        return obj

    @classmethod
    def eval(cls, a, b):
        """Try to simplify {a, b} immediately; return None to stay unevaluated."""
        if a == b:
            return Integer(2)*a**2
        if not (a and b):
            return S.Zero
        if a.is_commutative or b.is_commutative:
            return Integer(2)*a*b

        # {xA, yB} -> xy*{A, B}: pull commutative factors out front.
        ca, nca = a.args_cnc()
        cb, ncb = b.args_cnc()
        c_part = ca + cb
        if c_part:
            return Mul(Mul(*c_part), cls(Mul._from_args(nca), Mul._from_args(ncb)))

        # Canonical ordering of arguments:
        # the anticommutator {A,B} is in canonical form if A < B.
        if a.compare(b) == 1:
            return cls(b, a)

    def doit(self, **hints):
        """Evaluate the anticommutator, i.e. return ``A*B + B*A`` (simplified).

        Operator arguments are first given a chance to evaluate via their
        ``_eval_anticommutator`` hooks, in either order.
        """
        A = self.args[0]
        B = self.args[1]
        if isinstance(A, Operator) and isinstance(B, Operator):
            try:
                comm = A._eval_anticommutator(B, **hints)
            except NotImplementedError:
                try:
                    comm = B._eval_anticommutator(A, **hints)
                except NotImplementedError:
                    comm = None
            if comm is not None:
                return comm.doit(**hints)
        return (A*B + B*A).doit(**hints)

    def _eval_adjoint(self):
        # {A, B}^dagger = {Dagger(A), Dagger(B)}.
        return AntiCommutator(Dagger(self.args[0]), Dagger(self.args[1]))

    def _sympyrepr(self, printer, *args):
        return "%s(%s,%s)" % (
            self.__class__.__name__, printer._print(
                self.args[0]), printer._print(self.args[1])
        )

    def _sympystr(self, printer, *args):
        return "{%s,%s}" % (self.args[0], self.args[1])

    def _pretty(self, printer, *args):
        # Render as "{A,B}" by joining the pretty forms with a comma and
        # wrapping in curly braces.
        pform = printer._print(self.args[0], *args)
        pform = prettyForm(*pform.right((prettyForm(u','))))
        pform = prettyForm(*pform.right((printer._print(self.args[1], *args))))
        pform = prettyForm(*pform.parens(left='{', right='}'))
        return pform

    def _latex(self, printer, *args):
        return "\\left\\{%s,%s\\right\\}" % tuple([
            printer._print(arg, *args) for arg in self.args])
| 29.883562 | 83 | 0.555123 |
from __future__ import print_function, division
from sympy import S, Expr, Mul, Integer
from sympy.core.compatibility import u
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.operator import Operator
from sympy.physics.quantum.dagger import Dagger
__all__ = [
'AntiCommutator'
]
class AntiCommutator(Expr):
    """The standard anticommutator ``{A, B} = A*B + B*A`` in unevaluated form.

    Use ``.doit()`` to multiply the arguments out. Arguments are put into
    canonical order (``{A, B}`` with ``A < B``) and commutative factors are
    pulled out front at construction time.
    """
    is_commutative = False

    def __new__(cls, A, B):
        # Return the simplified form when eval succeeds; otherwise build the
        # unevaluated Expr node.
        r = cls.eval(A, B)
        if r is not None:
            return r
        obj = Expr.__new__(cls, A, B)
        return obj

    @classmethod
    def eval(cls, a, b):
        """Try to simplify {a, b} immediately; return None to stay unevaluated."""
        if not (a and b):
            return S.Zero
        if a == b:
            return Integer(2)*a**2
        if a.is_commutative or b.is_commutative:
            return Integer(2)*a*b
        # {xA, yB} -> xy*{A, B}: pull commutative factors out front.
        ca, nca = a.args_cnc()
        cb, ncb = b.args_cnc()
        c_part = ca + cb
        if c_part:
            return Mul(Mul(*c_part), cls(Mul._from_args(nca), Mul._from_args(ncb)))
        # Canonical ordering: {A, B} is canonical when A < B.
        if a.compare(b) == 1:
            return cls(b, a)

    def doit(self, **hints):
        """Evaluate the anticommutator, i.e. return ``A*B + B*A`` (simplified)."""
        A = self.args[0]
        B = self.args[1]
        if isinstance(A, Operator) and isinstance(B, Operator):
            # Give the operators' _eval_anticommutator hooks a chance first,
            # in either order.
            try:
                comm = A._eval_anticommutator(B, **hints)
            except NotImplementedError:
                try:
                    comm = B._eval_anticommutator(A, **hints)
                except NotImplementedError:
                    comm = None
            if comm is not None:
                return comm.doit(**hints)
        return (A*B + B*A).doit(**hints)

    def _eval_adjoint(self):
        # {A, B}^dagger = {Dagger(A), Dagger(B)}.
        return AntiCommutator(Dagger(self.args[0]), Dagger(self.args[1]))

    def _sympyrepr(self, printer, *args):
        return "%s(%s,%s)" % (
            self.__class__.__name__, printer._print(
                self.args[0]), printer._print(self.args[1])
        )

    def _sympystr(self, printer, *args):
        return "{%s,%s}" % (self.args[0], self.args[1])

    def _pretty(self, printer, *args):
        # Render as "{A,B}" with curly-brace parens around the joined forms.
        pform = printer._print(self.args[0], *args)
        pform = prettyForm(*pform.right((prettyForm(u','))))
        pform = prettyForm(*pform.right((printer._print(self.args[1], *args))))
        pform = prettyForm(*pform.parens(left='{', right='}'))
        return pform

    def _latex(self, printer, *args):
        return "\\left\\{%s,%s\\right\\}" % tuple([
            printer._print(arg, *args) for arg in self.args])
| true | true |
1c30d3b7bec1460ddc836be1f6b85d6be46858d7 | 3,854 | py | Python | ucscentralsdk/mometa/adaptor/AdaptorProtocolProfile.py | ragupta-git/ucscentralsdk | 2678008b5fb6b0fafafec388d0874147e95a1086 | [
"Apache-2.0"
] | null | null | null | ucscentralsdk/mometa/adaptor/AdaptorProtocolProfile.py | ragupta-git/ucscentralsdk | 2678008b5fb6b0fafafec388d0874147e95a1086 | [
"Apache-2.0"
] | null | null | null | ucscentralsdk/mometa/adaptor/AdaptorProtocolProfile.py | ragupta-git/ucscentralsdk | 2678008b5fb6b0fafafec388d0874147e95a1086 | [
"Apache-2.0"
] | null | null | null | """This module contains the general information for AdaptorProtocolProfile ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class AdaptorProtocolProfileConsts():
    """Allowed string values for AdaptorProtocolProfile's yes/no style properties."""
    # Values accepted by the bootToTarget property.
    BOOT_TO_TARGET_FALSE = "false"
    BOOT_TO_TARGET_NO = "no"
    BOOT_TO_TARGET_TRUE = "true"
    BOOT_TO_TARGET_YES = "yes"
    # Values accepted by the hbaMode property.
    HBA_MODE_FALSE = "false"
    HBA_MODE_NO = "no"
    HBA_MODE_TRUE = "true"
    HBA_MODE_YES = "yes"
    # Values accepted by the tcpTimeStamp property.
    TCP_TIME_STAMP_FALSE = "false"
    TCP_TIME_STAMP_NO = "no"
    TCP_TIME_STAMP_TRUE = "true"
    TCP_TIME_STAMP_YES = "yes"
class AdaptorProtocolProfile(ManagedObject):
    """This is AdaptorProtocolProfile class."""

    consts = AdaptorProtocolProfileConsts()
    naming_props = set([])

    # Managed-object metadata: class names, rn prefix, version, access mask,
    # privileges, parent/child relations and the supported verbs.
    mo_meta = MoMeta("AdaptorProtocolProfile", "adaptorProtocolProfile", "iscsi-prot-profile", VersionMeta.Version111a, "InputOutput", 0x3ff, [], ["admin", "ls-config-policy", "ls-network", "ls-server-policy"], [u'adaptorHostIscsiIfProfile'], [], ["Add", "Get", "Set"])

    # Per-property metadata: XML name, type, version, access, mask,
    # min/max length, regex, allowed values and numeric ranges.
    prop_meta = {
        "boot_to_target": MoPropertyMeta("boot_to_target", "bootToTarget", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["false", "no", "true", "yes"], []),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "connection_time_out": MoPropertyMeta("connection_time_out", "connectionTimeOut", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, [], ["0-255"]),
        "dhcp_time_out": MoPropertyMeta("dhcp_time_out", "dhcpTimeOut", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["60-300"]),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
        "hba_mode": MoPropertyMeta("hba_mode", "hbaMode", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["false", "no", "true", "yes"], []),
        "lun_busy_retry_count": MoPropertyMeta("lun_busy_retry_count", "lunBusyRetryCount", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["0-60"]),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x100, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "tcp_time_stamp": MoPropertyMeta("tcp_time_stamp", "tcpTimeStamp", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x200, None, None, None, ["false", "no", "true", "yes"], []),
    }

    # Mapping from XML attribute names to Python property names.
    prop_map = {
        "bootToTarget": "boot_to_target",
        "childAction": "child_action",
        "connectionTimeOut": "connection_time_out",
        "dhcpTimeOut": "dhcp_time_out",
        "dn": "dn",
        "hbaMode": "hba_mode",
        "lunBusyRetryCount": "lun_busy_retry_count",
        "rn": "rn",
        "status": "status",
        "tcpTimeStamp": "tcp_time_stamp",
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the managed object under the given parent MO or DN string."""
        self._dirty_mask = 0
        self.boot_to_target = None
        self.child_action = None
        self.connection_time_out = None
        self.dhcp_time_out = None
        self.hba_mode = None
        self.lun_busy_retry_count = None
        self.status = None
        self.tcp_time_stamp = None

        ManagedObject.__init__(self, "AdaptorProtocolProfile", parent_mo_or_dn, **kwargs)
| 55.057143 | 269 | 0.676959 |
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class AdaptorProtocolProfileConsts():
    """String constants accepted by AdaptorProtocolProfile's tri-state properties."""
    # bootToTarget values.
    BOOT_TO_TARGET_FALSE = "false"
    BOOT_TO_TARGET_NO = "no"
    BOOT_TO_TARGET_TRUE = "true"
    BOOT_TO_TARGET_YES = "yes"
    # hbaMode values.
    HBA_MODE_FALSE = "false"
    HBA_MODE_NO = "no"
    HBA_MODE_TRUE = "true"
    HBA_MODE_YES = "yes"
    # tcpTimeStamp values.
    TCP_TIME_STAMP_FALSE = "false"
    TCP_TIME_STAMP_NO = "no"
    TCP_TIME_STAMP_TRUE = "true"
    TCP_TIME_STAMP_YES = "yes"
class AdaptorProtocolProfile(ManagedObject):
    """Managed object for the adaptorProtocolProfile (iSCSI protocol profile) class."""

    consts = AdaptorProtocolProfileConsts()
    naming_props = set([])

    # Managed-object metadata: class names, rn prefix, version, access mask,
    # privileges, parent/child relations and the supported verbs.
    mo_meta = MoMeta("AdaptorProtocolProfile", "adaptorProtocolProfile", "iscsi-prot-profile", VersionMeta.Version111a, "InputOutput", 0x3ff, [], ["admin", "ls-config-policy", "ls-network", "ls-server-policy"], [u'adaptorHostIscsiIfProfile'], [], ["Add", "Get", "Set"])

    # Per-property metadata (XML name, type, access, regex, allowed values, ranges).
    prop_meta = {
        "boot_to_target": MoPropertyMeta("boot_to_target", "bootToTarget", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["false", "no", "true", "yes"], []),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "connection_time_out": MoPropertyMeta("connection_time_out", "connectionTimeOut", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, [], ["0-255"]),
        "dhcp_time_out": MoPropertyMeta("dhcp_time_out", "dhcpTimeOut", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["60-300"]),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
        "hba_mode": MoPropertyMeta("hba_mode", "hbaMode", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["false", "no", "true", "yes"], []),
        "lun_busy_retry_count": MoPropertyMeta("lun_busy_retry_count", "lunBusyRetryCount", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["0-60"]),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x100, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "tcp_time_stamp": MoPropertyMeta("tcp_time_stamp", "tcpTimeStamp", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x200, None, None, None, ["false", "no", "true", "yes"], []),
    }

    # Mapping from XML attribute names to Python property names.
    prop_map = {
        "bootToTarget": "boot_to_target",
        "childAction": "child_action",
        "connectionTimeOut": "connection_time_out",
        "dhcpTimeOut": "dhcp_time_out",
        "dn": "dn",
        "hbaMode": "hba_mode",
        "lunBusyRetryCount": "lun_busy_retry_count",
        "rn": "rn",
        "status": "status",
        "tcpTimeStamp": "tcp_time_stamp",
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the managed object under the given parent MO or DN string."""
        self._dirty_mask = 0
        self.boot_to_target = None
        self.child_action = None
        self.connection_time_out = None
        self.dhcp_time_out = None
        self.hba_mode = None
        self.lun_busy_retry_count = None
        self.status = None
        self.tcp_time_stamp = None

        ManagedObject.__init__(self, "AdaptorProtocolProfile", parent_mo_or_dn, **kwargs)
| true | true |
1c30d5a3a6d78db19f14f05f1216dea4dd85ef53 | 1,802 | py | Python | day19/puzzle2.py | soerenbnoergaard/adventofcode2020 | eaaf0b670ab581cf993167fa19023fe965cc2eb4 | [
"MIT"
] | null | null | null | day19/puzzle2.py | soerenbnoergaard/adventofcode2020 | eaaf0b670ab581cf993167fa19023fe965cc2eb4 | [
"MIT"
] | null | null | null | day19/puzzle2.py | soerenbnoergaard/adventofcode2020 | eaaf0b670ab581cf993167fa19023fe965cc2eb4 | [
"MIT"
] | null | null | null | import re
from copy import deepcopy
from pprint import pprint
# INFILE = "test_input1.txt"
INFILE = "test_input2.txt"
# INFILE = "puzzle_input.txt"
def main():
    """Count how many messages fully match rule 0 after the part-two rule update."""
    rules, messages = parse(INFILE)
    rules = update(rules)
    print(f"{len(rules)=}")
    print(f"{len(messages)=}")
    matcher = expand(rules, "0")
    print(matcher)
    # The compiled pattern is anchored (^...$), so match() is a full match.
    num_match = sum(1 for message in messages if matcher.match(message))
    print(f"{num_match=}")
def update(rules):
    """Rewrite rules 8 and 11 with their self-referential part-two versions.

    Mutates `rules` in place and returns it for convenience.
    """
    rules.update({
        "8": "42 | 42 8",
        "11": "42 31 | 42 11 31",
    })
    return rules
def parse(filename):
    """Read the puzzle input: a rule section, a blank line, then messages.

    Returns (rules, messages): a dict mapping rule index (str) to its raw
    body, and the list of message strings.
    """
    rules = {}
    messages = []
    in_messages = False
    with open(filename, "r") as fh:
        for raw in fh:
            line = raw.strip()
            if not line:
                # Blank separator: everything after it is a message.
                in_messages = True
            elif in_messages:
                messages.append(line)
            else:
                idx, rule = line.split(":")
                rules[idx.strip()] = rule.strip()
    return rules, messages
def expand(rules, index):
    """Return a compiled, anchored regex matching messages valid under rule `index`.

    Rule bodies are alternations ("|") of sequences of rule indices; terminal
    rules are quoted characters. The original implementation substituted
    indices textually and `continue`d on self-referential rules, which left
    `rule` unchanged and made the loop spin forever once update() introduced
    the looping rules 8 and 11 (its recursion test was also a substring
    check, so "4" looked "recursive" whenever "42" appeared in its body).
    This version builds fragments recursively and translates the two known
    looping shapes into regex repetition instead.
    """
    # Remove the quotation marks around terminal characters (mutates `rules`
    # in place, as before).
    for idx in rules:
        rules[idx] = rules[idx].replace('"', '')

    fragments = {}  # memoized regex fragment per rule index

    def build(idx):
        """Translate rule `idx` (and everything it references) into a fragment."""
        if idx in fragments:
            return fragments[idx]
        rule = rules[idx]
        tokens = rule.replace("|", " ").split()
        if idx in tokens:
            # Self-referential rules introduced by update() for part two.
            if rule == "42 | 42 " + idx:
                # idx: 42 | 42 idx  ->  one or more matches of rule 42.
                frag = "(?:" + build("42") + ")+"
            elif rule == "42 31 | 42 " + idx + " 31":
                # idx: 42 31 | 42 idx 31 -> n copies of 42 then n copies of 31.
                # Unrolled to depth 10; assumed sufficient for the puzzle's
                # message lengths -- raise the bound for longer inputs.
                a, b = build("42"), build("31")
                alts = "|".join(
                    "(?:%s){%d}(?:%s){%d}" % (a, n, b, n) for n in range(1, 11)
                )
                frag = "(?:" + alts + ")"
            else:
                raise ValueError(f"cannot expand self-referential rule {idx}: {rule}")
        elif rule.isalpha():
            # Terminal rule: a literal character (quotes already stripped).
            frag = rule
        else:
            alternatives = [
                "".join(build(tok) for tok in alt.split())
                for alt in rule.split("|")
            ]
            frag = "(?:" + "|".join(alternatives) + ")"
        fragments[idx] = frag
        return frag

    return re.compile("^" + build(index) + "$")
if __name__ == "__main__":
main()
| 24.684932 | 77 | 0.532186 | import re
from copy import deepcopy
from pprint import pprint
INFILE = "test_input2.txt"
def main():
    """Count how many input messages fully match rule 0 after the part-two update."""
    rules, messages = parse(INFILE)
    rules = update(rules)
    print(f"{len(rules)=}")
    print(f"{len(messages)=}")
    pattern = expand(rules, "0")
    print(pattern)
    num_match = 0
    for message in messages:
        # The compiled pattern is anchored (^...$), so match() is a full match.
        if pattern.match(message):
            num_match += 1
    print(f"{num_match=}")
def update(rules):
    """Rewrite rules 8 and 11 with their self-referential part-two versions.

    Mutates `rules` in place and returns it.
    """
    rules["8"] = "42 | 42 8"
    rules["11"] = "42 31 | 42 11 31"
    return rules
def parse(filename):
    """Read the puzzle input: a rule section, a blank line, then messages.

    Returns (rules, messages): a dict mapping rule index (str) to its raw
    body, and the list of message strings.
    """
    rules = {}
    messages = []
    # state 0: reading rules; state 1 (after the blank line): reading messages.
    state = 0
    with open(filename, "r") as fh:
        for line in fh:
            line = line.strip()
            if line == "":
                state = 1
            elif state == 0:
                idx, rule = line.split(":")
                rules[idx.strip()] = rule.strip()
            elif state == 1:
                messages.append(line.strip())
    return rules, messages
def expand(rules, index):
for idx in rules:
rules[idx] = rules[idx].replace('"', '')
# Search-and-replace all indexes until all decimal values are eliminated.
rule = " " + rules[index] + " "
pattern = re.compile(r"(\d+)")
while m := pattern.search(rule):
idx = m.group(1)
old = idx
new = rules[idx]
if old in new:
# Recursion!
# breakpoint()
continue
rule = rule.replace(" "+old+" ", " ( "+new+" ) ")
pattern = "^" + rule.replace(" ", "") + "$"
return re.compile(pattern)
if __name__ == "__main__":
main()
| true | true |
1c30d64ed9596a121fb2b5a5b2d877f00c35273d | 1,433 | py | Python | users/migrations/0002_auto_20190824_1213.py | ispmor/space_reservation_system | 459843c94bad82110a532db6e16d1075bc88f39b | [
"MIT"
] | null | null | null | users/migrations/0002_auto_20190824_1213.py | ispmor/space_reservation_system | 459843c94bad82110a532db6e16d1075bc88f39b | [
"MIT"
] | 23 | 2019-07-27T10:21:17.000Z | 2022-02-10T08:39:12.000Z | users/migrations/0002_auto_20190824_1213.py | ispmor/space_reservation_system | 459843c94bad82110a532db6e16d1075bc88f39b | [
"MIT"
] | 1 | 2019-05-19T21:37:40.000Z | 2019-05-19T21:37:40.000Z | # Generated by Django 2.2 on 2019-08-24 12:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add profile fields to CustomUser and widen the name columns.

    Adds `archived`, `group`, `indeks` and `permission`, and alters
    `first_name`/`last_name` to CharField(max_length=50).
    """

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='customuser',
            name='archived',
            field=models.BooleanField(blank=True, default=False, null=True),
        ),
        migrations.AddField(
            model_name='customuser',
            name='group',
            field=models.CharField(blank=True, choices=[('s', 'Student'), ('l', 'Lecturer'), ('e', 'External')], help_text='To which group does User qualify', max_length=1),
        ),
        migrations.AddField(
            model_name='customuser',
            name='indeks',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='customuser',
            name='permission',
            field=models.CharField(blank=True, choices=[('b', 'Banned'), ('a', 'Allowed')], help_text='Is User allowed to create a reservation', max_length=1),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='first_name',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='last_name',
            field=models.CharField(max_length=50),
        ),
    ]
| 32.568182 | 173 | 0.566643 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='archived',
field=models.BooleanField(blank=True, default=False, null=True),
),
migrations.AddField(
model_name='customuser',
name='group',
field=models.CharField(blank=True, choices=[('s', 'Student'), ('l', 'Lecturer'), ('e', 'External')], help_text='To which group does User qualify', max_length=1),
),
migrations.AddField(
model_name='customuser',
name='indeks',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='customuser',
name='permission',
field=models.CharField(blank=True, choices=[('b', 'Banned'), ('a', 'Allowed')], help_text='Is User allowed to create a reservation', max_length=1),
),
migrations.AlterField(
model_name='customuser',
name='first_name',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='customuser',
name='last_name',
field=models.CharField(max_length=50),
),
]
| true | true |
1c30da24056b0d1729d11e38cf1ff35b66700d17 | 1,481 | py | Python | xlsxwriter/test/comparison/test_chart_data_labels02.py | adgear/XlsxWriter | 79bcaad28d57ac29038b1c74bccc6d611b7a385e | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2019-07-25T06:08:09.000Z | 2019-11-01T02:33:56.000Z | xlsxwriter/test/comparison/test_chart_data_labels02.py | adgear/XlsxWriter | 79bcaad28d57ac29038b1c74bccc6d611b7a385e | [
"BSD-2-Clause-FreeBSD"
] | 13 | 2019-07-14T00:29:05.000Z | 2019-11-26T06:16:46.000Z | xlsxwriter/test/comparison/test_chart_data_labels02.py | adgear/XlsxWriter | 79bcaad28d57ac29038b1c74bccc6d611b7a385e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Reference workbook the generated output is compared against.
        self.set_filename('chart_data_labels02.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""

        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})

        # Fixed axis ids — presumably pinned so the generated chart XML matches
        # the stored reference file byte-for-byte; confirm against the
        # comparison harness if changing.
        chart.axis_ids = [47721856, 53641216]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])

        # First two series exercise different data-label positions.
        chart.add_series({
            'values': '=Sheet1!$A$1:$A$5',
            'data_labels': {'value': 1, 'position': 'inside_end'},
        })

        chart.add_series({
            'values': '=Sheet1!$B$1:$B$5',
            'data_labels': {'value': 1, 'position': 'center'},
        })

        chart.add_series({'values': '=Sheet1!$C$1:$C$5'})

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
| 24.683333 | 79 | 0.538825 | true | true | |
1c30db6ad13e9d9f75d9cca786f0767d98239ed3 | 10,186 | py | Python | models/base_model.py | santisy/pytorch-CycleGAN-and-pix2pix | 0d78a3c34bea14316dba852724919fb3e75d1575 | [
"BSD-3-Clause"
] | null | null | null | models/base_model.py | santisy/pytorch-CycleGAN-and-pix2pix | 0d78a3c34bea14316dba852724919fb3e75d1575 | [
"BSD-3-Clause"
] | null | null | null | models/base_model.py | santisy/pytorch-CycleGAN-and-pix2pix | 0d78a3c34bea14316dba852724919fb3e75d1575 | [
"BSD-3-Clause"
] | null | null | null | import os
import torch
from collections import OrderedDict
from abc import ABCMeta, abstractmethod
from . import networks
class BaseModel(object):
    """This class is an abstract base class (ABC) for models.
    To create a subclass, you need to implement the following five functions:
        -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
        -- <set_input>: unpack data from dataset and apply preprocessing.
        -- <forward>: produce intermediate results.
        -- <optimize_parameters>: calculate losses, gradients, and update network weights.
        -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
    """

    # Python-2 style ABC declaration (has no effect on Python 3; kept as-is).
    __metaclass__ = ABCMeta

    def __init__(self, opt):
        """Initialize the BaseModel class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions

        When creating your custom class, you need to implement your own initialization.
        In this function, you should first call <BaseModel.__init__(self, opt)>
        Then, you need to define four lists:
            -- self.loss_names (str list):          specify the training losses that you want to plot and save.
            -- self.model_names (str list):         define networks used in our training.
            -- self.visual_names (str list):        specify the images that you want to display and save.
            -- self.optimizers (optimizer list):    define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
        """
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')  # get device name: CPU or GPU
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
        if opt.preprocess != 'scale_width':  # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
            torch.backends.cudnn.benchmark = True
        self.loss_names = []
        self.model_names = []
        self.visual_names = []
        self.optimizers = []
        self.image_paths = []

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new model-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        return parser

    @abstractmethod
    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): includes the data itself and its metadata information.
        """
        pass

    @abstractmethod
    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        pass

    @abstractmethod
    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration"""
        pass

    def setup(self, opt):
        """Load and print networks; create schedulers

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        if self.isTrain:
            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
        if not self.isTrain or opt.continue_train:
            load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
            self.load_networks(load_suffix)
        self.print_networks(opt.verbose)

    def eval(self):
        """Make models eval mode during test time"""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                net.eval()

    def test(self):
        """Forward function used in test time.

        This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
        It also calls <compute_visuals> to produce additional visualization results
        """
        with torch.no_grad():
            self.forward()
            self.compute_visuals()

    def compute_visuals(self):
        """Calculate additional output images for visdom and HTML visualization"""
        pass

    def get_image_paths(self):
        """ Return image paths that are used to load current data"""
        return self.image_paths

    def update_learning_rate(self):
        """Update learning rates for all the networks; called at the end of every epoch"""
        for scheduler in self.schedulers:
            scheduler.step()
        lr = self.optimizers[0].param_groups[0]['lr']
        print('learning rate = %.7f' % lr)

    def get_current_visuals(self):
        """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
        visual_ret = OrderedDict()
        for name in self.visual_names:
            if isinstance(name, str):
                visual_ret[name] = getattr(self, name)
        return visual_ret

    def get_current_losses(self):
        """Return training losses / errors. train.py will print out these errors on console, and save them to a file"""
        errors_ret = OrderedDict()
        for name in self.loss_names:
            if isinstance(name, str):
                errors_ret[name] = float(getattr(self, 'loss_' + name))  # float(...) works for both scalar tensor and float number
        return errors_ret

    def save_networks(self, epoch):
        """Save all the networks to the disk.

        Parameters:
            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        """
        for name in self.model_names:
            if isinstance(name, str):
                save_filename = '%s_net_%s.pth' % (epoch, name)
                save_path = os.path.join(self.save_dir, save_filename)
                net = getattr(self, 'net' + name)

                if len(self.gpu_ids) > 0 and torch.cuda.is_available():
                    # Save the CPU state dict, then move the net back to GPU.
                    torch.save(net.module.cpu().state_dict(), save_path)
                    net.cuda(self.gpu_ids[0])
                else:
                    torch.save(net.cpu().state_dict(), save_path)

    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
        """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
        key = keys[i]
        if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
            if module.__class__.__name__.startswith('InstanceNorm') and \
                    (key == 'running_mean' or key == 'running_var'):
                if getattr(module, key) is None:
                    state_dict.pop('.'.join(keys))
            if module.__class__.__name__.startswith('InstanceNorm') and \
                    (key == 'num_batches_tracked'):
                state_dict.pop('.'.join(keys))
        else:
            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)

    def load_networks(self, epoch):
        """Load all the networks from the disk.

        Parameters:
            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        """
        for name in self.model_names:
            if isinstance(name, str):
                load_filename = '%s_net_%s.pth' % (epoch, name)
                load_path = os.path.join(self.save_dir, load_filename)
                net = getattr(self, 'net' + name)
                if isinstance(net, torch.nn.DataParallel):
                    net = net.module
                print('loading the model from %s' % load_path)
                # if you are using PyTorch newer than 0.4 (e.g., built from
                # GitHub source), you can remove str() on self.device
                state_dict = torch.load(load_path, map_location=str(self.device))
                if hasattr(state_dict, '_metadata'):
                    del state_dict._metadata

                # patch InstanceNorm checkpoints prior to 0.4
                for key in list(state_dict.keys()):  # need to copy keys here because we mutate in loop
                    self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
                net.load_state_dict(state_dict)

    def print_networks(self, verbose):
        """Print the total number of parameters in the network and (if verbose) network architecture

        Parameters:
            verbose (bool) -- if verbose: print the network architecture
        """
        print('---------- Networks initialized -------------')
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                num_params = 0
                for param in net.parameters():
                    num_params += param.numel()
                if verbose:
                    print(net)
                print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
        print('-----------------------------------------------')

    def set_requires_grad(self, nets, requires_grad=False):
        """Set requires_grad=False for all the networks to avoid unnecessary computations

        Parameters:
            nets (network list)   -- a list of networks
            requires_grad (bool)  -- whether the networks require gradients or not
        """
        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad
| 45.070796 | 260 | 0.603181 | import os
import torch
from collections import OrderedDict
from abc import ABCMeta, abstractmethod
from . import networks
class BaseModel(object):
__metaclass__ = ABCMeta
def __init__(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
if opt.preprocess != 'scale_width':
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizers = []
self.image_paths = []
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
@abstractmethod
def set_input(self, input):
pass
@abstractmethod
def forward(self):
pass
@abstractmethod
def optimize_parameters(self):
pass
def setup(self, opt):
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
self.load_networks(load_suffix)
self.print_networks(opt.verbose)
def eval(self):
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def test(self):
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
pass
def get_image_paths(self):
return self.image_paths
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
def get_current_visuals(self):
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name))
return errors_ret
def save_networks(self, epoch):
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
key = keys[i]
if i + 1 == len(keys):
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
def load_networks(self, epoch):
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
for key in list(state_dict.keys()):
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
def print_networks(self, verbose):
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
| true | true |
1c30db80a8a237b9c9b138d81fccccaf69511952 | 8,641 | py | Python | homework/hw03/hw03.py | zltshadow/CS61A-2019-summer | 0f5dd0be5f51927364aec1bc974526837328b695 | [
"MIT"
] | 3 | 2021-11-21T06:09:39.000Z | 2022-03-12T08:05:27.000Z | homework/hw03/hw03.py | zltshadow/CS61A-2019-summer | 0f5dd0be5f51927364aec1bc974526837328b695 | [
"MIT"
] | null | null | null | homework/hw03/hw03.py | zltshadow/CS61A-2019-summer | 0f5dd0be5f51927364aec1bc974526837328b695 | [
"MIT"
] | null | null | null | HW_SOURCE_FILE = 'hw03.py'
#########
# Trees #
#########
def tree(label, branches=()):
    """Construct a tree with the given label value and a sequence of branches.

    The default is an immutable empty tuple rather than the mutable ``[]``
    (a shared mutable default is a classic Python pitfall); any iterable of
    trees is accepted, so all existing callers keep working.
    """
    for branch in branches:
        assert is_tree(branch), 'branches must be trees'
    return [label] + list(branches)


def label(tree):
    """Return the label value of a tree."""
    return tree[0]


def branches(tree):
    """Return the list of branches of the given tree."""
    return tree[1:]


def is_tree(tree):
    """Returns True if the given tree is a tree, and False otherwise."""
    # A tree is a non-empty list whose tail elements are themselves trees.
    if type(tree) != list or len(tree) < 1:
        return False
    for branch in branches(tree):
        if not is_tree(branch):
            return False
    return True


def is_leaf(tree):
    """Returns True if the given tree's list of branches is empty, and False
    otherwise.
    """
    return not branches(tree)


def print_tree(t, indent=0):
    """Print a representation of this tree in which each node is
    indented by two spaces times its depth from the root.

    >>> print_tree(tree(1))
    1
    >>> print_tree(tree(1, [tree(2)]))
    1
      2
    >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])])
    >>> print_tree(numbers)
    1
      2
      3
        4
        5
      6
        7
    """
    print('  ' * indent + str(label(t)))
    for b in branches(t):
        print_tree(b, indent + 1)


def copy_tree(t):
    """Returns a copy of t. Only for testing purposes.

    >>> t = tree(5)
    >>> copy = copy_tree(t)
    >>> t = tree(6)
    >>> print_tree(copy)
    5
    """
    return tree(label(t), [copy_tree(b) for b in branches(t)])
#############
# Questions #
#############
def w(z):
    """Invert the Cantor pairing: return the diagonal index of pair number z.

    Defined with ``def`` rather than ``w = lambda ...`` (PEP 8 / E731):
    behavior is identical, but the function gets a proper name for tracebacks.
    """
    return int(((8 * z + 1) ** 0.5 - 1) / 2)


def intersection(st, ave):
    """Represent an intersection using the Cantor pairing function."""
    return (st + ave) * (st + ave + 1) // 2 + ave


def street(inter):
    """Return the street number encoded in intersection `inter`."""
    return w(inter) - avenue(inter)


def avenue(inter):
    """Return the avenue number encoded in intersection `inter`."""
    return inter - (w(inter) ** 2 + w(inter)) // 2


def taxicab(a, b):
    """Return the taxicab distance between two intersections.

    >>> times_square = intersection(46, 7)
    >>> ess_a_bagel = intersection(51, 3)
    >>> taxicab(times_square, ess_a_bagel)
    9
    >>> taxicab(ess_a_bagel, times_square)
    9
    """
    return abs(street(a) - street(b)) + abs(avenue(a) - avenue(b))
def flatten(lst):
    """Returns a flattened version of lst.

    The input list is not mutated; nested lists are flattened recursively.

    >>> flatten([1, 2, 3])
    [1, 2, 3]
    >>> x = [1, [2, 3], 4]
    >>> flatten(x)
    [1, 2, 3, 4]
    >>> x
    [1, [2, 3], 4]
    """
    result = []
    for item in lst:
        # Exact-type check (not isinstance): list subclasses are treated as
        # atoms, matching the original comprehension's `type(i) != list`.
        if type(item) is list:
            result.extend(flatten(item))
        else:
            result.append(item)
    return result
def replace_leaf(t, old, new):
    """Returns a new tree where every leaf value equal to old has
    been replaced with new.  Interior labels are never changed and the
    input tree is left untouched.

    >>> yggdrasil = tree('odin',
    ...                  [tree('balder',
    ...                        [tree('thor'),
    ...                         tree('freya')]),
    ...                   tree('frigg',
    ...                        [tree('thor')]),
    ...                   tree('thor',
    ...                        [tree('sif'),
    ...                         tree('thor')]),
    ...                   tree('thor')])
    >>> laerad = copy_tree(yggdrasil)
    >>> print_tree(replace_leaf(yggdrasil, 'thor', 'freya'))
    odin
      balder
        thor
        freya
      frigg
        freya
      thor
        sif
        freya
      freya
    >>> laerad == yggdrasil
    True
    """
    if is_leaf(t):
        value = new if label(t) == old else label(t)
        return tree(value)
    new_branches = [replace_leaf(b, old, new) for b in branches(t)]
    return tree(label(t), new_branches)
# Mobiles
def mobile(left, right):
    """Construct a mobile from a left side and a right side."""
    assert is_side(left), "left must be a side"
    assert is_side(right), "right must be a side"
    return ['mobile', left, right]


def is_mobile(m):
    """Return whether m is a mobile."""
    return type(m) == list and len(m) == 3 and m[0] == 'mobile'


def left(m):
    """Select the left side of a mobile."""
    assert is_mobile(m), "must call left on a mobile"
    return m[1]


def right(m):
    """Select the right side of a mobile."""
    assert is_mobile(m), "must call right on a mobile"
    return m[2]


def side(length, mobile_or_weight):
    """Construct a side: a length of rod with a mobile or weight at the end."""
    assert is_mobile(mobile_or_weight) or is_weight(mobile_or_weight)
    return ['side', length, mobile_or_weight]


def is_side(s):
    """Return whether s is a side."""
    return type(s) == list and len(s) == 3 and s[0] == 'side'


def length(s):
    """Select the length of a side."""
    assert is_side(s), "must call length on a side"
    return s[1]


def end(s):
    """Select the mobile or weight hanging at the end of a side."""
    assert is_side(s), "must call end on a side"
    return s[2]


def weight(size):
    """Construct a weight of some size."""
    assert size > 0
    # Removed the leftover scaffolding string "*** YOUR CODE HERE ***"
    # (a no-op expression statement from the homework template).
    return ['weight', size]


def size(w):
    """Select the size of a weight."""
    assert is_weight(w), 'must call size on a weight'
    return w[1]


def is_weight(w):
    """Whether w is a weight."""
    return type(w) == list and len(w) == 2 and w[0] == 'weight'
def examples():
    """Return three example mobiles (t, u, v) used by the doctests below.

    t and u are simple balanced mobiles; v hangs t and u from a common rod.
    """
    t = mobile(side(1, weight(2)),
               side(2, weight(1)))
    u = mobile(side(5, weight(1)),
               side(1, mobile(side(2, weight(3)),
                              side(3, weight(2)))))
    v = mobile(side(4, t), side(2, u))
    return (t, u, v)
def total_weight(m):
    """Return the total weight of m, a weight or mobile.

    >>> t, u, v = examples()
    >>> total_weight(t)
    3
    >>> total_weight(u)
    6
    >>> total_weight(v)
    9
    """
    if is_weight(m):
        return size(m)
    else:
        assert is_mobile(m), "must get total weight of a mobile or a weight"
        # A mobile's weight is the sum of whatever hangs from its two sides.
        return total_weight(end(left(m))) + total_weight(end(right(m)))
def balanced(m):
    """Return whether m is balanced.

    A mobile is balanced when the torques of its two sides are equal
    (side length times total hanging weight) and everything hanging from
    each side is itself balanced.  Rewritten from an unreadable nested
    one-line lambda into equivalent straight-line code.

    >>> t, u, v = examples()
    >>> balanced(t)
    True
    >>> balanced(v)
    True
    >>> w = mobile(side(3, t), side(2, u))
    >>> balanced(w)
    False
    >>> balanced(mobile(side(1, v), side(1, w)))
    False
    >>> balanced(mobile(side(1, w), side(1, v)))
    False
    """
    left_side, right_side = left(m), right(m)
    left_end, right_end = end(left_side), end(right_side)
    # Torques on both sides must match.
    if length(left_side) * total_weight(left_end) != length(right_side) * total_weight(right_end):
        return False
    # Plain weights are trivially balanced; sub-mobiles must recurse.
    return ((is_weight(left_end) or balanced(left_end)) and
            (is_weight(right_end) or balanced(right_end)))
def totals_tree(m):
    """Return a tree representing the mobile with its total weight at the root.

    >>> t, u, v = examples()
    >>> print_tree(totals_tree(t))
    3
      2
      1
    >>> print_tree(totals_tree(u))
    6
      1
      5
        3
        2
    >>> print_tree(totals_tree(v))
    9
      3
        2
        1
      6
        1
        5
          3
          2
    """
    if not is_mobile(m):
        # A bare weight becomes a leaf labelled with its size.
        return tree(total_weight(m))
    subtrees = [totals_tree(end(left(m))), totals_tree(end(right(m)))]
    return tree(total_weight(m), subtrees)
###################
# Extra Questions #
###################
def zero(f):
    # Church numeral 0: apply f zero times.
    return lambda x: x


def successor(n):
    # Successor: apply f once more than n does.
    return lambda f: lambda x: f(n(f)(x))


def one(f):
    """Church numeral 1: same as successor(zero)"""
    return lambda x: f(x)


def two(f):
    """Church numeral 2: same as successor(successor(zero))"""
    return lambda x: f(f(x))


# Church numeral 3, built from two via successor.
three = successor(two)
def church_to_int(n):
    """Convert the Church numeral n to a Python integer.

    A Church numeral applies its first argument k times; feeding it an
    increment function and a starting value of 0 therefore yields k.

    >>> church_to_int(zero)
    0
    >>> church_to_int(one)
    1
    >>> church_to_int(two)
    2
    >>> church_to_int(three)
    3
    """
    def increment(value):
        return value + 1
    return n(increment)(0)
def add_church(m, n):
    """Return the Church numeral for m + n, for Church numerals m and n.

    Applies f n times, then m more times: lam m. lam n. lam f. lam x. m f (n f x)

    >>> church_to_int(add_church(two, three))
    5
    """
    def summed(f):
        def apply_sum(x):
            return m(f)(n(f)(x))
        return apply_sum
    return summed
def mul_church(m, n):
    """Return the Church numeral for m * n, for Church numerals m and n.

    Composes: "apply f n times" is itself applied m times:
    lam m. lam n. lam f. m (n f)

    >>> four = successor(three)
    >>> church_to_int(mul_church(two, three))
    6
    >>> church_to_int(mul_church(three, four))
    12
    """
    def product(f):
        n_times = n(f)
        return m(n_times)
    return product
def pow_church(m, n):
    """Return the Church numeral m ** n, for Church numerals m and n.

    >>> church_to_int(pow_church(two, three))
    8
    >>> church_to_int(pow_church(three, two))
    9
    """
    # n(m) applies "multiply-by-m" (function composition with m) n times,
    # which is exponentiation in the Church encoding.
    return n(m)
| 25.79403 | 210 | 0.550168 | HW_SOURCE_FILE = 'hw03.py'
ert is_tree(branch), 'branches must be trees'
return [label] + list(branches)
def label(tree):
return tree[0]
def branches(tree):
return tree[1:]
def is_tree(tree):
if type(tree) != list or len(tree) < 1:
return False
for branch in branches(tree):
if not is_tree(branch):
return False
return True
def is_leaf(tree):
return not branches(tree)
def print_tree(t, indent=0):
print(' ' * indent + str(label(t)))
for b in branches(t):
print_tree(b, indent + 1)
def copy_tree(t):
return tree(label(t), [copy_tree(b) for b in branches(t)])
inter - (w(inter) ** 2 + w(inter)) // 2
w = lambda z: int(((8*z+1)**0.5-1)/2)
def taxicab(a, b):
return abs(street(a) - street(b)) + abs(avenue(a) - avenue(b))
def flatten(lst):
return [x for i in lst for x in ([i] if type(i) != list else flatten(i))]
def replace_leaf(t, old, new):
return tree(new if label(t) == old else label(t)) if is_leaf(t) else tree(label(t), [replace_leaf(b, old, new) for b in branches(t)])
def mobile(left, right):
assert is_side(left), "left must be a side"
assert is_side(right), "right must be a side"
return ['mobile', left, right]
def is_mobile(m):
return type(m) == list and len(m) == 3 and m[0] == 'mobile'
def left(m):
assert is_mobile(m), "must call left on a mobile"
return m[1]
def right(m):
assert is_mobile(m), "must call right on a mobile"
return m[2]
def side(length, mobile_or_weight):
assert is_mobile(mobile_or_weight) or is_weight(mobile_or_weight)
return ['side', length, mobile_or_weight]
def is_side(s):
return type(s) == list and len(s) == 3 and s[0] == 'side'
def length(s):
assert is_side(s), "must call length on a side"
return s[1]
def end(s):
assert is_side(s), "must call end on a side"
return s[2]
def weight(size):
assert size > 0
return ['weight', size]
def size(w):
assert is_weight(w), 'must call size on a weight'
return w[1]
def is_weight(w):
return type(w) == list and len(w) == 2 and w[0] == 'weight'
def examples():
t = mobile(side(1, weight(2)),
side(2, weight(1)))
u = mobile(side(5, weight(1)),
side(1, mobile(side(2, weight(3)),
side(3, weight(2)))))
v = mobile(side(4, t), side(2, u))
return (t, u, v)
def total_weight(m):
if is_weight(m):
return size(m)
else:
assert is_mobile(m), "must get total weight of a mobile or a weight"
return total_weight(end(left(m))) + total_weight(end(right(m)))
def balanced(m):
return (lambda l, r: (lambda le, re: length(l) * total_weight(le) == length(r) * total_weight(re) and (is_weight(le) or balanced(le)) and (is_weight(re) or balanced(re)))(end(l), end(r)))(left(m), right(m))
def totals_tree(m):
return tree(total_weight(m), [totals_tree(end(left(m))), totals_tree(end(right(m)))]) if is_mobile(m) else tree(total_weight(m))
church(m, n):
return lambda f: m(n(f))
def pow_church(m, n):
return n(m)
| true | true |
1c30db9fc9464f74ad0004cc9f87dc057c9d0c09 | 2,410 | py | Python | pdc/apps/common/models.py | hluk/product-definition-center | af79f73c30fa5f5709ba03d584b7a49b83166b81 | [
"MIT"
] | 18 | 2015-12-15T17:56:18.000Z | 2021-04-10T13:49:48.000Z | pdc/apps/common/models.py | hluk/product-definition-center | af79f73c30fa5f5709ba03d584b7a49b83166b81 | [
"MIT"
] | 303 | 2015-11-18T07:37:06.000Z | 2021-05-26T12:34:01.000Z | pdc/apps/common/models.py | hluk/product-definition-center | af79f73c30fa5f5709ba03d584b7a49b83166b81 | [
"MIT"
] | 27 | 2015-11-19T20:33:54.000Z | 2021-03-25T08:15:28.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from django.db import models
from pdc.apps.common.validators import validate_sigkey
def get_cached_id(cls, cache_field, value, create=False):
    """Resolve `value` to the database ``id`` of a `cls` row, memoized in ``cls.CACHE``.

    Falsy values resolve to None.  On a cache miss the row is looked up (or
    created, when `create` is true) via ``cls.objects`` and its id is cached.
    """
    if not value:
        return None
    cached = cls.CACHE.get(value)
    if cached is not None:
        return cached
    lookup = {cache_field: value}
    if create:
        obj, _ = cls.objects.get_or_create(**lookup)
    else:
        obj = cls.objects.get(**lookup)
    cls.CACHE[value] = obj.id
    return obj.id
class Arch(models.Model):
    """A CPU architecture, identified by its unique name (e.g. "x86_64")."""

    name = models.CharField(max_length=50, unique=True)

    class Meta:
        pass

    def __unicode__(self):
        return u"%s" % (self.name, )

    def export(self):
        # FIXME: export has been deprecated, use serializer instead.
        return {"name": self.name}
class SigKey(models.Model):
    """A signing key, identified by its unique key id."""

    key_id = models.CharField(max_length=20, unique=True, validators=[validate_sigkey])
    name = models.CharField(max_length=50, blank=True, null=True, unique=True)
    description = models.CharField(max_length=100, blank=True)

    def __unicode__(self):
        return u"%s" % self.key_id

    # Class-level key_id -> id cache shared by get_cached_id.
    CACHE = {}

    @classmethod
    def get_cached_id(cls, value, create=False):
        """Resolve a `key_id` value to its database `id`, memoized in CACHE."""
        return get_cached_id(cls, "key_id", value, create=create)

    def export(self):
        return {
            "key_id": self.key_id,
            "name": self.name,
            "description": self.description,
        }
class Label(models.Model):
    """
    Record label/tag with its name and description.
    """
    name = models.CharField(max_length=100, unique=True)
    description = models.CharField(max_length=500)

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return u'%s' % self.name

    # FIXME: Compatible with ChangeSetMixin which still uses export function to record changeset
    def export(self, fields=None):
        # `fields` restricts which attributes are serialized; default is both.
        _fields = ['name', 'description'] if fields is None else fields
        result = dict()
        if 'name' in _fields:
            result['name'] = self.name
        if 'description' in _fields:
            result['description'] = self.description
        return result
| 27.386364 | 100 | 0.610373 |
from django.db import models
from pdc.apps.common.validators import validate_sigkey
def get_cached_id(cls, cache_field, value, create=False):
if not value:
return None
result = cls.CACHE.get(value, None)
if result is None:
if create:
obj, _ = cls.objects.get_or_create(**{cache_field: value})
else:
obj = cls.objects.get(**{cache_field: value})
cls.CACHE[value] = obj.id
result = obj.id
return result
class Arch(models.Model):
name = models.CharField(max_length=50, unique=True)
class Meta:
pass
def __unicode__(self):
return u"%s" % (self.name, )
def export(self):
return {"name": self.name}
class SigKey(models.Model):
    """Package signing key; ``key_id`` format is enforced by validate_sigkey."""
    key_id = models.CharField(max_length=20, unique=True, validators=[validate_sigkey])
    # null=True lets several rows leave the (unique) name unset.
    name = models.CharField(max_length=50, blank=True, null=True, unique=True)
    description = models.CharField(max_length=100, blank=True)
    def __unicode__(self):
        return u"%s" % self.key_id
    # Process-wide key_id -> primary-key cache used by get_cached_id().
    CACHE = {}
    @classmethod
    def get_cached_id(cls, value, create=False):
        """Resolve a key_id to the row's primary key via the cache."""
        return get_cached_id(cls, "key_id", value, create=create)
    def export(self):
        """Serialize this key into a plain dict."""
        return {
            "key_id": self.key_id,
            "name": self.name,
            "description": self.description,
        }
class Label(models.Model):
    """A named tag with a free-form description, ordered by name."""
    name = models.CharField(max_length=100, unique=True)
    description = models.CharField(max_length=500)
    class Meta:
        ordering = ('name',)
    def __unicode__(self):
        return u'%s' % self.name
    def export(self, fields=None):
        """Serialize the requested *fields* (default: all) into a dict."""
        _fields = ['name', 'description'] if fields is None else fields
        result = dict()
        if 'name' in _fields:
            result['name'] = self.name
        if 'description' in _fields:
            result['description'] = self.description
        return result
| true | true |
1c30dc5664d75ff7cfa74ca426b709badd9afac5 | 3,518 | py | Python | platon/providers/auto.py | shinnng/platon.py | 3197fac3839896290210da04dd0d45f0bdc731ce | [
"MIT"
] | null | null | null | platon/providers/auto.py | shinnng/platon.py | 3197fac3839896290210da04dd0d45f0bdc731ce | [
"MIT"
] | null | null | null | platon/providers/auto.py | shinnng/platon.py | 3197fac3839896290210da04dd0d45f0bdc731ce | [
"MIT"
] | null | null | null | import os
from typing import (
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from urllib.parse import (
urlparse,
)
from platon_typing import (
URI,
)
from platon.exceptions import (
CannotHandleRequest,
)
from platon.providers import (
BaseProvider,
HTTPProvider,
IPCProvider,
WebsocketProvider,
)
from platon.types import (
RPCEndpoint,
RPCResponse,
)
HTTP_SCHEMES = {'http', 'https'}
WS_SCHEMES = {'ws', 'wss'}
def load_provider_from_environment() -> Optional[BaseProvider]:
    """Build a provider from the ``PLATON_PROVIDER_URI`` environment variable.

    Returns None when the variable is unset or empty.
    """
    uri_string = URI(os.environ.get('PLATON_PROVIDER_URI', ''))
    if not uri_string:
        return None
    return load_provider_from_uri(uri_string)
def load_provider_from_uri(
    uri_string: URI, headers: Optional[Dict[str, Tuple[str, str]]] = None
) -> BaseProvider:
    """Instantiate the provider implementation matching the URI scheme.

    ``file`` -> IPCProvider, http(s) -> HTTPProvider (with optional
    *headers*), ws(s) -> WebsocketProvider; any other scheme raises
    NotImplementedError.
    """
    parsed = urlparse(uri_string)
    if parsed.scheme == 'file':
        return IPCProvider(parsed.path)
    if parsed.scheme in HTTP_SCHEMES:
        return HTTPProvider(uri_string, headers)
    if parsed.scheme in WS_SCHEMES:
        return WebsocketProvider(uri_string)
    raise NotImplementedError(
        'Web3 does not know how to connect to scheme %r in %r' % (
            parsed.scheme,
            uri_string,
        )
    )
class AutoProvider(BaseProvider):
    """Provider that discovers a working backend at request time.

    Candidate factories are tried in order until one yields a connected
    provider; the winner is cached in ``_active_provider`` and reused
    until an IOError forces re-discovery.
    """
    # Tried in order: env-var URI first, then IPC, HTTP and websocket
    # providers with their default constructor arguments.
    default_providers = (
        load_provider_from_environment,
        IPCProvider,
        HTTPProvider,
        WebsocketProvider,
    )
    # Class-level default; shadowed by an instance attribute once
    # discovery succeeds in _get_active_provider().
    _active_provider = None
    def __init__(
        self,
        potential_providers: Optional[Sequence[Union[Callable[..., BaseProvider],
                                                     Type[BaseProvider]]]] = None
    ) -> None:
        """
        :param iterable potential_providers: ordered series of provider classes to attempt with

        AutoProvider will initialize each potential provider (without arguments),
        in an attempt to find an active node. The list will default to
        :attribute:`default_providers`.
        """
        if potential_providers:
            self._potential_providers = potential_providers
        else:
            self._potential_providers = self.default_providers
    def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
        """Proxy the RPC call to the active provider."""
        try:
            return self._proxy_request(method, params)
        except IOError:
            # The cached provider went away; retry with fresh discovery.
            return self._proxy_request(method, params, use_cache=False)
    def isConnected(self) -> bool:
        """Return True when a (possibly cached) provider is connected."""
        provider = self._get_active_provider(use_cache=True)
        return provider is not None and provider.isConnected()
    def _proxy_request(self, method: RPCEndpoint, params: Any,
                       use_cache: bool = True) -> RPCResponse:
        """Forward the call; raise CannotHandleRequest when none is found."""
        provider = self._get_active_provider(use_cache)
        if provider is None:
            raise CannotHandleRequest(
                "Could not discover provider while making request: "
                "method:{0}\n"
                "params:{1}\n".format(
                    method,
                    params))
        return provider.make_request(method, params)
    def _get_active_provider(self, use_cache: bool) -> Optional[BaseProvider]:
        """Return (and cache) the first connected candidate, else None."""
        if use_cache and self._active_provider is not None:
            return self._active_provider
        for Provider in self._potential_providers:
            # Candidates are zero-arg callables; load_provider_from_environment
            # may legitimately return None.
            provider = Provider()
            if provider is not None and provider.isConnected():
                self._active_provider = provider
                return provider
        return None
| 27.700787 | 95 | 0.627061 | import os
from typing import (
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from urllib.parse import (
urlparse,
)
from platon_typing import (
URI,
)
from platon.exceptions import (
CannotHandleRequest,
)
from platon.providers import (
BaseProvider,
HTTPProvider,
IPCProvider,
WebsocketProvider,
)
from platon.types import (
RPCEndpoint,
RPCResponse,
)
HTTP_SCHEMES = {'http', 'https'}
WS_SCHEMES = {'ws', 'wss'}
def load_provider_from_environment() -> BaseProvider:
uri_string = URI(os.environ.get('PLATON_PROVIDER_URI', ''))
if not uri_string:
return None
return load_provider_from_uri(uri_string)
def load_provider_from_uri(
uri_string: URI, headers: Optional[Dict[str, Tuple[str, str]]] = None
) -> BaseProvider:
uri = urlparse(uri_string)
if uri.scheme == 'file':
return IPCProvider(uri.path)
elif uri.scheme in HTTP_SCHEMES:
return HTTPProvider(uri_string, headers)
elif uri.scheme in WS_SCHEMES:
return WebsocketProvider(uri_string)
else:
raise NotImplementedError(
'Web3 does not know how to connect to scheme %r in %r' % (
uri.scheme,
uri_string,
)
)
class AutoProvider(BaseProvider):
default_providers = (
load_provider_from_environment,
IPCProvider,
HTTPProvider,
WebsocketProvider,
)
_active_provider = None
def __init__(
self,
potential_providers: Optional[Sequence[Union[Callable[..., BaseProvider],
Type[BaseProvider]]]] = None
) -> None:
if potential_providers:
self._potential_providers = potential_providers
else:
self._potential_providers = self.default_providers
def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
try:
return self._proxy_request(method, params)
except IOError:
return self._proxy_request(method, params, use_cache=False)
def isConnected(self) -> bool:
provider = self._get_active_provider(use_cache=True)
return provider is not None and provider.isConnected()
def _proxy_request(self, method: RPCEndpoint, params: Any,
use_cache: bool = True) -> RPCResponse:
provider = self._get_active_provider(use_cache)
if provider is None:
raise CannotHandleRequest(
"Could not discover provider while making request: "
"method:{0}\n"
"params:{1}\n".format(
method,
params))
return provider.make_request(method, params)
def _get_active_provider(self, use_cache: bool) -> Optional[BaseProvider]:
if use_cache and self._active_provider is not None:
return self._active_provider
for Provider in self._potential_providers:
provider = Provider()
if provider is not None and provider.isConnected():
self._active_provider = provider
return provider
return None
| true | true |
1c30dd5d18fbc8e97d48bcb238cc760143ff089b | 2,907 | py | Python | docker/package/fedora.py | phanngt/tezos-packaging | ca804b53709c61fc5c959e02dafb69ccafddc26e | [
"Apache-2.0",
"MIT"
] | null | null | null | docker/package/fedora.py | phanngt/tezos-packaging | ca804b53709c61fc5c959e02dafb69ccafddc26e | [
"Apache-2.0",
"MIT"
] | null | null | null | docker/package/fedora.py | phanngt/tezos-packaging | ca804b53709c61fc5c959e02dafb69ccafddc26e | [
"Apache-2.0",
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2021 TQ Tezos <https://tqtezos.com/>
#
# SPDX-License-Identifier: LicenseRef-MIT-TQ
import os, shutil, subprocess
from typing import List
from .model import AbstractPackage
from .systemd import print_service_file
def build_fedora_package(
    pkg: AbstractPackage,
    build_deps: List[str],
    run_deps: List[str],
    is_source: bool,
):
    """Build a binary (or source, when *is_source*) RPM for *pkg*.

    Stages the package sources and systemd unit files into a
    ``<name>-<version>`` directory, tars it into ``~/rpmbuild/SOURCES``,
    generates the spec file and invokes ``rpmbuild``.

    :param pkg: package definition used to generate sources and spec file
    :param build_deps: build-time dependencies (added to the spec's requires)
    :param run_deps: runtime dependencies
    :param is_source: build a source RPM (``-bs``) instead of binary (``-bb``)
    """
    # Named src_dir (not `dir`) to avoid shadowing the builtin.
    src_dir = f"{pkg.name}-{pkg.meta.version}"
    cwd = os.path.dirname(__file__)
    home = os.environ["HOME"]
    pkg.fetch_sources(src_dir)
    pkg.gen_makefile(f"{src_dir}/Makefile")
    pkg.gen_license(f"{src_dir}/LICENSE")
    for systemd_unit in pkg.systemd_units:
        _stage_systemd_unit(pkg, systemd_unit, src_dir, cwd)
    subprocess.run(["tar", "-czf", f"{src_dir}.tar.gz", src_dir], check=True)
    os.makedirs(f"{home}/rpmbuild/SPECS", exist_ok=True)
    os.makedirs(f"{home}/rpmbuild/SOURCES", exist_ok=True)
    pkg.gen_spec_file(
        build_deps + run_deps, run_deps, f"{home}/rpmbuild/SPECS/{pkg.name}.spec"
    )
    os.rename(f"{src_dir}.tar.gz", f"{home}/rpmbuild/SOURCES/{src_dir}.tar.gz")
    subprocess.run(
        [
            "rpmbuild",
            "-bs" if is_source else "-bb",
            f"{home}/rpmbuild/SPECS/{pkg.name}.spec",
        ],
        check=True,
    )
    # Clean up the staging directory in-process instead of spawning
    # `rm -rf` through a shell (shell=True).
    shutil.rmtree(src_dir)


def _stage_systemd_unit(pkg, systemd_unit, src_dir, cwd):
    """Render one systemd unit and copy its config/startup/prestart files.

    Writes ``<unit>.service`` (or ``<unit>@.service`` for instantiated
    units) into *src_dir* and copies the default config file and helper
    scripts next to it.  Mutates the unit's ``environment_file`` to lower
    case -- presumably to match the packaged file name; confirm.
    """
    service = systemd_unit.service_file.service
    if service.environment_file is not None:
        service.environment_file = service.environment_file.lower()
    # Units are named <pkg> or <pkg>-<suffix>.
    if systemd_unit.suffix is None:
        unit_name = f"{pkg.name}"
    else:
        unit_name = f"{pkg.name}-{systemd_unit.suffix}"
    out_path = (
        f"{src_dir}/{unit_name}@.service"
        if systemd_unit.instances is not None
        else f"{src_dir}/{unit_name}.service"
    )
    print_service_file(systemd_unit.service_file, out_path)
    if systemd_unit.config_file is not None:
        shutil.copy(
            f"{cwd}/defaults/{systemd_unit.config_file}",
            f"{src_dir}/{unit_name}.default",
        )
    if systemd_unit.startup_script is not None:
        # The installed name may differ from the source script name.
        dest_path = f"{src_dir}/{systemd_unit.startup_script}"
        source_script_name = (
            systemd_unit.startup_script
            if systemd_unit.startup_script_source is None
            else systemd_unit.startup_script_source
        )
        shutil.copy(f"{cwd}/scripts/{source_script_name}", dest_path)
    if systemd_unit.prestart_script is not None:
        dest_path = f"{src_dir}/{systemd_unit.prestart_script}"
        source_path = (
            f"{cwd}/scripts/{systemd_unit.prestart_script}"
            if systemd_unit.prestart_script_source is None
            else f"{cwd}/scripts/{systemd_unit.prestart_script_source}"
        )
        shutil.copy(source_path, dest_path)
| 36.797468 | 81 | 0.616099 |
import os, shutil, subprocess
from typing import List
from .model import AbstractPackage
from .systemd import print_service_file
def build_fedora_package(
pkg: AbstractPackage,
build_deps: List[str],
run_deps: List[str],
is_source: bool,
):
dir = f"{pkg.name}-{pkg.meta.version}"
cwd = os.path.dirname(__file__)
home = os.environ["HOME"]
pkg.fetch_sources(dir)
pkg.gen_makefile(f"{dir}/Makefile")
pkg.gen_license(f"{dir}/LICENSE")
for systemd_unit in pkg.systemd_units:
if systemd_unit.service_file.service.environment_file is not None:
systemd_unit.service_file.service.environment_file = (
systemd_unit.service_file.service.environment_file.lower()
)
if systemd_unit.suffix is None:
unit_name = f"{pkg.name}"
else:
unit_name = f"{pkg.name}-{systemd_unit.suffix}"
out_path = (
f"{dir}/{unit_name}@.service"
if systemd_unit.instances is not None
else f"{dir}/{unit_name}.service"
)
print_service_file(systemd_unit.service_file, out_path)
if systemd_unit.config_file is not None:
shutil.copy(
f"{cwd}/defaults/{systemd_unit.config_file}",
f"{dir}/{unit_name}.default",
)
if systemd_unit.startup_script is not None:
dest_path = f"{dir}/{systemd_unit.startup_script}"
source_script_name = (
systemd_unit.startup_script
if systemd_unit.startup_script_source is None
else systemd_unit.startup_script_source
)
source_path = f"{cwd}/scripts/{source_script_name}"
shutil.copy(source_path, dest_path)
if systemd_unit.prestart_script is not None:
dest_path = f"{dir}/{systemd_unit.prestart_script}"
source_path = (
f"{cwd}/scripts/{systemd_unit.prestart_script}"
if systemd_unit.prestart_script_source is None
else f"{cwd}/scripts/{systemd_unit.prestart_script_source}"
)
shutil.copy(source_path, dest_path)
subprocess.run(["tar", "-czf", f"{dir}.tar.gz", dir], check=True)
os.makedirs(f"{home}/rpmbuild/SPECS", exist_ok=True)
os.makedirs(f"{home}/rpmbuild/SOURCES", exist_ok=True)
pkg.gen_spec_file(
build_deps + run_deps, run_deps, f"{home}/rpmbuild/SPECS/{pkg.name}.spec"
)
os.rename(f"{dir}.tar.gz", f"{home}/rpmbuild/SOURCES/{dir}.tar.gz")
subprocess.run(
[
"rpmbuild",
"-bs" if is_source else "-bb",
f"{home}/rpmbuild/SPECS/{pkg.name}.spec",
],
check=True,
)
subprocess.run(f"rm -rf {dir}", shell=True, check=True)
| true | true |
1c30dde957ec6ec2c8428b4859b69bf5c1bf83fb | 73 | py | Python | acousticsim/representations/mhec.py | JoFrhwld/python-acoustic-similarity | 50f71835532010b2fedf14b0ca3a52d88a9ab380 | [
"MIT"
] | 5 | 2018-01-15T22:06:20.000Z | 2022-02-21T07:02:40.000Z | acousticsim/representations/mhec.py | JoFrhwld/python-acoustic-similarity | 50f71835532010b2fedf14b0ca3a52d88a9ab380 | [
"MIT"
] | null | null | null | acousticsim/representations/mhec.py | JoFrhwld/python-acoustic-similarity | 50f71835532010b2fedf14b0ca3a52d88a9ab380 | [
"MIT"
] | 2 | 2019-11-28T17:06:27.000Z | 2019-12-05T22:57:28.000Z |
from .base import Representation
class Mhec(Representation):
    """Representation for MHEC features.

    NOTE(review): no behaviour yet beyond the Representation base class;
    presumably MHEC = mean Hilbert envelope coefficients -- confirm.
    """
    pass
| 10.428571 | 32 | 0.753425 |
from .base import Representation
class Mhec(Representation):
pass
| true | true |
1c30de6cbe077d2400e83fda34939b783042b201 | 3,421 | py | Python | filter.py | bradyt/beancount-docs | 73342599a9cbd8b0b7b89d5abf453fd87d322ea4 | [
"MIT"
] | 1 | 2020-07-27T00:47:41.000Z | 2020-07-27T00:47:41.000Z | filter.py | bradyt/beancount-docs | 73342599a9cbd8b0b7b89d5abf453fd87d322ea4 | [
"MIT"
] | null | null | null | filter.py | bradyt/beancount-docs | 73342599a9cbd8b0b7b89d5abf453fd87d322ea4 | [
"MIT"
] | null | null | null | import json
import logging
import re
from panflute import (
run_filter,
stringify,
BlockQuote,
CodeBlock,
Header,
Image,
LineBreak,
Link,
ListItem,
Para,
Space,
Str,
)
import requests
from constants import GOOGLE_DOC_URL_REGEXP
# Configure root logging at import time; filemode='w' truncates
# filter.log on every run, keeping only the latest invocation's output.
logging.basicConfig(
    filename='filter.log',
    filemode='w',
    level=logging.INFO,
    format='%(message)s',
)
def prepare(doc):
    """Prepend a level-1 Header when the document declares a title.

    Runs once before action(); does nothing when no title metadata is set.
    """
    title = doc.get_metadata('title')
    if not title:
        return
    doc.content.insert(0, Header(Str(title), level=1, identifier='title'))
def resolve_url(url: str) -> "Optional[str]":
    """Map a document link to its local page path, if it is a known Google Doc.

    furius.ca short links are followed first; a link that does not
    redirect (302), does not match the Google Doc URL pattern, or is not
    listed in index.json resolves to None.
    """
    if '//furius.ca' in url:
        # Get the Google Doc url.  stream=True avoids downloading the body;
        # the with-block releases the connection (the original leaked it).
        with requests.get(url, allow_redirects=True, stream=True) as response:
            if any(res.status_code == 302 for res in response.history):
                url = response.url  # Final location
            else:
                # Not a redirect, leave as is
                return None
    match = GOOGLE_DOC_URL_REGEXP.search(url)
    if not match:
        # Not a Google Doc
        return None
    document_id = match.group(1)
    # index.json is re-read on every call; cheap enough for a pandoc filter.
    with open('index.json', 'r') as index_json:
        document_map = json.load(index_json)
    return document_map.get(document_id)
def action(elem, doc):
    """Per-element pandoc filter normalizing Google-Docs-exported markup.

    Returning None keeps *elem* unchanged, returning an element (or list)
    replaces it; attribute mutations on *elem* are applied in place.
    """
    if doc.get_metadata('title') is None:
        # No title -> Beancount Options Reference
        if isinstance(elem, Para):
            # Convert all paragraphs to code blocks
            text = stringify(elem)
            if not text.startswith('option'):
                text = ' ' + text
            return CodeBlock(text)
        # Skip everything else
        return
    if isinstance(elem, BlockQuote):
        if isinstance(elem.parent, ListItem):
            # Don't use blockquotes in lists
            assert len(elem.content) == 1
            return elem.content[0]
        elif any(isinstance(item, CodeBlock) for item in elem.content):
            # Remove blockquotes around code blocks
            return [item for item in elem.content]
        elif len(elem.content) == 1:
            # Convert blockquotes to code blocks
            code = ''
            for item in elem.content[0].content:
                if isinstance(item, Link):
                    # Don't convert links to code
                    break
                elif isinstance(item, Str):
                    code += item.text
                elif isinstance(item, Space):
                    code += ' '
                elif isinstance(item, LineBreak):
                    code += '\n'
                else:
                    code += stringify(item)
            else:
                # for/else: convert only when no Link interrupted the loop.
                return CodeBlock(code)
    elif isinstance(elem, Header):
        # There must be only one level 1 header
        if elem.identifier != 'title':
            elem.level += 1
    elif isinstance(elem, Link):
        if elem.url == stringify(elem):
            # Displayed as url, skip
            pass
        else:
            # Rewrite Google Doc links to their local converted pages.
            resolved = resolve_url(elem.url)
            if resolved:
                elem.url = resolved
    elif isinstance(elem, CodeBlock):
        # Remove unnecessary leading tabs from code blocks
        elem.text = re.sub(r'^\t', '', elem.text, flags=re.MULTILINE)
    elif isinstance(elem, Image):
        # Make image urls explicitly relative.
        elem.url = './' + elem.url
def main(doc=None):
    """Run the panflute filter (prepare + action) over *doc* or stdin."""
    return run_filter(action, prepare=prepare, doc=doc)
if __name__ == '__main__':
    main()
| 27.58871 | 71 | 0.563286 | import json
import logging
import re
from panflute import (
run_filter,
stringify,
BlockQuote,
CodeBlock,
Header,
Image,
LineBreak,
Link,
ListItem,
Para,
Space,
Str,
)
import requests
from constants import GOOGLE_DOC_URL_REGEXP
logging.basicConfig(
filename='filter.log',
filemode='w',
level=logging.INFO,
format='%(message)s',
)
def prepare(doc):
title = doc.get_metadata('title')
if title:
title_elem = Header(Str(title), level=1, identifier='title')
doc.content.insert(0, title_elem)
def resolve_url(url: str) -> str:
if '//furius.ca' in url:
response = requests.get(url, allow_redirects=True, stream=True)
if any(res.status_code == 302 for res in response.history):
url = response.url
else:
return None
match = GOOGLE_DOC_URL_REGEXP.search(url)
if not match:
return None
document_id = match.group(1)
with open('index.json', 'r') as index_json:
document_map = json.load(index_json)
return document_map.get(document_id)
def action(elem, doc):
if doc.get_metadata('title') is None:
if isinstance(elem, Para):
text = stringify(elem)
if not text.startswith('option'):
text = ' ' + text
return CodeBlock(text)
return
if isinstance(elem, BlockQuote):
if isinstance(elem.parent, ListItem):
assert len(elem.content) == 1
return elem.content[0]
elif any(isinstance(item, CodeBlock) for item in elem.content):
# Remove blockquotes around code blocks
return [item for item in elem.content]
elif len(elem.content) == 1:
# Convert blockquotes to code blocks
code = ''
for item in elem.content[0].content:
if isinstance(item, Link):
# Don't convert links to code
break
elif isinstance(item, Str):
code += item.text
elif isinstance(item, Space):
code += ' '
elif isinstance(item, LineBreak):
code += '\n'
else:
code += stringify(item)
else:
return CodeBlock(code)
elif isinstance(elem, Header):
if elem.identifier != 'title':
elem.level += 1
elif isinstance(elem, Link):
if elem.url == stringify(elem):
pass
else:
resolved = resolve_url(elem.url)
if resolved:
elem.url = resolved
elif isinstance(elem, CodeBlock):
elem.text = re.sub(r'^\t', '', elem.text, flags=re.MULTILINE)
elif isinstance(elem, Image):
elem.url = './' + elem.url
def main(doc=None):
return run_filter(action, prepare=prepare, doc=doc)
if __name__ == '__main__':
main()
| true | true |
1c30de918e38e7d8958d2723841d2f47446e452b | 9,245 | py | Python | homeassistant/components/homekit/__init__.py | raymondelooff/home-assistant | a9a8cbbd100b4ca5c7f90210fb37da37bc634923 | [
"Apache-2.0"
] | 3 | 2019-01-31T13:41:37.000Z | 2020-05-20T14:22:18.000Z | homeassistant/components/homekit/__init__.py | raymondelooff/home-assistant | a9a8cbbd100b4ca5c7f90210fb37da37bc634923 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/homekit/__init__.py | raymondelooff/home-assistant | a9a8cbbd100b4ca5c7f90210fb37da37bc634923 | [
"Apache-2.0"
] | 1 | 2020-11-04T07:35:32.000Z | 2020-11-04T07:35:32.000Z | """Support for Apple HomeKit.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/homekit/
"""
import ipaddress
import logging
from zlib import adler32
import voluptuous as vol
from homeassistant.components import cover
from homeassistant.const import (
ATTR_DEVICE_CLASS, ATTR_SUPPORTED_FEATURES, ATTR_UNIT_OF_MEASUREMENT,
CONF_IP_ADDRESS, CONF_NAME, CONF_PORT, CONF_TYPE, DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE, DEVICE_CLASS_TEMPERATURE,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
TEMP_CELSIUS, TEMP_FAHRENHEIT)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import FILTER_SCHEMA
from homeassistant.util import get_local_ip
from homeassistant.util.decorator import Registry
from .const import (
BRIDGE_NAME, CONF_AUTO_START, CONF_ENTITY_CONFIG, CONF_FEATURE_LIST,
CONF_FILTER, DEFAULT_AUTO_START, DEFAULT_PORT, DEVICE_CLASS_CO,
DEVICE_CLASS_CO2, DEVICE_CLASS_PM25, DOMAIN, HOMEKIT_FILE,
SERVICE_HOMEKIT_START, TYPE_FAUCET, TYPE_OUTLET, TYPE_SHOWER,
TYPE_SPRINKLER, TYPE_SWITCH, TYPE_VALVE)
from .util import (
show_setup_message, validate_entity_config, validate_media_player_features)
REQUIREMENTS = ['HAP-python==2.2.2']
_LOGGER = logging.getLogger(__name__)
MAX_DEVICES = 100
TYPES = Registry()
# #### Driver Status ####
STATUS_READY = 0
STATUS_RUNNING = 1
STATUS_STOPPED = 2
STATUS_WAIT = 3
SWITCH_TYPES = {
TYPE_FAUCET: 'Valve',
TYPE_OUTLET: 'Outlet',
TYPE_SHOWER: 'Valve',
TYPE_SPRINKLER: 'Valve',
TYPE_SWITCH: 'Switch',
TYPE_VALVE: 'Valve'}
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.All({
vol.Optional(CONF_NAME, default=BRIDGE_NAME):
vol.All(cv.string, vol.Length(min=3, max=25)),
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_IP_ADDRESS):
vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_AUTO_START, default=DEFAULT_AUTO_START): cv.boolean,
vol.Optional(CONF_FILTER, default={}): FILTER_SCHEMA,
vol.Optional(CONF_ENTITY_CONFIG, default={}): validate_entity_config,
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up the HomeKit component.

    With auto_start enabled the bridge starts on EVENT_HOMEASSISTANT_START;
    otherwise a SERVICE_HOMEKIT_START service is registered so the user
    can start it manually.
    """
    _LOGGER.debug('Begin setup HomeKit')
    conf = config[DOMAIN]
    name = conf[CONF_NAME]
    port = conf[CONF_PORT]
    ip_address = conf.get(CONF_IP_ADDRESS)
    auto_start = conf[CONF_AUTO_START]
    entity_filter = conf[CONF_FILTER]
    entity_config = conf[CONF_ENTITY_CONFIG]
    homekit = HomeKit(hass, name, port, ip_address, entity_filter,
                      entity_config)
    # setup() may block, so run it in the executor thread pool.
    await hass.async_add_executor_job(homekit.setup)
    if auto_start:
        # Start automatically once Home Assistant is fully up; the manual
        # start service below is intentionally not registered in this case.
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, homekit.start)
        return True
    def handle_homekit_service_start(service):
        """Handle start HomeKit service call."""
        if homekit.status != STATUS_READY:
            _LOGGER.warning(
                'HomeKit is not ready. Either it is already running or has '
                'been stopped.')
            return
        homekit.start()
    hass.services.async_register(DOMAIN, SERVICE_HOMEKIT_START,
                                 handle_homekit_service_start)
    return True
def get_accessory(hass, driver, state, aid, config):
    """Take state and return an accessory object if supported.

    Maps the entity's domain (and attributes such as device_class,
    supported features and unit of measurement) to a registered TYPES
    entry; returns None for unsupported entities or an invalid aid.
    """
    if not aid:
        # NOTE(review): "entitiy" typo lives in this user-facing log string;
        # left untouched here since it is runtime text, not a comment.
        _LOGGER.warning('The entitiy "%s" is not supported, since it '
                        'generates an invalid aid, please change it.',
                        state.entity_id)
        return None
    a_type = None
    name = config.get(CONF_NAME, state.name)
    if state.domain == 'alarm_control_panel':
        a_type = 'SecuritySystem'
    elif state.domain == 'binary_sensor' or state.domain == 'device_tracker':
        a_type = 'BinarySensor'
    elif state.domain == 'climate':
        a_type = 'Thermostat'
    elif state.domain == 'cover':
        # Covers map to three accessory types depending on class/features.
        device_class = state.attributes.get(ATTR_DEVICE_CLASS)
        features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if device_class == 'garage' and \
                features & (cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE):
            a_type = 'GarageDoorOpener'
        elif features & cover.SUPPORT_SET_POSITION:
            a_type = 'WindowCovering'
        elif features & (cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE):
            a_type = 'WindowCoveringBasic'
    elif state.domain == 'fan':
        a_type = 'Fan'
    elif state.domain == 'light':
        a_type = 'Light'
    elif state.domain == 'lock':
        a_type = 'Lock'
    elif state.domain == 'media_player':
        # Only exposed when the user configured a valid feature list.
        feature_list = config.get(CONF_FEATURE_LIST)
        if feature_list and \
                validate_media_player_features(state, feature_list):
            a_type = 'MediaPlayer'
    elif state.domain == 'sensor':
        # Sensors are classified by device_class first, then by unit; the
        # PM2.5/CO2 branches also fall back to matching the entity_id text.
        device_class = state.attributes.get(ATTR_DEVICE_CLASS)
        unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        if device_class == DEVICE_CLASS_TEMPERATURE or \
                unit in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
            a_type = 'TemperatureSensor'
        elif device_class == DEVICE_CLASS_HUMIDITY and unit == '%':
            a_type = 'HumiditySensor'
        elif device_class == DEVICE_CLASS_PM25 \
                or DEVICE_CLASS_PM25 in state.entity_id:
            a_type = 'AirQualitySensor'
        elif device_class == DEVICE_CLASS_CO:
            a_type = 'CarbonMonoxideSensor'
        elif device_class == DEVICE_CLASS_CO2 \
                or DEVICE_CLASS_CO2 in state.entity_id:
            a_type = 'CarbonDioxideSensor'
        elif device_class == DEVICE_CLASS_ILLUMINANCE or unit in ('lm', 'lx'):
            a_type = 'LightSensor'
    elif state.domain == 'switch':
        switch_type = config.get(CONF_TYPE, TYPE_SWITCH)
        a_type = SWITCH_TYPES[switch_type]
    elif state.domain in ('automation', 'input_boolean', 'remote', 'script'):
        a_type = 'Switch'
    elif state.domain == 'water_heater':
        a_type = 'WaterHeater'
    if a_type is None:
        return None
    _LOGGER.debug('Add "%s" as "%s"', state.entity_id, a_type)
    return TYPES[a_type](hass, driver, name, state.entity_id, aid, config)
def generate_aid(entity_id):
    """Derive a stable HomeKit accessory id from *entity_id*.

    Uses the adler32 checksum of the UTF-8 encoded entity id; the
    reserved values 0 and 1 map to None (treated as unsupported).
    """
    checksum = adler32(entity_id.encode('utf-8'))
    return None if checksum in (0, 1) else checksum
class HomeKit():
    """Class to handle all actions between HomeKit and Home Assistant."""
    def __init__(self, hass, name, port, ip_address, entity_filter,
                 entity_config):
        """Initialize a HomeKit object."""
        self.hass = hass
        self._name = name
        self._port = port
        self._ip_address = ip_address
        self._filter = entity_filter
        self._config = entity_config
        # Lifecycle state machine: READY -> WAIT -> RUNNING -> STOPPED.
        self.status = STATUS_READY
        self.bridge = None
        self.driver = None
    def setup(self):
        """Set up bridge and accessory driver."""
        # Imported lazily so the HAP dependency is not loaded at module import.
        from .accessories import HomeBridge, HomeDriver
        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_STOP, self.stop)
        ip_addr = self._ip_address or get_local_ip()
        path = self.hass.config.path(HOMEKIT_FILE)
        self.driver = HomeDriver(self.hass, address=ip_addr,
                                 port=self._port, persist_file=path)
        self.bridge = HomeBridge(self.hass, self.driver, self._name)
    def add_bridge_accessory(self, state):
        """Try adding accessory to bridge if configured beforehand."""
        if not state or not self._filter(state.entity_id):
            return
        aid = generate_aid(state.entity_id)
        # pop() so each entity's per-entity options are consumed once.
        conf = self._config.pop(state.entity_id, {})
        acc = get_accessory(self.hass, self.driver, state, aid, conf)
        if acc is not None:
            self.bridge.add_accessory(acc)
    def start(self, *args):
        """Start the accessory driver."""
        if self.status != STATUS_READY:
            return
        self.status = STATUS_WAIT
        # Imported for side effects only (hence the noqa/pylint markers);
        # presumably they register accessory classes in TYPES -- confirm.
        # pylint: disable=unused-variable
        from . import (  # noqa F401
            type_covers, type_fans, type_lights, type_locks,
            type_media_players, type_security_systems, type_sensors,
            type_switches, type_thermostats)
        for state in self.hass.states.all():
            self.add_bridge_accessory(state)
        self.driver.add_accessory(self.bridge)
        if not self.driver.state.paired:
            # Surface the pairing pincode while the bridge is unpaired.
            show_setup_message(self.hass, self.driver.state.pincode)
        if len(self.bridge.accessories) > MAX_DEVICES:
            _LOGGER.warning('You have exceeded the device limit, which might '
                            'cause issues. Consider using the filter option.')
        _LOGGER.debug('Driver start')
        self.hass.add_job(self.driver.start)
        self.status = STATUS_RUNNING
    def stop(self, *args):
        """Stop the accessory driver."""
        if self.status != STATUS_RUNNING:
            return
        self.status = STATUS_STOPPED
        _LOGGER.debug('Driver stop')
        self.hass.add_job(self.driver.stop)
| 34.36803 | 79 | 0.657869 | import ipaddress
import logging
from zlib import adler32
import voluptuous as vol
from homeassistant.components import cover
from homeassistant.const import (
ATTR_DEVICE_CLASS, ATTR_SUPPORTED_FEATURES, ATTR_UNIT_OF_MEASUREMENT,
CONF_IP_ADDRESS, CONF_NAME, CONF_PORT, CONF_TYPE, DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE, DEVICE_CLASS_TEMPERATURE,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
TEMP_CELSIUS, TEMP_FAHRENHEIT)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import FILTER_SCHEMA
from homeassistant.util import get_local_ip
from homeassistant.util.decorator import Registry
from .const import (
BRIDGE_NAME, CONF_AUTO_START, CONF_ENTITY_CONFIG, CONF_FEATURE_LIST,
CONF_FILTER, DEFAULT_AUTO_START, DEFAULT_PORT, DEVICE_CLASS_CO,
DEVICE_CLASS_CO2, DEVICE_CLASS_PM25, DOMAIN, HOMEKIT_FILE,
SERVICE_HOMEKIT_START, TYPE_FAUCET, TYPE_OUTLET, TYPE_SHOWER,
TYPE_SPRINKLER, TYPE_SWITCH, TYPE_VALVE)
from .util import (
show_setup_message, validate_entity_config, validate_media_player_features)
REQUIREMENTS = ['HAP-python==2.2.2']
_LOGGER = logging.getLogger(__name__)
MAX_DEVICES = 100
TYPES = Registry()
PE_FAUCET: 'Valve',
TYPE_OUTLET: 'Outlet',
TYPE_SHOWER: 'Valve',
TYPE_SPRINKLER: 'Valve',
TYPE_SWITCH: 'Switch',
TYPE_VALVE: 'Valve'}
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.All({
vol.Optional(CONF_NAME, default=BRIDGE_NAME):
vol.All(cv.string, vol.Length(min=3, max=25)),
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_IP_ADDRESS):
vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_AUTO_START, default=DEFAULT_AUTO_START): cv.boolean,
vol.Optional(CONF_FILTER, default={}): FILTER_SCHEMA,
vol.Optional(CONF_ENTITY_CONFIG, default={}): validate_entity_config,
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
_LOGGER.debug('Begin setup HomeKit')
conf = config[DOMAIN]
name = conf[CONF_NAME]
port = conf[CONF_PORT]
ip_address = conf.get(CONF_IP_ADDRESS)
auto_start = conf[CONF_AUTO_START]
entity_filter = conf[CONF_FILTER]
entity_config = conf[CONF_ENTITY_CONFIG]
homekit = HomeKit(hass, name, port, ip_address, entity_filter,
entity_config)
await hass.async_add_executor_job(homekit.setup)
if auto_start:
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, homekit.start)
return True
def handle_homekit_service_start(service):
if homekit.status != STATUS_READY:
_LOGGER.warning(
'HomeKit is not ready. Either it is already running or has '
'been stopped.')
return
homekit.start()
hass.services.async_register(DOMAIN, SERVICE_HOMEKIT_START,
handle_homekit_service_start)
return True
def get_accessory(hass, driver, state, aid, config):
if not aid:
_LOGGER.warning('The entitiy "%s" is not supported, since it '
'generates an invalid aid, please change it.',
state.entity_id)
return None
a_type = None
name = config.get(CONF_NAME, state.name)
if state.domain == 'alarm_control_panel':
a_type = 'SecuritySystem'
elif state.domain == 'binary_sensor' or state.domain == 'device_tracker':
a_type = 'BinarySensor'
elif state.domain == 'climate':
a_type = 'Thermostat'
elif state.domain == 'cover':
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if device_class == 'garage' and \
features & (cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE):
a_type = 'GarageDoorOpener'
elif features & cover.SUPPORT_SET_POSITION:
a_type = 'WindowCovering'
elif features & (cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE):
a_type = 'WindowCoveringBasic'
elif state.domain == 'fan':
a_type = 'Fan'
elif state.domain == 'light':
a_type = 'Light'
elif state.domain == 'lock':
a_type = 'Lock'
elif state.domain == 'media_player':
feature_list = config.get(CONF_FEATURE_LIST)
if feature_list and \
validate_media_player_features(state, feature_list):
a_type = 'MediaPlayer'
elif state.domain == 'sensor':
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
if device_class == DEVICE_CLASS_TEMPERATURE or \
unit in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
a_type = 'TemperatureSensor'
elif device_class == DEVICE_CLASS_HUMIDITY and unit == '%':
a_type = 'HumiditySensor'
elif device_class == DEVICE_CLASS_PM25 \
or DEVICE_CLASS_PM25 in state.entity_id:
a_type = 'AirQualitySensor'
elif device_class == DEVICE_CLASS_CO:
a_type = 'CarbonMonoxideSensor'
elif device_class == DEVICE_CLASS_CO2 \
or DEVICE_CLASS_CO2 in state.entity_id:
a_type = 'CarbonDioxideSensor'
elif device_class == DEVICE_CLASS_ILLUMINANCE or unit in ('lm', 'lx'):
a_type = 'LightSensor'
elif state.domain == 'switch':
switch_type = config.get(CONF_TYPE, TYPE_SWITCH)
a_type = SWITCH_TYPES[switch_type]
elif state.domain in ('automation', 'input_boolean', 'remote', 'script'):
a_type = 'Switch'
elif state.domain == 'water_heater':
a_type = 'WaterHeater'
if a_type is None:
return None
_LOGGER.debug('Add "%s" as "%s"', state.entity_id, a_type)
return TYPES[a_type](hass, driver, name, state.entity_id, aid, config)
def generate_aid(entity_id):
aid = adler32(entity_id.encode('utf-8'))
if aid in (0, 1):
return None
return aid
class HomeKit():
    """Drives the lifecycle of a HomeKit bridge exposing HA entities."""

    def __init__(self, hass, name, port, ip_address, entity_filter,
                 entity_config):
        """Record configuration only; ``setup()`` creates driver/bridge."""
        self.hass = hass
        self._name = name
        self._port = port
        self._ip_address = ip_address
        self._filter = entity_filter
        self._config = entity_config
        # Lifecycle state machine: READY -> WAIT -> RUNNING -> STOPPED.
        self.status = STATUS_READY
        self.bridge = None
        self.driver = None

    def setup(self):
        """Create the accessory driver and the bridge accessory."""
        from .accessories import HomeBridge, HomeDriver
        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_STOP, self.stop)
        bind_address = self._ip_address or get_local_ip()
        persist_path = self.hass.config.path(HOMEKIT_FILE)
        self.driver = HomeDriver(self.hass, address=bind_address,
                                 port=self._port, persist_file=persist_path)
        self.bridge = HomeBridge(self.hass, self.driver, self._name)

    def add_bridge_accessory(self, state):
        """Map a single HA state onto a HomeKit accessory on the bridge."""
        if not state:
            return
        if not self._filter(state.entity_id):
            return
        accessory_id = generate_aid(state.entity_id)
        entity_conf = self._config.pop(state.entity_id, {})
        accessory = get_accessory(self.hass, self.driver, state,
                                  accessory_id, entity_conf)
        if accessory is not None:
            self.bridge.add_accessory(accessory)

    def start(self, *args):
        """Populate the bridge from current states and start the driver."""
        if self.status != STATUS_READY:
            return
        self.status = STATUS_WAIT
        # Imported for their side effect of registering accessory types.
        from . import (
            type_covers, type_fans, type_lights, type_locks,
            type_media_players, type_security_systems, type_sensors,
            type_switches, type_thermostats)
        for entity_state in self.hass.states.all():
            self.add_bridge_accessory(entity_state)
        self.driver.add_accessory(self.bridge)
        if not self.driver.state.paired:
            show_setup_message(self.hass, self.driver.state.pincode)
        if len(self.bridge.accessories) > MAX_DEVICES:
            _LOGGER.warning('You have exceeded the device limit, which might '
                            'cause issues. Consider using the filter option.')
        _LOGGER.debug('Driver start')
        self.hass.add_job(self.driver.start)
        self.status = STATUS_RUNNING

    def stop(self, *args):
        """Stop the accessory driver if it is currently running."""
        if self.status != STATUS_RUNNING:
            return
        self.status = STATUS_STOPPED
        _LOGGER.debug('Driver stop')
        self.hass.add_job(self.driver.stop)
| true | true |
1c30dee81d76fdf562d3cb39bff9a6d0dd08a5da | 1,834 | py | Python | bibcat/ingesters/rels_ext.py | KnowledgeLinks/bibcat | ed530401290865dcfefb2ae661a8880e52876a48 | [
"MIT"
] | 4 | 2018-02-13T20:36:29.000Z | 2019-09-26T14:38:25.000Z | bibcat/ingesters/rels_ext.py | KnowledgeLinks/rdfw-bibcat | ed530401290865dcfefb2ae661a8880e52876a48 | [
"MIT"
] | 11 | 2017-10-27T17:44:46.000Z | 2018-08-15T17:27:25.000Z | bibcat/ingesters/rels_ext.py | KnowledgeLinks/rdfw-bibcat | ed530401290865dcfefb2ae661a8880e52876a48 | [
"MIT"
] | 1 | 2017-01-23T19:52:01.000Z | 2017-01-23T19:52:01.000Z | """Fedora 3.x RELS-EXTseries to BIBFRAME 2.0 ingester
This ingester is not intended to generated fully formed BF RDF but
supplement existing ingesters like MODS and DC. The RELS-EXT ingester adds
additional properties and classes to existing BF entities.
"""
__author__ = "Jeremy Nelson, Mike Stabile"
import rdflib
from bibcat.rml.processor import XMLProcessor
BF = rdflib.Namespace("http://id.loc.gov/ontologies/bibframe/")
class RELSEXTIngester(XMLProcessor):
    """Handles RELS-EXT datastreams from a Fedora 3.8 digital repository."""

    def __init__(self, **kwargs):
        """Set up the processor with the RELS-EXT RML rule set.

        Keyword Args:
            rules_ttl (str or list): extra RML rule file(s) appended to
                the built-in ``rels-ext.ttl`` rules.
            base_url (str): IRI prefix for minted resources, defaults
                to ``http://bibcat.org/``.
            institution_iri (str): IRI of the owning institution.
        """
        rules = ["rels-ext.ttl"]
        if "rules_ttl" in kwargs:
            extra_rules = kwargs["rules_ttl"]
            if isinstance(extra_rules, str):
                rules.append(extra_rules)
            elif isinstance(extra_rules, list):
                rules.extend(extra_rules)
        super(RELSEXTIngester, self).__init__(
            rml_rules=rules,
            base_url=kwargs.get("base_url", "http://bibcat.org/"),
            institution_iri=kwargs.get("institution_iri"),
            namespaces={'fedora': 'info:fedora/fedora-system:def/relations-external#',
                        'fedora-model': 'info:fedora/fedora-system:def/model#',
                        'islandora': 'http://islandora.ca/ontology/relsext#',
                        'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'})

    def run(self, xml, **kwargs):
        """Run the RML processor on a RELS-EXT XML document."""
        super(RELSEXTIngester, self).run(xml, **kwargs)
| 38.208333 | 86 | 0.638495 | __author__ = "Jeremy Nelson, Mike Stabile"
import rdflib
from bibcat.rml.processor import XMLProcessor
BF = rdflib.Namespace("http://id.loc.gov/ontologies/bibframe/")
class RELSEXTIngester(XMLProcessor):
def __init__(self, **kwargs):
rules = ["rels-ext.ttl"]
if "rules_ttl" in kwargs:
tmp_rules = kwargs.get("rules_ttl")
if isinstance(tmp_rules, str):
rules.append(tmp_rules)
elif isinstance(tmp_rules, list):
rules.extend(tmp_rules)
super(RELSEXTIngester, self).__init__(
rml_rules=rules,
base_url=kwargs.get("base_url", "http://bibcat.org/"),
institution_iri=kwargs.get("institution_iri"),
namespaces={'fedora': 'info:fedora/fedora-system:def/relations-external#',
'fedora-model': 'info:fedora/fedora-system:def/model#',
'islandora': 'http://islandora.ca/ontology/relsext#',
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'})
def run(self, xml, **kwargs):
super(RELSEXTIngester, self).run(xml, **kwargs)
| true | true |
1c30df9102b886dcf6539d61d83a51e700bd9743 | 13,174 | py | Python | innexia/innexiaBot/modules/disable.py | MikeOwino/curly-garbanzo | 37027800724cb80c4035eac421421a7ceb1062a6 | [
"MIT"
] | null | null | null | innexia/innexiaBot/modules/disable.py | MikeOwino/curly-garbanzo | 37027800724cb80c4035eac421421a7ceb1062a6 | [
"MIT"
] | null | null | null | innexia/innexiaBot/modules/disable.py | MikeOwino/curly-garbanzo | 37027800724cb80c4035eac421421a7ceb1062a6 | [
"MIT"
] | null | null | null | import importlib
from typing import Union
from future.utils import string_types
from innexiaBot import dispatcher
from innexiaBot.modules.helper_funcs.handlers import CMD_STARTERS, SpamChecker
from innexiaBot.modules.helper_funcs.misc import is_module_loaded
from telegram import ParseMode, Update
from telegram.ext import (
CallbackContext,
CommandHandler,
Filters,
MessageHandler,
RegexHandler,
)
from telegram.utils.helpers import escape_markdown
FILENAME = __name__.rsplit(".", 1)[-1]
# If module is due to be loaded, then setup all the magical handlers
if is_module_loaded(FILENAME):
from innexiaBot.modules.helper_funcs.chat_status import (
connection_status,
is_user_admin,
user_admin,
)
from innexiaBot.modules.sql import disable_sql as sql
from telegram.ext.dispatcher import run_async
DISABLE_CMDS = []
DISABLE_OTHER = []
ADMIN_CMDS = []
    class DisableAbleCommandHandler(CommandHandler):
        """CommandHandler whose command(s) can be disabled per chat.

        Commands are registered in the module-level DISABLE_CMDS list (and
        in ADMIN_CMDS when ``admin_ok`` is True) so that /disable and
        /enable can toggle them through the disable_sql table.
        """
        def __init__(self, command, callback, admin_ok=False, **kwargs):
            # admin_ok=True lets chat admins keep using the command even
            # after it has been disabled for that chat.
            super().__init__(command, callback, **kwargs)
            self.admin_ok = admin_ok
            if isinstance(command, string_types):
                DISABLE_CMDS.append(command)
                if admin_ok:
                    ADMIN_CMDS.append(command)
            else:
                DISABLE_CMDS.extend(command)
                if admin_ok:
                    ADMIN_CMDS.extend(command)
        def check_update(self, update):
            """Decide whether this handler should fire for *update*.

            Returns ``(args, filter_result)`` when the command should run,
            ``None`` to silently ignore the update (wrong command, spam, or
            disabled), or ``False`` when the message filters reject it.
            """
            if isinstance(update, Update) and update.effective_message:
                message = update.effective_message
                if message.text and len(message.text) > 1:
                    fst_word = message.text.split(None, 1)[0]
                    if len(fst_word) > 1 and any(
                        fst_word.startswith(start) for start in CMD_STARTERS
                    ):
                        args = message.text.split()[1:]
                        # Appending our own username makes command[1] valid
                        # even when the sender typed "/cmd" without "@bot".
                        command = fst_word[1:].split("@")
                        command.append(message.bot.username)
                        if not (
                            command[0].lower() in self.command
                            and command[1].lower() == message.bot.username.lower()
                        ):
                            return None
                        chat = update.effective_chat
                        user = update.effective_user
                        # NOTE(review): 1087968824 appears to be Telegram's
                        # GroupAnonymousBot service account; anonymous admins
                        # are spam-checked by chat id instead — confirm.
                        if user.id == 1087968824:
                            user_id = chat.id
                        else:
                            user_id = user.id
                        # Drop updates from users flagged by the spam checker.
                        if SpamChecker.check_user(user_id):
                            return None
                        filter_result = self.filters(update)
                        if filter_result:
                            if sql.is_command_disabled(chat.id, command[0].lower()):
                                # Despite its name, is_disabled is True when the
                                # command is admin-exempt AND the sender is an
                                # admin, i.e. when it should still run.
                                is_disabled = command[
                                    0
                                ] in ADMIN_CMDS and is_user_admin(chat, user.id)
                                if not is_disabled:
                                    return None
                                else:
                                    return args, filter_result
                            return args, filter_result
                        else:
                            return False
    class DisableAbleMessageHandler(MessageHandler):
        """MessageHandler that can be toggled per chat under *friendly* name."""
        def __init__(self, filters, callback, friendly, **kwargs):
            # friendly: human-readable key used by /disable and /enable.
            super().__init__(filters, callback, **kwargs)
            DISABLE_OTHER.append(friendly)
            self.friendly = friendly
            # Always restrict to message-type updates, combined with any
            # caller-supplied filters.
            if filters:
                self.filters = Filters.update.messages & filters
            else:
                self.filters = Filters.update.messages
        def check_update(self, update):
            """Return (args, filter_result) to run, or False when disabled."""
            chat = update.effective_chat
            message = update.effective_message
            filter_result = self.filters(update)
            try:
                args = message.text.split()[1:]
            # bare except kept deliberately: message.text may be None
            # (media messages) and must fall back to no args.
            except:
                args = []
            if super().check_update(update):
                if sql.is_command_disabled(chat.id, self.friendly):
                    return False
                else:
                    return args, filter_result
    class DisableAbleRegexHandler(RegexHandler):
        """RegexHandler that can be toggled per chat under *friendly* name."""
        def __init__(self, pattern, callback, friendly="", filters=None, **kwargs):
            super().__init__(pattern, callback, filters, **kwargs)
            DISABLE_OTHER.append(friendly)
            self.friendly = friendly
        def check_update(self, update):
            """Match like RegexHandler, but refuse when disabled in the chat."""
            chat = update.effective_chat
            if super().check_update(update):
                if sql.is_command_disabled(chat.id, self.friendly):
                    return False
                else:
                    return True
@run_async
@connection_status
@user_admin
def disable(update: Update, context: CallbackContext):
args = context.args
chat = update.effective_chat
if len(args) >= 1:
disable_cmd = args[0]
if disable_cmd.startswith(CMD_STARTERS):
disable_cmd = disable_cmd[1:]
if disable_cmd in set(DISABLE_CMDS + DISABLE_OTHER):
sql.disable_command(chat.id, str(disable_cmd).lower())
update.effective_message.reply_text(
f"Disabled the use of `{disable_cmd}`",
parse_mode=ParseMode.MARKDOWN,
)
else:
update.effective_message.reply_text("That command can't be disabled")
else:
update.effective_message.reply_text("What should I disable?")
    @run_async
    @connection_status
    @user_admin
    def disable_module(update: Update, context: CallbackContext):
        """Disable every toggleable command listed in a module's __command_list__."""
        args = context.args
        chat = update.effective_chat
        if len(args) >= 1:
            # Accept a bare module name and map it onto the package path.
            disable_module = "innexiaBot.modules." + args[0].rsplit(".", 1)[0]
            try:
                module = importlib.import_module(disable_module)
            except:
                update.effective_message.reply_text("Does that module even exist?")
                return
            try:
                command_list = module.__command_list__
            except:
                update.effective_message.reply_text(
                    "Module does not contain command list!"
                )
                return
            # Partition into commands that could be disabled and those that
            # are not registered as toggleable.
            disabled_cmds = []
            failed_disabled_cmds = []
            for disable_cmd in command_list:
                if disable_cmd.startswith(CMD_STARTERS):
                    disable_cmd = disable_cmd[1:]
                if disable_cmd in set(DISABLE_CMDS + DISABLE_OTHER):
                    sql.disable_command(chat.id, str(disable_cmd).lower())
                    disabled_cmds.append(disable_cmd)
                else:
                    failed_disabled_cmds.append(disable_cmd)
            if disabled_cmds:
                disabled_cmds_string = ", ".join(disabled_cmds)
                update.effective_message.reply_text(
                    f"Disabled the uses of `{disabled_cmds_string}`",
                    parse_mode=ParseMode.MARKDOWN,
                )
            if failed_disabled_cmds:
                failed_disabled_cmds_string = ", ".join(failed_disabled_cmds)
                update.effective_message.reply_text(
                    f"Commands `{failed_disabled_cmds_string}` can't be disabled",
                    parse_mode=ParseMode.MARKDOWN,
                )
        else:
            update.effective_message.reply_text("What should I disable?")
@run_async
@connection_status
@user_admin
def enable(update: Update, context: CallbackContext):
args = context.args
chat = update.effective_chat
if len(args) >= 1:
enable_cmd = args[0]
if enable_cmd.startswith(CMD_STARTERS):
enable_cmd = enable_cmd[1:]
if sql.enable_command(chat.id, enable_cmd):
update.effective_message.reply_text(
f"Enabled the use of `{enable_cmd}`", parse_mode=ParseMode.MARKDOWN
)
else:
update.effective_message.reply_text("Is that even disabled?")
else:
update.effective_message.reply_text("What should I enable?")
    @run_async
    @connection_status
    @user_admin
    def enable_module(update: Update, context: CallbackContext):
        """Re-enable every command listed in a module's __command_list__."""
        args = context.args
        chat = update.effective_chat
        if len(args) >= 1:
            # Accept a bare module name and map it onto the package path.
            enable_module = "innexiaBot.modules." + args[0].rsplit(".", 1)[0]
            try:
                module = importlib.import_module(enable_module)
            except:
                update.effective_message.reply_text("Does that module even exist?")
                return
            try:
                command_list = module.__command_list__
            except:
                update.effective_message.reply_text(
                    "Module does not contain command list!"
                )
                return
            # Partition into commands that were re-enabled and those that
            # were not disabled in the first place.
            enabled_cmds = []
            failed_enabled_cmds = []
            for enable_cmd in command_list:
                if enable_cmd.startswith(CMD_STARTERS):
                    enable_cmd = enable_cmd[1:]
                if sql.enable_command(chat.id, enable_cmd):
                    enabled_cmds.append(enable_cmd)
                else:
                    failed_enabled_cmds.append(enable_cmd)
            if enabled_cmds:
                enabled_cmds_string = ", ".join(enabled_cmds)
                update.effective_message.reply_text(
                    f"Enabled the uses of `{enabled_cmds_string}`",
                    parse_mode=ParseMode.MARKDOWN,
                )
            if failed_enabled_cmds:
                failed_enabled_cmds_string = ", ".join(failed_enabled_cmds)
                update.effective_message.reply_text(
                    f"Are the commands `{failed_enabled_cmds_string}` even disabled?",
                    parse_mode=ParseMode.MARKDOWN,
                )
        else:
            update.effective_message.reply_text("What should I enable?")
@run_async
@connection_status
@user_admin
def list_cmds(update: Update, context: CallbackContext):
if DISABLE_CMDS + DISABLE_OTHER:
result = ""
for cmd in set(DISABLE_CMDS + DISABLE_OTHER):
result += f" - `{escape_markdown(cmd)}`\n"
update.effective_message.reply_text(
f"The following commands are toggleable:\n{result}",
parse_mode=ParseMode.MARKDOWN,
)
else:
update.effective_message.reply_text("No commands can be disabled.")
# do not async
def build_curr_disabled(chat_id: Union[str, int]) -> str:
disabled = sql.get_all_disabled(chat_id)
if not disabled:
return "No commands are disabled!"
result = ""
for cmd in disabled:
result += " - `{}`\n".format(escape_markdown(cmd))
return "The following commands are currently restricted:\n{}".format(result)
    @run_async
    @connection_status
    def commands(update: Update, context: CallbackContext):
        """Reply with the list of commands disabled in the current chat."""
        chat = update.effective_chat
        update.effective_message.reply_text(
            build_curr_disabled(chat.id), parse_mode=ParseMode.MARKDOWN
        )
    def __stats__():
        """Return a one-line usage summary of disabled items across chats."""
        return f"• {sql.num_disabled()} disabled items, across {sql.num_chats()} chats."
    def __migrate__(old_chat_id, new_chat_id):
        """Carry disable settings over when a chat id changes (migration)."""
        sql.migrate_chat(old_chat_id, new_chat_id)
    def __chat_settings__(chat_id, user_id):
        """Per-chat settings text: the currently disabled commands."""
        return build_curr_disabled(chat_id)
    # The toggle commands themselves use plain CommandHandlers so they can
    # never be disabled.
    DISABLE_HANDLER = CommandHandler("disable", disable)
    DISABLE_MODULE_HANDLER = CommandHandler("disablemodule", disable_module)
    ENABLE_HANDLER = CommandHandler("enable", enable)
    ENABLE_MODULE_HANDLER = CommandHandler("enablemodule", enable_module)
    COMMANDS_HANDLER = CommandHandler(["cmds", "disabled"], commands)
    TOGGLE_HANDLER = CommandHandler("listcmds", list_cmds)
    dispatcher.add_handler(DISABLE_HANDLER)
    dispatcher.add_handler(DISABLE_MODULE_HANDLER)
    dispatcher.add_handler(ENABLE_HANDLER)
    dispatcher.add_handler(ENABLE_MODULE_HANDLER)
    dispatcher.add_handler(COMMANDS_HANDLER)
    dispatcher.add_handler(TOGGLE_HANDLER)
    __help__ = """
 ❍ /cmds*:* check the current status of disabled commands
*Admins only:*
 ❍ /enable <cmd name>*:* enable that command
 ❍ /disable <cmd name>*:* disable that command
 ❍ /enablemodule <module name>*:* enable all commands in that module
 ❍ /disablemodule <module name>*:* disable all commands in that module
 ❍ /listcmds*:* list all possible toggleable commands
"""
    __mod_name__ = "Disable"
# When this module is not loaded, fall back to the stock PTB handlers so
# other modules importing these names keep working (nothing is toggleable).
else:
    DisableAbleCommandHandler = CommandHandler
    DisableAbleRegexHandler = RegexHandler
    DisableAbleMessageHandler = MessageHandler
from typing import Union
from future.utils import string_types
from innexiaBot import dispatcher
from innexiaBot.modules.helper_funcs.handlers import CMD_STARTERS, SpamChecker
from innexiaBot.modules.helper_funcs.misc import is_module_loaded
from telegram import ParseMode, Update
from telegram.ext import (
CallbackContext,
CommandHandler,
Filters,
MessageHandler,
RegexHandler,
)
from telegram.utils.helpers import escape_markdown
FILENAME = __name__.rsplit(".", 1)[-1]
if is_module_loaded(FILENAME):
from innexiaBot.modules.helper_funcs.chat_status import (
connection_status,
is_user_admin,
user_admin,
)
from innexiaBot.modules.sql import disable_sql as sql
from telegram.ext.dispatcher import run_async
DISABLE_CMDS = []
DISABLE_OTHER = []
ADMIN_CMDS = []
class DisableAbleCommandHandler(CommandHandler):
def __init__(self, command, callback, admin_ok=False, **kwargs):
super().__init__(command, callback, **kwargs)
self.admin_ok = admin_ok
if isinstance(command, string_types):
DISABLE_CMDS.append(command)
if admin_ok:
ADMIN_CMDS.append(command)
else:
DISABLE_CMDS.extend(command)
if admin_ok:
ADMIN_CMDS.extend(command)
def check_update(self, update):
if isinstance(update, Update) and update.effective_message:
message = update.effective_message
if message.text and len(message.text) > 1:
fst_word = message.text.split(None, 1)[0]
if len(fst_word) > 1 and any(
fst_word.startswith(start) for start in CMD_STARTERS
):
args = message.text.split()[1:]
command = fst_word[1:].split("@")
command.append(message.bot.username)
if not (
command[0].lower() in self.command
and command[1].lower() == message.bot.username.lower()
):
return None
chat = update.effective_chat
user = update.effective_user
if user.id == 1087968824:
user_id = chat.id
else:
user_id = user.id
if SpamChecker.check_user(user_id):
return None
filter_result = self.filters(update)
if filter_result:
if sql.is_command_disabled(chat.id, command[0].lower()):
is_disabled = command[
0
] in ADMIN_CMDS and is_user_admin(chat, user.id)
if not is_disabled:
return None
else:
return args, filter_result
return args, filter_result
else:
return False
class DisableAbleMessageHandler(MessageHandler):
def __init__(self, filters, callback, friendly, **kwargs):
super().__init__(filters, callback, **kwargs)
DISABLE_OTHER.append(friendly)
self.friendly = friendly
if filters:
self.filters = Filters.update.messages & filters
else:
self.filters = Filters.update.messages
def check_update(self, update):
chat = update.effective_chat
message = update.effective_message
filter_result = self.filters(update)
try:
args = message.text.split()[1:]
except:
args = []
if super().check_update(update):
if sql.is_command_disabled(chat.id, self.friendly):
return False
else:
return args, filter_result
class DisableAbleRegexHandler(RegexHandler):
def __init__(self, pattern, callback, friendly="", filters=None, **kwargs):
super().__init__(pattern, callback, filters, **kwargs)
DISABLE_OTHER.append(friendly)
self.friendly = friendly
def check_update(self, update):
chat = update.effective_chat
if super().check_update(update):
if sql.is_command_disabled(chat.id, self.friendly):
return False
else:
return True
@run_async
@connection_status
@user_admin
def disable(update: Update, context: CallbackContext):
args = context.args
chat = update.effective_chat
if len(args) >= 1:
disable_cmd = args[0]
if disable_cmd.startswith(CMD_STARTERS):
disable_cmd = disable_cmd[1:]
if disable_cmd in set(DISABLE_CMDS + DISABLE_OTHER):
sql.disable_command(chat.id, str(disable_cmd).lower())
update.effective_message.reply_text(
f"Disabled the use of `{disable_cmd}`",
parse_mode=ParseMode.MARKDOWN,
)
else:
update.effective_message.reply_text("That command can't be disabled")
else:
update.effective_message.reply_text("What should I disable?")
@run_async
@connection_status
@user_admin
def disable_module(update: Update, context: CallbackContext):
args = context.args
chat = update.effective_chat
if len(args) >= 1:
disable_module = "innexiaBot.modules." + args[0].rsplit(".", 1)[0]
try:
module = importlib.import_module(disable_module)
except:
update.effective_message.reply_text("Does that module even exist?")
return
try:
command_list = module.__command_list__
except:
update.effective_message.reply_text(
"Module does not contain command list!"
)
return
disabled_cmds = []
failed_disabled_cmds = []
for disable_cmd in command_list:
if disable_cmd.startswith(CMD_STARTERS):
disable_cmd = disable_cmd[1:]
if disable_cmd in set(DISABLE_CMDS + DISABLE_OTHER):
sql.disable_command(chat.id, str(disable_cmd).lower())
disabled_cmds.append(disable_cmd)
else:
failed_disabled_cmds.append(disable_cmd)
if disabled_cmds:
disabled_cmds_string = ", ".join(disabled_cmds)
update.effective_message.reply_text(
f"Disabled the uses of `{disabled_cmds_string}`",
parse_mode=ParseMode.MARKDOWN,
)
if failed_disabled_cmds:
failed_disabled_cmds_string = ", ".join(failed_disabled_cmds)
update.effective_message.reply_text(
f"Commands `{failed_disabled_cmds_string}` can't be disabled",
parse_mode=ParseMode.MARKDOWN,
)
else:
update.effective_message.reply_text("What should I disable?")
@run_async
@connection_status
@user_admin
def enable(update: Update, context: CallbackContext):
args = context.args
chat = update.effective_chat
if len(args) >= 1:
enable_cmd = args[0]
if enable_cmd.startswith(CMD_STARTERS):
enable_cmd = enable_cmd[1:]
if sql.enable_command(chat.id, enable_cmd):
update.effective_message.reply_text(
f"Enabled the use of `{enable_cmd}`", parse_mode=ParseMode.MARKDOWN
)
else:
update.effective_message.reply_text("Is that even disabled?")
else:
update.effective_message.reply_text("What should I enable?")
@run_async
@connection_status
@user_admin
def enable_module(update: Update, context: CallbackContext):
args = context.args
chat = update.effective_chat
if len(args) >= 1:
enable_module = "innexiaBot.modules." + args[0].rsplit(".", 1)[0]
try:
module = importlib.import_module(enable_module)
except:
update.effective_message.reply_text("Does that module even exist?")
return
try:
command_list = module.__command_list__
except:
update.effective_message.reply_text(
"Module does not contain command list!"
)
return
enabled_cmds = []
failed_enabled_cmds = []
for enable_cmd in command_list:
if enable_cmd.startswith(CMD_STARTERS):
enable_cmd = enable_cmd[1:]
if sql.enable_command(chat.id, enable_cmd):
enabled_cmds.append(enable_cmd)
else:
failed_enabled_cmds.append(enable_cmd)
if enabled_cmds:
enabled_cmds_string = ", ".join(enabled_cmds)
update.effective_message.reply_text(
f"Enabled the uses of `{enabled_cmds_string}`",
parse_mode=ParseMode.MARKDOWN,
)
if failed_enabled_cmds:
failed_enabled_cmds_string = ", ".join(failed_enabled_cmds)
update.effective_message.reply_text(
f"Are the commands `{failed_enabled_cmds_string}` even disabled?",
parse_mode=ParseMode.MARKDOWN,
)
else:
update.effective_message.reply_text("What should I enable?")
@run_async
@connection_status
@user_admin
def list_cmds(update: Update, context: CallbackContext):
if DISABLE_CMDS + DISABLE_OTHER:
result = ""
for cmd in set(DISABLE_CMDS + DISABLE_OTHER):
result += f" - `{escape_markdown(cmd)}`\n"
update.effective_message.reply_text(
f"The following commands are toggleable:\n{result}",
parse_mode=ParseMode.MARKDOWN,
)
else:
update.effective_message.reply_text("No commands can be disabled.")
def build_curr_disabled(chat_id: Union[str, int]) -> str:
disabled = sql.get_all_disabled(chat_id)
if not disabled:
return "No commands are disabled!"
result = ""
for cmd in disabled:
result += " - `{}`\n".format(escape_markdown(cmd))
return "The following commands are currently restricted:\n{}".format(result)
@run_async
@connection_status
def commands(update: Update, context: CallbackContext):
chat = update.effective_chat
update.effective_message.reply_text(
build_curr_disabled(chat.id), parse_mode=ParseMode.MARKDOWN
)
def __stats__():
return f"• {sql.num_disabled()} disabled items, across {sql.num_chats()} chats."
def __migrate__(old_chat_id, new_chat_id):
sql.migrate_chat(old_chat_id, new_chat_id)
def __chat_settings__(chat_id, user_id):
return build_curr_disabled(chat_id)
DISABLE_HANDLER = CommandHandler("disable", disable)
DISABLE_MODULE_HANDLER = CommandHandler("disablemodule", disable_module)
ENABLE_HANDLER = CommandHandler("enable", enable)
ENABLE_MODULE_HANDLER = CommandHandler("enablemodule", enable_module)
COMMANDS_HANDLER = CommandHandler(["cmds", "disabled"], commands)
TOGGLE_HANDLER = CommandHandler("listcmds", list_cmds)
dispatcher.add_handler(DISABLE_HANDLER)
dispatcher.add_handler(DISABLE_MODULE_HANDLER)
dispatcher.add_handler(ENABLE_HANDLER)
dispatcher.add_handler(ENABLE_MODULE_HANDLER)
dispatcher.add_handler(COMMANDS_HANDLER)
dispatcher.add_handler(TOGGLE_HANDLER)
__help__ = """
❍ /cmds*:* check the current status of disabled commands
*Admins only:*
❍ /enable <cmd name>*:* enable that command
❍ /disable <cmd name>*:* disable that command
❍ /enablemodule <module name>*:* enable all commands in that module
❍ /disablemodule <module name>*:* disable all commands in that module
❍ /listcmds*:* list all possible toggleable commands
"""
__mod_name__ = "Disable"
else:
DisableAbleCommandHandler = CommandHandler
DisableAbleRegexHandler = RegexHandler
DisableAbleMessageHandler = MessageHandler
| true | true |
1c30dfad8ff52390322775828193a9dbef30f255 | 7,614 | py | Python | tests/st/ops/ascend/test_gru_op.py | httpsgithu/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | 1 | 2022-02-23T09:13:43.000Z | 2022-02-23T09:13:43.000Z | tests/st/ops/ascend/test_gru_op.py | 949144093/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | null | null | null | tests/st/ops/ascend/test_gru_op.py | 949144093/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import math
import pytest
import numpy as np
from mindspore import context
from mindspore import nn
from mindspore import Tensor
from mindspore.common.parameter import ParameterTuple
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as c
class GradOfAllInputsAndParams(nn.Cell):
    """Cell returning gradients w.r.t. every input and trainable parameter.

    Wraps *network* with a GradOperation configured with both get_all
    (input gradients) and get_by_list (parameter gradients).
    """
    def __init__(self, network, sens_param):
        super(GradOfAllInputsAndParams, self).__init__()
        self.network = network
        self.params = ParameterTuple(self.network.trainable_params())
        self.grad = c.GradOperation(get_all=True, get_by_list=True,
                                    sens_param=sens_param)
    def construct(self, *inputs):
        grad_fn = self.grad(self.network, self.params)
        return grad_fn(*inputs)
class GRU(nn.Cell):
    """Minimal nn.Cell wrapper around mindspore.nn.GRU used by these tests."""
    def __init__(self, input_size, hidden_size, num_layers, has_bias, batch_first, bidirectional, dropout):
        super(GRU, self).__init__()
        self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, has_bias=has_bias,
                          batch_first=batch_first, bidirectional=bidirectional, dropout=dropout)
    def construct(self, inp, h0):
        # Forwards straight through; returns whatever nn.GRU returns
        # (output sequence and final hidden state).
        return self.gru(inp, h0)
class GRUWeightBias():
    """Builds randomly initialised weight/bias ParameterTuples for nn.GRU.

    Parameter names follow the weight_ih_l{layer}{suffix} scheme so the
    tuples can be assigned directly onto net.gru.w_ih_list and friends,
    sharing identical weights between two networks under test.
    """
    def __init__(self, num_layers, has_bias, input_size, num_directions, hidden_size, bidirectional):
        self.num_layers = num_layers
        self.has_bias = has_bias
        self.input_size = input_size
        self.num_directions = num_directions
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
    def get_weight_bias(self):
        """Return (w_ih_list, w_hh_list, b_ih_list, b_hh_list) ParameterTuples.

        Bias lists are empty when has_bias is False.
        """
        # A GRU stacks 3 gates, hence 3 * hidden_size rows per weight matrix.
        gate_size = 3 * self.hidden_size
        w_ih_list = []
        w_hh_list = []
        b_ih_list = []
        b_hh_list = []
        # Uniform init in [-1/sqrt(hidden), 1/sqrt(hidden)].
        stdv = 1 / math.sqrt(self.hidden_size)
        for layer in range(self.num_layers):
            for direction in range(self.num_directions):
                # Layers above the first consume the (possibly bidirectional)
                # output of the previous layer.
                layer_input_size = self.input_size if layer == 0 else self.hidden_size * self.num_directions
                suffix = '_reverse' if direction == 1 else ''
                w_ih_list.append(Parameter(
                    Tensor(np.random.uniform(-stdv, stdv, (gate_size, layer_input_size)).astype(np.float32)),
                    name='weight_ih_l{}{}'.format(layer, suffix)))
                w_hh_list.append(Parameter(
                    Tensor(np.random.uniform(-stdv, stdv, (gate_size, self.hidden_size)).astype(np.float32)),
                    name='weight_hh_l{}{}'.format(layer, suffix)))
                if self.has_bias:
                    b_ih_list.append(Parameter(
                        Tensor(np.random.uniform(-stdv, stdv, (gate_size)).astype(np.float32)),
                        name='bias_ih_l{}{}'.format(layer, suffix)))
                    b_hh_list.append(Parameter(
                        Tensor(np.random.uniform(-stdv, stdv, (gate_size)).astype(np.float32)),
                        name='bias_hh_l{}{}'.format(layer, suffix)))
        w_ih_list = ParameterTuple(w_ih_list)
        w_hh_list = ParameterTuple(w_hh_list)
        b_ih_list = ParameterTuple(b_ih_list)
        b_hh_list = ParameterTuple(b_hh_list)
        return w_ih_list, w_hh_list, b_ih_list, b_hh_list
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_sit_gru_forward_input_3_32_32_is_32_hs_16():
    """Forward pass of nn.GRU must agree between graph and pynative modes.

    Fix: the declared ``hidden_size`` (and batch/seq sizes) were ignored in
    favour of hard-coded literals; the configuration variables are now used
    consistently so changing one value cannot silently desynchronise shapes.
    """
    input_size = 32
    hidden_size = 16
    batch_size = 32
    seq_len = 3
    has_bias = True
    bidirectional = False
    num_layers = 1
    num_directions = 1
    # Shared weights so both execution modes compute on identical parameters.
    fact = GRUWeightBias(num_layers, has_bias, input_size, num_directions, hidden_size, bidirectional)
    w_ih_list, w_hh_list, b_ih_list, b_hh_list = fact.get_weight_bias()
    h0 = Tensor(np.random.randn(num_layers * num_directions, batch_size, hidden_size).astype(np.float32))
    input_ms = Tensor(np.random.randn(seq_len, batch_size, input_size).astype(np.float32))
    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    net = GRU(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, has_bias=has_bias,
              batch_first=False, bidirectional=bidirectional, dropout=0.0)
    net.gru.w_ih_list = w_ih_list
    net.gru.w_hh_list = w_hh_list
    net.gru.b_ih_list = b_ih_list
    net.gru.b_hh_list = b_hh_list
    out, hy = net(input_ms, h0)
    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    net_pynative = GRU(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, has_bias=has_bias,
                       batch_first=False, bidirectional=bidirectional, dropout=0.0)
    net_pynative.gru.w_ih_list = w_ih_list
    net_pynative.gru.w_hh_list = w_hh_list
    net_pynative.gru.b_ih_list = b_ih_list
    net_pynative.gru.b_hh_list = b_hh_list
    out_pynative, hy_pynative = net_pynative(input_ms, h0)
    assert np.allclose(out.asnumpy(), out_pynative.asnumpy(), 0.001, 0.001)
    assert np.allclose(hy.asnumpy(), hy_pynative.asnumpy(), 0.001, 0.001)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_sit_gru_grad_input_3_32_32_is_32_hs_16():
    """Input/hidden-state gradients of nn.GRU must agree between graph
    and pynative modes.

    Fix: the declared ``hidden_size`` (and batch/seq sizes) were ignored in
    favour of hard-coded literals; the configuration variables are now used
    consistently so changing one value cannot silently desynchronise shapes.
    """
    input_size = 32
    hidden_size = 16
    batch_size = 32
    seq_len = 3
    has_bias = True
    bidirectional = False
    num_layers = 1
    num_directions = 1
    # Shared weights so both execution modes compute on identical parameters.
    fact = GRUWeightBias(num_layers, has_bias, input_size, num_directions, hidden_size, bidirectional)
    w_ih_list, w_hh_list, b_ih_list, b_hh_list = fact.get_weight_bias()
    h0 = Tensor(np.random.randn(num_layers * num_directions, batch_size, hidden_size).astype(np.float32))
    input_ms = Tensor(np.random.randn(seq_len, batch_size, input_size).astype(np.float32))
    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    net = GRU(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, has_bias=has_bias,
              batch_first=False, bidirectional=bidirectional, dropout=0.0)
    net.gru.w_ih_list = w_ih_list
    net.gru.w_hh_list = w_hh_list
    net.gru.b_ih_list = b_ih_list
    net.gru.b_hh_list = b_hh_list
    grad_net_inp = GradOfAllInputsAndParams(net, sens_param=False)
    grad_net_inp.set_train()
    out_grad, _ = grad_net_inp(input_ms, h0)
    x_grad = out_grad[0].asnumpy()
    h_grad = out_grad[1].asnumpy()
    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    net_pynative = GRU(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, has_bias=has_bias,
                       batch_first=False, bidirectional=bidirectional, dropout=0.0)
    net_pynative.gru.w_ih_list = w_ih_list
    net_pynative.gru.w_hh_list = w_hh_list
    net_pynative.gru.b_ih_list = b_ih_list
    net_pynative.gru.b_hh_list = b_hh_list
    grad_net_inp_pynative = GradOfAllInputsAndParams(net_pynative, sens_param=False)
    grad_net_inp_pynative.set_train()
    out_grad_pynative, _ = grad_net_inp_pynative(input_ms, h0)
    x_grad_pynative = out_grad_pynative[0].asnumpy()
    h_grad_pynative = out_grad_pynative[1].asnumpy()
    assert np.allclose(x_grad, x_grad_pynative, 0.001, 0.001)
    assert np.allclose(h_grad, h_grad_pynative, 0.001, 0.001)
| 41.606557 | 115 | 0.695692 |
import math
import pytest
import numpy as np
from mindspore import context
from mindspore import nn
from mindspore import Tensor
from mindspore.common.parameter import ParameterTuple
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as c
class GradOfAllInputsAndParams(nn.Cell):
def __init__(self, network, sens_param):
super(GradOfAllInputsAndParams, self).__init__()
self.grad = c.GradOperation(get_all=True, get_by_list=True, sens_param=sens_param)
self.network = network
self.params = ParameterTuple(self.network.trainable_params())
def construct(self, *inputs):
gout = self.grad(self.network, self.params)(*inputs)
return gout
class GRU(nn.Cell):
def __init__(self, input_size, hidden_size, num_layers, has_bias, batch_first, bidirectional, dropout):
super(GRU, self).__init__()
self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, has_bias=has_bias,
batch_first=batch_first, bidirectional=bidirectional, dropout=dropout)
def construct(self, inp, h0):
return self.gru(inp, h0)
class GRUWeightBias():
    """Builds randomly initialised GRU weight/bias ParameterTuples.

    The random draws are made in the same order as before (w_ih, w_hh, then the
    biases, per layer/direction) so the generated values are unchanged for a
    given numpy RNG state.
    """
    def __init__(self, num_layers, has_bias, input_size, num_directions, hidden_size, bidirectional):
        self.num_layers = num_layers
        self.has_bias = has_bias
        self.input_size = input_size
        self.num_directions = num_directions
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
    def get_weight_bias(self):
        # GRU stacks its three gates row-wise, hence 3 * hidden_size rows.
        gate_size = 3 * self.hidden_size
        stdv = 1 / math.sqrt(self.hidden_size)
        w_ih_list = []
        w_hh_list = []
        b_ih_list = []
        b_hh_list = []
        for layer in range(self.num_layers):
            for direction in range(self.num_directions):
                # Layers after the first consume the (possibly bidirectional) hidden state.
                if layer == 0:
                    layer_input_size = self.input_size
                else:
                    layer_input_size = self.hidden_size * self.num_directions
                suffix = '_reverse' if direction == 1 else ''
                w_ih_list.append(Parameter(
                    Tensor(np.random.uniform(-stdv, stdv, (gate_size, layer_input_size)).astype(np.float32)),
                    name='weight_ih_l{}{}'.format(layer, suffix)))
                w_hh_list.append(Parameter(
                    Tensor(np.random.uniform(-stdv, stdv, (gate_size, self.hidden_size)).astype(np.float32)),
                    name='weight_hh_l{}{}'.format(layer, suffix)))
                if self.has_bias:
                    b_ih_list.append(Parameter(
                        Tensor(np.random.uniform(-stdv, stdv, (gate_size)).astype(np.float32)),
                        name='bias_ih_l{}{}'.format(layer, suffix)))
                    b_hh_list.append(Parameter(
                        Tensor(np.random.uniform(-stdv, stdv, (gate_size)).astype(np.float32)),
                        name='bias_hh_l{}{}'.format(layer, suffix)))
        return (ParameterTuple(w_ih_list), ParameterTuple(w_hh_list),
                ParameterTuple(b_ih_list), ParameterTuple(b_hh_list))
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_sit_gru_forward_input_3_32_32_is_32_hs_16():
    """Forward outputs of a single-layer GRU must agree between GRAPH_MODE and
    PYNATIVE_MODE when both nets are loaded with the same random weights."""
    input_size = 32
    hidden_size = 16
    has_bias = True
    bidirectional = False
    num_layers = 1
    num_directions = 1
    # One shared set of weights/biases for both execution modes.
    fact = GRUWeightBias(num_layers, has_bias, input_size, num_directions, hidden_size, bidirectional)
    w_ih_list, w_hh_list, b_ih_list, b_hh_list = fact.get_weight_bias()
    # Input is (seq_len=3, batch=32, feature=32); h0 is (layers*dirs, batch, hidden).
    h0 = Tensor(np.random.randn(num_layers * num_directions, 32, 16).astype(np.float32))
    input_ms = Tensor(np.random.randn(3, 32, 32).astype(np.float32))
    # Graph-mode reference run.
    context.set_context(mode=context.GRAPH_MODE)
    net = GRU(input_size=input_size, hidden_size=16, num_layers=num_layers, has_bias=has_bias, batch_first=False,
              bidirectional=bidirectional, dropout=0.0)
    net.gru.w_ih_list = w_ih_list
    net.gru.w_hh_list = w_hh_list
    net.gru.b_ih_list = b_ih_list
    net.gru.b_hh_list = b_hh_list
    out, hy = net(input_ms, h0)
    # Pynative-mode run with identical weights.
    context.set_context(mode=context.PYNATIVE_MODE)
    net_pynative = GRU(input_size=input_size, hidden_size=16, num_layers=num_layers, has_bias=has_bias,
                       batch_first=False, bidirectional=bidirectional, dropout=0.0)
    net_pynative.gru.w_ih_list = w_ih_list
    net_pynative.gru.w_hh_list = w_hh_list
    net_pynative.gru.b_ih_list = b_ih_list
    net_pynative.gru.b_hh_list = b_hh_list
    out_pynative, hy_pynative = net_pynative(input_ms, h0)
    assert np.allclose(out.asnumpy(), out_pynative.asnumpy(), 0.001, 0.001)
    assert np.allclose(hy.asnumpy(), hy_pynative.asnumpy(), 0.001, 0.001)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_sit_gru_grad_input_3_32_32_is_32_hs_16():
    """Gradients w.r.t. the GRU input and h0 must agree between GRAPH_MODE and
    PYNATIVE_MODE when both nets are loaded with the same random weights."""
    input_size = 32
    hidden_size = 16
    has_bias = True
    bidirectional = False
    num_layers = 1
    num_directions = 1
    # One shared set of weights/biases for both execution modes.
    fact = GRUWeightBias(num_layers, has_bias, input_size, num_directions, hidden_size, bidirectional)
    w_ih_list, w_hh_list, b_ih_list, b_hh_list = fact.get_weight_bias()
    h0 = Tensor(np.random.randn(num_layers * num_directions, 32, 16).astype(np.float32))
    input_ms = Tensor(np.random.randn(3, 32, 32).astype(np.float32))
    # Graph-mode gradients.
    context.set_context(mode=context.GRAPH_MODE)
    net = GRU(input_size=input_size, hidden_size=16, num_layers=num_layers, has_bias=has_bias, batch_first=False,
              bidirectional=bidirectional, dropout=0.0)
    net.gru.w_ih_list = w_ih_list
    net.gru.w_hh_list = w_hh_list
    net.gru.b_ih_list = b_ih_list
    net.gru.b_hh_list = b_hh_list
    grad_net_inp = GradOfAllInputsAndParams(net, sens_param=False)
    grad_net_inp.set_train()
    # out_grad[0] is d(out)/d(input), out_grad[1] is d(out)/d(h0).
    out_grad, _ = grad_net_inp(input_ms, h0)
    x_grad = out_grad[0].asnumpy()
    h_grad = out_grad[1].asnumpy()
    # Pynative-mode gradients with identical weights.
    context.set_context(mode=context.PYNATIVE_MODE)
    net_pynative = GRU(input_size=input_size, hidden_size=16, num_layers=num_layers, has_bias=has_bias,
                       batch_first=False, bidirectional=bidirectional, dropout=0.0)
    net_pynative.gru.w_ih_list = w_ih_list
    net_pynative.gru.w_hh_list = w_hh_list
    net_pynative.gru.b_ih_list = b_ih_list
    net_pynative.gru.b_hh_list = b_hh_list
    grad_net_inp_pynative = GradOfAllInputsAndParams(net_pynative, sens_param=False)
    grad_net_inp_pynative.set_train()
    out_grad_pynative, _ = grad_net_inp_pynative(input_ms, h0)
    x_grad_pynative = out_grad_pynative[0].asnumpy()
    h_grad_pynative = out_grad_pynative[1].asnumpy()
    assert np.allclose(x_grad, x_grad_pynative, 0.001, 0.001)
    assert np.allclose(h_grad, h_grad_pynative, 0.001, 0.001)
| true | true |
1c30dfddcaffa3a606e4b35353e48e34e5244bde | 5,051 | py | Python | setup.py | joeddav/datasets | f955fa2d4785a1cea381a7999e0c5d0c0314046b | [
"Apache-2.0"
] | null | null | null | setup.py | joeddav/datasets | f955fa2d4785a1cea381a7999e0c5d0c0314046b | [
"Apache-2.0"
] | null | null | null | setup.py | joeddav/datasets | f955fa2d4785a1cea381a7999e0c5d0c0314046b | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
""" HuggingFace/Datasets is an open library of NLP datasets.
Note:
VERSION needs to be formatted following the MAJOR.MINOR.PATCH convention
(we need to follow this convention to be able to retrieve versioned scripts)
Simple check list for release from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
First pin the SCRIPTS_VERSION to VERSION in __init__.py (but don't commit this change)
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
   Then change the SCRIPTS_VERSION back to "master" in __init__.py (but don't commit this change)
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
(pypi suggest using twine as other methods upload files via plaintext.)
You may have to specify the repository url, use the following command then:
twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi datasets
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
8. Update the documentation commit in .circleci/deploy.sh for the accurate documentation to be displayed
9. Update README.md to redirect to correct documentation.
"""
import datetime
import itertools
import os
import sys
from setuptools import find_packages
from setuptools import setup
DOCLINES = __doc__.split('\n')
REQUIRED_PKGS = [
# We use numpy>=1.17 to have np.random.Generator (Dataset shuffling)
'numpy>=1.17',
# Backend and serialization. Minimum 0.17.1 to support extension array
'pyarrow>=0.17.1',
# For smart caching dataset processing
'dill',
# For performance gains with apache arrow
'pandas',
# for downloading datasets over HTTPS
'requests>=2.19.0',
# progress bars in download and scripts
"tqdm >= 4.27",
# dataclasses for Python versions that don't have it
"dataclasses;python_version<'3.7'",
# filesystem locks e.g. to prevent parallel downloads
"filelock",
# for fast hashing
"xxhash"
]
# Exact version pins used only by the benchmark scripts (reproducible timings).
BENCHMARKS_REQUIRE = [
    'numpy==1.18.5',
    'tensorflow==2.3.0',
    'torch==1.6.0',
    'transformers==3.0.2',
]
# Everything the test suite needs: dataset-specific parsers plus both DL backends.
TESTS_REQUIRE = [
    'apache-beam',
    'absl-py',
    'bs4',
    'elasticsearch',
    'faiss-cpu',
    'langdetect',
    'mwparserfromhell',
    'nltk',
    'pytest',
    'pytest-xdist',
    'tensorflow',
    'torch',
    'tldextract',
    'transformers',
    'zstandard'
]
# Formatting / linting toolchain.
QUALITY_REQUIRE = [
    "black",
    "isort",
    "flake8==3.7.9",
]
# Optional feature sets, installable as `pip install datasets[<name>]`.
EXTRAS_REQUIRE = {
    'apache-beam': ['apache-beam'],
    'tensorflow': ['tensorflow>=2.2.0'],
    'tensorflow_gpu': ['tensorflow-gpu>=2.2.0'],
    'torch': ['torch'],
    'dev': TESTS_REQUIRE + QUALITY_REQUIRE,
    'tests': TESTS_REQUIRE,
    'quality': QUALITY_REQUIRE,
    'benchmarks': BENCHMARKS_REQUIRE,
    'docs': ["recommonmark", "sphinx==3.1.2", "sphinx-markdown-tables", "sphinx-rtd-theme==0.4.3", "sphinx-copybutton"]
}
# Package metadata. The version string must follow MAJOR.MINOR.PATCH so that
# versioned dataset scripts can be retrieved (see the module docstring).
setup(
    name='datasets',
    version="1.0.0",
    description=DOCLINES[0],
    long_description='\n'.join(DOCLINES[2:]),
    author='HuggingFace Inc.',
    author_email='thomas@huggingface.co',
    url='https://github.com/huggingface/datasets',
    download_url='https://github.com/huggingface/datasets/tags',
    license='Apache 2.0',
    package_dir={"": "src"},
    packages=find_packages("src"),
    package_data={
        'datasets': [
            'scripts/templates/*',
        ],
    },
    scripts=["datasets-cli"],
    install_requires=REQUIRED_PKGS,
    extras_require=EXTRAS_REQUIRE,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    keywords='datasets machine learning datasets metrics',
)
| 30.98773 | 119 | 0.681251 |
import datetime
import itertools
import os
import sys
from setuptools import find_packages
from setuptools import setup
DOCLINES = __doc__.split('\n')
REQUIRED_PKGS = [
'numpy>=1.17',
'pyarrow>=0.17.1',
'dill',
'pandas',
'requests>=2.19.0',
"tqdm >= 4.27",
"dataclasses;python_version<'3.7'",
# filesystem locks e.g. to prevent parallel downloads
"filelock",
# for fast hashing
"xxhash"
]
BENCHMARKS_REQUIRE = [
'numpy==1.18.5',
'tensorflow==2.3.0',
'torch==1.6.0',
'transformers==3.0.2',
]
TESTS_REQUIRE = [
'apache-beam',
'absl-py',
'bs4',
'elasticsearch',
'faiss-cpu',
'langdetect',
'mwparserfromhell',
'nltk',
'pytest',
'pytest-xdist',
'tensorflow',
'torch',
'tldextract',
'transformers',
'zstandard'
]
QUALITY_REQUIRE = [
"black",
"isort",
"flake8==3.7.9",
]
EXTRAS_REQUIRE = {
'apache-beam': ['apache-beam'],
'tensorflow': ['tensorflow>=2.2.0'],
'tensorflow_gpu': ['tensorflow-gpu>=2.2.0'],
'torch': ['torch'],
'dev': TESTS_REQUIRE + QUALITY_REQUIRE,
'tests': TESTS_REQUIRE,
'quality': QUALITY_REQUIRE,
'benchmarks': BENCHMARKS_REQUIRE,
'docs': ["recommonmark", "sphinx==3.1.2", "sphinx-markdown-tables", "sphinx-rtd-theme==0.4.3", "sphinx-copybutton"]
}
setup(
name='datasets',
version="1.0.0",
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
author='HuggingFace Inc.',
author_email='thomas@huggingface.co',
url='https://github.com/huggingface/datasets',
download_url='https://github.com/huggingface/datasets/tags',
license='Apache 2.0',
package_dir={"": "src"},
packages=find_packages("src"),
package_data={
'datasets': [
'scripts/templates/*',
],
},
scripts=["datasets-cli"],
install_requires=REQUIRED_PKGS,
extras_require=EXTRAS_REQUIRE,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords='datasets machine learning datasets metrics',
)
| true | true |
1c30e01cd95229d02d5dc863333ad64d5d70e705 | 18,025 | py | Python | data/user_input/plots/structural/plotStructuralFrequencyResponseInput.py | open-pulse/OpenPulse | ef49cd1ff672821c4b57729c0ef9f4ff5a83eadf | [
"MIT"
] | 23 | 2020-01-14T12:49:11.000Z | 2021-11-10T05:19:29.000Z | data/user_input/plots/structural/plotStructuralFrequencyResponseInput.py | open-pulse/OpenPulse | ef49cd1ff672821c4b57729c0ef9f4ff5a83eadf | [
"MIT"
] | 101 | 2020-01-23T19:29:00.000Z | 2022-03-15T17:56:23.000Z | data/user_input/plots/structural/plotStructuralFrequencyResponseInput.py | open-pulse/OpenPulse | ef49cd1ff672821c4b57729c0ef9f4ff5a83eadf | [
"MIT"
] | 3 | 2020-01-14T12:49:26.000Z | 2022-01-13T02:06:53.000Z | from PyQt5.QtWidgets import QLineEdit, QDialog, QFileDialog, QWidget, QTreeWidget, QToolButton, QRadioButton, QMessageBox, QTreeWidgetItem, QTabWidget, QLabel, QCheckBox, QPushButton, QSpinBox
from os.path import basename
from PyQt5.QtGui import QIcon
from PyQt5.QtGui import QColor, QBrush
from PyQt5.QtCore import Qt
from PyQt5 import uic
import configparser
import os
import matplotlib.pyplot as plt
import numpy as np
from pulse.postprocessing.plot_structural_data import get_structural_frf
from data.user_input.project.printMessageInput import PrintMessageInput
window_title1 = "ERROR MESSAGE"
window_title2 = "WARNING MESSAGE"
class SnaptoCursor(object):
    """Crosshair cursor that snaps to the nearest sampled point of a plotted curve."""
    def __init__(self, ax, x, y, show_cursor):
        self.ax = ax
        self.x = x
        self.y = y
        self.show_cursor = show_cursor
        if show_cursor:
            # Crosshair lines plus a square marker initially on the first sample.
            # NOTE(review): ymin receives a data value here, but Matplotlib expects
            # an axes fraction in [0, 1] — behavior kept as-is.
            self.vl = self.ax.axvline(x=np.min(x), ymin=np.min(y), color='k', alpha=0.3, label='_nolegend_')
            self.hl = self.ax.axhline(color='k', alpha=0.3, label='_nolegend_')
            self.marker, = ax.plot(x[0], y[0], markersize=4, marker="s", color=[0,0,0], zorder=3)
    def mouse_move(self, event):
        """Snap the crosshair/marker to the sample nearest the mouse x position."""
        if not self.show_cursor:
            return
        if not event.inaxes:
            return
        x, y = event.xdata, event.ydata
        if x >= np.max(self.x):
            return
        idx = np.searchsorted(self.x, [x])[0]
        snap_x = self.x[idx]
        snap_y = self.y[idx]
        self.vl.set_xdata(snap_x)
        self.hl.set_ydata(snap_y)
        self.marker.set_data([snap_x], [snap_y])
        self.marker.set_label("x: %1.2f // y: %4.2e" % (snap_x, snap_y))
        plt.legend(handles=[self.marker], loc='lower left', title=r'$\bf{Cursor}$ $\bf{coordinates:}$')
        self.ax.figure.canvas.draw_idle()
class PlotStructuralFrequencyResponseInput(QDialog):
    def __init__(self, project, opv, analysisMethod, frequencies, solution, *args, **kwargs):
        """Build the structural FRF plot dialog.

        Loads the Qt Designer .ui file, caches the project/solver data needed
        to evaluate nodal responses, looks up every widget, wires its signals,
        then blocks on exec_().
        """
        super().__init__(*args, **kwargs)
        uic.loadUi('data/user_input/ui/Plots/Results/Structural/plotStructuralFrequencyResponseInput.ui', self)
        icons_path = 'data\\icons\\'
        self.icon = QIcon(icons_path + 'pulse.png')
        self.setWindowIcon(self.icon)
        self.setWindowFlags(Qt.WindowStaysOnTopHint)
        self.setWindowModality(Qt.WindowModal)
        # Register this dialog with the renderer and grab the current node picks.
        self.opv = opv
        self.opv.setInputObject(self)
        self.list_node_IDs = self.opv.getListPickedPoints()
        # NOTE(review): attribute name "projec" (sic) kept as-is; other code may rely on it.
        self.projec = project
        self.preprocessor = project.preprocessor
        self.before_run = self.preprocessor.get_model_checks()
        self.nodes = self.preprocessor.nodes
        self.analysisMethod = analysisMethod
        self.frequencies = frequencies
        self.solution = solution
        # Plot/export state defaults.
        self.userPath = os.path.expanduser('~')
        self.save_path = ""
        self.node_ID = 0
        self.imported_data = None
        self.localDof = None
        # Widget lookups and signal wiring.
        self.lineEdit_nodeID = self.findChild(QLineEdit, 'lineEdit_nodeID')
        self.lineEdit_FileName = self.findChild(QLineEdit, 'lineEdit_FileName')
        self.lineEdit_ImportResultsPath = self.findChild(QLineEdit, 'lineEdit_ImportResultsPath')
        self.lineEdit_SaveResultsPath = self.findChild(QLineEdit, 'lineEdit_SaveResultsPath')
        self.toolButton_ChooseFolderImport = self.findChild(QToolButton, 'toolButton_ChooseFolderImport')
        self.toolButton_ChooseFolderImport.clicked.connect(self.choose_path_import_results)
        self.toolButton_ChooseFolderExport = self.findChild(QToolButton, 'toolButton_ChooseFolderExport')
        self.toolButton_ChooseFolderExport.clicked.connect(self.choose_path_export_results)
        self.toolButton_ExportResults = self.findChild(QToolButton, 'toolButton_ExportResults')
        self.toolButton_ExportResults.clicked.connect(self.ExportResults)
        self.toolButton_ResetPlot = self.findChild(QToolButton, 'toolButton_ResetPlot')
        self.toolButton_ResetPlot.clicked.connect(self.reset_imported_data)
        self.lineEdit_skiprows = self.findChild(QSpinBox, 'spinBox')
        self.checkBox_cursor = self.findChild(QCheckBox, 'checkBox_cursor')
        self.cursor = self.checkBox_cursor.isChecked()
        self.checkBox_cursor.clicked.connect(self.update_cursor)
        # DOF selection radio buttons (3 translations + 3 rotations).
        self.radioButton_ux = self.findChild(QRadioButton, 'radioButton_ux')
        self.radioButton_uy = self.findChild(QRadioButton, 'radioButton_uy')
        self.radioButton_uz = self.findChild(QRadioButton, 'radioButton_uz')
        self.radioButton_rx = self.findChild(QRadioButton, 'radioButton_rx')
        self.radioButton_ry = self.findChild(QRadioButton, 'radioButton_ry')
        self.radioButton_rz = self.findChild(QRadioButton, 'radioButton_rz')
        self.Ux = self.radioButton_ux.isChecked()
        self.Uy = self.radioButton_uy.isChecked()
        self.Uz = self.radioButton_uz.isChecked()
        self.Rx = self.radioButton_rx.isChecked()
        self.Ry = self.radioButton_ry.isChecked()
        self.Rz = self.radioButton_rz.isChecked()
        # Y-axis representation (absolute / real / imaginary).
        self.radioButton_plotAbs = self.findChild(QRadioButton, 'radioButton_plotAbs')
        self.radioButton_plotReal = self.findChild(QRadioButton, 'radioButton_plotReal')
        self.radioButton_plotImag = self.findChild(QRadioButton, 'radioButton_plotImag')
        self.radioButton_plotAbs.clicked.connect(self.radioButtonEvent_YAxis)
        self.radioButton_plotReal.clicked.connect(self.radioButtonEvent_YAxis)
        self.radioButton_plotImag.clicked.connect(self.radioButtonEvent_YAxis)
        self.plotAbs = self.radioButton_plotAbs.isChecked()
        self.plotReal = self.radioButton_plotReal.isChecked()
        self.plotImag = self.radioButton_plotImag.isChecked()
        # Export format selection.
        self.radioButton_Absolute = self.findChild(QRadioButton, 'radioButton_Absolute')
        self.radioButton_Real_Imaginary = self.findChild(QRadioButton, 'radioButton_Real_Imaginary')
        self.radioButton_Absolute.clicked.connect(self.radioButtonEvent_save_data)
        self.radioButton_Real_Imaginary.clicked.connect(self.radioButtonEvent_save_data)
        self.save_Absolute = self.radioButton_Absolute.isChecked()
        self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
        # Frequency-domain differentiation (none / single / double).
        self.radioButton_NoneDiff = self.findChild(QRadioButton, 'radioButton_NoneDiff')
        self.radioButton_SingleDiff = self.findChild(QRadioButton, 'radioButton_SingleDiff')
        self.radioButton_DoubleDiff = self.findChild(QRadioButton, 'radioButton_DoubleDiff')
        self.radioButton_NoneDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)
        self.radioButton_SingleDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)
        self.radioButton_DoubleDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)
        self.NoneDiff = self.radioButton_NoneDiff.isChecked()
        self.SingleDiff = self.radioButton_SingleDiff.isChecked()
        self.DoubleDiff = self.radioButton_DoubleDiff.isChecked()
        self.tabWidget_plot_results = self.findChild(QTabWidget, "tabWidget_plot_results")
        self.tab_plot = self.tabWidget_plot_results.findChild(QWidget, "tab_plot")
        self.pushButton_AddImportedPlot = self.findChild(QPushButton, 'pushButton_AddImportedPlot')
        self.pushButton_AddImportedPlot.clicked.connect(self.ImportResults)
        self.pushButton = self.findChild(QPushButton, 'pushButton')
        self.pushButton.clicked.connect(self.check)
        self.writeNodes(self.list_node_IDs)
        self.exec_()
    def update_cursor(self):
        # Sync the snap-cursor flag with the "show cursor" checkbox.
        self.cursor = self.checkBox_cursor.isChecked()
    def reset_imported_data(self):
        """Discard any previously imported reference curve and notify the user."""
        self.imported_data = None
        title = "Information"
        message = "The plot data has been reseted."
        PrintMessageInput([title, message, window_title2])
def writeNodes(self, list_node_ids):
text = ""
for node in list_node_ids:
text += "{}, ".format(node)
self.lineEdit_nodeID.setText(text)
def update(self):
self.list_node_IDs = self.opv.getListPickedPoints()
if self.list_node_IDs != []:
self.writeNodes(self.list_node_IDs)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
self.check()
elif event.key() == Qt.Key_Escape:
self.close()
    def radioButtonEvent_YAxis(self):
        # Cache which y-axis representation (abs / real / imag) is selected.
        self.plotAbs = self.radioButton_plotAbs.isChecked()
        self.plotReal = self.radioButton_plotReal.isChecked()
        self.plotImag = self.radioButton_plotImag.isChecked()
    def radioButtonEvent_save_data(self):
        # Cache the chosen export format (absolute vs. real+imaginary columns).
        self.save_Absolute = self.radioButton_Absolute.isChecked()
        self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
    def radioButtonEvent_modify_spectrum(self):
        # Cache the frequency-domain differentiation choice (none / single / double).
        self.NoneDiff = self.radioButton_NoneDiff.isChecked()
        self.SingleDiff = self.radioButton_SingleDiff.isChecked()
        self.DoubleDiff = self.radioButton_DoubleDiff.isChecked()
    def choose_path_import_results(self):
        # Ask for a .dat/.csv file; keep both the full path and its basename.
        self.import_path, _ = QFileDialog.getOpenFileName(None, 'Open file', self.userPath, 'Files (*.dat; *.csv)')
        self.import_name = basename(self.import_path)
        self.lineEdit_ImportResultsPath.setText(str(self.import_path))
def ImportResults(self):
try:
skiprows = int(self.lineEdit_skiprows.text())
self.imported_data = np.loadtxt(self.import_path, delimiter=",", skiprows=skiprows)
self.legend_imported = "imported data: "+ basename(self.import_path).split(".")[0]
self.tabWidget_plot_results.setCurrentWidget(self.tab_plot)
title = "Information"
message = "The results have been imported."
PrintMessageInput([title, message, window_title2])
except Exception as e:
title = "ERROR WHILE LOADING TABLE"
message = [str(e) + " It is recommended to skip the header rows."]
PrintMessageInput([title, message[0], window_title1])
return
    def choose_path_export_results(self):
        # Ask for a destination folder; keep both the full path and its basename.
        self.save_path = QFileDialog.getExistingDirectory(None, 'Choose a folder to export the results', self.userPath)
        self.save_name = basename(self.save_path)
        self.lineEdit_SaveResultsPath.setText(str(self.save_path))
def check(self, export=False):
lineEdit_nodeID = self.lineEdit_nodeID.text()
stop, self.node_ID = self.before_run.check_input_NodeID(lineEdit_nodeID, single_ID=True)
if stop:
return True
self.localDof = None
if self.SingleDiff:
_unit_label = "m/s"
elif self.DoubleDiff:
_unit_label = "m/s²"
else:
_unit_label = "m"
if self.radioButton_ux.isChecked():
self.localDof = 0
self.localdof_label = "Ux"
self.unit_label = _unit_label
if self.radioButton_uy.isChecked():
self.localDof = 1
self.localdof_label = "Uy"
self.unit_label = _unit_label
if self.radioButton_uz.isChecked():
self.localDof = 2
self.localdof_label = "Uz"
self.unit_label = _unit_label
if self.radioButton_rx.isChecked():
self.localDof = 3
self.localdof_label = "Rx"
self.unit_label = _unit_label
if self.radioButton_ry.isChecked():
self.localDof = 4
self.localdof_label = "Ry"
self.unit_label = _unit_label
if self.radioButton_rz.isChecked():
self.localDof = 5
self.localdof_label = "Rz"
self.unit_label = _unit_label
if self.SingleDiff:
_unit_label = "rad/s"
elif self.DoubleDiff:
_unit_label = "rad/s²"
else:
_unit_label = "rad"
if not export:
self.plot()
return False
    def ExportResults(self):
        """Write the selected nodal FRF to <folder>/<name>.dat as CSV columns."""
        # Validate file name and destination folder before anything else.
        if self.lineEdit_FileName.text() != "":
            if self.save_path != "":
                self.export_path_folder = self.save_path + "/"
            else:
                title = "None folder selected"
                # NOTE(review): "Plese" typo in this user-facing message — fix separately.
                message = "Plese, choose a folder before trying export the results."
                PrintMessageInput([title, message, window_title1])
                return
        else:
            title = "Empty file name"
            message = "Inform a file name before trying export the results."
            PrintMessageInput([title, message, window_title1])
            return
        # check(export=True) validates the node/DOF without drawing the plot.
        if self.check(export=True):
            return
        freq = self.frequencies
        self.export_path = self.export_path_folder + self.lineEdit_FileName.text() + ".dat"
        response = self.get_response()
        # NOTE(review): if neither save flag is set, data_to_export is unbound;
        # presumably the radio group guarantees one is always selected — confirm.
        if self.save_Absolute:
            header = ("Frequency[Hz], Real part [{}], Imaginary part [{}], Absolute [{}]").format(self.unit_label, self.unit_label, self.unit_label)
            data_to_export = np.array([freq, np.real(response), np.imag(response), np.abs(response)]).T
        elif self.save_Real_Imaginary:
            header = ("Frequency[Hz], Real part [{}], Imaginary part [{}]").format(self.unit_label, self.unit_label)
            data_to_export = np.array([freq, np.real(response), np.imag(response)]).T
        np.savetxt(self.export_path, data_to_export, delimiter=",", header=header)
        title = "Information"
        message = "The results have been exported."
        PrintMessageInput([title, message, window_title2])
def get_response(self):
response = get_structural_frf(self.preprocessor, self.solution, self.node_ID, self.localDof)
if self.SingleDiff:
output_data = response*(1j*2*np.pi)*self.frequencies
elif self.DoubleDiff:
output_data = response*((1j*2*np.pi*self.frequencies)**2)
else:
output_data = response
return output_data
    def plot(self):
        """Draw the structural FRF (and optional imported reference curve).

        A log y-axis is used only for absolute plots whose data contain no
        exact zeros; otherwise a linear scale (or semilogy skipping the first
        sample) is chosen branch-by-branch below.
        """
        fig = plt.figure(figsize=[12,7])
        ax = fig.add_subplot(1,1,1)
        frequencies = self.frequencies
        response = self.get_response()
        # Pick the imported column matching the selected y-axis representation.
        if self.imported_data is not None:
            data = self.imported_data
            imported_Xvalues = data[:,0]
            if self.plotAbs:
                imported_Yvalues = np.abs(data[:,1] + 1j*data[:,2])
            elif self.plotReal:
                imported_Yvalues = data[:,1]
            elif self.plotImag:
                imported_Yvalues = data[:,2]
        if self.plotAbs:
            response = np.abs(response)
            ax.set_ylabel(("Structural Response - Absolute [{}]").format(self.unit_label), fontsize = 14, fontweight = 'bold')
            # Log scale only if no curve contains an exact zero.
            # NOTE(review): 'nonposy' was renamed 'nonpositive' in Matplotlib 3.3 —
            # confirm the pinned Matplotlib version.
            if not float(0) in response:
                if self.imported_data is None:
                    ax.set_yscale('log', nonposy='clip')
                else:
                    if not float(0) in imported_Yvalues:
                        ax.set_yscale('log', nonposy='clip')
        elif self.plotReal:
            response = np.real(response)
            ax.set_ylabel(("Structural Response - Real [{}]").format(self.unit_label), fontsize = 14, fontweight = 'bold')
        elif self.plotImag:
            response = np.imag(response)
            ax.set_ylabel(("Structural Response - Imaginary [{}]").format(self.unit_label), fontsize = 14, fontweight = 'bold')
        #cursor = Cursor(ax)
        cursor = SnaptoCursor(ax, frequencies, response, self.cursor)
        plt.connect('motion_notify_event', cursor.mouse_move)
        legend_label = "Response {} at node {}".format(self.localdof_label, self.node_ID)
        if self.imported_data is None:
            # Single curve: semilogy when strictly nonzero, else linear;
            # semilogy without the first sample when only response[0] == 0.
            if float(0) in response or self.plotReal or self.plotImag:
                if float(0) in response[1:] or self.plotReal or self.plotImag:
                    first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
                else:
                    first_plot, = plt.semilogy(frequencies[1:], response[1:], color=[1,0,0], linewidth=2, label=legend_label)
            else:
                first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
            _legends = plt.legend(handles=[first_plot], labels=[legend_label], loc='upper right')
        else:
            # Two curves: same scale logic, applied to both computed and imported data.
            if float(0) in response or float(0) in imported_Yvalues or self.plotReal or self.plotImag:
                if float(0) in response[1:] or float(0) in imported_Yvalues[1:] or self.plotReal or self.plotImag:
                    first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2)
                    second_plot, = plt.plot(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
                else:
                    first_plot, = plt.semilogy(frequencies[1:], response[1:], color=[1,0,0], linewidth=2, label=legend_label)
                    second_plot, = plt.semilogy(imported_Xvalues[1:], imported_Yvalues[1:], color=[0,0,1], linewidth=1, linestyle="--")
            else:
                first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
                second_plot, = plt.semilogy(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
            _legends = plt.legend(handles=[first_plot, second_plot], labels=[legend_label, self.legend_imported], loc='upper right')
        plt.gca().add_artist(_legends)
        ax.set_title(('STRUCTURAL FREQUENCY RESPONSE - {}').format(self.analysisMethod.upper()), fontsize = 16, fontweight = 'bold')
        ax.set_xlabel(('Frequency [Hz]'), fontsize = 14, fontweight = 'bold')
        plt.show()
from os.path import basename
from PyQt5.QtGui import QIcon
from PyQt5.QtGui import QColor, QBrush
from PyQt5.QtCore import Qt
from PyQt5 import uic
import configparser
import os
import matplotlib.pyplot as plt
import numpy as np
from pulse.postprocessing.plot_structural_data import get_structural_frf
from data.user_input.project.printMessageInput import PrintMessageInput
window_title1 = "ERROR MESSAGE"
window_title2 = "WARNING MESSAGE"
class SnaptoCursor(object):
def __init__(self, ax, x, y, show_cursor):
self.ax = ax
self.x = x
self.y = y
self.show_cursor = show_cursor
if show_cursor:
self.vl = self.ax.axvline(x=np.min(x), ymin=np.min(y), color='k', alpha=0.3, label='_nolegend_')
self.hl = self.ax.axhline(color='k', alpha=0.3, label='_nolegend_')
self.marker, = ax.plot(x[0], y[0], markersize=4, marker="s", color=[0,0,0], zorder=3)
def mouse_move(self, event):
if self.show_cursor:
if not event.inaxes: return
x, y = event.xdata, event.ydata
if x>=np.max(self.x): return
indx = np.searchsorted(self.x, [x])[0]
x = self.x[indx]
y = self.y[indx]
self.vl.set_xdata(x)
self.hl.set_ydata(y)
self.marker.set_data([x],[y])
self.marker.set_label("x: %1.2f // y: %4.2e" % (x, y))
plt.legend(handles=[self.marker], loc='lower left', title=r'$\bf{Cursor}$ $\bf{coordinates:}$')
self.ax.figure.canvas.draw_idle()
class PlotStructuralFrequencyResponseInput(QDialog):
def __init__(self, project, opv, analysisMethod, frequencies, solution, *args, **kwargs):
super().__init__(*args, **kwargs)
uic.loadUi('data/user_input/ui/Plots/Results/Structural/plotStructuralFrequencyResponseInput.ui', self)
icons_path = 'data\\icons\\'
self.icon = QIcon(icons_path + 'pulse.png')
self.setWindowIcon(self.icon)
self.setWindowFlags(Qt.WindowStaysOnTopHint)
self.setWindowModality(Qt.WindowModal)
self.opv = opv
self.opv.setInputObject(self)
self.list_node_IDs = self.opv.getListPickedPoints()
self.projec = project
self.preprocessor = project.preprocessor
self.before_run = self.preprocessor.get_model_checks()
self.nodes = self.preprocessor.nodes
self.analysisMethod = analysisMethod
self.frequencies = frequencies
self.solution = solution
self.userPath = os.path.expanduser('~')
self.save_path = ""
self.node_ID = 0
self.imported_data = None
self.localDof = None
self.lineEdit_nodeID = self.findChild(QLineEdit, 'lineEdit_nodeID')
self.lineEdit_FileName = self.findChild(QLineEdit, 'lineEdit_FileName')
self.lineEdit_ImportResultsPath = self.findChild(QLineEdit, 'lineEdit_ImportResultsPath')
self.lineEdit_SaveResultsPath = self.findChild(QLineEdit, 'lineEdit_SaveResultsPath')
self.toolButton_ChooseFolderImport = self.findChild(QToolButton, 'toolButton_ChooseFolderImport')
self.toolButton_ChooseFolderImport.clicked.connect(self.choose_path_import_results)
self.toolButton_ChooseFolderExport = self.findChild(QToolButton, 'toolButton_ChooseFolderExport')
self.toolButton_ChooseFolderExport.clicked.connect(self.choose_path_export_results)
self.toolButton_ExportResults = self.findChild(QToolButton, 'toolButton_ExportResults')
self.toolButton_ExportResults.clicked.connect(self.ExportResults)
self.toolButton_ResetPlot = self.findChild(QToolButton, 'toolButton_ResetPlot')
self.toolButton_ResetPlot.clicked.connect(self.reset_imported_data)
self.lineEdit_skiprows = self.findChild(QSpinBox, 'spinBox')
self.checkBox_cursor = self.findChild(QCheckBox, 'checkBox_cursor')
self.cursor = self.checkBox_cursor.isChecked()
self.checkBox_cursor.clicked.connect(self.update_cursor)
self.radioButton_ux = self.findChild(QRadioButton, 'radioButton_ux')
self.radioButton_uy = self.findChild(QRadioButton, 'radioButton_uy')
self.radioButton_uz = self.findChild(QRadioButton, 'radioButton_uz')
self.radioButton_rx = self.findChild(QRadioButton, 'radioButton_rx')
self.radioButton_ry = self.findChild(QRadioButton, 'radioButton_ry')
self.radioButton_rz = self.findChild(QRadioButton, 'radioButton_rz')
self.Ux = self.radioButton_ux.isChecked()
self.Uy = self.radioButton_uy.isChecked()
self.Uz = self.radioButton_uz.isChecked()
self.Rx = self.radioButton_rx.isChecked()
self.Ry = self.radioButton_ry.isChecked()
self.Rz = self.radioButton_rz.isChecked()
self.radioButton_plotAbs = self.findChild(QRadioButton, 'radioButton_plotAbs')
self.radioButton_plotReal = self.findChild(QRadioButton, 'radioButton_plotReal')
self.radioButton_plotImag = self.findChild(QRadioButton, 'radioButton_plotImag')
self.radioButton_plotAbs.clicked.connect(self.radioButtonEvent_YAxis)
self.radioButton_plotReal.clicked.connect(self.radioButtonEvent_YAxis)
self.radioButton_plotImag.clicked.connect(self.radioButtonEvent_YAxis)
self.plotAbs = self.radioButton_plotAbs.isChecked()
self.plotReal = self.radioButton_plotReal.isChecked()
self.plotImag = self.radioButton_plotImag.isChecked()
self.radioButton_Absolute = self.findChild(QRadioButton, 'radioButton_Absolute')
self.radioButton_Real_Imaginary = self.findChild(QRadioButton, 'radioButton_Real_Imaginary')
self.radioButton_Absolute.clicked.connect(self.radioButtonEvent_save_data)
self.radioButton_Real_Imaginary.clicked.connect(self.radioButtonEvent_save_data)
self.save_Absolute = self.radioButton_Absolute.isChecked()
self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
self.radioButton_NoneDiff = self.findChild(QRadioButton, 'radioButton_NoneDiff')
self.radioButton_SingleDiff = self.findChild(QRadioButton, 'radioButton_SingleDiff')
self.radioButton_DoubleDiff = self.findChild(QRadioButton, 'radioButton_DoubleDiff')
self.radioButton_NoneDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)
self.radioButton_SingleDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)
self.radioButton_DoubleDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)
self.NoneDiff = self.radioButton_NoneDiff.isChecked()
self.SingleDiff = self.radioButton_SingleDiff.isChecked()
self.DoubleDiff = self.radioButton_DoubleDiff.isChecked()
self.tabWidget_plot_results = self.findChild(QTabWidget, "tabWidget_plot_results")
self.tab_plot = self.tabWidget_plot_results.findChild(QWidget, "tab_plot")
self.pushButton_AddImportedPlot = self.findChild(QPushButton, 'pushButton_AddImportedPlot')
self.pushButton_AddImportedPlot.clicked.connect(self.ImportResults)
self.pushButton = self.findChild(QPushButton, 'pushButton')
self.pushButton.clicked.connect(self.check)
self.writeNodes(self.list_node_IDs)
self.exec_()
    def update_cursor(self):
        """Sync the snap-to-cursor flag with the 'cursor' checkbox state."""
        self.cursor = self.checkBox_cursor.isChecked()
def reset_imported_data(self):
self.imported_data = None
title = "Information"
message = "The plot data has been reseted."
PrintMessageInput([title, message, window_title2])
def writeNodes(self, list_node_ids):
text = ""
for node in list_node_ids:
text += "{}, ".format(node)
self.lineEdit_nodeID.setText(text)
def update(self):
self.list_node_IDs = self.opv.getListPickedPoints()
if self.list_node_IDs != []:
self.writeNodes(self.list_node_IDs)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
self.check()
elif event.key() == Qt.Key_Escape:
self.close()
    def radioButtonEvent_YAxis(self):
        """Cache which y-axis representation (absolute/real/imaginary) is selected."""
        self.plotAbs = self.radioButton_plotAbs.isChecked()
        self.plotReal = self.radioButton_plotReal.isChecked()
        self.plotImag = self.radioButton_plotImag.isChecked()
    def radioButtonEvent_save_data(self):
        """Cache the selected export format (absolute vs. real/imaginary columns)."""
        self.save_Absolute = self.radioButton_Absolute.isChecked()
        self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
    def radioButtonEvent_modify_spectrum(self):
        """Cache the spectrum differentiation choice (none / single / double).

        These flags drive the frequency-domain differentiation in
        get_response() and the unit labels in check().
        """
        self.NoneDiff = self.radioButton_NoneDiff.isChecked()
        self.SingleDiff = self.radioButton_SingleDiff.isChecked()
        self.DoubleDiff = self.radioButton_DoubleDiff.isChecked()
    def choose_path_import_results(self):
        """Open a file dialog and remember the chosen .dat/.csv path to import."""
        self.import_path, _ = QFileDialog.getOpenFileName(None, 'Open file', self.userPath, 'Files (*.dat; *.csv)')
        self.import_name = basename(self.import_path)
        self.lineEdit_ImportResultsPath.setText(str(self.import_path))
def ImportResults(self):
try:
skiprows = int(self.lineEdit_skiprows.text())
self.imported_data = np.loadtxt(self.import_path, delimiter=",", skiprows=skiprows)
self.legend_imported = "imported data: "+ basename(self.import_path).split(".")[0]
self.tabWidget_plot_results.setCurrentWidget(self.tab_plot)
title = "Information"
message = "The results have been imported."
PrintMessageInput([title, message, window_title2])
except Exception as e:
title = "ERROR WHILE LOADING TABLE"
message = [str(e) + " It is recommended to skip the header rows."]
PrintMessageInput([title, message[0], window_title1])
return
    def choose_path_export_results(self):
        """Open a folder dialog and remember the chosen export directory."""
        self.save_path = QFileDialog.getExistingDirectory(None, 'Choose a folder to export the results', self.userPath)
        self.save_name = basename(self.save_path)
        self.lineEdit_SaveResultsPath.setText(str(self.save_path))
def check(self, export=False):
lineEdit_nodeID = self.lineEdit_nodeID.text()
stop, self.node_ID = self.before_run.check_input_NodeID(lineEdit_nodeID, single_ID=True)
if stop:
return True
self.localDof = None
if self.SingleDiff:
_unit_label = "m/s"
elif self.DoubleDiff:
_unit_label = "m/s²"
else:
_unit_label = "m"
if self.radioButton_ux.isChecked():
self.localDof = 0
self.localdof_label = "Ux"
self.unit_label = _unit_label
if self.radioButton_uy.isChecked():
self.localDof = 1
self.localdof_label = "Uy"
self.unit_label = _unit_label
if self.radioButton_uz.isChecked():
self.localDof = 2
self.localdof_label = "Uz"
self.unit_label = _unit_label
if self.radioButton_rx.isChecked():
self.localDof = 3
self.localdof_label = "Rx"
self.unit_label = _unit_label
if self.radioButton_ry.isChecked():
self.localDof = 4
self.localdof_label = "Ry"
self.unit_label = _unit_label
if self.radioButton_rz.isChecked():
self.localDof = 5
self.localdof_label = "Rz"
self.unit_label = _unit_label
if self.SingleDiff:
_unit_label = "rad/s"
elif self.DoubleDiff:
_unit_label = "rad/s²"
else:
_unit_label = "rad"
if not export:
self.plot()
return False
def ExportResults(self):
if self.lineEdit_FileName.text() != "":
if self.save_path != "":
self.export_path_folder = self.save_path + "/"
else:
title = "None folder selected"
message = "Plese, choose a folder before trying export the results."
PrintMessageInput([title, message, window_title1])
return
else:
title = "Empty file name"
message = "Inform a file name before trying export the results."
PrintMessageInput([title, message, window_title1])
return
if self.check(export=True):
return
freq = self.frequencies
self.export_path = self.export_path_folder + self.lineEdit_FileName.text() + ".dat"
response = self.get_response()
if self.save_Absolute:
header = ("Frequency[Hz], Real part [{}], Imaginary part [{}], Absolute [{}]").format(self.unit_label, self.unit_label, self.unit_label)
data_to_export = np.array([freq, np.real(response), np.imag(response), np.abs(response)]).T
elif self.save_Real_Imaginary:
header = ("Frequency[Hz], Real part [{}], Imaginary part [{}]").format(self.unit_label, self.unit_label)
data_to_export = np.array([freq, np.real(response), np.imag(response)]).T
np.savetxt(self.export_path, data_to_export, delimiter=",", header=header)
title = "Information"
message = "The results have been exported."
PrintMessageInput([title, message, window_title2])
def get_response(self):
response = get_structural_frf(self.preprocessor, self.solution, self.node_ID, self.localDof)
if self.SingleDiff:
output_data = response*(1j*2*np.pi)*self.frequencies
elif self.DoubleDiff:
output_data = response*((1j*2*np.pi*self.frequencies)**2)
else:
output_data = response
return output_data
    def plot(self):
        """Render the nodal frequency response (plus an optional imported
        reference curve) in a Matplotlib window.

        Absolute plots use a log y-scale when no curve contains a zero
        value; real/imaginary plots stay linear. Blocks until the figure
        window is closed.
        """
        fig = plt.figure(figsize=[12,7])
        ax = fig.add_subplot(1,1,1)
        frequencies = self.frequencies
        response = self.get_response()
        if self.imported_data is not None:
            # Imported table layout: col 0 = frequency, col 1 = real, col 2 = imag.
            data = self.imported_data
            imported_Xvalues = data[:,0]
            if self.plotAbs:
                imported_Yvalues = np.abs(data[:,1] + 1j*data[:,2])
            elif self.plotReal:
                imported_Yvalues = data[:,1]
            elif self.plotImag:
                imported_Yvalues = data[:,2]
        if self.plotAbs:
            response = np.abs(response)
            ax.set_ylabel(("Structural Response - Absolute [{}]").format(self.unit_label), fontsize = 14, fontweight = 'bold')
            # Switch to a log scale only when no plotted curve contains zero.
            # NOTE(review): 'nonposy' was removed in Matplotlib 3.3; newer
            # versions expect nonpositive='clip' — confirm the pinned version.
            if not float(0) in response:
                if self.imported_data is None:
                    ax.set_yscale('log', nonposy='clip')
                else:
                    if not float(0) in imported_Yvalues:
                        ax.set_yscale('log', nonposy='clip')
        elif self.plotReal:
            response = np.real(response)
            ax.set_ylabel(("Structural Response - Real [{}]").format(self.unit_label), fontsize = 14, fontweight = 'bold')
        elif self.plotImag:
            response = np.imag(response)
            ax.set_ylabel(("Structural Response - Imaginary [{}]").format(self.unit_label), fontsize = 14, fontweight = 'bold')
        # Crosshair that snaps to the plotted curve when the checkbox is on.
        cursor = SnaptoCursor(ax, frequencies, response, self.cursor)
        plt.connect('motion_notify_event', cursor.mouse_move)
        legend_label = "Response {} at node {}".format(self.localdof_label, self.node_ID)
        if self.imported_data is None:
            # Fall back to a linear plot when zeros (or real/imag data) make a
            # semilog plot impossible; if only the first sample is zero, drop
            # it and keep the semilog scale.
            if float(0) in response or self.plotReal or self.plotImag:
                if float(0) in response[1:] or self.plotReal or self.plotImag:
                    first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
                else:
                    first_plot, = plt.semilogy(frequencies[1:], response[1:], color=[1,0,0], linewidth=2, label=legend_label)
            else:
                first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
            _legends = plt.legend(handles=[first_plot], labels=[legend_label], loc='upper right')
        else:
            # Same zero handling, but BOTH curves must qualify for semilog.
            if float(0) in response or float(0) in imported_Yvalues or self.plotReal or self.plotImag:
                if float(0) in response[1:] or float(0) in imported_Yvalues[1:] or self.plotReal or self.plotImag:
                    first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2)
                    second_plot, = plt.plot(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
                else:
                    first_plot, = plt.semilogy(frequencies[1:], response[1:], color=[1,0,0], linewidth=2, label=legend_label)
                    second_plot, = plt.semilogy(imported_Xvalues[1:], imported_Yvalues[1:], color=[0,0,1], linewidth=1, linestyle="--")
            else:
                first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
                second_plot, = plt.semilogy(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
            _legends = plt.legend(handles=[first_plot, second_plot], labels=[legend_label, self.legend_imported], loc='upper right')
        plt.gca().add_artist(_legends)
        ax.set_title(('STRUCTURAL FREQUENCY RESPONSE - {}').format(self.analysisMethod.upper()), fontsize = 16, fontweight = 'bold')
        ax.set_xlabel(('Frequency [Hz]'), fontsize = 14, fontweight = 'bold')
        plt.show()
1c30e0935d54d1c3ba212343db4032e9c8553c20 | 3,342 | py | Python | fill-gaps/fill_gaps.py | DOUGLASMENDES/Python-Scripts | 00021ede5e894a0e2fb43a33129bf1d9dc0c492d | [
"MIT"
] | 307 | 2019-05-17T21:34:12.000Z | 2022-03-28T20:03:44.000Z | fill-gaps/fill_gaps.py | DOUGLASMENDES/Python-Scripts | 00021ede5e894a0e2fb43a33129bf1d9dc0c492d | [
"MIT"
] | 8 | 2021-03-19T00:47:41.000Z | 2022-03-11T23:47:47.000Z | fill-gaps/fill_gaps.py | DOUGLASMENDES/Python-Scripts | 00021ede5e894a0e2fb43a33129bf1d9dc0c492d | [
"MIT"
] | 78 | 2019-05-23T00:51:28.000Z | 2022-02-01T21:25:24.000Z | #! python3
# fill_gaps.py
# Author: Kene Udeh
# Source: Automate the Boring stuff with python Ch. 9 Project
import os
import re
import shutil
def getFilesWithPrefix(folderPath, prefix):
    """Return the sorted names of numbered files ``<prefix><number><ext>``.

    Args:
        folderPath (str): path of the folder to search.
        prefix (str): filename prefix preceding the number.

    Returns:
        list[str]: matching file names, sorted lexicographically
        (fixed-width zero-padded numbering keeps this in numeric order).
    """
    # re.escape guards against prefixes containing regex metacharacters, and
    # the extension dot is matched literally (the original '.' matched ANY
    # character, so names like 'spam0012a' slipped through).
    fileRegex = re.compile(re.escape(prefix) + r'(\d+)(\.\w+)')
    return sorted(f for f in os.listdir(folderPath) if fileRegex.match(f))
def fillGaps(folderPath, prefix):
    """Renumber files so the numeric sequence has no gaps.

    Files named ``<prefix><number><ext>`` are renamed, in ascending order, so
    their numbers become consecutive starting from the smallest existing
    number. Zero padding is kept at the width of the largest existing number.

    Args:
        folderPath (str): path of the folder containing the files.
        prefix (str): filename prefix preceding the number.

    Returns:
        None
    """
    fileRegex = re.compile(re.escape(prefix) + r'(\d+)(\.\w+)')
    # Sorted ascending; fixed-width numbering keeps lexicographic == numeric.
    fileList = sorted(f for f in os.listdir(folderPath) if fileRegex.match(f))
    if not fileList:
        return  # nothing to renumber (the original crashed with IndexError)
    count = int(fileRegex.search(fileList[0]).group(1))   # smallest number
    max_length = len(fileRegex.search(fileList[-1]).group(1))  # pad width
    for file in fileList:
        mo = fileRegex.search(file)
        if int(mo.group(1)) != count:
            # Pad the NEW number. The original padded by the OLD number's
            # width, producing wrong-length names (e.g. 10 -> '02').
            newFileName = prefix + str(count).zfill(max_length) + mo.group(2)
            # Join against folderPath: os.path.abspath(file) resolved
            # relative to the CWD and broke for any other folder.
            shutil.move(os.path.join(folderPath, file),
                        os.path.join(folderPath, newFileName))
        count += 1
def insertGaps(folderPath, prefix, index):
    """Open a gap at *index* in the file numbering.

    Every file numbered >= *index* is shifted up by one, freeing *index*.
    No-op when *index* lies outside the existing numbering range or is
    already unused.

    Args:
        folderPath (str): path of the folder containing the files.
        prefix (str): filename prefix preceding the number.
        index (int): position where the gap is inserted.

    Returns:
        None
    """
    fileRegex = re.compile(re.escape(prefix) + r'(\d+)(\.\w+)')
    fileList = sorted(f for f in os.listdir(folderPath) if fileRegex.match(f))
    if not fileList:
        return
    max_length = len(fileRegex.search(fileList[-1]).group(1))  # pad width
    firstIndex = int(fileRegex.search(fileList[0]).group(1))
    lastIndex = int(fileRegex.search(fileList[-1]).group(1))
    if not (firstIndex <= index <= lastIndex):
        return  # gap index outside the numbering range
    # Find the first file whose number is >= index.
    i = 0
    currIndex = firstIndex
    while currIndex < index:
        i += 1
        currIndex = int(fileRegex.search(fileList[i]).group(1))
    if currIndex == index:  # index occupied -> shift the tail up by one
        # Iterate in reverse so a rename never clobbers a later file.
        for file in fileList[i:][::-1]:
            mo = fileRegex.search(file)
            newFileNum = int(mo.group(1)) + 1
            newFileName = prefix + str(newFileNum).zfill(max_length) + mo.group(2)
            # Join against folderPath (os.path.abspath(file) used the CWD).
            shutil.move(os.path.join(folderPath, file),
                        os.path.join(folderPath, newFileName))
if __name__ == "__main__":
    # Demo: create two files with a gap in the numbering, then close it.
    with open('spam001.txt', 'w') as s1, open('spam003.txt', 'w') as s3:
        s1.write('spam001')
        s3.write('spam003')
    fillGaps('.', 'spam')
    #insertGaps('.', 'spam', 2)
| 35.935484 | 121 | 0.59994 |
import os
import re
import shutil
def getFilesWithPrefix(folderPath, prefix):
fileRegex = re.compile(prefix+'(\d{1,})(.\w+)')
fileList = sorted( [file for file in os.listdir(folderPath) if fileRegex.match(file)] )
return fileList
def fillGaps(folderPath, prefix):
fileList = getFilesWithPrefix(folderPath, prefix)
fileRegex = re.compile(prefix+'(\d{1,})(.\w+)')
start = int(fileRegex.search(fileList[0]).group(1))
count = start
max_length = len(fileRegex.search(fileList[-1]).group(1))
for file in fileList:
mo = fileRegex.search(file)
fileNum = int(mo.group(1))
if fileNum != count:
newFileName = prefix + '0'*(max_length-len(str(fileNum))) + str(count) + mo.group(2)
shutil.move(os.path.abspath(file), os.path.abspath(newFileName))
count += 1
def insertGaps(folderPath, prefix, index):
fileList = getFilesWithPrefix(folderPath, prefix)
fileRegex = re.compile(prefix+'(\d{1,})(.\w+)')
max_length = len(fileRegex.search(fileList[-1]).group(1))
firstIndex = int(fileRegex.search(fileList[0]).group(1))
lastIndex = int(fileRegex.search(fileList[-1]).group(1))
if index >= firstIndex and index <= lastIndex:
i = 0
currIndex = firstIndex
while currIndex < index:
i += 1
currIndex = int(fileRegex.search(fileList[i]).group(1))
if currIndex == index:
for file in fileList[i:][::-1]:
mo = fileRegex.search(file)
newFileNum = int(mo.group(1)) + 1
newFileName = prefix + '0'*(max_length-len(str(newFileNum))) + str(newFileNum) + mo.group(2)
shutil.move(os.path.abspath(file), os.path.abspath(newFileName))
if __name__ == "__main__":
with open('spam001.txt', 'w') as s1, open('spam003.txt', 'w') as s3:
s1.write('spam001')
s3.write('spam003')
fillGaps('.', 'spam')
| true | true |
1c30e09656038523faf619660703ee1e184e6ac6 | 760 | py | Python | henon_heiles_system.py | cosmo-jana/numerics-physics-stuff | f5fb35c00c84ca713877e20c1d8186e76883cd28 | [
"MIT"
] | 1 | 2020-10-16T16:35:35.000Z | 2020-10-16T16:35:35.000Z | henon_heiles_system.py | cosmo-jana/numerics-physics-stuff | f5fb35c00c84ca713877e20c1d8186e76883cd28 | [
"MIT"
] | null | null | null | henon_heiles_system.py | cosmo-jana/numerics-physics-stuff | f5fb35c00c84ca713877e20c1d8186e76883cd28 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
def henon_heiles_rhs(t, s):
    """ODE right-hand side for the Hénon–Heiles system.

    The state is s = (x, y, px, py) and the forces correspond to the
    potential V = (x² + y²)/2 + x²·y − y³/3, i.e. −∂V/∂x and −∂V/∂y.
    The time argument t is unused (autonomous system) but required by
    ``scipy.integrate.solve_ivp``.
    """
    x, y, px, py = s
    force_x = -x - 2 * x * y
    force_y = (y ** 2 - x ** 2) - y
    return px, py, force_x, force_y
def henon_heiles_system(initial_pos, initial_vel,
                        time_span=100, num_samples=1000):
    """Integrate the Hénon–Heiles equations and plot the (x, y) trajectory.

    Args:
        initial_pos: starting (x, y) position (marked with a red dot).
        initial_vel: starting (px, py) values.
        time_span: integration runs over t in [0, time_span].
        num_samples: number of equally spaced output samples.

    Returns:
        The solve_ivp result object (trajectory in ``sol.y``).
    """
    # State vector order matches henon_heiles_rhs: (x, y, px, py).
    sol = solve_ivp(henon_heiles_rhs, (0, time_span),
                    tuple(initial_pos) + tuple(initial_vel),
                    t_eval=np.linspace(0, time_span, num_samples), method="BDF")
    plt.plot(sol.y[0, :], sol.y[1, :])
    # Mark the starting point in red.
    plt.plot([initial_pos[0]], [initial_pos[1]], "or")
    plt.title("Henon Heiles System")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.show()  # blocks until the figure window is closed
    return sol
if __name__ == "__main__":
    # Demo run: start at (x, y) = (0, 1) with a small x-velocity.
    henon_heiles_system((0, 1), (0.01, 0))
| 29.230769 | 72 | 0.619737 | import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
def henon_heiles_rhs(t, s):
x, y, px, py = s
Fx = - x - 2*x*y
Fy = - y - (x**2 - y**2)
return px, py, Fx, Fy
def henon_heiles_system(initial_pos, initial_vel,
time_span=100, num_samples=1000):
sol = solve_ivp(henon_heiles_rhs, (0, time_span),
tuple(initial_pos) + tuple(initial_vel),
t_eval=np.linspace(0, time_span, num_samples), method="BDF")
plt.plot(sol.y[0, :], sol.y[1, :])
plt.plot([initial_pos[0]], [initial_pos[1]], "or")
plt.title("Henon Heiles System")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
return sol
if __name__ == "__main__":
henon_heiles_system((0, 1), (0.01, 0))
| true | true |
1c30e21627c98fd595cb22f185b4fa349ea23a12 | 1,581 | py | Python | data/sampler.py | alexchungio/RetinaNet-Pytorch | 2eea76171407f050d03fd0313b6920421e4a3015 | [
"MIT"
] | null | null | null | data/sampler.py | alexchungio/RetinaNet-Pytorch | 2eea76171407f050d03fd0313b6920421e4a3015 | [
"MIT"
] | null | null | null | data/sampler.py | alexchungio/RetinaNet-Pytorch | 2eea76171407f050d03fd0313b6920421e4a3015 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#------------------------------------------------------
# @ File : sampler.py
# @ Description:
# @ Author : Alex Chung
# @ Contact : yonganzhong@outlook.com
# @ License : Copyright (c) 2017-2018
# @ Time : 2020/11/18 下午4:11
# @ Software : PyCharm
#-------------------------------------------------------
import random
import torchvision
from torch.utils.data.sampler import Sampler
class AspectRatioBasedSampler(Sampler):
    """Batch sampler that groups dataset indices by image aspect ratio.

    Each yielded item is one batch: a list of ``batch_size`` indices whose
    images have similar aspect ratios (so batches need less padding). The
    order of the batches is shuffled on every iteration; the contents of
    each batch are fixed at construction time.
    """

    def __init__(self, data_source, batch_size, drop_last):
        """Build the fixed batches from *data_source*.

        data_source must provide ``__len__`` and ``image_aspect_ratio(idx)``.
        """
        super(AspectRatioBasedSampler, self).__init__(data_source)
        self.data_source = data_source
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.groups = self.group_images()

    def __iter__(self):
        # Shuffle the batch order (not the contents of each batch).
        random.shuffle(self.groups)
        yield from self.groups

    def __len__(self):
        total = len(self.data_source)
        if self.drop_last:
            return total // self.batch_size
        return (total + self.batch_size - 1) // self.batch_size

    def group_images(self):
        """Sort indices by aspect ratio and chunk them into batches.

        The last (partial) batch wraps around via the modulo so every
        batch has exactly ``batch_size`` entries.
        """
        order = sorted(range(len(self.data_source)),
                       key=self.data_source.image_aspect_ratio)
        return [[order[i % len(order)] for i in range(start, start + self.batch_size)]
                for start in range(0, len(order), self.batch_size)]

    def image_aspect_ratio(self, image):
        # NOTE(review): body was empty in the original; returns None.
        return
import random
import torchvision
from torch.utils.data.sampler import Sampler
class AspectRatioBasedSampler(Sampler):
def __init__(self, data_source, batch_size, drop_last):
super(AspectRatioBasedSampler, self).__init__(data_source)
self.data_source = data_source
self.batch_size = batch_size
self.drop_last = drop_last
self.groups = self.group_images()
def __iter__(self):
random.shuffle(self.groups)
for group in self.groups:
yield group
def __len__(self):
if self.drop_last:
return len(self.data_source) // self.batch_size
else:
return (len(self.data_source) + self.batch_size - 1) // self.batch_size
def group_images(self):
order = list(range(len(self.data_source)))
order.sort(key=lambda x: self.data_source.image_aspect_ratio(x))
return [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in range(0, len(order), self.batch_size)]
def image_aspect_ratio(self, image):
return | true | true |
1c30e253a241cb0b82efac76e34724ae80cfe19e | 201 | py | Python | aging/parameters.py | freefeynman123/Aging | 12633abc709b376dcd61d6e4f78a8a4343a0550c | [
"MIT"
] | 1 | 2022-03-15T06:33:56.000Z | 2022-03-15T06:33:56.000Z | aging/parameters.py | freefeynman123/Aging | 12633abc709b376dcd61d6e4f78a8a4343a0550c | [
"MIT"
] | null | null | null | aging/parameters.py | freefeynman123/Aging | 12633abc709b376dcd61d6e4f78a8a4343a0550c | [
"MIT"
] | null | null | null | #Parameters for neural net training
n_channels = 3
n_encode = 64
n_z = 50
n_l = 10
n_generator = 64
batch_size = 32
image_size = 128
n_discriminator = 16
n_age = int(n_z / n_l)
n_gender = int(n_z / 2) | 16.75 | 35 | 0.721393 |
n_channels = 3
n_encode = 64
n_z = 50
n_l = 10
n_generator = 64
batch_size = 32
image_size = 128
n_discriminator = 16
n_age = int(n_z / n_l)
n_gender = int(n_z / 2) | true | true |
1c30e2bf5386b9f44a24a53b906de5908797f9f0 | 282 | py | Python | leaker/pattern/__init__.py | anonleakerdev/LEAKER | bea8623021b3eb0b4fb450f2cdd1b48834d7c196 | [
"MIT"
] | 8 | 2021-08-30T04:55:21.000Z | 2022-03-20T16:14:33.000Z | leaker/pattern/__init__.py | anonleakerdev/LEAKER | bea8623021b3eb0b4fb450f2cdd1b48834d7c196 | [
"MIT"
] | 1 | 2021-08-09T09:22:00.000Z | 2021-08-09T09:22:00.000Z | leaker/pattern/__init__.py | anonleakerdev/LEAKER | bea8623021b3eb0b4fb450f2cdd1b48834d7c196 | [
"MIT"
] | null | null | null | from .identity import ResponseIdentity
from .length import ResponseLength
from .volume import TotalVolume, Volume
from .cooccurrence import CoOccurrence
from .rank import Rank
__all__ = [
'ResponseIdentity', 'ResponseLength', 'TotalVolume', 'Volume', 'CoOccurrence', 'Rank',
]
| 28.2 | 90 | 0.776596 | from .identity import ResponseIdentity
from .length import ResponseLength
from .volume import TotalVolume, Volume
from .cooccurrence import CoOccurrence
from .rank import Rank
__all__ = [
'ResponseIdentity', 'ResponseLength', 'TotalVolume', 'Volume', 'CoOccurrence', 'Rank',
]
| true | true |
1c30e3878bd84fbf53f92f80754641a0c21a4ed6 | 4,130 | py | Python | scylla/web/server.py | kirinse/scylla | e0fbe07d155856a5f76db600320b3d5bf0a53eaf | [
"Apache-2.0"
] | null | null | null | scylla/web/server.py | kirinse/scylla | e0fbe07d155856a5f76db600320b3d5bf0a53eaf | [
"Apache-2.0"
] | null | null | null | scylla/web/server.py | kirinse/scylla | e0fbe07d155856a5f76db600320b3d5bf0a53eaf | [
"Apache-2.0"
] | null | null | null | import math
import os
from playhouse.shortcuts import model_to_dict
from sanic import Sanic
from sanic.request import Request
from sanic.response import json
from sanic_cors import CORS
from scylla.database import ProxyIP
from scylla.loggings import logger
app = Sanic()
# Allow cross-origin requests so the API can be queried from other origins.
CORS(app)
# Package root (two levels above this module) holds the bundled frontend.
base_path = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))
app.static('/assets/*', base_path + '/assets')
# Serve the SPA entry point for the root and for any unknown route.
app.static('/', base_path + '/assets/index.html')
app.static('/*', base_path + '/assets/index.html')
def _parse_str_to_int(s: str) -> int:
try:
return int(s)
except ValueError:
return 0
def _get_valid_proxies_query():
    """Base peewee query: proxies marked valid with latency in (0, 9999).

    Note: ``== True`` is required by peewee to build the SQL predicate;
    do not replace it with ``is True``.
    """
    return ProxyIP.select().where(ProxyIP.latency > 0).where(ProxyIP.latency < 9999) \
        .where(ProxyIP.is_valid == True)
@app.route('/api/v1/proxies')
async def api_v1_proxies(request: Request):
    """Paginated listing of valid proxies.

    Query parameters: ``limit`` (default 20), ``page`` (default 1),
    ``anonymous`` ('true'/'false'/anything else = any), ``https``
    ('true'/'false'), ``countries`` (comma-separated country codes).
    """
    args = request.get_args()
    limit = 20
    page = 1
    is_anonymous = 2  # 0: no, 1: yes, 2: any
    if 'limit' in args:
        int_limit = _parse_str_to_int(args['limit'][0])
        limit = int_limit if int_limit else 20
    if 'page' in args:
        int_page = _parse_str_to_int(args['page'][0])
        page = int_page if int_page > 0 else 1
    if 'anonymous' in args:
        str_anonymous = args['anonymous'][0]
        if str_anonymous == 'true':
            is_anonymous = 1
        elif str_anonymous == 'false':
            is_anonymous = 0
        else:
            is_anonymous = 2
    str_https = None
    if 'https' in args:
        str_https = args['https'][0]
    country_list = []
    if 'countries' in args:
        countries = args['countries'][0]
        country_list = countries.split(',')

    proxy_query = _get_valid_proxies_query()
    # BUG FIX: filters previously restarted from the initial query, so the
    # 'https' filter silently discarded the 'anonymous' filter. Chain every
    # filter on the same query instead.
    if is_anonymous == 1:
        proxy_query = proxy_query.where(ProxyIP.is_anonymous == True)
    elif is_anonymous == 0:
        proxy_query = proxy_query.where(ProxyIP.is_anonymous == False)
    if str_https == 'true':
        proxy_query = proxy_query.where(ProxyIP.is_https == True)
    elif str_https == 'false':
        proxy_query = proxy_query.where(ProxyIP.is_https == False)
    if country_list:
        proxy_query = proxy_query.where(ProxyIP.country << country_list)

    count = proxy_query.count()  # count before sorting/pagination
    proxies = proxy_query.order_by(ProxyIP.updated_at.desc(), ProxyIP.latency).offset((page - 1) * limit).limit(limit)
    logger.debug('Perform SQL query: {}'.format(proxy_query.sql()))
    proxy_list = []
    for p in proxies:
        pp = model_to_dict(p)
        # Datetimes are not JSON-serializable; expose unix timestamps.
        pp['created_at'] = pp['created_at'].timestamp()
        pp['updated_at'] = pp['updated_at'].timestamp()
        proxy_list.append(pp)
    return json({
        'proxies': proxy_list,
        'count': count,
        'per_page': limit,
        'page': page,
        'total_page': math.ceil(count / limit),
    })
@app.route('/api/v1/stats')
async def api_v1_stats(request: Request):
    """Aggregate latency statistics over the stored proxies."""
    # Median latency of valid proxies: ORDER BY latency, OFFSET count/2.
    # NOTE(review): unlike the mean below, this query does not exclude
    # latency >= 9999 sentinel values — confirm whether that is intended.
    median_query: ProxyIP = ProxyIP.raw("""SELECT latency
    FROM proxy_ips
    WHERE is_valid = 1
    ORDER BY latency
    LIMIT 1
    OFFSET (
        SELECT COUNT(*) FROM proxy_ips WHERE is_valid = 1
    ) / 2""").get()
    median = median_query.latency
    # Mean latency of valid proxies, excluding the 9999 sentinel.
    mean_query: ProxyIP = ProxyIP.raw("""SELECT AVG(latency) as latency
    FROM proxy_ips
    WHERE is_valid = 1 AND latency < 9999""").get()
    mean = mean_query.latency
    valid_count = _get_valid_proxies_query().count()
    total_count = ProxyIP.select().count()
    return json({
        'median': median,
        'valid_count': valid_count,
        'total_count': total_count,
        'mean': mean,
    })
def start_web_server(host='0.0.0.0', port=8899):
    """Run the Sanic application (blocking) on the given host and port."""
    app.run(host=host, port=port)
| 28.287671 | 118 | 0.597337 | import math
import os
from playhouse.shortcuts import model_to_dict
from sanic import Sanic
from sanic.request import Request
from sanic.response import json
from sanic_cors import CORS
from scylla.database import ProxyIP
from scylla.loggings import logger
app = Sanic()
CORS(app)
base_path = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))
app.static('/assets/*', base_path + '/assets')
app.static('/', base_path + '/assets/index.html')
app.static('/*', base_path + '/assets/index.html')
def _parse_str_to_int(s: str) -> int:
try:
return int(s)
except ValueError:
return 0
def _get_valid_proxies_query():
return ProxyIP.select().where(ProxyIP.latency > 0).where(ProxyIP.latency < 9999) \
.where(ProxyIP.is_valid == True)
@app.route('/api/v1/proxies')
async def api_v1_proxies(request: Request):
args = request.get_args()
limit = 20
page = 1
is_anonymous = 2
if 'limit' in args:
int_limit = _parse_str_to_int(args['limit'][0])
limit = int_limit if int_limit else 20
if 'page' in args:
int_page = _parse_str_to_int(args['page'][0])
page = int_page if int_page > 0 else 1
if 'anonymous' in args:
str_anonymous = args['anonymous'][0]
if str_anonymous == 'true':
is_anonymous = 1
elif str_anonymous == 'false':
is_anonymous = 0
else:
is_anonymous = 2
str_https = None
if 'https' in args:
str_https = args['https'][0]
country_list = []
if 'countries' in args:
countries = args['countries'][0]
country_list = countries.split(',')
proxy_initial_query = _get_valid_proxies_query()
proxy_query = proxy_initial_query
if is_anonymous != 2:
if is_anonymous == 1:
proxy_query = proxy_initial_query.where(ProxyIP.is_anonymous == True)
elif is_anonymous == 0:
proxy_query = proxy_initial_query.where(ProxyIP.is_anonymous == False)
if str_https:
if str_https == 'true':
proxy_query = proxy_initial_query.where(ProxyIP.is_https == True)
elif str_https == 'false':
proxy_query = proxy_initial_query.where(ProxyIP.is_https == False)
if country_list and len(country_list) > 0:
proxy_query = proxy_query.where(ProxyIP.country << country_list)
count = proxy_query.count()
proxies = proxy_query.order_by(ProxyIP.updated_at.desc(), ProxyIP.latency).offset((page - 1) * limit).limit(limit)
logger.debug('Perform SQL query: {}'.format(proxy_query.sql()))
proxy_list = []
for p in proxies:
pp = model_to_dict(p)
pp['created_at'] = pp['created_at'].timestamp()
pp['updated_at'] = pp['updated_at'].timestamp()
proxy_list.append(pp)
return json({
'proxies': proxy_list,
'count': count,
'per_page': limit,
'page': page,
'total_page': math.ceil(count / limit),
})
@app.route('/api/v1/stats')
async def api_v1_stats(request: Request):
median_query: ProxyIP = ProxyIP.raw("""SELECT latency
FROM proxy_ips
WHERE is_valid = 1
ORDER BY latency
LIMIT 1
OFFSET (
SELECT COUNT(*) FROM proxy_ips WHERE is_valid = 1
) / 2""").get()
median = median_query.latency
mean_query: ProxyIP = ProxyIP.raw("""SELECT AVG(latency) as latency
FROM proxy_ips
WHERE is_valid = 1 AND latency < 9999""").get()
mean = mean_query.latency
valid_count = _get_valid_proxies_query().count()
total_count = ProxyIP.select().count()
return json({
'median': median,
'valid_count': valid_count,
'total_count': total_count,
'mean': mean,
})
def start_web_server(host='0.0.0.0', port=8899):
app.run(host=host, port=port)
| true | true |
1c30e3fac6b958925d4489b37cd3a3d7fea02f79 | 2,971 | py | Python | tests/system/gapic/v1/test_system_speech_v1.py | busunkim96/python-speech | 4214630c3318e6c9bc0a5156e20344956faf7d52 | [
"Apache-2.0"
] | 1 | 2019-03-26T21:44:51.000Z | 2019-03-26T21:44:51.000Z | tests/system/gapic/v1/test_system_speech_v1.py | busunkim96/python-speech | 4214630c3318e6c9bc0a5156e20344956faf7d52 | [
"Apache-2.0"
] | 40 | 2019-07-16T10:04:48.000Z | 2020-01-20T09:04:59.000Z | tests/system/gapic/v1/test_system_speech_v1.py | busunkim96/python-speech | 4214630c3318e6c9bc0a5156e20344956faf7d52 | [
"Apache-2.0"
] | 2 | 2019-07-18T00:05:31.000Z | 2019-11-27T14:17:22.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import io
import requests
from google.cloud import speech_v1
class TestSystemSpeech(object):
    """System tests against the live Cloud Speech-to-Text v1 API.

    Each test transcribes the ``speech/brooklyn.flac`` sample (FLAC,
    16 kHz, US English) from a GCS bucket. The bucket can be overridden
    via the GOOGLE_CLOUD_TESTS_SPEECH_BUCKET environment variable and
    falls back to the public 'cloud-samples-tests' bucket.

    NOTE(review): the positional ``client.recognize(config, audio)`` call
    style matches pre-2.0 google-cloud-speech releases; newer releases
    expect keyword/request arguments — confirm the pinned version.
    """

    def test_recognize(self):
        """Synchronous recognition yields a transcript."""
        try:
            BUCKET = os.environ["GOOGLE_CLOUD_TESTS_SPEECH_BUCKET"]
        except KeyError:
            BUCKET = "cloud-samples-tests"
        client = speech_v1.SpeechClient()
        config = {
            "encoding": speech_v1.enums.RecognitionConfig.AudioEncoding.FLAC,
            "language_code": "en-US",
            "sample_rate_hertz": 16000,
        }
        uri = "gs://{}/speech/brooklyn.flac".format(BUCKET)
        audio = {"uri": uri}
        response = client.recognize(config, audio)
        assert response.results[0].alternatives[0].transcript is not None

    def test_long_running_recognize(self):
        """Asynchronous (long-running) recognition completes with a result."""
        try:
            BUCKET = os.environ["GOOGLE_CLOUD_TESTS_SPEECH_BUCKET"]
        except KeyError:
            BUCKET = "cloud-samples-tests"
        client = speech_v1.SpeechClient()
        config = speech_v1.types.RecognitionConfig(
            encoding=speech_v1.enums.RecognitionConfig.AudioEncoding.FLAC,
            language_code="en-US",
            sample_rate_hertz=16000,
        )
        uri = "gs://{}/speech/brooklyn.flac".format(BUCKET)
        audio = {"uri": uri}
        response = client.long_running_recognize(config, audio)
        # .result() blocks until the server-side operation finishes.
        assert response.result() is not None

    def test_streaming_recognize(self):
        """Streaming recognition over audio fetched via HTTPS yields transcripts."""
        try:
            BUCKET = os.environ["GOOGLE_CLOUD_TESTS_SPEECH_BUCKET"]
        except KeyError:
            BUCKET = "cloud-samples-tests"
        client = speech_v1.SpeechClient()
        config = speech_v1.types.RecognitionConfig(
            encoding=speech_v1.enums.RecognitionConfig.AudioEncoding.FLAC,
            language_code="en-US",
            sample_rate_hertz=16000,
        )
        streamingConfig = speech_v1.types.StreamingRecognitionConfig(config=config)
        # Streaming does not accept gs:// URIs, so the sample is downloaded
        # over HTTPS and sent as a single audio_content chunk.
        uri = "https://storage.googleapis.com/{}/speech/brooklyn.flac".format(BUCKET)
        streaming_requests = [
            speech_v1.types.StreamingRecognizeRequest(
                audio_content=requests.get(uri).content
            )
        ]
        responses = client.streaming_recognize(streamingConfig, streaming_requests)
        for response in responses:
            for result in response.results:
                assert result.alternatives[0].transcript is not None
| 31.273684 | 85 | 0.656345 |
import os
import io
import requests
from google.cloud import speech_v1
class TestSystemSpeech(object):
def test_recognize(self):
try:
BUCKET = os.environ["GOOGLE_CLOUD_TESTS_SPEECH_BUCKET"]
except KeyError:
BUCKET = "cloud-samples-tests"
client = speech_v1.SpeechClient()
config = {
"encoding": speech_v1.enums.RecognitionConfig.AudioEncoding.FLAC,
"language_code": "en-US",
"sample_rate_hertz": 16000,
}
uri = "gs://{}/speech/brooklyn.flac".format(BUCKET)
audio = {"uri": uri}
response = client.recognize(config, audio)
assert response.results[0].alternatives[0].transcript is not None
def test_long_running_recognize(self):
try:
BUCKET = os.environ["GOOGLE_CLOUD_TESTS_SPEECH_BUCKET"]
except KeyError:
BUCKET = "cloud-samples-tests"
client = speech_v1.SpeechClient()
config = speech_v1.types.RecognitionConfig(
encoding=speech_v1.enums.RecognitionConfig.AudioEncoding.FLAC,
language_code="en-US",
sample_rate_hertz=16000,
)
uri = "gs://{}/speech/brooklyn.flac".format(BUCKET)
audio = {"uri": uri}
response = client.long_running_recognize(config, audio)
assert response.result() is not None
def test_streaming_recognize(self):
try:
BUCKET = os.environ["GOOGLE_CLOUD_TESTS_SPEECH_BUCKET"]
except KeyError:
BUCKET = "cloud-samples-tests"
client = speech_v1.SpeechClient()
config = speech_v1.types.RecognitionConfig(
encoding=speech_v1.enums.RecognitionConfig.AudioEncoding.FLAC,
language_code="en-US",
sample_rate_hertz=16000,
)
streamingConfig = speech_v1.types.StreamingRecognitionConfig(config=config)
uri = "https://storage.googleapis.com/{}/speech/brooklyn.flac".format(BUCKET)
streaming_requests = [
speech_v1.types.StreamingRecognizeRequest(
audio_content=requests.get(uri).content
)
]
responses = client.streaming_recognize(streamingConfig, streaming_requests)
for response in responses:
for result in response.results:
assert result.alternatives[0].transcript is not None
| true | true |
1c30e43b5090c72c5287a606bc577e1e6dc801ae | 9,643 | py | Python | zeus/trainer/utils.py | TianQi-777/xingtian | 9b1678ad6ff12f00c2826a7ec7f42d5350b83b31 | [
"MIT"
] | 240 | 2020-08-15T15:11:49.000Z | 2022-03-28T07:26:23.000Z | zeus/trainer/utils.py | TianQi-777/xingtian | 9b1678ad6ff12f00c2826a7ec7f42d5350b83b31 | [
"MIT"
] | 20 | 2020-08-29T06:18:21.000Z | 2022-03-21T04:35:57.000Z | zeus/trainer/utils.py | TianQi-777/xingtian | 9b1678ad6ff12f00c2826a7ec7f42d5350b83b31 | [
"MIT"
] | 69 | 2020-08-15T15:41:53.000Z | 2022-03-16T08:27:47.000Z | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Utils functions that been used in pipeline."""
import os
import socket
import subprocess
import sys
import logging
import signal
import psutil
from collections import OrderedDict
from enum import Enum
from zeus.common import FileOps
from zeus.common.task_ops import TaskOps
class WorkerTypes(Enum):
    """Roles a pipeline worker can take.

    The numeric values are only distinct identifiers; their order carries
    no meaning visible in this module.
    """
    TRAINER = 1           # worker that runs training
    EVALUATOR = 2         # generic evaluation worker
    HOST_EVALUATOR = 3    # evaluation on the host side  # presumably CPU/GPU host; confirm
    HAVA_D_EVALUATOR = 4  # NOTE(review): meaning of "HAVA_D" not evident here; confirm
    DeviceEvaluator = 5   # non-PEP8 name kept as-is; referenced by this spelling elsewhere
class PairDictQueue():
    """Ordered collection used by the Master to gather multi-part evaluator results.

    Each tracked item maps a task id (e.g. ``"step_name::worker1"``, built from
    the step name and worker id) to a dict of sub-task completion flags::

        {
            "step_name::worker1": {"EVALUATE_GPU": 0, "EVALUATE_DLOOP": 0},
            "step_name::worker2": {"EVALUATE_GPU": 1, "EVALUATE_DLOOP": 1},
        }

    A flag value of 0 means that sub-evaluator is still running, 1 means it
    finished.  ``get()`` only returns (and removes) items whose flags are all
    1, so an item becomes available exactly when every sub-evaluator reported
    back.  Item keys and flag names should be strings or ints.
    """

    def __init__(self):
        self.dq_id = 0                # id counter (currently unused)
        self.odict = OrderedDict()    # task id -> {flag name: 0/1}

    def add_new(self, item, type):
        """Register sub-task `type` of task `item` as pending (flag = 0).

        :param item: task identifier.
        :param type: sub-task flag name.
        """
        if item not in self.odict:
            self.odict[item] = dict()
        self.odict[item][type] = 0

    def put(self, item, type):
        """Mark sub-task `type` of task `item` as finished (flag = 1).

        :param item: task identifier; must have been added via ``add_new``.
        :param type: sub-task flag name.
        :return: True on success, None if `item` is unknown.
        """
        if item not in self.odict:
            logging.debug("item({}) not in PairDictQueue!".format(item))
            return
        self.odict[item][type] = 1
        logging.debug("PairDictQueue add item({}) key({})".format(item, type))
        return True

    def get(self):
        """Pop and return the first task whose sub-task flags are all 1.

        :return: the task identifier, or None if no task is fully finished.
        """
        for key, flags in self.odict.items():
            if all(state == 1 for state in flags.values()):
                # Safe to mutate here: we return immediately, so the
                # iteration over odict does not continue after the pop.
                self.odict.pop(key)
                return key
        return None

    def qsize(self):
        """Return the number of tasks still tracked (finished or not)."""
        return len(self.odict)
# Here start the stand alone functions for master to use!
def clean_cuda_proc(master_pid, device_id):
    """Kill every process using ``/dev/nvidia<device_id>`` except master and self.

    Uses ``fuser`` to list the PIDs holding the device node, then pipes an
    awk-generated ``kill -9`` command into ``sh`` for each PID that is neither
    `master_pid` nor the current process.

    :param master_pid: pid of the master process that must be spared.
    :param device_id: CUDA device index (forms the ``/dev/nvidia{N}`` path).
    """
    current_pid = os.getpid()
    # Double braces escape awk's block braces from str.format; {0}/{1}/{2}
    # substitute the device id, the master pid and our own pid.
    cuda_kill = "fuser -v /dev/nvidia{0} | " \
                "awk '{{for(i=1;i<=NF;i++)if($i!={1}&&$i!={2})" \
                "print \"kill -9 \" $i;}}' | sh".format(device_id, master_pid, current_pid)
    os.system(cuda_kill)
    return
def kill_children_proc(sig=signal.SIGTERM, recursive=True,
                       timeout=1, on_terminate=None):
    """Send `sig` to every child of the current process and wait for them.

    "on_terminate", if specified, is a callback invoked as soon as a child
    terminates.

    :return: a ``(gone, still_alive)`` tuple as produced by
        ``psutil.wait_procs``.
    """
    me = psutil.Process(os.getpid())
    child_procs = me.children(recursive)
    for child in child_procs:
        info = child.as_dict(attrs=['pid', 'name', 'username'])
        logging.info("children: {}".format(info))
        child.send_signal(sig)
    return psutil.wait_procs(child_procs, timeout=timeout,
                             callback=on_terminate)
def kill_proc_tree(pid, sig=signal.SIGKILL, include_parent=True,
                   timeout=None, on_terminate=None):
    """Kill the process tree rooted at `pid` (grandchildren included) with `sig`.

    "on_terminate", if specified, is a callback invoked as soon as a child
    terminates.

    :raises RuntimeError: if `pid` is the current process.
    :return: a ``(gone, still_alive)`` tuple, or ``(None, None)`` if the
        tree could not be inspected (e.g. it already exited).
    """
    if pid == os.getpid():
        raise RuntimeError("I refuse to kill myself")
    gone, alive = None, None
    try:
        root = psutil.Process(pid)
        targets = root.children(recursive=True)
        if include_parent:
            targets.append(root)
        for proc in targets:
            proc.send_signal(sig)
        gone, alive = psutil.wait_procs(targets, timeout=timeout,
                                        callback=on_terminate)
    except Exception:
        # Best effort: the tree may already be gone or be inaccessible.
        pass
    return (gone, alive)
def install_and_import_local(package, package_path=None, update=False):
    """Install a local python package if needed, then import it.

    The module is bound into this module's globals under the name `package`
    whether or not an installation was required.

    :param str package: `package` name that need to install and import.
    :param package_path: if the package is a local whl, then the `package_path`.
    :type package_path: str or None
    :param bool update: when True, always reinstall/upgrade the package.
    """
    import importlib

    def _pip_install(install_args):
        """Install via pip's in-process API when available, else `python -m pip`."""
        import pip
        if hasattr(pip, 'main'):
            pip.main(['install'] + install_args)
        elif hasattr(pip, '_internal'):
            pip._internal.main(['install'] + install_args)
        else:
            subprocess.call([sys.executable, "-m", "pip", "install"] +
                            install_args)

    try:
        if not update:
            try:
                importlib.import_module(package)
            except ImportError:
                # Not importable yet: install it from the given path.
                _pip_install([package_path])
        else:
            _pip_install(['-U', package_path])
    finally:
        # Expose the (now installed) module at this module's global scope.
        globals()[package] = importlib.import_module(package)
def get_master_address(args):
    """Get master address(ip, port) from `args.init_method`.

    :param argparse.ArgumentParser args: `args` is a argparse that should
         contain `init_method`, `rank` and `world_size`.
    :return: (ip, port), or None when `args.init_method` is not set.
    :rtype: (str, str) or None
    """
    if args.init_method is not None:
        # init_method looks like "tcp://<host>:<port>"; drop the 6-char
        # scheme prefix before splitting host and port.
        address = args.init_method[6:].split(":")
        ip = socket.gethostbyname(address[0])
        port = address[-1]
        logging.info("get master address, address={}, ip={}, port={}".format(
            address, ip, port
        ))
        return ip, port
    else:
        # logging.warn is a deprecated alias; use logging.warning.
        logging.warning("fail to get master address, args.init_method is none.")
        return None
def get_local_address():
    """Resolve this node's IP address from its own hostname.

    :return str: ip address.
    """
    name = socket.gethostname()
    addr = socket.gethostbyname(name)
    logging.info("get local address, hostname={}, ip={}".format(
        name, addr
    ))
    return addr
def save_master_ip(ip_address, port, args):
    """Write the ip and port in a system path.

    :param str ip_address: The `ip_address` need to write.
    :param str port: The `port` need to write.
    :param argparse.ArgumentParser args: `args` is a argparse that should
         contain `init_method`, `rank` and `world_size`.
    """
    folder = TaskOps().temp_path
    FileOps.make_dir(folder)
    target = os.path.join(folder, 'ip_address.txt')
    logging.info("write ip, file path={}".format(target))
    with open(target, 'w') as out:
        out.write(ip_address + "\n")
        out.write(port + "\n")
def load_master_ip():
    """Get the ip and port that write in a system path.

    Reads only from the local temp path; nothing is downloaded from S3.

    :return: (ip, port), or (None, None) when the file does not exist.
    """
    folder = TaskOps().temp_path
    FileOps.make_dir(folder)
    source = os.path.join(folder, 'ip_address.txt')
    if not os.path.isfile(source):
        return None, None
    with open(source, 'r') as handle:
        ip = handle.readline().strip()
        port = handle.readline().strip()
    logging.info("get write ip, ip={}, port={}".format(
        ip, port
    ))
    return ip, port
def get_master_port(args):
    """Get master port from `args.init_method`.

    :param argparse.ArgumentParser args: `args` is a argparse that should
         contain `init_method`, `rank` and `world_size`.
    :return: The port that master used to communicate with slaves.
    :rtype: str or None
    """
    if args.init_method is None:
        return None
    # The port is whatever follows the final ":" of "tcp://host:port".
    return args.init_method.split(":")[-1]
| 31.825083 | 93 | 0.604895 |
import os
import socket
import subprocess
import sys
import logging
import signal
import psutil
from collections import OrderedDict
from enum import Enum
from zeus.common import FileOps
from zeus.common.task_ops import TaskOps
class WorkerTypes(Enum):
TRAINER = 1
EVALUATOR = 2
HOST_EVALUATOR = 3
HAVA_D_EVALUATOR = 4
DeviceEvaluator = 5
class PairDictQueue():
def __init__(self):
self.dq_id = 0
self.odict = OrderedDict()
return
def add_new(self, item, type):
if item not in self.odict:
self.odict[item] = dict()
self.odict[item][type] = 0
def put(self, item, type):
if item not in self.odict:
logging.debug("item({}) not in PairDictQueue!".format(item))
return
self.odict[item][type] = 1
logging.debug("PairDictQueue add item({}) key({})".format(item, type))
return True
def get(self):
item = None
for key, subdict in self.odict.items():
item_ok = True
for k, i in subdict.items():
if i != 1:
item_ok = False
break
if item_ok:
self.odict.pop(key)
item = key
break
return item
def qsize(self):
return len(self.odict)
def clean_cuda_proc(master_pid, device_id):
current_pid = os.getpid()
cuda_kill = "fuser -v /dev/nvidia{0} | " \
"awk '{{for(i=1;i<=NF;i++)if($i!={1}&&$i!={2})" \
"print \"kill -9 \" $i;}}' | sh".format(device_id, master_pid, current_pid)
os.system(cuda_kill)
return
def kill_children_proc(sig=signal.SIGTERM, recursive=True,
timeout=1, on_terminate=None):
pid = os.getpid()
parent = psutil.Process(pid)
children = parent.children(recursive)
for p in children:
logging.info("children: {}".format(p.as_dict(attrs=['pid', 'name', 'username'])))
p.send_signal(sig)
gone, alive = psutil.wait_procs(children, timeout=timeout,
callback=on_terminate)
return (gone, alive)
def kill_proc_tree(pid, sig=signal.SIGKILL, include_parent=True,
timeout=None, on_terminate=None):
if pid == os.getpid():
raise RuntimeError("I refuse to kill myself")
gone = None
alive = None
try:
parent = psutil.Process(pid)
children = parent.children(recursive=True)
if include_parent:
children.append(parent)
for p in children:
p.send_signal(sig)
gone, alive = psutil.wait_procs(children, timeout=timeout,
callback=on_terminate)
except Exception:
pass
return (gone, alive)
def install_and_import_local(package, package_path=None, update=False):
import importlib
try:
if not update:
try:
importlib.import_module(package)
except ImportError:
import pip
if hasattr(pip, 'main'):
pip.main(['install', package_path])
elif hasattr(pip, '_internal'):
pip._internal.main(['install', package_path])
else:
subprocess.call([sys.executable, "-m", "pip", "install",
package_path])
else:
import pip
if hasattr(pip, 'main'):
pip.main(['install', '-U', package_path])
elif hasattr(pip, '_internal'):
pip._internal.main(['install', '-U', package_path])
else:
subprocess.call([sys.executable, "-m", "pip", "install", "-U",
package_path])
finally:
globals()[package] = importlib.import_module(package)
def get_master_address(args):
if args.init_method is not None:
address = args.init_method[6:].split(":")
ip = socket.gethostbyname(address[0])
port = address[-1]
logging.info("get master address, address={}, ip={}, port={}".format(
address, ip, port
))
return ip, port
else:
logging.warn("fail to get master address, args.init_method is none.")
return None
def get_local_address():
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
logging.info("get local address, hostname={}, ip={}".format(
hostname, ip
))
return ip
def save_master_ip(ip_address, port, args):
temp_folder = TaskOps().temp_path
FileOps.make_dir(temp_folder)
file_path = os.path.join(temp_folder, 'ip_address.txt')
logging.info("write ip, file path={}".format(file_path))
with open(file_path, 'w') as f:
f.write(ip_address + "\n")
f.write(port + "\n")
def load_master_ip():
temp_folder = TaskOps().temp_path
FileOps.make_dir(temp_folder)
file_path = os.path.join(temp_folder, 'ip_address.txt')
if os.path.isfile(file_path):
with open(file_path, 'r') as f:
ip = f.readline().strip()
port = f.readline().strip()
logging.info("get write ip, ip={}, port={}".format(
ip, port
))
return ip, port
else:
return None, None
def get_master_port(args):
if args.init_method is not None:
address = args.init_method.split(":")
port = address[-1]
return port
else:
return None
| true | true |
1c30e5d2a024fe414aefea9a5499608d015137fb | 2,170 | py | Python | dojo/unittests/tools/test_gitlab_container_scan_parser.py | axelpavageau/django-DefectDojo | 00b425742b783ada0f432241c2812ac1257feb73 | [
"BSD-3-Clause"
] | 1,772 | 2018-01-22T23:32:15.000Z | 2022-03-31T14:49:33.000Z | dojo/unittests/tools/test_gitlab_container_scan_parser.py | axelpavageau/django-DefectDojo | 00b425742b783ada0f432241c2812ac1257feb73 | [
"BSD-3-Clause"
] | 3,461 | 2018-01-20T19:12:28.000Z | 2022-03-31T17:14:39.000Z | dojo/unittests/tools/test_gitlab_container_scan_parser.py | axelpavageau/django-DefectDojo | 00b425742b783ada0f432241c2812ac1257feb73 | [
"BSD-3-Clause"
] | 1,173 | 2018-01-23T07:10:23.000Z | 2022-03-31T14:40:43.000Z | from datetime import datetime
from django.test import TestCase
from dojo.tools.gitlab_container_scan.parser import GitlabContainerScanParser
from dojo.models import Test
class TestGitlabContainerScanParser(TestCase):
    """Unit tests for GitlabContainerScanParser against bundled sample reports."""

    def test_gitlab_container_scan_parser_with_no_vuln_has_no_findings(self):
        # `with` guarantees the report file is closed even if parsing raises.
        with open("dojo/unittests/scans/gitlab_container_scan/gl-container-scanning-report-0-vuln.json") as testfile:
            parser = GitlabContainerScanParser()
            findings = parser.get_findings(testfile, Test())
        self.assertEqual(0, len(findings))

    def test_gitlab_container_scan_parser_with_one_vuln_has_one_findings(self):
        with open("dojo/unittests/scans/gitlab_container_scan/gl-container-scanning-report-1-vuln.json") as testfile:
            parser = GitlabContainerScanParser()
            findings = parser.get_findings(testfile, Test())
        for finding in findings:
            for endpoint in finding.unsaved_endpoints:
                endpoint.clean()
        first_finding = findings[0]
        self.assertEqual(1, len(findings))
        # Was asserting on `finding` (a leaked loop variable); assert on the
        # finding actually under test.
        self.assertEqual(datetime(2021, 4, 14, 19, 46, 18), first_finding.date)
        self.assertEqual("CVE-2019-3462 in apt-1.4.8", first_finding.title)
        self.assertEqual("apt", first_finding.component_name)
        self.assertEqual("1.4.8", first_finding.component_version)
        self.assertEqual("CVE-2019-3462", first_finding.cve)
        self.assertEqual("High", first_finding.severity)
        self.assertEqual("Upgrade apt from 1.4.8 to 1.4.9", first_finding.mitigation)
        self.assertEqual("df52bc8ce9a2ae56bbcb0c4ecda62123fbd6f69b", first_finding.unique_id_from_tool)

    def test_gitlab_container_scan_parser_with_five_vuln_has_five_findings(self):
        with open("dojo/unittests/scans/gitlab_container_scan/gl-container-scanning-report-5-vuln.json") as testfile:
            parser = GitlabContainerScanParser()
            findings = parser.get_findings(testfile, Test())
        for finding in findings:
            for endpoint in finding.unsaved_endpoints:
                endpoint.clean()
        self.assertEqual(5, len(findings))
from django.test import TestCase
from dojo.tools.gitlab_container_scan.parser import GitlabContainerScanParser
from dojo.models import Test
class TestGitlabContainerScanParser(TestCase):
def test_gitlab_container_scan_parser_with_no_vuln_has_no_findings(self):
testfile = open("dojo/unittests/scans/gitlab_container_scan/gl-container-scanning-report-0-vuln.json")
parser = GitlabContainerScanParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_gitlab_container_scan_parser_with_one_vuln_has_one_findings(self):
testfile = open("dojo/unittests/scans/gitlab_container_scan/gl-container-scanning-report-1-vuln.json")
parser = GitlabContainerScanParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
first_finding = findings[0]
self.assertEqual(1, len(findings))
self.assertEqual(datetime(2021, 4, 14, 19, 46, 18), finding.date)
self.assertEqual("CVE-2019-3462 in apt-1.4.8", first_finding.title)
self.assertEqual("apt", first_finding.component_name)
self.assertEqual("1.4.8", first_finding.component_version)
self.assertEqual("CVE-2019-3462", first_finding.cve)
self.assertEqual("High", first_finding.severity)
self.assertEqual("Upgrade apt from 1.4.8 to 1.4.9", first_finding.mitigation)
self.assertEqual("df52bc8ce9a2ae56bbcb0c4ecda62123fbd6f69b", first_finding.unique_id_from_tool)
def test_gitlab_container_scan_parser_with_five_vuln_has_five_findings(self):
testfile = open("dojo/unittests/scans/gitlab_container_scan/gl-container-scanning-report-5-vuln.json")
parser = GitlabContainerScanParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(5, len(findings))
| true | true |
1c30e5f7e5e604f4ff88dcce7fe6bc1c438a2bdc | 4,841 | py | Python | c7n/filters/config.py | ncerny/cloud-custodian | c43831604534a2bbc9e2a01187354b77a9b44bdc | [
"Apache-2.0"
] | null | null | null | c7n/filters/config.py | ncerny/cloud-custodian | c43831604534a2bbc9e2a01187354b77a9b44bdc | [
"Apache-2.0"
] | 1 | 2021-04-30T21:13:50.000Z | 2021-04-30T21:13:50.000Z | c7n/filters/config.py | ncerny/cloud-custodian | c43831604534a2bbc9e2a01187354b77a9b44bdc | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n.filters import ValueFilter
from c7n.manager import resources
from c7n.utils import local_session, type_schema
from .core import Filter
class ConfigCompliance(Filter):
    """Filter resources by their compliance with one or more AWS config rules.

    An example of using the filter to find all ec2 instances that have
    been registered as non compliant in the last 30 days against two
    custom AWS Config rules.

    :example:

    .. code-block:: yaml

       policies:
         - name: non-compliant-ec2
           resource: ec2
           filters:
            - type: config-compliance
              eval_filters:
                - type: value
                  key: ResultRecordedTime
                  value_type: age
                  value: 30
                  op: less-than
              rules:
                - custodian-ec2-encryption-required
                - custodian-ec2-tags-required

    Also note, custodian has direct support for deploying policies as config
    rules see https://bit.ly/2mblVpq
    """
    permissions = ('config:DescribeComplianceByConfigRule',)
    schema = type_schema(
        'config-compliance',
        required=('rules',),
        op={'enum': ['or', 'and']},
        eval_filters={'type': 'array', 'items': {
            'oneOf': [
                {'$ref': '#/definitions/filters/valuekv'},
                {'$ref': '#/definitions/filters/value'}]}},
        states={'type': 'array', 'items': {'enum': [
            'COMPLIANT', 'NON_COMPLIANT',
            'NOT_APPLICABLE', 'INSUFFICIENT_DATA']}},
        rules={'type': 'array', 'items': {'type': 'string'}})
    schema_alias = True
    annotation_key = 'c7n:config-compliance'

    def get_resource_map(self, filters, resource_model, resources):
        """Build a map of resource id -> matching config rule evaluations.

        :param filters: ValueFilter instances applied to each evaluation.
        :param resource_model: model of the resource type being processed.
        :param resources: unused; kept for interface symmetry with callers.
        :return: dict of resource id to list of evaluation results.
        """
        rule_ids = self.data.get('rules')
        states = self.data.get('states', ['NON_COMPLIANT'])
        # 'or' means any eval filter may match, 'and' means all must.
        # (Replaces the error-prone `cond and any or all` chaining idiom
        # with an explicit conditional expression.)
        op = any if self.data.get('op', 'or') == 'or' else all
        client = local_session(self.manager.session_factory).client('config')
        resource_map = {}
        for rid in rule_ids:
            pager = client.get_paginator('get_compliance_details_by_config_rule')
            for page in pager.paginate(
                    ConfigRuleName=rid, ComplianceTypes=states):
                for e in page.get('EvaluationResults', ()):
                    rident = e['EvaluationResultIdentifier'][
                        'EvaluationResultQualifier']
                    # for multi resource type rules, only look at
                    # results for the resource type currently being
                    # processed.
                    if rident['ResourceType'] != resource_model.config_type:
                        continue
                    # With no eval filters every evaluation matches;
                    # otherwise combine filter matches with `op`.
                    if not filters or op([f.match(e) for f in filters]):
                        resource_map.setdefault(
                            rident['ResourceId'], []).append(e)
        return resource_map

    def process(self, resources, event=None):
        """Return the resources with matching evaluations, annotated in place."""
        filters = []
        for f in self.data.get('eval_filters', ()):
            vf = ValueFilter(f)
            vf.annotate = False
            filters.append(vf)

        resource_model = self.manager.get_model()
        resource_map = self.get_resource_map(filters, resource_model, resources)

        results = []
        for r in resources:
            rid = r[resource_model.id]
            if rid not in resource_map:
                continue
            r[self.annotation_key] = resource_map[rid]
            results.append(r)
        return results

    @classmethod
    def register_resources(klass, registry, resource_class):
        """model resource subscriber on resource registration.

        Watch for new resource types being registered if they are
        supported by aws config, automatically, register the
        config-compliance filter.
        """
        if resource_class.resource_type.config_type is None:
            return
        resource_class.filter_registry.register('config-compliance', klass)
resources.subscribe(ConfigCompliance.register_resources)
| 36.674242 | 81 | 0.600496 |
from c7n.filters import ValueFilter
from c7n.manager import resources
from c7n.utils import local_session, type_schema
from .core import Filter
class ConfigCompliance(Filter):
permissions = ('config:DescribeComplianceByConfigRule',)
schema = type_schema(
'config-compliance',
required=('rules',),
op={'enum': ['or', 'and']},
eval_filters={'type': 'array', 'items': {
'oneOf': [
{'$ref': '#/definitions/filters/valuekv'},
{'$ref': '#/definitions/filters/value'}]}},
states={'type': 'array', 'items': {'enum': [
'COMPLIANT', 'NON_COMPLIANT',
'NOT_APPLICABLE', 'INSUFFICIENT_DATA']}},
rules={'type': 'array', 'items': {'type': 'string'}})
schema_alias = True
annotation_key = 'c7n:config-compliance'
def get_resource_map(self, filters, resource_model, resources):
rule_ids = self.data.get('rules')
states = self.data.get('states', ['NON_COMPLIANT'])
op = self.data.get('op', 'or') == 'or' and any or all
client = local_session(self.manager.session_factory).client('config')
resource_map = {}
for rid in rule_ids:
pager = client.get_paginator('get_compliance_details_by_config_rule')
for page in pager.paginate(
ConfigRuleName=rid, ComplianceTypes=states):
evaluations = page.get('EvaluationResults', ())
for e in evaluations:
rident = e['EvaluationResultIdentifier'][
'EvaluationResultQualifier']
if rident['ResourceType'] != resource_model.config_type:
continue
if not filters:
resource_map.setdefault(
rident['ResourceId'], []).append(e)
continue
if op([f.match(e) for f in filters]):
resource_map.setdefault(
rident['ResourceId'], []).append(e)
return resource_map
def process(self, resources, event=None):
filters = []
for f in self.data.get('eval_filters', ()):
vf = ValueFilter(f)
vf.annotate = False
filters.append(vf)
resource_model = self.manager.get_model()
resource_map = self.get_resource_map(filters, resource_model, resources)
results = []
for r in resources:
if r[resource_model.id] not in resource_map:
continue
r[self.annotation_key] = resource_map[r[resource_model.id]]
results.append(r)
return results
@classmethod
def register_resources(klass, registry, resource_class):
if resource_class.resource_type.config_type is None:
return
resource_class.filter_registry.register('config-compliance', klass)
resources.subscribe(ConfigCompliance.register_resources)
| true | true |
1c30e600c88e0eeb37f6a6620136608b6174d39e | 1,912 | py | Python | proxy/get_proxy_url_test.py | Iuiu1234/pipelines | 1e032f550ce23cd40bfb6827b995248537b07d08 | [
"Apache-2.0"
] | 2,860 | 2018-05-24T04:55:01.000Z | 2022-03-31T13:49:56.000Z | proxy/get_proxy_url_test.py | Iuiu1234/pipelines | 1e032f550ce23cd40bfb6827b995248537b07d08 | [
"Apache-2.0"
] | 7,331 | 2018-05-16T09:03:26.000Z | 2022-03-31T23:22:04.000Z | proxy/get_proxy_url_test.py | Iuiu1234/pipelines | 1e032f550ce23cd40bfb6827b995248537b07d08 | [
"Apache-2.0"
] | 1,359 | 2018-05-15T11:05:41.000Z | 2022-03-31T09:42:09.000Z | #!/usr/bin/env python3
# Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from get_proxy_url import urls_for_zone
url_map_json = """
{
"us": ["https://datalab-us-west1.cloud.google.com"],
"us-west1": ["https://datalab-us-west1.cloud.google.com"],
"us-west2": ["https://datalab-us-west2.cloud.google.com"],
"us-east1": ["https://datalab-us-east1.cloud.google.com"]
}
"""
class TestUrlsForZone(unittest.TestCase):
def test_get_urls(self):
self.assertEqual([
"https://datalab-us-east1.cloud.google.com",
"https://datalab-us-west1.cloud.google.com"
], urls_for_zone("us-east1-a", json.loads(url_map_json)))
def test_get_urls_no_match(self):
self.assertEqual([],
urls_for_zone(
"euro-west1-a", json.loads(url_map_json)
))
def test_get_urls_incorrect_format(self):
with self.assertRaises(ValueError):
urls_for_zone("weird-format-a", json.loads(url_map_json))
def test_get_urls_priority(self):
self.assertEqual([
"https://datalab-us-west1.cloud.google.com",
"https://datalab-us-west2.cloud.google.com"
], urls_for_zone("us-west1-a", json.loads(url_map_json)))
if __name__ == '__main__':
unittest.main()
| 32.965517 | 74 | 0.654812 |
import json
import unittest
from get_proxy_url import urls_for_zone
url_map_json = """
{
"us": ["https://datalab-us-west1.cloud.google.com"],
"us-west1": ["https://datalab-us-west1.cloud.google.com"],
"us-west2": ["https://datalab-us-west2.cloud.google.com"],
"us-east1": ["https://datalab-us-east1.cloud.google.com"]
}
"""
class TestUrlsForZone(unittest.TestCase):
def test_get_urls(self):
self.assertEqual([
"https://datalab-us-east1.cloud.google.com",
"https://datalab-us-west1.cloud.google.com"
], urls_for_zone("us-east1-a", json.loads(url_map_json)))
def test_get_urls_no_match(self):
self.assertEqual([],
urls_for_zone(
"euro-west1-a", json.loads(url_map_json)
))
def test_get_urls_incorrect_format(self):
with self.assertRaises(ValueError):
urls_for_zone("weird-format-a", json.loads(url_map_json))
def test_get_urls_priority(self):
self.assertEqual([
"https://datalab-us-west1.cloud.google.com",
"https://datalab-us-west2.cloud.google.com"
], urls_for_zone("us-west1-a", json.loads(url_map_json)))
if __name__ == '__main__':
unittest.main()
| true | true |
1c30e601b7cf6ed33ca78c41bcb799c5c3a262b3 | 4,900 | py | Python | nixgateway/api.py | scardine/py-nixgateway | 36baf34a528fe88893c65b444e847da4ac8df2ab | [
"MIT"
] | 1 | 2020-03-27T22:19:26.000Z | 2020-03-27T22:19:26.000Z | nixgateway/api.py | scardine/py-nixgateway | 36baf34a528fe88893c65b444e847da4ac8df2ab | [
"MIT"
] | null | null | null | nixgateway/api.py | scardine/py-nixgateway | 36baf34a528fe88893c65b444e847da4ac8df2ab | [
"MIT"
] | null | null | null | # coding=utf-8
import json
from uuid import uuid4
import requests
import time
from jose import jwt
import base64
class CardPayments(object):
def __init__(self, gateway):
self._gateway = gateway
def __call__(self, payment_token=None):
params = {}
if payment_token is not None:
if isinstance(payment_token, str):
url = '{}/Orders/CardPayments/{}'.format(self._gateway.base_url, payment_token)
elif isinstance(payment_token, (list, tuple)):
url = '{}/Orders/CardPayments'.format(self._gateway.base_url)
params = {"paymentToken": payment_token}
else:
raise TypeError(u'payment_token should None, list or tuple')
else:
url = '{}/Orders/CardPayments'.format(self._gateway.base_url)
r = requests.get(url, headers=self._gateway.orders.get_headers(), params=params)
if r.status_code == 204:
return []
try:
return r.json()
except ValueError:
return {
"error": u"API is not JSON",
"status_code": r.status_code,
"response": r.text,
}
def authorize(self, request_id, order_id, amount, card, return_url, customer=None, recurrence=None, installments=1,
capture=True, transaction_type=1):
payload = {
"installments": installments,
"capture": capture,
"merchantOrderId": order_id,
"amount": int(amount * 100),
"card": card,
}
if customer is not None:
payload['customer'] = customer
if recurrence is not None:
payload['recurrence'] = recurrence
if return_url is not None:
payload['returnUrl'] = return_url
if transaction_type is not None:
payload['transactionType'] = transaction_type
headers = self._gateway.orders.get_headers(request_id)
url = self._gateway.base_url + '/Orders/CardPayments/Authorize'
r = requests.post(url, headers=headers, json=payload)
if r.status_code == 200:
return r.json()
return {
"error": "Response was not HTTP 200",
"status_code": r.status_code,
"response": r.text,
}
def capture(self, token, amount):
payload = {
"paymentToken": token,
"amount": int(float(amount) * 100),
}
headers = self._gateway.orders.get_headers()
url = self._gateway.base_url + '/Orders/CardPayments/Capture'
r = requests.put(url, headers=headers, json=payload)
if r.status_code == 200:
return r.json()
return {
"error": "Response was not HTTP 200",
"status_code": r.status_code,
"response": r.text,
}
def reverse(self, token, amount):
payload = {
"paymentToken": token,
"amount": int(float(amount) * 100),
}
headers = self._gateway.orders.get_headers()
url = self._gateway.base_url + '/Orders/CardPayments/Reverse'
r = requests.put(url, headers=headers, json=payload)
if r.status_code == 200:
return r.json()
return {
"error": "Response was not HTTP 200",
"status_code": r.status_code,
"response": r.text,
}
class Orders(object):
def __init__(self, gateway):
self.card_payments = CardPayments(gateway)
self._gateway = gateway
def get_headers(self, request_id=None):
headers = {
"Authorization": "Bearer {}".format(self._gateway.get_token())
}
if request_id is None:
request_id = str(uuid4())
headers['requestId'] = request_id
return headers
class NixGateway(object):
def __init__(self, key, secret, token_ttl=3600, base_url='https://gateway-ypqai.nexxera.com/v2'):
self.key = key
self.secret = base64.b64decode(secret)
self.token_ttl = token_ttl
self.base_url = base_url
self.auth = {
"token": None,
"expires": 0,
}
self.orders = Orders(self)
def get_token(self):
if self.auth['token'] and self.auth['expires'] > time.time():
return self.auth['token']
expires = time.time() + self.token_ttl
payload = {
"iss": self.key,
"access":
[
"cardPayments",
"recurrencePlans",
"recurrences",
"checkout",
"boletoPayments"
],
"exp": expires,
}
signed = jwt.encode(payload, self.secret, algorithm='HS256')
self.auth['token'] = signed
self.auth['expires'] = expires
return self.auth['token']
| 30.81761 | 119 | 0.553878 |
import json
from uuid import uuid4
import requests
import time
from jose import jwt
import base64
class CardPayments(object):
    """Wrapper around the gateway's ``/Orders/CardPayments`` endpoints."""

    def __init__(self, gateway):
        # Back-reference to the NixGateway that owns authentication state.
        self._gateway = gateway

    def __call__(self, payment_token=None):
        """Query card payments.

        :param payment_token: ``None`` to list all payments, a ``str`` to
            fetch a single payment, or a list/tuple of tokens to fetch several.
        :return: decoded JSON payload, ``[]`` on HTTP 204 (no content), or an
            error dict when the body is not valid JSON.
        :raises TypeError: if ``payment_token`` has an unsupported type.
        """
        params = {}
        if payment_token is not None:
            if isinstance(payment_token, str):
                url = '{}/Orders/CardPayments/{}'.format(self._gateway.base_url, payment_token)
            elif isinstance(payment_token, (list, tuple)):
                url = '{}/Orders/CardPayments'.format(self._gateway.base_url)
                params = {"paymentToken": payment_token}
            else:
                # Bug fix: message previously read "should None, list or tuple"
                # (broken grammar) and omitted str, which is accepted above.
                raise TypeError(u'payment_token should be None, str, list or tuple')
        else:
            url = '{}/Orders/CardPayments'.format(self._gateway.base_url)
        r = requests.get(url, headers=self._gateway.orders.get_headers(), params=params)
        if r.status_code == 204:
            # No content: nothing matched the query.
            return []
        try:
            return r.json()
        except ValueError:
            return {
                "error": u"API is not JSON",
                "status_code": r.status_code,
                "response": r.text,
            }

    @staticmethod
    def _json_or_error(r):
        """Return the decoded JSON body on HTTP 200, else a uniform error dict."""
        if r.status_code == 200:
            return r.json()
        return {
            "error": "Response was not HTTP 200",
            "status_code": r.status_code,
            "response": r.text,
        }

    def authorize(self, request_id, order_id, amount, card, return_url, customer=None, recurrence=None, installments=1,
                  capture=True, transaction_type=1):
        """Authorize (and optionally capture) a card payment.

        ``amount`` is given in currency units and converted to cents for the
        API. Optional fields are only included in the payload when provided.
        """
        payload = {
            "installments": installments,
            "capture": capture,
            "merchantOrderId": order_id,
            # Consistency fix: convert through float() like capture()/reverse()
            # so str/Decimal amounts are accepted here as well.
            "amount": int(float(amount) * 100),
            "card": card,
        }
        if customer is not None:
            payload['customer'] = customer
        if recurrence is not None:
            payload['recurrence'] = recurrence
        if return_url is not None:
            payload['returnUrl'] = return_url
        if transaction_type is not None:
            payload['transactionType'] = transaction_type
        headers = self._gateway.orders.get_headers(request_id)
        url = self._gateway.base_url + '/Orders/CardPayments/Authorize'
        return self._json_or_error(requests.post(url, headers=headers, json=payload))

    def capture(self, token, amount):
        """Capture a previously authorized payment identified by ``token``."""
        payload = {
            "paymentToken": token,
            "amount": int(float(amount) * 100),  # API expects cents
        }
        headers = self._gateway.orders.get_headers()
        url = self._gateway.base_url + '/Orders/CardPayments/Capture'
        return self._json_or_error(requests.put(url, headers=headers, json=payload))

    def reverse(self, token, amount):
        """Reverse (refund) a captured payment identified by ``token``."""
        payload = {
            "paymentToken": token,
            "amount": int(float(amount) * 100),  # API expects cents
        }
        headers = self._gateway.orders.get_headers()
        url = self._gateway.base_url + '/Orders/CardPayments/Reverse'
        return self._json_or_error(requests.put(url, headers=headers, json=payload))
class Orders(object):
    """Entry point for the ``/Orders`` section of the gateway API."""

    def __init__(self, gateway):
        self._gateway = gateway
        self.card_payments = CardPayments(gateway)

    def get_headers(self, request_id=None):
        """Build the authenticated headers for an API call.

        A fresh UUID4 is generated for ``requestId`` when none is supplied.
        """
        token = self._gateway.get_token()
        if request_id is None:
            request_id = str(uuid4())
        return {
            "Authorization": "Bearer {}".format(token),
            "requestId": request_id,
        }
class NixGateway(object):
    """Client for the Nix payment gateway.

    Holds the API credentials, caches the signed JWT used for authentication,
    and exposes the ``/Orders`` endpoints through :attr:`orders`.
    """

    def __init__(self, key, secret, token_ttl=3600, base_url='https://gateway-ypqai.nexxera.com/v2'):
        self.key = key
        # The shared secret is delivered base64-encoded; decode it once here.
        self.secret = base64.b64decode(secret)
        self.token_ttl = token_ttl
        self.base_url = base_url
        # Token cache: "expires" of 0 forces a signature on first use.
        self.auth = {"token": None, "expires": 0}
        self.orders = Orders(self)

    def get_token(self):
        """Return a bearer JWT, reusing the cached one while still valid."""
        cached = self.auth
        if cached['token'] and cached['expires'] > time.time():
            return cached['token']
        # Cache miss or expired: sign a fresh token valid for token_ttl seconds.
        expires = time.time() + self.token_ttl
        claims = {
            "iss": self.key,
            "access":
                [
                    "cardPayments",
                    "recurrencePlans",
                    "recurrences",
                    "checkout",
                    "boletoPayments"
                ],
            "exp": expires,
        }
        cached['token'] = jwt.encode(claims, self.secret, algorithm='HS256')
        cached['expires'] = expires
        return cached['token']
| true | true |
1c30e66289b203c7bb8621212a6b627a6ffe918c | 1,350 | py | Python | features.py | rebecca0323/Predicting-Migraines-IAIF | 5e4a31ca437b89c622fb5ed3ab8535728686ec2c | [
"MIT"
] | null | null | null | features.py | rebecca0323/Predicting-Migraines-IAIF | 5e4a31ca437b89c622fb5ed3ab8535728686ec2c | [
"MIT"
] | null | null | null | features.py | rebecca0323/Predicting-Migraines-IAIF | 5e4a31ca437b89c622fb5ed3ab8535728686ec2c | [
"MIT"
] | null | null | null | features = ["total_triggers", "rest", "medicine", "headache_day", "menstruation", "stress", "less_sleep", "fatigue",
"emotional_changes", "weather_temp", "noise", "odors", "drinking", "irregular_meals", "other", "other.1", "excess_sleep",
"exercise", "no_exercise", "ovulation", "sunlight", "improper_lighting", "overeating", "caffeine", "smoking", "cheese_chocolate",
"travel", "massage", "exercise.1"]
js_features = ["sunlight", "improper_lighting", "ovulation", "excess_sleep", "exercise.1", "overeating", "travel",
"weather_temp", "irregular_meals", "headache_day", "noise", "emotional_changes", "drinking", "massage", "odors",
"medicine", "fatigue", "less_sleep", "other", "other.1", "menstruation", "stress", "total_triggers", "sleep", "rest",
"sound_sensitivity", "light_sensitivity", "helping_factors", "nausea_vomiting"]
js_smote = ["smoking", "no_exercise", "improper_lighting", "exercise", "sunlight", "exercise.1", "caffeine", "cheese_chocolate",
"ovulation", "excess_sleep", "travel", "overeating", "drinking", "massage", "noise", "weather_temp", "irregular_meals",
"emotional_changes", "odors", "other.1", "other", "fatigue", "menstruation", "less_sleep", "headache_day", "stress",
"medicine", "light_sensitivity", "sleep", "sound_sensitivity", "total_triggers", "rest", "helping_factors", "nausea_vomiting"]
print(len(js_smote)) | 79.411765 | 129 | 0.705926 | features = ["total_triggers", "rest", "medicine", "headache_day", "menstruation", "stress", "less_sleep", "fatigue",
"emotional_changes", "weather_temp", "noise", "odors", "drinking", "irregular_meals", "other", "other.1", "excess_sleep",
"exercise", "no_exercise", "ovulation", "sunlight", "improper_lighting", "overeating", "caffeine", "smoking", "cheese_chocolate",
"travel", "massage", "exercise.1"]
js_features = ["sunlight", "improper_lighting", "ovulation", "excess_sleep", "exercise.1", "overeating", "travel",
"weather_temp", "irregular_meals", "headache_day", "noise", "emotional_changes", "drinking", "massage", "odors",
"medicine", "fatigue", "less_sleep", "other", "other.1", "menstruation", "stress", "total_triggers", "sleep", "rest",
"sound_sensitivity", "light_sensitivity", "helping_factors", "nausea_vomiting"]
js_smote = ["smoking", "no_exercise", "improper_lighting", "exercise", "sunlight", "exercise.1", "caffeine", "cheese_chocolate",
"ovulation", "excess_sleep", "travel", "overeating", "drinking", "massage", "noise", "weather_temp", "irregular_meals",
"emotional_changes", "odors", "other.1", "other", "fatigue", "menstruation", "less_sleep", "headache_day", "stress",
"medicine", "light_sensitivity", "sleep", "sound_sensitivity", "total_triggers", "rest", "helping_factors", "nausea_vomiting"]
print(len(js_smote)) | true | true |
1c30e6e17a75c53fe0a3fc3d4d6bdc8e9ab1cef3 | 3,860 | py | Python | src/silx/math/setup.py | tifuchs/silx | 4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7 | [
"CC0-1.0",
"MIT"
] | 94 | 2016-03-04T17:25:53.000Z | 2022-03-18T18:05:23.000Z | src/silx/math/setup.py | tifuchs/silx | 4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7 | [
"CC0-1.0",
"MIT"
] | 2,841 | 2016-01-21T09:06:49.000Z | 2022-03-18T14:53:56.000Z | src/silx/math/setup.py | t20100/silx | 035cb286dd46f3f0cb3f819a3cfb6ce253c9933b | [
"CC0-1.0",
"MIT"
] | 71 | 2015-09-30T08:35:35.000Z | 2022-03-16T07:16:28.000Z | # coding: utf-8
# /*##########################################################################
# Copyright (C) 2016-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
__authors__ = ["D. Naudet"]
__license__ = "MIT"
__date__ = "27/03/2017"
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the ``silx.math`` package.

    Declares the pure-Python subpackages and the compiled extension modules
    (histogramnd, marching cubes, combo min/max, colormap, interpolation).

    NOTE(review): numpy.distutils is deprecated and unavailable on
    Python >= 3.12; this build script will eventually need migration.

    :param parent_package: name of the parent package
    :param top_path: top of the source tree
    :return: :class:`numpy.distutils.misc_util.Configuration`
    """
    config = Configuration('math', parent_package, top_path)
    config.add_subpackage('test')
    config.add_subpackage('fit')
    config.add_subpackage('medianfilter')
    config.add_subpackage('fft')

    # =====================================
    # histogramnd
    # =====================================
    histo_src = [os.path.join('histogramnd', 'src', 'histogramnd_c.c'),
                 'chistogramnd.pyx']
    histo_inc = [os.path.join('histogramnd', 'include'),
                 numpy.get_include()]

    config.add_extension('chistogramnd',
                         sources=histo_src,
                         include_dirs=histo_inc,
                         language='c')

    # =====================================
    # histogramnd_lut
    # =====================================
    config.add_extension('chistogramnd_lut',
                         sources=['chistogramnd_lut.pyx'],
                         include_dirs=histo_inc,
                         language='c')

    # =====================================
    # marching cubes
    # =====================================
    mc_src = [os.path.join('marchingcubes', 'mc_lut.cpp'),
              'marchingcubes.pyx']
    config.add_extension('marchingcubes',
                         sources=mc_src,
                         include_dirs=['marchingcubes', numpy.get_include()],
                         language='c++')

    # min/max
    config.add_extension('combo',
                         sources=['combo.pyx'],
                         include_dirs=['include'],
                         language='c')

    # OpenMP-enabled extensions below.
    config.add_extension('_colormap',
                         sources=["_colormap.pyx"],
                         language='c',
                         include_dirs=['include', numpy.get_include()],
                         extra_link_args=['-fopenmp'],
                         extra_compile_args=['-fopenmp'])

    config.add_extension('interpolate',
                         sources=["interpolate.pyx"],
                         language='c',
                         include_dirs=['include', numpy.get_include()],
                         extra_link_args=['-fopenmp'],
                         extra_compile_args=['-fopenmp'])

    return config


if __name__ == "__main__":
    from numpy.distutils.core import setup

    setup(configuration=configuration)
| 38.6 | 80 | 0.547668 | true | true | |
1c30e73790fb26a531f799aac05afbdede366545 | 869 | py | Python | week-2/stepik-1.7.1.py | bhavin-ch/bioinformatics | f2844bea3a2e1125b0d71b65b00c6b9e7975921a | [
"MIT"
] | null | null | null | week-2/stepik-1.7.1.py | bhavin-ch/bioinformatics | f2844bea3a2e1125b0d71b65b00c6b9e7975921a | [
"MIT"
] | null | null | null | week-2/stepik-1.7.1.py | bhavin-ch/bioinformatics | f2844bea3a2e1125b0d71b65b00c6b9e7975921a | [
"MIT"
] | null | null | null |
# Generate the neighborhood of a DNA string (input from dataset_3014_4.txt).
TEXT = 'CCAGTCAATG'
D = 1

# For each nucleotide, the three alternative nucleotides. The string order is
# fixed on purpose: it determines the output order of
# single_letter_neighborhood.
swap_map = {
    'A': 'TCG',
    'T': 'CGA',
    'C': 'GAT',
    'G': 'ATC',
}


def remove_duplicates(somelist):
    """Return the unique items of *somelist* (order not preserved)."""
    return list(set(somelist))


def single_letter_neighborhood(pattern, i):
    """All strings obtained by substituting position *i* of *pattern*."""
    prefix, suffix = pattern[:i], pattern[i + 1:]
    return [prefix + swap + suffix for swap in swap_map[pattern[i]]]


def one_neighborhood(pattern):
    """Every string at Hamming distance exactly 1 from *pattern*."""
    neighbors = []
    for position in range(len(pattern)):
        neighbors.extend(single_letter_neighborhood(pattern, position))
    return remove_duplicates(neighbors)


def get_neighbours_upto_d(pattern, d):
    """Strings reachable from *pattern* by up to *d* single-letter swaps.

    For d == 1 the pattern itself is excluded; for d >= 2 swap-and-swap-back
    paths re-introduce it. Returns [] when d exceeds len(pattern).
    """
    found = one_neighborhood(pattern)
    if d > len(pattern):
        return []
    for _ in range(d - 1):
        expansion = []
        for candidate in found:
            expansion.extend(one_neighborhood(candidate))
        found = found + expansion
    return remove_duplicates(found)


print(' '.join(get_neighbours_upto_d(TEXT, D)))
| 24.828571 | 74 | 0.659379 |
TEXT = 'CCAGTCAATG'
D = 1
swap_map = {
'A': 'TCG',
'T': 'CGA',
'C': 'GAT',
'G': 'ATC'
}
def remove_duplicates(somelist):
return list(set(somelist))
def single_letter_neighborhood(pattern, i):
return [pattern[:i] + x + pattern[i+1:] for x in swap_map[pattern[i]]]
def one_neighborhood(pattern):
neighbors = []
for i in range(len(pattern)):
neighbors = [*neighbors, *single_letter_neighborhood(pattern, i)]
return remove_duplicates(neighbors)
def get_neighbours_upto_d(pattern, d):
neighborhood = one_neighborhood(pattern)
if d > len(pattern):
return []
for _ in range(d-1):
for pattern in neighborhood:
neighborhood = [*neighborhood, *one_neighborhood(pattern)]
return remove_duplicates(neighborhood)
print(' '.join(get_neighbours_upto_d(TEXT, D)))
| true | true |
1c30e815a54eefb9e55589706529f73c8d13b3ef | 184 | py | Python | 2020 Leetcode Challenges/12 December Leetcode Challenge 2020/15 square_of_sorted_array.py | FazeelUsmani/Leetcode | aff4c119178f132c28a39506ffaa75606e0a861b | [
"MIT"
] | 7 | 2020-12-01T14:27:57.000Z | 2022-02-12T09:17:22.000Z | 2020 Leetcode Challenges/12 December Leetcode Challenge 2020/15 square_of_sorted_array.py | FazeelUsmani/Leetcode | aff4c119178f132c28a39506ffaa75606e0a861b | [
"MIT"
] | 4 | 2020-11-12T17:49:22.000Z | 2021-09-06T07:46:37.000Z | 2020 Leetcode Challenges/12 December Leetcode Challenge 2020/15 square_of_sorted_array.py | FazeelUsmani/Leetcode | aff4c119178f132c28a39506ffaa75606e0a861b | [
"MIT"
] | 6 | 2021-05-21T03:49:22.000Z | 2022-01-20T20:36:53.000Z | class Solution:
def sortedSquares(self, nums: List[int]) -> List[int]:
ans = [x**2 for x in nums]
ans.sort()
return ans
| 18.4 | 58 | 0.445652 | class Solution:
def sortedSquares(self, nums: List[int]) -> List[int]:
ans = [x**2 for x in nums]
ans.sort()
return ans
| true | true |
1c30e890d496295ecad1fd637cc6ae2760c657e2 | 1,501 | py | Python | sensuspy/data_retrieval.py | hakansakarya/sensuspy | 6b48cd04cbb48e14ae740c9fb4da14c9715f6159 | [
"MIT"
] | 1 | 2018-02-19T14:57:07.000Z | 2018-02-19T14:57:07.000Z | sensuspy/data_retrieval.py | hakansakarya/sensuspy | 6b48cd04cbb48e14ae740c9fb4da14c9715f6159 | [
"MIT"
] | null | null | null | sensuspy/data_retrieval.py | hakansakarya/sensuspy | 6b48cd04cbb48e14ae740c9fb4da14c9715f6159 | [
"MIT"
] | null | null | null | __author__ = "Sait Hakan Sakarya"
__email__ = "shs5fh@virginia.edu"
import os
import glob
import gzip
import struct
def sync_from_aws(s3_path, local_path, profile = "default", aws_client_path = "/usr/local/bin/aws", delete = False, decompress_files = True):
    """Synchronize data from Amazon S3 to a local path using the AWS CLI.

    The ``aws`` command-line client must be installed.

    :param s3_path: source S3 URI passed to ``aws s3 sync``
    :param local_path: destination directory on the local machine
    :param profile: AWS credentials profile to use
    :param aws_client_path: path to the aws executable (overridden on POSIX,
        see note below)
    :param delete: when True, pass ``--delete`` so files removed on S3 are
        removed locally as well
    :param decompress_files: when True, decompress downloaded ``.gz`` files
        after the sync completes
    """
    aws_args = "s3 --profile " + profile + " sync " + s3_path + " " + local_path
    if delete:
        aws_args += " --delete"
    if os.name == "posix":
        # NOTE(review): on POSIX this unconditionally replaces the
        # caller-supplied aws_client_path with `which aws` -- confirm intended.
        aws_client_path = os.popen('which aws').read().strip()
    invoke_command = aws_client_path + " " + aws_args
    # NOTE(review): the command is built by string concatenation and executed
    # through a shell (os.system); untrusted s3_path/local_path values would
    # allow shell injection. Safe only for trusted inputs.
    try:
        os.system(invoke_command)
        if decompress_files:
            decompress(local_path)
    except Exception as e:
        print(e)
def decompress(local_path):
    """Decompress every ``.gz`` file found (recursively) under *local_path*.

    Each archive is inflated next to itself (the ``.gz`` suffix is dropped)
    and the original archive is then deleted. Failures on individual files
    are reported and skipped so one bad archive does not abort the run.

    :param local_path: directory prefix that was synced from S3; note the
        glob pattern only matches files at least one directory below it.
    """
    # Bug fix: the original referenced an undefined name ``data_path`` here,
    # raising NameError on every call.
    paths = glob.glob(local_path + '*/**/*.gz', recursive=True)
    if not paths:
        # Message fix: previously read "No files no decompress."
        print("No files to decompress.")
        return
    print("Decompressing " + str(len(paths)) + " files.")
    for path in paths:
        try:
            decompressed_name = path[:-3]  # strip the ".gz" suffix
            with gzip.open(path, 'rb') as infile, open(decompressed_name, 'wb') as outfile:
                outfile.write(infile.read())
            os.remove(path)  # keep only the inflated copy
        except Exception as error:
            # Best effort: report and continue with the remaining archives.
            print(error)
| 27.290909 | 141 | 0.584277 | __author__ = "Sait Hakan Sakarya"
__email__ = "shs5fh@virginia.edu"
import os
import glob
import gzip
import struct
def sync_from_aws(s3_path, local_path, profile = "default", aws_client_path = "/usr/local/bin/aws", delete = False, decompress_files = True):
aws_args = "s3 --profile " + profile + " sync " + s3_path + " " + local_path
if delete:
aws_args += " --delete"
if os.name == "posix":
aws_client_path = os.popen('which aws').read().strip()
invoke_command = aws_client_path + " " + aws_args
try:
os.system(invoke_command)
if decompress_files:
decompress(local_path)
except Exception as e:
print(e)
def decompress(local_path):
paths = glob.glob(data_path + '*/**/*.gz', recursive=True)
if len(paths) == 0:
print("No files no decompress.")
else:
print("Decompressing " + str(len(paths)) + " files.")
for path in paths:
try:
decompressed_name = path[:-3]
with gzip.open(path, 'rb') as infile, open(decompressed_name, 'wb') as outfile:
content = infile.read()
outfile.write(content)
os.remove(path)
except Exception as Error:
print(Error)
| true | true |
1c30e979a316677653e10a7d840b2373d881b549 | 1,925 | py | Python | src/modules/loss.py | ab3llini/BlindLess | 46c50fb2748b9d372044d00b901f0cde91946684 | [
"MIT"
] | 1 | 2022-03-19T09:19:12.000Z | 2022-03-19T09:19:12.000Z | src/modules/loss.py | ab3llini/BlindLess | 46c50fb2748b9d372044d00b901f0cde91946684 | [
"MIT"
] | 1 | 2020-02-06T18:26:07.000Z | 2020-02-06T18:26:07.000Z | src/modules/loss.py | ab3llini/BlindLess | 46c50fb2748b9d372044d00b901f0cde91946684 | [
"MIT"
] | null | null | null | from torch.nn import CrossEntropyLoss
class GPT2Loss(CrossEntropyLoss):
    """Shift-aligned cross-entropy loss for GPT-2 style language models.

    Positions whose label equals ``pad_token_id`` are ignored in the average.
    """

    def __init__(self, pad_token_id):
        super(GPT2Loss, self).__init__(ignore_index=pad_token_id)

    def forward(self, output, labels):
        """Compute the language-modeling loss.

        :param output: logits of shape ``(batch, seq_len, vocab)``
        :param labels: token ids of shape ``(batch, seq_len)``
        :return: scalar loss tensor
        """
        # Shift-align: position t predicts token t+1, so drop the last logit
        # and the first label before flattening.
        output = output[..., :-1, :].contiguous().view(-1, output.size(-1))
        labels = labels[..., 1:].contiguous().view(-1)
        return super(GPT2Loss, self).forward(output, labels)


class VisualGPT2Loss(GPT2Loss):
    """GPT2Loss variant for models whose forward pass returns several outputs.

    ``extract`` selects which element of the model output holds the logits;
    ``labels`` is expected as a sequence whose first element is the targets.
    """

    def __init__(self, pad_token_id, extract=None):
        super(VisualGPT2Loss, self).__init__(pad_token_id=pad_token_id)
        if extract is not None:
            assert type(extract) == int, 'Extract value MUST be integer'
        # Bug fix: always define the attribute. Previously it was only set
        # when ``extract`` was not None, so forward() raised AttributeError
        # for the default construction.
        self.extract = extract

    def forward(self, output, labels):
        if self.extract is not None:
            # Pick the logits tensor out of the model's multi-output tuple.
            output = output[self.extract]
        return super(VisualGPT2Loss, self).forward(output, labels[0])
class BERTLoss(CrossEntropyLoss):
    """Shift-aligned cross-entropy loss; pad positions are ignored."""

    def __init__(self, pad_token_id):
        super(BERTLoss, self).__init__(ignore_index=pad_token_id)

    def forward(self, output, labels):
        """Compute the loss from logits ``(batch, seq, vocab)`` and label ids.

        Drops the last logit and the first label so position t is scored
        against token t+1, then flattens both before delegating to
        CrossEntropyLoss.
        """
        vocab_size = output.size(-1)
        shifted_logits = output[..., :-1, :].contiguous().view(-1, vocab_size)
        shifted_targets = labels[..., 1:].contiguous().view(-1)
        return super(BERTLoss, self).forward(shifted_logits, shifted_targets)
| 31.048387 | 75 | 0.619221 | from torch.nn import CrossEntropyLoss
class GPT2Loss(CrossEntropyLoss):
def __init__(self, pad_token_id):
super(GPT2Loss, self).__init__(ignore_index=pad_token_id)
def forward(self, output, labels):
output = output[..., :-1, :].contiguous().view(-1, output.size(-1))
labels = (labels[..., 1:].contiguous()).view(-1)
return super(GPT2Loss, self).forward(output, labels)
class VisualGPT2Loss(GPT2Loss):
def __init__(self, pad_token_id, extract=None):
super(VisualGPT2Loss, self).__init__(pad_token_id=pad_token_id)
if extract is not None:
assert type(extract) == int, 'Extract value MUST be integer'
self.extract = extract
def forward(self, output, labels):
if self.extract is not None:
output = output[self.extract]
return super(VisualGPT2Loss, self).forward(output, labels[0])
class BERTLoss(CrossEntropyLoss):
def __init__(self, pad_token_id):
super(BERTLoss, self).__init__(ignore_index=pad_token_id)
def forward(self, output, labels):
output = output[..., :-1, :].contiguous().view(-1, output.size(-1))
labels = (labels[..., 1:].contiguous()).view(-1)
return super(BERTLoss, self).forward(output, labels)
| true | true |
1c30ea2453568d127247c6ab672daa42e273563a | 1,657 | py | Python | src/api/server.py | Avik32223/gala-iam-api | 2e9f852d016be651e90e21cd5693a10048e487e0 | [
"MIT"
] | null | null | null | src/api/server.py | Avik32223/gala-iam-api | 2e9f852d016be651e90e21cd5693a10048e487e0 | [
"MIT"
] | null | null | null | src/api/server.py | Avik32223/gala-iam-api | 2e9f852d016be651e90e21cd5693a10048e487e0 | [
"MIT"
] | null | null | null | import os
from fastapi import Depends, FastAPI
from pymongo import MongoClient
from starlette.requests import Request
from starlette.responses import Response
from db import Database
from routes import permissions, roles, service_accounts, groups, users, resources, resource_actions
from utils import get_db
# MongoDB connection settings, overridable through environment variables.
MONGO_DB__HOST_URI = os.environ.get("MONGO_DB__HOST_URI", "localhost")
MONGO_DB__HOST_PORT = int(os.environ.get("MONGO_DB__HOST_PORT", 27017))
# One process-wide Mongo client; request-scoped Database wrappers are built
# on top of it for each HTTP request.
db_connection = MongoClient(host=MONGO_DB__HOST_URI, port=MONGO_DB__HOST_PORT)
app = FastAPI(title="GALA Identity and Access Management API",
              description="Authentication and Authorization Management module for GALA resources",
              openapi_url="/gala_iam_api__openapi.json")
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
    """Attach a per-request Database handle and always release it.

    The pre-built 500 response is returned only if the handler itself never
    produces one (e.g. it raised before a response was assigned).
    """
    response = Response("Internal server error", status_code=500)
    db = None
    try:
        db = Database(db_connection)
        request.state.db = db
        response = await call_next(request)
    finally:
        # Bug fix: only close when the handle was actually created.
        # Previously, a failure inside Database() left request.state.db unset
        # and the close call raised AttributeError, masking the real error.
        if db is not None:
            db.close()
    return response
# Register the CRUD route groups on the application.
app.include_router(roles.routes, tags=["CRUD on Roles"])
app.include_router(resources.routes, tags=["CRUD on Resources"])
app.include_router(resource_actions.routes, tags=[
    "CRUD on Resources Actions"])
app.include_router(permissions.routes, tags=["CRUD on Permissions"])
app.include_router(users.routes, tags=["CRUD on Users"])
app.include_router(service_accounts.routes, tags=["CRUD on Service Accounts"])
app.include_router(groups.routes, tags=["CRUD on Groups"])

# Development entry point: serve on all interfaces, port 80.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=80)
| 38.534884 | 99 | 0.756186 | import os
from fastapi import Depends, FastAPI
from pymongo import MongoClient
from starlette.requests import Request
from starlette.responses import Response
from db import Database
from routes import permissions, roles, service_accounts, groups, users, resources, resource_actions
from utils import get_db
MONGO_DB__HOST_URI = os.environ.get("MONGO_DB__HOST_URI", "localhost")
MONGO_DB__HOST_PORT = int(os.environ.get("MONGO_DB__HOST_PORT", 27017))
db_connection = MongoClient(host=MONGO_DB__HOST_URI, port=MONGO_DB__HOST_PORT)
app = FastAPI(title="GALA Identity and Access Management API",
description="Authentication and Authorization Management module for GALA resources",
openapi_url="/gala_iam_api__openapi.json")
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
response = Response("Internal server error", status_code=500)
try:
request.state.db = Database(db_connection)
response = await call_next(request)
finally:
request.state.db.close()
return response
app.include_router(roles.routes, tags=["CRUD on Roles"])
app.include_router(resources.routes, tags=["CRUD on Resources"])
app.include_router(resource_actions.routes, tags=[
"CRUD on Resources Actions"])
app.include_router(permissions.routes, tags=["CRUD on Permissions"])
app.include_router(users.routes, tags=["CRUD on Users"])
app.include_router(service_accounts.routes, tags=["CRUD on Service Accounts"])
app.include_router(groups.routes, tags=["CRUD on Groups"])
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=80)
| true | true |
1c30ea54a706b7feb4d3cf2542d1c8102ab009e6 | 9,490 | py | Python | pygame_menu/examples/game_selector.py | notrurs/pygame-menu | 159853d856d5b25e813389b8ebf541c79771c8ed | [
"MIT"
] | null | null | null | pygame_menu/examples/game_selector.py | notrurs/pygame-menu | 159853d856d5b25e813389b8ebf541c79771c8ed | [
"MIT"
] | null | null | null | pygame_menu/examples/game_selector.py | notrurs/pygame-menu | 159853d856d5b25e813389b8ebf541c79771c8ed | [
"MIT"
] | null | null | null | # coding=utf-8
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
EXAMPLE - GAME SELECTOR
Game with 3 difficulty options.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2020 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
# Import libraries
import sys
sys.path.insert(0, '../../')
import os
import pygame
import pygame_menu
from random import randrange
# -----------------------------------------------------------------------------
# Constants and global variables
# -----------------------------------------------------------------------------
# Text lines rendered in the "About" menu.
ABOUT = ['pygame-menu {0}'.format(pygame_menu.__version__),
         'Author: @{0}'.format(pygame_menu.__author__),
         '',  # new line
         'Email: {0}'.format(pygame_menu.__email__)]
# One-element list so the selector callback can mutate the value in place.
DIFFICULTY = ['EASY']
FPS = 60.0
WINDOW_SIZE = (640, 480)
# Globals assigned in main() once pygame is initialized.
clock = None  # type: pygame.time.Clock
main_menu = None  # type: pygame_menu.Menu
surface = None  # type: pygame.Surface
# -----------------------------------------------------------------------------
# Methods
# -----------------------------------------------------------------------------
def change_difficulty(value, difficulty):
    """
    Selector callback: record the difficulty chosen in the play menu.

    :param value: Tuple containing the data of the selected object
    :type value: tuple
    :param difficulty: Optional parameter passed as argument to add_selector
    :type difficulty: str
    :return: None
    """
    selected_label, selected_index = value
    message = 'Selected difficulty: "{0}" ({1}) at index {2}'.format(
        selected_label, difficulty, selected_index)
    print(message)
    DIFFICULTY[0] = difficulty
def random_color():
    """
    Return a random RGB color tuple (each channel drawn from 0..254).

    :return: Color tuple
    :rtype: tuple
    """
    return tuple(randrange(0, 255) for _ in range(3))
def play_function(difficulty, font, test=False):
    """
    Main game function.

    :param difficulty: Difficulty of the game, as a 1-element tuple/list
        (the shared container mutated by the difficulty selector)
    :type difficulty: tuple, list
    :param font: Pygame font
    :type font: :py:class:`pygame.font.Font`
    :param test: Test method, if true only one loop is allowed
    :type test: bool
    :return: None
    """
    assert isinstance(difficulty, (tuple, list))
    difficulty = difficulty[0]
    assert isinstance(difficulty, str)
    # Define globals
    global main_menu
    global clock
    # Render the status text matching the chosen difficulty
    if difficulty == 'EASY':
        f = font.render('Playing as a baby (easy)', 1, (255, 255, 255))
    elif difficulty == 'MEDIUM':
        f = font.render('Playing as a kid (medium)', 1, (255, 255, 255))
    elif difficulty == 'HARD':
        f = font.render('Playing as a champion (hard)', 1, (255, 255, 255))
    else:
        raise Exception('Unknown difficulty {0}'.format(difficulty))
    # Draw random color and text
    bg_color = random_color()
    f_width = f.get_size()[0]
    # Reset main menu and disable
    # You also can set another menu, like a 'pause menu', or just use the same
    # main_menu as the menu that will check all your input.
    main_menu.disable()
    main_menu.reset(1)
    while True:
        # noinspection PyUnresolvedReferences
        clock.tick(60)
        # Application events
        events = pygame.event.get()
        for e in events:
            if e.type == pygame.QUIT:
                exit()
            elif e.type == pygame.KEYDOWN:
                if e.key == pygame.K_ESCAPE:
                    # ESC re-enables the menu and returns control to the
                    # main-menu loop inside main()
                    main_menu.enable()
                    return
        # Pass events to main_menu
        main_menu.update(events)
        # Continue playing: repaint background color and status text
        surface.fill(bg_color)
        surface.blit(f, ((WINDOW_SIZE[0] - f_width) / 2, WINDOW_SIZE[1] / 2))
        pygame.display.flip()
        # In test mode a single iteration is enough
        if test:
            break
def main_background():
    """
    Background drawing callback used while a menu is active.

    :return: None
    """
    global surface
    menu_background_color = (128, 0, 128)
    surface.fill(menu_background_color)
def main(test=False):
    """
    Main program: initializes pygame, builds the menu tree and runs the loop.

    :param test: Indicate function is being tested (loop runs only once)
    :type test: bool
    :return: None
    """
    # -------------------------------------------------------------------------
    # Globals
    # -------------------------------------------------------------------------
    global clock
    global main_menu
    global surface
    # -------------------------------------------------------------------------
    # Init pygame
    # -------------------------------------------------------------------------
    pygame.init()
    # Center the game window on the screen
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    # Create pygame screen and objects
    surface = pygame.display.set_mode(WINDOW_SIZE)
    pygame.display.set_caption('Example - Game Selector')
    clock = pygame.time.Clock()
    # -------------------------------------------------------------------------
    # Create menus: Play Menu
    # -------------------------------------------------------------------------
    play_menu = pygame_menu.Menu(
        center_content=True,
        height=WINDOW_SIZE[1] * 0.7,
        onclose=pygame_menu.events.DISABLE_CLOSE,
        title='Play Menu',
        width=WINDOW_SIZE[0] * 0.7,
    )
    submenu_theme = pygame_menu.themes.THEME_DEFAULT.copy()
    submenu_theme.widget_font_size = 15
    play_submenu = pygame_menu.Menu(
        height=WINDOW_SIZE[1] * 0.5,
        theme=submenu_theme,
        title='Submenu',
        width=WINDOW_SIZE[0] * 0.7,
    )
    for i in range(30):
        play_submenu.add_button('Back {0}'.format(i), pygame_menu.events.BACK)
    play_submenu.add_button('Return to main menu', pygame_menu.events.RESET)
    play_menu.add_button('Start',  # When pressing return -> play(DIFFICULTY[0], font)
                         play_function,
                         DIFFICULTY,
                         pygame.font.Font(pygame_menu.font.FONT_FRANCHISE, 30))
    play_menu.add_selector('Select difficulty ',
                           [('1 - Easy', 'EASY'),
                            ('2 - Medium', 'MEDIUM'),
                            ('3 - Hard', 'HARD')],
                           onchange=change_difficulty,
                           selector_id='select_difficulty')
    play_menu.add_button('Another menu', play_submenu)
    play_menu.add_button('Return to main menu', pygame_menu.events.BACK)
    # -------------------------------------------------------------------------
    # Create menus: About
    # -------------------------------------------------------------------------
    about_theme = pygame_menu.themes.THEME_DEFAULT.copy()
    about_theme.widget_margin = (0, 0)
    about_theme.widget_offset = (0, 0.05)
    about_menu = pygame_menu.Menu(
        height=WINDOW_SIZE[1] * 0.6,
        onclose=pygame_menu.events.DISABLE_CLOSE,
        theme=about_theme,
        title='About',
        width=WINDOW_SIZE[0] * 0.6,
    )
    for m in ABOUT:
        about_menu.add_label(m, align=pygame_menu.locals.ALIGN_LEFT, font_size=20)
    about_menu.add_label('')
    about_menu.add_button('Return to menu', pygame_menu.events.BACK)
    # -------------------------------------------------------------------------
    # Create menus: Main
    # -------------------------------------------------------------------------
    main_menu = pygame_menu.Menu(
        back_box=False,
        center_content=True,
        height=WINDOW_SIZE[1] * 0.6,
        onclose=pygame_menu.events.DISABLE_CLOSE,
        title='Main Menu',
        width=WINDOW_SIZE[0] * 0.6,
    )
    main_menu.add_button('Play', play_menu)
    main_menu.add_button('About', about_menu)
    main_menu.add_button('Quit', pygame_menu.events.EXIT)
    # -------------------------------------------------------------------------
    # Main loop
    # -------------------------------------------------------------------------
    while True:
        # Tick
        clock.tick(FPS)
        # Paint background
        main_background()
        # Application events
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                exit()
        # Main menu (blocks while the menu is enabled unless disable_loop)
        main_menu.mainloop(surface, main_background, disable_loop=test, fps_limit=FPS)
        # Flip surface
        pygame.display.flip()
        # At first loop returns
        if test:
            break
if __name__ == '__main__':
    main()
| 32.169492 | 94 | 0.541728 |
import sys
sys.path.insert(0, '../../')
import os
import pygame
import pygame_menu
from random import randrange
ABOUT = ['pygame-menu {0}'.format(pygame_menu.__version__),
'Author: @{0}'.format(pygame_menu.__author__),
'',
'Email: {0}'.format(pygame_menu.__email__)]
DIFFICULTY = ['EASY']
FPS = 60.0
WINDOW_SIZE = (640, 480)
clock = None
main_menu = None
surface = None
def change_difficulty(value, difficulty):
selected, index = value
print('Selected difficulty: "{0}" ({1}) at index {2}'.format(selected, difficulty, index))
DIFFICULTY[0] = difficulty
def random_color():
return randrange(0, 255), randrange(0, 255), randrange(0, 255)
def play_function(difficulty, font, test=False):
assert isinstance(difficulty, (tuple, list))
difficulty = difficulty[0]
assert isinstance(difficulty, str)
global main_menu
global clock
if difficulty == 'EASY':
f = font.render('Playing as a baby (easy)', 1, (255, 255, 255))
elif difficulty == 'MEDIUM':
f = font.render('Playing as a kid (medium)', 1, (255, 255, 255))
elif difficulty == 'HARD':
f = font.render('Playing as a champion (hard)', 1, (255, 255, 255))
else:
raise Exception('Unknown difficulty {0}'.format(difficulty))
bg_color = random_color()
f_width = f.get_size()[0]
main_menu.disable()
main_menu.reset(1)
while True:
clock.tick(60)
events = pygame.event.get()
for e in events:
if e.type == pygame.QUIT:
exit()
elif e.type == pygame.KEYDOWN:
if e.key == pygame.K_ESCAPE:
main_menu.enable()
return
main_menu.update(events)
surface.fill(bg_color)
surface.blit(f, ((WINDOW_SIZE[0] - f_width) / 2, WINDOW_SIZE[1] / 2))
pygame.display.flip()
if test:
break
def main_background():
global surface
surface.fill((128, 0, 128))
def main(test=False):
    """Build the menu hierarchy (Play / About / Quit) and run the app loop.

    :param test: if True, run a single loop iteration and return
    """
    global clock
    global main_menu
    global surface
    pygame.init()
    # Center the SDL window on screen.
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    surface = pygame.display.set_mode(WINDOW_SIZE)
    pygame.display.set_caption('Example - Game Selector')
    clock = pygame.time.Clock()
    # --- Play menu (difficulty selector + start button) ---
    play_menu = pygame_menu.Menu(
        center_content=True,
        height=WINDOW_SIZE[1] * 0.7,
        onclose=pygame_menu.events.DISABLE_CLOSE,
        title='Play Menu',
        width=WINDOW_SIZE[0] * 0.7,
    )
    submenu_theme = pygame_menu.themes.THEME_DEFAULT.copy()
    submenu_theme.widget_font_size = 15
    play_submenu = pygame_menu.Menu(
        height=WINDOW_SIZE[1] * 0.5,
        theme=submenu_theme,
        title='Submenu',
        width=WINDOW_SIZE[0] * 0.7,
    )
    for i in range(30):
        play_submenu.add_button('Back {0}'.format(i), pygame_menu.events.BACK)
    play_submenu.add_button('Return to main menu', pygame_menu.events.RESET)
    # DIFFICULTY is passed as a list so change_difficulty can mutate it and
    # play_function sees the current value at click time.
    play_menu.add_button('Start',
                         play_function,
                         DIFFICULTY,
                         pygame.font.Font(pygame_menu.font.FONT_FRANCHISE, 30))
    play_menu.add_selector('Select difficulty ',
                           [('1 - Easy', 'EASY'),
                            ('2 - Medium', 'MEDIUM'),
                            ('3 - Hard', 'HARD')],
                           onchange=change_difficulty,
                           selector_id='select_difficulty')
    play_menu.add_button('Another menu', play_submenu)
    play_menu.add_button('Return to main menu', pygame_menu.events.BACK)
    # --- About menu ---
    about_theme = pygame_menu.themes.THEME_DEFAULT.copy()
    about_theme.widget_margin = (0, 0)
    about_theme.widget_offset = (0, 0.05)
    about_menu = pygame_menu.Menu(
        height=WINDOW_SIZE[1] * 0.6,
        onclose=pygame_menu.events.DISABLE_CLOSE,
        theme=about_theme,
        title='About',
        width=WINDOW_SIZE[0] * 0.6,
    )
    for m in ABOUT:
        about_menu.add_label(m, align=pygame_menu.locals.ALIGN_LEFT, font_size=20)
    about_menu.add_label('')
    about_menu.add_button('Return to menu', pygame_menu.events.BACK)
    # --- Main menu tying everything together ---
    main_menu = pygame_menu.Menu(
        back_box=False,
        center_content=True,
        height=WINDOW_SIZE[1] * 0.6,
        onclose=pygame_menu.events.DISABLE_CLOSE,
        title='Main Menu',
        width=WINDOW_SIZE[0] * 0.6,
    )
    main_menu.add_button('Play', play_menu)
    main_menu.add_button('About', about_menu)
    main_menu.add_button('Quit', pygame_menu.events.EXIT)
    while True:
        clock.tick(FPS)
        main_background()
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                exit()
        main_menu.mainloop(surface, main_background, disable_loop=test, fps_limit=FPS)
        pygame.display.flip()
        if test:
            break
if __name__ == '__main__':
main()
| true | true |
1c30eaae883f48dd77f10f36bc40d36b1f5eb484 | 1,327 | py | Python | remove_copies.py | bazitur/C6H6 | 490b1e93b4b33d0f14c8a3e3c0f7c012caefdb90 | [
"BSD-3-Clause"
] | null | null | null | remove_copies.py | bazitur/C6H6 | 490b1e93b4b33d0f14c8a3e3c0f7c012caefdb90 | [
"BSD-3-Clause"
] | null | null | null | remove_copies.py | bazitur/C6H6 | 490b1e93b4b33d0f14c8a3e3c0f7c012caefdb90 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols
from glob import glob
filenames = sorted(glob("./MOLs/*"))
canonical_SMILES = [Chem.MolToSmiles(Chem.MolFromMolFile(fn)) for fn in filenames]
def f7(seq):
    """Return *seq* with duplicates removed, preserving first-occurrence order."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
# sort canonical SMILES by similarity
print("\n".join(f7(canonical_SMILES)), end="")
import sys
sys.exit()
canonical_SMILES = sorted(set(canonical_SMILES)) # remove duplicates
N = len(canonical_SMILES)
matrice = [[0 for __ in range(N)] for _ in range(N)]
fingerprints = [
FingerprintMols.FingerprintMol(Chem.MolFromSmiles(s)) for s in canonical_SMILES
]
for i in range(N):
for j in range(N):
if i == j:
matrice[i][j] = -1
else:
matrice[i][j] = DataStructs.FingerprintSimilarity(
fingerprints[i],
fingerprints[j]
)
result = []
current = N - 1 # start with a last one, for no reason
for i in range(N):
result.append(canonical_SMILES[current])
next_index = matrice[current].index(max(matrice[current]))
for j in range(N):
matrice[j][current] = -1
current = next_index
print("\n".join(result), end="")
| 28.847826 | 83 | 0.66315 | from __future__ import print_function
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols
from glob import glob
filenames = sorted(glob("./MOLs/*"))
canonical_SMILES = [Chem.MolToSmiles(Chem.MolFromMolFile(fn)) for fn in filenames]
def f7(seq):
    """Deduplicate *seq* while keeping the order of first occurrences."""
    seen = set()
    unique = []
    for element in seq:
        if element in seen:
            continue
        seen.add(element)
        unique.append(element)
    return unique
print("\n".join(f7(canonical_SMILES)), end="")
import sys
sys.exit()
canonical_SMILES = sorted(set(canonical_SMILES))
N = len(canonical_SMILES)
matrice = [[0 for __ in range(N)] for _ in range(N)]
fingerprints = [
FingerprintMols.FingerprintMol(Chem.MolFromSmiles(s)) for s in canonical_SMILES
]
for i in range(N):
for j in range(N):
if i == j:
matrice[i][j] = -1
else:
matrice[i][j] = DataStructs.FingerprintSimilarity(
fingerprints[i],
fingerprints[j]
)
result = []
current = N - 1
for i in range(N):
result.append(canonical_SMILES[current])
next_index = matrice[current].index(max(matrice[current]))
for j in range(N):
matrice[j][current] = -1
current = next_index
print("\n".join(result), end="")
| true | true |
1c30ebe459262437a52d7dd2fbbc288b940c35a0 | 4,989 | py | Python | lsml/initializer/provided/ball.py | sandeepdas05/lsm-crack-width | 38460e514d48f3424bb8d3bd58cb3eb330153e64 | [
"BSD-3-Clause"
] | 24 | 2020-01-30T15:53:33.000Z | 2022-01-15T09:46:24.000Z | lsml/initializer/provided/ball.py | sandeepdas05/lsm-crack-width | 38460e514d48f3424bb8d3bd58cb3eb330153e64 | [
"BSD-3-Clause"
] | null | null | null | lsml/initializer/provided/ball.py | sandeepdas05/lsm-crack-width | 38460e514d48f3424bb8d3bd58cb3eb330153e64 | [
"BSD-3-Clause"
] | 13 | 2019-12-05T08:32:11.000Z | 2022-03-20T03:12:03.000Z | import numpy
from lsml.initializer.initializer_base import InitializerBase
class BallInitializer(InitializerBase):
    """ Initialize the zero level set to a ball of fixed radius """

    def __init__(self, radius=10, location=None):
        self.radius = radius
        self.location = location

    def initialize(self, img, dx, seed):
        """Return a boolean mask that is True inside the ball."""
        if self.location is not None and len(self.location) != img.ndim:
            raise ValueError(
                '`location` is len {} but should be {}'.format(
                    len(self.location), img.ndim))

        if self.location is None:
            center = 0.5 * numpy.array(img.shape)
        else:
            center = self.location

        # Broadcasting helper: keep the component axis, add one new axis
        # per image dimension.
        bcast = (slice(None),) + (None,) * img.ndim
        coords = numpy.indices(img.shape, dtype=float)
        coords *= dx[bcast]
        coords -= (center * dx)[bcast]
        distance = numpy.sqrt((coords ** 2).sum(axis=0))
        return (self.radius - distance) > 0
class RandomBallInitializer(InitializerBase):
    """ Initialize the zero level set to a circle/sphere/hyper-sphere
    with random center and radius
    """

    def __init__(self, randomize_center=True, random_state=None):
        """ Initialize a RandomBallInitializer initialization object

        Parameters
        ----------
        random_state: numpy.random.RandomState, default None
            Supply for reproducible results
        randomize_center: bool
            If True, then location of the random ball is randomized
        """
        if random_state is None:
            random_state = numpy.random.RandomState()
        self.random_state = random_state
        self.randomize_center = randomize_center

    def _get_seed_value_from_image(self, img):
        """ Uses the first integer 4 values after the decimal point of the
        first image value as the seed
        """
        img_val = img.ravel()[0]
        img_str = "{:.4f}".format(img_val)
        _, decimal_str = img_str.split(".")
        seed_val = int(decimal_str)
        return seed_val

    def initialize(self, img, dx, seed):
        """Return a boolean ball mask with image-determined random
        radius and (optionally) random center."""
        # Seed the random state from the image so that the same "random"
        # initialization is given for identical image inputs
        seed_value = self._get_seed_value_from_image(img)

        # Save the state to be reset later
        state = self.random_state.get_state()
        self.random_state.seed(seed_value)

        try:
            # Generate a random radius between 20% and 25% of the
            # smallest physical image extent.
            min_dim = min(dx * img.shape)
            radius = self.random_state.uniform(
                low=0.20*min_dim, high=0.25*min_dim)

            # BUGFIX: `numpy.float` was deprecated in NumPy 1.20 and removed
            # in 1.24; the builtin `float` is the equivalent dtype.
            indices = [numpy.arange(img.shape[i], dtype=float)*dx[i]
                       for i in range(img.ndim)]

            # Select the center point uniformly at random.
            # Expected center is at the center of image, but could
            # be terribly far away in general.
            if self.randomize_center:
                center = []
                for i in range(img.ndim):
                    while True:
                        center_coord = self.random_state.choice(indices[i])
                        if (center_coord-radius > indices[i][0] and
                                center_coord+radius <= indices[i][-1]):
                            center.append(center_coord)
                            break
                center = numpy.array(center)
            else:
                center = 0.5 * numpy.array(img.shape, dtype=float)

            indices = numpy.indices(img.shape, dtype=float)
            shape = dx.shape + tuple(numpy.ones(img.ndim, dtype=int))
            indices *= dx.reshape(shape)
            indices -= center.reshape(shape)
            indices **= 2

            init_mask = indices.sum(axis=0)**0.5 <= radius
        finally:
            # Always restore the caller's random state, even on failure.
            self.random_state.set_state(state)

        return init_mask
class ThresholdBallInitializer(InitializerBase):
    """Initialize to a ball around `seed` inside a thresholded component.

    The image is compared against a Gaussian-smoothed copy of itself; the
    connected components of the resulting mask are labeled, and the zero
    level set is the ball centered at the seed (snapped to the closest
    labeled voxel if necessary) that fits inside its component.
    """

    def __init__(self, sigma=4.0):
        # Standard deviation of the Gaussian smoothing filter.
        self.sigma = sigma

    def initialize(self, img, dx, seed):
        from scipy.ndimage import gaussian_filter
        from scipy.ndimage import label
        import skfmm

        smoothed = gaussian_filter(img, self.sigma)
        thresholded = img > smoothed
        labels, _ = label(thresholded)

        if labels[self._seed_to_index(seed)] > 0:
            seed_ = seed
        else:
            # Seed fell on background: snap to the closest labeled voxel.
            # BUGFIX: the original did `nonzero *= dx`, an in-place float
            # multiply on an integer index array (casting error), and it
            # then mixed physical coordinates with index coordinates.
            # Measure distance in physical units but keep `seed_` in index
            # coordinates, as the rest of this method expects.
            nonzero = numpy.array(numpy.nonzero(labels)).T
            dists = numpy.linalg.norm((nonzero - seed) * dx, axis=1)
            seed_ = nonzero[dists.argmin()]

        mask = labels == labels[self._seed_to_index(seed_)]

        # Physical distance from every voxel to the seed.
        inds = numpy.indices(img.shape, dtype=float)
        for i in range(inds.shape[0]):
            inds[i] -= seed_[i]
            inds[i] *= dx[i]
        dist_to_seed = (inds**2).sum(axis=0)**0.5

        # Distance from the component boundary via fast marching.
        dist_to_boundary = skfmm.distance(mask, dx)

        return dist_to_seed < dist_to_boundary[self._seed_to_index(seed_)]

    @staticmethod
    def _seed_to_index(seed):
        return tuple(seed.round().astype(int))
| 32.187097 | 74 | 0.60453 | import numpy
from lsml.initializer.initializer_base import InitializerBase
class BallInitializer(InitializerBase):
    """Initialize the zero level set to a ball of fixed radius."""
    def __init__(self, radius=10, location=None):
        # radius: ball radius in physical units (the `dx`-scaled space)
        # location: optional center in index coordinates; image center if None
        self.radius = radius
        self.location = location
    def initialize(self, img, dx, seed):
        """Return a boolean mask that is True inside the ball."""
        if self.location is not None and len(self.location) != img.ndim:
            msg = '`location` is len {} but should be {}'
            raise ValueError(msg.format(len(self.location), img.ndim))
        if self.location is None:
            location = 0.5 * numpy.array(img.shape)
        else:
            location = self.location
        # Broadcasting helper: component axis first, then one new axis
        # per image dimension.
        slices = (slice(None),) + tuple(None for _ in range(img.ndim))
        indices = numpy.indices(img.shape, dtype=float)
        # Scale index coordinates into physical space and re-center.
        indices *= dx[slices]
        indices -= (location * dx)[slices]
        return (self.radius - numpy.sqrt((indices**2).sum(axis=0))) > 0
class RandomBallInitializer(InitializerBase):
    """Initialize the zero level set to a ball with image-determined
    "random" radius and (optionally) random center."""
    def __init__(self, randomize_center=True, random_state=None):
        # random_state: numpy.random.RandomState; supply for reproducibility
        # randomize_center: if True, the ball's center is randomized
        if random_state is None:
            random_state = numpy.random.RandomState()
        self.random_state = random_state
        self.randomize_center = randomize_center
    def _get_seed_value_from_image(self, img):
        """Use the first 4 decimal digits of the first image value as seed."""
        img_val = img.ravel()[0]
        img_str = "{:.4f}".format(img_val)
        _, decimal_str = img_str.split(".")
        seed_val = int(decimal_str)
        return seed_val
    def initialize(self, img, dx, seed):
        """Return a boolean ball mask; identical images give identical masks."""
        # Seed from the image so identical inputs yield identical output.
        seed_value = self._get_seed_value_from_image(img)
        # Save the caller's RNG state so it can be restored afterwards.
        state = self.random_state.get_state()
        self.random_state.seed(seed_value)
        try:
            min_dim = min(dx * img.shape)
            radius = self.random_state.uniform(
                low=0.20*min_dim, high=0.25*min_dim)
            # BUGFIX: `numpy.float` was removed in NumPy 1.24; use `float`.
            indices = [numpy.arange(img.shape[i], dtype=float)*dx[i]
                       for i in range(img.ndim)]
            # Pick a center such that the whole ball fits inside the image.
            if self.randomize_center:
                center = []
                for i in range(img.ndim):
                    while True:
                        center_coord = self.random_state.choice(indices[i])
                        if (center_coord-radius > indices[i][0] and
                                center_coord+radius <= indices[i][-1]):
                            center.append(center_coord)
                            break
                center = numpy.array(center)
            else:
                center = 0.5 * numpy.array(img.shape, dtype=float)
            indices = numpy.indices(img.shape, dtype=float)
            shape = dx.shape + tuple(numpy.ones(img.ndim, dtype=int))
            indices *= dx.reshape(shape)
            indices -= center.reshape(shape)
            indices **= 2
            init_mask = indices.sum(axis=0)**0.5 <= radius
        finally:
            # Always restore the RNG state, even if initialization fails.
            self.random_state.set_state(state)
        return init_mask
class ThresholdBallInitializer(InitializerBase):
    """Initialize to a ball around `seed` fitting inside the thresholded
    connected component that contains it."""
    def __init__(self, sigma=4.0):
        # Standard deviation of the Gaussian smoothing filter.
        self.sigma = sigma
    def initialize(self, img, dx, seed):
        from scipy.ndimage import gaussian_filter
        from scipy.ndimage import label
        import skfmm
        smoothed = gaussian_filter(img, self.sigma)
        thresholded = img > smoothed
        labels, _ = label(thresholded)
        if labels[self._seed_to_index(seed)] > 0:
            seed_ = seed
        else:
            # Seed fell on background: snap to the closest labeled voxel.
            # BUGFIX: the original `nonzero *= dx` attempted an in-place
            # float multiply on an integer index array (casting error) and
            # mixed physical with index coordinates. Compute the distance
            # in physical units but keep `seed_` in index coordinates.
            nonzero = numpy.array(numpy.nonzero(labels)).T
            dists = numpy.linalg.norm((nonzero - seed) * dx, axis=1)
            seed_ = nonzero[dists.argmin()]
        mask = labels == labels[self._seed_to_index(seed_)]
        # Physical distance from every voxel to the seed.
        inds = numpy.indices(img.shape, dtype=float)
        for i in range(inds.shape[0]):
            inds[i] -= seed_[i]
            inds[i] *= dx[i]
        dist_to_seed = (inds**2).sum(axis=0)**0.5
        # Distance from the component boundary via fast marching.
        dist_to_boundary = skfmm.distance(mask, dx)
        return dist_to_seed < dist_to_boundary[self._seed_to_index(seed_)]
    @staticmethod
    def _seed_to_index(seed):
        return tuple(seed.round().astype(int))
| true | true |
1c30ef844dfb9dcf5038a828087fc023dda43779 | 28,342 | py | Python | neo/rawio/spike2rawio.py | deeptimittal12/python-neo | 7409f47b5debd4d2a75bbf0e77ac10562446c97a | [
"BSD-3-Clause"
] | 1 | 2020-06-08T14:00:03.000Z | 2020-06-08T14:00:03.000Z | neo/rawio/spike2rawio.py | deeptimittal12/python-neo | 7409f47b5debd4d2a75bbf0e77ac10562446c97a | [
"BSD-3-Clause"
] | null | null | null | neo/rawio/spike2rawio.py | deeptimittal12/python-neo | 7409f47b5debd4d2a75bbf0e77ac10562446c97a | [
"BSD-3-Clause"
] | null | null | null | """
Classe for reading data in CED spike2 files (.smr).
This code is based on:
- sonpy, written by Antonio Gonzalez <Antonio.Gonzalez@cantab.net>
Disponible here ::
http://www.neuro.ki.se/broberger/
and sonpy come from :
- SON Library 2.0 for MATLAB, written by Malcolm Lidierth at
King's College London.
See http://www.kcl.ac.uk/depsta/biomedical/cfnr/lidierth.html
This IO support old (<v6) and new files (>v7) of spike2
Author: Samuel Garcia
"""
# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3
from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
_event_channel_dtype)
import numpy as np
from collections import OrderedDict
class Spike2RawIO(BaseRawIO):
"""
"""
extensions = ['smr']
rawmode = 'one-file'
    def __init__(self, filename='', take_ideal_sampling_rate=False, ced_units=True,
                 try_signal_grouping=True):
        """
        :param filename: path to the .smr file
        :param take_ideal_sampling_rate: if True, use the file's
            'ideal_rate' field instead of the divider-derived rate
        :param ced_units: if True, split each spike channel into one unit
            per CED marker value; otherwise group all spikes per channel
        :param try_signal_grouping: if True, group signal channels sharing
            sampling rate/dtype/units/gain/offset under one group_id
        """
        BaseRawIO.__init__(self)
        self.filename = filename
        self.take_ideal_sampling_rate = take_ideal_sampling_rate
        self.ced_units = ced_units
        self.try_signal_grouping = try_signal_grouping
def _parse_header(self):
# get header info and channel_info
with open(self.filename, 'rb') as fid:
self._global_info = read_as_dict(fid, headerDescription)
info = self._global_info
if info['system_id'] < 6:
info['dtime_base'] = 1e-6
info['datetime_detail'] = 0
info['datetime_year'] = 0
self._time_factor = info['us_per_time'] * info['dtime_base']
self._channel_infos = []
for chan_id in range(info['channels']):
fid.seek(512 + 140 * chan_id)
chan_info = read_as_dict(fid, channelHeaderDesciption1)
if chan_info['kind'] in [1, 6]:
dt = [('scale', 'f4'), ('offset', 'f4'), ('unit', 'S6'), ]
chan_info.update(read_as_dict(fid, dt))
elif chan_info['kind'] in [7, 9]:
dt = [('min', 'f4'), ('max', 'f4'), ('unit', 'S6'), ]
chan_info.update(read_as_dict(fid, dt))
elif chan_info['kind'] in [4]:
dt = [('init_low', 'u1'), ('next_low', 'u1'), ]
chan_info.update(read_as_dict(fid, dt))
if chan_info['kind'] in [1, 6, 7, 9]:
if info['system_id'] < 6:
chan_info.update(read_as_dict(fid, [('divide', 'i2')]))
else:
chan_info.update(read_as_dict(fid, [('interleave', 'i2')]))
chan_info['type'] = dict_kind[chan_info['kind']]
if chan_info['blocks'] == 0:
chan_info['t_start'] = 0. # this means empty signals
else:
fid.seek(chan_info['firstblock'])
block_info = read_as_dict(fid, blockHeaderDesciption)
chan_info['t_start'] = float(block_info['start_time']) * \
float(info['us_per_time']) * float(info['dtime_base'])
self._channel_infos.append(chan_info)
# get data blocks index for all channel
# run through all data block of of channel to prepare chan to block maps
self._memmap = np.memmap(self.filename, dtype='u1', offset=0, mode='r')
self._all_data_blocks = {}
self._by_seg_data_blocks = {}
for chan_id, chan_info in enumerate(self._channel_infos):
data_blocks = []
ind = chan_info['firstblock']
for b in range(chan_info['blocks']):
block_info = self._memmap[ind:ind + 20].view(blockHeaderDesciption)[0]
data_blocks.append((ind, block_info['items'], 0,
block_info['start_time'], block_info['end_time']))
ind = block_info['succ_block']
data_blocks = np.array(data_blocks, dtype=[(
'pos', 'int32'), ('size', 'int32'), ('cumsum', 'int32'),
('start_time', 'int32'), ('end_time', 'int32')])
data_blocks['pos'] += 20 # 20 is ths header size
self._all_data_blocks[chan_id] = data_blocks
self._by_seg_data_blocks[chan_id] = []
# For all signal channel detect gaps between data block (pause in rec) so new Segment.
# then check that all channel have the same gaps.
# this part is tricky because we need to check that all channel have same pause.
all_gaps_block_ind = {}
for chan_id, chan_info in enumerate(self._channel_infos):
if chan_info['kind'] in [1, 9]:
data_blocks = self._all_data_blocks[chan_id]
sig_size = np.sum(self._all_data_blocks[chan_id]['size'])
if sig_size > 0:
interval = get_sample_interval(info, chan_info) / self._time_factor
# detect gaps
inter_block_sizes = data_blocks['start_time'][1:] - \
data_blocks['end_time'][:-1]
gaps_block_ind, = np.nonzero(inter_block_sizes > interval)
all_gaps_block_ind[chan_id] = gaps_block_ind
# find t_start/t_stop for each seg based on gaps indexe
self._sig_t_starts = {}
self._sig_t_stops = {}
if len(all_gaps_block_ind) == 0:
# this means no signal channels
nb_segment = 1
# loop over event/spike channel to get the min/max time
t_start, t_stop = None, None
for chan_id, chan_info in enumerate(self._channel_infos):
data_blocks = self._all_data_blocks[chan_id]
if data_blocks.size > 0:
# if t_start is None or data_blocks[0]['start_time']<t_start:
# t_start = data_blocks[0]['start_time']
if t_stop is None or data_blocks[-1]['end_time'] > t_stop:
t_stop = data_blocks[-1]['end_time']
# self._seg_t_starts = [t_start]
self._seg_t_starts = [0]
self._seg_t_stops = [t_stop]
else:
all_nb_seg = np.array([v.size + 1 for v in all_gaps_block_ind.values()])
assert np.all(all_nb_seg[0] == all_nb_seg), \
'Signal channel have differents pause so diffrents nb_segment'
nb_segment = int(all_nb_seg[0])
for chan_id, gaps_block_ind in all_gaps_block_ind.items():
data_blocks = self._all_data_blocks[chan_id]
self._sig_t_starts[chan_id] = []
self._sig_t_stops[chan_id] = []
for seg_ind in range(nb_segment):
if seg_ind == 0:
fisrt_bl = 0
else:
fisrt_bl = gaps_block_ind[seg_ind - 1] + 1
self._sig_t_starts[chan_id].append(data_blocks[fisrt_bl]['start_time'])
if seg_ind < nb_segment - 1:
last_bl = gaps_block_ind[seg_ind]
else:
last_bl = data_blocks.size - 1
self._sig_t_stops[chan_id].append(data_blocks[last_bl]['end_time'])
in_seg_data_block = data_blocks[fisrt_bl:last_bl + 1]
in_seg_data_block['cumsum'][1:] = np.cumsum(in_seg_data_block['size'][:-1])
self._by_seg_data_blocks[chan_id].append(in_seg_data_block)
self._seg_t_starts = []
self._seg_t_stops = []
for seg_ind in range(nb_segment):
# there is a small delay between all channel so take the max/min for t_start/t_stop
t_start = min(
self._sig_t_starts[chan_id][seg_ind] for chan_id in self._sig_t_starts)
t_stop = max(self._sig_t_stops[chan_id][seg_ind] for chan_id in self._sig_t_stops)
self._seg_t_starts.append(t_start)
self._seg_t_stops.append(t_stop)
# create typed channels
sig_channels = []
unit_channels = []
event_channels = []
self.internal_unit_ids = {}
for chan_id, chan_info in enumerate(self._channel_infos):
if chan_info['kind'] in [1, 6, 7, 9]:
if self.take_ideal_sampling_rate:
sampling_rate = info['ideal_rate']
else:
sample_interval = get_sample_interval(info, chan_info)
sampling_rate = (1. / sample_interval)
name = chan_info['title']
if chan_info['kind'] in [1, 9]:
# AnalogSignal
if chan_id not in self._sig_t_starts:
continue
units = chan_info['unit']
if chan_info['kind'] == 1: # int16
gain = chan_info['scale'] / 6553.6
offset = chan_info['offset']
sig_dtype = 'int16'
elif chan_info['kind'] == 9: # float32
gain = 1.
offset = 0.
sig_dtype = 'float32'
group_id = 0
sig_channels.append((name, chan_id, sampling_rate, sig_dtype,
units, gain, offset, group_id))
elif chan_info['kind'] in [2, 3, 4, 5, 8]:
# Event
event_channels.append((name, chan_id, 'event'))
elif chan_info['kind'] in [6, 7]: # SpikeTrain with waveforms
wf_units = chan_info['unit']
if chan_info['kind'] == 6:
wf_gain = chan_info['scale'] / 6553.6
wf_offset = chan_info['offset']
wf_left_sweep = chan_info['n_extra'] // 4
elif chan_info['kind'] == 7:
wf_gain = 1.
wf_offset = 0.
wf_left_sweep = chan_info['n_extra'] // 8
wf_sampling_rate = sampling_rate
if self.ced_units:
# this is a hudge pain because need
# to jump over all blocks
data_blocks = self._all_data_blocks[chan_id]
dt = get_channel_dtype(chan_info)
unit_ids = set()
for bl in range(data_blocks.size):
ind0 = data_blocks[bl]['pos']
ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
raw_data = self._memmap[ind0:ind1].view(dt)
marker = raw_data['marker'] & 255
unit_ids.update(np.unique(marker))
unit_ids = sorted(list(unit_ids))
else:
# All spike from one channel are group in one SpikeTrain
unit_ids = ['all']
for unit_id in unit_ids:
unit_index = len(unit_channels)
self.internal_unit_ids[unit_index] = (chan_id, unit_id)
_id = "ch{}#{}".format(chan_id, unit_id)
unit_channels.append((name, _id, wf_units, wf_gain, wf_offset,
wf_left_sweep, wf_sampling_rate))
sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
event_channels = np.array(event_channels, dtype=_event_channel_dtype)
if len(sig_channels) > 0:
if self.try_signal_grouping:
# try to group signals channel if same sampling_rate/dtype/...
# it can raise error for some files (when they do not have signal length)
common_keys = ['sampling_rate', 'dtype', 'units', 'gain', 'offset']
characteristics = sig_channels[common_keys]
unique_characteristics = np.unique(characteristics)
self._sig_dtypes = {}
for group_id, charact in enumerate(unique_characteristics):
chan_grp_indexes, = np.nonzero(characteristics == charact)
sig_channels['group_id'][chan_grp_indexes] = group_id
# check same size for channel in groups
for seg_index in range(nb_segment):
sig_sizes = []
for ind in chan_grp_indexes:
chan_id = sig_channels[ind]['id']
sig_size = np.sum(self._by_seg_data_blocks[chan_id][seg_index]['size'])
sig_sizes.append(sig_size)
sig_sizes = np.array(sig_sizes)
assert np.all(sig_sizes == sig_sizes[0]),\
'Signal channel in groups do not have same size'\
', use try_signal_grouping=False'
self._sig_dtypes[group_id] = np.dtype(charact['dtype'])
else:
# if try_signal_grouping fail the user can try to split each channel in
# separate group
sig_channels['group_id'] = np.arange(sig_channels.size)
self._sig_dtypes = {s['group_id']: np.dtype(s['dtype']) for s in sig_channels}
# fille into header dict
self.header = {}
self.header['nb_block'] = 1
self.header['nb_segment'] = [nb_segment]
self.header['signal_channels'] = sig_channels
self.header['unit_channels'] = unit_channels
self.header['event_channels'] = event_channels
# Annotations
self._generate_minimal_annotations()
bl_ann = self.raw_annotations['blocks'][0]
bl_ann['system_id'] = info['system_id']
seg_ann = bl_ann['segments'][0]
seg_ann['system_id'] = info['system_id']
for c, sig_channel in enumerate(sig_channels):
chan_id = sig_channel['id']
anasig_an = seg_ann['signals'][c]
anasig_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']
anasig_an['comment'] = self._channel_infos[chan_id]['comment']
for c, unit_channel in enumerate(unit_channels):
chan_id, unit_id = self.internal_unit_ids[c]
unit_an = seg_ann['units'][c]
unit_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']
unit_an['comment'] = self._channel_infos[chan_id]['comment']
for c, event_channel in enumerate(event_channels):
chan_id = int(event_channel['id'])
ev_an = seg_ann['events'][c]
ev_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']
ev_an['comment'] = self._channel_infos[chan_id]['comment']
    def _source_name(self):
        # The data source is the single .smr file.
        return self.filename
    def _segment_t_start(self, block_index, seg_index):
        # Segment bounds are stored in clock ticks; convert to seconds.
        return self._seg_t_starts[seg_index] * self._time_factor
    def _segment_t_stop(self, block_index, seg_index):
        # Segment bounds are stored in clock ticks; convert to seconds.
        return self._seg_t_stops[seg_index] * self._time_factor
def _check_channel_indexes(self, channel_indexes):
if channel_indexes is None:
channel_indexes = slice(None)
channel_indexes = np.arange(self.header['signal_channels'].size)[channel_indexes]
return channel_indexes
    def _get_signal_size(self, block_index, seg_index, channel_indexes):
        # All channels of one group have the same length (verified in
        # _parse_header), so the first requested channel's size is enough.
        channel_indexes = self._check_channel_indexes(channel_indexes)
        chan_id = self.header['signal_channels'][channel_indexes[0]]['id']
        sig_size = np.sum(self._by_seg_data_blocks[chan_id][seg_index]['size'])
        return sig_size
    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
        # Per-channel start tick of the segment, converted to seconds.
        channel_indexes = self._check_channel_indexes(channel_indexes)
        chan_id = self.header['signal_channels'][channel_indexes[0]]['id']
        return self._sig_t_starts[chan_id][seg_index] * self._time_factor
    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
        """Assemble the raw (unscaled) signal chunk [i_start:i_stop] for the
        requested channels by stitching together the file's data blocks."""
        if i_start is None:
            i_start = 0
        if i_stop is None:
            i_stop = self._get_signal_size(block_index, seg_index, channel_indexes)

        channel_indexes = self._check_channel_indexes(channel_indexes)
        chan_index = channel_indexes[0]
        chan_id = self.header['signal_channels'][chan_index]['id']
        group_id = self.header['signal_channels'][channel_indexes[0]]['group_id']
        dt = self._sig_dtypes[group_id]

        raw_signals = np.zeros((i_stop - i_start, len(channel_indexes)), dtype=dt)
        for c, channel_index in enumerate(channel_indexes):
            # NOTE: this is slow because the file is traversed once per
            # channel. The loop order cannot easily be reversed: there is
            # no guarantee that channels share the same data block layout.
            chan_header = self.header['signal_channels'][channel_index]
            chan_id = chan_header['id']
            data_blocks = self._by_seg_data_blocks[chan_id][seg_index]

            # Locate the data blocks overlapping [i_start, i_stop) via the
            # per-block cumulative sample counts.
            bl0 = np.searchsorted(data_blocks['cumsum'], i_start, side='left')
            bl1 = np.searchsorted(data_blocks['cumsum'], i_stop, side='left')
            ind = 0
            for bl in range(bl0, bl1):
                ind0 = data_blocks[bl]['pos']
                ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
                data = self._memmap[ind0:ind1].view(dt)

                if bl == bl1 - 1:
                    # Right border: trim samples past i_stop.
                    # Careful: bl can be both the first and the last block!
                    border = data.size - (i_stop - data_blocks[bl]['cumsum'])
                    if border > 0:
                        data = data[:-border]
                if bl == bl0:
                    # Left border: skip samples before i_start.
                    border = i_start - data_blocks[bl]['cumsum']
                    data = data[border:]

                raw_signals[ind:data.size + ind, c] = data
                ind += data.size
        return raw_signals
    def _count_in_time_slice(self, seg_index, chan_id, lim0, lim1, marker_filter=None):
        """Count events or spikes whose tick falls in [lim0, lim1] (ticks),
        optionally keeping only those matching a CED marker value."""
        data_blocks = self._all_data_blocks[chan_id]
        chan_info = self._channel_infos[chan_id]
        dt = get_channel_dtype(chan_info)
        nb = 0
        for bl in range(data_blocks.size):
            ind0 = data_blocks[bl]['pos']
            ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
            raw_data = self._memmap[ind0:ind1].view(dt)
            ts = raw_data['tick']
            keep = (ts >= lim0) & (ts <= lim1)
            if marker_filter is not None:
                # Only the low byte of the marker codes the unit id.
                keep2 = (raw_data['marker'] & 255) == marker_filter
                keep = keep & keep2
            nb += np.sum(keep)
            if ts[-1] > lim1:
                # Blocks are time-ordered, so no later block can match.
                break
        return nb
    def _get_internal_timestamp_(self, seg_index, chan_id,
                                 t_start, t_stop, other_field=None, marker_filter=None):
        """Collect raw ticks of one event/spike channel within [t_start,
        t_stop] (seconds; None means the segment bound).

        If ``other_field`` is given (e.g. 'marker', 'label', 'waveform'),
        the matching values of that field are returned alongside the ticks.
        """
        chan_info = self._channel_infos[chan_id]
        # data_blocks = self._by_seg_data_blocks[chan_id][seg_index]
        data_blocks = self._all_data_blocks[chan_id]
        dt = get_channel_dtype(chan_info)
        if t_start is None:
            # lim0 = 0
            lim0 = self._seg_t_starts[seg_index]
        else:
            # Convert seconds to clock ticks.
            lim0 = int(t_start / self._time_factor)
        if t_stop is None:
            # lim1 = 2**32
            lim1 = self._seg_t_stops[seg_index]
        else:
            lim1 = int(t_stop / self._time_factor)
        timestamps = []
        othervalues = []
        for bl in range(data_blocks.size):
            ind0 = data_blocks[bl]['pos']
            ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
            raw_data = self._memmap[ind0:ind1].view(dt)
            ts = raw_data['tick']
            keep = (ts >= lim0) & (ts <= lim1)
            if marker_filter is not None:
                # Only the low byte of the marker codes the unit id.
                keep2 = (raw_data['marker'] & 255) == marker_filter
                keep = keep & keep2
            timestamps.append(ts[keep])
            if other_field is not None:
                othervalues.append(raw_data[other_field][keep])
            if ts[-1] > lim1:
                # Blocks are time-ordered: nothing later can match.
                break
        if len(timestamps) > 0:
            timestamps = np.concatenate(timestamps)
        else:
            timestamps = np.zeros(0, dtype='int16')
        if other_field is None:
            return timestamps
        else:
            if len(timestamps) > 0:
                othervalues = np.concatenate(othervalues)
            else:
                # Empty result keeps the field's native dtype.
                othervalues = np.zeros(0, dtype=dt.fields[other_field][0])
            return timestamps, othervalues
def _spike_count(self, block_index, seg_index, unit_index):
chan_id, unit_id = self.internal_unit_ids[unit_index]
if self.ced_units:
marker_filter = unit_id
else:
marker_filter = None
lim0 = self._seg_t_starts[seg_index]
lim1 = self._seg_t_stops[seg_index]
return self._count_in_time_slice(seg_index, chan_id,
lim0, lim1, marker_filter=marker_filter)
def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
unit_header = self.header['unit_channels'][unit_index]
chan_id, unit_id = self.internal_unit_ids[unit_index]
if self.ced_units:
marker_filter = unit_id
else:
marker_filter = None
spike_timestamps = self._get_internal_timestamp_(seg_index,
chan_id, t_start, t_stop,
marker_filter=marker_filter)
return spike_timestamps
def _rescale_spike_timestamp(self, spike_timestamps, dtype):
spike_times = spike_timestamps.astype(dtype)
spike_times *= self._time_factor
return spike_times
def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
unit_header = self.header['unit_channels'][unit_index]
chan_id, unit_id = self.internal_unit_ids[unit_index]
if self.ced_units:
marker_filter = unit_id
else:
marker_filter = None
timestamps, waveforms = self._get_internal_timestamp_(seg_index, chan_id,
t_start, t_stop,
other_field='waveform',
marker_filter=marker_filter)
waveforms = waveforms.reshape(timestamps.size, 1, -1)
return waveforms
    def _event_count(self, block_index, seg_index, event_channel_index):
        """Number of events of one channel within the segment bounds."""
        event_header = self.header['event_channels'][event_channel_index]
        chan_id = int(event_header['id'])  # because set to string in header
        lim0 = self._seg_t_starts[seg_index]
        lim1 = self._seg_t_stops[seg_index]
        return self._count_in_time_slice(seg_index, chan_id, lim0, lim1, marker_filter=None)
    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
        """Return (timestamps, durations, labels) for one event channel.

        Labels come from the 'marker' field for Marker channels (kind 5)
        and from the 'label' field for TextMark channels (kind 8); other
        kinds get empty labels. Durations are always None.
        """
        event_header = self.header['event_channels'][event_channel_index]
        chan_id = int(event_header['id'])  # because set to string in header
        chan_info = self._channel_infos[chan_id]
        if chan_info['kind'] == 5:
            timestamps, labels = self._get_internal_timestamp_(seg_index,
                                                               chan_id, t_start, t_stop,
                                                               other_field='marker')
        elif chan_info['kind'] == 8:
            timestamps, labels = self._get_internal_timestamp_(seg_index,
                                                               chan_id, t_start, t_stop,
                                                               other_field='label')
        else:
            timestamps = self._get_internal_timestamp_(seg_index,
                                                       chan_id, t_start, t_stop, other_field=None)
            labels = np.zeros(timestamps.size, dtype='U')
        labels = labels.astype('U')
        durations = None
        return timestamps, durations, labels
def _rescale_event_timestamp(self, event_timestamps, dtype):
event_times = event_timestamps.astype(dtype)
event_times *= self._time_factor
return event_times
def read_as_dict(fid, dtype):
    """Read one binary struct from *fid* and return it as an OrderedDict.

    The file must already be seeked to the struct's position. String
    fields are stored Pascal-style (the first byte holds the length), so
    they are decoded and trimmed accordingly.
    """
    dt = np.dtype(dtype)
    record = np.frombuffer(fid.read(dt.itemsize), dt)[0]

    info = OrderedDict()
    for field in dt.names:
        value = record[field]
        if dt[field].kind == 'S':
            value = value.decode('iso-8859-1')
            if value:
                # Pascal-style string: byte 0 is the real length.
                length = ord(value[0])
                value = value[1:length + 1]
        info[field] = value
    return info
def get_channel_dtype(chan_info):
    """
    Return the numpy dtype used to read the data blocks of one channel,
    selected by the numeric channel 'kind' field.

    :raises ValueError: for channel kinds with no block data layout
        (previously these fell through and raised ``UnboundLocalError``).
    """
    kind = chan_info['kind']
    if kind == 1:  # Adc: raw int16 signal
        dt = 'int16'
    elif kind in (2, 3, 4):  # EventFall / EventRise / EventBoth
        dt = [('tick', 'i4')]
    elif kind == 5:  # Marker data
        dt = [('tick', 'i4'), ('marker', 'i4')]
    elif kind == 6:  # AdcMark: marker + int16 waveform (n_extra bytes)
        dt = [('tick', 'i4'), ('marker', 'i4'),
              ('waveform', 'int16', chan_info['n_extra'] // 2)]
    elif kind == 7:  # RealMark: marker + float32 waveform (n_extra bytes)
        dt = [('tick', 'i4'), ('marker', 'i4'),
              ('waveform', 'float32', chan_info['n_extra'] // 4)]
    elif kind == 8:  # TextMark: marker + fixed-size text label
        dt = [('tick', 'i4'), ('marker', 'i4'),
              ('label', 'S%d' % chan_info['n_extra'])]
    elif kind == 9:  # RealWave: raw float32 signal
        dt = 'float32'
    else:
        raise ValueError('Unsupported channel kind: %r' % kind)
    return np.dtype(dt)
def get_sample_interval(info, chan_info):
    """
    Get sample interval for one channel
    """
    # old file versions express the interval via the ADC conversion time
    # in microseconds; newer ones via the channel divider and time base.
    if info['system_id'] in [1, 2, 3, 4, 5]:
        return chan_info['divide'] * info['us_per_time'] * info['time_per_adc'] * 1e-6
    return chan_info['l_chan_dvd'] * info['us_per_time'] * info['dtime_base']
# headers structures :
# numpy structured layout of the global header at the start of a .smr file;
# fields flagged "# i8" are widened to 64 bit in the son64 file variant.
headerDescription = [
    ('system_id', 'i2'),  # file format version
    ('copyright', 'S10'),
    ('creator', 'S8'),
    ('us_per_time', 'i2'),  # base clock tick, in microseconds
    ('time_per_adc', 'i2'),
    ('filestate', 'i2'),
    ('first_data', 'i4'),  # i8
    ('channels', 'i2'),  # number of channel slots in the file
    ('chan_size', 'i2'),
    ('extra_data', 'i2'),
    ('buffersize', 'i2'),
    ('os_format', 'i2'),
    ('max_ftime', 'i4'),  # i8
    ('dtime_base', 'f8'),  # time base in seconds (files >= version 6)
    ('datetime_detail', 'u1'),
    ('datetime_year', 'i2'),
    ('pad', 'S52'),
    ('comment1', 'S80'),
    ('comment2', 'S80'),
    ('comment3', 'S80'),
    ('comment4', 'S80'),
    ('comment5', 'S80'),
]
# numpy layout of one channel header (one per channel slot, after the
# global header); "# i8" fields are 64 bit in the son64 variant.
channelHeaderDesciption1 = [
    ('del_size', 'i2'),
    ('next_del_block', 'i4'),  # i8
    ('firstblock', 'i4'),  # i8 -- file offset of the first data block
    ('lastblock', 'i4'),  # i8
    ('blocks', 'i2'),  # number of data blocks for this channel
    ('n_extra', 'i2'),  # payload bytes per item (waveform/label size)
    ('pre_trig', 'i2'),
    ('free0', 'i2'),
    ('py_sz', 'i2'),
    ('max_data', 'i2'),
    ('comment', 'S72'),
    ('max_chan_time', 'i4'),  # i8
    ('l_chan_dvd', 'i4'),  # i8 -- clock divider for the sample interval
    ('phy_chan', 'i2'),
    ('title', 'S10'),
    ('ideal_rate', 'f4'),
    ('kind', 'u1'),  # channel kind, see dict_kind below
    ('unused1', 'i1'),
]
# numpy layout of one data block header; blocks of a channel form a doubly
# linked list via pred_block/succ_block file offsets ("# i8" in son64).
blockHeaderDesciption = [
    ('pred_block', 'i4'),  # i8
    ('succ_block', 'i4'),  # i8
    ('start_time', 'i4'),  # i8 -- tick of the first item in the block
    ('end_time', 'i4'),  # i8 -- tick of the last item in the block
    ('channel_num', 'i2'),
    ('items', 'i2'),  # number of items stored in this block
]
# mapping of the numeric channel 'kind' field to its CED name
dict_kind = {
    0: 'empty',
    1: 'Adc',
    2: 'EventFall',
    3: 'EventRise',
    4: 'EventBoth',
    5: 'Marker',
    6: 'AdcMark',
    7: 'RealMark',
    8: 'TextMark',
    9: 'RealWave',
}
| 41.557185 | 99 | 0.551196 |
from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
_event_channel_dtype)
import numpy as np
from collections import OrderedDict
class Spike2RawIO(BaseRawIO):
extensions = ['smr']
rawmode = 'one-file'
def __init__(self, filename='', take_ideal_sampling_rate=False, ced_units=True,
try_signal_grouping=True):
BaseRawIO.__init__(self)
self.filename = filename
self.take_ideal_sampling_rate = take_ideal_sampling_rate
self.ced_units = ced_units
self.try_signal_grouping = try_signal_grouping
def _parse_header(self):
with open(self.filename, 'rb') as fid:
self._global_info = read_as_dict(fid, headerDescription)
info = self._global_info
if info['system_id'] < 6:
info['dtime_base'] = 1e-6
info['datetime_detail'] = 0
info['datetime_year'] = 0
self._time_factor = info['us_per_time'] * info['dtime_base']
self._channel_infos = []
for chan_id in range(info['channels']):
fid.seek(512 + 140 * chan_id)
chan_info = read_as_dict(fid, channelHeaderDesciption1)
if chan_info['kind'] in [1, 6]:
dt = [('scale', 'f4'), ('offset', 'f4'), ('unit', 'S6'), ]
chan_info.update(read_as_dict(fid, dt))
elif chan_info['kind'] in [7, 9]:
dt = [('min', 'f4'), ('max', 'f4'), ('unit', 'S6'), ]
chan_info.update(read_as_dict(fid, dt))
elif chan_info['kind'] in [4]:
dt = [('init_low', 'u1'), ('next_low', 'u1'), ]
chan_info.update(read_as_dict(fid, dt))
if chan_info['kind'] in [1, 6, 7, 9]:
if info['system_id'] < 6:
chan_info.update(read_as_dict(fid, [('divide', 'i2')]))
else:
chan_info.update(read_as_dict(fid, [('interleave', 'i2')]))
chan_info['type'] = dict_kind[chan_info['kind']]
if chan_info['blocks'] == 0:
chan_info['t_start'] = 0.
else:
fid.seek(chan_info['firstblock'])
block_info = read_as_dict(fid, blockHeaderDesciption)
chan_info['t_start'] = float(block_info['start_time']) * \
float(info['us_per_time']) * float(info['dtime_base'])
self._channel_infos.append(chan_info)
self._memmap = np.memmap(self.filename, dtype='u1', offset=0, mode='r')
self._all_data_blocks = {}
self._by_seg_data_blocks = {}
for chan_id, chan_info in enumerate(self._channel_infos):
data_blocks = []
ind = chan_info['firstblock']
for b in range(chan_info['blocks']):
block_info = self._memmap[ind:ind + 20].view(blockHeaderDesciption)[0]
data_blocks.append((ind, block_info['items'], 0,
block_info['start_time'], block_info['end_time']))
ind = block_info['succ_block']
data_blocks = np.array(data_blocks, dtype=[(
'pos', 'int32'), ('size', 'int32'), ('cumsum', 'int32'),
('start_time', 'int32'), ('end_time', 'int32')])
data_blocks['pos'] += 20
self._all_data_blocks[chan_id] = data_blocks
self._by_seg_data_blocks[chan_id] = []
all_gaps_block_ind = {}
for chan_id, chan_info in enumerate(self._channel_infos):
if chan_info['kind'] in [1, 9]:
data_blocks = self._all_data_blocks[chan_id]
sig_size = np.sum(self._all_data_blocks[chan_id]['size'])
if sig_size > 0:
interval = get_sample_interval(info, chan_info) / self._time_factor
inter_block_sizes = data_blocks['start_time'][1:] - \
data_blocks['end_time'][:-1]
gaps_block_ind, = np.nonzero(inter_block_sizes > interval)
all_gaps_block_ind[chan_id] = gaps_block_ind
self._sig_t_starts = {}
self._sig_t_stops = {}
if len(all_gaps_block_ind) == 0:
nb_segment = 1
t_start, t_stop = None, None
for chan_id, chan_info in enumerate(self._channel_infos):
data_blocks = self._all_data_blocks[chan_id]
if data_blocks.size > 0:
if t_stop is None or data_blocks[-1]['end_time'] > t_stop:
t_stop = data_blocks[-1]['end_time']
self._seg_t_starts = [0]
self._seg_t_stops = [t_stop]
else:
all_nb_seg = np.array([v.size + 1 for v in all_gaps_block_ind.values()])
assert np.all(all_nb_seg[0] == all_nb_seg), \
'Signal channel have differents pause so diffrents nb_segment'
nb_segment = int(all_nb_seg[0])
for chan_id, gaps_block_ind in all_gaps_block_ind.items():
data_blocks = self._all_data_blocks[chan_id]
self._sig_t_starts[chan_id] = []
self._sig_t_stops[chan_id] = []
for seg_ind in range(nb_segment):
if seg_ind == 0:
fisrt_bl = 0
else:
fisrt_bl = gaps_block_ind[seg_ind - 1] + 1
self._sig_t_starts[chan_id].append(data_blocks[fisrt_bl]['start_time'])
if seg_ind < nb_segment - 1:
last_bl = gaps_block_ind[seg_ind]
else:
last_bl = data_blocks.size - 1
self._sig_t_stops[chan_id].append(data_blocks[last_bl]['end_time'])
in_seg_data_block = data_blocks[fisrt_bl:last_bl + 1]
in_seg_data_block['cumsum'][1:] = np.cumsum(in_seg_data_block['size'][:-1])
self._by_seg_data_blocks[chan_id].append(in_seg_data_block)
self._seg_t_starts = []
self._seg_t_stops = []
for seg_ind in range(nb_segment):
t_start = min(
self._sig_t_starts[chan_id][seg_ind] for chan_id in self._sig_t_starts)
t_stop = max(self._sig_t_stops[chan_id][seg_ind] for chan_id in self._sig_t_stops)
self._seg_t_starts.append(t_start)
self._seg_t_stops.append(t_stop)
sig_channels = []
unit_channels = []
event_channels = []
self.internal_unit_ids = {}
for chan_id, chan_info in enumerate(self._channel_infos):
if chan_info['kind'] in [1, 6, 7, 9]:
if self.take_ideal_sampling_rate:
sampling_rate = info['ideal_rate']
else:
sample_interval = get_sample_interval(info, chan_info)
sampling_rate = (1. / sample_interval)
name = chan_info['title']
if chan_info['kind'] in [1, 9]:
if chan_id not in self._sig_t_starts:
continue
units = chan_info['unit']
if chan_info['kind'] == 1:
gain = chan_info['scale'] / 6553.6
offset = chan_info['offset']
sig_dtype = 'int16'
elif chan_info['kind'] == 9:
gain = 1.
offset = 0.
sig_dtype = 'float32'
group_id = 0
sig_channels.append((name, chan_id, sampling_rate, sig_dtype,
units, gain, offset, group_id))
elif chan_info['kind'] in [2, 3, 4, 5, 8]:
event_channels.append((name, chan_id, 'event'))
elif chan_info['kind'] in [6, 7]:
wf_units = chan_info['unit']
if chan_info['kind'] == 6:
wf_gain = chan_info['scale'] / 6553.6
wf_offset = chan_info['offset']
wf_left_sweep = chan_info['n_extra'] // 4
elif chan_info['kind'] == 7:
wf_gain = 1.
wf_offset = 0.
wf_left_sweep = chan_info['n_extra'] // 8
wf_sampling_rate = sampling_rate
if self.ced_units:
data_blocks = self._all_data_blocks[chan_id]
dt = get_channel_dtype(chan_info)
unit_ids = set()
for bl in range(data_blocks.size):
ind0 = data_blocks[bl]['pos']
ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
raw_data = self._memmap[ind0:ind1].view(dt)
marker = raw_data['marker'] & 255
unit_ids.update(np.unique(marker))
unit_ids = sorted(list(unit_ids))
else:
unit_ids = ['all']
for unit_id in unit_ids:
unit_index = len(unit_channels)
self.internal_unit_ids[unit_index] = (chan_id, unit_id)
_id = "ch{}#{}".format(chan_id, unit_id)
unit_channels.append((name, _id, wf_units, wf_gain, wf_offset,
wf_left_sweep, wf_sampling_rate))
sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
event_channels = np.array(event_channels, dtype=_event_channel_dtype)
if len(sig_channels) > 0:
if self.try_signal_grouping:
common_keys = ['sampling_rate', 'dtype', 'units', 'gain', 'offset']
characteristics = sig_channels[common_keys]
unique_characteristics = np.unique(characteristics)
self._sig_dtypes = {}
for group_id, charact in enumerate(unique_characteristics):
chan_grp_indexes, = np.nonzero(characteristics == charact)
sig_channels['group_id'][chan_grp_indexes] = group_id
for seg_index in range(nb_segment):
sig_sizes = []
for ind in chan_grp_indexes:
chan_id = sig_channels[ind]['id']
sig_size = np.sum(self._by_seg_data_blocks[chan_id][seg_index]['size'])
sig_sizes.append(sig_size)
sig_sizes = np.array(sig_sizes)
assert np.all(sig_sizes == sig_sizes[0]),\
'Signal channel in groups do not have same size'\
', use try_signal_grouping=False'
self._sig_dtypes[group_id] = np.dtype(charact['dtype'])
else:
sig_channels['group_id'] = np.arange(sig_channels.size)
self._sig_dtypes = {s['group_id']: np.dtype(s['dtype']) for s in sig_channels}
self.header = {}
self.header['nb_block'] = 1
self.header['nb_segment'] = [nb_segment]
self.header['signal_channels'] = sig_channels
self.header['unit_channels'] = unit_channels
self.header['event_channels'] = event_channels
self._generate_minimal_annotations()
bl_ann = self.raw_annotations['blocks'][0]
bl_ann['system_id'] = info['system_id']
seg_ann = bl_ann['segments'][0]
seg_ann['system_id'] = info['system_id']
for c, sig_channel in enumerate(sig_channels):
chan_id = sig_channel['id']
anasig_an = seg_ann['signals'][c]
anasig_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']
anasig_an['comment'] = self._channel_infos[chan_id]['comment']
for c, unit_channel in enumerate(unit_channels):
chan_id, unit_id = self.internal_unit_ids[c]
unit_an = seg_ann['units'][c]
unit_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']
unit_an['comment'] = self._channel_infos[chan_id]['comment']
for c, event_channel in enumerate(event_channels):
chan_id = int(event_channel['id'])
ev_an = seg_ann['events'][c]
ev_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']
ev_an['comment'] = self._channel_infos[chan_id]['comment']
def _source_name(self):
return self.filename
def _segment_t_start(self, block_index, seg_index):
return self._seg_t_starts[seg_index] * self._time_factor
def _segment_t_stop(self, block_index, seg_index):
return self._seg_t_stops[seg_index] * self._time_factor
def _check_channel_indexes(self, channel_indexes):
if channel_indexes is None:
channel_indexes = slice(None)
channel_indexes = np.arange(self.header['signal_channels'].size)[channel_indexes]
return channel_indexes
def _get_signal_size(self, block_index, seg_index, channel_indexes):
channel_indexes = self._check_channel_indexes(channel_indexes)
chan_id = self.header['signal_channels'][channel_indexes[0]]['id']
sig_size = np.sum(self._by_seg_data_blocks[chan_id][seg_index]['size'])
return sig_size
def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
channel_indexes = self._check_channel_indexes(channel_indexes)
chan_id = self.header['signal_channels'][channel_indexes[0]]['id']
return self._sig_t_starts[chan_id][seg_index] * self._time_factor
def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
if i_start is None:
i_start = 0
if i_stop is None:
i_stop = self._get_signal_size(block_index, seg_index, channel_indexes)
channel_indexes = self._check_channel_indexes(channel_indexes)
chan_index = channel_indexes[0]
chan_id = self.header['signal_channels'][chan_index]['id']
group_id = self.header['signal_channels'][channel_indexes[0]]['group_id']
dt = self._sig_dtypes[group_id]
raw_signals = np.zeros((i_stop - i_start, len(channel_indexes)), dtype=dt)
for c, channel_index in enumerate(channel_indexes):
chan_header = self.header['signal_channels'][channel_index]
chan_id = chan_header['id']
data_blocks = self._by_seg_data_blocks[chan_id][seg_index]
bl0 = np.searchsorted(data_blocks['cumsum'], i_start, side='left')
bl1 = np.searchsorted(data_blocks['cumsum'], i_stop, side='left')
ind = 0
for bl in range(bl0, bl1):
ind0 = data_blocks[bl]['pos']
ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
data = self._memmap[ind0:ind1].view(dt)
if bl == bl1 - 1:
border = data.size - (i_stop - data_blocks[bl]['cumsum'])
if border > 0:
data = data[:-border]
if bl == bl0:
border = i_start - data_blocks[bl]['cumsum']
data = data[border:]
raw_signals[ind:data.size + ind, c] = data
ind += data.size
return raw_signals
def _count_in_time_slice(self, seg_index, chan_id, lim0, lim1, marker_filter=None):
data_blocks = self._all_data_blocks[chan_id]
chan_info = self._channel_infos[chan_id]
dt = get_channel_dtype(chan_info)
nb = 0
for bl in range(data_blocks.size):
ind0 = data_blocks[bl]['pos']
ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
raw_data = self._memmap[ind0:ind1].view(dt)
ts = raw_data['tick']
keep = (ts >= lim0) & (ts <= lim1)
if marker_filter is not None:
keep2 = (raw_data['marker'] & 255) == marker_filter
keep = keep & keep2
nb += np.sum(keep)
if ts[-1] > lim1:
break
return nb
def _get_internal_timestamp_(self, seg_index, chan_id,
t_start, t_stop, other_field=None, marker_filter=None):
chan_info = self._channel_infos[chan_id]
data_blocks = self._all_data_blocks[chan_id]
dt = get_channel_dtype(chan_info)
if t_start is None:
lim0 = self._seg_t_starts[seg_index]
else:
lim0 = int(t_start / self._time_factor)
if t_stop is None:
lim1 = self._seg_t_stops[seg_index]
else:
lim1 = int(t_stop / self._time_factor)
timestamps = []
othervalues = []
for bl in range(data_blocks.size):
ind0 = data_blocks[bl]['pos']
ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
raw_data = self._memmap[ind0:ind1].view(dt)
ts = raw_data['tick']
keep = (ts >= lim0) & (ts <= lim1)
if marker_filter is not None:
keep2 = (raw_data['marker'] & 255) == marker_filter
keep = keep & keep2
timestamps.append(ts[keep])
if other_field is not None:
othervalues.append(raw_data[other_field][keep])
if ts[-1] > lim1:
break
if len(timestamps) > 0:
timestamps = np.concatenate(timestamps)
else:
timestamps = np.zeros(0, dtype='int16')
if other_field is None:
return timestamps
else:
if len(timestamps) > 0:
othervalues = np.concatenate(othervalues)
else:
othervalues = np.zeros(0, dtype=dt.fields[other_field][0])
return timestamps, othervalues
def _spike_count(self, block_index, seg_index, unit_index):
chan_id, unit_id = self.internal_unit_ids[unit_index]
if self.ced_units:
marker_filter = unit_id
else:
marker_filter = None
lim0 = self._seg_t_starts[seg_index]
lim1 = self._seg_t_stops[seg_index]
return self._count_in_time_slice(seg_index, chan_id,
lim0, lim1, marker_filter=marker_filter)
def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
unit_header = self.header['unit_channels'][unit_index]
chan_id, unit_id = self.internal_unit_ids[unit_index]
if self.ced_units:
marker_filter = unit_id
else:
marker_filter = None
spike_timestamps = self._get_internal_timestamp_(seg_index,
chan_id, t_start, t_stop,
marker_filter=marker_filter)
return spike_timestamps
def _rescale_spike_timestamp(self, spike_timestamps, dtype):
spike_times = spike_timestamps.astype(dtype)
spike_times *= self._time_factor
return spike_times
def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
unit_header = self.header['unit_channels'][unit_index]
chan_id, unit_id = self.internal_unit_ids[unit_index]
if self.ced_units:
marker_filter = unit_id
else:
marker_filter = None
timestamps, waveforms = self._get_internal_timestamp_(seg_index, chan_id,
t_start, t_stop,
other_field='waveform',
marker_filter=marker_filter)
waveforms = waveforms.reshape(timestamps.size, 1, -1)
return waveforms
def _event_count(self, block_index, seg_index, event_channel_index):
event_header = self.header['event_channels'][event_channel_index]
chan_id = int(event_header['id'])
lim0 = self._seg_t_starts[seg_index]
lim1 = self._seg_t_stops[seg_index]
return self._count_in_time_slice(seg_index, chan_id, lim0, lim1, marker_filter=None)
def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
event_header = self.header['event_channels'][event_channel_index]
chan_id = int(event_header['id'])
chan_info = self._channel_infos[chan_id]
if chan_info['kind'] == 5:
timestamps, labels = self._get_internal_timestamp_(seg_index,
chan_id, t_start, t_stop,
other_field='marker')
elif chan_info['kind'] == 8:
timestamps, labels = self._get_internal_timestamp_(seg_index,
chan_id, t_start, t_stop,
other_field='label')
else:
timestamps = self._get_internal_timestamp_(seg_index,
chan_id, t_start, t_stop, other_field=None)
labels = np.zeros(timestamps.size, dtype='U')
labels = labels.astype('U')
durations = None
return timestamps, durations, labels
def _rescale_event_timestamp(self, event_timestamps, dtype):
event_times = event_timestamps.astype(dtype)
event_times *= self._time_factor
return event_times
def read_as_dict(fid, dtype):
    """Read one binary struct from ``fid`` and return it as an OrderedDict.

    ``fid`` must already be positioned at the struct; string ('S') fields
    are decoded from their Pascal-style length-prefixed representation.
    """
    dt = np.dtype(dtype)
    h = np.frombuffer(fid.read(dt.itemsize), dt)[0]
    info = OrderedDict()
    for k in dt.names:
        v = h[k]
        if dt[k].kind == 'S':
            v = v.decode('iso-8859-1')
            if len(v) > 0:
                # first character holds the string length (Pascal string)
                l = ord(v[0])
                v = v[1:l + 1]
        info[k] = v
    return info
def get_channel_dtype(chan_info):
    """Return the numpy dtype of one channel's data items, chosen by 'kind'."""
    if chan_info['kind'] == 1:  # Adc: raw int16 signal
        dt = 'int16'
    elif chan_info['kind'] in [2, 3, 4]:  # event channels: ticks only
        dt = [('tick', 'i4')]
    elif chan_info['kind'] in [5]:  # Marker
        dt = [('tick', 'i4'), ('marker', 'i4')]
    elif chan_info['kind'] in [6]:  # AdcMark: marker + int16 waveform
        dt = [('tick', 'i4'), ('marker', 'i4'),
              ('waveform', 'int16', chan_info['n_extra'] // 2)]
    elif chan_info['kind'] in [7]:  # RealMark: marker + float32 waveform
        dt = [('tick', 'i4'), ('marker', 'i4'),
              ('waveform', 'float32', chan_info['n_extra'] // 4)]
    elif chan_info['kind'] in [8]:  # TextMark: marker + fixed-size label
        dt = [('tick', 'i4'), ('marker', 'i4'),
              ('label', 'S%d' % chan_info['n_extra'])]
    elif chan_info['kind'] == 9:  # RealWave: raw float32 signal
        dt = 'float32'
    # NOTE(review): other kinds (e.g. 0 'empty') fall through and raise
    # UnboundLocalError here -- presumably never reached for block data.
    dt = np.dtype(dt)
    return dt
def get_sample_interval(info, chan_info):
    """Return the sampling interval (seconds) of one signal channel."""
    if info['system_id'] in [1, 2, 3, 4, 5]:  # file format before version 6
        sample_interval = (chan_info['divide'] * info['us_per_time'] *
                           info['time_per_adc']) * 1e-6
    else:
        sample_interval = (chan_info['l_chan_dvd'] *
                           info['us_per_time'] * info['dtime_base'])
    return sample_interval
# numpy structured layout of the global .smr file header (32-bit variant)
headerDescription = [
    ('system_id', 'i2'),
    ('copyright', 'S10'),
    ('creator', 'S8'),
    ('us_per_time', 'i2'),
    ('time_per_adc', 'i2'),
    ('filestate', 'i2'),
    ('first_data', 'i4'),
    ('channels', 'i2'),
    ('chan_size', 'i2'),
    ('extra_data', 'i2'),
    ('buffersize', 'i2'),
    ('os_format', 'i2'),
    ('max_ftime', 'i4'),
    ('dtime_base', 'f8'),
    ('datetime_detail', 'u1'),
    ('datetime_year', 'i2'),
    ('pad', 'S52'),
    ('comment1', 'S80'),
    ('comment2', 'S80'),
    ('comment3', 'S80'),
    ('comment4', 'S80'),
    ('comment5', 'S80'),
]
# numpy layout of one channel header (one per channel slot)
channelHeaderDesciption1 = [
    ('del_size', 'i2'),
    ('next_del_block', 'i4'),
    ('firstblock', 'i4'),
    ('lastblock', 'i4'),
    ('blocks', 'i2'),
    ('n_extra', 'i2'),
    ('pre_trig', 'i2'),
    ('free0', 'i2'),
    ('py_sz', 'i2'),
    ('max_data', 'i2'),
    ('comment', 'S72'),
    ('max_chan_time', 'i4'),
    ('l_chan_dvd', 'i4'),
    ('phy_chan', 'i2'),
    ('title', 'S10'),
    ('ideal_rate', 'f4'),
    ('kind', 'u1'),
    ('unused1', 'i1'),
]
# numpy layout of one data block header (blocks form a doubly linked list)
blockHeaderDesciption = [
    ('pred_block', 'i4'),
    ('succ_block', 'i4'),
    ('start_time', 'i4'),
    ('end_time', 'i4'),
    ('channel_num', 'i2'),
    ('items', 'i2'),
]
# mapping of the numeric channel 'kind' field to its CED name
dict_kind = {
    0: 'empty',
    1: 'Adc',
    2: 'EventFall',
    3: 'EventRise',
    4: 'EventBoth',
    5: 'Marker',
    6: 'AdcMark',
    7: 'RealMark',
    8: 'TextMark',
    9: 'RealWave',
}
| true | true |
1c30efc2e301eafc41a2529b8e09e246ac929142 | 35,180 | py | Python | cea/interfaces/arcgis/arcgishelper.py | pajotca/CityEnergyAnalyst | f3d0a08f7b5f5967961bf831625544a95c7702f0 | [
"MIT"
] | null | null | null | cea/interfaces/arcgis/arcgishelper.py | pajotca/CityEnergyAnalyst | f3d0a08f7b5f5967961bf831625544a95c7702f0 | [
"MIT"
] | null | null | null | cea/interfaces/arcgis/arcgishelper.py | pajotca/CityEnergyAnalyst | f3d0a08f7b5f5967961bf831625544a95c7702f0 | [
"MIT"
] | null | null | null | """
A library module with helper functions for creating the City Energy Analyst python toolbox for ArcGIS.
"""
import os
import subprocess
import tempfile
import cea.config
import cea.scripts
import cea.inputlocator
from cea.interfaces.arcgis.modules import arcpy
__author__ = "Daren Thomas"
__copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Daren Thomas", "Martin Mosteiro Romero", "Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
# module-level singletons shared by all tools: a locator bound to no scenario
# (only used for scenario-independent lookups such as the weather database)
# and the default configuration shipped with the CEA
LOCATOR = cea.inputlocator.InputLocator(None)
CONFIG = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
# set up logging to help debugging (ArcGIS toolboxes have no console)
import logging
logging.basicConfig(filename=os.path.expandvars(r'%TEMP%\arcgishelper.log'),level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s')
logging.info('arcgishelper loading...')
def create_cea_tool(cea_script):
    """Create a subclass of CeaTool based on the information in the :py:param`cea_script`"""
    # derive a CamelCase class name from the dashed script name,
    # e.g. "demand-graphs" -> "DemandGraphsTool"
    class_name = ''.join(part.capitalize() for part in cea_script.name.split('-')) + 'Tool'
    attributes = {'__init__': lambda self: self._init(cea_script)}
    return type(class_name, (CeaTool,), attributes)
class CeaTool(object):
    """A base class for creating tools in an ArcGIS toolbox. Basically, the user just needs to subclass this,
    specify the usual ArcGIS stuff in the __init__ method as well as set `self.cea_tool` to the corresponding
    tool name. The rest is auto-configured based on default.config and scripts.yml"""
    def _init(self, cea_script):
        """Allow initialization from the ``create_cea_tool``"""
        # note: named _init (not __init__) because create_cea_tool binds it
        # as __init__ of the dynamically created subclass
        self.cea_tool = cea_script.name
        self.label = cea_script.label
        self.description = cea_script.description
        self.category = cea_script.category
        self.canRunInBackground = False
    def getParameterInfo(self):
        """Return the list of arcgis Parameter objects for this tool. The general:weather parameter is treated
        specially: it is represented as two parameter_infos, weather_name and weather_path."""
        config = cea.config.Configuration()
        parameter_infos = []
        for parameter in get_cea_parameters(config, self.cea_tool):
            if parameter.name == 'weather':
                parameter_infos.extend(get_weather_parameter_info(config))
            else:
                parameter_info = get_parameter_info(parameter, config)
                # subclasses may veto (return None) or replace the default mapping
                parameter_info = self.override_parameter_info(parameter_info, parameter)
                if parameter_info:
                    if isinstance(parameter_info, arcpy.Parameter):
                        parameter_infos.append(parameter_info)
                    else:
                        # allow parameters that are displayed as multiple parameter_info's
                        parameter_infos.extend(parameter_info)
        return parameter_infos
    def override_parameter_info(self, parameter_info, parameter):
        """Override this method if you need to use a non-default ArcGIS parameter handling"""
        return parameter_info
    def updateParameters(self, parameters):
        """ArcGIS callback: sync dialog state with the CEA configuration."""
        # no parameter validated yet <=> the dialog has just been opened
        on_dialog_show = not any([p.hasBeenValidated for p in parameters])
        parameters = dict_parameters(parameters)
        config = cea.config.Configuration()
        cea_parameters = {p.fqname: p for p in get_cea_parameters(config, self.cea_tool)}
        if on_dialog_show:
            # show the parameters as defined in the config file
            for parameter_name in parameters.keys():
                if parameter_name == 'weather_name':
                    if is_builtin_weather_path(config.weather):
                        parameters['weather_name'].value = get_db_weather_name(config.weather)
                    else:
                        parameters['weather_name'].value = '<custom>'
                        parameters['weather_path'].value = config.weather
                    update_weather_parameters(parameters)
                elif parameter_name == 'weather_path':
                    # handled together with weather_name above
                    continue
                elif parameter_name in cea_parameters:
                    cea_parameter = cea_parameters[parameter_name]
                    builder = BUILDERS[type(cea_parameter)](cea_parameter, config)
                    builder.on_dialog_show(parameter_name, parameters)
        else:
            # user interaction after the dialog is up: propagate changes
            if 'general:scenario' in parameters:
                check_senario_exists(parameters)
            if 'weather_name' in parameters:
                update_weather_parameters(parameters)
            for parameter_name in parameters.keys():
                if parameter_name in cea_parameters:
                    cea_parameter = cea_parameters[parameter_name]
                    builder = BUILDERS[type(cea_parameter)](cea_parameter, config)
                    builder.on_update_parameters(parameter_name, parameters)
    def execute(self, parameters, _):
        """ArcGIS callback: encode the dialog values and run the CEA script."""
        parameters = dict_parameters(parameters)
        if 'general:scenario' in parameters:
            check_senario_exists(parameters)
        kwargs = {}
        if 'weather_name' in parameters:
            kwargs['weather'] = get_weather_path_from_parameters(parameters)
        for parameter_key in parameters.keys():
            if ':' not in parameter_key:
                # skip this parameter (not a section:name CEA parameter)
                continue
            section_name, parameter_name = parameter_key.split(':')
            parameter = parameters[parameter_key]
            # allow the ParameterInfoBuilder subclass to override encoding of values
            cea_parameters = {p.fqname: p for p in get_cea_parameters(CONFIG, self.cea_tool)}
            cea_parameter = cea_parameters[parameter_key]
            logging.info(cea_parameter)
            builder = BUILDERS[type(cea_parameter)](cea_parameter, CONFIG)
            kwargs[parameter_name] = builder.encode_value(cea_parameter, parameter)
        run_cli(self.cea_tool, **kwargs)
    def updateMessages(self, parameters):
        """Give the builders a chance to update messages / perform some validation"""
        parameters = dict_parameters(parameters)
        cea_parameters = {p.fqname: p for p in get_cea_parameters(CONFIG, self.cea_tool)}
        for parameter_name in parameters.keys():
            # weather/scenario are validated elsewhere (updateParameters)
            if parameter_name in {'general:scenario', 'weather_name', 'weather'}:
                continue
            if parameter_name in cea_parameters:
                cea_parameter = cea_parameters[parameter_name]
                builder = BUILDERS[type(cea_parameter)](cea_parameter, CONFIG)
                builder.on_update_messages(parameter_name, parameters)
def get_cea_parameters(config, cea_tool):
    """Yield one cea.config.Parameter object per parameter declared for the tool in scripts.yml."""
    script_parameters = cea.scripts.by_name(cea_tool).parameters
    for _, cea_parameter in config.matching_parameters(script_parameters):
        yield cea_parameter
def add_message(msg, **kwargs):
    """Log a message to the ArcGIS console (``arcpy.AddMessage``) and append it
    to ``cea.log`` in the temp folder.

    :param msg: the message, optionally a %-style format string
    :param kwargs: named values interpolated into ``msg`` with ``%``
    """
    if kwargs:  # idiomatic truthiness instead of len(kwargs)
        msg %= kwargs
    arcpy.AddMessage(msg)
    log_file = os.path.join(tempfile.gettempdir(), 'cea.log')
    with open(log_file, 'a') as log:
        # append a newline so successive messages don't run together
        log.write(str(msg) + '\n')
def is_db_weather(weather_path):
    """True, if the ``weather_path`` is one of the pre-installed weather files that came with the CEA"""
    weather_name = get_db_weather_name(weather_path)
    if weather_name in LOCATOR.get_weather_names():
        # could still be a custom weather file with the same base name...
        # normalize both paths (separators + case) and compare their folders
        db_weather_path = LOCATOR.get_weather(weather_name)
        db_weather_path = os.path.normpath(db_weather_path)
        db_weather_path = os.path.normcase(db_weather_path)
        # NOTE(review): passing a full path through LOCATOR.get_weather --
        # presumably it returns path-like inputs unchanged; verify
        weather_path = LOCATOR.get_weather(weather_path)
        weather_path = os.path.normpath(weather_path)
        weather_path = os.path.normcase(weather_path)
        if os.path.dirname(db_weather_path) == os.path.dirname(weather_path):
            return True
    return False
def get_db_weather_name(weather_path):
    """Return the weather name for a path: the base filename without extension."""
    base_name = os.path.basename(weather_path)
    return os.path.splitext(base_name)[0]
def get_python_exe():
    """Return the path to the python interpreter that was used to install CEA.

    The installer writes this path to ``~/cea_python.pth``.

    :raises AssertionError: if the file cannot be read.
    """
    pth_file = os.path.expanduser('~/cea_python.pth')
    try:
        with open(pth_file, 'r') as f:
            return f.read().strip()
    except (IOError, OSError):
        # narrowed from a bare ``except:`` which would also swallow
        # KeyboardInterrupt / SystemExit
        raise AssertionError("Could not find 'cea_python.pth' in home directory.")
def get_environment():
    """Build the environment for launching subprocesses, derived from the
    location of the python interpreter returned by ``get_python_exe``."""
    interpreter_dir = os.path.dirname(get_python_exe())
    env = os.environ.copy()
    # prepend the interpreter folder and its Scripts folder to the PATH
    env['PATH'] = ';'.join((interpreter_dir,
                            os.path.join(interpreter_dir, 'Scripts'),
                            os.environ['PATH']))
    add_message('get_environment: root_dir=%s' % interpreter_dir.lower())
    # BUGFIX for running in without proper python installation
    qt_plugins = os.path.join(interpreter_dir, 'Library', 'plugins')
    add_message('Setting QT_PLUGIN_PATH=%s' % qt_plugins)
    env['QT_PLUGIN_PATH'] = qt_plugins
    return env
def run_cli(script_name, **parameters):
    """Run the CLI in a subprocess without showing windows"""
    # Windows-specific: suppress the console window of the child process
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    # -u: unbuffered output so messages stream into the ArcGIS dialog live
    command = [get_python_exe(), '-u', '-m', 'cea.interfaces.cli.cli', script_name]
    for parameter_name, parameter_value in parameters.items():
        # CLI flags are dash-separated, python kwargs underscore-separated
        parameter_name = parameter_name.replace('_', '-')
        command.append('--' + parameter_name)
        command.append(str(parameter_value))
    add_message('Executing: ' + ' '.join(command))
    process = subprocess.Popen(command, startupinfo=startupinfo, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               env=get_environment(), cwd=tempfile.gettempdir())
    # pump stdout line by line until the child terminates
    while True:
        next_line = process.stdout.readline()
        if next_line == '' and process.poll() is not None:
            break
        add_message(next_line.rstrip())
    # flush whatever is left in the pipes after termination
    stdout, stderr = process.communicate()
    add_message(stdout)
    add_message(stderr)
    if process.returncode == cea.ConfigError.rc:
        arcpy.AddError('Tool did not run successfully: Check parameters')
    elif process.returncode != 0:
        raise Exception('Tool did not run successfully')
def parse_boolean(s):
    """Return True or False, depending on the value of ``s`` as defined by the ConfigParser library."""
    # same truth table as ConfigParser.BOOLEAN_STATES; anything else is False
    truth_table = {'0': False, '1': True,
                   'false': False, 'true': True,
                   'no': False, 'yes': True,
                   'off': False, 'on': True}
    return truth_table.get(s.lower(), False)
def is_builtin_weather_path(weather_path):
    """Return True, if the weather path resolves to one of the builtin weather files shipped with the CEA."""
    if weather_path is None:
        return False
    # a path is "builtin" when it lives in the same folder as the shipped
    # weather files ('Zug' is used as a representative builtin file)
    candidate = os.path.normpath(os.path.abspath(weather_path))
    builtin = os.path.normpath(os.path.abspath(LOCATOR.get_weather('Zug')))
    return os.path.dirname(candidate) == os.path.dirname(builtin)
def demand_graph_fields(scenario):
    """Lists the available fields for the demand graphs - these are fields that are present in both the
    building demand results files as well as the totals file (albeit with different units)."""
    import pandas as pd
    locator = cea.inputlocator.InputLocator(scenario)
    totals = pd.read_csv(locator.get_total_demand())
    total_fields = set(totals.columns.tolist())
    # any building's results file has the same columns; use the first one
    sample_building = totals['Name'][0]
    building_df = pd.read_csv(locator.get_demand_results_file(sample_building))
    fields = set(building_df.columns.tolist())
    fields.remove('DATE')
    fields.remove('Name')
    # keep only fields whose prefix also appears (as <prefix>_MWhyr) in the totals file
    return [field for field in fields
            if field.split('_')[0] + '_MWhyr' in total_fields]
def create_weather_parameters(config):
    """Create the ``weather_name`` and ``weather_path`` parameters used for choosing the weatherfile.

    Delegates to :py:func:`get_weather_parameter_info`, which builds the identical
    pair of parameters -- the two functions previously contained line-for-line
    duplicated code that had to be kept in sync by hand.

    :param config: the cea configuration (``config.weather`` supplies the initial value)
    :return: tuple ``(weather_name, weather_path)`` of arcpy.Parameter objects
    """
    return get_weather_parameter_info(config)
def check_senario_exists(parameters):
    """Ensure the scenario parameter carries a value, defaulting to the configured one.

    (The misspelled name is kept as-is: external callers use it.)
    """
    scenario_parameter = parameters['general:scenario']
    current = scenario_parameter.valueAsText
    if current is None:
        # no user input yet: fall back to the scenario from the config file
        scenario_parameter.value = cea.config.Configuration().scenario
    else:
        scenario_parameter.value = current
def check_radiation_exists(parameters, scenario):
    """Flag an error on the scenario parameter when radiation results are missing."""
    locator = cea.inputlocator.InputLocator(scenario)
    required = (
        (locator.get_radiation(), "No radiation file found - please run radiation tool first"),
        (locator.get_surface_properties(), "No radiation data found for scenario. Run radiation script first."),
    )
    for path, message in required:
        if not os.path.exists(path):
            parameters['scenario'].setErrorMessage(message)
def update_weather_parameters(parameters):
    """Keep the ``weather_name`` dropdown and ``weather_path`` file field in sync."""
    chosen_name = parameters['weather_name'].value
    if chosen_name == '<custom>':
        weather_path = parameters['weather_path'].valueAsText
    else:
        weather_path = LOCATOR.get_weather(chosen_name)
    parameters['weather_path'].value = weather_path
    builtin = is_builtin_weather_path(weather_path)
    # builtin weather files are chosen by name only -> lock the path field;
    # custom files unlock it and show '<custom>' in the dropdown
    parameters['weather_path'].enabled = not builtin
    if builtin:
        parameters['weather_name'].value = get_db_weather_name(weather_path)
    else:
        parameters['weather_name'].value = '<custom>'
def get_weather_path_from_parameters(parameters):
    """Resolve the weather file path from the dropdown, or from the custom path field
    when the user selected '<custom>'."""
    chosen_name = parameters['weather_name'].value
    if chosen_name != '<custom>':
        return LOCATOR.get_weather(chosen_name)
    return parameters['weather_path'].valueAsText
def get_weather_parameter_info(config):
    """Create the two arcpy Parameter objects (name dropdown + .epw path) used
    for selecting the weather file."""
    weather_name = arcpy.Parameter(
        name="weather_name",
        displayName="Weather file (choose from list or enter full path to .epw file)",
        datatype="String",
        direction="Input",
        parameterType="Required")
    weather_name.filter.list = LOCATOR.get_weather_names() + ['<custom>']
    if is_db_weather(config.weather):
        weather_name.value = get_db_weather_name(config.weather)
    else:
        weather_name.value = '<custom>'
    weather_path = arcpy.Parameter(
        name="weather_path",
        displayName="Path to .epw file",
        datatype="DEFile",
        direction="Input",
        parameterType="Optional")
    weather_path.filter.list = ['epw']
    weather_path.value = config.weather
    # the path field is only editable when a custom weather file is in use
    weather_path.enabled = not is_db_weather(config.weather)
    return weather_name, weather_path
def dict_parameters(parameters):
    """Index a sequence of arcpy parameters by their ``name`` attribute."""
    return dict((parameter.name, parameter) for parameter in parameters)
def get_parameter_info(cea_parameter, config):
    """Create an arcpy Parameter for ``cea_parameter`` via the matching builder class.

    The arcgis name is the fully qualified "section:parameter" name, so parameters
    created here are easy to recognize later (``':' in parameter.name``).
    """
    builder_class = BUILDERS[type(cea_parameter)]
    builder = builder_class(cea_parameter, config)
    try:
        return builder.get_parameter_info()
    except TypeError:
        logging.info('Failed to build arcpy.Parameter from %s', cea_parameter, exc_info=True)
        raise
class ParameterInfoBuilder(object):
    """Base class for translating :py:class:`cea.config.Parameter` objects into
    arcpy.Parameter objects and shuttling values between the two worlds."""

    def __init__(self, cea_parameter, config):
        self.cea_parameter = cea_parameter
        self.config = config

    def get_parameter_info(self):
        """Create a generic (String, Required, Input) parameter named after the
        cea parameter's fully qualified name."""
        arcgis_parameter = arcpy.Parameter(
            displayName=self.cea_parameter.help,
            name=self.cea_parameter.fqname,
            datatype='String',
            parameterType='Required',
            direction='Input',
            multiValue=False)
        if self.cea_parameter.category is not None:
            arcgis_parameter.category = self.cea_parameter.category
        return arcgis_parameter

    def on_dialog_show(self, parameter_name, parameters):
        """Initialize the dialog field from the configured value."""
        parameters[parameter_name].value = self.cea_parameter.get()

    def on_update_parameters(self, parameter_name, parameters):
        """Hook called on each parameter change after the dialog is shown; no-op by default."""
        pass

    def on_update_messages(self, parameter_name, parameters):
        """Hook called during updateMessages for validation; no-op by default."""
        pass

    def encode_value(self, cea_parameter, parameter):
        """Encode the arcgis value back into the cea configuration string form."""
        return cea_parameter.encode(parameter.value)
class ScalarParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for single-valued parameters that map 1:1 to an arcgis datatype."""

    DATA_TYPE_MAP = {  # cea parameter class -> arcgis datatype
        cea.config.StringParameter: 'String',
        cea.config.BooleanParameter: 'GPBoolean',
        cea.config.RealParameter: 'GPDouble',
        cea.config.IntegerParameter: 'GPLong',
        cea.config.DateParameter: 'GPDate',
    }

    def get_parameter_info(self):
        scalar_parameter = super(ScalarParameterInfoBuilder, self).get_parameter_info()
        if getattr(self.cea_parameter, 'nullable', False):
            # nullable scalars are edited as optional free text so "" can stand for None
            scalar_parameter.datatype = 'String'
            scalar_parameter.parameterType = 'Optional'
        else:
            scalar_parameter.datatype = self.DATA_TYPE_MAP[type(self.cea_parameter)]
            scalar_parameter.parameterType = 'Required'
        return scalar_parameter

    def get_value(self):
        if getattr(self.cea_parameter, 'nullable', False):
            return self.cea_parameter.encode(self.cea_parameter.get())
        return self.cea_parameter.get()
class StringParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for plain string parameters; always optional so "" is a valid value."""

    def get_parameter_info(self):
        string_parameter = super(StringParameterInfoBuilder, self).get_parameter_info()
        string_parameter.parameterType = 'Optional'
        return string_parameter

    def get_value(self):
        return self.cea_parameter.encode(self.cea_parameter.get())
class PathParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for folder parameters; output folders become arcgis Output parameters."""

    def get_parameter_info(self):
        folder_parameter = super(PathParameterInfoBuilder, self).get_parameter_info()
        folder_parameter.datatype = 'DEFolder'
        if self.cea_parameter._direction == 'output':
            folder_parameter.direction = 'Output'
        return folder_parameter
class ChoiceParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for single-choice parameters: restrict the input via a value filter."""

    def get_parameter_info(self):
        choice_parameter = super(ChoiceParameterInfoBuilder, self).get_parameter_info()
        choice_parameter.filter.list = self.cea_parameter._choices
        return choice_parameter
class MultiChoiceParameterInfoBuilder(ChoiceParameterInfoBuilder):
    """Builder for multi-choice parameters (optional, multiValue, filtered)."""

    def get_parameter_info(self):
        multi_parameter = super(MultiChoiceParameterInfoBuilder, self).get_parameter_info()
        multi_parameter.parameterType = 'Optional'
        multi_parameter.multiValue = True
        return multi_parameter

    def encode_value(self, cea_parameter, parameter):
        # arcgis joins multi-values with ';'; None means nothing was selected
        selected = parameter.valueAsText
        if selected is None:
            return ''
        return cea_parameter.encode(selected.split(';'))
class SubfoldersParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for parameters selecting a subset of a folder's subfolders."""

    def get_parameter_info(self):
        subfolders_parameter = super(SubfoldersParameterInfoBuilder, self).get_parameter_info()
        subfolders_parameter.parameterType = 'Optional'
        subfolders_parameter.multiValue = True
        subfolders_parameter.filter.list = self.cea_parameter.get_folders()
        return subfolders_parameter

    def encode_value(self, cea_parameter, parameter):
        # arcgis joins multi-values with ';'; None means nothing was selected
        selected = parameter.valueAsText
        return '' if selected is None else cea_parameter.encode(selected.split(';'))
class FileParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for file parameters; input files get an extension filter,
    output files become arcgis Output parameters."""

    def get_parameter_info(self):
        file_parameter = super(FileParameterInfoBuilder, self).get_parameter_info()
        file_parameter.datatype = 'DEFile'
        if self.cea_parameter._direction != 'input':
            file_parameter.direction = 'Output'
        else:
            file_parameter.filter.list = self.cea_parameter._extensions
        if getattr(self.cea_parameter, 'nullable', False):
            file_parameter.parameterType = 'Optional'
        return file_parameter
class ListParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for free-form list parameters (optional, multiValue, no filter)."""

    def get_parameter_info(self):
        list_parameter = super(ListParameterInfoBuilder, self).get_parameter_info()
        list_parameter.parameterType = 'Optional'
        list_parameter.multiValue = True
        return list_parameter

    def encode_value(self, cea_parameter, parameter):
        # arcgis joins multi-values with ';'; None means nothing was selected
        selected = parameter.valueAsText
        return '' if selected is None else cea_parameter.encode(selected.split(';'))
class OptimizationIndividualParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for choosing a single optimization individual.

    Shows a disabled "summary" parameter holding the combined value
    ``scenario/generation/individual`` plus three editable dropdowns
    (scenario, generation, individual) that drive it. A value consisting of
    only a scenario name (no '/') means "no individual selected".
    """

    def get_parameter_info(self):
        """Return [summary, scenario, generation, individual] arcpy parameters."""
        parameter = super(OptimizationIndividualParameterInfoBuilder, self).get_parameter_info()
        parameter.parameterType = 'Required'
        parameter.datatype = "String"
        parameter.enabled = False  # derived from the three dropdowns below
        scenario_parameter = arcpy.Parameter(
            displayName=self.cea_parameter.help + ' (scenario)',
            name=self.cea_parameter.fqname.replace(':', '/') + '/scenario',
            datatype='String',
            parameterType='Required', direction='Input', multiValue=False)
        generation_parameter = arcpy.Parameter(
            displayName=self.cea_parameter.help + ' (generation)',
            name=self.cea_parameter.fqname.replace(':', '/') + '/generation',
            datatype='String',
            parameterType='Required', direction='Input', multiValue=False)
        individual_parameter = arcpy.Parameter(
            displayName=self.cea_parameter.help + ' (individual)',
            name=self.cea_parameter.fqname.replace(':', '/') + '/individual',
            datatype='String',
            parameterType='Required', direction='Input', multiValue=False)
        return [parameter, scenario_parameter, generation_parameter, individual_parameter]

    def on_dialog_show(self, parameter_name, parameters):
        """Populate the three dropdowns and their filters from the configured value."""
        super(OptimizationIndividualParameterInfoBuilder, self).on_dialog_show(parameter_name, parameters)
        scenario_parameter = parameters[parameter_name.replace(':', '/') + '/scenario']
        generation_parameter = parameters[parameter_name.replace(':', '/') + '/generation']
        individual_parameter = parameters[parameter_name.replace(':', '/') + '/individual']
        if len(self.cea_parameter.get().split('/')) == 1:
            # configured value is just a scenario, no generation/individual yet
            s = self.cea_parameter.get()
            g = '<none>'
            i = '<none>'
        else:
            s, g, i = self.cea_parameter.get().split('/')
        scenario_parameter.value = s
        scenario_parameter.filter.list = self.cea_parameter.get_folders()
        generation_parameter.value = g
        generation_parameter.filter.list = ['<none>'] + self.cea_parameter.get_generations(s)
        individual_parameter.value = i
        individual_parameter.filter.list = ['<none>'] + self.cea_parameter.get_individuals(s, g)

    def on_update_parameters(self, parameter_name, parameters):
        """
        Update the parameter value with the values of the additional dropdowns, setting
        their filters appropriately.
        """
        logging.info('on_update_parameters: %s' % parameter_name)
        # BUGFIX: the base method was previously called with (parameters, parameters);
        # the first argument must be the parameter name
        super(OptimizationIndividualParameterInfoBuilder, self).on_update_parameters(parameter_name, parameters)
        current_value = parameters[parameter_name].value
        logging.info('on_update_parameters: current_value=%s' % current_value)
        if not current_value:
            s, g, i = ('<none>', '<none>', '<none>')
        elif len(current_value.split('/')) == 1:
            s = current_value
            g = '<none>'
            i = '<none>'
        else:
            s, g, i = current_value.split('/')
        project_parameter = parameters[self.cea_parameter._project.replace('{', '').replace('}', '')]
        project = project_parameter.valueAsText
        logging.info('on_update_parameters: project=%s' % project)
        scenario_parameter = parameters[parameter_name.replace(':', '/') + '/scenario']
        generation_parameter = parameters[parameter_name.replace(':', '/') + '/generation']
        individual_parameter = parameters[parameter_name.replace(':', '/') + '/individual']
        scenario_parameter.filter.list = self.cea_parameter.get_folders(project)
        if scenario_parameter.valueAsText != s:
            # user chose new scenario, reset filters for generation and individual
            logging.info('on_update_parameters: scenario_parameter.value != s (%s, %s)',
                         scenario_parameter.valueAsText, s)
            s = scenario_parameter.valueAsText
            generation_parameter.filter.list = ['<none>'] + self.cea_parameter.get_generations(
                scenario=s, project=project)
            generation_parameter.value = '<none>'
            g = '<none>'
            individual_parameter.value = '<none>'
            individual_parameter.filter.list = ['<none>']
            i = '<none>'
        elif generation_parameter.valueAsText != g:
            # user chose a new generation: refresh the individual dropdown
            g = generation_parameter.valueAsText
            if g == '<none>':
                individual_parameter.value = '<none>'
                individual_parameter.filter.list = ['<none>']
                i = '<none>'
            else:
                individual_filter = self.cea_parameter.get_individuals(scenario=s, generation=g, project=project)
                individual_parameter.filter.list = individual_filter
                individual_parameter.value = individual_filter[0]
                i = individual_filter[0]
        parameters[parameter_name].value = '%(s)s/%(g)s/%(i)s' % locals()

    def encode_value(self, cea_parameter, parameter):
        """Encode back to config form; a bare scenario when no individual is chosen."""
        value = parameter.valueAsText
        if value is None:
            # ROBUSTNESS: previously crashed on None; '' is consistent with the
            # other builders' "nothing selected" encoding
            return ''
        if len(value.split('/')) == 3:
            s, g, i = value.split('/')
            if '<none>' in {g, i}:
                return cea_parameter.encode(s)
            else:
                return cea_parameter.encode(value)
        else:
            return cea_parameter.encode(value)
class OptimizationIndividualListParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for choosing a list of optimization individuals via a value table
    with three columns (Scenario, Generation, Individual)."""

    def get_parameter_info(self):
        """Return a GPValueTable parameter with per-column choice filters."""
        parameter = super(OptimizationIndividualListParameterInfoBuilder, self).get_parameter_info()
        parameter.multiValue = True
        parameter.parameterType = 'Optional'
        parameter.datatype = "GPValueTable"
        parameter.columns = [["GPString", "Scenario"], ["GPString", "Generation"], ["GPString", "Individual"]]
        parameter.filters[0].type = 'ValueType'
        parameter.filters[1].type = 'ValueType'
        parameter.filters[2].type = 'ValueType'
        filters = self.get_filters(self.cea_parameter.replace_references(self.cea_parameter._project))
        for i in range(3):
            parameter.filters[i].list = filters[i]
        return parameter

    def get_filters(self, project_path):
        """Collect the choice lists for the three columns by scanning every scenario
        of the project for optimization results."""
        scenarios = set()
        generations = set()
        individuals = set()
        for scenario in [s for s in os.listdir(project_path) if os.path.isdir(os.path.join(project_path, s))]:
            locator = cea.inputlocator.InputLocator(os.path.join(project_path, scenario))
            for individual in locator.list_optimization_all_individuals():
                s, g, i = individual.split('/')
                g = int(g)
                i = int(i[3:])  # strip the 'ind' prefix
                scenarios.add(s)
                generations.add(g)
                individuals.add(i)
            scenarios.add(scenario)
        # BUGFIX: wrap map() in list() -- on Python 3 `list + map-iterator` raises
        # TypeError; on Python 2 the behavior is unchanged
        return [sorted(scenarios),
                ['<none>'] + list(map(str, sorted(generations))),
                ['<none>'] + ['ind%s' % i for i in sorted(individuals)]]

    def on_dialog_show(self, parameter_name, parameters):
        """Build a nested list of the values"""
        values = []
        for v in self.cea_parameter.get():
            vlist = str(v).split('/')
            if len(vlist) == 1:
                # just the scenario, no optimization path
                vlist.extend(['<none>', '<none>'])
            values.append(vlist)
        parameters[parameter_name].values = values

    def encode_value(self, cea_parameter, parameter):
        """Encode each row back to 'scenario' or 'scenario/generation/individual'."""
        individuals = []
        for s, g, i in parameter.values:
            if g == '<none>':
                individuals.append(s)
            else:
                assert not i == '<none>', "Can't encode individuals: %s" % parameter.values
                individuals.append('%(s)s/%(g)s/%(i)s' % locals())
        return ', '.join(individuals)

    def on_update_parameters(self, parameter_name, parameters):
        """Refresh the column filters for the current project and normalize empty
        cells to '<none>'."""
        parameter = parameters[parameter_name]
        project_parameter = parameters[self.cea_parameter._project.replace('{', '').replace('}', '')]
        project = project_parameter.valueAsText
        logging.info('on_update_parameters: project=%s' % project)
        filters = self.get_filters(project)
        for i in range(3):
            parameter.filters[i].list = filters[i]
        values = []
        for s, g, i in parameter.values:
            if not g:
                g = '<none>'
            if not i:
                i = '<none>'
            values.append([s, g, i])
        parameter.values = values

    def on_update_messages(self, parameter_name, parameters):
        """Make sure all the values are valid"""
        logging.info('on_update_messages for optimization individual list')
        parameter = parameters[parameter_name]
        project_parameter = parameters[self.cea_parameter._project.replace('{', '').replace('}', '')]
        project = project_parameter.valueAsText
        logging.info('on_update_messages parameter.values: %s' % parameter.values)
        for s, g, i in parameter.values:
            logging.info('on_update_messages checking: (%s, %s, %s)' % (s, g, i))
            logging.info('on_update_messages checking: (%s, %s, %s)' % tuple(map(type, (s, g, i))))
            if s not in self.cea_parameter.get_folders(project=project):
                parameter.setErrorMessage('Invalid scenario name: %s' % s)
                logging.info('Invalid scenario name: %s' % s)
                return
            if g == '<none>' and i == '<none>':
                continue
            if g == '<none>' and i != '<none>':
                parameter.setErrorMessage('Optimization individual must be <none> if generation is <none>')
                # BUGFIX: log line previously carried the message of the branch below
                logging.info('Optimization individual must be <none> if generation is <none>')
                return
            if g != '<none>' and i == '<none>':
                parameter.setErrorMessage('Optimization individual may not be <none> if generation is set')
                logging.info('Optimization individual may not be <none> if generation is set')
                return
            individual = '%(s)s/%(g)s/%(i)s' % locals()
            locator = cea.inputlocator.InputLocator(os.path.join(project, s))
            if individual not in locator.list_optimization_all_individuals():
                parameter.setErrorMessage('Invalid optimization individual: %s' % individual)
                logging.info('Invalid optimization individual: %s' % individual)
                return
class BuildingsParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for selecting a subset of the scenario's buildings."""

    def get_parameter_info(self):
        buildings_parameter = super(BuildingsParameterInfoBuilder, self).get_parameter_info()
        buildings_parameter.parameterType = 'Optional'
        buildings_parameter.multiValue = True
        buildings_parameter.filter.list = list_buildings(self.cea_parameter.config.scenario)
        return buildings_parameter

    def on_update_parameters(self, parameter_name, parameters):
        # refresh the building list (and clear the selection) when the scenario changed
        scenario = parameters['general:scenario'].valueAsText
        buildings = list_buildings(scenario)
        if set(buildings) != set(parameters[parameter_name].filter.list):
            parameters[parameter_name].filter.list = buildings
            parameters[parameter_name].value = []

    def encode_value(self, cea_parameter, parameter):
        # arcgis joins multi-values with ';'; None means nothing was selected
        selected = parameter.valueAsText
        return '' if selected is None else cea_parameter.encode(selected.split(';'))
def list_buildings(scenario):
    """Shell out to the CEA python to list the building names of ``scenario``.

    Returns an empty list when the helper script fails (e.g. no inputs yet).
    """
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # hide the console window
    command = [get_python_exe(), '-u', '-m', 'cea.interfaces.arcgis.list_buildings', scenario]
    try:
        output = subprocess.check_output(command, startupinfo=startupinfo)
    except subprocess.CalledProcessError:
        return []
    return [building.strip() for building in output.split(',')]
# Registry mapping each cea.config parameter class to the builder that knows how
# to represent it in the arcgis tool dialog. Lookup is by exact type
# (``BUILDERS[type(cea_parameter)]``), so every concrete class must be listed
# explicitly -- inheritance does not apply here.
BUILDERS = { # dict[cea.config.Parameter, ParameterInfoBuilder]
    cea.config.PathParameter: PathParameterInfoBuilder,
    cea.config.StringParameter: StringParameterInfoBuilder,
    cea.config.BooleanParameter: ScalarParameterInfoBuilder,
    cea.config.RealParameter: ScalarParameterInfoBuilder,
    cea.config.IntegerParameter: ScalarParameterInfoBuilder,
    cea.config.MultiChoiceParameter: MultiChoiceParameterInfoBuilder,
    cea.config.ChoiceParameter: ChoiceParameterInfoBuilder,
    cea.config.SubfoldersParameter: SubfoldersParameterInfoBuilder,
    cea.config.FileParameter: FileParameterInfoBuilder,
    cea.config.ListParameter: ListParameterInfoBuilder,
    cea.config.BuildingsParameter: BuildingsParameterInfoBuilder,
    cea.config.DateParameter: ScalarParameterInfoBuilder,
    cea.config.OptimizationIndividualParameter: OptimizationIndividualParameterInfoBuilder,
    cea.config.OptimizationIndividualListParameter: OptimizationIndividualListParameterInfoBuilder,
}
import subprocess
import tempfile
import cea.config
import cea.scripts
import cea.inputlocator
from cea.interfaces.arcgis.modules import arcpy
__author__ = "Daren Thomas"
__copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Daren Thomas", "Martin Mosteiro Romero", "Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
LOCATOR = cea.inputlocator.InputLocator(None)
CONFIG = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
import logging
logging.basicConfig(filename=os.path.expandvars(r'%TEMP%\arcgishelper.log'),level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s')
logging.info('arcgishelper loading...')
def create_cea_tool(cea_script):
    """Dynamically create a CeaTool subclass for ``cea_script``
    (e.g. the script 'demand-graphs' becomes the class ``DemandGraphsTool``)."""
    class_name = ''.join(part.capitalize() for part in cea_script.name.split('-')) + 'Tool'
    # the lambda closes over cea_script so the generated __init__ can forward it
    return type(class_name, (CeaTool,), {'__init__': lambda self: self._init(cea_script)})
class CeaTool(object):
    """Base class for the dynamically generated arcgis toolbox tools.

    Subclasses are produced by ``create_cea_tool``; their generated ``__init__``
    only calls ``_init``. The arcgis tool protocol (getParameterInfo,
    updateParameters, updateMessages, execute) is implemented here once for all
    cea scripts.
    """
    def _init(self, cea_script):
        # initializer invoked from the generated __init__ (see create_cea_tool)
        self.cea_tool = cea_script.name
        self.label = cea_script.label
        self.description = cea_script.description
        self.category = cea_script.category
        self.canRunInBackground = False
    def getParameterInfo(self):
        """Build the list of arcpy parameters for this tool, expanding the special
        'weather' cea parameter into the weather_name/weather_path pair."""
        config = cea.config.Configuration()
        parameter_infos = []
        for parameter in get_cea_parameters(config, self.cea_tool):
            if parameter.name == 'weather':
                parameter_infos.extend(get_weather_parameter_info(config))
            else:
                parameter_info = get_parameter_info(parameter, config)
                # subclasses may replace or suppress individual parameters
                parameter_info = self.override_parameter_info(parameter_info, parameter)
                if parameter_info:
                    # a builder may return a single parameter or a list of them
                    if isinstance(parameter_info, arcpy.Parameter):
                        parameter_infos.append(parameter_info)
                    else:
                        parameter_infos.extend(parameter_info)
        return parameter_infos
    def override_parameter_info(self, parameter_info, parameter):
        """Hook for subclasses to modify (or suppress) a parameter; pass-through by default."""
        return parameter_info
    def updateParameters(self, parameters):
        """arcgis validation hook: initialize the dialog on first call, then
        delegate per-parameter updates to the matching builders."""
        # no parameter validated yet => the dialog was just opened
        on_dialog_show = not any([p.hasBeenValidated for p in parameters])
        parameters = dict_parameters(parameters)
        config = cea.config.Configuration()
        cea_parameters = {p.fqname: p for p in get_cea_parameters(config, self.cea_tool)}
        if on_dialog_show:
            # show the parameters as defined in the config file
            for parameter_name in parameters.keys():
                if parameter_name == 'weather_name':
                    if is_builtin_weather_path(config.weather):
                        parameters['weather_name'].value = get_db_weather_name(config.weather)
                    else:
                        parameters['weather_name'].value = '<custom>'
                        parameters['weather_path'].value = config.weather
                    update_weather_parameters(parameters)
                elif parameter_name == 'weather_path':
                    # handled together with 'weather_name' above
                    continue
                elif parameter_name in cea_parameters:
                    cea_parameter = cea_parameters[parameter_name]
                    builder = BUILDERS[type(cea_parameter)](cea_parameter, config)
                    builder.on_dialog_show(parameter_name, parameters)
        else:
            if 'general:scenario' in parameters:
                check_senario_exists(parameters)
            if 'weather_name' in parameters:
                update_weather_parameters(parameters)
            for parameter_name in parameters.keys():
                if parameter_name in cea_parameters:
                    cea_parameter = cea_parameters[parameter_name]
                    builder = BUILDERS[type(cea_parameter)](cea_parameter, config)
                    builder.on_update_parameters(parameter_name, parameters)
    def execute(self, parameters, _):
        """Collect the dialog values and run the cea script via the CLI."""
        parameters = dict_parameters(parameters)
        if 'general:scenario' in parameters:
            check_senario_exists(parameters)
        kwargs = {}
        if 'weather_name' in parameters:
            kwargs['weather'] = get_weather_path_from_parameters(parameters)
        for parameter_key in parameters.keys():
            if ':' not in parameter_key:
                # skip this parameter
                continue
            section_name, parameter_name = parameter_key.split(':')
            parameter = parameters[parameter_key]
            # allow the ParameterInfoBuilder subclass to override encoding of values
            cea_parameters = {p.fqname: p for p in get_cea_parameters(CONFIG, self.cea_tool)}
            cea_parameter = cea_parameters[parameter_key]
            logging.info(cea_parameter)
            builder = BUILDERS[type(cea_parameter)](cea_parameter, CONFIG)
            kwargs[parameter_name] = builder.encode_value(cea_parameter, parameter)
        run_cli(self.cea_tool, **kwargs)
    def updateMessages(self, parameters):
        """arcgis validation-message hook: delegate to the builders' on_update_messages."""
        parameters = dict_parameters(parameters)
        cea_parameters = {p.fqname: p for p in get_cea_parameters(CONFIG, self.cea_tool)}
        for parameter_name in parameters.keys():
            # weather/scenario parameters are validated elsewhere
            if parameter_name in {'general:scenario', 'weather_name', 'weather'}:
                continue
            if parameter_name in cea_parameters:
                cea_parameter = cea_parameters[parameter_name]
                builder = BUILDERS[type(cea_parameter)](cea_parameter, CONFIG)
                builder.on_update_messages(parameter_name, parameters)
def get_cea_parameters(config, cea_tool):
    """Yield the cea.config parameters that apply to the script ``cea_tool``."""
    script_parameters = cea.scripts.by_name(cea_tool).parameters
    for _, cea_parameter in config.matching_parameters(script_parameters):
        yield cea_parameter
def add_message(msg, **kwargs):
    """Show a message in the arcgis console and append it to %TEMP%/cea.log.

    Keyword arguments, when given, are interpolated into ``msg`` with the
    ``%`` operator.
    """
    if kwargs:
        msg %= kwargs
    arcpy.AddMessage(msg)
    with open(os.path.join(tempfile.gettempdir(), 'cea.log'), 'a') as log:
        log.write(str(msg))
def is_db_weather(weather_path):
    """Return True if ``weather_path`` refers to one of the weather files shipped
    in the CEA weather database folder (as opposed to a user-supplied .epw)."""
    weather_name = get_db_weather_name(weather_path)
    if weather_name in LOCATOR.get_weather_names():
        # could still be a custom weather file...
        db_weather_path = LOCATOR.get_weather(weather_name)
        db_weather_path = os.path.normpath(db_weather_path)
        db_weather_path = os.path.normcase(db_weather_path)
        # NOTE(review): get_weather is handed the full path here, not a weather
        # name -- presumably it returns such a path unchanged; confirm against
        # cea.inputlocator.InputLocator.get_weather
        weather_path = LOCATOR.get_weather(weather_path)
        weather_path = os.path.normpath(weather_path)
        weather_path = os.path.normcase(weather_path)
        # same folder as the database copy => treat as builtin
        if os.path.dirname(db_weather_path) == os.path.dirname(weather_path):
            return True
    return False
def get_db_weather_name(weather_path):
    """Return the weather file's base name without extension (its database name)."""
    file_name = os.path.basename(weather_path)
    return os.path.splitext(file_name)[0]
def get_python_exe():
    """Return the path to the CEA python interpreter.

    The installer writes the interpreter path to ``~/cea_python.pth``.

    :raises AssertionError: if the file is missing or unreadable (AssertionError
        is kept for backward compatibility with existing callers).
    """
    pth_file = os.path.expanduser('~/cea_python.pth')
    try:
        with open(pth_file, 'r') as f:
            return f.read().strip()
    except (IOError, OSError):
        # BUGFIX: was a bare ``except:`` which also swallowed KeyboardInterrupt
        # and SystemExit; only file-access errors should map to the message below
        raise AssertionError("Could not find 'cea_python.pth' in home directory.")
def get_environment():
    """Return a copy of os.environ with the CEA python's folders prepended to PATH
    and QT_PLUGIN_PATH pointing at its Qt plugins."""
    python_dir = os.path.dirname(get_python_exe())
    environment = os.environ.copy()
    environment['PATH'] = ';'.join((python_dir,
                                    os.path.join(python_dir, 'Scripts'),
                                    os.environ['PATH']))
    add_message('get_environment: root_dir=%s' % python_dir.lower())
    # BUGFIX for running in without proper python installation
    qt_plugin_path = os.path.join(python_dir, 'Library', 'plugins')
    add_message('Setting QT_PLUGIN_PATH=%s' % qt_plugin_path)
    environment['QT_PLUGIN_PATH'] = qt_plugin_path
    return environment
def run_cli(script_name, **parameters):
    """Run a cea script through the CLI in a subprocess, streaming its output to
    the arcgis console.

    :param script_name: name of the cea script (e.g. 'demand')
    :param parameters: cli options; underscores in names become dashes
    :raises Exception: when the subprocess exits with a non-zero code that is not
        the cea ConfigError code (that one is shown as an arcgis error instead)
    """
    startupinfo = subprocess.STARTUPINFO()
    # hide the console window that Windows would otherwise pop up
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    command = [get_python_exe(), '-u', '-m', 'cea.interfaces.cli.cli', script_name]
    for parameter_name, parameter_value in parameters.items():
        parameter_name = parameter_name.replace('_', '-')
        command.append('--' + parameter_name)
        command.append(str(parameter_value))
    add_message('Executing: ' + ' '.join(command))
    process = subprocess.Popen(command, startupinfo=startupinfo, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               env=get_environment(), cwd=tempfile.gettempdir())
    # stream stdout line by line so the user sees progress while the script runs
    while True:
        next_line = process.stdout.readline()
        if next_line == '' and process.poll() is not None:
            break
        add_message(next_line.rstrip())
    # flush whatever is left in the pipes after the process ended
    stdout, stderr = process.communicate()
    add_message(stdout)
    add_message(stderr)
    if process.returncode == cea.ConfigError.rc:
        # configuration problems are user errors, not crashes
        arcpy.AddError('Tool did not run successfully: Check parameters')
    elif process.returncode != 0:
        raise Exception('Tool did not run successfully')
def parse_boolean(s):
boolean_states = {'0': False,
'1': True,
'false': False,
'no': False,
'off': False,
'on': True,
'true': True,
'yes': True}
if s.lower() in boolean_states:
return boolean_states[s.lower()]
return False
def is_builtin_weather_path(weather_path):
if weather_path is None:
return False
weather_path = os.path.normpath(os.path.abspath(weather_path))
zug_path = os.path.normpath(os.path.abspath(LOCATOR.get_weather('Zug')))
return os.path.dirname(weather_path) == os.path.dirname(zug_path)
def demand_graph_fields(scenario):
import pandas as pd
locator = cea.inputlocator.InputLocator(scenario)
df_total_demand = pd.read_csv(locator.get_total_demand())
total_fields = set(df_total_demand.columns.tolist())
first_building = df_total_demand['Name'][0]
df_building = pd.read_csv(locator.get_demand_results_file(first_building))
fields = set(df_building.columns.tolist())
fields.remove('DATE')
fields.remove('Name')
# remove fields in demand results files that do not have a corresponding field in the totals file
bad_fields = set(field for field in fields if not field.split('_')[0] + "_MWhyr" in total_fields)
fields = fields - bad_fields
return list(fields)
def create_weather_parameters(config):
weather_name = arcpy.Parameter(
displayName="Weather file (choose from list or enter full path to .epw file)",
name="weather_name",
datatype="String",
parameterType="Required",
direction="Input")
weather_name.filter.list = LOCATOR.get_weather_names() + ['<custom>']
weather_name.value = get_db_weather_name(config.weather) if is_db_weather(config.weather) else '<custom>'
weather_path = arcpy.Parameter(
displayName="Path to .epw file",
name="weather_path",
datatype="DEFile",
parameterType="Optional",
direction="Input")
weather_path.filter.list = ['epw']
weather_path.value = config.weather
weather_path.enabled = not is_db_weather(config.weather)
return weather_name, weather_path
def check_senario_exists(parameters):
scenario_parameter = parameters['general:scenario']
scenario = scenario_parameter.valueAsText
if scenario is None:
config = cea.config.Configuration()
scenario_parameter.value = config.scenario
else:
scenario_parameter.value = scenario
def check_radiation_exists(parameters, scenario):
locator = cea.inputlocator.InputLocator(scenario)
radiation_csv = locator.get_radiation()
if not os.path.exists(radiation_csv):
parameters['scenario'].setErrorMessage("No radiation file found - please run radiation tool first")
if not os.path.exists(locator.get_surface_properties()):
parameters['scenario'].setErrorMessage("No radiation data found for scenario. Run radiation script first.")
def update_weather_parameters(parameters):
weather_name = parameters['weather_name'].value
if weather_name == '<custom>':
weather_path = parameters['weather_path'].valueAsText
else:
weather_path = LOCATOR.get_weather(weather_name)
parameters['weather_path'].value = weather_path
if is_builtin_weather_path(weather_path):
parameters['weather_path'].enabled = False
parameters['weather_name'].value = get_db_weather_name(weather_path)
else:
parameters['weather_path'].enabled = True
parameters['weather_name'].value = '<custom>'
def get_weather_path_from_parameters(parameters):
if parameters['weather_name'].value == '<custom>':
return parameters['weather_path'].valueAsText
else:
return LOCATOR.get_weather(parameters['weather_name'].value)
def get_weather_parameter_info(config):
weather_name = arcpy.Parameter(
displayName="Weather file (choose from list or enter full path to .epw file)",
name="weather_name",
datatype="String",
parameterType="Required",
direction="Input")
weather_name.filter.list = LOCATOR.get_weather_names() + ['<custom>']
weather_name.value = get_db_weather_name(config.weather) if is_db_weather(config.weather) else '<custom>'
weather_path = arcpy.Parameter(
displayName="Path to .epw file",
name="weather_path",
datatype="DEFile",
parameterType="Optional",
direction="Input")
weather_path.filter.list = ['epw']
weather_path.value = config.weather
weather_path.enabled = not is_db_weather(config.weather)
return weather_name, weather_path
def dict_parameters(parameters):
return {p.name: p for p in parameters}
def get_parameter_info(cea_parameter, config):
builder = BUILDERS[type(cea_parameter)](cea_parameter, config)
try:
arcgis_parameter = builder.get_parameter_info()
# arcgis_parameter.value = builder.get_value()
return arcgis_parameter
except TypeError:
logging.info('Failed to build arcpy.Parameter from %s', cea_parameter, exc_info=True)
raise
class ParameterInfoBuilder(object):
def __init__(self, cea_parameter, config):
self.cea_parameter = cea_parameter
self.config = config
def get_parameter_info(self):
parameter = arcpy.Parameter(displayName=self.cea_parameter.help,
name=self.cea_parameter.fqname, datatype='String',
parameterType='Required', direction='Input', multiValue=False)
if not self.cea_parameter.category is None:
parameter.category = self.cea_parameter.category
return parameter
def on_dialog_show(self, parameter_name, parameters):
parameters[parameter_name].value = self.cea_parameter.get()
def on_update_parameters(self, parameter_name, parameters):
pass
def on_update_messages(self, parameter_name, parameters):
pass
def encode_value(self, cea_parameter, parameter):
return cea_parameter.encode(parameter.value)
class ScalarParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for scalar parameters (string / bool / real / int / date).

    Nullable scalars are exposed as optional strings so the user may leave
    them empty; all other scalars map to the native arcgis datatype below.
    """
    DATA_TYPE_MAP = {  # cea parameter class -> arcgis datatype string
        cea.config.StringParameter: 'String',
        cea.config.BooleanParameter: 'GPBoolean',
        cea.config.RealParameter: 'GPDouble',
        cea.config.IntegerParameter: 'GPLong',
        cea.config.DateParameter: 'GPDate',
    }

    def get_parameter_info(self):
        """Return the arcpy.Parameter with datatype chosen from DATA_TYPE_MAP."""
        parameter = super(ScalarParameterInfoBuilder, self).get_parameter_info()
        if hasattr(self.cea_parameter, 'nullable') and self.cea_parameter.nullable:
            # nullable values are edited as free-form optional strings
            parameter.datatype = 'String'
            parameter.parameterType = 'Optional'
        else:
            parameter.datatype = self.DATA_TYPE_MAP[type(self.cea_parameter)]
            parameter.parameterType = 'Required'
        return parameter

    def get_value(self):
        """Return the current value; nullable parameters are string-encoded."""
        if hasattr(self.cea_parameter, 'nullable') and self.cea_parameter.nullable:
            return self.cea_parameter.encode(self.cea_parameter.get())
        else:
            return self.cea_parameter.get()
class StringParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for plain string parameters; the dialog field is optional."""

    def get_parameter_info(self):
        string_parameter = super(StringParameterInfoBuilder, self).get_parameter_info()
        string_parameter.parameterType = 'Optional'
        return string_parameter

    def get_value(self):
        """Return the string-encoded current value of the cea parameter."""
        return self.cea_parameter.encode(self.cea_parameter.get())
class PathParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for folder path parameters (rendered as a DEFolder picker)."""

    def get_parameter_info(self):
        folder_parameter = super(PathParameterInfoBuilder, self).get_parameter_info()
        folder_parameter.datatype = 'DEFolder'
        # output folders are flagged as dialog outputs
        if self.cea_parameter._direction == 'output':
            folder_parameter.direction = 'Output'
        return folder_parameter
class ChoiceParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for single-choice parameters; restricts the dialog field to the
    cea parameter's list of valid choices."""

    def get_parameter_info(self):
        choice_parameter = super(ChoiceParameterInfoBuilder, self).get_parameter_info()
        choice_parameter.filter.list = self.cea_parameter._choices
        return choice_parameter
class MultiChoiceParameterInfoBuilder(ChoiceParameterInfoBuilder):
    """Builder for multi-choice parameters: same filter list as the single
    choice version, but the user may select any number of entries."""

    def get_parameter_info(self):
        multi_parameter = super(MultiChoiceParameterInfoBuilder, self).get_parameter_info()
        multi_parameter.multiValue = True
        multi_parameter.parameterType = 'Optional'
        return multi_parameter

    def encode_value(self, cea_parameter, parameter):
        """Encode the semicolon-separated dialog selection; empty selection -> ''."""
        selection = parameter.valueAsText
        if selection is None:
            return ''
        return cea_parameter.encode(selection.split(';'))
class SubfoldersParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for parameters selecting one or more subfolders; the filter
    list comes from the cea parameter's folder listing."""

    def get_parameter_info(self):
        subfolders_parameter = super(SubfoldersParameterInfoBuilder, self).get_parameter_info()
        subfolders_parameter.multiValue = True
        subfolders_parameter.parameterType = 'Optional'
        subfolders_parameter.filter.list = self.cea_parameter.get_folders()
        return subfolders_parameter

    def encode_value(self, cea_parameter, parameter):
        """Encode the semicolon-separated dialog selection; empty selection -> ''."""
        selection = parameter.valueAsText
        if selection is None:
            return ''
        return cea_parameter.encode(selection.split(';'))
class FileParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for file parameters (DEFile): input files get an extension
    filter, output files are flagged as dialog outputs."""

    def get_parameter_info(self):
        file_parameter = super(FileParameterInfoBuilder, self).get_parameter_info()
        file_parameter.datatype = 'DEFile'
        if self.cea_parameter._direction == 'input':
            file_parameter.filter.list = self.cea_parameter._extensions
        else:
            file_parameter.direction = 'Output'
        # nullable files may be left unset in the dialog
        if hasattr(self.cea_parameter, 'nullable') and self.cea_parameter.nullable:
            file_parameter.parameterType = 'Optional'
        return file_parameter
class ListParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for free-form list parameters (multiple string values)."""

    def get_parameter_info(self):
        list_parameter = super(ListParameterInfoBuilder, self).get_parameter_info()
        list_parameter.multiValue = True
        list_parameter.parameterType = 'Optional'
        return list_parameter

    def encode_value(self, cea_parameter, parameter):
        """Encode the semicolon-separated dialog selection; empty selection -> ''."""
        selection = parameter.valueAsText
        if selection is None:
            return ''
        return cea_parameter.encode(selection.split(';'))
class OptimizationIndividualParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for a single optimization individual ('scenario/generation/individual').

    The value is edited via three linked helper parameters (scenario,
    generation, individual); the disabled main parameter mirrors their
    combined value.
    """

    def get_parameter_info(self):
        """Return the (disabled) main parameter plus the three helper parameters."""
        parameter = super(OptimizationIndividualParameterInfoBuilder, self).get_parameter_info()
        parameter.parameterType = 'Required'
        parameter.datatype = "String"
        parameter.enabled = False  # value is derived from the helper parameters below
        scenario_parameter = arcpy.Parameter(
            displayName=self.cea_parameter.help + ' (scenario)',
            name=self.cea_parameter.fqname.replace(':', '/') + '/scenario',
            datatype='String',
            parameterType='Required', direction='Input', multiValue=False)
        generation_parameter = arcpy.Parameter(
            displayName=self.cea_parameter.help + ' (generation)',
            name=self.cea_parameter.fqname.replace(':', '/') + '/generation',
            datatype='String',
            parameterType='Required', direction='Input', multiValue=False)
        individual_parameter = arcpy.Parameter(
            displayName=self.cea_parameter.help + ' (individual)',
            name=self.cea_parameter.fqname.replace(':', '/') + '/individual',
            datatype='String',
            parameterType='Required', direction='Input', multiValue=False)
        return [parameter, scenario_parameter, generation_parameter, individual_parameter]

    def on_dialog_show(self, parameter_name, parameters):
        """Populate the helper parameters from the currently configured value."""
        super(OptimizationIndividualParameterInfoBuilder, self).on_dialog_show(parameter_name, parameters)
        scenario_parameter = parameters[parameter_name.replace(':', '/') + '/scenario']
        generation_parameter = parameters[parameter_name.replace(':', '/') + '/generation']
        individual_parameter = parameters[parameter_name.replace(':', '/') + '/individual']
        if len(self.cea_parameter.get().split('/')) == 1:
            # value is just a scenario name, no generation/individual selected
            s = self.cea_parameter.get()
            g = '<none>'
            i = '<none>'
        else:
            s, g, i = self.cea_parameter.get().split('/')
        scenario_parameter.value = s
        scenario_parameter.filter.list = self.cea_parameter.get_folders()
        generation_parameter.value = g
        generation_parameter.filter.list = ['<none>'] + self.cea_parameter.get_generations(s)
        individual_parameter.value = i
        individual_parameter.filter.list = ['<none>'] + self.cea_parameter.get_individuals(s, g)

    def on_update_parameters(self, parameter_name, parameters):
        """Keep the helper parameters consistent and recombine them into the
        main parameter's 'scenario/generation/individual' value."""
        logging.info('on_update_parameters: %s' % parameter_name)
        # BUGFIX: the base-class hook expects (parameter_name, parameters);
        # the original passed `parameters` twice.
        super(OptimizationIndividualParameterInfoBuilder, self).on_update_parameters(parameter_name, parameters)
        current_value = parameters[parameter_name].value
        logging.info('on_update_parameters: current_value=%s' % current_value)
        if not current_value:
            s, g, i = ('<none>', '<none>', '<none>')
        elif len(current_value.split('/')) == 1:
            s = current_value
            g = '<none>'
            i = '<none>'
        else:
            s, g, i = current_value.split('/')
        project_parameter = parameters[self.cea_parameter._project.replace('{', '').replace('}', '')]
        project = project_parameter.valueAsText
        logging.info('on_update_parameters: project=%s' % project)
        scenario_parameter = parameters[parameter_name.replace(':', '/') + '/scenario']
        generation_parameter = parameters[parameter_name.replace(':', '/') + '/generation']
        individual_parameter = parameters[parameter_name.replace(':', '/') + '/individual']
        scenario_parameter.filter.list = self.cea_parameter.get_folders(project)
        if scenario_parameter.valueAsText != s:
            # user chose new scenario, reset filters for generation and individual
            logging.info('on_update_parameters: scenario_parameter.value != s (%s, %s)',
                         scenario_parameter.valueAsText, s)
            s = scenario_parameter.valueAsText
            generation_parameter.filter.list = ['<none>'] + self.cea_parameter.get_generations(
                scenario=s, project=project)
            generation_parameter.value = '<none>'
            g = '<none>'
            individual_parameter.value = '<none>'
            individual_parameter.filter.list = ['<none>']
            i = '<none>'
        elif generation_parameter.valueAsText != g:
            # user chose a new generation, refresh the individual choices
            g = generation_parameter.valueAsText
            if g == '<none>':
                individual_parameter.value = '<none>'
                individual_parameter.filter.list = ['<none>']
                i = '<none>'
            else:
                individual_filter = self.cea_parameter.get_individuals(scenario=s, generation=g, project=project)
                individual_parameter.filter.list = individual_filter
                individual_parameter.value = individual_filter[0]
                i = individual_filter[0]
        parameters[parameter_name].value = '%(s)s/%(g)s/%(i)s' % locals()

    def encode_value(self, cea_parameter, parameter):
        """Encode the combined value; a '<none>' generation or individual
        collapses the value to just the scenario name."""
        value = parameter.valueAsText
        if len(value.split('/')) == 3:
            s, g, i = value.split('/')
            if '<none>' in {g, i}:
                return cea_parameter.encode(s)
            else:
                return cea_parameter.encode(value)
        else:
            return cea_parameter.encode(value)
class OptimizationIndividualListParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for a list of optimization individuals, edited as a value table
    with one row per individual: (scenario, generation, individual)."""

    def get_parameter_info(self):
        """Return a GPValueTable parameter with per-column choice filters."""
        parameter = super(OptimizationIndividualListParameterInfoBuilder, self).get_parameter_info()
        parameter.multiValue = True
        parameter.parameterType = 'Optional'
        parameter.datatype = "GPValueTable"
        parameter.columns = [["GPString", "Scenario"], ["GPString", "Generation"], ["GPString", "Individual"]]
        parameter.filters[0].type = 'ValueType'
        parameter.filters[1].type = 'ValueType'
        parameter.filters[2].type = 'ValueType'
        filters = self.get_filters(self.cea_parameter.replace_references(self.cea_parameter._project))
        for i in range(3):
            parameter.filters[i].list = filters[i]
        return parameter

    def get_filters(self, project_path):
        """Collect the per-column choice lists (scenarios, generations,
        individuals) found below *project_path*."""
        scenarios = set()
        generations = set()
        individuals = set()
        for scenario in [s for s in os.listdir(project_path) if os.path.isdir(os.path.join(project_path, s))]:
            locator = cea.inputlocator.InputLocator(os.path.join(project_path, scenario))
            for individual in locator.list_optimization_all_individuals():
                s, g, i = individual.split('/')
                g = int(g)
                i = int(i[3:])  # strip the 'ind' prefix
                scenarios.add(s)
                generations.add(g)
                individuals.add(i)
            scenarios.add(scenario)
        # BUGFIX: build explicit lists - concatenating a list with the map()
        # iterator raises a TypeError on Python 3.
        return [sorted(scenarios),
                ['<none>'] + [str(g) for g in sorted(generations)],
                ['<none>'] + ['ind%s' % i for i in sorted(individuals)]]

    def on_dialog_show(self, parameter_name, parameters):
        """Populate the value table rows from the configured individuals."""
        values = []
        for v in self.cea_parameter.get():
            vlist = str(v).split('/')
            if len(vlist) == 1:
                # just the scenario, no optimization path
                vlist.extend(['<none>', '<none>'])
            values.append(vlist)
        parameters[parameter_name].values = values

    def encode_value(self, cea_parameter, parameter):
        """Encode the table rows as a comma-separated list of individuals;
        rows with generation '<none>' collapse to the scenario name."""
        individuals = []
        for s, g, i in parameter.values:
            if g == '<none>':
                individuals.append(s)
            else:
                assert i != '<none>', "Can't encode individuals: %s" % parameter.values
                individuals.append('%(s)s/%(g)s/%(i)s' % locals())
        return ', '.join(individuals)

    def on_update_parameters(self, parameter_name, parameters):
        """Refresh the filters from the selected project and normalize empty
        generation/individual cells to '<none>'."""
        parameter = parameters[parameter_name]
        project_parameter = parameters[self.cea_parameter._project.replace('{', '').replace('}', '')]
        project = project_parameter.valueAsText
        logging.info('on_update_parameters: project=%s' % project)
        filters = self.get_filters(project)
        for i in range(3):
            parameter.filters[i].list = filters[i]
        values = []
        for s, g, i in parameter.values:
            if not g:
                g = '<none>'
            if not i:
                i = '<none>'
            values.append([s, g, i])
        parameter.values = values

    def on_update_messages(self, parameter_name, parameters):
        """Validate every row; sets an error message on the first invalid row."""
        logging.info('on_update_messages for optimization individual list')
        parameter = parameters[parameter_name]
        project_parameter = parameters[self.cea_parameter._project.replace('{', '').replace('}', '')]
        project = project_parameter.valueAsText
        logging.info('on_update_messages parameter.values: %s' % parameter.values)
        for s, g, i in parameter.values:
            logging.info('on_update_messages checking: (%s, %s, %s)' % (s, g, i))
            logging.info('on_update_messages checking: (%s, %s, %s)' % tuple(map(type, (s, g, i))))
            if s not in self.cea_parameter.get_folders(project=project):
                parameter.setErrorMessage('Invalid scenario name: %s' % s)
                logging.info('Invalid scenario name: %s' % s)
                return
            if g == '<none>' and i == '<none>':
                continue
            if g == '<none>' and i != '<none>':
                parameter.setErrorMessage('Optimization individual must be <none> if generation is <none>')
                logging.info('Optimization individual may not be <none> if generation is set')
                return
            if g != '<none>' and i == '<none>':
                parameter.setErrorMessage('Optimization individual may not be <none> if generation is set')
                logging.info('Optimization individual may not be <none> if generation is set')
                return
            individual = '%(s)s/%(g)s/%(i)s' % locals()
            locator = cea.inputlocator.InputLocator(os.path.join(project, s))
            if individual not in locator.list_optimization_all_individuals():
                parameter.setErrorMessage('Invalid optimization individual: %s' % individual)
                logging.info('Invalid optimization individual: %s' % individual)
                return
class BuildingsParameterInfoBuilder(ParameterInfoBuilder):
    """Builder for building-name parameters: the choice list is read from the
    scenario and refreshed when the user selects a different scenario."""

    def get_parameter_info(self):
        buildings_parameter = super(BuildingsParameterInfoBuilder, self).get_parameter_info()
        buildings_parameter.multiValue = True
        buildings_parameter.parameterType = 'Optional'
        buildings_parameter.filter.list = list_buildings(self.cea_parameter.config.scenario)
        return buildings_parameter

    def on_update_parameters(self, parameter_name, parameters):
        # When the scenario changes, refresh the building list and clear the
        # (now possibly invalid) selection.
        scenario = parameters['general:scenario'].valueAsText
        buildings = list_buildings(scenario)
        if set(buildings) != set(parameters[parameter_name].filter.list):
            parameters[parameter_name].filter.list = buildings
            parameters[parameter_name].value = []

    def encode_value(self, cea_parameter, parameter):
        """Encode the semicolon-separated building selection; empty -> ''."""
        selection = parameter.valueAsText
        if selection is None:
            return ''
        return cea_parameter.encode(selection.split(';'))
def list_buildings(scenario):
    """Return the building names of *scenario* by running the
    ``cea.interfaces.arcgis.list_buildings`` helper in a subprocess.

    Uses subprocess.STARTUPINFO to suppress the console window (Windows
    only). Returns an empty list if the helper exits with an error.
    """
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # hide the console window
    command = [get_python_exe(), '-u', '-m', 'cea.interfaces.arcgis.list_buildings', scenario]
    try:
        buildings_string = subprocess.check_output(command, startupinfo=startupinfo)
        return [b.strip() for b in buildings_string.split(',')]
    except subprocess.CalledProcessError:
        return []
# Maps each cea.config parameter class to the builder used to create the
# corresponding arcpy.Parameter for the ArcGIS dialog.
BUILDERS = {
    cea.config.PathParameter: PathParameterInfoBuilder,
    cea.config.StringParameter: StringParameterInfoBuilder,
    cea.config.BooleanParameter: ScalarParameterInfoBuilder,
    cea.config.RealParameter: ScalarParameterInfoBuilder,
    cea.config.IntegerParameter: ScalarParameterInfoBuilder,
    cea.config.MultiChoiceParameter: MultiChoiceParameterInfoBuilder,
    cea.config.ChoiceParameter: ChoiceParameterInfoBuilder,
    cea.config.SubfoldersParameter: SubfoldersParameterInfoBuilder,
    cea.config.FileParameter: FileParameterInfoBuilder,
    cea.config.ListParameter: ListParameterInfoBuilder,
    cea.config.BuildingsParameter: BuildingsParameterInfoBuilder,
    cea.config.DateParameter: ScalarParameterInfoBuilder,
    cea.config.OptimizationIndividualParameter: OptimizationIndividualParameterInfoBuilder,
    cea.config.OptimizationIndividualListParameter: OptimizationIndividualListParameterInfoBuilder,
}
1c30eff3d6b315eb97ba893c46a6df5a2ea50cf2 | 1,169 | py | Python | setup.py | Doridian/rd60xx | 7169dafd52be7e2949bc784b354eb874a7113a88 | [
"Apache-2.0"
] | null | null | null | setup.py | Doridian/rd60xx | 7169dafd52be7e2949bc784b354eb874a7113a88 | [
"Apache-2.0"
] | null | null | null | setup.py | Doridian/rd60xx | 7169dafd52be7e2949bc784b354eb874a7113a88 | [
"Apache-2.0"
] | null | null | null | # You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools

import rd60xx

# The README doubles as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="rd60xx",
    version=rd60xx.__version__,  # single-source the version from the package
    author="Doridian",
    author_email="git@doridian.net",
    description="Python bindings for RD60XX",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Doridian/rd60xx",
    packages=setuptools.find_packages(),
    install_requires=['PyModbus'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
)
| 33.4 | 84 | 0.661249 |
import setuptools
import rd60xx
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="rd60xx",
version=rd60xx.__version__,
author="Doridian",
author_email="git@doridian.net",
description="Python bindings for RD60XX",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Doridian/rd60xx",
packages=setuptools.find_packages(),
install_requires=['PyModbus'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
)
| true | true |
1c30f086ddaa3ce0069a4fde03bdec80c98185d2 | 691 | wsgi | Python | app/deploy/vagrant.wsgi | Shikha2410/redi-dropper-client | 18c3a63b55af26e7192365bacd43a824b340d036 | [
"BSD-3-Clause"
] | 2 | 2015-04-08T12:26:32.000Z | 2015-08-19T05:00:20.000Z | app/deploy/vagrant.wsgi | Shikha2410/redi-dropper-client | 18c3a63b55af26e7192365bacd43a824b340d036 | [
"BSD-3-Clause"
] | 60 | 2015-05-04T19:01:39.000Z | 2017-07-11T19:29:41.000Z | app/deploy/vagrant.wsgi | Shikha2410/redi-dropper-client | 18c3a63b55af26e7192365bacd43a824b340d036 | [
"BSD-3-Clause"
] | 12 | 2015-04-07T17:52:05.000Z | 2017-08-04T13:21:02.000Z | #!/usr/bin/env python
"""
Goal: Implement wsgi helper for deployment on Apache
@authors:
Andrei Sura <sura.andrei@gmail.com>
"""
import sys
import os
import logging
logging.basicConfig(stream=sys.stderr)
print("Using interpreter: {}".format(sys.version))
# @TODO: Read from the environment
app_home = '/var/www/dropper/app'
print("Adding application path: {}".format(app_home))
sys.path.insert(0, app_home)
from redidropper.main import app as application, mail
from redidropper import initializer
from config import MODE_DEBUG
# Configures routes, models
application = initializer.do_init(application, mode=MODE_DEBUG)
print("do_init() in debug mode...")
mail.init_app(application)
| 23.033333 | 63 | 0.768452 |
import sys
import os
import logging
logging.basicConfig(stream=sys.stderr)
print("Using interpreter: {}".format(sys.version))
app_home = '/var/www/dropper/app'
print("Adding application path: {}".format(app_home))
sys.path.insert(0, app_home)
from redidropper.main import app as application, mail
from redidropper import initializer
from config import MODE_DEBUG
application = initializer.do_init(application, mode=MODE_DEBUG)
print("do_init() in debug mode...")
mail.init_app(application)
| true | true |
1c30f2235843f4941607a366bec62ecd24201161 | 2,086 | py | Python | data/cirq_new/cirq_program/startCirq_Class387.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_Class387.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_Class387.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=17
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the fixed example circuit over *input_qubit*.

    The gate sequence is auto-generated; the trailing ``number=`` comments
    index each appended gate. Note: *n* is currently unused - the circuit
    always addresses qubits 0..3.
    """
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0]))  # number=1
    c.append(cirq.H.on(input_qubit[1]))  # number=2
    c.append(cirq.H.on(input_qubit[1]))  # number=7
    c.append(cirq.H.on(input_qubit[2]))  # number=3
    c.append(cirq.H.on(input_qubit[3]))  # number=4
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0]))  # number=5
    c.append(cirq.H.on(input_qubit[0]))  # number=12
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0]))  # number=13
    c.append(cirq.H.on(input_qubit[0]))  # number=14
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))  # number=8
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))  # number=9
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))  # number=10
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))  # number=11
    c.append(cirq.Y.on(input_qubit[1]))  # number=15
    c.append(cirq.Y.on(input_qubit[1]))  # number=16
    # circuit end

    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a string of '0'/'1' characters."""
    chars = [str(int(bit)) for bit in bits]
    return ''.join(chars)
if __name__ == '__main__':
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]  # result[i]
    circuit = make_circuit(qubit_count,input_qubits)
    # Rewrite the circuit for the Sycamore device's sqrt-iswap gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2820  # NOTE(review): unused - sampling was replaced by exact simulation

    # Exact state-vector simulation instead of sampling.
    info = cirq.final_state_vector(circuit)

    qubits = round(log2(len(info)))
    # Probability of each basis state, keyed by its bitstring.
    frequencies = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    writefile = open("../data/startCirq_Class387.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)

    writefile.close()
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
def make_circuit(n: int, input_qubit):
c = cirq.Circuit()
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.H.on(input_qubit[1]))
c.append(cirq.H.on(input_qubit[1]))
c.append(cirq.H.on(input_qubit[2]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))
c.append(cirq.Y.on(input_qubit[1]))
c.append(cirq.Y.on(input_qubit[1]))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
info = cirq.final_state_vector(circuit)
qubits = round(log2(len(info)))
frequencies = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startCirq_Class387.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | true | true |
1c30f2c417e819203efce167aefd2285c2d89384 | 13,440 | py | Python | layers/categorical_encoding/linear_encoding.py | shawntan/CategoricalNF | 2f92c60f840bf78616c89dc498288e85b00a1587 | [
"MIT"
] | null | null | null | layers/categorical_encoding/linear_encoding.py | shawntan/CategoricalNF | 2f92c60f840bf78616c89dc498288e85b00a1587 | [
"MIT"
] | null | null | null | layers/categorical_encoding/linear_encoding.py | shawntan/CategoricalNF | 2f92c60f840bf78616c89dc498288e85b00a1587 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import numpy as np
sys.path.append("../../")
from general.mutils import get_param_val, one_hot
from layers.flows.flow_layer import FlowLayer
from layers.flows.permutation_layers import InvertibleConv
from layers.flows.activation_normalization import ExtActNormFlow
from layers.flows.coupling_layer import CouplingLayer
from layers.flows.distributions import LogisticDistribution
from layers.networks.help_layers import SimpleLinearLayer, LinearNet
from layers.categorical_encoding.decoder import create_decoder, create_embed_layer
class LinearCategoricalEncoding(FlowLayer):
    """
    Class for implementing the mixture model and linear flow encoding scheme of Categorical Normalizing Flows.
    A mixture model can be achieved by using a single activation normalization layer as "linear flow".
    Hence, this class combines both encoding schemes.
    """

    def __init__(self, num_dimensions, flow_config,
                 dataset_class=None,
                 vocab=None, vocab_size=-1,
                 use_decoder=False, decoder_config=None,
                 default_embed_layer_dims=64,
                 category_prior=None,
                 **kwargs):
        super().__init__()
        self.use_decoder = use_decoder
        self.dataset_class = dataset_class
        self.D = num_dimensions  # number of continuous latent dimensions per category

        self.embed_layer, self.vocab_size = create_embed_layer(vocab, vocab_size, default_embed_layer_dims)
        self.num_categories = self.vocab_size

        self.prior_distribution = LogisticDistribution(mu=0.0, sigma=1.0)  # Prior distribution in encoding flows
        self.flow_layers = _create_flows(num_dims=num_dimensions,
                                         embed_dims=self.embed_layer.weight.shape[1],
                                         config=flow_config)
        # Create decoder if needed
        if self.use_decoder:
            self.decoder = create_decoder(num_categories=self.vocab_size,
                                          num_dims=self.D,
                                          config=decoder_config)

        # Prior over the categories. If not given, a uniform prior is assumed
        if category_prior is None:
            category_prior = torch.zeros(self.vocab_size, dtype=torch.float32)
        else:
            assert category_prior.shape[
                       0] == self.num_categories, "[!] ERROR: Category prior needs to be of size [%i] but is %s" % (
                self.num_categories, str(category_prior.shape))
            if isinstance(category_prior, np.ndarray):
                category_prior = torch.from_numpy(category_prior)
        # Stored as log-probabilities (log-softmax over the raw prior).
        self.register_buffer("category_prior", F.log_softmax(category_prior, dim=-1))

    def forward(self, z, ldj=None, reverse=False, beta=1, delta=0.0, channel_padding_mask=None, **kwargs):
        """Encode categories to continuous latents (forward) or decode latents
        back to categories (reverse). Returns (z_out, ldj, detailed_ldj)."""
        ## We reshape z into [batch, 1, ...] as every categorical variable is considered to be independent.
        batch_size, seq_length = z.size(0), z.size(1)
        z = z.reshape((batch_size * seq_length, 1) + z.shape[2:])
        if channel_padding_mask is not None:
            channel_padding_mask = channel_padding_mask.reshape(batch_size * seq_length, 1, -1)
        else:
            channel_padding_mask = z.new_ones((batch_size * seq_length, 1, 1), dtype=torch.float32)

        ldj_loc = z.new_zeros(z.size(0), dtype=torch.float32)
        detailed_ldj = {}

        if not reverse:
            # z is of shape [Batch, SeqLength]
            z_categ = z  # Renaming here for better readability (what is discrete and what is continuous)

            ## 1.) Forward pass of current token flow
            z_cont = self.prior_distribution.sample(shape=(batch_size * seq_length, 1, self.D)).to(z_categ.device)
            init_log_p = self.prior_distribution.log_prob(z_cont).sum(dim=[1, 2])
            z_cont, ldj_forward = self._flow_forward(z_cont, z_categ, reverse=False)

            ## 2.) Approach-specific calculation of the posterior
            if not self.use_decoder:
                class_prior_log = torch.take(self.category_prior, z_categ.squeeze(dim=-1))
                log_point_prob = init_log_p - ldj_forward + class_prior_log
                class_prob_log = self._calculate_true_posterior(z_cont, z_categ, log_point_prob)
            else:
                class_prob_log = self._decoder_forward(z_cont, z_categ)

            ## 3.) Calculate final LDJ
            ldj_loc = (beta * class_prob_log - (init_log_p - ldj_forward))
            ldj_loc = ldj_loc * channel_padding_mask.squeeze()
            z_cont = z_cont * channel_padding_mask
            z_out = z_cont

            ## 4.) Statistics for debugging/monotoring
            if self.training:
                with torch.no_grad():
                    z_min = z_out.min()
                    z_max = z_out.max()
                    z_std = z_out.view(-1, z_out.shape[-1]).std(0).mean()
                    channel_padding_mask = channel_padding_mask.squeeze()
                    detailed_ldj = {"avg_token_prob": (
                            class_prob_log.exp() * channel_padding_mask).sum() / channel_padding_mask.sum(),
                                    "avg_token_bpd": -(
                                            class_prob_log * channel_padding_mask).sum() / channel_padding_mask.sum() * np.log2(
                                        np.exp(1)),
                                    "z_min": z_min,
                                    "z_max": z_max,
                                    "z_std": z_std}
                    detailed_ldj = {key: val.detach() for key, val in detailed_ldj.items()}
        else:
            # z is of shape [Batch * seq_len, 1, D]
            assert z.size(
                -1) == self.D, "[!] ERROR in categorical decoding: Input must have %i latent dimensions but got %i" % (
                self.D, z.shape[-1])

            # NOTE(review): class_prior_log is unused in this branch
            class_prior_log = self.category_prior[None, None, :]
            z_cont = z

            if not self.use_decoder:
                z_out = self._posterior_sample(z_cont)
            else:
                z_out = self._decoder_sample(z_cont)

        # Reshape output back to original shape
        if not reverse:
            z_out = z_out.reshape(batch_size, seq_length, -1)
        else:
            z_out = z_out.reshape(batch_size, seq_length)
        ldj_loc = ldj_loc.reshape(batch_size, seq_length).sum(dim=-1)

        # Add LDJ
        if ldj is not None:
            ldj = ldj + ldj_loc
        else:
            ldj = ldj_loc

        return z_out, ldj, detailed_ldj

    def _flow_forward(self, z_cont, z_categ, reverse, **kwargs):
        """Run the class-conditional flow stack on z_cont, conditioned on the
        embedding of z_categ. Returns (z_cont, ldj)."""
        ldj = z_cont.new_zeros(z_cont.size(0), dtype=torch.float32)
        embed_features = self.embed_layer(z_categ)

        for flow in (self.flow_layers if not reverse else reversed(self.flow_layers)):
            z_cont, ldj = flow(z_cont, ldj, ext_input=embed_features, reverse=reverse, **kwargs)

        return z_cont, ldj

    def _decoder_forward(self, z_cont, z_categ, **kwargs):
        ## Applies the deocder on every continuous variable independently and return probability of GT class
        class_prob_log = self.decoder(z_cont)
        class_prob_log = class_prob_log.gather(dim=-1, index=z_categ.view(-1, 1))
        return class_prob_log

    def _calculate_true_posterior(self, z_cont, z_categ, log_point_prob, **kwargs):
        """Compute the exact posterior log-probability of the ground-truth
        class given z_cont, by inverting all class-conditional flows."""
        ## Run backward pass of *all* class-conditional flows
        z_back_in = z_cont.expand(-1, self.num_categories, -1).reshape(-1, 1, z_cont.size(2))
        sample_categ = torch.arange(self.num_categories, dtype=torch.long).to(z_cont.device)
        sample_categ = sample_categ[None, :].expand(z_categ.size(0), -1).reshape(-1, 1)

        z_back, ldj_backward = self._flow_forward(z_back_in, sample_categ, reverse=True, **kwargs)
        back_log_p = self.prior_distribution.log_prob(z_back).sum(dim=[1, 2])

        ## Calculate the denominator (sum of probabilities of all classes)
        flow_log_prob = back_log_p + ldj_backward
        log_prob_denominator = flow_log_prob.view(z_cont.size(0), self.num_categories) + self.category_prior[None, :]
        # Replace log_prob of original class with forward probability
        # This improves stability and prevents the model to exploit numerical errors during inverting the flows
        orig_class_mask = one_hot(z_categ.squeeze(), num_classes=log_prob_denominator.size(1))
        log_prob_denominator = log_prob_denominator * (1 - orig_class_mask) + log_point_prob.unsqueeze(
            dim=-1) * orig_class_mask
        # Denominator is the sum of probability -> turn log to exp, and back to log
        log_denominator = torch.logsumexp(log_prob_denominator, dim=-1)

        ## Combine nominator and denominator for final prob log
        class_prob_log = (log_point_prob - log_denominator)
        return class_prob_log

    def _decoder_sample(self, z_cont, **kwargs):
        ## Sampling from decoder by taking the argmax.
        # We could also sample from the probabilities, however experienced that the argmax gives more stable results.
        # Presumably because the decoder has also seen values sampled from the encoding distributions and not anywhere besides that.
        return self.decoder(z_cont).argmax(dim=-1)

    def _posterior_sample(self, z_cont, **kwargs):
        """Decode z_cont by picking the class with the highest posterior
        probability under the class-conditional flows."""
        ## Run backward pass of *all* class-conditional flows
        z_back_in = z_cont.expand(-1, self.num_categories, -1).reshape(-1, 1, z_cont.size(2))
        sample_categ = torch.arange(self.num_categories, dtype=torch.long).to(z_cont.device)
        sample_categ = sample_categ[None, :].expand(z_cont.size(0), -1).reshape(-1, 1)

        z_back, ldj_backward = self._flow_forward(z_back_in, sample_categ, reverse=True, **kwargs)
        back_log_p = self.prior_distribution.log_prob(z_back).sum(dim=[1, 2])

        ## Calculate the log probability for each class
        flow_log_prob = back_log_p + ldj_backward
        log_prob_denominator = flow_log_prob.view(z_cont.size(0), self.num_categories) + self.category_prior[None, :]
        return log_prob_denominator.argmax(dim=-1)

    def info(self):
        """Return a human-readable multi-line description of this encoding."""
        s = ""
        if len(self.flow_layers) > 1:
            s += "Linear Encodings of categories, with %i dimensions and %i flows.\n" % (self.D, len(self.flow_layers))
        else:
            s += "Mixture model encoding of categories with %i dimensions\n" % (self.D)
        s += "-> Prior distribution: %s\n" % self.prior_distribution.info()
        if self.use_decoder:
            s += "-> Decoder network: %s\n" % self.decoder.info()
        s += "\n".join(
            ["-> [%i] " % (flow_index + 1) + flow.info() for flow_index, flow in enumerate(self.flow_layers)])
        return s
def _create_flows(num_dims, embed_dims, config):
    """Build the stack of encoding flows as an nn.ModuleList.

    With num_flows == 0 (or num_dims == 1) this degenerates to a single
    embedding-conditioned activation normalization, i.e. a mixture model;
    otherwise each flow step is [actnorm, invertible 1x1 conv, coupling].
    """
    num_flows = get_param_val(config, "num_flows", 0)
    num_hidden_layers = get_param_val(config, "hidden_layers", 2)
    hidden_size = get_param_val(config, "hidden_size", 256)

    # We apply a linear net in the coupling layers for linear flows
    block_type_name = "LinearNet"
    block_fun_coup = lambda c_out: LinearNet(c_in=num_dims,
                                             c_out=c_out,
                                             num_layers=num_hidden_layers,
                                             hidden_size=hidden_size,
                                             ext_input_dims=embed_dims)

    # For the activation normalization, we map an embedding to scaling and bias with a single layer
    block_fun_actn = lambda: SimpleLinearLayer(c_in=embed_dims, c_out=2 * num_dims, data_init=True)

    permut_layer = lambda flow_index: InvertibleConv(c_in=num_dims)
    actnorm_layer = lambda flow_index: ExtActNormFlow(c_in=num_dims,
                                                      net=block_fun_actn())
    # We do not use mixture coupling layers here aas we need the inverse to be differentiable as well
    coupling_layer = lambda flow_index: CouplingLayer(c_in=num_dims,
                                                      mask=CouplingLayer.create_channel_mask(c_in=num_dims),
                                                      block_type=block_type_name,
                                                      model_func=block_fun_coup)

    flow_layers = []
    if num_flows == 0 or num_dims == 1:  # Num_flows == 0 => mixture model, num_dims == 1 => coupling layers have no effect
        flow_layers += [actnorm_layer(flow_index=0)]
    else:
        for flow_index in range(num_flows):
            flow_layers += [
                actnorm_layer(flow_index),
                permut_layer(flow_index),
                coupling_layer(flow_index)
            ]

    return nn.ModuleList(flow_layers)
if __name__ == '__main__':
    ## Example for using linear encoding
    torch.manual_seed(42)
    np.random.seed(42)

    batch_size, seq_len = 3, 6
    vocab_size, D = 4, 3
    # num_flows == 0 -> pure mixture-model encoding (see _create_flows)
    flow_config = {
        "num_flows": 0,
        "num_hidden_layers": 1,
        "hidden_size": 128
    }

    categ_encod = LinearCategoricalEncoding(num_dimensions=D, flow_config=flow_config, vocab_size=vocab_size)
    print(categ_encod.info())

    # Encode a random batch of category indices into continuous latents.
    rand_inp = torch.randint(high=vocab_size, size=(batch_size, seq_len), dtype=torch.long)
    z_out, ldj, detail_ldj = categ_encod(rand_inp)
    print("Z out", z_out)
    print("Detail ldj", detail_ldj)
| 49.051095 | 146 | 0.626116 | import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import numpy as np
sys.path.append("../../")
from general.mutils import get_param_val, one_hot
from layers.flows.flow_layer import FlowLayer
from layers.flows.permutation_layers import InvertibleConv
from layers.flows.activation_normalization import ExtActNormFlow
from layers.flows.coupling_layer import CouplingLayer
from layers.flows.distributions import LogisticDistribution
from layers.networks.help_layers import SimpleLinearLayer, LinearNet
from layers.categorical_encoding.decoder import create_decoder, create_embed_layer
class LinearCategoricalEncoding(FlowLayer):
def __init__(self, num_dimensions, flow_config,
dataset_class=None,
vocab=None, vocab_size=-1,
use_decoder=False, decoder_config=None,
default_embed_layer_dims=64,
category_prior=None,
**kwargs):
super().__init__()
self.use_decoder = use_decoder
self.dataset_class = dataset_class
self.D = num_dimensions
self.embed_layer, self.vocab_size = create_embed_layer(vocab, vocab_size, default_embed_layer_dims)
self.num_categories = self.vocab_size
self.prior_distribution = LogisticDistribution(mu=0.0, sigma=1.0)
self.flow_layers = _create_flows(num_dims=num_dimensions,
embed_dims=self.embed_layer.weight.shape[1],
config=flow_config)
if self.use_decoder:
self.decoder = create_decoder(num_categories=self.vocab_size,
num_dims=self.D,
config=decoder_config)
if category_prior is None:
category_prior = torch.zeros(self.vocab_size, dtype=torch.float32)
else:
assert category_prior.shape[
0] == self.num_categories, "[!] ERROR: Category prior needs to be of size [%i] but is %s" % (
self.num_categories, str(category_prior.shape))
if isinstance(category_prior, np.ndarray):
category_prior = torch.from_numpy(category_prior)
self.register_buffer("category_prior", F.log_softmax(category_prior, dim=-1))
def forward(self, z, ldj=None, reverse=False, beta=1, delta=0.0, channel_padding_mask=None, **kwargs):
gth, 1) + z.shape[2:])
if channel_padding_mask is not None:
channel_padding_mask = channel_padding_mask.reshape(batch_size * seq_length, 1, -1)
else:
channel_padding_mask = z.new_ones((batch_size * seq_length, 1, 1), dtype=torch.float32)
ldj_loc = z.new_zeros(z.size(0), dtype=torch.float32)
detailed_ldj = {}
if not reverse:
z_categ = z
ution.sample(shape=(batch_size * seq_length, 1, self.D)).to(z_categ.device)
init_log_p = self.prior_distribution.log_prob(z_cont).sum(dim=[1, 2])
z_cont, ldj_forward = self._flow_forward(z_cont, z_categ, reverse=False)
class_prior_log = torch.take(self.category_prior, z_categ.squeeze(dim=-1))
log_point_prob = init_log_p - ldj_forward + class_prior_log
class_prob_log = self._calculate_true_posterior(z_cont, z_categ, log_point_prob)
else:
class_prob_log = self._decoder_forward(z_cont, z_categ)
eta * class_prob_log - (init_log_p - ldj_forward))
ldj_loc = ldj_loc * channel_padding_mask.squeeze()
z_cont = z_cont * channel_padding_mask
z_out = z_cont
with torch.no_grad():
z_min = z_out.min()
z_max = z_out.max()
z_std = z_out.view(-1, z_out.shape[-1]).std(0).mean()
channel_padding_mask = channel_padding_mask.squeeze()
detailed_ldj = {"avg_token_prob": (
class_prob_log.exp() * channel_padding_mask).sum() / channel_padding_mask.sum(),
"avg_token_bpd": -(
class_prob_log * channel_padding_mask).sum() / channel_padding_mask.sum() * np.log2(
np.exp(1)),
"z_min": z_min,
"z_max": z_max,
"z_std": z_std}
detailed_ldj = {key: val.detach() for key, val in detailed_ldj.items()}
else:
assert z.size(
-1) == self.D, "[!] ERROR in categorical decoding: Input must have %i latent dimensions but got %i" % (
self.D, z.shape[-1])
class_prior_log = self.category_prior[None, None, :]
z_cont = z
if not self.use_decoder:
z_out = self._posterior_sample(z_cont)
else:
z_out = self._decoder_sample(z_cont)
if not reverse:
z_out = z_out.reshape(batch_size, seq_length, -1)
else:
z_out = z_out.reshape(batch_size, seq_length)
ldj_loc = ldj_loc.reshape(batch_size, seq_length).sum(dim=-1)
if ldj is not None:
ldj = ldj + ldj_loc
else:
ldj = ldj_loc
return z_out, ldj, detailed_ldj
def _flow_forward(self, z_cont, z_categ, reverse, **kwargs):
ldj = z_cont.new_zeros(z_cont.size(0), dtype=torch.float32)
embed_features = self.embed_layer(z_categ)
for flow in (self.flow_layers if not reverse else reversed(self.flow_layers)):
z_cont, ldj = flow(z_cont, ldj, ext_input=embed_features, reverse=reverse, **kwargs)
return z_cont, ldj
def _decoder_forward(self, z_cont, z_categ, **kwargs):
1, index=z_categ.view(-1, 1))
return class_prob_log
def _calculate_true_posterior(self, z_cont, z_categ, log_point_prob, **kwargs):
gories, -1).reshape(-1, 1, z_cont.size(2))
sample_categ = torch.arange(self.num_categories, dtype=torch.long).to(z_cont.device)
sample_categ = sample_categ[None, :].expand(z_categ.size(0), -1).reshape(-1, 1)
z_back, ldj_backward = self._flow_forward(z_back_in, sample_categ, reverse=True, **kwargs)
back_log_p = self.prior_distribution.log_prob(z_back).sum(dim=[1, 2])
ob_denominator = flow_log_prob.view(z_cont.size(0), self.num_categories) + self.category_prior[None, :]
orig_class_mask = one_hot(z_categ.squeeze(), num_classes=log_prob_denominator.size(1))
log_prob_denominator = log_prob_denominator * (1 - orig_class_mask) + log_point_prob.unsqueeze(
dim=-1) * orig_class_mask
log_denominator = torch.logsumexp(log_prob_denominator, dim=-1)
nator)
return class_prob_log
def _decoder_sample(self, z_cont, **kwargs):
r(z_cont).argmax(dim=-1)
def _posterior_sample(self, z_cont, **kwargs):
gories, -1).reshape(-1, 1, z_cont.size(2))
sample_categ = torch.arange(self.num_categories, dtype=torch.long).to(z_cont.device)
sample_categ = sample_categ[None, :].expand(z_cont.size(0), -1).reshape(-1, 1)
z_back, ldj_backward = self._flow_forward(z_back_in, sample_categ, reverse=True, **kwargs)
back_log_p = self.prior_distribution.log_prob(z_back).sum(dim=[1, 2])
ward
log_prob_denominator = flow_log_prob.view(z_cont.size(0), self.num_categories) + self.category_prior[None, :]
return log_prob_denominator.argmax(dim=-1)
def info(self):
s = ""
if len(self.flow_layers) > 1:
s += "Linear Encodings of categories, with %i dimensions and %i flows.\n" % (self.D, len(self.flow_layers))
else:
s += "Mixture model encoding of categories with %i dimensions\n" % (self.D)
s += "-> Prior distribution: %s\n" % self.prior_distribution.info()
if self.use_decoder:
s += "-> Decoder network: %s\n" % self.decoder.info()
s += "\n".join(
["-> [%i] " % (flow_index + 1) + flow.info() for flow_index, flow in enumerate(self.flow_layers)])
return s
def _create_flows(num_dims, embed_dims, config):
num_flows = get_param_val(config, "num_flows", 0)
num_hidden_layers = get_param_val(config, "hidden_layers", 2)
hidden_size = get_param_val(config, "hidden_size", 256)
block_type_name = "LinearNet"
block_fun_coup = lambda c_out: LinearNet(c_in=num_dims,
c_out=c_out,
num_layers=num_hidden_layers,
hidden_size=hidden_size,
ext_input_dims=embed_dims)
block_fun_actn = lambda: SimpleLinearLayer(c_in=embed_dims, c_out=2 * num_dims, data_init=True)
permut_layer = lambda flow_index: InvertibleConv(c_in=num_dims)
actnorm_layer = lambda flow_index: ExtActNormFlow(c_in=num_dims,
net=block_fun_actn())
coupling_layer = lambda flow_index: CouplingLayer(c_in=num_dims,
mask=CouplingLayer.create_channel_mask(c_in=num_dims),
block_type=block_type_name,
model_func=block_fun_coup)
flow_layers = []
if num_flows == 0 or num_dims == 1:
flow_layers += [actnorm_layer(flow_index=0)]
else:
for flow_index in range(num_flows):
flow_layers += [
actnorm_layer(flow_index),
permut_layer(flow_index),
coupling_layer(flow_index)
]
return nn.ModuleList(flow_layers)
if __name__ == '__main__':
andom.seed(42)
batch_size, seq_len = 3, 6
vocab_size, D = 4, 3
flow_config = {
"num_flows": 0,
"num_hidden_layers": 1,
"hidden_size": 128
}
categ_encod = LinearCategoricalEncoding(num_dimensions=D, flow_config=flow_config, vocab_size=vocab_size)
print(categ_encod.info())
rand_inp = torch.randint(high=vocab_size, size=(batch_size, seq_len), dtype=torch.long)
z_out, ldj, detail_ldj = categ_encod(rand_inp)
print("Z out", z_out)
print("Detail ldj", detail_ldj)
| true | true |
1c30f35555b72e0cd7a0c742accfe8de41d46f31 | 1,921 | py | Python | test_add_delete_group.py | rata-mahata/my_training | c8be1db95798382b9aeffa5e793ed66d58c34a25 | [
"Apache-2.0"
] | null | null | null | test_add_delete_group.py | rata-mahata/my_training | c8be1db95798382b9aeffa5e793ed66d58c34a25 | [
"Apache-2.0"
] | null | null | null | test_add_delete_group.py | rata-mahata/my_training | c8be1db95798382b9aeffa5e793ed66d58c34a25 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
    """Return True if a JavaScript alert is currently open in driver *wd*.

    The (old-style) ``switch_to_alert()`` accessor raises when no alert is
    present; that exception is the "no alert" signal.  The previous bare
    ``except:`` also swallowed ``KeyboardInterrupt``/``SystemExit``; narrow
    it to ``Exception``.
    """
    try:
        wd.switch_to_alert().text
        return True
    except Exception:  # raised by the driver when no alert is open
        return False
class test_add_delete_group(unittest.TestCase):
    """Selenium-IDE style end-to-end test: create a group, then delete it.

    Drives a local "addressbook" PHP application through a Firefox session.
    """

    def setUp(self):
        # One Firefox session per test; a generous implicit wait because
        # this recorded script has no explicit synchronisation points.
        self.wd = WebDriver()
        self.wd.implicitly_wait(60)

    def test_test_add_delete_group(self):
        """Log in as admin, create group "second", delete it, log out."""
        success = True
        wd = self.wd
        # -- log in as admin --
        wd.get("http://localhost/addressbook/group.php")
        wd.find_element_by_name("user").click()
        wd.find_element_by_name("user").clear()
        wd.find_element_by_name("user").send_keys("admin")
        wd.find_element_by_name("pass").click()
        wd.find_element_by_name("pass").clear()
        wd.find_element_by_name("pass").send_keys("secret")
        wd.find_element_by_css_selector("input[type=\"submit\"]").click()
        # -- create a new group --
        wd.find_element_by_name("new").click()
        wd.find_element_by_name("group_name").click()
        wd.find_element_by_name("group_name").clear()
        wd.find_element_by_name("group_name").send_keys("second")
        wd.find_element_by_name("group_header").click()
        wd.find_element_by_name("group_header").clear()
        wd.find_element_by_name("group_header").send_keys("dfdnbvn")
        wd.find_element_by_name("submit").click()
        # -- select and delete the group on the group page --
        wd.find_element_by_link_text("group page").click()
        if not wd.find_element_by_name("selected[]").is_selected():
            wd.find_element_by_name("selected[]").click()
        # input[5] is the recorded locator of the delete button --
        # TODO confirm it is stable across application versions.
        wd.find_element_by_xpath("//div[@id='content']/form/input[5]").click()
        wd.find_element_by_link_text("groups").click()
        wd.find_element_by_link_text("Logout").click()
        # ``success`` is never set False; the script only fails through
        # element-lookup exceptions raised by the driver.
        self.assertTrue(success)

    def tearDown(self):
        self.wd.quit()
| 38.42 | 78 | 0.667361 |
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_delete_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_delete_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/group.php")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_css_selector("input[type=\"submit\"]").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("second")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("dfdnbvn")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
if not wd.find_element_by_name("selected[]").is_selected():
wd.find_element_by_name("selected[]").click()
wd.find_element_by_xpath("//div[@id='content']/form/input[5]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
| true | true |
1c30f362fea1a85f6147a0b976a0bf91d9d78695 | 7,754 | py | Python | research/cv/retinanet_resnet152/src/backbone.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/cv/retinanet_resnet152/src/backbone.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/cv/retinanet_resnet152/src/backbone.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Backbone"""
import mindspore.nn as nn
from mindspore.ops import operations as P
def _bn(channel):
    """Return a BatchNorm2d layer with the project's standard initialisation."""
    norm = nn.BatchNorm2d(channel,
                          eps=1e-5,
                          momentum=0.97,
                          gamma_init=1,
                          beta_init=0,
                          moving_mean_init=0,
                          moving_var_init=1)
    return norm
class ConvBNReLU(nn.Cell):
    """
    Convolution/Depthwise fused with Batchnorm and ReLU block definition.

    Args:
        in_planes (int): Input channel.
        out_planes (int): Output channel.
        kernel_size (int): Input kernel size.
        stride (int): Stride size for the first convolutional layer. Default: 1.
        groups (int): channel group. Convolution is 1 while Depthwise is input channel. Default: 1.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> ConvBNReLU(16, 256, kernel_size=1, stride=1, groups=1)
    """

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        super(ConvBNReLU, self).__init__()
        padding = 0
        # Bug fix: ``groups`` was previously accepted (and documented) but
        # never forwarded to the convolution, so depthwise configurations
        # silently fell back to a dense convolution.  Forward it through
        # MindSpore's ``group`` keyword; the default of 1 preserves the old
        # behaviour for every existing caller.
        conv = nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad_mode='same',
                         padding=padding, group=groups)
        layers = [conv, _bn(out_planes), nn.ReLU()]
        self.features = nn.SequentialCell(layers)

    def construct(self, x):
        """Apply conv -> batchnorm -> ReLU."""
        output = self.features(x)
        return output
class ResidualBlock(nn.Cell):
    """
    ResNet V1 bottleneck residual block.

    Args:
        in_channel (int): Input channel.
        out_channel (int): Output channel.
        stride (int): Stride size for the first convolutional layer. Default: 1.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> ResidualBlock(3, 256, stride=2)
    """

    expansion = 4

    def __init__(self,
                 in_channel,
                 out_channel,
                 stride=1):
        super(ResidualBlock, self).__init__()
        bottleneck = out_channel // self.expansion
        # Main path: 1x1 reduce -> 3x3 (carries the stride) -> 1x1 expand.
        self.conv1 = ConvBNReLU(in_channel, bottleneck, kernel_size=1, stride=1)
        self.conv2 = ConvBNReLU(bottleneck, bottleneck, kernel_size=3, stride=stride)
        self.conv3 = nn.Conv2dBnAct(bottleneck, out_channel, kernel_size=1, stride=1,
                                    pad_mode='same', padding=0,
                                    has_bn=True, activation='relu')
        # A projection shortcut is required whenever the identity path cannot
        # be added element-wise to the main path.
        self.down_sample = stride != 1 or in_channel != out_channel
        self.down_sample_layer = None
        if self.down_sample:
            self.down_sample_layer = nn.Conv2dBnAct(
                in_channel, out_channel, kernel_size=1, stride=stride,
                pad_mode='same', padding=0, has_bn=True, activation='relu')
        self.add = P.Add()
        self.relu = P.ReLU()

    def construct(self, x):
        """Run the main path and add the (possibly projected) shortcut."""
        shortcut = x
        out = self.conv3(self.conv2(self.conv1(x)))
        if self.down_sample:
            shortcut = self.down_sample_layer(shortcut)
        return self.relu(self.add(out, shortcut))
class resnet(nn.Cell):
    """
    ResNet architecture.

    Args:
        block (Cell): Block class used to build each stage (e.g. ResidualBlock).
        layer_nums (list): Numbers of blocks in the four stages.
        in_channels (list): Input channel of each stage.
        out_channels (list): Output channel of each stage.
        strides (list): Stride of the first block in each stage.
        num_classes (int): The number of classes that the training images are belonging to.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> resnet(ResidualBlock,
        >>>        [3, 4, 6, 3],
        >>>        [64, 256, 512, 1024],
        >>>        [256, 512, 1024, 2048],
        >>>        [1, 2, 2, 2],
        >>>        10)
    """

    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides,
                 num_classes):
        super(resnet, self).__init__()
        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")
        # Stem: 7x7/2 convolution followed by 3x3/2 max-pooling.
        self.conv1 = ConvBNReLU(3, 64, kernel_size=7, stride=2)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0])
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1])
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2])
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3])

    def _make_layer(self, block, layer_num, in_channel, out_channel, stride):
        """
        Make stage network of ResNet.

        Args:
            block (Cell): Resnet block class.
            layer_num (int): Layer number.
            in_channel (int): Input channel.
            out_channel (int): Output channel.
            stride (int): Stride size for the first convolutional layer.

        Returns:
            SequentialCell, the output layer.

        Examples:
            >>> _make_layer(ResidualBlock, 3, 128, 256, 2)
        """
        # Bug fix: ``block`` was previously ignored and ``ResidualBlock`` was
        # hardcoded; use the parameter so the class honours its documented
        # interface (identical behaviour for existing callers, which pass
        # ResidualBlock).
        # The first block may change resolution/channels; the rest keep them.
        layers = [block(in_channel, out_channel, stride=stride)]
        for _ in range(1, layer_num):
            layers.append(block(out_channel, out_channel, stride=1))
        return nn.SequentialCell(layers)

    def construct(self, x):
        """Return the C3/C4/C5 feature maps (used by the detection neck)."""
        x = self.conv1(x)
        C1 = self.maxpool(x)
        C2 = self.layer1(C1)
        C3 = self.layer2(C2)
        C4 = self.layer3(C3)
        C5 = self.layer4(C4)
        return C3, C4, C5
def resnet101(num_classes):
    """Build a ResNet-101 backbone returning C3/C4/C5 feature maps."""
    layer_nums = [3, 4, 23, 3]
    in_channels = [64, 256, 512, 1024]
    out_channels = [256, 512, 1024, 2048]
    strides = [1, 2, 2, 2]
    return resnet(ResidualBlock, layer_nums, in_channels, out_channels,
                  strides, num_classes)
def resnet152(num_classes):
    """Build a ResNet-152 backbone returning C3/C4/C5 feature maps."""
    layer_nums = [3, 8, 36, 3]
    in_channels = [64, 256, 512, 1024]
    out_channels = [256, 512, 1024, 2048]
    strides = [1, 2, 2, 2]
    return resnet(ResidualBlock, layer_nums, in_channels, out_channels,
                  strides, num_classes)
| 34.008772 | 111 | 0.536497 |
import mindspore.nn as nn
from mindspore.ops import operations as P
def _bn(channel):
return nn.BatchNorm2d(channel, eps=1e-5, momentum=0.97,
gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)
class ConvBNReLU(nn.Cell):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
super(ConvBNReLU, self).__init__()
padding = 0
conv = nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad_mode='same',
padding=padding)
layers = [conv, _bn(out_planes), nn.ReLU()]
self.features = nn.SequentialCell(layers)
def construct(self, x):
output = self.features(x)
return output
class ResidualBlock(nn.Cell):
expansion = 4
def __init__(self,
in_channel,
out_channel,
stride=1):
super(ResidualBlock, self).__init__()
channel = out_channel // self.expansion
self.conv1 = ConvBNReLU(in_channel, channel, kernel_size=1, stride=1)
self.conv2 = ConvBNReLU(channel, channel, kernel_size=3, stride=stride)
self.conv3 = nn.Conv2dBnAct(channel, out_channel, kernel_size=1, stride=1, pad_mode='same', padding=0,
has_bn=True, activation='relu')
self.down_sample = False
if stride != 1 or in_channel != out_channel:
self.down_sample = True
self.down_sample_layer = None
if self.down_sample:
self.down_sample_layer = nn.Conv2dBnAct(in_channel, out_channel,
kernel_size=1, stride=stride,
pad_mode='same', padding=0, has_bn=True, activation='relu')
self.add = P.Add()
self.relu = P.ReLU()
def construct(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.down_sample:
identity = self.down_sample_layer(identity)
out = self.add(out, identity)
out = self.relu(out)
return out
class resnet(nn.Cell):
def __init__(self,
block,
layer_nums,
in_channels,
out_channels,
strides,
num_classes):
super(resnet, self).__init__()
if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")
self.conv1 = ConvBNReLU(3, 64, kernel_size=7, stride=2)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
self.layer1 = self._make_layer(block,
layer_nums[0],
in_channel=in_channels[0],
out_channel=out_channels[0],
stride=strides[0])
self.layer2 = self._make_layer(block,
layer_nums[1],
in_channel=in_channels[1],
out_channel=out_channels[1],
stride=strides[1])
self.layer3 = self._make_layer(block,
layer_nums[2],
in_channel=in_channels[2],
out_channel=out_channels[2],
stride=strides[2])
self.layer4 = self._make_layer(block,
layer_nums[3],
in_channel=in_channels[3],
out_channel=out_channels[3],
stride=strides[3])
def _make_layer(self, block, layer_num, in_channel, out_channel, stride):
layers = []
resnet_block = ResidualBlock(in_channel, out_channel, stride=stride)
layers.append(resnet_block)
for _ in range(1, layer_num):
resnet_block = ResidualBlock(out_channel, out_channel, stride=1)
layers.append(resnet_block)
return nn.SequentialCell(layers)
def construct(self, x):
x = self.conv1(x)
C1 = self.maxpool(x)
C2 = self.layer1(C1)
C3 = self.layer2(C2)
C4 = self.layer3(C3)
C5 = self.layer4(C4)
return C3, C4, C5
def resnet101(num_classes):
return resnet(ResidualBlock,
[3, 4, 23, 3],
[64, 256, 512, 1024],
[256, 512, 1024, 2048],
[1, 2, 2, 2],
num_classes)
def resnet152(num_classes):
return resnet(ResidualBlock,
[3, 8, 36, 3],
[64, 256, 512, 1024],
[256, 512, 1024, 2048],
[1, 2, 2, 2],
num_classes)
| true | true |
1c30f3c64f210a9940e133049ddd8550621c9c93 | 2,745 | py | Python | sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_internal/__init__.py | casperlehmann/azure-sdk-for-python | d57163e25c82e4f53a0a11e6bd777726ce5f3d88 | [
"MIT"
] | null | null | null | sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_internal/__init__.py | casperlehmann/azure-sdk-for-python | d57163e25c82e4f53a0a11e6bd777726ce5f3d88 | [
"MIT"
] | null | null | null | sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_internal/__init__.py | casperlehmann/azure-sdk-for-python | d57163e25c82e4f53a0a11e6bd777726ce5f3d88 | [
"MIT"
] | null | null | null | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from collections import namedtuple
try:
import urllib.parse as parse
except ImportError:
# pylint:disable=import-error
import urlparse as parse # type: ignore
from .challenge_auth_policy import ChallengeAuthPolicy, ChallengeAuthPolicyBase
from .client_base import KeyVaultClientBase
from .http_challenge import HttpChallenge
from . import http_challenge_cache as HttpChallengeCache
__all__ = [
"ChallengeAuthPolicy",
"ChallengeAuthPolicyBase",
"HttpChallenge",
"HttpChallengeCache",
"KeyVaultClientBase",
]
# Components of a Key Vault resource URL, e.g.
# https://<vault>.vault.azure.net/keys/<name>/<version>
_VaultId = namedtuple("VaultId", ["vault_url", "collection", "name", "version"])


def parse_vault_id(url):
    """Parse a Key Vault resource URL into (vault_url, collection, name, version).

    ``version`` is None when the URL has only two path segments.

    :raises ValueError: if *url* is not a well-formed vault resource URL.
    """
    # Bug fix: the error messages read "is not not a valid" -- the doubled
    # "not" is removed in all three messages below.
    try:
        parsed_uri = parse.urlparse(url)
    except Exception:  # pylint: disable=broad-except
        raise ValueError("'{}' is not a valid url".format(url))
    if not (parsed_uri.scheme and parsed_uri.hostname):
        raise ValueError("'{}' is not a valid url".format(url))

    # Drop empty segments (leading/trailing slashes); a vault resource path
    # has exactly 2 (collection/name) or 3 (collection/name/version) parts.
    path = list(filter(None, parsed_uri.path.split("/")))
    if len(path) < 2 or len(path) > 3:
        raise ValueError("'{}' is not a valid vault url".format(url))

    return _VaultId(
        vault_url="{}://{}".format(parsed_uri.scheme, parsed_uri.hostname),
        collection=path[0],
        name=path[1],
        version=path[2] if len(path) == 3 else None,
    )
# (container_url, folder_name) of a Key Vault backup inside blob storage.
BackupLocation = namedtuple("BackupLocation", ["container_url", "folder_name"])


def parse_blob_storage_url(blob_storage_url):
    # type: (str) -> BackupLocation
    """Parse the blob container URL and folder name from a backup's blob storage URL.

    For example, https://<account>.blob.core.windows.net/backup/mhsm-account-2020090117323313 parses to
    (container_url="https://<account>.blob.core.windows.net/backup", folder_name="mhsm-account-2020090117323313").

    :raises ValueError: if *blob_storage_url* does not look like a backup blob URL.
    """
    try:
        # Last non-empty path segment is the backup folder; everything before
        # it (minus the joining slash) is the container URL.
        folder_name = blob_storage_url.rstrip("/").split("/")[-1]
        container_url = blob_storage_url[: blob_storage_url.rindex(folder_name) - 1]
        return BackupLocation(container_url, folder_name)
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
    # no longer swallowed; any parsing failure still surfaces as ValueError.
    except Exception:  # pylint:disable=broad-except
        raise ValueError(
            '"blob_storage_url" should be the URL of a blob holding a Key Vault backup, for example '
            '"https://<account>.blob.core.windows.net/backup/mhsm-account-2020090117323313"'
        )
try:
# pylint:disable=unused-import
from .async_challenge_auth_policy import AsyncChallengeAuthPolicy
from .async_client_base import AsyncKeyVaultClientBase
__all__.extend(["AsyncChallengeAuthPolicy", "AsyncKeyVaultClientBase"])
except (SyntaxError, ImportError):
pass
| 33.888889 | 114 | 0.68561 |
from collections import namedtuple
try:
import urllib.parse as parse
except ImportError:
import urlparse as parse
from .challenge_auth_policy import ChallengeAuthPolicy, ChallengeAuthPolicyBase
from .client_base import KeyVaultClientBase
from .http_challenge import HttpChallenge
from . import http_challenge_cache as HttpChallengeCache
__all__ = [
"ChallengeAuthPolicy",
"ChallengeAuthPolicyBase",
"HttpChallenge",
"HttpChallengeCache",
"KeyVaultClientBase",
]
_VaultId = namedtuple("VaultId", ["vault_url", "collection", "name", "version"])
def parse_vault_id(url):
try:
parsed_uri = parse.urlparse(url)
except Exception:
raise ValueError("'{}' is not not a valid url".format(url))
if not (parsed_uri.scheme and parsed_uri.hostname):
raise ValueError("'{}' is not not a valid url".format(url))
path = list(filter(None, parsed_uri.path.split("/")))
if len(path) < 2 or len(path) > 3:
raise ValueError("'{}' is not not a valid vault url".format(url))
return _VaultId(
vault_url="{}://{}".format(parsed_uri.scheme, parsed_uri.hostname),
collection=path[0],
name=path[1],
version=path[2] if len(path) == 3 else None,
)
BackupLocation = namedtuple("BackupLocation", ["container_url", "folder_name"])
def parse_blob_storage_url(blob_storage_url):
try:
folder_name = blob_storage_url.rstrip("/").split("/")[-1]
container_url = blob_storage_url[: blob_storage_url.rindex(folder_name) - 1]
return BackupLocation(container_url, folder_name)
except:
raise ValueError(
'"blob_storage_url" should be the URL of a blob holding a Key Vault backup, for example '
'"https://<account>.blob.core.windows.net/backup/mhsm-account-2020090117323313"'
)
try:
from .async_challenge_auth_policy import AsyncChallengeAuthPolicy
from .async_client_base import AsyncKeyVaultClientBase
__all__.extend(["AsyncChallengeAuthPolicy", "AsyncKeyVaultClientBase"])
except (SyntaxError, ImportError):
pass
| true | true |
1c30f3d256230cc47ffc4c0e2e70824ce109d9c2 | 2,825 | py | Python | pyexcel_io/database/importers/django.py | pyexcel/pyexcel-io | b66ccfc062b756e4068db484d21da6d9317c49b5 | [
"BSD-3-Clause"
] | 52 | 2016-06-15T17:11:23.000Z | 2022-02-07T12:44:07.000Z | pyexcel_io/database/importers/django.py | pyexcel/pyexcel-io | b66ccfc062b756e4068db484d21da6d9317c49b5 | [
"BSD-3-Clause"
] | 100 | 2015-12-28T17:58:50.000Z | 2022-01-29T19:48:39.000Z | pyexcel_io/database/importers/django.py | pyexcel/pyexcel-io | b66ccfc062b756e4068db484d21da6d9317c49b5 | [
"BSD-3-Clause"
] | 20 | 2016-05-09T16:44:36.000Z | 2021-09-27T11:54:00.000Z | """
pyexcel_io.database.django
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The lower level handler for django import and export
:copyright: (c) 2014-2020 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
import logging
import pyexcel_io.constants as constants
from pyexcel_io.utils import is_empty_array, swap_empty_string_for_none
from pyexcel_io.plugin_api import IWriter, ISheetWriter
log = logging.getLogger(__name__)
class DjangoModelWriter(ISheetWriter):
    """Accumulate tabular rows as instances of one django model and save them."""

    def __init__(self, importer, adapter, batch_size=None, bulk_save=True):
        """
        :param importer: the owning book-level importer (kept for symmetry).
        :param adapter: supplies model, column names, mapping dict and
            the optional row initializer.
        :param batch_size: forwarded to ``bulk_create`` when bulk saving.
        :param bulk_save: save via one ``bulk_create`` (True) or one
            ``save()`` per instance (False).
        """
        self.batch_size = batch_size
        self.model = adapter.model
        self.column_names = adapter.column_names
        self.mapdict = adapter.column_name_mapping_dict
        self.initializer = adapter.row_initializer
        self.objs = []
        self.bulk_save = bulk_save
        self.adapter = adapter

    def write_row(self, array):
        """Convert one row into a pending model instance (saved in close())."""
        if is_empty_array(array):
            # An all-empty row carries no data; report it instead of creating
            # a blank instance.  Route the message through the module logger
            # (previously ``print``, leaving ``log`` defined but unused).
            log.warning(constants.MESSAGE_EMPTY_ARRAY)
            return
        new_array = swap_empty_string_for_none(array)
        if self.mapdict:
            # Keep only the columns the mapping dict knows about, preserving
            # their original order.
            new_array = [
                element
                for index, element in enumerate(new_array)
                if index in self.mapdict
            ]
        model_to_be_created = new_array
        if self.initializer is not None:
            model_to_be_created = self.initializer(new_array)
        if model_to_be_created:
            row = dict(zip(self.column_names, model_to_be_created))
            self.objs.append(self.model(**row))
        # else: the initializer rejected the row -- skip it

    def close(self):
        """Persist all accumulated model instances."""
        if self.bulk_save:
            self.model.objects.bulk_create(
                self.objs, batch_size=self.batch_size
            )
        else:
            for an_object in self.objs:
                an_object.save()
class DjangoBookWriter(IWriter):
    """Write a multi-sheet book into django models, one model per sheet."""

    def __init__(self, exporter, _, **keywords):
        self.importer = exporter
        self._keywords = keywords

    def create_sheet(self, sheet_name):
        """Return a sheet writer bound to the model matching *sheet_name*.

        :raises Exception: if no model matches the sheet name.
        """
        model = self.importer.get(sheet_name)
        if not model:
            # Bug fix: the concatenated message previously lacked a space
            # ("...models.Please be aware...").
            raise Exception(
                "Sheet: %s does not match any given models. " % sheet_name
                + "Please be aware of case sensitivity."
            )
        return DjangoModelWriter(
            self.importer,
            model,
            batch_size=self._keywords.get("batch_size", None),
            bulk_save=self._keywords.get("bulk_save", True),
        )

    def close(self):
        pass
| 31.388889 | 75 | 0.60354 | import logging
import pyexcel_io.constants as constants
from pyexcel_io.utils import is_empty_array, swap_empty_string_for_none
from pyexcel_io.plugin_api import IWriter, ISheetWriter
log = logging.getLogger(__name__)
class DjangoModelWriter(ISheetWriter):
def __init__(self, importer, adapter, batch_size=None, bulk_save=True):
self.batch_size = batch_size
self.model = adapter.model
self.column_names = adapter.column_names
self.mapdict = adapter.column_name_mapping_dict
self.initializer = adapter.row_initializer
self.objs = []
self.bulk_save = bulk_save
self.adapter = adapter
def write_row(self, array):
if is_empty_array(array):
print(constants.MESSAGE_EMPTY_ARRAY)
else:
new_array = swap_empty_string_for_none(array)
if self.mapdict:
another_new_array = []
for index, element in enumerate(new_array):
if index in self.mapdict:
another_new_array.append(element)
new_array = another_new_array
model_to_be_created = new_array
if self.initializer is not None:
model_to_be_created = self.initializer(new_array)
if model_to_be_created:
row = dict(zip(self.column_names, model_to_be_created))
self.objs.append(self.model(**row))
def close(self):
if self.bulk_save:
self.model.objects.bulk_create(
self.objs, batch_size=self.batch_size
)
else:
for an_object in self.objs:
an_object.save()
class DjangoBookWriter(IWriter):
def __init__(self, exporter, _, **keywords):
self.importer = exporter
self._keywords = keywords
def create_sheet(self, sheet_name):
sheet_writer = None
model = self.importer.get(sheet_name)
if model:
sheet_writer = DjangoModelWriter(
self.importer,
model,
batch_size=self._keywords.get("batch_size", None),
bulk_save=self._keywords.get("bulk_save", True),
)
else:
raise Exception(
"Sheet: %s does not match any given models." % sheet_name
+ "Please be aware of case sensitivity."
)
return sheet_writer
def close(self):
pass
| true | true |
1c30f3dbd78aeb08af65d22b400118b11cf43cf1 | 2,916 | py | Python | ci/release-info.py | rmourey26/jormungandr | e5d13409b931a58aee3ea72a5729a99f068b6043 | [
"Apache-2.0",
"MIT"
] | 6 | 2021-08-30T00:49:12.000Z | 2022-01-27T07:07:53.000Z | ci/release-info.py | rmourey26/jormungandr | e5d13409b931a58aee3ea72a5729a99f068b6043 | [
"Apache-2.0",
"MIT"
] | 38 | 2022-01-25T22:27:40.000Z | 2022-03-31T22:38:50.000Z | ci/release-info.py | rmourey26/jormungandr | e5d13409b931a58aee3ea72a5729a99f068b6043 | [
"Apache-2.0",
"MIT"
] | 3 | 2021-05-20T08:26:00.000Z | 2022-03-27T22:31:36.000Z | import json
import os
import re
import sys
from datetime import date
from subprocess import Popen, PIPE
def check_version(crate):
    """Return 1 if the crate version is consistent with the release tag.

    ``crate`` is a ``[version, manifest_path]`` pair.  When the global
    ``ref`` holds a tag reference, the crate version must match
    ``refs/tags/v<version>``; when ``ref`` is ``None`` (nightly builds),
    every version is accepted.
    """
    matches_tag = ref is None or ref == "refs/tags/v" + str(crate[0])
    return 1 if matches_tag else 0
def print_error(crate, match):
    """Emit a GitHub Actions error annotation for a mismatched crate.

    ``crate`` is a ``[version, manifest_path]`` pair and ``match`` is the
    result of :func:`check_version`; nothing is printed for crates that
    matched the tag reference.
    """
    if match:
        return
    template = "::error file={path}::version {version} does not match release tag {tag}"
    print(template.format(tag=ref, version=str(crate[0]), path=str(crate[1])))
def bundle_version(crates):
    """Collect workspace crate versions and validate them against the tag.

    ``crates`` maps each crate name to an (initially empty) list that is
    filled in place with ``[version, manifest_path]`` taken from the cargo
    metadata.  On any mismatch with the release tag, an error annotation is
    printed per offending crate and the process exits with status 1;
    otherwise the shared bundle version string is returned.
    """
    # Use the Popen context manager so the pipe is closed and the child
    # process is reaped; the original leaked the fd and never wait()ed.
    with Popen(
        ["cargo", "metadata", "--format-version=1", "--no-deps"], stdout=PIPE
    ) as channel:
        # parse json data
        data = json.load(channel.stdout).get("packages")
    # read, map and assign workspace crates versions to bundle package versions
    for package in data:
        if package["name"] in crates:
            crates[package["name"]].append(package["version"])
            crates[package["name"]].append(package["manifest_path"])
    # Checks package versions of the crates bundle for consistency with the
    # given tag reference (module-level ``ref``).
    consistency = [check_version(crate) for crate in crates.values()]
    if not all(consistency):
        # Print errors for packages whose versions didn't match the tag.
        for crate, match in zip(crates.values(), consistency):
            print_error(crate, match)
        sys.exit(1)
    # All recorded versions matched; return the first crate's version as the
    # bundle version (the workspace keeps them in lock-step).
    return list(crates.values())[0][0]
# Determine the release type from the CI event name and, for pushes, the ref.
event_name = sys.argv[1]
# NOTE: rebinding ``date`` shadows the imported ``datetime.date`` class; the
# name must stay exactly "date" because it is emitted via globals() below.
date = date.today().strftime("%Y%m%d")
ref = None
if event_name == "push":
    ref = os.getenv("GITHUB_REF")
    if ref.startswith("refs/tags/"):
        release_type = "tagged"
    elif ref == "refs/heads/ci/test/nightly":
        # emulate the nightly workflow
        release_type = "nightly"
        ref = None
    else:
        raise ValueError("unexpected ref " + ref)
elif event_name == "schedule":
    release_type = "nightly"
else:
    raise ValueError("unexpected event name " + event_name)
# Cargo workspace crates/packages for versioning bundle
crates = {
    "jormungandr": [],
    "jormungandr-lib": [],
    "jcli": [],
    "jormungandr-testing-utils": [],
    "jormungandr-integration-tests": [],
    "jormungandr-scenario-tests": [],
}
# Validate crate versions against the tag and get the shared version string
# (exits with status 1 on any mismatch).
version = bundle_version(crates)
release_flags = ""
if release_type == "tagged":
    tag = "v" + version
elif release_type == "nightly":
    # Append "-nightly.<date>" to the semver version, replacing any existing
    # pre-release suffix, and mark the GitHub release as a prerelease.
    version = re.sub(
        r"^(\d+\.\d+\.\d+)(-.*)?$",
        r"\1-nightly." + date,
        version,
    )
    tag = "nightly." + date
    release_flags = "--prerelease"
# Export the computed values as GitHub Actions step outputs; each name is
# looked up in the module globals, so the bindings above must keep these names.
for name in "version", "date", "tag", "release_type", "release_flags":
    print("::set-output name={0}::{1}".format(name, globals()[name]))
| 29.16 | 95 | 0.631344 | import json
import os
import re
import sys
from datetime import date
from subprocess import Popen, PIPE
def check_version(crate):
if ref is not None and ref != "refs/tags/v" + str(crate[0]):
return 0
else:
return 1
def print_error(crate, match):
if not match:
print(
"::error file={path}::version {version} does not match release tag {tag}".format(
tag=ref, version=str(crate[0]), path=str(crate[1])
)
)
def bundle_version(crates):
# Reads package versions from workspace manifest file
channel = Popen(
["cargo", "metadata", "--format-version=1", "--no-deps"], stdout=PIPE
)
# parse json data
data = json.load(channel.stdout).get("packages")
# read, map and assign workspace crates versions to bundle package versions
for package, _ in enumerate(data):
if data[package]["name"] in crates:
crates[data[package]["name"]].append(data[package]["version"])
crates[data[package]["name"]].append(data[package]["manifest_path"])
# Checks package versions of the crates bundle for consistency with the given tag reference
consistency = list(map(check_version, list(crates.values())))
# Print errors for packages which versions didn't match tag reference
if not all(consistency):
list(map(print_error, list(crates.values()), consistency))
sys.exit(1)
elif all(consistency):
version = list(crates.values())[0][0]
return version
event_name = sys.argv[1]
date = date.today().strftime("%Y%m%d")
ref = None
if event_name == "push":
ref = os.getenv("GITHUB_REF")
if ref.startswith("refs/tags/"):
release_type = "tagged"
elif ref == "refs/heads/ci/test/nightly":
release_type = "nightly"
ref = None
else:
raise ValueError("unexpected ref " + ref)
elif event_name == "schedule":
release_type = "nightly"
else:
raise ValueError("unexpected event name " + event_name)
crates = {
"jormungandr": [],
"jormungandr-lib": [],
"jcli": [],
"jormungandr-testing-utils": [],
"jormungandr-integration-tests": [],
"jormungandr-scenario-tests": [],
}
version = bundle_version(crates)
release_flags = ""
if release_type == "tagged":
tag = "v" + version
elif release_type == "nightly":
version = re.sub(
r"^(\d+\.\d+\.\d+)(-.*)?$",
r"\1-nightly." + date,
version,
)
tag = "nightly." + date
release_flags = "--prerelease"
for name in "version", "date", "tag", "release_type", "release_flags":
print("::set-output name={0}::{1}".format(name, globals()[name]))
| true | true |
1c30f490906a28914cb02aeeab0fd2025e6e0b92 | 1,461 | py | Python | auraxium/models/_item.py | leonhard-s/auraxium | 8a1b7fb6e6e1b11334d69875df032ccc6da330bf | [
"MIT"
] | 23 | 2018-12-04T12:47:11.000Z | 2022-02-08T05:46:21.000Z | auraxium/models/_item.py | brhumphe/auraxium | 8a1b7fb6e6e1b11334d69875df032ccc6da330bf | [
"MIT"
] | 50 | 2020-04-15T10:55:30.000Z | 2022-02-20T11:14:01.000Z | auraxium/models/_item.py | brhumphe/auraxium | 8a1b7fb6e6e1b11334d69875df032ccc6da330bf | [
"MIT"
] | 6 | 2018-12-02T11:55:03.000Z | 2020-10-06T05:15:36.000Z | """Data classes for :mod:`auraxium.ps2._item`."""
from typing import Optional
from .base import ImageData, RESTPayload
from ..types import LocaleData
__all__ = [
'ItemCategoryData',
'ItemData',
'ItemTypeData'
]
# pylint: disable=too-few-public-methods
class ItemCategoryData(RESTPayload):
    """Data class for :class:`auraxium.ps2.ItemCategory`.

    This class mirrors the payload data returned by the API, you may
    use its attributes as keys in filters or queries.
    """

    # Unique identifier of the item category
    item_category_id: int
    # Localised display name of the category
    name: LocaleData
class ItemData(RESTPayload, ImageData):
    """Data class for :class:`auraxium.ps2.Item`.

    This class mirrors the payload data returned by the API, you may
    use its attributes as keys in filters or queries.
    """

    # Unique identifier of the item
    item_id: int
    # Optional foreign keys / references; None when the API omits the field
    item_type_id: Optional[int] = None
    item_category_id: Optional[int] = None
    activatable_ability_id: Optional[int] = None
    passive_ability_id: Optional[int] = None
    # Whether the item is a vehicle-mounted weapon
    is_vehicle_weapon: bool
    # Localised display name and (optional) description
    name: LocaleData
    description: Optional[LocaleData] = None
    faction_id: Optional[int] = None
    # Maximum number of copies sharing one inventory slot
    max_stack_size: int
    skill_set_id: Optional[int] = None
    is_default_attachment: bool
class ItemTypeData(RESTPayload):
    """Data class for :class:`auraxium.ps2.ItemType`.

    This class mirrors the payload data returned by the API, you may
    use its attributes as keys in filters or queries.
    """

    # Unique identifier of the item type
    item_type_id: int
    # Display name of the type
    name: str
    # Internal code string for the type (exact semantics defined by the API)
    code: str
| 24.762712 | 68 | 0.704997 |
from typing import Optional
from .base import ImageData, RESTPayload
from ..types import LocaleData
__all__ = [
'ItemCategoryData',
'ItemData',
'ItemTypeData'
]
class ItemCategoryData(RESTPayload):
item_category_id: int
name: LocaleData
class ItemData(RESTPayload, ImageData):
item_id: int
item_type_id: Optional[int] = None
item_category_id: Optional[int] = None
activatable_ability_id: Optional[int] = None
passive_ability_id: Optional[int] = None
is_vehicle_weapon: bool
name: LocaleData
description: Optional[LocaleData] = None
faction_id: Optional[int] = None
max_stack_size: int
skill_set_id: Optional[int] = None
is_default_attachment: bool
class ItemTypeData(RESTPayload):
item_type_id: int
name: str
code: str
| true | true |
1c30f535a6b24afcc447a580a6aaeb27f51c6db0 | 2,230 | py | Python | python/kwiver/vital/tests/alg/simple_keyframe_selection.py | mwoehlke-kitware/kwiver | 614a488bd2b7fe551ac75eec979766d882709791 | [
"BSD-3-Clause"
] | 176 | 2015-07-31T23:33:37.000Z | 2022-03-21T23:42:44.000Z | python/kwiver/vital/tests/alg/simple_keyframe_selection.py | mwoehlke-kitware/kwiver | 614a488bd2b7fe551ac75eec979766d882709791 | [
"BSD-3-Clause"
] | 1,276 | 2015-05-03T01:21:27.000Z | 2022-03-31T15:32:20.000Z | python/kwiver/vital/tests/alg/simple_keyframe_selection.py | mwoehlke-kitware/kwiver | 614a488bd2b7fe551ac75eec979766d882709791 | [
"BSD-3-Clause"
] | 85 | 2015-01-25T05:13:38.000Z | 2022-01-14T14:59:37.000Z | # ckwg +29
# Copyright 2020 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
from kwiver.vital.algo import KeyframeSelection
from kwiver.vital.tests.py_helpers import CommonConfigurationMixin
class SimpleKeyframeSelection(CommonConfigurationMixin, KeyframeSelection):
    """Trivial KWIVER keyframe-selection algorithm used for testing.

    Configuration handling is inherited from
    :class:`CommonConfigurationMixin`.
    """

    def __init__(self):
        # NOTE(review): KeyframeSelection.__init__ is called directly rather
        # than via super(), so any __init__ on the mixin is skipped —
        # confirm this is intentional before changing to super().__init__().
        KeyframeSelection.__init__(self)
def __vital_algorithm_register__():
    """Register SimpleKeyframeSelection with the KWIVER algorithm factory.

    Registration is skipped when an implementation with the same name has
    already been recorded for this algorithm type.
    """
    from kwiver.vital.algo import algorithm_factory

    impl_name = "SimpleKeyframeSelection"
    type_name = SimpleKeyframeSelection.static_type_name()
    already_registered = algorithm_factory.has_algorithm_impl_name(
        type_name, impl_name
    )
    if not already_registered:
        algorithm_factory.add_algorithm(
            impl_name, "test simple keyframe selection", SimpleKeyframeSelection,
        )
        algorithm_factory.mark_algorithm_as_loaded(impl_name)
| 43.72549 | 87 | 0.783408 |
from kwiver.vital.algo import KeyframeSelection
from kwiver.vital.tests.py_helpers import CommonConfigurationMixin
class SimpleKeyframeSelection(CommonConfigurationMixin, KeyframeSelection):
def __init__(self):
KeyframeSelection.__init__(self)
def __vital_algorithm_register__():
from kwiver.vital.algo import algorithm_factory
implementation_name = "SimpleKeyframeSelection"
if algorithm_factory.has_algorithm_impl_name(
SimpleKeyframeSelection.static_type_name(), implementation_name
):
return
algorithm_factory.add_algorithm(
implementation_name, "test simple keyframe selection", SimpleKeyframeSelection,
)
algorithm_factory.mark_algorithm_as_loaded(implementation_name)
| true | true |
1c30f638814e86df729906a7032b2b43419736a3 | 2,726 | py | Python | TUI/Inst/APOGEE/FPIShutterWdg.py | ApachePointObservatory/stui | cfaaa9bcec9da9ac21bad1b9a2c7db2a739ffc97 | [
"BSD-3-Clause"
] | 2 | 2019-05-07T04:33:57.000Z | 2021-12-16T19:54:02.000Z | TUI/Inst/APOGEE/FPIShutterWdg.py | ApachePointObservatory/stui | cfaaa9bcec9da9ac21bad1b9a2c7db2a739ffc97 | [
"BSD-3-Clause"
] | 5 | 2018-05-29T20:14:50.000Z | 2020-02-17T21:58:30.000Z | TUI/Inst/APOGEE/FPIShutterWdg.py | ApachePointObservatory/stui | cfaaa9bcec9da9ac21bad1b9a2c7db2a739ffc97 | [
"BSD-3-Clause"
] | 2 | 2019-10-18T22:02:54.000Z | 2020-09-26T04:20:26.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2022-01-06
# @Filename: FPIShutterWdg.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import RO.Constants
import RO.Wdg
import TUI.Models
import BaseDeviceWdg
class FPIShutterWdg(BaseDeviceWdg.BaseDeviceWdg):
    """Widgets to control APOGEE's FPI shutter.

    Shows a checkbutton bound to the ``apogeefpi`` actor's
    ``shutter_position`` keyword; toggling the checkbutton sends an
    open/close command, and keyword updates keep the UI in sync.
    """
    # Category name constant; not referenced within this class body —
    # presumably used by BaseDeviceWdg conventions (TODO confirm).
    _ShutterCat = "shutter"

    def __init__(self, gridder, statusBar, colSpan=3, helpURL=None):
        """Build the FPI shutter widget row.

        Inputs:
        - gridder: RO.Wdg gridder used to lay out the widgets
        - statusBar: status bar for reporting command progress
        - colSpan: accepted for API compatibility; not used in this class
        - helpURL: URL of help documentation
        """
        BaseDeviceWdg.BaseDeviceWdg.__init__(self,
            master = gridder._master,
            actor = "apogeefpi",
            statusBar = statusBar,
            helpURL = helpURL,
        )
        self._updatingStatus = False
        self.statusBar = statusBar
        self.helpURL = helpURL
        self.gridder = gridder
        master = self.gridder._master
        # Checkbutton whose boolean state mirrors the shutter:
        # checked shows "Open", unchecked shows "Closed".
        self.shutterWdg = RO.Wdg.Checkbutton(
            master = master,
            onvalue = "Open",
            offvalue = "Closed",
            autoIsCurrent = True,
            showValue = True,
            callFunc = self.doShutter,
            helpText = "Open or close FPI shutter",
            helpURL = helpURL,
        )
        gridder.gridWdg("FPI Shutter", self.shutterWdg, self.cancelBtn, sticky="w")
        # Track the actor's shutter_position keyword to keep the UI in sync.
        self.model = TUI.Models.getModel(self.actor)
        self.model.shutter_position.addCallback(self.updateStatus)

    def doShutter(self, wdg=None):
        """Send a command to open or close the shutter.

        Invoked as the checkbutton's callFunc; the command string follows
        the checkbutton's current boolean state.
        """
        doOpen = self.shutterWdg.getBool()
        if doOpen:
            cmdStr = "open"
        else:
            cmdStr = "close"
        self.doCmd(cmdStr)

    def enableButtons(self, dumCmd=None):
        """Enable or disable widgets, as appropriate.

        The shutter control is disabled while a command is running, and
        the cancel button is enabled only while one is running.
        """
        isRunning = self.isRunning
        self.shutterWdg.setEnable(not isRunning)
        self.cancelBtn.setEnable(isRunning)

    def updateStatus(self, keyVar=None):
        """Shutter position keyword callback.

        Reads the current shutter_position value directly from the model
        (the keyVar argument is ignored) and updates the checkbutton
        without re-triggering doShutter (guarded by updateLock).
        """
        keyVar = self.model.shutter_position
        isCurrent = keyVar.isCurrent
        with self.updateLock():
            # Unknown position or stale data: show "?" as the off label.
            if keyVar[0] == '?' or isCurrent is False:
                self.shutterWdg['offvalue'] = "?"
                self.shutterWdg.set("?", isCurrent=False)
                return
            if keyVar[0] == 'open':
                # NOTE(review): offvalue is not restored to "Closed" here,
                # so a prior "?" label may persist until a 'closed' update —
                # confirm whether that matters in practice.
                self.shutterWdg.setDefault(True)
                self.shutterWdg.set(True, isCurrent=isCurrent)
            elif keyVar[0] == 'closed':
                self.shutterWdg['offvalue'] = "Closed"
                self.shutterWdg.setDefault(False)
                self.shutterWdg.set(False, isCurrent=isCurrent)
            else:
                # Unrecognised position value: mark the display as stale.
                self.shutterWdg.setIsCurrent(False)
| 30.288889 | 83 | 0.589875 |
import RO.Constants
import RO.Wdg
import TUI.Models
import BaseDeviceWdg
class FPIShutterWdg(BaseDeviceWdg.BaseDeviceWdg):
_ShutterCat = "shutter"
def __init__(self, gridder, statusBar, colSpan=3, helpURL=None):
BaseDeviceWdg.BaseDeviceWdg.__init__(self,
master = gridder._master,
actor = "apogeefpi",
statusBar = statusBar,
helpURL = helpURL,
)
self._updatingStatus = False
self.statusBar = statusBar
self.helpURL = helpURL
self.gridder = gridder
master = self.gridder._master
self.shutterWdg = RO.Wdg.Checkbutton(
master = master,
onvalue = "Open",
offvalue = "Closed",
autoIsCurrent = True,
showValue = True,
callFunc = self.doShutter,
helpText = "Open or close FPI shutter",
helpURL = helpURL,
)
gridder.gridWdg("FPI Shutter", self.shutterWdg, self.cancelBtn, sticky="w")
self.model = TUI.Models.getModel(self.actor)
self.model.shutter_position.addCallback(self.updateStatus)
def doShutter(self, wdg=None):
doOpen = self.shutterWdg.getBool()
if doOpen:
cmdStr = "open"
else:
cmdStr = "close"
self.doCmd(cmdStr)
def enableButtons(self, dumCmd=None):
isRunning = self.isRunning
self.shutterWdg.setEnable(not isRunning)
self.cancelBtn.setEnable(isRunning)
def updateStatus(self, keyVar=None):
keyVar = self.model.shutter_position
isCurrent = keyVar.isCurrent
with self.updateLock():
if keyVar[0] == '?' or isCurrent is False:
self.shutterWdg['offvalue'] = "?"
self.shutterWdg.set("?", isCurrent=False)
return
if keyVar[0] == 'open':
self.shutterWdg.setDefault(True)
self.shutterWdg.set(True, isCurrent=isCurrent)
elif keyVar[0] == 'closed':
self.shutterWdg['offvalue'] = "Closed"
self.shutterWdg.setDefault(False)
self.shutterWdg.set(False, isCurrent=isCurrent)
else:
self.shutterWdg.setIsCurrent(False)
| true | true |
1c30f674afd407aa801be24a1c4cef789273b52d | 1,771 | py | Python | learn_python/learn_appium/sample-code-master/sample-code/examples/python/selendroid_simple.py | yehonadav/yonadav_tutorials | e797fdaeaea4c5d85392f724442645afb9391f15 | [
"Apache-2.0"
] | 2 | 2019-08-04T17:30:53.000Z | 2020-09-21T08:39:55.000Z | learn_python/learn_appium/sample-code-master/sample-code/examples/python/selendroid_simple.py | yehonadav/yonadav_tutorials | e797fdaeaea4c5d85392f724442645afb9391f15 | [
"Apache-2.0"
] | 5 | 2019-10-31T14:55:58.000Z | 2022-02-26T04:06:39.000Z | learn_python/learn_appium/sample-code-master/sample-code/examples/python/selendroid_simple.py | yehonadav/yonadav_tutorials | e797fdaeaea4c5d85392f724442645afb9391f15 | [
"Apache-2.0"
] | null | null | null | import os
from time import sleep
import unittest
from desired_capabilities import desired_caps
from appium import webdriver
def PATH(p):
    """Return the absolute path of ``p`` resolved relative to this file.

    Resolving against this file's directory (rather than the current
    working directory) keeps the test runnable from anywhere.  Converted
    from a lambda assignment to a ``def`` (PEP 8 E731); the callable's
    name and signature are unchanged.
    """
    return os.path.abspath(os.path.join(os.path.dirname(__file__), p))
# think times can be useful e.g. when testing with an emulator
THINK_TIME = 5.
class SimpleSalendroidTests(unittest.TestCase):
    """Smoke tests for the ApiDemos app driven through Selendroid.

    Each test talks to a live Appium server at localhost:4723, so these
    tests perform real device/emulator I/O.
    """

    def setUp(self):
        """Start a Selendroid-backed Appium session against ApiDemos."""
        desired_caps['automationName'] = "selendroid"
        desired_caps['app'] = PATH(
            '../../../sample-code/apps/ApiDemos/bin/ApiDemos-debug.apk'
        )
        self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)

    def tearDown(self):
        # end the session
        self.driver.quit()

    def test_selendroid(self):
        """Navigate the ApiDemos main list and verify element texts."""
        el = self.driver.find_element_by_name("Animation")
        self.assertEqual('Animation', el.text)

        el = self.driver.find_element_by_class_name("android.widget.TextView")
        self.assertEqual('Accessibility', el.text)

        el = self.driver.find_element_by_name("App")
        el.click()
        sleep(THINK_TIME)

        els = self.driver.find_elements_by_class_name("android.widget.TextView")
        # Selendroid gets all the elements, not just the visible ones
        self.assertLessEqual(30, len(els))

        self.driver.find_element_by_name('Action Bar')

        # Return to the main list and confirm it is shown again.
        self.driver.back()
        sleep(THINK_TIME)
        el = self.driver.find_element_by_name("Animation")
        self.assertEqual('Animation', el.text)
if __name__ == '__main__':
    # Build and run the suite directly (verbosity=2 lists each test name).
    suite = unittest.TestLoader().loadTestsFromTestCase(SimpleSalendroidTests)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 31.070175 | 84 | 0.679277 | import os
from time import sleep
import unittest
from desired_capabilities import desired_caps
from appium import webdriver
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
THINK_TIME = 5.
class SimpleSalendroidTests(unittest.TestCase):
def setUp(self):
desired_caps['automationName'] = "selendroid"
desired_caps['app'] = PATH(
'../../../sample-code/apps/ApiDemos/bin/ApiDemos-debug.apk'
)
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
def tearDown(self):
self.driver.quit()
def test_selendroid(self):
el = self.driver.find_element_by_name("Animation")
self.assertEqual('Animation', el.text)
el = self.driver.find_element_by_class_name("android.widget.TextView")
self.assertEqual('Accessibility', el.text)
el = self.driver.find_element_by_name("App")
el.click()
sleep(THINK_TIME)
els = self.driver.find_elements_by_class_name("android.widget.TextView")
self.assertLessEqual(30, len(els))
self.driver.find_element_by_name('Action Bar')
self.driver.back()
sleep(THINK_TIME)
el = self.driver.find_element_by_name("Animation")
self.assertEqual('Animation', el.text)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(SimpleSalendroidTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| true | true |
1c30f693215ca49da098ac8098c3d7d8b4a6c0b8 | 226 | py | Python | gitlab/datadog_checks/gitlab/__init__.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 4 | 2021-06-21T19:21:49.000Z | 2021-06-23T21:21:55.000Z | gitlab/datadog_checks/gitlab/__init__.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2018-08-15T05:50:17.000Z | 2018-08-15T05:50:17.000Z | gitlab/datadog_checks/gitlab/__init__.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2018-08-15T05:45:42.000Z | 2018-08-15T05:45:42.000Z | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .__about__ import __version__
from .gitlab import GitlabCheck
__all__ = [
'__version__',
'GitlabCheck'
]
| 20.545455 | 59 | 0.725664 |
from .__about__ import __version__
from .gitlab import GitlabCheck
__all__ = [
'__version__',
'GitlabCheck'
]
| true | true |
1c30f7d3b3d6ffbeaaf7141bb889427fcedfee0f | 9,853 | py | Python | faker/providers/color/color.py | mgorny/faker | b1176e01bf4d7f1aef408a4bb96a9e46188cc113 | [
"MIT"
] | 12,077 | 2015-01-01T18:30:07.000Z | 2022-03-31T23:22:01.000Z | faker/providers/color/color.py | mgorny/faker | b1176e01bf4d7f1aef408a4bb96a9e46188cc113 | [
"MIT"
] | 1,306 | 2015-01-03T05:18:55.000Z | 2022-03-31T02:43:04.000Z | faker/providers/color/color.py | mgorny/faker | b1176e01bf4d7f1aef408a4bb96a9e46188cc113 | [
"MIT"
] | 1,855 | 2015-01-08T14:20:10.000Z | 2022-03-25T17:23:32.000Z | """Internal module for human-friendly color generation.
.. important::
End users of this library should not use anything in this module.
Code adapted from:
- https://github.com/davidmerfield/randomColor (CC0)
- https://github.com/kevinwuhoo/randomcolor-py (MIT License)
Additional reference from:
- https://en.wikipedia.org/wiki/HSL_and_HSV
"""
import colorsys
import math
import random
import sys
from typing import TYPE_CHECKING, Dict, Hashable, Optional, Sequence, Tuple
if TYPE_CHECKING:
from ...factory import Generator
from ...typing import HueType
# Hue ranges and (saturation, brightness) lower-bound curves for each named
# color.  Red wraps around 0 degrees, so its range uses negative numbers;
# hues in [334, 360] are shifted by -360 before lookup (see get_color_info).
COLOR_MAP: Dict[str, Dict[str, Sequence[Tuple[int, int]]]] = {
    'monochrome': {
        'hue_range': [(0, 0)],
        'lower_bounds': [
            (0, 0), (100, 0),
        ],
    },
    'red': {
        'hue_range': [(-26, 18)],
        'lower_bounds': [
            (20, 100), (30, 92), (40, 89),
            (50, 85), (60, 78), (70, 70),
            (80, 60), (90, 55), (100, 50),
        ],
    },
    'orange': {
        'hue_range': [(19, 46)],
        'lower_bounds': [
            (20, 100), (30, 93), (40, 88), (50, 86),
            (60, 85), (70, 70), (100, 70),
        ],
    },
    'yellow': {
        'hue_range': [(47, 62)],
        'lower_bounds': [
            (25, 100), (40, 94), (50, 89), (60, 86),
            (70, 84), (80, 82), (90, 80), (100, 75),
        ],
    },
    'green': {
        'hue_range': [(63, 178)],
        'lower_bounds': [
            (30, 100), (40, 90), (50, 85), (60, 81),
            (70, 74), (80, 64), (90, 50), (100, 40),
        ],
    },
    'blue': {
        'hue_range': [(179, 257)],
        'lower_bounds': [
            (20, 100), (30, 86), (40, 80),
            (50, 74), (60, 60), (70, 52),
            (80, 44), (90, 39), (100, 35),
        ],
    },
    'purple': {
        'hue_range': [(258, 282)],
        'lower_bounds': [
            (20, 100), (30, 87), (40, 79),
            (50, 70), (60, 65), (70, 59),
            (80, 52), (90, 45), (100, 42),
        ],
    },
    'pink': {
        'hue_range': [(283, 334)],
        'lower_bounds': [
            (20, 100), (30, 90), (40, 86), (60, 84),
            (80, 80), (90, 75), (100, 73),
        ],
    },
}


class RandomColor:
    """Implement random color generation in a human-friendly way.

    This helper class encapsulates the internal implementation and logic of the
    :meth:`color() <faker.providers.color.Provider.color>` method.

    Bug fixes relative to the previous revision:

    - ``HueType`` is only imported under ``TYPE_CHECKING``, but function
      annotations are evaluated eagerly at class-creation time, which raised
      ``NameError`` on import.  All ``HueType`` annotations are now string
      forward references.
    - ``__init__`` previously aliased and then mutated the module-level
      ``COLOR_MAP`` (adding ``saturation_range``/``brightness_range`` keys to
      the shared dict on every instantiation); it now works on a per-instance
      copy.
    """

    def __init__(self, generator: Optional["Generator"] = None, seed: Optional[Hashable] = None) -> None:
        # Copy the outer dict and each per-color dict so that the derived
        # ranges computed below live on this instance instead of mutating
        # the shared module-level COLOR_MAP.  The leaf lists are shared but
        # never modified.
        self.colormap = {name: dict(attrs) for name, attrs in COLOR_MAP.items()}

        # Option to specify a seed was not removed so this class
        # can still be tested independently w/o generators
        if generator:
            self.random = generator.random
        else:
            self.seed = seed if seed else random.randint(0, sys.maxsize)
            self.random = random.Random(self.seed)

        # Derive each color's saturation and brightness ranges from the end
        # points of its lower-bound curve.
        for color_name, color_attrs in self.colormap.items():
            lower_bounds: Sequence[Tuple[int, int]] = color_attrs['lower_bounds']
            s_min, b_max = lower_bounds[0]
            s_max, b_min = lower_bounds[-1]

            self.colormap[color_name]['saturation_range'] = [(s_min, s_max)]
            self.colormap[color_name]['brightness_range'] = [(b_min, b_max)]

    def generate(self,
                 hue: Optional["HueType"] = None,
                 luminosity: Optional[str] = None,
                 color_format: str = 'hex') -> str:
        """Generate a color.

        Whenever :meth:`color() <faker.providers.color.Provider.color>` is
        called, the arguments used are simply passed into this method, and this
        method handles the rest.
        """
        # First we pick a hue (H)
        h = self.pick_hue(hue)

        # Then use H to determine saturation (S)
        s = self.pick_saturation(h, hue, luminosity)

        # Then use S and H to determine brightness (B).
        b = self.pick_brightness(h, s, luminosity)

        # Then we return the HSB color in the desired format
        return self.set_format((h, s, b), color_format)

    def pick_hue(self, hue: Optional["HueType"]) -> int:
        """Return a numerical hue value."""
        hue_ = self.random_within(self.get_hue_range(hue))

        # Instead of storing red as two separate ranges,
        # we group them, using negative numbers
        if hue_ < 0:
            hue_ += 360

        return hue_

    def pick_saturation(self, hue: int, hue_name: Optional["HueType"], luminosity: Optional[str]) -> int:
        """Return a numerical saturation value."""
        if luminosity is None:
            luminosity = ''
        if luminosity == 'random':
            return self.random_within((0, 100))

        # Monochrome colors are pure grayscale.
        if isinstance(hue_name, str) and hue_name == 'monochrome':
            return 0

        s_min, s_max = self.get_saturation_range(hue)

        if luminosity == 'bright':
            s_min = 55
        elif luminosity == 'dark':
            s_min = s_max - 10
        elif luminosity == 'light':
            s_max = 55

        return self.random_within((s_min, s_max))

    def pick_brightness(self, h: int, s: int, luminosity: Optional[str]) -> int:
        """Return a numerical brightness value."""
        if luminosity is None:
            luminosity = ''

        b_min = self.get_minimum_brightness(h, s)
        b_max = 100

        if luminosity == 'dark':
            b_max = b_min + 20
        elif luminosity == 'light':
            b_min = (b_max + b_min) // 2
        elif luminosity == 'random':
            b_min = 0
            b_max = 100

        return self.random_within((b_min, b_max))

    def set_format(self, hsv: Tuple[int, int, int], color_format: str) -> str:
        """Handle conversion of HSV values into desired format."""
        if color_format == 'hsv':
            color = f'hsv({hsv[0]}, {hsv[1]}, {hsv[2]})'
        elif color_format == 'hsl':
            hsl = self.hsv_to_hsl(hsv)
            color = f'hsl({hsl[0]}, {hsl[1]}, {hsl[2]})'
        elif color_format == 'rgb':
            rgb = self.hsv_to_rgb(hsv)
            color = f'rgb({rgb[0]}, {rgb[1]}, {rgb[2]})'
        else:
            # Default: hex string, e.g. "#a1b2c3".
            rgb = self.hsv_to_rgb(hsv)
            color = f'#{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}'

        return color

    def get_minimum_brightness(self, h: int, s: int) -> int:
        """Return the minimum allowed brightness for ``h`` and ``s``.

        Linearly interpolates the color's lower-bound curve at saturation
        ``s``; returns 0 when ``s`` falls outside the curve's domain.
        """
        lower_bounds: Sequence[Tuple[int, int]] = self.get_color_info(h)['lower_bounds']

        for i in range(len(lower_bounds) - 1):
            s1, v1 = lower_bounds[i]
            s2, v2 = lower_bounds[i + 1]

            if s1 <= s <= s2:
                m: float = (v2 - v1) / (s2 - s1)
                b: float = v1 - m * s1

                return int(m * s + b)

        return 0

    def get_hue_range(self, color_input: Optional["HueType"]) -> Tuple[int, int]:
        """Return the hue range for a given ``color_input``.

        Accepts a single number (degenerate range), a color name, ``None``
        (full circle), or a 2-item tuple/list of numbers (clamped to
        [0, 360] and reordered if needed).

        :raises TypeError: if ``color_input`` is none of the above.
        """
        if isinstance(color_input, (int, float)) and 0 <= color_input <= 360:
            color_input = int(color_input)
            return (color_input, color_input)
        elif isinstance(color_input, str) and color_input in self.colormap:
            return self.colormap[color_input]['hue_range'][0]
        elif color_input is None:
            return (0, 360)

        if isinstance(color_input, list):
            color_input = tuple(color_input)
        if (isinstance(color_input, tuple) and len(color_input) == 2
                and all(isinstance(c, (float, int)) for c in color_input)):
            v1 = int(color_input[0])
            v2 = int(color_input[1])
            if v2 < v1:
                v1, v2 = v2, v1
            v1 = max(v1, 0)
            v2 = min(v2, 360)
            return (v1, v2)

        raise TypeError('Hue must be a valid string, numeric type, or a tuple/list of 2 numeric types.')

    def get_saturation_range(self, hue: int) -> Tuple[int, int]:
        """Return the saturation range for a given numerical ``hue`` value."""
        return self.get_color_info(hue)['saturation_range'][0]

    def get_color_info(self, hue: int) -> Dict[str, Sequence[Tuple[int, int]]]:
        """Return the color info for a given numerical ``hue`` value.

        :raises ValueError: if ``hue`` matches no configured hue range.
        """
        # Maps red colors to make picking hue easier
        if 334 <= hue <= 360:
            hue -= 360

        for color_name, color in self.colormap.items():
            hue_range: Tuple[int, int] = color['hue_range'][0]
            if hue_range[0] <= hue <= hue_range[1]:
                return self.colormap[color_name]
        else:
            raise ValueError('Value of hue `%s` is invalid.' % hue)

    def random_within(self, r: Sequence[int]) -> int:
        """Return a random integer within the range ``r``."""
        return self.random.randint(int(r[0]), int(r[1]))

    @classmethod
    def hsv_to_rgb(cls, hsv: Tuple[int, int, int]) -> Tuple[int, int, int]:
        """Convert HSV to RGB.

        This method expects ``hsv`` to be a 3-tuple of H, S, and V values, and
        it will return a 3-tuple of the equivalent R, G, and B values.
        """
        h, s, v = hsv
        # Clamp hue away from exact 0/360 before normalising.
        h = max(h, 1)
        h = min(h, 359)

        r, g, b = colorsys.hsv_to_rgb(h / 360, s / 100, v / 100)
        return (int(r * 255), int(g * 255), int(b * 255))

    @classmethod
    def hsv_to_hsl(cls, hsv: Tuple[int, int, int]) -> Tuple[int, int, int]:
        """Convert HSV to HSL.

        This method expects ``hsv`` to be a 3-tuple of H, S, and V values, and
        it will return a 3-tuple of the equivalent H, S, and L values.
        """
        h, s, v = hsv
        s_: float = s / 100.0
        v_: float = v / 100.0
        lum = 0.5 * v_ * (2 - s_)
        # Saturation is undefined at lightness extremes; use 0 there.
        s_ = 0.0 if lum in [0, 1] else v_ * s_ / (1 - math.fabs(2 * lum - 1))
        return (int(h), int(s_ * 100), int(lum * 100))
| 33.4 | 105 | 0.534761 |
import colorsys
import math
import random
import sys
from typing import TYPE_CHECKING, Dict, Hashable, Optional, Sequence, Tuple
if TYPE_CHECKING:
from ...factory import Generator
from ...typing import HueType
COLOR_MAP: Dict[str, Dict[str, Sequence[Tuple[int, int]]]] = {
'monochrome': {
'hue_range': [(0, 0)],
'lower_bounds': [
(0, 0), (100, 0),
],
},
'red': {
'hue_range': [(-26, 18)],
'lower_bounds': [
(20, 100), (30, 92), (40, 89),
(50, 85), (60, 78), (70, 70),
(80, 60), (90, 55), (100, 50),
],
},
'orange': {
'hue_range': [(19, 46)],
'lower_bounds': [
(20, 100), (30, 93), (40, 88), (50, 86),
(60, 85), (70, 70), (100, 70),
],
},
'yellow': {
'hue_range': [(47, 62)],
'lower_bounds': [
(25, 100), (40, 94), (50, 89), (60, 86),
(70, 84), (80, 82), (90, 80), (100, 75),
],
},
'green': {
'hue_range': [(63, 178)],
'lower_bounds': [
(30, 100), (40, 90), (50, 85), (60, 81),
(70, 74), (80, 64), (90, 50), (100, 40),
],
},
'blue': {
'hue_range': [(179, 257)],
'lower_bounds': [
(20, 100), (30, 86), (40, 80),
(50, 74), (60, 60), (70, 52),
(80, 44), (90, 39), (100, 35),
],
},
'purple': {
'hue_range': [(258, 282)],
'lower_bounds': [
(20, 100), (30, 87), (40, 79),
(50, 70), (60, 65), (70, 59),
(80, 52), (90, 45), (100, 42),
],
},
'pink': {
'hue_range': [(283, 334)],
'lower_bounds': [
(20, 100), (30, 90), (40, 86), (60, 84),
(80, 80), (90, 75), (100, 73),
],
},
}
class RandomColor:
    """Generator of random but visually pleasing colors.

    Port of the `randomColor` JS library: a hue is sampled from the
    ranges tabulated in the module-level ``COLOR_MAP``, then saturation
    and brightness are chosen from the per-color bounds derived from
    that table.
    """
    def __init__(self, generator: Optional["Generator"] = None, seed: Optional[Hashable] = None) -> None:
        # Reuse an externally supplied RNG when given; otherwise build a
        # private, reproducible one from `seed` (or a freshly drawn seed).
        # NOTE(review): a falsy seed such as 0 is treated as "no seed" here
        # and replaced by a random one — confirm that is intended.
        self.colormap = COLOR_MAP
        if generator:
            self.random = generator.random
        else:
            self.seed = seed if seed else random.randint(0, sys.maxsize)
            self.random = random.Random(self.seed)
        # Precompute each color's saturation/brightness range from the first
        # and last (saturation, brightness) lower-bound points.
        for color_name, color_attrs in self.colormap.items():
            lower_bounds: Sequence[Tuple[int, int]] = color_attrs['lower_bounds']
            s_min, b_max = lower_bounds[0]
            s_max, b_min = lower_bounds[-1]
            self.colormap[color_name]['saturation_range'] = [(s_min, s_max)]
            self.colormap[color_name]['brightness_range'] = [(b_min, b_max)]
    def generate(self,
                 hue: Optional[HueType] = None,
                 luminosity: Optional[str] = None,
                 color_format: str = 'hex') -> str:
        """Generate one random color string in the requested format.

        `hue` may be a color name from COLOR_MAP, a number, or a 2-element
        range; `luminosity` is one of 'bright', 'dark', 'light', 'random'
        or None; `color_format` is 'hex' (default), 'hsv', 'hsl' or 'rgb'.
        """
        h = self.pick_hue(hue)
        s = self.pick_saturation(h, hue, luminosity)
        b = self.pick_brightness(h, s, luminosity)
        return self.set_format((h, s, b), color_format)
    def pick_hue(self, hue: Optional[HueType]) -> int:
        """Pick a hue (degrees in [0, 360)) from the range implied by `hue`."""
        hue_ = self.random_within(self.get_hue_range(hue))
        # Hue ranges may extend below zero (wrap-around reds); normalize.
        if hue_ < 0:
            hue_ += 360
        return hue_
    def pick_saturation(self, hue: int, hue_name: Optional[HueType], luminosity: Optional[str]) -> int:
        """Pick a saturation (0-100) consistent with the hue and luminosity."""
        if luminosity is None:
            luminosity = ''
        if luminosity == 'random':
            return self.random_within((0, 100))
        # Monochrome colors are defined by zero saturation.
        if isinstance(hue_name, str) and hue_name == 'monochrome':
            return 0
        s_min, s_max = self.get_saturation_range(hue)
        # Narrow the saturation window according to the requested luminosity.
        if luminosity == 'bright':
            s_min = 55
        elif luminosity == 'dark':
            s_min = s_max - 10
        elif luminosity == 'light':
            s_max = 55
        return self.random_within((s_min, s_max))
    def pick_brightness(self, h: int, s: int, luminosity: Optional[str]) -> int:
        """Pick a brightness (0-100) above the color's minimum for (h, s)."""
        if luminosity is None:
            luminosity = ''
        b_min = self.get_minimum_brightness(h, s)
        b_max = 100
        if luminosity == 'dark':
            b_max = b_min + 20
        elif luminosity == 'light':
            b_min = (b_max + b_min) // 2
        elif luminosity == 'random':
            b_min = 0
            b_max = 100
        return self.random_within((b_min, b_max))
    def set_format(self, hsv: Tuple[int, int, int], color_format: str) -> str:
        """Render an (h, s, v) triple as 'hsv'/'hsl'/'rgb' text or hex (default)."""
        if color_format == 'hsv':
            color = f'hsv({hsv[0]}, {hsv[1]}, {hsv[2]})'
        elif color_format == 'hsl':
            hsl = self.hsv_to_hsl(hsv)
            color = f'hsl({hsl[0]}, {hsl[1]}, {hsl[2]})'
        elif color_format == 'rgb':
            rgb = self.hsv_to_rgb(hsv)
            color = f'rgb({rgb[0]}, {rgb[1]}, {rgb[2]})'
        else:
            rgb = self.hsv_to_rgb(hsv)
            color = f'#{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}'
        return color
    def get_minimum_brightness(self, h: int, s: int) -> int:
        """Interpolate the minimum acceptable brightness for saturation `s`.

        Linearly interpolates between the two lower-bound points of the
        color's table that bracket `s`; returns 0 if `s` falls outside.
        """
        lower_bounds: Sequence[Tuple[int, int]] = self.get_color_info(h)['lower_bounds']
        for i in range(len(lower_bounds) - 1):
            s1, v1 = lower_bounds[i]
            s2, v2 = lower_bounds[i + 1]
            if s1 <= s <= s2:
                # Line through (s1, v1) and (s2, v2), evaluated at s.
                m: float = (v2 - v1) / (s2 - s1)
                b: float = v1 - m * s1
                return int(m * s + b)
        return 0
    def get_hue_range(self, color_input: Optional[HueType]) -> Tuple[int, int]:
        """Resolve a hue spec (number, color name, range or None) to a range.

        Raises TypeError for anything that is not a valid hue spec.
        """
        if isinstance(color_input, (int, float)) and 0 <= color_input <= 360:
            color_input = int(color_input)
            return (color_input, color_input)
        elif isinstance(color_input, str) and color_input in self.colormap:
            return self.colormap[color_input]['hue_range'][0]
        elif color_input is None:
            # No constraint: any hue.
            return (0, 360)
        if isinstance(color_input, list):
            color_input = tuple(color_input)
        if (isinstance(color_input, tuple) and len(color_input) == 2
                and all(isinstance(c, (float, int)) for c in color_input)):
            v1 = int(color_input[0])
            v2 = int(color_input[1])
            # Normalize ordering and clamp to [0, 360].
            if v2 < v1:
                v1, v2 = v2, v1
            v1 = max(v1, 0)
            v2 = min(v2, 360)
            return (v1, v2)
        raise TypeError('Hue must be a valid string, numeric type, or a tuple/list of 2 numeric types.')
    def get_saturation_range(self, hue: int) -> Tuple[int, int]:
        """Return the (min, max) saturation for the color containing `hue`."""
        return self.get_color_info(hue)['saturation_range'][0]
    def get_color_info(self, hue: int) -> Dict[str, Sequence[Tuple[int, int]]]:
        """Return the COLOR_MAP entry whose hue range contains `hue`.

        Raises ValueError if no entry matches.
        """
        # The upper red band wraps around; map it to the negative range
        # used by the table.
        if 334 <= hue <= 360:
            hue -= 360
        for color_name, color in self.colormap.items():
            hue_range: Tuple[int, int] = color['hue_range'][0]
            if hue_range[0] <= hue <= hue_range[1]:
                return self.colormap[color_name]
        else:
            raise ValueError('Value of hue `%s` is invalid.' % hue)
    def random_within(self, r: Sequence[int]) -> int:
        """Return a random integer in the inclusive range [r[0], r[1]]."""
        return self.random.randint(int(r[0]), int(r[1]))
    @classmethod
    def hsv_to_rgb(cls, hsv: Tuple[int, int, int]) -> Tuple[int, int, int]:
        """Convert an (h, s, v) triple (360/100/100 scale) to 8-bit RGB."""
        h, s, v = hsv
        # Clamp hue away from the exact 0/360 endpoints — presumably to
        # avoid degenerate endpoint output; TODO confirm intent.
        h = max(h, 1)
        h = min(h, 359)
        r, g, b = colorsys.hsv_to_rgb(h / 360, s / 100, v / 100)
        return (int(r * 255), int(g * 255), int(b * 255))
    @classmethod
    def hsv_to_hsl(cls, hsv: Tuple[int, int, int]) -> Tuple[int, int, int]:
        """Convert an (h, s, v) triple (360/100/100 scale) to HSL (360/100/100)."""
        h, s, v = hsv
        s_: float = s / 100.0
        v_: float = v / 100.0
        # Standard HSV -> HSL: L = V * (2 - S) / 2, with the saturation
        # re-derived from L (degenerate at L == 0 or L == 1).
        l = 0.5 * (v_) * (2 - s_)
        s_ = 0.0 if l in [0, 1] else v_ * s_ / (1 - math.fabs(2 * l - 1))
        return (int(h), int(s_ * 100), int(l * 100))
| true | true |
1c30f8108c8a95cb3ca7cc63b5f21bad097144b7 | 14,217 | py | Python | sparse/_utils.py | sayandip18/sparse | 08daaad8edc59e7a7c432a97ae4f9321622e1bd3 | [
"BSD-3-Clause"
] | 1 | 2022-02-22T08:16:13.000Z | 2022-02-22T08:16:13.000Z | sparse/_utils.py | sayandip18/sparse | 08daaad8edc59e7a7c432a97ae4f9321622e1bd3 | [
"BSD-3-Clause"
] | null | null | null | sparse/_utils.py | sayandip18/sparse | 08daaad8edc59e7a7c432a97ae4f9321622e1bd3 | [
"BSD-3-Clause"
] | null | null | null | import functools
from collections.abc import Iterable
from numbers import Integral
from functools import reduce
import operator
import numpy as np
def assert_eq(x, y, check_nnz=True, compare_dtype=True, **kwargs):
    """
    Assert that two (sparse or dense) arrays agree in shape, dtype and
    contents; sparse (COO) inputs are additionally checked for canonical
    form and, optionally, for matching stored-element structure.

    Parameters
    ----------
    x, y : SparseArray or numpy.ndarray
        Arrays to compare.
    check_nnz : bool
        Also verify stored-element counts (and coordinates for COO/COO).
    compare_dtype : bool
        Also require identical dtypes.
    **kwargs :
        Forwarded to the element comparison (e.g. ``atol``/``rtol``).
    """
    from ._coo import COO
    assert x.shape == y.shape
    if compare_dtype:
        assert x.dtype == y.dtype
    # Exact comparison for integer/integer pairs; tolerance-based and
    # NaN-tolerant comparison otherwise.
    check_equal = (
        np.array_equal
        if np.issubdtype(x.dtype, np.integer) and np.issubdtype(y.dtype, np.integer)
        else functools.partial(np.allclose, equal_nan=True)
    )
    if isinstance(x, COO):
        assert is_canonical(x)
    if isinstance(y, COO):
        assert is_canonical(y)
    if isinstance(x, COO) and isinstance(y, COO) and check_nnz:
        # Both sparse: compare the internal representations directly and
        # skip densification entirely.
        assert np.array_equal(x.coords, y.coords)
        assert check_equal(x.data, y.data, **kwargs)
        assert x.fill_value == y.fill_value
        return
    # Mixed or dense case: densify whatever supports it, validating nnz
    # against the densified result along the way.
    if hasattr(x, "todense"):
        xx = x.todense()
        if check_nnz:
            assert_nnz(x, xx)
    else:
        xx = x
    if hasattr(y, "todense"):
        yy = y.todense()
        if check_nnz:
            assert_nnz(y, yy)
    else:
        yy = y
    assert check_equal(xx, yy, **kwargs)
def assert_nnz(s, x):
    """Assert that the stored nnz of *s* matches its dense counterpart *x*."""
    if hasattr(s, "fill_value"):
        fill_value = s.fill_value
    else:
        fill_value = _zero_of_dtype(s.dtype)
    # Count dense entries that differ from the fill value.
    assert s.nnz == np.sum(~equivalent(x, fill_value))
def is_canonical(x):
    """Return whether COO array *x* is canonical: coordinates strictly
    sorted (no duplicates) and no stored entries equal to the fill value."""
    if not x.shape:
        # 0-d arrays are trivially canonical.
        return True
    sorted_unique = bool((np.diff(x.linear_loc()) > 0).all())
    no_stored_fill = not equivalent(x.data, x.fill_value).any()
    return sorted_unique and no_stored_fill
def _zero_of_dtype(dtype):
"""
Creates a ()-shaped 0-dimensional zero array of a given dtype.
Parameters
----------
dtype : numpy.dtype
The dtype for the array.
Returns
-------
np.ndarray
The zero array.
"""
return np.zeros((), dtype=dtype)[()]
def random(
    shape,
    density=None,
    nnz=None,
    random_state=None,
    data_rvs=None,
    format="coo",
    fill_value=None,
    idx_dtype=None,
    **kwargs,
):
    """Generate a random sparse multidimensional array

    Parameters
    ----------
    shape : Tuple[int]
        Shape of the array
    density : float, optional
        Density of the generated array; default is 0.01.
        Mutually exclusive with `nnz`.
    nnz : int, optional
        Number of nonzero elements in the generated array.
        Mutually exclusive with `density`.
    random_state : Union[numpy.random.RandomState, int], optional
        Random number generator or random seed. If not given, the
        singleton numpy.random will be used. This random state will be used
        for sampling the sparsity structure, but not necessarily for sampling
        the values of the structurally nonzero entries of the matrix.
    data_rvs : Callable
        Data generation callback. Must accept one single parameter: number of
        :code:`nnz` elements, and return one single NumPy array of exactly
        that length.
    format : str
        The format to return the output array in.
    fill_value : scalar
        The fill value of the output array.
    idx_dtype : numpy.dtype, optional
        If given, dtype to use for the coordinates; it must be able to hold
        the largest dimension in ``shape``.
    **kwargs :
        Forwarded to :code:`asformat` when converting to ``format``.

    Returns
    -------
    SparseArray
        The generated random matrix.

    See Also
    --------
    :obj:`scipy.sparse.rand` : Equivalent Scipy function.
    :obj:`numpy.random.rand` : Similar Numpy function.

    Examples
    --------
    >>> from sparse import random
    >>> from scipy import stats
    >>> rvs = lambda x: stats.poisson(25, loc=10).rvs(x, random_state=np.random.RandomState(1))
    >>> s = random((2, 3, 4), density=0.25, random_state=np.random.RandomState(1), data_rvs=rvs)
    >>> s.todense()  # doctest: +NORMALIZE_WHITESPACE
    array([[[ 0, 0, 0, 0],
            [ 0, 34, 0, 0],
            [33, 34, 0, 29]],
    <BLANKLINE>
           [[30, 0, 0, 34],
            [ 0, 0, 0, 0],
            [ 0, 0, 0, 0]]])
    """
    # Copied, in large part, from scipy.sparse.random
    # See https://github.com/scipy/scipy/blob/master/LICENSE.txt
    from ._coo import COO
    if density is not None and nnz is not None:
        raise ValueError("'density' and 'nnz' are mutually exclusive")
    if density is None:
        density = 0.01
    if not (0 <= density <= 1):
        raise ValueError("density {} is not in the unit interval".format(density))
    elements = np.prod(shape, dtype=np.intp)
    if nnz is None:
        nnz = int(elements * density)
    if not (0 <= nnz <= elements):
        raise ValueError(
            "cannot generate {} nonzero elements "
            "for an array with {} total elements".format(nnz, elements)
        )
    if random_state is None:
        random_state = np.random
    elif isinstance(random_state, Integral):
        random_state = np.random.RandomState(random_state)
    if data_rvs is None:
        data_rvs = random_state.rand
    # Use the algorithm from python's random.sample for k < mn/3.
    if elements < 3 * nnz:
        # Dense-ish case: draw all flat positions at once without replacement.
        ind = random_state.choice(elements, size=nnz, replace=False)
    else:
        # Sparse case: rejection-sample distinct flat indices one at a time,
        # avoiding materialization of all `elements` candidate slots.
        ind = np.empty(nnz, dtype=np.min_scalar_type(elements - 1))
        selected = set()
        for i in range(nnz):
            j = random_state.randint(elements)
            while j in selected:
                j = random_state.randint(elements)
            selected.add(j)
            ind[i] = j
    data = data_rvs(nnz)
    # Build a 1-D COO over the flat indices, then fold into the target shape.
    ar = COO(ind[None, :], data, shape=elements, fill_value=fill_value,).reshape(shape)
    if idx_dtype:
        if can_store(idx_dtype, max(shape)):
            ar.coords = ar.coords.astype(idx_dtype)
        else:
            raise ValueError(
                "cannot cast array with shape {} to dtype {}.".format(shape, idx_dtype)
            )
    return ar.asformat(format, **kwargs)
def isscalar(x):
    """Like :func:`numpy.isscalar`, but sparse arrays never count as scalars."""
    from ._sparse_array import SparseArray
    if isinstance(x, SparseArray):
        return False
    return np.isscalar(x)
def random_value_array(value, fraction):
    """
    Build a ``data_rvs`` callback (for :func:`random`) in which a fixed
    fraction of the generated values is a constant.

    Parameters
    ----------
    value : scalar
        The constant to insert.
    fraction : float
        Fraction (in [0, 1]) of each generated batch set to ``value``; the
        remainder is uniform random in [0, 1).

    Returns
    -------
    callable
        A function mapping a count ``n`` to an array of ``n`` floats.
    """
    def replace_values(n):
        i = int(n * fraction)
        # np.float64 rather than np.float_: the latter alias was removed
        # in NumPy 2.0 (identical dtype on all NumPy versions).
        ar = np.empty((n,), dtype=np.float64)
        ar[:i] = value
        ar[i:] = np.random.rand(n - i)
        return ar

    return replace_values
def normalize_axis(axis, ndim):
    """
    Convert axis indices (possibly negative) to their non-negative
    equivalents for an array with ``ndim`` dimensions.

    Parameters
    ----------
    axis : Union[int, Iterable[int], None]
        The axis indices.
    ndim : int
        Number of dimensions to normalize axis indices against.

    Returns
    -------
    axis
        The normalized axis indices (int, tuple of ints, or None).
    """
    if axis is None:
        return None
    if isinstance(axis, Integral):
        normalized = int(axis)
        if normalized < 0:
            normalized += ndim
        if not 0 <= normalized < ndim:
            raise ValueError("Invalid axis index %d for ndim=%d" % (normalized, ndim))
        return normalized
    if isinstance(axis, Iterable):
        # Every entry must be an integer before recursing element-wise.
        if any(not isinstance(a, Integral) for a in axis):
            raise ValueError("axis %s not understood" % axis)
        return tuple(normalize_axis(a, ndim) for a in axis)
    raise ValueError("axis %s not understood" % axis)
def equivalent(x, y):
    """
    Element-wise equivalence of two scalars or arrays (with broadcasting),
    treating NaN as equal to NaN. Assumes a consistent dtype.

    Parameters
    ----------
    x : scalar or numpy.ndarray
    y : scalar or numpy.ndarray

    Returns
    -------
    equivalent : scalar or numpy.ndarray
        The element-wise comparison of where two arrays are equivalent.

    Examples
    --------
    >>> equivalent(1, 1)
    True
    >>> equivalent(np.nan, np.nan + 1)
    True
    >>> equivalent(1, 2)
    False
    >>> equivalent(np.inf, np.inf)
    True
    >>> equivalent(np.PZERO, np.NZERO)
    True
    """
    x = np.asarray(x)
    y = np.asarray(y)
    # Dtypes that cannot represent NaN: plain equality suffices.
    exact_dtypes = (np.integer, np.bool_, np.character)
    if any(np.issubdtype(x.dtype, t) for t in exact_dtypes):
        return x == y
    # Floats: equal, or both NaN (x != x is the NaN test).
    # FIXME: Complex floats and np.void with multiple values can't be compared properly.
    # lgtm [py/comparison-of-identical-expressions]
    both_nan = (x != x) & (y != y)
    return (x == y) | both_nan
# copied from zarr
# See https://github.com/zarr-developers/zarr-python/blob/master/zarr/util.py
def human_readable_size(size):
    """Format a byte count using binary (1024-based) unit suffixes."""
    if size < 2 ** 10:
        return "%s" % size
    # Walk the suffix table; each entry gives the exclusive upper bound
    # (as a power of two) and the suffix for values below it.
    for exponent, suffix in ((20, "K"), (30, "M"), (40, "G"), (50, "T")):
        if size < 2 ** exponent:
            return "%.1f%s" % (size / float(2 ** (exponent - 10)), suffix)
    return "%.1fP" % (size / float(2 ** 50))
def html_table(arr):
    """
    Build an HTML summary table for a sparse array (used by Jupyter reprs).

    Parameters
    ----------
    arr : SparseArray
        Must expose ``nnz``, ``size``, ``dtype`` and ``shape``; ``nbytes``
        and (for GCXS) ``compressed_axes`` are reported when present.

    Returns
    -------
    str
        An HTML ``<table>`` fragment describing ``arr``.
    """
    table = "<table>"
    table += "<tbody>"
    headings = ["Format", "Data Type", "Shape", "nnz", "Density", "Read-only"]
    # Built-in float instead of np.float_: that alias was removed in NumPy 2.0.
    density = float(arr.nnz) / float(arr.size)
    info = [
        type(arr).__name__.lower(),
        str(arr.dtype),
        str(arr.shape),
        str(arr.nnz),
        str(density),
    ]
    # read-only: arrays without __setitem__ cannot be mutated in place
    info.append(str(not hasattr(arr, "__setitem__")))
    if hasattr(arr, "nbytes"):
        headings.append("Size")
        info.append(human_readable_size(arr.nbytes))
        headings.append("Storage ratio")
        # Actual bytes used vs. the equivalent dense array's bytes.
        info.append(
            "%.1f"
            % (
                float(arr.nbytes)
                / float(reduce(operator.mul, arr.shape, 1) * arr.dtype.itemsize)
            )
        )
    # compressed_axes (GCXS-specific)
    if type(arr).__name__ == "GCXS":
        headings.append("Compressed Axes")
        info.append(str(arr.compressed_axes))
    for h, i in zip(headings, info):
        table += (
            "<tr>"
            '<th style="text-align: left">%s</th>'
            '<td style="text-align: left">%s</td>'
            "</tr>" % (h, i)
        )
    table += "</tbody>"
    table += "</table>"
    return table
def check_compressed_axes(ndim, compressed_axes):
    """
    Validate ``compressed_axes`` against the dimensionality of an array.

    Parameters
    ----------
    ndim : int or Iterable
        Number of dimensions, or a shape whose length is used instead.
    compressed_axes : Iterable or None

    Raises
    ------
    ValueError
        If the compressed_axes are incompatible with the number of dimensions.
    """
    if compressed_axes is None:
        return
    # A shape may be passed instead of the dimension count.
    if isinstance(ndim, Iterable):
        ndim = len(ndim)
    if not isinstance(compressed_axes, Iterable):
        raise ValueError("compressed_axes must be an iterable")
    if len(compressed_axes) == ndim:
        raise ValueError("cannot compress all axes")
    # De-duplicating must leave the sequence unchanged: sorted, no repeats.
    if not np.array_equal(list(set(compressed_axes)), compressed_axes):
        raise ValueError("axes must be sorted without repeats")
    if any(not isinstance(a, Integral) for a in compressed_axes):
        raise ValueError("axes must be represented with integers")
    if min(compressed_axes) < 0 or max(compressed_axes) >= ndim:
        raise ValueError("axis out of range")
def check_zero_fill_value(*args):
    """
    Verify that every sparse argument has a zero fill value.

    Parameters
    ----------
    *args : Iterable[SparseArray]

    Raises
    ------
    ValueError
        If all arguments don't have zero fill-values.

    Examples
    --------
    >>> import sparse
    >>> s1 = sparse.random((10,), density=0.5)
    >>> s2 = sparse.random((10,), density=0.5, fill_value=0.5)
    >>> check_zero_fill_value(s1)
    >>> check_zero_fill_value(s2)
    Traceback (most recent call last):
        ...
    ValueError: This operation requires zero fill values, but argument 0 had a fill value of 0.5.
    >>> check_zero_fill_value(s1, s2)
    Traceback (most recent call last):
        ...
    ValueError: This operation requires zero fill values, but argument 1 had a fill value of 0.5.
    """
    for position, arg in enumerate(args):
        # Arguments with no fill value (e.g. dense arrays) always pass.
        if not hasattr(arg, "fill_value"):
            continue
        if equivalent(arg.fill_value, _zero_of_dtype(arg.dtype)):
            continue
        raise ValueError(
            "This operation requires zero fill values, "
            "but argument {:d} had a fill value of {!s}.".format(position, arg.fill_value)
        )
def check_consistent_fill_value(arrays):
    """
    Verify that all given sparse arrays share a single fill value.

    Parameters
    ----------
    arrays : Iterable[SparseArray]

    Raises
    ------
    ValueError
        If all elements of :code:`arrays` don't have the same fill-value.

    Examples
    --------
    >>> import sparse
    >>> s1 = sparse.random((10,), density=0.5, fill_value=0.1)
    >>> s2 = sparse.random((10,), density=0.5, fill_value=0.5)
    >>> check_consistent_fill_value([s1, s1])
    >>> check_consistent_fill_value([s1, s2])  # doctest: +NORMALIZE_WHITESPACE
    Traceback (most recent call last):
        ...
    ValueError: This operation requires consistent fill-values, but argument 1 had a fill value of 0.5,\
which is different from a fill_value of 0.1 in the first argument.
    """
    arrays = list(arrays)
    from ._sparse_array import SparseArray
    if not all(isinstance(s, SparseArray) for s in arrays):
        raise ValueError("All arrays must be instances of SparseArray.")
    if not arrays:
        raise ValueError("At least one array required.")
    # Compare everything against the first array's fill value.
    fv = arrays[0].fill_value
    for i, arg in enumerate(arrays):
        if equivalent(fv, arg.fill_value):
            continue
        raise ValueError(
            "This operation requires consistent fill-values, "
            "but argument {:d} had a fill value of {!s}, which "
            "is different from a fill_value of {!s} in the first "
            "argument.".format(i, arg.fill_value, fv)
        )
def get_out_dtype(arr, scalar):
    """Return a dtype able to represent both *arr*'s values and *scalar*;
    falls back to the minimal scalar type when *arr*'s dtype is too small."""
    if can_store(arr.dtype, scalar):
        return arr.dtype
    return np.min_scalar_type(scalar)
def can_store(dtype, scalar):
    """Return whether ``scalar`` survives a round-trip through ``dtype``."""
    try:
        return np.array(scalar, dtype=dtype) == np.array(scalar)
    except OverflowError:
        # NumPy >= 2.0 raises for out-of-bound Python integers instead of
        # wrapping; an overflow means the dtype cannot store the value.
        return False
def is_unsigned_dtype(dtype):
    """Return whether ``dtype`` cannot represent -1 faithfully (i.e. is unsigned)."""
    try:
        return not np.array(-1, dtype=dtype) == np.array(-1)
    except OverflowError:
        # NumPy >= 2.0 raises for -1 converted to an unsigned dtype instead
        # of wrapping; that failure itself proves the dtype is unsigned.
        return True
def convert_format(format):
    """
    Normalize a format specifier to a lowercase string.

    Parameters
    ----------
    format : type or str
        A SparseArray subclass, or a format name such as ``"coo"``.

    Returns
    -------
    str
        The lowercase format name.

    Raises
    ------
    ValueError
        If ``format`` is neither a string nor a SparseArray subclass.
    """
    from ._sparse_array import SparseArray
    if isinstance(format, str):
        return format
    if isinstance(format, type) and issubclass(format, SparseArray):
        return format.__name__.lower()
    raise ValueError(f"Invalid format: {format}")
| 28.490982 | 104 | 0.600267 | import functools
from collections.abc import Iterable
from numbers import Integral
from functools import reduce
import operator
import numpy as np
def assert_eq(x, y, check_nnz=True, compare_dtype=True, **kwargs):
from ._coo import COO
assert x.shape == y.shape
if compare_dtype:
assert x.dtype == y.dtype
check_equal = (
np.array_equal
if np.issubdtype(x.dtype, np.integer) and np.issubdtype(y.dtype, np.integer)
else functools.partial(np.allclose, equal_nan=True)
)
if isinstance(x, COO):
assert is_canonical(x)
if isinstance(y, COO):
assert is_canonical(y)
if isinstance(x, COO) and isinstance(y, COO) and check_nnz:
assert np.array_equal(x.coords, y.coords)
assert check_equal(x.data, y.data, **kwargs)
assert x.fill_value == y.fill_value
return
if hasattr(x, "todense"):
xx = x.todense()
if check_nnz:
assert_nnz(x, xx)
else:
xx = x
if hasattr(y, "todense"):
yy = y.todense()
if check_nnz:
assert_nnz(y, yy)
else:
yy = y
assert check_equal(xx, yy, **kwargs)
def assert_nnz(s, x):
fill_value = s.fill_value if hasattr(s, "fill_value") else _zero_of_dtype(s.dtype)
assert np.sum(~equivalent(x, fill_value)) == s.nnz
def is_canonical(x):
return not x.shape or (
(np.diff(x.linear_loc()) > 0).all()
and not equivalent(x.data, x.fill_value).any()
)
def _zero_of_dtype(dtype):
return np.zeros((), dtype=dtype)[()]
def random(
shape,
density=None,
nnz=None,
random_state=None,
data_rvs=None,
format="coo",
fill_value=None,
idx_dtype=None,
**kwargs,
):
from ._coo import COO
if density is not None and nnz is not None:
raise ValueError("'density' and 'nnz' are mutually exclusive")
if density is None:
density = 0.01
if not (0 <= density <= 1):
raise ValueError("density {} is not in the unit interval".format(density))
elements = np.prod(shape, dtype=np.intp)
if nnz is None:
nnz = int(elements * density)
if not (0 <= nnz <= elements):
raise ValueError(
"cannot generate {} nonzero elements "
"for an array with {} total elements".format(nnz, elements)
)
if random_state is None:
random_state = np.random
elif isinstance(random_state, Integral):
random_state = np.random.RandomState(random_state)
if data_rvs is None:
data_rvs = random_state.rand
if elements < 3 * nnz:
ind = random_state.choice(elements, size=nnz, replace=False)
else:
ind = np.empty(nnz, dtype=np.min_scalar_type(elements - 1))
selected = set()
for i in range(nnz):
j = random_state.randint(elements)
while j in selected:
j = random_state.randint(elements)
selected.add(j)
ind[i] = j
data = data_rvs(nnz)
ar = COO(ind[None, :], data, shape=elements, fill_value=fill_value,).reshape(shape)
if idx_dtype:
if can_store(idx_dtype, max(shape)):
ar.coords = ar.coords.astype(idx_dtype)
else:
raise ValueError(
"cannot cast array with shape {} to dtype {}.".format(shape, idx_dtype)
)
return ar.asformat(format, **kwargs)
def isscalar(x):
from ._sparse_array import SparseArray
return not isinstance(x, SparseArray) and np.isscalar(x)
def random_value_array(value, fraction):
def replace_values(n):
i = int(n * fraction)
ar = np.empty((n,), dtype=np.float_)
ar[:i] = value
ar[i:] = np.random.rand(n - i)
return ar
return replace_values
def normalize_axis(axis, ndim):
if axis is None:
return None
if isinstance(axis, Integral):
axis = int(axis)
if axis < 0:
axis += ndim
if axis >= ndim or axis < 0:
raise ValueError("Invalid axis index %d for ndim=%d" % (axis, ndim))
return axis
if isinstance(axis, Iterable):
if not all(isinstance(a, Integral) for a in axis):
raise ValueError("axis %s not understood" % axis)
return tuple(normalize_axis(a, ndim) for a in axis)
raise ValueError("axis %s not understood" % axis)
def equivalent(x, y):
x = np.asarray(x)
y = np.asarray(y)
# Can't contain NaNs
if any(np.issubdtype(x.dtype, t) for t in [np.integer, np.bool_, np.character]):
return x == y
# lgtm [py/comparison-of-identical-expressions]
return (x == y) | ((x != x) & (y != y))
# copied from zarr
# See https://github.com/zarr-developers/zarr-python/blob/master/zarr/util.py
def human_readable_size(size):
if size < 2 ** 10:
return "%s" % size
elif size < 2 ** 20:
return "%.1fK" % (size / float(2 ** 10))
elif size < 2 ** 30:
return "%.1fM" % (size / float(2 ** 20))
elif size < 2 ** 40:
return "%.1fG" % (size / float(2 ** 30))
elif size < 2 ** 50:
return "%.1fT" % (size / float(2 ** 40))
else:
return "%.1fP" % (size / float(2 ** 50))
def html_table(arr):
table = "<table>"
table += "<tbody>"
headings = ["Format", "Data Type", "Shape", "nnz", "Density", "Read-only"]
density = np.float_(arr.nnz) / np.float_(arr.size)
info = [
type(arr).__name__.lower(),
str(arr.dtype),
str(arr.shape),
str(arr.nnz),
str(density),
]
# read-only
info.append(str(not hasattr(arr, "__setitem__")))
if hasattr(arr, "nbytes"):
headings.append("Size")
info.append(human_readable_size(arr.nbytes))
headings.append("Storage ratio")
info.append(
"%.1f"
% (
np.float_(arr.nbytes)
/ np.float_(reduce(operator.mul, arr.shape, 1) * arr.dtype.itemsize)
)
)
# compressed_axes
if type(arr).__name__ == "GCXS":
headings.append("Compressed Axes")
info.append(str(arr.compressed_axes))
for h, i in zip(headings, info):
table += (
"<tr>"
'<th style="text-align: left">%s</th>'
'<td style="text-align: left">%s</td>'
"</tr>" % (h, i)
)
table += "</tbody>"
table += "</table>"
return table
def check_compressed_axes(ndim, compressed_axes):
if compressed_axes is None:
return
if isinstance(ndim, Iterable):
ndim = len(ndim)
if not isinstance(compressed_axes, Iterable):
raise ValueError("compressed_axes must be an iterable")
if len(compressed_axes) == ndim:
raise ValueError("cannot compress all axes")
if not np.array_equal(list(set(compressed_axes)), compressed_axes):
raise ValueError("axes must be sorted without repeats")
if not all(isinstance(a, Integral) for a in compressed_axes):
raise ValueError("axes must be represented with integers")
if min(compressed_axes) < 0 or max(compressed_axes) >= ndim:
raise ValueError("axis out of range")
def check_zero_fill_value(*args):
for i, arg in enumerate(args):
if hasattr(arg, "fill_value") and not equivalent(
arg.fill_value, _zero_of_dtype(arg.dtype)
):
raise ValueError(
"This operation requires zero fill values, "
"but argument {:d} had a fill value of {!s}.".format(i, arg.fill_value)
)
def check_consistent_fill_value(arrays):
arrays = list(arrays)
from ._sparse_array import SparseArray
if not all(isinstance(s, SparseArray) for s in arrays):
raise ValueError("All arrays must be instances of SparseArray.")
if len(arrays) == 0:
raise ValueError("At least one array required.")
fv = arrays[0].fill_value
for i, arg in enumerate(arrays):
if not equivalent(fv, arg.fill_value):
raise ValueError(
"This operation requires consistent fill-values, "
"but argument {:d} had a fill value of {!s}, which "
"is different from a fill_value of {!s} in the first "
"argument.".format(i, arg.fill_value, fv)
)
def get_out_dtype(arr, scalar):
out_type = arr.dtype
if not can_store(out_type, scalar):
out_type = np.min_scalar_type(scalar)
return out_type
def can_store(dtype, scalar):
return np.array(scalar, dtype=dtype) == np.array(scalar)
def is_unsigned_dtype(dtype):
return not np.array(-1, dtype=dtype) == np.array(-1)
def convert_format(format):
from ._sparse_array import SparseArray
if isinstance(format, type):
if not issubclass(format, SparseArray):
raise ValueError(f"Invalid format: {format}")
return format.__name__.lower()
if isinstance(format, str):
return format
raise ValueError(f"Invalid format: {format}")
| true | true |
1c30f8330d16ce09de8703219e810dba9626d74e | 5,751 | py | Python | framework/EntityFactoryBase.py | FlanFlanagan/raven | bd7fca18af94376a28e2144ba1da72c01c8d343c | [
"Apache-2.0"
] | 159 | 2017-03-24T21:07:06.000Z | 2022-03-20T13:44:40.000Z | framework/EntityFactoryBase.py | FlanFlanagan/raven | bd7fca18af94376a28e2144ba1da72c01c8d343c | [
"Apache-2.0"
] | 1,667 | 2017-03-27T14:41:22.000Z | 2022-03-31T19:50:06.000Z | framework/EntityFactoryBase.py | wanghy-anl/raven | ef1372364a2776385931763f2b28fdf2930c77b9 | [
"Apache-2.0"
] | 95 | 2017-03-24T21:05:03.000Z | 2022-03-08T17:30:22.000Z | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created March 15, 2020
@author: talbpaul
"""
from BaseClasses import MessageUser
from BaseClasses import InputDataUser
import PluginManager
from utils import utils
class EntityFactory(MessageUser):
  """
    Provides structure for entity factories: registration, lookup, and
    instantiation of the classes belonging to one RAVEN entity (e.g.
    Sampler), including on-demand finalization of plugin-provided types.
  """
  #############
  # API
  def __init__(self, name=None, needsRunInfo=False, returnInputParameter=False):
    """
      Constructor.
      @ In, name, str, optional, base entity name (e.g. Sampler)
      @ In, needsRunInfo, bool, optional, whether entity needs run info
      @ In, returnInputParameter, bool, optional, whether this entity can use inputParams (otherwise xml)
      @ Out, None
    """
    super().__init__()
    self.name = name                                  # name of entity, e.g. Sampler
    self.needsRunInfo = needsRunInfo                  # whether entity needs run info
    self.returnInputParameter = returnInputParameter  # use xml or inputParams
    self._registeredTypes = {}                        # registered types for this entity, name -> class
    self._pluginFactory = PluginManager               # plugin factory, if any; provided by Simulation

  def registerType(self, name, obj):
    """
      Registers class as type of this entity
      @ In, name, str, name by which entity should be known
      @ In, obj, object, class definition
      @ Out, None
    """
    # TODO check for duplicates? A re-registration currently overwrites
    # the previous class silently.
    self._registeredTypes[name] = obj

  def registerAllSubtypes(self, baseType, alias=None):
    """
      Registers all inheritors of the baseType as types by classname for this entity.
      @ In, baseType, object, base class type (e.g. Sampler.Sampler)
      @ In, alias, dict, optional, alias names to use for registration names as {"ObjectName": "AliasName"}
      @ Out, None
    """
    if alias is None:
      alias = {}
    for obj in utils.getAllSubclasses(baseType):
      name = alias.get(obj.__name__, obj.__name__)
      self.registerType(name, obj)

  def unregisterSubtype(self, name):
    """
      Remove type from registry.
      @ In, name, str, name of subtype
      @ Out, None
    """
    self._registeredTypes.pop(name, None)

  def knownTypes(self):
    """
      Returns known types.
      @ Out, knownTypes, dict_keys, the known type names
    """
    # NOTE: plugins might not be listed if they haven't been loaded yet!
    return self._registeredTypes.keys()

  def returnClass(self, Type):
    """
      Returns an object construction pointer from this module.
      @ In, Type, string, requested object
      @ Out, returnClass, object, class of the object
    """
    try:
      return self._registeredTypes[Type]
    except KeyError:
      # is this a request from an unloaded plugin?
      obj = self._checkInUnloadedPlugin(Type)
      if obj is None:
        # otherwise, error
        msg = f'"{self.name}" module does not recognize type "{Type}"; '
        msg += f'known types are: {", ".join(list(self.knownTypes()))}'
        self.raiseAnError(NameError, msg)
      else:
        return obj

  def returnInstance(self, Type, **kwargs):
    """
      Returns an instance pointer from this module.
      @ In, Type, string, requested object
      @ In, kwargs, dict, additional keyword arguments to constructor
      @ Out, returnInstance, instance, instance of the object
    """
    cls = self.returnClass(Type)
    instance = cls(**kwargs)
    return instance

  def collectInputSpecs(self, base):
    """
      Extends "base" to include all specs for all objects known by this factory as children of "base"
      @ In, base, InputData.ParameterInput, starting spec
      @ Out, None
    """
    for name in self.knownTypes():
      # BUGFIX: returnClass only takes the type name; the former extra
      # "None" argument raised a TypeError whenever this method ran.
      cls = self.returnClass(name)
      # BUGFIX: cls is a class object, so subclass membership (not
      # isinstance) is the correct check for InputDataUser support.
      if issubclass(cls, InputDataUser):
        base.addSub(cls.getInputSpecifications())

  def instanceFromXML(self, xml):
    """
      Using the provided XML, return the required instance
      @ In, xml, xml.etree.ElementTree.Element, head element for instance
      @ Out, kind, str, name of type of entity
      @ Out, name, str, identifying name of entity
      @ Out, entity, instance, object from factory
    """
    kind = xml.tag
    name = xml.attrib['name']
    entity = self.returnInstance(kind)
    return kind, name, entity

  #############
  # UTILITIES
  def _checkInUnloadedPlugin(self, typeName):
    """
      Checks if the requested entity is from a plugin (has '.' in type name), and if so loads plugin if it isn't already
      @ In, typeName, str, name of entity to check (e.g. MonteCarlo or MyPlugin.MySampler)
      @ Out, _checkInUnloadedPlugin, object, requested object if found or None if not.
    """
    # Plain (non-dotted) names never belong to a plugin; implicitly None.
    if self._pluginFactory is not None and '.' in typeName:
      pluginName, remainder = typeName.split('.', maxsplit=1)
      loadedNew = self._pluginFactory.finishLoadPlugin(pluginName)
      if not loadedNew:
        return None
      else:
        return self._registeredTypes.get(typeName, None)
| 35.720497 | 120 | 0.656581 |
from BaseClasses import MessageUser
from BaseClasses import InputDataUser
import PluginManager
from utils import utils
class EntityFactory(MessageUser):
alse):
super().__init__()
self.name = name
self.needsRunInfo = needsRunInfo
self.returnInputParameter = returnInputParameter
self._registeredTypes = {}
self._pluginFactory = PluginManager
def registerType(self, name, obj):
self._registeredTypes[name] = obj
def registerAllSubtypes(self, baseType, alias=None):
if alias is None:
alias = {}
for obj in utils.getAllSubclasses(baseType):
name = alias.get(obj.__name__, obj.__name__)
self.registerType(name, obj)
def unregisterSubtype(self, name):
self._registeredTypes.pop(name, None)
def knownTypes(self):
return self._registeredTypes.keys()
def returnClass(self, Type):
# is this from an unloaded plugin?
# return class from known types
try:
return self._registeredTypes[Type]
except KeyError:
# is this a request from an unloaded plugin?
obj = self._checkInUnloadedPlugin(Type)
if obj is None:
# otherwise, error
msg = f'"{self.name}" module does not recognize type "{Type}"; '
msg += f'known types are: {", ".join(list(self.knownTypes()))}'
self.raiseAnError(NameError, msg)
else:
return obj
def returnInstance(self, Type, **kwargs):
cls = self.returnClass(Type)
instance = cls(**kwargs)
return instance
def collectInputSpecs(self, base):
for name in self.knownTypes():
cls = self.returnClass(name, None)
if isinstance(cls, InputDataUser):
base.addSub(cls.getInputSpecifications())
def instanceFromXML(self, xml):
kind = xml.tag
name = xml.attrib['name']
entity = self.returnInstance(kind)
return kind, name, entity
#############
# UTILITIES
def _checkInUnloadedPlugin(self, typeName):
if self._pluginFactory is not None and '.' in typeName:
pluginName, remainder = typeName.split('.', maxsplit=1)
loadedNew = self._pluginFactory.finishLoadPlugin(pluginName)
if not loadedNew:
return None
else:
return self._registeredTypes.get(typeName, None)
| true | true |
1c30fbdf19846be0cef6d27e989602bf22870419 | 3,440 | py | Python | minitests/roi_harness/create_design_json.py | garvitgupta08/prjxray | dd5fb6d9d54526c3338ef745874d9a4f92066dca | [
"ISC"
] | 1 | 2020-02-28T20:54:46.000Z | 2020-02-28T20:54:46.000Z | minitests/roi_harness/create_design_json.py | garvitgupta08/prjxray | dd5fb6d9d54526c3338ef745874d9a4f92066dca | [
"ISC"
] | null | null | null | minitests/roi_harness/create_design_json.py | garvitgupta08/prjxray | dd5fb6d9d54526c3338ef745874d9a4f92066dca | [
"ISC"
] | null | null | null | import xjson
import csv
import argparse
import sys
import fasm
from prjxray.db import Database
from prjxray.roi import Roi
from prjxray.util import get_db_root, get_part
from prjxray.xjson import extract_numbers
def set_port_wires(ports, name, pin, wires_outside_roi):
    """
    Attach the out-of-ROI wire list to the port record named *name*.

    Parameters
    ----------
    ports : list of dict
        Port records (each with at least 'name' and 'pin' keys).
    name : str
        Port name to update; must exist in *ports*.
    pin : str
        Expected pin of the port; cross-checked against the stored record.
    wires_outside_roi : list of str
        "tile/wire" strings lying outside the ROI.
    """
    for port in ports:
        if port['name'] != name:
            continue
        # Numeric-aware sort so e.g. wire_2 precedes wire_10.
        port['wires_outside_roi'] = sorted(
            wires_outside_roi, key=extract_numbers)
        assert port['pin'] == pin
        return
    # Raise explicitly: a bare `assert False` is stripped under `python -O`,
    # which would silently skip this consistency check.
    raise AssertionError(name)
def main():
    """Assemble design.json from the ROI-generation tcl script outputs.

    Reads the port table, design info, pad wires and FASM output produced
    by the harness, classifies wires and FASM features as inside/outside
    the ROI, and pretty-prints the resulting JSON document to stdout.
    """
    parser = argparse.ArgumentParser(
        description=
        "Creates design.json from output of ROI generation tcl script.")
    parser.add_argument('--design_txt', required=True)
    parser.add_argument('--design_info_txt', required=True)
    parser.add_argument('--pad_wires', required=True)
    parser.add_argument('--design_fasm', required=True)
    args = parser.parse_args()

    design_json = {}
    design_json['ports'] = []
    design_json['info'] = {}

    # Port table: space-delimited header + rows, one dict per port.
    with open(args.design_txt) as f:
        for d in csv.DictReader(f, delimiter=' '):
            design_json['ports'].append(d)

    # Design info: "NAME = value" lines, all integer-valued.
    with open(args.design_info_txt) as f:
        for l in f:
            name, value = l.strip().split(' = ')
            design_json['info'][name] = int(value)

    db = Database(get_db_root(), get_part())
    grid = db.grid()

    # ROI rectangle in grid coordinates, taken from the design info.
    roi = Roi(
        db=db,
        x1=design_json['info']['GRID_X_MIN'],
        y1=design_json['info']['GRID_Y_MIN'],
        x2=design_json['info']['GRID_X_MAX'],
        y2=design_json['info']['GRID_Y_MAX'],
    )

    # Pad wires: "<name> <pin> <wire> <wire> ..." per line; keep only the
    # wires whose tile lies outside the ROI and record them on the port.
    with open(args.pad_wires) as f:
        for l in f:
            parts = l.strip().split(' ')
            name = parts[0]
            pin = parts[1]
            wires = parts[2:]

            wires_outside_roi = []

            for wire in wires:
                tile = wire.split('/')[0]
                loc = grid.loc_of_tilename(tile)

                if not roi.tile_in_roi(loc):
                    wires_outside_roi.append(wire)

            set_port_wires(design_json['ports'], name, pin, wires_outside_roi)

    # Collect the configuration frame base addresses touched by ROI tiles.
    frames_in_use = set()
    for tile in roi.gen_tiles():
        gridinfo = grid.gridinfo_at_tilename(tile)

        for bit in gridinfo.bits.values():
            frames_in_use.add(bit.base_address)

    # FASM features set outside the ROI must be preserved by any bitstream
    # built against this harness; unknown segments are a hard error.
    required_features = []
    for fasm_line in fasm.parse_fasm_filename(args.design_fasm):
        if fasm_line.annotations:
            for annotation in fasm_line.annotations:
                if annotation.name != 'unknown_segment':
                    continue

                unknown_base_address = int(annotation.value, 0)

                assert False, "Found unknown bit in base address 0x{:08x}".format(
                    unknown_base_address)

        if not fasm_line.set_feature:
            continue

        tile = fasm_line.set_feature.feature.split('.')[0]
        loc = grid.loc_of_tilename(tile)
        gridinfo = grid.gridinfo_at_tilename(tile)

        not_in_roi = not roi.tile_in_roi(loc)

        if not_in_roi:
            required_features.append(fasm_line)

    # Canonicalize and sort for a stable, diffable output document.
    design_json['required_features'] = sorted(
        fasm.fasm_tuple_to_string(required_features,
                                  canonical=True).split('\n'),
        key=extract_numbers)

    design_json['ports'].sort(key=lambda x: extract_numbers(x['name']))
    xjson.pprint(sys.stdout, design_json)
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
| 28.429752 | 82 | 0.602035 | import xjson
import csv
import argparse
import sys
import fasm
from prjxray.db import Database
from prjxray.roi import Roi
from prjxray.util import get_db_root, get_part
from prjxray.xjson import extract_numbers
def set_port_wires(ports, name, pin, wires_outside_roi):
for port in ports:
if name == port['name']:
port['wires_outside_roi'] = sorted(
wires_outside_roi, key=extract_numbers)
assert port['pin'] == pin
return
assert False, name
def main():
parser = argparse.ArgumentParser(
description=
"Creates design.json from output of ROI generation tcl script.")
parser.add_argument('--design_txt', required=True)
parser.add_argument('--design_info_txt', required=True)
parser.add_argument('--pad_wires', required=True)
parser.add_argument('--design_fasm', required=True)
args = parser.parse_args()
design_json = {}
design_json['ports'] = []
design_json['info'] = {}
with open(args.design_txt) as f:
for d in csv.DictReader(f, delimiter=' '):
design_json['ports'].append(d)
with open(args.design_info_txt) as f:
for l in f:
name, value = l.strip().split(' = ')
design_json['info'][name] = int(value)
db = Database(get_db_root(), get_part())
grid = db.grid()
roi = Roi(
db=db,
x1=design_json['info']['GRID_X_MIN'],
y1=design_json['info']['GRID_Y_MIN'],
x2=design_json['info']['GRID_X_MAX'],
y2=design_json['info']['GRID_Y_MAX'],
)
with open(args.pad_wires) as f:
for l in f:
parts = l.strip().split(' ')
name = parts[0]
pin = parts[1]
wires = parts[2:]
wires_outside_roi = []
for wire in wires:
tile = wire.split('/')[0]
loc = grid.loc_of_tilename(tile)
if not roi.tile_in_roi(loc):
wires_outside_roi.append(wire)
set_port_wires(design_json['ports'], name, pin, wires_outside_roi)
frames_in_use = set()
for tile in roi.gen_tiles():
gridinfo = grid.gridinfo_at_tilename(tile)
for bit in gridinfo.bits.values():
frames_in_use.add(bit.base_address)
required_features = []
for fasm_line in fasm.parse_fasm_filename(args.design_fasm):
if fasm_line.annotations:
for annotation in fasm_line.annotations:
if annotation.name != 'unknown_segment':
continue
unknown_base_address = int(annotation.value, 0)
assert False, "Found unknown bit in base address 0x{:08x}".format(
unknown_base_address)
if not fasm_line.set_feature:
continue
tile = fasm_line.set_feature.feature.split('.')[0]
loc = grid.loc_of_tilename(tile)
gridinfo = grid.gridinfo_at_tilename(tile)
not_in_roi = not roi.tile_in_roi(loc)
if not_in_roi:
required_features.append(fasm_line)
design_json['required_features'] = sorted(
fasm.fasm_tuple_to_string(required_features,
canonical=True).split('\n'),
key=extract_numbers)
design_json['ports'].sort(key=lambda x: extract_numbers(x['name']))
xjson.pprint(sys.stdout, design_json)
if __name__ == '__main__':
main()
| true | true |
1c30fbf78a4054a0a941ef7d5b9fefd5478362a1 | 58 | py | Python | pyforchange/__init__.py | PythonForChange/pyforchange | 2cc5afef227ac68147e291e447c57924586a0b12 | [
"MIT"
] | 1 | 2021-06-07T02:10:41.000Z | 2021-06-07T02:10:41.000Z | pyforchange/__init__.py | PythonForChange/pyforchange | 2cc5afef227ac68147e291e447c57924586a0b12 | [
"MIT"
] | null | null | null | pyforchange/__init__.py | PythonForChange/pyforchange | 2cc5afef227ac68147e291e447c57924586a0b12 | [
"MIT"
] | null | null | null | _author="eanorambuena"
_author_email="eanorambuena@uc.cl"
| 19.333333 | 34 | 0.827586 | _author="eanorambuena"
_author_email="eanorambuena@uc.cl"
| true | true |
1c30fcad0ee72b5267c870a9bf812ed7d53bea43 | 3,603 | py | Python | jupyter_client/restarter.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 4 | 2018-01-19T17:15:06.000Z | 2018-01-24T00:06:42.000Z | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/jupyter_client/restarter.py | nitin-cherian/LifeLongLearning | 84084792058358365162c645742c70064a2d5fd6 | [
"MIT"
] | 10 | 2017-07-13T00:24:03.000Z | 2017-07-17T07:39:03.000Z | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/jupyter_client/restarter.py | nitin-cherian/LifeLongLearning | 84084792058358365162c645742c70064a2d5fd6 | [
"MIT"
] | 7 | 2017-08-01T04:02:07.000Z | 2018-10-06T21:07:20.000Z | """A basic kernel monitor with autorestarting.
This watches a kernel's state using KernelManager.is_alive and auto
restarts the kernel if it dies.
It is an incomplete base class, and must be subclassed.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets.config.configurable import LoggingConfigurable
from traitlets import (
Instance, Float, Dict, Bool, Integer,
)
class KernelRestarter(LoggingConfigurable):
    """Monitor and autorestart a kernel."""

    # The manager whose kernel liveness is polled and, if needed, restarted.
    kernel_manager = Instance('jupyter_client.KernelManager')

    debug = Bool(False, config=True,
        help="""Whether to include every poll event in debugging output.

        Has to be set explicitly, because there will be *a lot* of output.
        """
    )

    time_to_dead = Float(3.0, config=True,
        help="""Kernel heartbeat interval in seconds."""
    )

    restart_limit = Integer(5, config=True,
        help="""The number of consecutive autorestarts before the kernel is presumed dead."""
    )

    # True while we are in an autorestart cycle (kernel died, restart issued).
    _restarting = Bool(False)
    # Number of consecutive restarts attempted in the current cycle.
    _restart_count = Integer(0)

    # Maps event name ('restart'/'dead') -> list of zero-arg callables.
    callbacks = Dict()

    def _callbacks_default(self):
        # traitlets default: start with empty callback lists for both events
        return dict(restart=[], dead=[])

    def start(self):
        """Start the polling of the kernel."""
        raise NotImplementedError("Must be implemented in a subclass")

    def stop(self):
        """Stop the kernel polling."""
        raise NotImplementedError("Must be implemented in a subclass")

    def add_callback(self, f, event='restart'):
        """register a callback to fire on a particular event

        Possible values for event:

        'restart' (default): kernel has died, and will be restarted.
        'dead': restart has failed, kernel will be left dead.

        """
        self.callbacks[event].append(f)

    def remove_callback(self, f, event='restart'):
        """unregister a callback to fire on a particular event

        Possible values for event:

        'restart' (default): kernel has died, and will be restarted.
        'dead': restart has failed, kernel will be left dead.

        """
        try:
            self.callbacks[event].remove(f)
        except ValueError:
            # removing a callback that was never registered is a no-op
            pass

    def _fire_callbacks(self, event):
        """fire our callbacks for a particular event"""
        for callback in self.callbacks[event]:
            try:
                callback()
            except Exception as e:
                # one failing callback must not prevent the others from firing
                self.log.error("KernelRestarter: %s callback %r failed", event, callback, exc_info=True)

    def poll(self):
        # Called periodically by the subclass's scheduler (every
        # ``time_to_dead`` seconds) to check liveness and drive restarts.
        if self.debug:
            self.log.debug('Polling kernel...')
        if not self.kernel_manager.is_alive():
            # Kernel is dead: either continue the current restart cycle or
            # begin a new one.
            if self._restarting:
                self._restart_count += 1
            else:
                self._restart_count = 1

            if self._restart_count >= self.restart_limit:
                # Too many consecutive failures: give up and notify 'dead'.
                self.log.warning("KernelRestarter: restart failed")
                self._fire_callbacks('dead')
                self._restarting = False
                self._restart_count = 0
                self.stop()
            else:
                self.log.info('KernelRestarter: restarting kernel (%i/%i)',
                    self._restart_count,
                    self.restart_limit
                )
                self._fire_callbacks('restart')
                self.kernel_manager.restart_kernel(now=True)
                self._restarting = True
        else:
            # Kernel is alive: a survived poll ends any restart cycle.
            if self._restarting:
                self.log.debug("KernelRestarter: restart apparently succeeded")
            self._restarting = False
| 32.169643 | 104 | 0.610047 |
from traitlets.config.configurable import LoggingConfigurable
from traitlets import (
Instance, Float, Dict, Bool, Integer,
)
class KernelRestarter(LoggingConfigurable):
kernel_manager = Instance('jupyter_client.KernelManager')
debug = Bool(False, config=True,
help="""Whether to include every poll event in debugging output.
Has to be set explicitly, because there will be *a lot* of output.
"""
)
time_to_dead = Float(3.0, config=True,
help="""Kernel heartbeat interval in seconds."""
)
restart_limit = Integer(5, config=True,
help="""The number of consecutive autorestarts before the kernel is presumed dead."""
)
_restarting = Bool(False)
_restart_count = Integer(0)
callbacks = Dict()
def _callbacks_default(self):
return dict(restart=[], dead=[])
def start(self):
raise NotImplementedError("Must be implemented in a subclass")
def stop(self):
raise NotImplementedError("Must be implemented in a subclass")
def add_callback(self, f, event='restart'):
self.callbacks[event].append(f)
def remove_callback(self, f, event='restart'):
try:
self.callbacks[event].remove(f)
except ValueError:
pass
def _fire_callbacks(self, event):
for callback in self.callbacks[event]:
try:
callback()
except Exception as e:
self.log.error("KernelRestarter: %s callback %r failed", event, callback, exc_info=True)
def poll(self):
if self.debug:
self.log.debug('Polling kernel...')
if not self.kernel_manager.is_alive():
if self._restarting:
self._restart_count += 1
else:
self._restart_count = 1
if self._restart_count >= self.restart_limit:
self.log.warning("KernelRestarter: restart failed")
self._fire_callbacks('dead')
self._restarting = False
self._restart_count = 0
self.stop()
else:
self.log.info('KernelRestarter: restarting kernel (%i/%i)',
self._restart_count,
self.restart_limit
)
self._fire_callbacks('restart')
self.kernel_manager.restart_kernel(now=True)
self._restarting = True
else:
if self._restarting:
self.log.debug("KernelRestarter: restart apparently succeeded")
self._restarting = False
| true | true |
1c30fd8f4d7f0364b9ddc979fc4ad6b7d537d4d4 | 702 | py | Python | cms/management/commands/subcommands/moderator.py | ScholzVolkmer/django-cms-old | 5641181e793ed3c833dd310fc3cc49c3660e548d | [
"BSD-3-Clause"
] | 2 | 2016-02-19T04:19:22.000Z | 2016-02-19T04:19:36.000Z | cms/management/commands/subcommands/moderator.py | ScholzVolkmer/django-cms-old | 5641181e793ed3c833dd310fc3cc49c3660e548d | [
"BSD-3-Clause"
] | 9 | 2015-06-25T10:31:12.000Z | 2022-03-12T00:41:22.000Z | cms/management/commands/subcommands/moderator.py | ScholzVolkmer/django-cms-old | 5641181e793ed3c833dd310fc3cc49c3660e548d | [
"BSD-3-Clause"
] | 1 | 2017-08-22T07:00:30.000Z | 2017-08-22T07:00:30.000Z | # -*- coding: utf-8 -*-
from cms.management.commands.subcommands.base import SubcommandsCommand
from cms.models.pagemodel import Page
from django.conf import settings
from django.core.management.base import NoArgsCommand
class ModeratorOnCommand(NoArgsCommand):
    """Republish every published page so moderation state is initialized."""

    help = 'Turn moderation on, run AFTER setting CMS_MODERATOR = True'

    def handle_noargs(self, **options):
        # Guard: running this without moderation enabled would be pointless.
        assert settings.CMS_MODERATOR == True, 'Command can only be run if CMS_MODERATOR is True'
        published_pages = Page.objects.filter(published=True)
        for published_page in published_pages:
            published_page.publish()
class ModeratorCommand(SubcommandsCommand):
    """Parent command grouping the moderator-related subcommands."""
    help = 'Moderator utilities'
    # Registered subcommand names -> their command classes.
    subcommands = {
        'on': ModeratorOnCommand,
    }
| 31.909091 | 97 | 0.725071 |
from cms.management.commands.subcommands.base import SubcommandsCommand
from cms.models.pagemodel import Page
from django.conf import settings
from django.core.management.base import NoArgsCommand
class ModeratorOnCommand(NoArgsCommand):
help = 'Turn moderation on, run AFTER setting CMS_MODERATOR = True'
def handle_noargs(self, **options):
assert settings.CMS_MODERATOR == True, 'Command can only be run if CMS_MODERATOR is True'
for page in Page.objects.filter(published=True):
page.publish()
class ModeratorCommand(SubcommandsCommand):
help = 'Moderator utilities'
subcommands = {
'on': ModeratorOnCommand,
}
| true | true |
1c30fde49808c196063e3ba6407c8641ef87d4e2 | 707 | py | Python | scripts/examples/03-Drawing/image_drawing.py | jibonaronno/OpenMV-openmv | ec7bca0a3d0407f632d86b57ac2bdc6dc84f0252 | [
"MIT"
] | null | null | null | scripts/examples/03-Drawing/image_drawing.py | jibonaronno/OpenMV-openmv | ec7bca0a3d0407f632d86b57ac2bdc6dc84f0252 | [
"MIT"
] | null | null | null | scripts/examples/03-Drawing/image_drawing.py | jibonaronno/OpenMV-openmv | ec7bca0a3d0407f632d86b57ac2bdc6dc84f0252 | [
"MIT"
] | 11 | 2020-06-03T10:12:28.000Z | 2020-06-05T16:02:40.000Z | # Draw Image Example
#
# This example shows off how to draw images in the frame buffer.
import sensor, image, time, pyb
sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)  # give the camera time to settle after reset
clock = time.clock()  # used to track the frame rate

while(True):
    clock.tick()

    img = sensor.snapshot()

    w = img.width()
    h = img.height()

    # Draws an image in the frame buffer. In this case we're
    # drawing the image we're currently drawing which causes
    # graphical glitches but is cool. Pass an optional mask
    # image to control what pixels are drawn.
    img.draw_image(img, w//4, h//4, x_scale=0.5, y_scale=0.5)

    print(clock.fps())
| 27.192308 | 64 | 0.688826 |
import sensor, image, time, pyb
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()
while(True):
clock.tick()
img = sensor.snapshot()
w = img.width()
h = img.height()
# drawing the image we're currently drawing which causes
img.draw_image(img, w//4, h//4, x_scale=0.5, y_scale=0.5)
print(clock.fps())
| true | true |
1c30fe2ea5211d35c57ed54d7987a71f8ff2dfc4 | 3,561 | py | Python | fin_model_course/plbuild/sources/document/pr1_python_retirement.py | whoopnip/fin-model-course | e6c5ae313bba601c4aca0f334818b61cc0393118 | [
"MIT"
] | 5 | 2020-08-29T15:28:39.000Z | 2021-12-01T16:53:25.000Z | fin_model_course/plbuild/sources/document/pr1_python_retirement.py | whoopnip/fin-model-course | e6c5ae313bba601c4aca0f334818b61cc0393118 | [
"MIT"
] | 16 | 2020-02-26T16:03:47.000Z | 2021-06-15T15:17:37.000Z | fin_model_course/plbuild/sources/document/pr1_python_retirement.py | whoopnip/fin-model-course | e6c5ae313bba601c4aca0f334818b61cc0393118 | [
"MIT"
] | 3 | 2021-01-22T19:38:36.000Z | 2021-09-28T08:14:00.000Z | import os
import pyexlatex as pl
import pyexlatex.table as lt
import pyexlatex.presentation as lp
import pyexlatex.graphics as lg
import pyexlatex.layouts as ll
from jinja2 import FileSystemLoader
import plbuild
from plbuild.paths import images_path
AUTHORS = ['Nick DeRobertis']

# Build as a plain document (not slides) into the documents output folder.
DOCUMENT_CLASS = pl.Document
OUTPUT_LOCATION = plbuild.paths.DOCUMENTS_BUILD_PATH
HANDOUTS_OUTPUT_LOCATION = None  # no handout variant for this document

TITLE = 'Python Retirement Savings Rate Problem'
ORDER = 'PR1'  # build-order key: practice problem 1
def get_content():
    """Build the pyexlatex content tree for the practice-problem document.

    Loads the Jinja template holding the problem statement, then assembles
    one section containing the problem definition, a table of default
    inputs, and notes about the expected solution.
    """
    jinja_templates_path = os.path.sep.join(['pltemplates', 'practice', 'python_retirement'])
    jinja_env = pl.JinjaEnvironment(loader=FileSystemLoader(jinja_templates_path))

    return [
        pl.Section(
            [
                pl.SubSection(
                    [
                        PythonRetirementPracticeProblemModel(template_path='prob_definition.j2', environment=jinja_env),
                    ],
                    title='Problem Definition'
                ),
                pl.SubSection(
                    [
                        pl.Center(
                            lt.Tabular(
                                [
                                    lt.TopRule(),
                                    lt.ValuesTable.from_list_of_lists([[
                                        'Input', 'Default Value',
                                    ]]),
                                    lt.MidRule(),
                                    # Raw strings throughout so \$ reaches LaTeX
                                    # as an escaped dollar sign (the original
                                    # mixed raw and non-raw literals; the runtime
                                    # values are identical either way).
                                    lt.ValuesTable.from_list_of_lists(
                                        [
                                            ['Starting Salary', r'\$50,000'],
                                            ['Salary Growth', '3%'],
                                            ['Mid-Salary Cutoff', r'\$80,000'],
                                            ['High-Salary Cutoff', r'\$120,000'],
                                            ['Low Savings Rate', '10%'],
                                            ['Mid Savings Rate', '25%'],
                                            ['High Savings Rate', '40%'],
                                            ['Interest Rate', '5%'],
                                            ['Desired Cash', r'\$1,500,000'],
                                        ],
                                    ),
                                    lt.BottomRule(),
                                ],
                                align='l|cc'
                            )
                        )
                    ],
                    title='Inputs'
                ),
                pl.SubSection(
                    [
                        """
                        The final answer with the default inputs should be 37 years to retirement. Try hard to get
                        there working from scratch. If you are very stuck, then try taking the Dynamic Salary
                        Retirement model and modifying it. If you are still stuck, then check the provided Jupyter
                        notebook solution. If you have a lot of trouble with this, please see me in office hours or
                        after class, as your first project will be similar but a bit more difficult.
                        """
                    ],
                    title='Solution'
                )
            ],
            # Fixed: this previously said 'Capital Budgeting Probabilities with
            # Monte Carlo Simulation', copy-pasted from a different practice
            # problem; this document is the retirement savings rate problem.
            title=TITLE
        )
    ]
class PythonRetirementPracticeProblemModel(pl.Model):
    # Jinja-backed model: all content comes from the template file given as
    # template_path (prob_definition.j2), so no extra members are needed.
    pass
# Extra keyword arguments passed to DOCUMENT_CLASS when building.
DOCUMENT_CLASS_KWARGS = dict(
    remove_section_numbering=True,  # practice problems read better unnumbered
)
OUTPUT_NAME = TITLE
| 37.484211 | 120 | 0.430778 | import os
import pyexlatex as pl
import pyexlatex.table as lt
import pyexlatex.presentation as lp
import pyexlatex.graphics as lg
import pyexlatex.layouts as ll
from jinja2 import FileSystemLoader
import plbuild
from plbuild.paths import images_path
AUTHORS = ['Nick DeRobertis']
DOCUMENT_CLASS = pl.Document
OUTPUT_LOCATION = plbuild.paths.DOCUMENTS_BUILD_PATH
HANDOUTS_OUTPUT_LOCATION = None
TITLE = 'Python Retirement Savings Rate Problem'
ORDER = 'PR1'
def get_content():
jinja_templates_path = os.path.sep.join(['pltemplates', 'practice', 'python_retirement'])
jinja_env = pl.JinjaEnvironment(loader=FileSystemLoader(jinja_templates_path))
return [
pl.Section(
[
pl.SubSection(
[
PythonRetirementPracticeProblemModel(template_path='prob_definition.j2', environment=jinja_env),
],
title='Problem Definition'
),
pl.SubSection(
[
pl.Center(
lt.Tabular(
[
lt.TopRule(),
lt.ValuesTable.from_list_of_lists([[
'Input', 'Default Value',
]]),
lt.MidRule(),
lt.ValuesTable.from_list_of_lists(
[
['Starting Salary', '\$50,000'],
['Salary Growth', '3%'],
['Mid-Salary Cutoff', r'\$80,000'],
['High-Salary Cutoff', r'\$120,000'],
['Low Savings Rate', '10%'],
['Mid Savings Rate', '25%'],
['High Savings Rate', '40%'],
['Interest Rate', '5%'],
['Desired Cash', r'\$1,500,000'],
],
),
lt.BottomRule(),
],
align='l|cc'
)
)
],
title='Inputs'
),
pl.SubSection(
[
"""
The final answer with the default inputs should be 37 years to retirement. Try hard to get
there working from scratch. If you are very stuck, then try taking the Dynamic Salary
Retirement model and modifying it. If you are still stuck, then check the provided Jupyter
notebook solution. If you have a lot of trouble with this, please see me in office hours or
after class, as your first project will be similar but a bit more difficult.
"""
],
title='Solution'
)
],
title='Capital Budgeting Probabilities with Monte Carlo Simulation'
)
]
class PythonRetirementPracticeProblemModel(pl.Model):
pass
DOCUMENT_CLASS_KWARGS = dict(
remove_section_numbering=True,
)
OUTPUT_NAME = TITLE
| true | true |
1c30fe9d6f70cc8afacf2f87398deda280b3f4bf | 1,742 | py | Python | engine/mainhandler.py | still-learning-ev/MALDY-PyWebView | 8bb28c5aa19fead585fd1ca73f42f20d5c18bd26 | [
"bzip2-1.0.6"
] | null | null | null | engine/mainhandler.py | still-learning-ev/MALDY-PyWebView | 8bb28c5aa19fead585fd1ca73f42f20d5c18bd26 | [
"bzip2-1.0.6"
] | null | null | null | engine/mainhandler.py | still-learning-ev/MALDY-PyWebView | 8bb28c5aa19fead585fd1ca73f42f20d5c18bd26 | [
"bzip2-1.0.6"
] | null | null | null | import threading
import time
import sys
import random
import webview
from static.statictrain import retrain_model_new
from static.staticanalysis import Analyse
class Api:
    """Backend API exposed to the PyWebView frontend via the js_api bridge.

    Each public method is invoked from JavaScript; return values must be
    JSON-serializable.
    """

    def __init__(self):
        # Set by cancel_analysis() to interrupt a running behavioral scan.
        self.cancel_analysis_flag = False

    def start_analysis_static(self, path_to_file, retrain_model):
        """Run static analysis on *path_to_file*, or retrain the model.

        *retrain_model* arrives from JavaScript and may be a string, hence
        the str() comparison against 'True'/'False'.  Returns the analysis
        result when analysing; returns None on the retrain path (and for
        any unrecognized flag value).
        """
        time.sleep(1)
        if str(retrain_model) == 'True':
            retrain_model_new(path_to_file)
        elif str(retrain_model) == 'False':
            analyser = Analyse()
            return analyser.analyse(path_to_file)

    def start_analysis_behavioral(self, path_to_file, retrain_model):
        """Run the (placeholder) behavioral analysis on *path_to_file*.

        Busy-waits so the frontend has a window in which cancel_analysis()
        can interrupt the run; returns a message dict either way.
        """
        time.sleep(5)
        self.cancel_analysis_flag = False
        # Build the success response once instead of on every loop pass;
        # the cancelled response overwrites it if the flag is raised.
        response = {
            'message': 'Operation performed on {} and retraining model is {}'.format(
                path_to_file, retrain_model)
        }
        for _ in range(1000000):
            if self.cancel_analysis_flag:
                response = {'message': 'Analysis cancelled'}
                break
        return response

    def cancel_analysis(self):
        """Signal the running behavioral analysis to stop."""
        time.sleep(0.1)
        self.cancel_analysis_flag = True
| 33.5 | 127 | 0.591848 | import threading
import time
import sys
import random
import webview
from static.statictrain import retrain_model_new
from static.staticanalysis import Analyse
class Api:
def __init__(self):
self.cancel_analysis_flag = False
def start_analysis_static(self, path_to_file, retrain_model):
time.sleep(1)
if (str(retrain_model)=='True'):
retrain_model_new(path_to_file)
elif(str(retrain_model)=='False'):
ana = Analyse()
result = ana.analyse(path_to_file)
return result
def start_analysis_behavioral(self, path_to_file, retrain_model):
time.sleep(5)
self.cancel_analysis_flag = False
for i in range(0, 1000000):
if self.cancel_analysis_flag:
response = {'message': 'Analysis cancelled'}
break
else:
response = {
'message': 'Operation performed on {} and retraining model is {}'.format(path_to_file, retrain_model)
}
return response
def cancel_analysis(self):
time.sleep(0.1)
self.cancel_analysis_flag = True
| true | true |
1c30fedb23f1e78f2bb10f06977d6db533047678 | 1,354 | py | Python | guidos-gorgeous-lasagna/lasagna.py | pmareke/exercism-python | 69ad20555f52f921efed0e03c1f2d52dc44c3e38 | [
"MIT"
] | null | null | null | guidos-gorgeous-lasagna/lasagna.py | pmareke/exercism-python | 69ad20555f52f921efed0e03c1f2d52dc44c3e38 | [
"MIT"
] | null | null | null | guidos-gorgeous-lasagna/lasagna.py | pmareke/exercism-python | 69ad20555f52f921efed0e03c1f2d52dc44c3e38 | [
"MIT"
] | null | null | null | """This module helps guido make gorgeous lasagna."""
EXPECTED_BAKE_TIME = 40
PREPARATION_TIME = 2
def bake_time_remaining(elapsed_bake_time):
"""Calculate the bake time remaining.
:param elapsed_bake_time: int baking time already elapsed.
:return: int remaining bake time derived from 'EXPECTED_BAKE_TIME'.
Function that takes the actual minutes the lasagna has been in the oven as
an argument and returns how many minutes the lasagna still needs to bake
based on the `EXPECTED_BAKE_TIME`.
"""
return EXPECTED_BAKE_TIME - elapsed_bake_time
def preparation_time_in_minutes(number_of_layers):
    """Return the minutes needed to assemble the given number of layers.

    :param number_of_layers: int layers to add to the lasagna.
    :return: int total preparation minutes, at ``PREPARATION_TIME``
        minutes per layer.
    """
    return PREPARATION_TIME * number_of_layers
def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):
    """Return the total minutes spent cooking so far.

    :param number_of_layers: int layers added to the lasagna.
    :param elapsed_bake_time: int minutes already spent baking.
    :return: int preparation time for the layers plus elapsed oven time.
    """
    prep_minutes = preparation_time_in_minutes(number_of_layers)
    return prep_minutes + elapsed_bake_time
| 34.717949 | 78 | 0.760709 |
EXPECTED_BAKE_TIME = 40
PREPARATION_TIME = 2
def bake_time_remaining(elapsed_bake_time):
return EXPECTED_BAKE_TIME - elapsed_bake_time
def preparation_time_in_minutes(number_of_layers):
return number_of_layers * PREPARATION_TIME
def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):
return preparation_time_in_minutes(number_of_layers) + elapsed_bake_time
| true | true |
1c30ffe32724163c4b8b9b923c3014d66f4c6376 | 1,111 | py | Python | grr/server/grr_response_server/gui/api_plugins/artifact_regression_test.py | nkrios/grr | 399e078ed522bf0555a2666fb086aa7809d54971 | [
"Apache-2.0"
] | null | null | null | grr/server/grr_response_server/gui/api_plugins/artifact_regression_test.py | nkrios/grr | 399e078ed522bf0555a2666fb086aa7809d54971 | [
"Apache-2.0"
] | null | null | null | grr/server/grr_response_server/gui/api_plugins/artifact_regression_test.py | nkrios/grr | 399e078ed522bf0555a2666fb086aa7809d54971 | [
"Apache-2.0"
] | 1 | 2020-07-09T01:08:48.000Z | 2020-07-09T01:08:48.000Z | #!/usr/bin/env python
"""This modules contains regression tests for artifact API handler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from absl import app
from grr_response_core import config
from grr_response_server import artifact_registry
from grr_response_server.gui import api_regression_test_lib
from grr_response_server.gui.api_plugins import artifact as artifact_plugin
from grr.test_lib import artifact_test_lib
class ApiListArtifactsHandlerRegressionTest(
    api_regression_test_lib.ApiRegressionTest):
  """Regression test for the ListArtifacts API method."""

  api_method = "ListArtifacts"
  handler = artifact_plugin.ApiListArtifactsHandler

  def Run(self):
    # Start from a clean registry so only the test artifact is listed,
    # keeping the recorded golden output deterministic.
    with artifact_test_lib.PatchCleanArtifactRegistry():
      test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
                                         "artifacts", "test_artifact.json")
      artifact_registry.REGISTRY.AddFileSource(test_artifacts_file)

      self.Check("ListArtifacts")
def main(argv):
  """Entry point: delegate to the shared regression-test runner."""
  api_regression_test_lib.main(argv)


if __name__ == "__main__":
  app.run(main)
| 27.775 | 75 | 0.782178 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from absl import app
from grr_response_core import config
from grr_response_server import artifact_registry
from grr_response_server.gui import api_regression_test_lib
from grr_response_server.gui.api_plugins import artifact as artifact_plugin
from grr.test_lib import artifact_test_lib
class ApiListArtifactsHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
api_method = "ListArtifacts"
handler = artifact_plugin.ApiListArtifactsHandler
def Run(self):
with artifact_test_lib.PatchCleanArtifactRegistry():
test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifact.json")
artifact_registry.REGISTRY.AddFileSource(test_artifacts_file)
self.Check("ListArtifacts")
def main(argv):
api_regression_test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| true | true |
1c310103a1e0547f12c69317ab7424a6eab8a83d | 1,956 | py | Python | samcli/commands/_utils/custom_options/option_nargs.py | paoptu023/aws-sam-cli | e382d603f739e9694d64f622daa228ccfe4581f4 | [
"Apache-2.0"
] | 1 | 2019-05-01T08:27:35.000Z | 2019-05-01T08:27:35.000Z | samcli/commands/_utils/custom_options/option_nargs.py | paoptu023/aws-sam-cli | e382d603f739e9694d64f622daa228ccfe4581f4 | [
"Apache-2.0"
] | 3 | 2020-01-27T05:20:12.000Z | 2020-10-03T01:01:11.000Z | samcli/commands/_utils/custom_options/option_nargs.py | paoptu023/aws-sam-cli | e382d603f739e9694d64f622daa228ccfe4581f4 | [
"Apache-2.0"
] | 1 | 2021-11-29T19:10:17.000Z | 2021-11-29T19:10:17.000Z | """
Custom Click options for multiple arguments
"""
import click
class OptionNargs(click.Option):
    """
    A custom option class that allows parsing for multiple arguments
    for an option, when the number of arguments for an option are unknown.
    """

    def __init__(self, *args, **kwargs):
        # nargs=-1 mirrors click's argument convention for "unlimited";
        # it is popped so click.Option does not see the unknown kwarg.
        self.nargs = kwargs.pop("nargs", -1)
        super(OptionNargs, self).__init__(*args, **kwargs)
        # Filled in by add_to_parser() once the underlying parser exists.
        self._previous_parser_process = None
        self._nargs_parser = None

    def add_to_parser(self, parser, ctx):
        def parser_process(value, state):
            # look ahead into arguments till we reach the next option.
            # the next option starts with a prefix which is either '-' or '--'
            next_option = False
            value = [value]

            while state.rargs and not next_option:
                for prefix in self._nargs_parser.prefixes:
                    if state.rargs[0].startswith(prefix):
                        next_option = True
                if not next_option:
                    # consume the token as another value for this option
                    value.append(state.rargs.pop(0))

            value = tuple(value)

            # call the actual process
            self._previous_parser_process(value, state)

        # Add current option to Parser by calling add_to_parser on the super class.
        super(OptionNargs, self).add_to_parser(parser, ctx)
        for name in self.opts:
            # Get OptionParser object for current option
            option_parser = getattr(parser, "_long_opt").get(name) or getattr(parser, "_short_opt").get(name)
            if option_parser:
                # Monkey patch `process` method for click.parser.Option class.
                # This allows for setting multiple parsed values into current option arguments
                self._nargs_parser = option_parser
                self._previous_parser_process = option_parser.process
                option_parser.process = parser_process
                break
import click
class OptionNargs(click.Option):
def __init__(self, *args, **kwargs):
self.nargs = kwargs.pop("nargs", -1)
super(OptionNargs, self).__init__(*args, **kwargs)
self._previous_parser_process = None
self._nargs_parser = None
def add_to_parser(self, parser, ctx):
def parser_process(value, state):
next_option = False
value = [value]
while state.rargs and not next_option:
for prefix in self._nargs_parser.prefixes:
if state.rargs[0].startswith(prefix):
next_option = True
if not next_option:
value.append(state.rargs.pop(0))
value = tuple(value)
self._previous_parser_process(value, state)
super(OptionNargs, self).add_to_parser(parser, ctx)
for name in self.opts:
option_parser = getattr(parser, "_long_opt").get(name) or getattr(parser, "_short_opt").get(name)
if option_parser:
self._nargs_parser = option_parser
self._previous_parser_process = option_parser.process
option_parser.process = parser_process
break
| true | true |
1c310267a3bc86262c3bd0369457cef6eb63ab33 | 1,630 | py | Python | src/pyjams/color/__init__.py | mcuntz/pyjams | 1393c68a9e21a1e7b88291229120641fdaddc998 | [
"MIT"
] | 2 | 2021-11-06T10:44:37.000Z | 2022-03-10T18:13:00.000Z | src/pyjams/color/__init__.py | mcuntz/pyjams | 1393c68a9e21a1e7b88291229120641fdaddc998 | [
"MIT"
] | null | null | null | src/pyjams/color/__init__.py | mcuntz/pyjams | 1393c68a9e21a1e7b88291229120641fdaddc998 | [
"MIT"
] | null | null | null | """
Collection of color palettes and continuous color maps
:copyright: Copyright 2021- Matthias Cuntz, see AUTHORS.md for details.
:license: MIT License, see LICENSE for details.
Subpackages
===========
.. autosummary::
brewer_palettes
mathematica_palettes
ncl_palettes
oregon_palettes
pyjams_palettes
sron2012_palettes
sron_palettes
ufz_palettes
color
"""
# colour palettes
from .brewer_palettes import brewer_sequential, brewer_diverging
from .brewer_palettes import brewer_qualitative
from .mathematica_palettes import mathematica_rainbow
from .ncl_palettes import ncl_large, ncl_small, ncl_meteo_swiss
from .oregon_palettes import oregon_sequential, oregon_diverging
from .oregon_palettes import oregon_qualitative
from .pyjams_palettes import pyjams_cmaps
from .sron2012_palettes import sron2012_colors, sron2012_functions
from .sron_palettes import sron_colors, sron_colormaps, sron_functions
from .ufz_palettes import ufz_colors
# get, show, print colors and color palettes
from .color import get_color, print_colors
from .color import get_cmap, print_palettes, show_palettes
__all__ = ['brewer_sequential', 'brewer_diverging', 'brewer_qualitative',
'mathematica_rainbow',
'ncl_large', 'ncl_small', 'ncl_meteo_swiss',
'oregon_sequential', 'oregon_diverging', 'oregon_qualitative',
'pyjams_cmaps',
'sron2012_colors', 'sron2012_functions',
'sron_colors', 'sron_colormaps', 'sron_functions',
'ufz_colors',
'get_color', 'print_colors',
'get_cmap', 'print_palettes', 'show_palettes',
]
| 33.958333 | 73 | 0.753988 |
from .brewer_palettes import brewer_sequential, brewer_diverging
from .brewer_palettes import brewer_qualitative
from .mathematica_palettes import mathematica_rainbow
from .ncl_palettes import ncl_large, ncl_small, ncl_meteo_swiss
from .oregon_palettes import oregon_sequential, oregon_diverging
from .oregon_palettes import oregon_qualitative
from .pyjams_palettes import pyjams_cmaps
from .sron2012_palettes import sron2012_colors, sron2012_functions
from .sron_palettes import sron_colors, sron_colormaps, sron_functions
from .ufz_palettes import ufz_colors
from .color import get_color, print_colors
from .color import get_cmap, print_palettes, show_palettes
__all__ = ['brewer_sequential', 'brewer_diverging', 'brewer_qualitative',
'mathematica_rainbow',
'ncl_large', 'ncl_small', 'ncl_meteo_swiss',
'oregon_sequential', 'oregon_diverging', 'oregon_qualitative',
'pyjams_cmaps',
'sron2012_colors', 'sron2012_functions',
'sron_colors', 'sron_colormaps', 'sron_functions',
'ufz_colors',
'get_color', 'print_colors',
'get_cmap', 'print_palettes', 'show_palettes',
]
| true | true |
1c3102ca10278ae7d1eec5a32bee7dcb0bfd6a56 | 1,338 | py | Python | playmate/settings.py | Gulats/playmate | c01b060aa0fd3ceea5082ffbee6b105a3cedc7dc | [
"MIT"
] | null | null | null | playmate/settings.py | Gulats/playmate | c01b060aa0fd3ceea5082ffbee6b105a3cedc7dc | [
"MIT"
] | null | null | null | playmate/settings.py | Gulats/playmate | c01b060aa0fd3ceea5082ffbee6b105a3cedc7dc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Google Play Store endpoints and scraping constants used by playmate.
BASE_URL = 'https://play.google.com/store/apps'
SUGGESTION_URL = 'https://market.android.com/suggest/SuggRequest'
SEARCH_URL = 'https://play.google.com/store/search'
# Paginated search-cluster endpoint; {gsr} is filled with an opaque token
# (see PAGE_TOKENS / TOKEN_RE below).
SEARCH_PAGINATED_URL = 'https://play.google.com/store/apps/collection/search_results_cluster_apps?gsr={gsr}&authuser=0'
# Maximum number of requests issued in parallel.
CONCURRENT_REQUESTS = 10
# Desktop Chrome user-agent string sent with every request.
USER_AGENT = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) '
              'AppleWebKit/537.36 (KHTML, like Gecko) '
              'Chrome/45.0.2454.101 Safari/537.36')
# Number of results to retrieve from a collection. Range(1 - 120)
NUM_RESULTS = 120
# Number of results to retrieve from a developer
DEV_RESULTS = 24
# Number of results to retrieve from similar. Range (1 - 60)
SIMILAR_RESULTS = 24
# Number of results to retrieve per search page, and the maximum page index.
SEARCH_RESULTS = 48
SEARCH_MAX_PAGE = 5
# pagTok post data strings to paginate through search results
# NOTE(review): presumably one token per subsequent results page — confirm
# against the code that consumes them.
PAGE_TOKENS = (
    '-p6BnQMCCDE=:S:ANO1ljJ4Cw8',
    '-p6BnQMCCGI=:S:ANO1ljJYYFs',
    '-p6BnQMDCJMB:S:ANO1ljLvbuA',
    '-p6BnQMDCMQB:S:ANO1ljIeRbo',
    '-p6BnQMDCPUB:S:ANO1ljKG00U'
)
# Keys that are dropped from results (presumably from app-detail payloads —
# verify against the consumers of this constant).
UNWANTED_KEYS = (
    'description_html',
    'screenshots',
    'video',
    'histogram',
    'interactive_elements',
    'recent_changes'
)
# Regex to find page tokens within script tags
TOKEN_RE = r'GAEiA[\w=]{3,7}:S:ANO1lj[\w]{5}'
BASE_URL = 'https://play.google.com/store/apps'
SUGGESTION_URL = 'https://market.android.com/suggest/SuggRequest'
SEARCH_URL = 'https://play.google.com/store/search'
SEARCH_PAGINATED_URL = 'https://play.google.com/store/apps/collection/search_results_cluster_apps?gsr={gsr}&authuser=0'
CONCURRENT_REQUESTS = 10
USER_AGENT = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/45.0.2454.101 Safari/537.36')
NUM_RESULTS = 120
DEV_RESULTS = 24
SIMILAR_RESULTS = 24
SEARCH_RESULTS = 48
SEARCH_MAX_PAGE = 5
PAGE_TOKENS = (
'-p6BnQMCCDE=:S:ANO1ljJ4Cw8',
'-p6BnQMCCGI=:S:ANO1ljJYYFs',
'-p6BnQMDCJMB:S:ANO1ljLvbuA',
'-p6BnQMDCMQB:S:ANO1ljIeRbo',
'-p6BnQMDCPUB:S:ANO1ljKG00U'
)
UNWANTED_KEYS = (
'description_html',
'screenshots',
'video',
'histogram',
'interactive_elements',
'recent_changes'
)
TOKEN_RE = r'GAEiA[\w=]{3,7}:S:ANO1lj[\w]{5}'
| true | true |
1c31032cb9f282847da8a1d8e2f19671ca1f4b4d | 3,127 | py | Python | other/make_sublime_syntax.py | mechatroner/sublime_rainbow_csv | 09fa51886258ce2b634296aea452ff90d0f40255 | [
"MIT"
] | 76 | 2018-01-31T12:34:06.000Z | 2022-03-16T12:27:21.000Z | other/make_sublime_syntax.py | mechatroner/sublime_rainbow_csv | 09fa51886258ce2b634296aea452ff90d0f40255 | [
"MIT"
] | 32 | 2018-01-31T20:06:35.000Z | 2022-02-05T19:24:08.000Z | other/make_sublime_syntax.py | mechatroner/sublime_rainbow_csv | 09fa51886258ce2b634296aea452ff90d0f40255 | [
"MIT"
] | 8 | 2018-06-01T13:57:46.000Z | 2021-07-07T06:21:19.000Z | #!/usr/bin/env python
import sys
import os
import argparse
import random
import re
parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, parent_dir)
import auto_syntax
# Human-readable, file-name-safe names for delimiters that are reserved or
# confusing in file names (slashes, wildcards, whitespace, ...).
_DELIM_NAMES = {
    '<': 'less-than',
    '>': 'greater-than',
    ':': 'colon',
    '"': 'double-quote',
    '/': 'slash',
    '\\': 'backslash',
    '|': 'pipe',
    '?': 'question-mark',
    '*': 'asterisk',
    '\t': 'tab',
    ' ': 'space',
}


def name_normalize(delim):
    """Return a file-name-safe, human-readable name for *delim*.

    Delimiters that are awkward in file names map to an English word
    (e.g. ``'\\t'`` -> ``'tab'``); any other delimiter is wrapped in
    brackets (e.g. ``';'`` -> ``'[;]'``).
    """
    # Dict lookup replaces the original chain of if-statements; behavior
    # (including the bracketed fallback) is unchanged.
    return _DELIM_NAMES.get(delim, '[{}]'.format(delim))
def get_syntax_file_name_old(delim, policy):
    """Return the legacy (pre-rename) name for a generated syntax definition.

    TSV/CSV with their canonical policies keep the historical short names;
    every other combination gets the generic ``Rainbow CSV <delim> <policy>``
    form.
    """
    display_policy = auto_syntax.filename_policy_map[policy]
    legacy_names = {
        ('\t', 'Simple'): 'TSV (Rainbow)',
        (',', 'Standard'): 'CSV (Rainbow)',
    }
    generic = 'Rainbow CSV {} {}'.format(name_normalize(delim), display_policy)
    return legacy_names.get((delim, display_policy), generic)
def write_sublime_syntax(delim, policy, dst_dir, old_names):
    """Generate one grammar for (*delim*, *policy*) and write it to *dst_dir*.

    With ``old_names=True`` the legacy file-name scheme is used.
    TODO: drop the legacy naming once it is no longer needed.
    """
    if old_names:
        basename = get_syntax_file_name_old(delim, policy) + '.sublime-syntax'
    else:
        basename = auto_syntax.get_syntax_file_basename(delim, policy)
    grammar = auto_syntax.make_sublime_syntax(delim, policy)
    with open(os.path.join(dst_dir, basename), 'w') as out_file:
        out_file.write(grammar)
def main():
    """CLI entry point: batch-generate grammars, or print one for debugging."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--make_grammars_prod', help='make and put grammars into DIR')
    parser.add_argument('--make_grammars_old', help='make and put grammars into DIR')
    parser.add_argument('--dbg_delim', help='Run in debug mode: print single grammar with delim')
    parser.add_argument('--dbg_policy', help='Run in debug mode: print single grammar with policy')
    args = parser.parse_args()
    if args.make_grammars_old:
        # Legacy naming scheme: 'quoted' only for the standard delimiters,
        # 'simple' for every pregenerated delimiter.
        for delim in auto_syntax.get_pregenerated_delims():
            if delim in '\t|,;':
                write_sublime_syntax(delim, 'quoted', args.make_grammars_old, old_names=True)
            write_sublime_syntax(delim, 'simple', args.make_grammars_old, old_names=True)
        return
    if args.make_grammars_prod:
        # Production naming scheme: quoted variants only for ',' and ';',
        # 'simple' for every pregenerated delimiter.
        for delim in auto_syntax.get_pregenerated_delims():
            if delim in ',;':
                write_sublime_syntax(delim, 'quoted', args.make_grammars_prod, old_names=False)
                write_sublime_syntax(delim, 'quoted_rfc', args.make_grammars_prod, old_names=False)
            write_sublime_syntax(delim, 'simple', args.make_grammars_prod, old_names=False)
        return
    # Debug mode: print a single grammar to stdout.
    print(auto_syntax.make_sublime_syntax(args.dbg_delim, args.dbg_policy))
if __name__ == '__main__':
main()
| 30.960396 | 99 | 0.641829 |
import sys
import os
import argparse
import random
import re
parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, parent_dir)
import auto_syntax
def name_normalize(delim):
if delim == '<':
return 'less-than'
if delim == '>':
return 'greater-than'
if delim == ':':
return 'colon'
if delim == '"':
return 'double-quote'
if delim == '/':
return 'slash'
if delim == '\\':
return 'backslash'
if delim == '|':
return 'pipe'
if delim == '?':
return 'question-mark'
if delim == '*':
return 'asterisk'
if delim == '\t':
return 'tab'
if delim == ' ':
return 'space'
return '[{}]'.format(delim)
def get_syntax_file_name_old(delim, policy):
policy = auto_syntax.filename_policy_map[policy]
if delim == '\t' and policy == 'Simple':
return 'TSV (Rainbow)'
if delim == ',' and policy == 'Standard':
return 'CSV (Rainbow)'
return 'Rainbow CSV {} {}'.format(name_normalize(delim), policy)
def write_sublime_syntax(delim, policy, dst_dir, old_names):
# TODO get rid of this
if old_names:
syntax_file_name = get_syntax_file_name_old(delim, policy) + '.sublime-syntax'
else:
syntax_file_name = auto_syntax.get_syntax_file_basename(delim, policy)
syntax_path = os.path.join(dst_dir, syntax_file_name)
syntax_text = auto_syntax.make_sublime_syntax(delim, policy)
with open(syntax_path, 'w') as dst:
dst.write(syntax_text)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--make_grammars_prod', help='make and put grammars into DIR')
parser.add_argument('--make_grammars_old', help='make and put grammars into DIR')
parser.add_argument('--dbg_delim', help='Run in debug mode: print single grammar with delim')
parser.add_argument('--dbg_policy', help='Run in debug mode: print single grammar with policy')
args = parser.parse_args()
if args.make_grammars_old:
dst_dir = args.make_grammars_old
delims = auto_syntax.get_pregenerated_delims()
standard_delims = '\t|,;'
for delim in delims:
if standard_delims.find(delim) != -1:
write_sublime_syntax(delim, 'quoted', dst_dir, old_names=True)
write_sublime_syntax(delim, 'simple', dst_dir, old_names=True)
return
if args.make_grammars_prod:
dst_dir = args.make_grammars_prod
delims = auto_syntax.get_pregenerated_delims()
standard_delims = ',;'
for delim in delims:
if standard_delims.find(delim) != -1:
write_sublime_syntax(delim, 'quoted', dst_dir, old_names=False)
write_sublime_syntax(delim, 'quoted_rfc', dst_dir, old_names=False)
write_sublime_syntax(delim, 'simple', dst_dir, old_names=False)
return
delim = args.dbg_delim
policy = args.dbg_policy
grammar = auto_syntax.make_sublime_syntax(delim, policy)
print(grammar)
if __name__ == '__main__':
main()
| true | true |
1c310357ee22e48c9836eca6dbad144ce5e88526 | 1,076 | py | Python | atest/run.py | fthmko/ScreenCapLibrary | ab32ceab06fdd7a9c4f3782936c0fa85b562b54a | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-03-31T20:34:14.000Z | 2022-03-31T20:34:14.000Z | atest/run.py | fthmko/ScreenCapLibrary | ab32ceab06fdd7a9c4f3782936c0fa85b562b54a | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-03-28T23:35:51.000Z | 2022-03-28T23:35:51.000Z | atest/run.py | fthmko/ScreenCapLibrary | ab32ceab06fdd7a9c4f3782936c0fa85b562b54a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""usage: python atest/run.py <test_suite_path>"
Examples:
Running all the tests with Robot:
python atest/run.py atest
Robot results are found in path 'atest/results/'
"""
import sys
from os.path import abspath, dirname, join
from robot import run_cli, rebot
from robotstatuschecker import process_output
CURDIR = dirname(abspath(__file__))
OUTPUT_DIR = join(CURDIR, 'results')
sys.path.append(join(CURDIR, '..', 'src'))
COMMON_OPTS = ('--log', 'NONE', '--report', 'NONE')
def atests(*opts):
    """Run the acceptance tests, normalise statuses, and rebuild reports.

    Returns the rebot return code (number of failures after status check).
    """
    python(*opts)
    output_xml = join(OUTPUT_DIR, 'output.xml')
    process_output(output_xml)
    return rebot(output_xml, outputdir=OUTPUT_DIR)
def python(*opts):
    """Invoke Robot Framework with the common options plus *opts*.

    ``run_cli`` exits via SystemExit; it is swallowed so the caller can
    post-process the results.
    """
    argv = ['--outputdir', OUTPUT_DIR]
    argv.extend(COMMON_OPTS)
    argv.extend(opts)
    try:
        run_cli(argv)
    except SystemExit:
        pass
if __name__ == '__main__':
    cli_args = sys.argv[1:]
    if not cli_args or '--help' in sys.argv:
        # No suite given (or help requested): show usage, exit with 251.
        print(__doc__)
        rc = 251
    else:
        rc = atests(*cli_args)
    print("\nAfter status check there were %s failures." % rc)
    sys.exit(rc)
| 23.911111 | 70 | 0.644981 |
import sys
from os.path import abspath, dirname, join
from robot import run_cli, rebot
from robotstatuschecker import process_output
CURDIR = dirname(abspath(__file__))
OUTPUT_DIR = join(CURDIR, 'results')
sys.path.append(join(CURDIR, '..', 'src'))
COMMON_OPTS = ('--log', 'NONE', '--report', 'NONE')
def atests(*opts):
python(*opts)
process_output(join(OUTPUT_DIR, 'output.xml'))
return rebot(join(OUTPUT_DIR, 'output.xml'), outputdir=OUTPUT_DIR)
def python(*opts):
try:
run_cli(['--outputdir', OUTPUT_DIR]
+ list(COMMON_OPTS + opts))
except SystemExit:
pass
if __name__ == '__main__':
if len(sys.argv) == 1 or '--help' in sys.argv:
print(__doc__)
rc = 251
else:
rc = atests(*sys.argv[1:])
print("\nAfter status check there were %s failures." % rc)
sys.exit(rc)
| true | true |
1c31067c3e8a0b1c06501fb3f52532e80bf6652d | 5,814 | py | Python | neo4j/exceptions.py | krisgeus/neo4j-python-driver | bdf6631702a4552253ab616055c47f9ab90c5d7a | [
"Apache-2.0"
] | null | null | null | neo4j/exceptions.py | krisgeus/neo4j-python-driver | bdf6631702a4552253ab616055c47f9ab90c5d7a | [
"Apache-2.0"
] | null | null | null | neo4j/exceptions.py | krisgeus/neo4j-python-driver | bdf6631702a4552253ab616055c47f9ab90c5d7a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2020 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the core driver exceptions.
"""
class ProtocolError(Exception):
    """ Raised when an unexpected or unsupported protocol event occurs.

    Not part of the :class:`.CypherError` hierarchy: it is raised locally
    rather than hydrated from a server status code.
    """
class ServiceUnavailable(Exception):
    """ Raised when no database service is available.

    Not part of the :class:`.CypherError` hierarchy (no server status code).
    """
class IncompleteCommitError(Exception):
    """ Raised when a disconnection occurs while still waiting for a commit
    response. For non-idempotent write transactions, this leaves the data
    in an unknown state with regard to whether the transaction completed
    successfully or not. Callers cannot tell from this error alone whether
    the commit was applied.
    """
class SecurityError(Exception):
    """ Raised when an action is denied due to security settings.

    Mixed into :class:`.Forbidden` and :class:`.AuthError` below.
    """
class CypherError(Exception):
    """ Raised when the Cypher engine returns an error to the client.

    Instances are normally built via :meth:`hydrate` from the ``message``
    and status ``code`` returned by the server; the code is split into
    classification/category/title and the most specific known subclass is
    instantiated.
    """

    message = None
    code = None
    classification = None
    category = None
    title = None
    metadata = None

    @classmethod
    def hydrate(cls, message=None, code=None, **metadata):
        """Build the most specific error instance for *code*/*message*.

        Extra keyword arguments are stored on the instance as ``metadata``.
        """
        msg = message or "An unknown error occurred."
        status = code or "Neo.DatabaseError.General.UnknownError"
        parts = status.split(".")
        if len(parts) == 4:
            classification, category, title = parts[1:]
        else:
            # Malformed code: fall back to the generic database-error triple
            # (the original code string is still kept on the instance).
            classification = "DatabaseError"
            category = "General"
            title = "UnknownError"
        error_class = cls._extract_error_class(classification, status)
        inst = error_class(msg)
        inst.message = msg
        inst.code = status
        inst.classification = classification
        inst.category = category
        inst.title = title
        inst.metadata = metadata
        return inst

    @classmethod
    def _extract_error_class(cls, classification, code):
        """Map a (classification, code) pair onto the best exception class."""
        if classification == "ClientError":
            return client_errors.get(code, ClientError)
        if classification == "TransientError":
            return transient_errors.get(code, TransientError)
        if classification == "DatabaseError":
            return DatabaseError
        # Unknown classification: keep whatever class hydrate was called on.
        return cls
class ClientError(CypherError):
    """ The Client sent a bad request - changing the request might yield a successful outcome.

    Default class for the ``ClientError`` classification; specific status
    codes map to subclasses via the ``client_errors`` table below.
    """
class DatabaseError(CypherError):
    """ The database failed to service the request.

    Used for every status code in the ``DatabaseError`` classification.
    """
class TransientError(CypherError):
    """ The database cannot service the request right now, retrying later might yield a successful outcome.

    Default class for the ``TransientError`` classification; specific codes
    map to subclasses via the ``transient_errors`` table below.
    """
class DatabaseUnavailableError(TransientError):
    """ Raised for the ``Neo.TransientError.General.DatabaseUnavailable``
    status code.
    """
class ConstraintError(ClientError):
    """ Raised for constraint violation/verification status codes
    (``Neo.ClientError.Schema.Constraint*`` and
    ``Neo.ClientError.Statement.Constraint*``).
    """
class CypherSyntaxError(ClientError):
    """ Raised for invalid Cypher syntax
    (``Neo.ClientError.Statement.InvalidSyntax`` / ``SyntaxError``).
    """
class CypherTypeError(ClientError):
    """ Raised for type-error status codes
    (``Neo.ClientError.Procedure.TypeError`` and
    ``Neo.ClientError.Statement.InvalidType`` / ``TypeError``).
    """
class NotALeaderError(ClientError):
    """ Raised for the ``Neo.ClientError.Cluster.NotALeader`` status code.
    """
class Forbidden(ClientError, SecurityError):
    """ Raised for "forbidden" security/read-only status codes, e.g.
    ``Neo.ClientError.Security.Forbidden`` (see ``client_errors``).
    """
class ForbiddenOnReadOnlyDatabaseError(Forbidden):
    """ Raised for the
    ``Neo.ClientError.General.ForbiddenOnReadOnlyDatabase`` status code.
    """
class AuthError(ClientError, SecurityError):
    """ Raised when authentication failure occurs
    (``Neo.ClientError.Security.AuthorizationFailed`` / ``Unauthorized``).
    """
# Most specific exception class per ClientError status code; codes missing
# here fall back to plain ClientError (see CypherError._extract_error_class).
client_errors = {
    # ConstraintError
    "Neo.ClientError.Schema.ConstraintValidationFailed": ConstraintError,
    "Neo.ClientError.Schema.ConstraintViolation": ConstraintError,
    "Neo.ClientError.Statement.ConstraintVerificationFailed": ConstraintError,
    "Neo.ClientError.Statement.ConstraintViolation": ConstraintError,

    # CypherSyntaxError
    "Neo.ClientError.Statement.InvalidSyntax": CypherSyntaxError,
    "Neo.ClientError.Statement.SyntaxError": CypherSyntaxError,

    # CypherTypeError
    "Neo.ClientError.Procedure.TypeError": CypherTypeError,
    "Neo.ClientError.Statement.InvalidType": CypherTypeError,
    "Neo.ClientError.Statement.TypeError": CypherTypeError,

    # Forbidden
    "Neo.ClientError.General.ForbiddenOnReadOnlyDatabase": ForbiddenOnReadOnlyDatabaseError,
    "Neo.ClientError.General.ReadOnly": Forbidden,
    "Neo.ClientError.Schema.ForbiddenOnConstraintIndex": Forbidden,
    "Neo.ClientError.Schema.IndexBelongsToConstraint": Forbidden,
    "Neo.ClientError.Security.Forbidden": Forbidden,
    "Neo.ClientError.Transaction.ForbiddenDueToTransactionType": Forbidden,

    # AuthError
    "Neo.ClientError.Security.AuthorizationFailed": AuthError,
    "Neo.ClientError.Security.Unauthorized": AuthError,

    # NotALeaderError
    "Neo.ClientError.Cluster.NotALeader": NotALeaderError
}
# Most specific exception class per TransientError status code; codes missing
# here fall back to plain TransientError (see CypherError._extract_error_class).
transient_errors = {
    # DatabaseUnavailableError
    "Neo.TransientError.General.DatabaseUnavailable": DatabaseUnavailableError
}
class SessionExpired(Exception):
    """ Raised when a session is no longer able to fulfil the purpose
    described by its original parameters.
    """

    def __init__(self, session, *args, **kwargs):
        # The expired session is carried as the first exception argument.
        super().__init__(session, *args, **kwargs)
class TransactionError(Exception):
    """ Raised when an error occurs while using a transaction.
    """

    def __init__(self, transaction, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Unlike SessionExpired, the transaction is kept as an attribute
        # rather than in the exception args.
        self.transaction = transaction
| 27.04186 | 107 | 0.691778 |
class ProtocolError(Exception):
class ServiceUnavailable(Exception):
class IncompleteCommitError(Exception):
class SecurityError(Exception):
class CypherError(Exception):
message = None
code = None
classification = None
category = None
title = None
metadata = None
@classmethod
def hydrate(cls, message=None, code=None, **metadata):
message = message or "An unknown error occurred."
code = code or "Neo.DatabaseError.General.UnknownError"
try:
_, classification, category, title = code.split(".")
except ValueError:
classification = "DatabaseError"
category = "General"
title = "UnknownError"
error_class = cls._extract_error_class(classification, code)
inst = error_class(message)
inst.message = message
inst.code = code
inst.classification = classification
inst.category = category
inst.title = title
inst.metadata = metadata
return inst
@classmethod
def _extract_error_class(cls, classification, code):
if classification == "ClientError":
try:
return client_errors[code]
except KeyError:
return ClientError
elif classification == "TransientError":
try:
return transient_errors[code]
except KeyError:
return TransientError
elif classification == "DatabaseError":
return DatabaseError
else:
return cls
class ClientError(CypherError):
class DatabaseError(CypherError):
class TransientError(CypherError):
class DatabaseUnavailableError(TransientError):
class ConstraintError(ClientError):
class CypherSyntaxError(ClientError):
class CypherTypeError(ClientError):
class NotALeaderError(ClientError):
class Forbidden(ClientError, SecurityError):
class ForbiddenOnReadOnlyDatabaseError(Forbidden):
class AuthError(ClientError, SecurityError):
client_errors = {
"Neo.ClientError.Schema.ConstraintValidationFailed": ConstraintError,
"Neo.ClientError.Schema.ConstraintViolation": ConstraintError,
"Neo.ClientError.Statement.ConstraintVerificationFailed": ConstraintError,
"Neo.ClientError.Statement.ConstraintViolation": ConstraintError,
"Neo.ClientError.Statement.InvalidSyntax": CypherSyntaxError,
"Neo.ClientError.Statement.SyntaxError": CypherSyntaxError,
"Neo.ClientError.Procedure.TypeError": CypherTypeError,
"Neo.ClientError.Statement.InvalidType": CypherTypeError,
"Neo.ClientError.Statement.TypeError": CypherTypeError,
"Neo.ClientError.General.ForbiddenOnReadOnlyDatabase": ForbiddenOnReadOnlyDatabaseError,
"Neo.ClientError.General.ReadOnly": Forbidden,
"Neo.ClientError.Schema.ForbiddenOnConstraintIndex": Forbidden,
"Neo.ClientError.Schema.IndexBelongsToConstraint": Forbidden,
"Neo.ClientError.Security.Forbidden": Forbidden,
"Neo.ClientError.Transaction.ForbiddenDueToTransactionType": Forbidden,
"Neo.ClientError.Security.AuthorizationFailed": AuthError,
"Neo.ClientError.Security.Unauthorized": AuthError,
"Neo.ClientError.Cluster.NotALeader": NotALeaderError
}
transient_errors = {
"Neo.TransientError.General.DatabaseUnavailable": DatabaseUnavailableError
}
class SessionExpired(Exception):
def __init__(self, session, *args, **kwargs):
super(SessionExpired, self).__init__(session, *args, **kwargs)
class TransactionError(Exception):
def __init__(self, transaction, *args, **kwargs):
super(TransactionError, self).__init__(*args, **kwargs)
self.transaction = transaction
| true | true |
1c3108951246f94df36ef26bb60584b60b79cd04 | 372 | py | Python | mmdet/models/utils/__init__.py | escapist2019/AugFPN | ffc7e5e2ec367a77d43b072968050951ca6406b6 | [
"Apache-2.0"
] | 144 | 2019-12-12T02:34:59.000Z | 2022-03-21T09:13:54.000Z | mmdet/models/utils/__init__.py | wyf-1996/AugFPN | cceb9be892767ba77db48fa1e36280c54a565fe1 | [
"Apache-2.0"
] | 18 | 2020-01-08T12:16:57.000Z | 2021-12-11T03:35:59.000Z | mmdet/models/utils/__init__.py | wyf-1996/AugFPN | cceb9be892767ba77db48fa1e36280c54a565fe1 | [
"Apache-2.0"
] | 44 | 2020-05-18T13:25:50.000Z | 2022-03-04T11:28:17.000Z | from .conv_module import ConvModule
from .norm import build_norm_layer
from .weight_init import (xavier_init, normal_init, uniform_init, kaiming_init,
bias_init_with_prob)
from .scale import Scale
__all__ = [
'ConvModule', 'build_norm_layer', 'xavier_init', 'normal_init',
'uniform_init', 'kaiming_init', 'bias_init_with_prob', 'Scale'
]
| 37.2 | 79 | 0.725806 | from .conv_module import ConvModule
from .norm import build_norm_layer
from .weight_init import (xavier_init, normal_init, uniform_init, kaiming_init,
bias_init_with_prob)
from .scale import Scale
__all__ = [
'ConvModule', 'build_norm_layer', 'xavier_init', 'normal_init',
'uniform_init', 'kaiming_init', 'bias_init_with_prob', 'Scale'
]
| true | true |
1c3108b1c505938bec71c8dbd25a9875d1220307 | 8,618 | py | Python | di/_utils/task.py | adriangb/di | f277bb7189c8e8bde41170afb3181e6600b06be8 | [
"MIT"
] | 57 | 2021-09-28T00:48:08.000Z | 2022-03-16T16:50:39.000Z | di/_utils/task.py | adriangb/di | f277bb7189c8e8bde41170afb3181e6600b06be8 | [
"MIT"
] | 59 | 2021-09-25T00:06:22.000Z | 2022-03-31T15:49:36.000Z | di/_utils/task.py | adriangb/di | f277bb7189c8e8bde41170afb3181e6600b06be8 | [
"MIT"
] | 3 | 2021-12-31T10:03:03.000Z | 2021-12-31T16:07:54.000Z | from __future__ import annotations
import contextlib
from contextlib import AsyncExitStack, ExitStack
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
NamedTuple,
Tuple,
TypeVar,
Union,
)
from di._utils.inspect import (
is_async_gen_callable,
is_coroutine_callable,
is_gen_callable,
)
from di._utils.scope_map import ScopeMap
from di._utils.types import CacheKey
from di.api.dependencies import DependantBase
from di.api.providers import DependencyProvider
from di.api.scopes import Scope
from di.exceptions import IncompatibleDependencyError
class ExecutionState(NamedTuple):
    """Per-execution state threaded through every ``Task.compute_*`` method."""

    # One exit stack per scope; async-capable scopes hold an AsyncExitStack,
    # sync-only scopes an ExitStack (Task relies on this difference to detect
    # an async dependency used in a sync scope).
    stacks: Mapping[Scope, Union[AsyncExitStack, ExitStack]]
    # Flat result slots, indexed by Task.task_id.
    results: List[Any]
    # Cached dependency values keyed by CacheKey, partitioned by scope.
    cache: ScopeMap[CacheKey, Any]
    # User-supplied overrides: provider -> value to use instead of calling it.
    values: Mapping[DependencyProvider, Any]
# Type variable for the value produced by a dependency provider.
DependencyType = TypeVar("DependencyType")
# Cache-miss sentinel: distinguishes "not cached" from a legitimately cached
# None (always compared with ``is``).
UNSET: Any = object()
class Task:
    """A single executable node of a resolved dependency graph.

    Wraps one user provider (``call``) together with everything needed to
    execute it: its scope, the slot its result occupies (``task_id`` indexes
    ``ExecutionState.results``), and a pre-selected ``compute`` strategy.

    ``__init__`` classifies the provider once (async generator, coroutine,
    sync generator, or plain callable) and whether caching is enabled, then
    binds ``self.compute`` to the matching specialised method so there is no
    per-execution dispatch cost.
    """

    __slots__ = (
        "wrapped_call",
        "user_function",
        "scope",
        "cache_key",
        "dependant",
        "task_id",
        "call_user_func_with_deps",
        "compute",
    )

    # Bound in __init__ to one of the compute_* methods below.
    compute: Any
    # The provider after wrapping (context manager for generator providers).
    wrapped_call: DependencyProvider
    # The original, unwrapped provider as supplied by the user.
    user_function: DependencyProvider

    def __init__(
        self,
        scope: Scope,
        call: DependencyProvider,
        use_cache: bool,
        cache_key: CacheKey,
        dependant: DependantBase[Any],
        task_id: int,
        positional_parameters: Iterable[Task],
        keyword_parameters: Iterable[Tuple[str, Task]],
    ) -> None:
        self.scope = scope
        self.user_function = call
        self.cache_key = cache_key
        self.dependant = dependant
        self.task_id = task_id
        # Classify the provider and pick the matching compute strategy:
        # (async generator | coroutine | sync generator | plain callable)
        # crossed with (cached | uncached).
        if is_async_gen_callable(self.user_function):
            # Async generator -> async context manager.
            self.wrapped_call = contextlib.asynccontextmanager(call)  # type: ignore[arg-type]
            if use_cache:
                self.compute = self.compute_async_cm_cache
            else:
                self.compute = self.compute_async_cm_no_cache
        elif is_coroutine_callable(self.user_function):
            # Coroutine function: awaited directly, no wrapping needed.
            self.wrapped_call = self.user_function
            if use_cache:
                self.compute = self.compute_async_coro_cache
            else:
                self.compute = self.compute_async_coro_no_cache
        elif is_gen_callable(call):
            # Sync generator -> sync context manager.
            self.wrapped_call = contextlib.contextmanager(call)  # type: ignore[arg-type]
            if use_cache:
                self.compute = self.compute_sync_cm_cache
            else:
                self.compute = self.compute_sync_cm_no_cache
        else:
            # Plain sync callable.
            self.wrapped_call = call
            if use_cache:
                self.compute = self.compute_sync_func_cache
            else:
                self.compute = self.compute_sync_func_no_cache
        self.call_user_func_with_deps = self.generate_execute_fn(
            self.wrapped_call, positional_parameters, keyword_parameters
        )

    def __hash__(self) -> int:
        # Tasks hash by their unique slot in the execution plan.
        return self.task_id

    def generate_execute_fn(
        self,
        call: DependencyProvider,
        positional_parameters: Iterable[Task],
        keyword_parameters: Iterable[Tuple[str, Task]],
    ) -> Callable[[List[Any]], Any]:
        """Code-generate ``execute(results) -> call(...)`` for this task.

        Each parameter is inlined as ``results[<task_id>]`` (positionally or
        as ``name=results[<task_id>]``), so no intermediary args/kwargs
        containers are built at execution time.
        """
        # this codegen speeds up argument collection and passing
        # by avoiding creation of intermediary containers to store the values
        positional_arg_template = "results[{}]"
        keyword_arg_template = "{}=results[{}]"
        args: List[str] = []
        for task in positional_parameters:
            args.append(positional_arg_template.format(task.task_id))
        for keyword, task in keyword_parameters:
            args.append(keyword_arg_template.format(keyword, task.task_id))
        lcls: Dict[str, Any] = {}
        glbls = {"call": call}
        # `call` is the only free variable of the generated function; every
        # argument index is baked into its source.
        exec(f'def execute(results): return call({",".join(args)})', glbls, lcls)
        return lcls["execute"]  # type: ignore[no-any-return]

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}(scope={self.scope}, call={self.user_function})"
        )

    async def compute_async_coro_cache(self, state: ExecutionState) -> None:
        """Await a coroutine provider: override -> cache -> await and cache."""
        if self.user_function in state.values:
            # User supplied an explicit value for this provider.
            state.results[self.task_id] = state.values[self.user_function]
            return
        value = state.cache.get_key(self.cache_key, scope=self.scope, default=UNSET)
        if value is not UNSET:
            # Cache hit for this scope.
            state.results[self.task_id] = value
            return
        dependency_value = await self.call_user_func_with_deps(state.results)
        state.results[self.task_id] = dependency_value
        state.cache.set(self.cache_key, dependency_value, scope=self.scope)

    async def compute_async_coro_no_cache(self, state: ExecutionState) -> None:
        """Await a coroutine provider: override check, then await (no cache)."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        dependency_value = await self.call_user_func_with_deps(state.results)
        state.results[self.task_id] = dependency_value

    async def compute_async_cm_cache(self, state: ExecutionState) -> None:
        """Enter an async context-manager provider with override/cache checks.

        Raises IncompatibleDependencyError if this task's scope only has a
        sync ExitStack (i.e. the scope cannot host async dependencies).
        """
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        value = state.cache.get_key(self.cache_key, scope=self.scope, default=UNSET)
        if value is not UNSET:
            state.results[self.task_id] = value
            return
        try:
            # AttributeError here means the scope's stack is a sync ExitStack.
            enter = state.stacks[self.scope].enter_async_context  # type: ignore[union-attr]
        except AttributeError:
            # NOTE(review): "canot" in the message below is a pre-existing
            # typo, left unchanged because it is runtime output.
            raise IncompatibleDependencyError(
                f"The dependency {self.user_function} is an awaitable dependency"
                f" and canot be used in the sync scope {self.scope}"
            ) from None
        dependency_value: Any = await enter(
            self.call_user_func_with_deps(state.results)
        )
        state.results[self.task_id] = dependency_value
        state.cache.set(self.cache_key, dependency_value, scope=self.scope)

    async def compute_async_cm_no_cache(self, state: ExecutionState) -> None:
        """Enter an async context-manager provider (override check, no cache).

        Raises IncompatibleDependencyError if this task's scope only has a
        sync ExitStack.
        """
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        try:
            enter = state.stacks[self.scope].enter_async_context  # type: ignore[union-attr]
        except AttributeError:
            # NOTE(review): "canot" typo preserved — runtime output.
            raise IncompatibleDependencyError(
                f"The dependency {self.user_function} is an awaitable dependency"
                f" and canot be used in the sync scope {self.scope}"
            ) from None
        dependency_value: Any = await enter(
            self.call_user_func_with_deps(state.results)
        )
        state.results[self.task_id] = dependency_value

    def compute_sync_cm_cache(self, state: ExecutionState) -> None:
        """Enter a sync context-manager provider: override -> cache -> enter."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        value = state.cache.get_key(self.cache_key, scope=self.scope, default=UNSET)
        if value is not UNSET:
            state.results[self.task_id] = value
            return
        val = state.stacks[self.scope].enter_context(
            self.call_user_func_with_deps(state.results)
        )
        state.results[self.task_id] = val
        state.cache.set(self.cache_key, val, scope=self.scope)

    def compute_sync_cm_no_cache(self, state: ExecutionState) -> None:
        """Enter a sync context-manager provider (override check, no cache)."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        state.results[self.task_id] = state.stacks[self.scope].enter_context(
            self.call_user_func_with_deps(state.results)
        )

    def compute_sync_func_cache(self, state: ExecutionState) -> None:
        """Call a plain sync provider: override -> cache -> call and cache."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        value = state.cache.get_key(self.cache_key, scope=self.scope, default=UNSET)
        if value is not UNSET:
            state.results[self.task_id] = value
            return
        val = self.call_user_func_with_deps(state.results)
        state.results[self.task_id] = val
        state.cache.set(self.cache_key, val, scope=self.scope)

    def compute_sync_func_no_cache(self, state: ExecutionState) -> None:
        """Call a plain sync provider (override check, no cache)."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        state.results[self.task_id] = self.call_user_func_with_deps(state.results)
| 37.469565 | 94 | 0.649107 | from __future__ import annotations
import contextlib
from contextlib import AsyncExitStack, ExitStack
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
NamedTuple,
Tuple,
TypeVar,
Union,
)
from di._utils.inspect import (
is_async_gen_callable,
is_coroutine_callable,
is_gen_callable,
)
from di._utils.scope_map import ScopeMap
from di._utils.types import CacheKey
from di.api.dependencies import DependantBase
from di.api.providers import DependencyProvider
from di.api.scopes import Scope
from di.exceptions import IncompatibleDependencyError
class ExecutionState(NamedTuple):
stacks: Mapping[Scope, Union[AsyncExitStack, ExitStack]]
results: List[Any]
cache: ScopeMap[CacheKey, Any]
values: Mapping[DependencyProvider, Any]
DependencyType = TypeVar("DependencyType")
UNSET: Any = object()
class Task:
    """An executable node of the dependency graph.

    Each Task wraps one user-supplied dependency provider.  The constructor
    inspects the provider once and binds ``compute`` to the matching
    strategy method (async/sync x context-manager/plain x cached/uncached),
    so no dispatching happens while the graph is being executed.
    """

    __slots__ = (
        "wrapped_call",
        "user_function",
        "scope",
        "cache_key",
        "dependant",
        "task_id",
        "call_user_func_with_deps",
        "compute",
    )

    # Bound to one of the compute_* methods below in __init__.
    compute: Any
    # The callable actually invoked: (async) generator providers are wrapped
    # into (async) context managers, everything else is used as-is.
    wrapped_call: DependencyProvider
    # The original provider exactly as supplied by the user; also the key
    # used to look up caller overrides in ExecutionState.values.
    user_function: DependencyProvider

    def __init__(
        self,
        scope: Scope,
        call: DependencyProvider,
        use_cache: bool,
        cache_key: CacheKey,
        dependant: DependantBase[Any],
        task_id: int,
        positional_parameters: Iterable[Task],
        keyword_parameters: Iterable[Tuple[str, Task]],
    ) -> None:
        self.scope = scope
        self.user_function = call
        self.cache_key = cache_key
        self.dependant = dependant
        self.task_id = task_id
        # Select the compute strategy once, up front.
        if is_async_gen_callable(self.user_function):
            self.wrapped_call = contextlib.asynccontextmanager(call)
            if use_cache:
                self.compute = self.compute_async_cm_cache
            else:
                self.compute = self.compute_async_cm_no_cache
        elif is_coroutine_callable(self.user_function):
            self.wrapped_call = self.user_function
            if use_cache:
                self.compute = self.compute_async_coro_cache
            else:
                self.compute = self.compute_async_coro_no_cache
        # Checked via self.user_function (== call) for consistency with the
        # branches above.
        elif is_gen_callable(self.user_function):
            self.wrapped_call = contextlib.contextmanager(call)
            if use_cache:
                self.compute = self.compute_sync_cm_cache
            else:
                self.compute = self.compute_sync_cm_no_cache
        else:
            self.wrapped_call = call
            if use_cache:
                self.compute = self.compute_sync_func_cache
            else:
                self.compute = self.compute_sync_func_no_cache
        self.call_user_func_with_deps = self.generate_execute_fn(
            self.wrapped_call, positional_parameters, keyword_parameters
        )

    def __hash__(self) -> int:
        # task_id is unique per task, so it doubles as the hash.
        return self.task_id

    def generate_execute_fn(
        self,
        call: DependencyProvider,
        positional_parameters: Iterable[Task],
        keyword_parameters: Iterable[Tuple[str, Task]],
    ) -> Callable[[List[Any]], Any]:
        """Build a function that calls ``call`` with its resolved dependencies.

        The generated function receives the shared ``results`` list and
        forwards ``results[task_id]`` for every parameter task.  Using
        ``exec`` wires the argument list once at construction time instead
        of assembling args/kwargs on every execution.
        """
        positional_arg_template = "results[{}]"
        keyword_arg_template = "{}=results[{}]"
        args: List[str] = []
        for task in positional_parameters:
            args.append(positional_arg_template.format(task.task_id))
        for keyword, task in keyword_parameters:
            args.append(keyword_arg_template.format(keyword, task.task_id))
        lcls: Dict[str, Any] = {}
        glbls = {"call": call}
        exec(f'def execute(results): return call({",".join(args)})', glbls, lcls)
        return lcls["execute"]

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}(scope={self.scope}, call={self.user_function})"
        )

    async def compute_async_coro_cache(self, state: ExecutionState) -> None:
        """Await a coroutine provider, honoring overrides and the cache."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        value = state.cache.get_key(self.cache_key, scope=self.scope, default=UNSET)
        if value is not UNSET:
            state.results[self.task_id] = value
            return
        dependency_value = await self.call_user_func_with_deps(state.results)
        state.results[self.task_id] = dependency_value
        state.cache.set(self.cache_key, dependency_value, scope=self.scope)

    async def compute_async_coro_no_cache(self, state: ExecutionState) -> None:
        """Await a coroutine provider; overrides honored, no caching."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        dependency_value = await self.call_user_func_with_deps(state.results)
        state.results[self.task_id] = dependency_value

    async def compute_async_cm_cache(self, state: ExecutionState) -> None:
        """Enter an async context-manager provider, with caching."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        value = state.cache.get_key(self.cache_key, scope=self.scope, default=UNSET)
        if value is not UNSET:
            state.results[self.task_id] = value
            return
        try:
            enter = state.stacks[self.scope].enter_async_context
        except AttributeError:
            # A sync scope holds a plain ExitStack, which has no
            # enter_async_context attribute.
            raise IncompatibleDependencyError(
                f"The dependency {self.user_function} is an awaitable dependency"
                f" and cannot be used in the sync scope {self.scope}"
            ) from None
        dependency_value: Any = await enter(
            self.call_user_func_with_deps(state.results)
        )
        state.results[self.task_id] = dependency_value
        state.cache.set(self.cache_key, dependency_value, scope=self.scope)

    async def compute_async_cm_no_cache(self, state: ExecutionState) -> None:
        """Enter an async context-manager provider; no caching."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        try:
            enter = state.stacks[self.scope].enter_async_context
        except AttributeError:
            # A sync scope holds a plain ExitStack, which has no
            # enter_async_context attribute.
            raise IncompatibleDependencyError(
                f"The dependency {self.user_function} is an awaitable dependency"
                f" and cannot be used in the sync scope {self.scope}"
            ) from None
        dependency_value: Any = await enter(
            self.call_user_func_with_deps(state.results)
        )
        state.results[self.task_id] = dependency_value

    def compute_sync_cm_cache(self, state: ExecutionState) -> None:
        """Enter a sync context-manager provider, with caching."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        value = state.cache.get_key(self.cache_key, scope=self.scope, default=UNSET)
        if value is not UNSET:
            state.results[self.task_id] = value
            return
        val = state.stacks[self.scope].enter_context(
            self.call_user_func_with_deps(state.results)
        )
        state.results[self.task_id] = val
        state.cache.set(self.cache_key, val, scope=self.scope)

    def compute_sync_cm_no_cache(self, state: ExecutionState) -> None:
        """Enter a sync context-manager provider; no caching."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        state.results[self.task_id] = state.stacks[self.scope].enter_context(
            self.call_user_func_with_deps(state.results)
        )

    def compute_sync_func_cache(self, state: ExecutionState) -> None:
        """Call a plain sync provider, with caching."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        value = state.cache.get_key(self.cache_key, scope=self.scope, default=UNSET)
        if value is not UNSET:
            state.results[self.task_id] = value
            return
        val = self.call_user_func_with_deps(state.results)
        state.results[self.task_id] = val
        state.cache.set(self.cache_key, val, scope=self.scope)

    def compute_sync_func_no_cache(self, state: ExecutionState) -> None:
        """Call a plain sync provider; no caching."""
        if self.user_function in state.values:
            state.results[self.task_id] = state.values[self.user_function]
            return
        state.results[self.task_id] = self.call_user_func_with_deps(state.results)
| true | true |
1c31098d86defc704ddef75172543813361119da | 107 | py | Python | python/ray/train/accelerator.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 22 | 2018-05-08T05:52:34.000Z | 2020-04-01T10:09:55.000Z | python/ray/train/accelerator.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 73 | 2021-09-25T07:11:39.000Z | 2022-03-26T07:10:59.000Z | python/ray/train/accelerator.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 10 | 2018-04-27T10:50:59.000Z | 2020-02-24T02:41:43.000Z | import abc
class Accelerator(abc.ABC):
    """Abstract base class for utilities that accelerate training.

    Declares no members itself; concrete acceleration methods are expected
    to be provided by subclasses.
    """
| 17.833333 | 65 | 0.728972 | import abc
class Accelerator(abc.ABC):
| true | true |
1c3109a7f8d581710331ce81326405acb22b82e2 | 3,842 | py | Python | fmralign/fetch_example_data.py | hugorichard/fmralign | b8990cc22204591399b731460375b99254b38527 | [
"BSD-3-Clause"
] | 12 | 2019-04-19T11:46:46.000Z | 2021-05-01T16:09:28.000Z | fmralign/fetch_example_data.py | hugorichard/fmralign | b8990cc22204591399b731460375b99254b38527 | [
"BSD-3-Clause"
] | 38 | 2018-10-29T19:32:56.000Z | 2022-01-26T17:08:29.000Z | fmralign/fetch_example_data.py | hugorichard/fmralign | b8990cc22204591399b731460375b99254b38527 | [
"BSD-3-Clause"
] | 3 | 2019-09-09T20:43:00.000Z | 2021-11-29T14:22:47.000Z | # -*- coding: utf-8 -*-
import os
from nilearn.datasets.utils import _fetch_files, _get_dataset_dir
import pandas as pd
def fetch_ibc_subjects_contrasts(subjects, data_dir=None, verbose=1):
    """Fetch all IBC contrast maps for each of ``subjects``.

    After downloading all relevant images that are not already cached,
    it returns a dataframe with all needed links.

    Parameters
    ----------
    subjects : list of str, or "all"
        Subjects data to download. Available strings are ['sub-01', 'sub-02',
        'sub-04' ... 'sub-09', 'sub-11' ... 'sub-15']. Passing "all" fetches
        every available subject.
    data_dir : string, optional
        Path of the data directory. Used to force data storage in a specified
        location.
    verbose : int, optional
        Verbosity level (0 means no message).

    Returns
    -------
    files : list of list of str
        List (for every subject) of list of paths (for every condition),
        in ap then pa acquisition.
    metadata_df : pandas.DataFrame
        Table containing some metadata for each available image in the
        dataset, as well as its path.
        Filtered to contain only the metadata for ``subjects``.
    mask : str
        Path to the mask to be used on the data.

    Notes
    -----
    This function is a caller to nilearn.datasets.utils._fetch_files in order
    to simplify examples reading and understanding for fmralign.

    See Also
    --------
    nilearn.datasets.fetch_localizer_calculation_task
    nilearn.datasets.fetch_localizer_contrasts
    """
    # The URLs can be retrieved from the nilearn account on OSF.
    # Use equality, not identity: `subjects is "all"` only worked by
    # accident of CPython string interning and raises a SyntaxWarning.
    if subjects == "all":
        subjects = ['sub-%02d' %
                    i for i in [1, 2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15]]
    dataset_name = 'ibc'
    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
                                verbose=verbose)
    # Download or retrieve the metadata, put it in a dataframe, list all
    # conditions and rewrite the paths to point at the local directory.
    metadata_path = _fetch_files(data_dir, [('ibc_3mm_all_subjects_metadata.csv',
                                             "https://osf.io/pcvje/download",
                                             {"uncompress": True})],
                                 verbose=verbose)
    metadata_df = pd.read_csv(metadata_path[0])
    conditions = metadata_df.condition.unique()
    # regex=False: data_dir is a filesystem path and may contain characters
    # that are special in regular expressions (e.g. backslashes on Windows).
    metadata_df['path'] = metadata_df['path'].str.replace(
        'path_to_dir', data_dir, regex=False)
    # Filter the dataframe to return only rows relevant for `subjects`.
    metadata_df = metadata_df[metadata_df.subject.isin(subjects)]
    # Download / retrieve the mask niimg and find its path.
    mask = _fetch_files(
        data_dir, [('gm_mask_3mm.nii.gz', "https://osf.io/yvju3/download",
                    {"uncompress": True})], verbose=verbose)[0]
    # OSF url key for downloading each subject's data separately.
    url_keys = {"sub-01": "8z23h", "sub-02": "e9kbm", "sub-04": "qn5b6",
                "sub-05": "u74a3", "sub-06": "83bje", "sub-07": "43j69",
                "sub-08": "ua8qx", "sub-09": "bxwtv", "sub-11": "3dfbv",
                "sub-12": "uat7d", "sub-13": "p238h", "sub-14": "prdk4",
                "sub-15": "sw72z"}
    # For every requested subject, download all contrast images and collect
    # their paths in `files`.
    opts = {'uncompress': True}
    files = []
    for subject in subjects:
        url = "https://osf.io/%s/download" % url_keys[subject]
        filenames = [(os.path.join(subject, "%s_ap.nii.gz" % condition),
                      url, opts) for condition in conditions]
        filenames.extend([(os.path.join(subject, "%s_pa.nii.gz" % condition),
                           url, opts) for condition in conditions])
        files.append(_fetch_files(data_dir, filenames, verbose=verbose))
    return files, metadata_df, mask
| 43.659091 | 81 | 0.618168 |
import os
from nilearn.datasets.utils import _fetch_files, _get_dataset_dir
import pandas as pd
def fetch_ibc_subjects_contrasts(subjects, data_dir=None, verbose=1):
if subjects is "all":
subjects = ['sub-%02d' %
i for i in [1, 2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15]]
dataset_name = 'ibc'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
metadata_path = _fetch_files(data_dir, [('ibc_3mm_all_subjects_metadata.csv',
"https://osf.io/pcvje/download",
{"uncompress": True})],
verbose=verbose)
metadata_df = pd.read_csv(metadata_path[0])
conditions = metadata_df.condition.unique()
metadata_df['path'] = metadata_df['path'].str.replace(
'path_to_dir', data_dir)
metadata_df = metadata_df[metadata_df.subject.isin(subjects)]
mask = _fetch_files(
data_dir, [('gm_mask_3mm.nii.gz', "https://osf.io/yvju3/download",
{"uncompress": True})], verbose=verbose)[0]
url_keys = {"sub-01": "8z23h", "sub-02": "e9kbm", "sub-04": "qn5b6",
"sub-05": "u74a3", "sub-06": "83bje", "sub-07": "43j69",
"sub-08": "ua8qx", "sub-09": "bxwtv", "sub-11": "3dfbv",
"sub-12": "uat7d", "sub-13": "p238h", "sub-14": "prdk4",
"sub-15": "sw72z"}
opts = {'uncompress': True}
files = []
for subject in subjects:
url = "https://osf.io/%s/download" % url_keys[subject]
filenames = [(os.path.join(subject, "%s_ap.nii.gz" % condition),
url, opts) for condition in conditions]
filenames.extend([(os.path.join(subject, "%s_pa.nii.gz" % condition),
url, opts) for condition in conditions])
files.append(_fetch_files(data_dir, filenames, verbose=verbose))
return files, metadata_df, mask
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.