hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
794d02d598caebdad524d1816be8af3b20dd8e84 | 365 | py | Python | setup.py | brunoilponseisae/bottle-cas | 2a031cea87abe1ec819701a94a4a82d70f105cc3 | ["BSD-3-Clause"] | null | null | null | setup.py | brunoilponseisae/bottle-cas | 2a031cea87abe1ec819701a94a4a82d70f105cc3 | ["BSD-3-Clause"] | null | null | null | setup.py | brunoilponseisae/bottle-cas | 2a031cea87abe1ec819701a94a4a82d70f105cc3 | ["BSD-3-Clause"] | null | null | null |
import setuptools
setuptools.setup(name='bottle-cas-python3',
version='3.0.0',
description='A fork of bottle-cas package supporting python3',
author='Bruno Ilponse',
url='http://github.com/brunoilponseisae/bottle-cas',
packages=['bottle_cas'],
python_requires='>=3.6',
install_requires=['bottle', 'beaker', 'requests'],)
| 30.416667 | 68 | 0.663014 |
794d02f48c71f40e2d825863b8204e88bd9ce388 | 3,353 | py | Python | UniGrammarRuntime/backends/multilanguage/antlr4.py | UniGrammar/UniGrammarRuntime.py | 58097f1d03f35c346a0534d1eb821b98edd25ad5 | ["Unlicense"] | null | null | null | UniGrammarRuntime/backends/multilanguage/antlr4.py | UniGrammar/UniGrammarRuntime.py | 58097f1d03f35c346a0534d1eb821b98edd25ad5 | ["Unlicense"] | null | null | null | UniGrammarRuntime/backends/multilanguage/antlr4.py | UniGrammar/UniGrammarRuntime.py | 58097f1d03f35c346a0534d1eb821b98edd25ad5 | ["Unlicense"] | null | null | null |
import typing
from ...grammarClasses import LL
from ...IParsingBackend import IParsingBackend, ToolSpecificGrammarASTWalkStrategy
from ...ToolMetadata import Product, ToolMetadata
antlr4 = None
try:
from antlrCompile.backends.python import ANTLRInternalClassesPython
from antlrCompile.core import ANTLRParserFactory as ANTLRCompileANTLRParserFactory
from antlrCompile.core import backendsPool
except ImportError:
from warnings import warn
antlrCompileNotInstalledErrorMessage = "antlrCompile is not installed, generation of ANTLR bundles and visualization of results is not available"
warn(antlrCompileNotInstalledErrorMessage)
class ANTLRDummy:
__slots__ = ()
def compileStr(self, *args, **kwargs):
raise NotImplementedError(antlrCompileNotInstalledErrorMessage)
class ANTLRCompileDummy:
__slots__ = ()
def __init__(self, *args, **kwargs):
raise NotImplementedError(antlrCompileNotInstalledErrorMessage)
ANTLR = ANTLRDummy
ANTLRCompileVis = ANTLRCompileDummy
ANTLRCompileANTLRParserFactory = ANTLRCompileDummy
ANTLRInternalClassesPython = ANTLRCompileDummy
toolGithubOrg = "https://github.com/antlr"
toolRepoBase = toolGithubOrg + "/antlr4"
toolRuntimesBase = toolRepoBase + "/tree/master/runtime"
languagesRemap = {
"python": "Python3",
"js": "JavaScript",
"java": "Java",
"go": "Go",
"c++": "Cpp",
"c#": "CSharp",
"swift": "Swift",
}
class ANTLRParserFactory(ANTLRCompileANTLRParserFactory):
__slots__ = ()
META = ToolMetadata(
Product(
name="antlr4",
website=toolRepoBase,
),
runtimeLib={
lang: (toolRuntimesBase + "/" + antlrLang) for lang, antlrLang in languagesRemap.items()
},
grammarClasses=(LL,),
buildsTree=True,
)
def _bundleToIterable(self, backend, grammarResources: "InMemoryGrammarResources") -> typing.Iterable[typing.Any]:
return backend._somethingToIterable(grammarResources, lambda grammarResources, role, className: grammarResources.parent.backendsPythonAST[self.__class__.PARSER_CLASS.NAME, className])
def fromBundle(self, grammarResources: "InMemoryGrammarResources") -> "antlrCompile.core.ANTLRParser":
global antlr4
pythonBackend = backendsPool(ANTLRInternalClassesPython)
antlr4 = pythonBackend.antlr4
return self._fromAttrIterable(pythonBackend, self._bundleToIterable(pythonBackend, grammarResources))
class ANTLRWalkStrategy(ToolSpecificGrammarASTWalkStrategy):
__slots__ = ()
def iterateChildren(self, node):
return node.children
def isTerminal(self, node: "antlr4.tree.Tree.TerminalNodeImpl") -> bool:
return isinstance(node, (str, self.parserFactory.antlr4.tree.Tree.TerminalNode, self.parserFactory.antlr4.Token))
def iterateCollection(self, lst: "antlr4.ParserRuleContext.ParserRuleContext") -> typing.Any:
if lst:
if lst.children:
return lst.children
return ()
def isCollection(self, lst: typing.Any) -> bool:
return isinstance(lst, self.parserFactory.antlr4.RuleContext)
class ANTLRParsingBackend(IParsingBackend):
__slots__ = ()
PARSER = ANTLRParserFactory
WSTR = ANTLRWalkStrategy
def terminalNodeToStr(self, token: typing.Union["antlr4.Token.CommonToken", "antlr4.tree.Tree.TerminalNodeImpl"]) -> typing.Optional[str]:
if token is not None:
if isinstance(token, str):
return token
if isinstance(token, antlr4.Token):
return token.text
return token.getText()
return None
| 30.481818 | 185 | 0.778407 |
794d032847686cd58db8599cdbfe682972a91a39 | 1,048 | py | Python | docker/duktape-wiki-build-ubuntu-18.04/run.py | svaarala/duktape-wiki | 2a1bfa8345e67b2d8d6274a08f1cbfc0296766c9 | ["MIT"] | 26 | 2016-05-03T20:37:36.000Z | 2022-03-09T15:07:03.000Z | docker/duktape-wiki-build-ubuntu-18.04/run.py | svaarala/duktape-wiki | 2a1bfa8345e67b2d8d6274a08f1cbfc0296766c9 | ["MIT"] | 87 | 2015-09-02T22:49:37.000Z | 2021-07-23T15:02:10.000Z | docker/duktape-wiki-build-ubuntu-18.04/run.py | svaarala/duktape-wiki | 2a1bfa8345e67b2d8d6274a08f1cbfc0296766c9 | ["MIT"] | 18 | 2016-05-13T12:02:21.000Z | 2021-11-12T13:44:36.000Z |
#!/usr/bin/env python2
import os
import sys
import subprocess
import zipfile
import StringIO
def main():
# Unpack ZIP from stdin, contains input repo snapshot etc.
zf = zipfile.ZipFile(StringIO.StringIO(sys.stdin.read()))
zf.extractall()
os.chdir('duktape-wiki')
# Actual dist build.
os.system('bash build_pandoc.sh')
os.system('cd /tmp/wiki-output-tmp && zip /tmp/duktape-wiki-dist-html.zip *')
# Text conversion from HTML files, allows diffing dists easily.
os.system('cd /tmp && mkdir wiki-output-txt && cd wiki-output-tmp && for i in *.html; do html2text -o ../wiki-output-txt/${i%%.html}.txt $i; done')
os.system('cd /tmp/wiki-output-txt && zip /tmp/duktape-wiki-dist-txt.zip *')
# Create output ZIP. Output ZIP file must appear last in stdout with no
# trailing garbage. Leading garbage is automatically ignored by ZIP.
os.system('cd /tmp && zip out.zip duktape-wiki-dist-html.zip duktape-wiki-dist-txt.zip')
os.system('cat /tmp/out.zip')
if __name__ == '__main__':
main()
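# Usage sketch (added for illustration, not part of the original script): the
# comments above describe a ZIP-over-stdio protocol -- the build container reads
# a snapshot ZIP on stdin and emits the result ZIP as the last bytes on stdout,
# with any leading garbage ignored by the ZIP reader. A hypothetical invocation
# of an image built from this directory (image name is an assumption) could be:
#
#   cat duktape-wiki-snapshot.zip | docker run -i duktape-wiki-build-ubuntu-18.04 > out.zip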
| 33.806452 | 151 | 0.682252 |
794d03a946fda703a6693cc7edf2305d8a5d8b2a | 537 | py | Python | app.py | Misschl/flask-fresh | df17fd377b9e27aaad9fe0c5582c56098d09068c | ["Apache-2.0"] | null | null | null | app.py | Misschl/flask-fresh | df17fd377b9e27aaad9fe0c5582c56098d09068c | ["Apache-2.0"] | null | null | null | app.py | Misschl/flask-fresh | df17fd377b9e27aaad9fe0c5582c56098d09068c | ["Apache-2.0"] | 1 | 2020-12-21T14:01:53.000Z | 2020-12-21T14:01:53.000Z |
from flask import Flask
import config
from utils.core import DemoInit
from utils.extentions import db, session, admin, csrf
app = Flask(
__name__,
template_folder=config.TEMPLATE_FOLDER,
static_folder=config.STATIC_FOLDER
)
app.config.from_object(config)
demo = DemoInit(app)
extentions = [
db,
session,
admin,
csrf,
]
application = demo.init(extentions)
@app.template_global('fdfs_img_tag')
def fdfs_img_tag(value):
return config.FAST_DFS_DOMAIN + value
if __name__ == '__main__':
app.run()
| 16.272727 | 53 | 0.726257 |
794d045b561ffeaa0626f86590b2260ef403de71 | 7,968 | py | Python | kubernetes/client/models/v1_gce_persistent_disk_volume_source.py | lp67/python | 33c5ea9835356410ce4a9fa54a02c6a2a22143c6 | ["Apache-2.0"] | null | null | null | kubernetes/client/models/v1_gce_persistent_disk_volume_source.py | lp67/python | 33c5ea9835356410ce4a9fa54a02c6a2a22143c6 | ["Apache-2.0"] | 4 | 2019-11-19T10:33:47.000Z | 2022-03-01T03:33:52.000Z | kubernetes/client/models/v1_gce_persistent_disk_volume_source.py | mohramadan911/PythonClientAPI | 5d111812c81b7a573ac8661d1aec60bb97072412 | ["Apache-2.0"] | 2 | 2021-08-10T16:35:31.000Z | 2021-09-14T04:53:06.000Z |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.20
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1GCEPersistentDiskVolumeSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'fs_type': 'str',
'partition': 'int',
'pd_name': 'str',
'read_only': 'bool'
}
attribute_map = {
'fs_type': 'fsType',
'partition': 'partition',
'pd_name': 'pdName',
'read_only': 'readOnly'
}
def __init__(self, fs_type=None, partition=None, pd_name=None, read_only=None, local_vars_configuration=None): # noqa: E501
"""V1GCEPersistentDiskVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._fs_type = None
self._partition = None
self._pd_name = None
self._read_only = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
if partition is not None:
self.partition = partition
self.pd_name = pd_name
if read_only is not None:
self.read_only = read_only
@property
def fs_type(self):
"""Gets the fs_type of this V1GCEPersistentDiskVolumeSource. # noqa: E501
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:return: The fs_type of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1GCEPersistentDiskVolumeSource.
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:param fs_type: The fs_type of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def partition(self):
"""Gets the partition of this V1GCEPersistentDiskVolumeSource. # noqa: E501
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:return: The partition of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:rtype: int
"""
return self._partition
@partition.setter
def partition(self, partition):
"""Sets the partition of this V1GCEPersistentDiskVolumeSource.
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:param partition: The partition of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:type: int
"""
self._partition = partition
@property
def pd_name(self):
"""Gets the pd_name of this V1GCEPersistentDiskVolumeSource. # noqa: E501
Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:return: The pd_name of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._pd_name
@pd_name.setter
def pd_name(self, pd_name):
"""Sets the pd_name of this V1GCEPersistentDiskVolumeSource.
Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:param pd_name: The pd_name of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pd_name is None: # noqa: E501
raise ValueError("Invalid value for `pd_name`, must not be `None`") # noqa: E501
self._pd_name = pd_name
@property
def read_only(self):
"""Gets the read_only of this V1GCEPersistentDiskVolumeSource. # noqa: E501
ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:return: The read_only of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1GCEPersistentDiskVolumeSource.
ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:param read_only: The read_only of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1GCEPersistentDiskVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1GCEPersistentDiskVolumeSource):
return True
return self.to_dict() != other.to_dict()
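# Illustrative sketch (added; not part of the generated client): constructing
# this model with the keyword arguments defined in __init__ above. pd_name is
# the only required field when client-side validation is enabled; the disk name
# below is a made-up example value.
#
#   disk = V1GCEPersistentDiskVolumeSource(pd_name="my-data-disk",
#                                          fs_type="ext4",
#                                          partition=1,
#                                          read_only=True)
#   print(disk.to_dict())  # {'fs_type': 'ext4', 'partition': 1, 'pd_name': 'my-data-disk', 'read_only': True}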
| 38.307692 | 367 | 0.646084 |
794d04a429cc7d79656eb901731942118f7f9e4c | 1,768 | py | Python | tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py | torilov/ClickHouse | bbbe51033dfd5b8c3d54e168475ca707ac7ec0b4 | ["Apache-2.0"] | 2 | 2020-08-31T08:39:21.000Z | 2020-12-29T08:19:11.000Z | tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py | torilov/ClickHouse | bbbe51033dfd5b8c3d54e168475ca707ac7ec0b4 | ["Apache-2.0"] | null | null | null | tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py | torilov/ClickHouse | bbbe51033dfd5b8c3d54e168475ca707ac7ec0b4 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import os
import sys
import signal
CURDIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CURDIR, 'helpers'))
from client import client, prompt, end_of_block
log = None
# uncomment the line below for debugging
#log=sys.stdout
with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2:
client1.expect(prompt)
client2.expect(prompt)
client1.send('SET allow_experimental_live_view = 1')
client1.expect(prompt)
client2.send('SET allow_experimental_live_view = 1')
client2.expect(prompt)
client1.send('DROP TABLE IF EXISTS test.lv')
client1.expect(prompt)
client1.send('DROP TABLE IF EXISTS test.mt')
client1.expect(prompt)
client1.send('SET live_view_heartbeat_interval=1')
client1.expect(prompt)
client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()')
client1.expect(prompt)
client1.send('CREATE LIVE VIEW test.lv WITH TIMEOUT AS SELECT sum(a) FROM test.mt')
client1.expect(prompt)
client1.send('WATCH test.lv EVENTS FORMAT CSV')
client1.expect('Progress: 1.00 rows.*\)')
client2.send('INSERT INTO test.mt VALUES (1)')
client2.expect(prompt)
client1.expect('Progress: 2.00 rows.*\)')
client2.send('INSERT INTO test.mt VALUES (2),(3)')
client2.expect(prompt)
# wait for heartbeat
client1.expect('Progress: 3.00 rows.*\)')
# send Ctrl-C
client1.send('\x03', eol='')
match = client1.expect('(%s)|([#\$] )' % prompt)
if match.groups()[1]:
client1.send(client1.command)
client1.expect(prompt)
client1.send('DROP TABLE test.lv')
client1.expect(prompt)
client1.send('DROP TABLE test.mt')
client1.expect(prompt)
| 33.358491 | 94 | 0.688914 |
794d04eda71a36740be3cea3af6206ca660461ad | 31,369 | py | Python | AppServer/google/appengine/api/logservice/logservice.py | echoi-appscale/appscale | bff3d6a9d42b0c2dd58796c4fc6aa1ddd2c00bcc | ["Apache-2.0"] | 1 | 2016-02-24T02:26:35.000Z | 2016-02-24T02:26:35.000Z | AppServer/google/appengine/api/logservice/logservice.py | echoi-appscale/appscale | bff3d6a9d42b0c2dd58796c4fc6aa1ddd2c00bcc | ["Apache-2.0"] | null | null | null | AppServer/google/appengine/api/logservice/logservice.py | echoi-appscale/appscale | bff3d6a9d42b0c2dd58796c4fc6aa1ddd2c00bcc | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
LogService API.
This module allows apps to flush logs, provide status messages, and
programmatically access their request and application logs.
"""
import base64
import cStringIO
import logging
import os
import re
import sys
import threading
import time
import warnings
from google.net.proto import ProtocolBuffer
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api.logservice import log_service_pb
from google.appengine.api.logservice import logsutil
from google.appengine.datastore import datastore_rpc
from google.appengine.runtime import apiproxy_errors
AUTOFLUSH_ENABLED = True
AUTOFLUSH_EVERY_SECONDS = 60
AUTOFLUSH_EVERY_BYTES = 4096
AUTOFLUSH_EVERY_LINES = 50
MAX_ITEMS_PER_FETCH = 1000
LOG_LEVEL_DEBUG = 0
LOG_LEVEL_INFO = 1
LOG_LEVEL_WARNING = 2
LOG_LEVEL_ERROR = 3
LOG_LEVEL_CRITICAL = 4
MODULE_ID_RE_STRING = r'(?!-)[a-z\d\-]{1,63}'
MODULE_VERSION_RE_STRING = r'(?!-)[a-z\d\-]{1,100}'
_MAJOR_VERSION_ID_PATTERN = r'^(?:(?:(%s):)?)(%s)$' % (MODULE_ID_RE_STRING,
MODULE_VERSION_RE_STRING)
_MAJOR_VERSION_ID_RE = re.compile(_MAJOR_VERSION_ID_PATTERN)
_REQUEST_ID_PATTERN = r'^[\da-fA-F]+$'
_REQUEST_ID_RE = re.compile(_REQUEST_ID_PATTERN)
class Error(Exception):
"""Base error class for this module."""
class InvalidArgumentError(Error):
"""Function argument has invalid value."""
class TimeoutError(Error):
"""Requested timeout for fetch() call has expired while iterating results."""
def __init__(self, msg, offset, last_end_time):
Error.__init__(self, msg)
self.__offset = offset
self.__last_end_time = last_end_time
@property
def offset(self):
"""Binary offset indicating the current position in the result stream.
May be submitted to future Log read requests to continue iterating logs
starting exactly where this iterator left off.
Returns:
A byte string representing an offset into the log stream, or None.
"""
return self.__offset
@property
def last_end_time(self):
"""End time of the last request examined prior to the timeout, or None.
Returns:
A float representing the completion time in seconds since the Unix
epoch of the last request examined.
"""
return self.__last_end_time
class LogsBuffer(object):
"""Threadsafe buffer for storing and periodically flushing app logs."""
_MAX_FLUSH_SIZE = int(1e6)
_MAX_LINE_SIZE = _MAX_FLUSH_SIZE
assert _MAX_LINE_SIZE <= _MAX_FLUSH_SIZE
def __init__(self, stream=None, stderr=False):
"""Initializes the buffer, which wraps the given stream or sys.stderr.
The state of the LogsBuffer is protected by a separate lock. The lock is
acquired before any variables are mutated or accessed, and released
afterward. A recursive lock is used so that a single thread can acquire the
lock multiple times, and release it only when an identical number of
'unlock()' calls have been performed.
Args:
stream: A file-like object to store logs. Defaults to a cStringIO object.
stderr: If specified, use sys.stderr as the underlying stream.
"""
self._stderr = stderr
if self._stderr:
assert stream is None
else:
self._stream = stream or cStringIO.StringIO()
self._lock = threading.RLock()
self._reset()
def _lock_and_call(self, method, *args):
"""Calls 'method' while holding the buffer lock."""
self._lock.acquire()
try:
return method(*args)
finally:
self._lock.release()
def stream(self):
"""Returns the underlying file-like object used to buffer logs."""
if self._stderr:
return sys.stderr
else:
return self._stream
def lines(self):
"""Returns the number of log lines currently buffered."""
return self._lock_and_call(lambda: self._lines)
def bytes(self):
"""Returns the size of the log buffer, in bytes."""
return self._lock_and_call(lambda: self._bytes)
def age(self):
"""Returns the number of seconds since the log buffer was flushed."""
return self._lock_and_call(lambda: time.time() - self._flush_time)
def flush_time(self):
"""Returns last time that the log buffer was flushed."""
return self._lock_and_call(lambda: self._flush_time)
def contents(self):
"""Returns the contents of the logs buffer."""
return self._lock_and_call(self._contents)
def _contents(self):
"""Internal version of contents() with no locking."""
try:
return self.stream().getvalue()
except AttributeError:
return ''
def reset(self):
"""Resets the buffer state, without clearing the underlying stream."""
self._lock_and_call(self._reset)
def _reset(self):
"""Internal version of reset() with no locking."""
contents = self._contents()
self._bytes = len(contents)
self._lines = len(contents.split('\n')) - 1
self._flush_time = time.time()
self._request = logsutil.RequestID()
def clear(self):
"""Clears the contents of the logs buffer, and resets autoflush state."""
self._lock_and_call(self._clear)
def _clear(self):
"""Internal version of clear() with no locking."""
if self._bytes > 0:
self.stream().truncate(0)
self._reset()
def close(self):
"""Closes the underlying stream, flushing the current contents."""
self._lock_and_call(self._close)
def _close(self):
"""Internal version of close() with no locking."""
self._flush()
self.stream().close()
def parse_logs(self):
"""Parse the contents of the buffer and return an array of log lines."""
return logsutil.ParseLogs(self.contents())
def write(self, line):
"""Writes a line to the logs buffer."""
return self._lock_and_call(self._write, line)
def writelines(self, seq):
"""Writes each line in the given sequence to the logs buffer."""
for line in seq:
self.write(line)
def _write(self, line):
"""Writes a line to the logs buffer."""
if self._request != logsutil.RequestID():
self._reset()
self.stream().write(line)
self._lines += 1
self._bytes += len(line)
self._autoflush()
@staticmethod
def _truncate(line, max_length=_MAX_LINE_SIZE):
"""Truncates a potentially long log down to a specified maximum length."""
if len(line) > max_length:
original_length = len(line)
suffix = '...(length %d)' % original_length
line = line[:max_length - len(suffix)] + suffix
return line
def flush(self):
"""Flushes the contents of the logs buffer.
This method holds the buffer lock until the API call has finished to ensure
that flush calls are performed in the correct order, so that log messages
written during the flush call aren't dropped or accidentally wiped, and so
that the other buffer state variables (flush time, lines, bytes) are updated
synchronously with the flush.
"""
self._lock_and_call(self._flush)
def _flush(self):
"""Internal version of flush() with no locking."""
logs = self.parse_logs()
self._clear()
first_iteration = True
while logs or first_iteration:
first_iteration = False
request = log_service_pb.FlushRequest()
group = log_service_pb.UserAppLogGroup()
byte_size = 0
n = 0
for entry in logs:
if len(entry[2]) > LogsBuffer._MAX_LINE_SIZE:
entry = list(entry)
entry[2] = self._truncate(entry[2], LogsBuffer._MAX_LINE_SIZE)
if byte_size + len(entry[2]) > LogsBuffer._MAX_FLUSH_SIZE:
break
line = group.add_log_line()
line.set_timestamp_usec(entry[0])
line.set_level(entry[1])
line.set_message(entry[2])
byte_size += 1 + group.lengthString(line.ByteSize())
n += 1
assert n > 0 or not logs
logs = logs[n:]
request.set_logs(group.Encode())
response = api_base_pb.VoidProto()
apiproxy_stub_map.MakeSyncCall('logservice', 'Flush', request, response)
def autoflush(self):
"""Flushes the buffer if certain conditions have been met."""
self._lock_and_call(self._autoflush)
def _autoflush(self):
"""Internal version of autoflush() with no locking."""
if not self.autoflush_enabled():
return
if ((AUTOFLUSH_EVERY_SECONDS and self.age() >= AUTOFLUSH_EVERY_SECONDS) or
(AUTOFLUSH_EVERY_LINES and self.lines() >= AUTOFLUSH_EVERY_LINES) or
(AUTOFLUSH_EVERY_BYTES and self.bytes() >= AUTOFLUSH_EVERY_BYTES)):
self._flush()
def autoflush_enabled(self):
"""Indicates if the buffer will periodically flush logs during a request."""
return AUTOFLUSH_ENABLED
_global_buffer = LogsBuffer(stderr=True)
def logs_buffer():
"""Returns the LogsBuffer used by the current request."""
return _global_buffer
def write(message):
"""Adds 'message' to the logs buffer, and checks for autoflush.
Args:
message: A message (string) to be written to application logs.
"""
logs_buffer().write(message)
def clear():
"""Clear the logs buffer and reset the autoflush state."""
logs_buffer().clear()
def autoflush():
"""If AUTOFLUSH conditions have been met, performs a Flush API call."""
logs_buffer().autoflush()
def flush():
"""Flushes log lines that are currently buffered."""
logs_buffer().flush()
def flush_time():
"""Returns last time that the logs buffer was flushed."""
return logs_buffer().flush_time()
def log_buffer_age():
"""Returns the number of seconds since the logs buffer was flushed."""
return logs_buffer().age()
def log_buffer_contents():
"""Returns the contents of the logs buffer."""
return logs_buffer().contents()
def log_buffer_bytes():
"""Returns the size of the logs buffer, in bytes."""
return logs_buffer().bytes()
def log_buffer_lines():
"""Returns the number of log lines currently buffered."""
return logs_buffer().lines()
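# Usage sketch (added for illustration; assumes an App Engine runtime with the
# logservice stub available): the module-level helpers above all delegate to the
# request-global LogsBuffer, e.g.
#
#   from google.appengine.api.logservice import logservice
#   logservice.write('starting expensive work\n')
#   ...                    # buffered lines may autoflush on age/line/byte limits
#   logservice.flush()     # force a Flush API call for anything still buffered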
class _LogQueryResult(object):
"""A container that holds a log request and provides an iterator to read logs.
A _LogQueryResult object is the standard returned item for a call to fetch().
It is iterable - each value returned is a log that the user has queried for,
and internally, it holds a cursor that it uses to fetch more results once the
current, locally held set, are exhausted.
Properties:
_request: A LogReadRequest that contains the parameters the user has set for
the initial fetch call, which will be updated with a more current cursor
if more logs are requested.
_logs: A list of RequestLogs corresponding to logs the user has asked for.
_read_called: A boolean that indicates if a Read call has even been made
with the request stored in this object.
"""
def __init__(self, request, timeout=None):
"""Constructor.
Args:
request: A LogReadRequest object that will be used for Read calls.
"""
self._request = request
self._logs = []
self._read_called = False
self._last_end_time = None
self._end_time = None
if timeout is not None:
self._end_time = time.time() + timeout
def __iter__(self):
"""Provides an iterator that yields log records one at a time."""
while True:
for log_item in self._logs:
yield RequestLog(log_item)
if not self._read_called or self._request.has_offset():
if self._end_time and time.time() >= self._end_time:
offset = None
if self._request.has_offset():
offset = self._request.offset().Encode()
raise TimeoutError('A timeout occurred while iterating results',
offset=offset, last_end_time=self._last_end_time)
self._read_called = True
self._advance()
else:
break
def _advance(self):
"""Acquires additional logs via cursor.
This method is used by the iterator when it has exhausted its current set of
logs to acquire more logs and update its internal structures accordingly.
"""
response = log_service_pb.LogReadResponse()
try:
apiproxy_stub_map.MakeSyncCall('logservice', 'Read', self._request,
response)
except apiproxy_errors.ApplicationError, e:
if e.application_error == log_service_pb.LogServiceError.INVALID_REQUEST:
raise InvalidArgumentError(e.error_detail)
raise Error(e.error_detail)
self._logs = response.log_list()
self._request.clear_offset()
if response.has_offset():
self._request.mutable_offset().CopyFrom(response.offset())
self._last_end_time = None
if response.has_last_end_time():
self._last_end_time = response.last_end_time() / 1e6
class RequestLog(object):
"""Complete log information about a single request to an application."""
def __init__(self, request_log=None):
if type(request_log) is str:
self.__pb = log_service_pb.RequestLog(base64.b64decode(request_log))
elif request_log.__class__ == log_service_pb.RequestLog:
self.__pb = request_log
else:
self.__pb = log_service_pb.RequestLog()
self.__lines = []
def __repr__(self):
return 'RequestLog(\'%s\')' % base64.b64encode(self.__pb.Encode())
def __str__(self):
if self.module_id == 'default':
return ('<RequestLog(app_id=%s, version_id=%s, request_id=%s)>' %
(self.app_id, self.version_id, base64.b64encode(self.request_id)))
else:
return ('<RequestLog(app_id=%s, module_id=%s, version_id=%s, '
'request_id=%s)>' %
(self.app_id, self.module_id, self.version_id,
base64.b64encode(self.request_id)))
@property
def _pb(self):
return self.__pb
@property
def app_id(self):
"""Application id that handled this request, as a string."""
return self.__pb.app_id()
@property
def server_id(self):
"""Module id that handled this request, as a string."""
logging.warning('The server_id property is deprecated, please use '
'the module_id property instead.')
return self.__pb.module_id()
@property
def module_id(self):
"""Module id that handled this request, as a string."""
return self.__pb.module_id()
@property
def version_id(self):
"""Version of the application that handled this request, as a string."""
return self.__pb.version_id()
@property
def request_id(self):
"""Globally unique identifier for a request, based on request start time.
Request ids for requests which started later will compare greater as
binary strings than those for requests which started earlier.
Returns:
A byte string containing a unique identifier for this request.
"""
return self.__pb.request_id()
@property
def offset(self):
"""Binary offset indicating current position in the result stream.
May be submitted to future Log read requests to continue immediately after
this request.
Returns:
A byte string representing an offset into the active result stream.
"""
if self.__pb.has_offset():
return self.__pb.offset().Encode()
return None
@property
def ip(self):
"""The origin IP address of the request, as a string."""
return self.__pb.ip()
@property
def nickname(self):
"""Nickname of the user that made the request if known and logged in.
Returns:
A string representation of the logged in user's nickname, or None.
"""
if self.__pb.has_nickname():
return self.__pb.nickname()
return None
@property
def start_time(self):
"""Time at which request was known to have begun processing.
Returns:
A float representing the time this request began processing in seconds
since the Unix epoch.
"""
return self.__pb.start_time() / 1e6
@property
def end_time(self):
"""Time at which request was known to have completed.
Returns:
A float representing the request completion time in seconds since the
Unix epoch.
"""
return self.__pb.end_time() / 1e6
@property
def latency(self):
"""Time required to process request in seconds, as a float."""
return self.__pb.latency() / 1e6
@property
def mcycles(self):
"""Number of machine cycles used to process request, as an integer."""
return self.__pb.mcycles()
@property
def method(self):
"""Request method (GET, PUT, POST, etc), as a string."""
return self.__pb.method()
@property
def resource(self):
"""Resource path on server requested by client.
For example, http://nowhere.com/app would have a resource string of '/app'.
Returns:
A string containing the path component of the request URL.
"""
return self.__pb.resource()
@property
def http_version(self):
"""HTTP version of request, as a string."""
return self.__pb.http_version()
@property
def status(self):
"""Response status of request, as an int."""
return self.__pb.status()
@property
def response_size(self):
"""Size in bytes sent back to client by request, as a long."""
return self.__pb.response_size()
@property
def referrer(self):
"""Referrer URL of request as a string, or None."""
if self.__pb.has_referrer():
return self.__pb.referrer()
return None
@property
def user_agent(self):
"""User agent used to make the request as a string, or None."""
if self.__pb.has_user_agent():
return self.__pb.user_agent()
return None
@property
def url_map_entry(self):
"""File or class within URL mapping used for request.
Useful for tracking down the source code which was responsible for managing
request, especially for multiply mapped handlers.
Returns:
A string containing a file or class name.
"""
return self.__pb.url_map_entry()
@property
def combined(self):
"""Apache combined log entry for request.
The information in this field can be constructed from the rest of
this message, however, this field is included for convenience.
Returns:
A string containing an Apache-style log line in the form documented at
http://httpd.apache.org/docs/1.3/logs.html.
"""
return self.__pb.combined()
@property
def api_mcycles(self):
"""Number of machine cycles spent in API calls while processing request.
Deprecated. This value is no longer meaningful.
Returns:
Number of API machine cycles used as a long, or None if not available.
"""
warnings.warn('api_mcycles does not return a meaningful value.',
DeprecationWarning, stacklevel=2)
if self.__pb.has_api_mcycles():
return self.__pb.api_mcycles()
return None
@property
def host(self):
"""The Internet host and port number of the resource being requested.
Returns:
A string representing the host and port receiving the request, or None
if not available.
"""
if self.__pb.has_host():
return self.__pb.host()
return None
@property
def cost(self):
"""The estimated cost of this request, in fractional dollars.
Returns:
A float representing an estimated fractional dollar cost of this
request, or None if not available.
"""
if self.__pb.has_cost():
return self.__pb.cost()
return None
@property
def task_queue_name(self):
"""The request's queue name, if generated via the Task Queue API.
Returns:
A string containing the request's queue name if relevant, or None.
"""
if self.__pb.has_task_queue_name():
return self.__pb.task_queue_name()
return None
@property
def task_name(self):
"""The request's task name, if generated via the Task Queue API.
Returns:
A string containing the request's task name if relevant, or None.
"""
if self.__pb.has_task_name():
return self.__pb.task_name()
@property
def was_loading_request(self):
"""Returns whether this request was a loading request for an instance.
Returns:
A bool indicating whether this request was a loading request.
"""
return bool(self.__pb.was_loading_request())
@property
def pending_time(self):
"""Time this request spent in the pending request queue.
Returns:
A float representing the time in seconds that this request was pending.
"""
return self.__pb.pending_time() / 1e6
@property
def replica_index(self):
"""The module replica that handled the request as an integer, or None."""
if self.__pb.has_replica_index():
return self.__pb.replica_index()
return None
@property
def finished(self):
"""Whether or not this log represents a finished request, as a bool."""
return bool(self.__pb.finished())
@property
def instance_key(self):
"""Mostly-unique identifier for the instance that handled the request.
Returns:
A string encoding of an instance key if available, or None.
"""
if self.__pb.has_clone_key():
return self.__pb.clone_key()
return None
@property
def app_logs(self):
"""Logs emitted by the application while serving this request.
Returns:
A list of AppLog objects representing the log lines for this request, or
an empty list if none were emitted or the query did not request them.
"""
if not self.__lines and self.__pb.line_size():
self.__lines = [AppLog(time=line.time() / 1e6, level=line.level(),
message=line.log_message())
for line in self.__pb.line_list()]
return self.__lines
class AppLog(object):
"""Application log line emitted while processing a request."""
def __init__(self, time=None, level=None, message=None):
self._time = time
self._level = level
self._message = message
def __eq__(self, other):
return (self.time == other.time and self.level == other.level and
self.message == other.message)
def __repr__(self):
return ('AppLog(time=%f, level=%d, message=\'%s\')' %
(self.time, self.level, self.message))
@property
def time(self):
"""Time log entry was made, in seconds since the Unix epoch, as a float."""
return self._time
@property
def level(self):
"""Level or severity of log, as an int."""
return self._level
@property
def message(self):
"""Application-provided log message, as a string."""
return self._message
_FETCH_KWARGS = frozenset(['prototype_request', 'timeout', 'batch_size',
'server_versions'])
@datastore_rpc._positional(0)
def fetch(start_time=None,
end_time=None,
offset=None,
minimum_log_level=None,
include_incomplete=False,
include_app_logs=False,
module_versions=None,
version_ids=None,
request_ids=None,
**kwargs):
"""Returns an iterator yielding an application's request and application logs.
Logs will be returned by the iterator in reverse chronological order by
request end time, or by last flush time for requests still in progress (if
requested). The items yielded are RequestLog objects, the contents of which
are accessible via method calls.
All parameters are optional.
Args:
start_time: The earliest request completion or last-update time that
results should be fetched for, in seconds since the Unix epoch.
end_time: The latest request completion or last-update time that
results should be fetched for, in seconds since the Unix epoch.
offset: A byte string representing an offset into the log stream, extracted
from a previously emitted RequestLog. This iterator will begin
immediately after the record from which the offset came.
minimum_log_level: An application log level which serves as a filter on the
requests returned--requests with no application log at or above the
specified level will be omitted. Works even if include_app_logs is not
True. In ascending order, the available log levels are:
logservice.LOG_LEVEL_DEBUG, logservice.LOG_LEVEL_INFO,
logservice.LOG_LEVEL_WARNING, logservice.LOG_LEVEL_ERROR,
and logservice.LOG_LEVEL_CRITICAL.
include_incomplete: Whether or not to include requests that have started but
not yet finished, as a boolean. Defaults to False.
include_app_logs: Whether or not to include application level logs in the
results, as a boolean. Defaults to False.
module_versions: A list of tuples of the form (module, version), that
indicate that the logs for the given module/version combination should be
fetched. Duplicate tuples will be ignored. This kwarg may not be used
in conjunction with the 'version_ids' kwarg.
version_ids: A list of version ids whose logs should be queried against.
Defaults to the application's current version id only. This kwarg may not
be used in conjunction with the 'module_versions' kwarg.
request_ids: If not None, indicates that instead of a time-based scan, logs
for the specified requests should be returned. Malformed request IDs will
cause the entire request to be rejected, while any requests that are
unknown will be ignored. This option may not be combined with any
filtering options such as start_time, end_time, offset, or
minimum_log_level. version_ids is ignored. IDs that do not correspond to
a request log will be ignored. Logs will be returned in the order
requested.
Returns:
An iterable object containing the logs that the user has queried for.
Raises:
InvalidArgumentError: Raised if any of the input parameters are not of the
correct type.
"""
args_diff = set(kwargs) - _FETCH_KWARGS
if args_diff:
raise InvalidArgumentError('Invalid arguments: %s' % ', '.join(args_diff))
request = log_service_pb.LogReadRequest()
request.set_app_id(os.environ['APPLICATION_ID'])
if start_time is not None:
if not isinstance(start_time, (float, int, long)):
raise InvalidArgumentError('start_time must be a float or integer')
request.set_start_time(long(start_time * 1000000))
if end_time is not None:
if not isinstance(end_time, (float, int, long)):
raise InvalidArgumentError('end_time must be a float or integer')
request.set_end_time(long(end_time * 1000000))
if offset is not None:
try:
request.mutable_offset().ParseFromString(offset)
except (TypeError, ProtocolBuffer.ProtocolBufferDecodeError):
raise InvalidArgumentError('offset must be a string or read-only buffer')
if minimum_log_level is not None:
if not isinstance(minimum_log_level, int):
raise InvalidArgumentError('minimum_log_level must be an int')
if not minimum_log_level in range(LOG_LEVEL_CRITICAL+1):
raise InvalidArgumentError("""minimum_log_level must be between 0 and 4
inclusive""")
request.set_minimum_log_level(minimum_log_level)
if not isinstance(include_incomplete, bool):
raise InvalidArgumentError('include_incomplete must be a boolean')
request.set_include_incomplete(include_incomplete)
if not isinstance(include_app_logs, bool):
raise InvalidArgumentError('include_app_logs must be a boolean')
request.set_include_app_logs(include_app_logs)
if 'server_versions' in kwargs:
logging.warning('The server_versions kwarg to the fetch() method is '
'deprecated. Please use the module_versions kwarg '
'instead.')
module_versions = kwargs.pop('server_versions')
if version_ids and module_versions:
raise InvalidArgumentError('version_ids and module_versions may not be '
'used at the same time.')
if version_ids is None and module_versions is None:
version_id = os.environ['CURRENT_VERSION_ID']
request.add_module_version().set_version_id(version_id.split('.')[0])
if module_versions:
if not isinstance(module_versions, list):
raise InvalidArgumentError('module_versions must be a list')
req_module_versions = set()
for entry in module_versions:
if not isinstance(entry, (list, tuple)):
raise InvalidArgumentError('module_versions list entries must all be '
'tuples or lists.')
if len(entry) != 2:
raise InvalidArgumentError('module_versions list entries must all be '
'of length 2.')
req_module_versions.add((entry[0], entry[1]))
for module, version in sorted(req_module_versions):
req_module_version = request.add_module_version()
if module != 'default':
req_module_version.set_module_id(module)
req_module_version.set_version_id(version)
if version_ids:
if not isinstance(version_ids, list):
raise InvalidArgumentError('version_ids must be a list')
for version_id in version_ids:
if not _MAJOR_VERSION_ID_RE.match(version_id):
raise InvalidArgumentError(
'version_ids must only contain valid major version identifiers')
request.add_module_version().set_version_id(version_id)
if request_ids is not None:
if not isinstance(request_ids, list):
raise InvalidArgumentError('request_ids must be a list')
if not request_ids:
raise InvalidArgumentError('request_ids must not be empty')
if len(request_ids) != len(set(request_ids)):
raise InvalidArgumentError('request_ids must not contain duplicates')
for request_id in request_ids:
if not _REQUEST_ID_RE.match(request_id):
raise InvalidArgumentError(
'%s is not a valid request log id' % request_id)
request.request_id_list()[:] = request_ids
prototype_request = kwargs.get('prototype_request')
if prototype_request:
if not isinstance(prototype_request, log_service_pb.LogReadRequest):
raise InvalidArgumentError('prototype_request must be a LogReadRequest')
request.MergeFrom(prototype_request)
timeout = kwargs.get('timeout')
if timeout is not None:
if not isinstance(timeout, (float, int, long)):
raise InvalidArgumentError('timeout must be a float or integer')
batch_size = kwargs.get('batch_size')
if batch_size is not None:
if not isinstance(batch_size, (int, long)):
raise InvalidArgumentError('batch_size must be an integer')
if batch_size < 1:
raise InvalidArgumentError('batch_size must be greater than zero')
if batch_size > MAX_ITEMS_PER_FETCH:
raise InvalidArgumentError('batch_size specified is too large')
request.set_count(batch_size)
return _LogQueryResult(request, timeout=timeout)
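# Usage sketch (added for illustration, not part of the original module; assumes
# an App Engine runtime where the 'logservice' API is available): iterating the
# last hour of finished requests with their application logs, and resuming from
# an offset after a timeout as described in TimeoutError above.
#
#   import time
#   end = time.time()
#   try:
#       for req_log in fetch(start_time=end - 3600, end_time=end,
#                            minimum_log_level=LOG_LEVEL_INFO,
#                            include_app_logs=True, batch_size=100, timeout=5):
#           print req_log.resource, req_log.status
#           for app_log in req_log.app_logs:
#               print '  ', app_log.level, app_log.message
#   except TimeoutError, e:
#       cursor = e.offset  # pass as offset= to a later fetch() call to resume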
| 31.49498 | 80 | 0.692244 |
794d05fcacd42514f9fc73136e0211e0c799d42d | 49 | py | Python | Codeforces/A_Dubstep.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | ["MIT"] | null | null | null | Codeforces/A_Dubstep.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | ["MIT"] | null | null | null | Codeforces/A_Dubstep.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | ["MIT"] | null | null | null |
print (" ".join(input().split('WUB')).strip(" "))
| 49 | 49 | 0.55102 |
794d067a9e679b854bf72d1e2687ce97410646e8 | 27,460 | py | Python | caffe2/python/examples/imagenet_trainer.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | ["Intel", "X11"] | 40 | 2021-06-01T07:37:59.000Z | 2022-03-25T01:42:09.000Z | caffe2/python/examples/imagenet_trainer.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | ["Intel", "X11"] | 14 | 2021-06-01T11:52:46.000Z | 2022-03-25T02:13:08.000Z | caffe2/python/examples/imagenet_trainer.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | ["Intel", "X11"] | 7 | 2021-07-20T19:34:26.000Z | 2022-03-13T21:07:36.000Z |
# Module caffe2.python.examples.resnet50_trainer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import numpy as np
import time
import os
from caffe2.python import core, workspace, experiment_util, data_parallel_model
from caffe2.python import dyndep, optimizer
from caffe2.python import timeout_guard, model_helper, brew
from caffe2.proto import caffe2_pb2
import caffe2.python.models.resnet as resnet
import caffe2.python.models.shufflenet as shufflenet
from caffe2.python.modeling.initializers import Initializer, PseudoFP16Initializer
import caffe2.python.predictor.predictor_exporter as pred_exp
import caffe2.python.predictor.predictor_py_utils as pred_utils
from caffe2.python.predictor_constants import predictor_constants as predictor_constants
'''
Parallelized multi-GPU distributed trainer for Resne(X)t & Shufflenet.
Can be used to train on imagenet data, for example.
The default parameters can train a standard Resnet-50 (1x64d), and parameters
can be provided to train ResNe(X)t models (e.g., ResNeXt-101 32x4d).
To run the trainer in single-machine multi-gpu mode, set num_shards = 1.
To run the trainer in multi-machine multi-gpu mode with M machines,
run the same program on all machines, specifying num_shards = M, and
shard_id = a unique integer in the set [0, M-1].
For rendezvous (the trainer processes have to know about each other),
you can either use a directory path that is visible to all processes
(e.g. NFS directory), or use a Redis instance. Use the former by
passing the `file_store_path` argument. Use the latter by passing the
`redis_host` and `redis_port` arguments.
'''
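# Example invocations (added sketch, not part of the original file; flag names
# are inferred from the args.* attributes used below and may not match the
# actual argparse definitions exactly; data-input flags are omitted):
#
#   single machine, multi-GPU:
#     python imagenet_trainer.py --gpus 0,1 --batch_size 64 --num_shards 1
#
#   M machines rendezvousing over a shared filesystem (run on every machine,
#   each with a unique --shard_id in [0, M-1]):
#     python imagenet_trainer.py --num_shards 2 --shard_id 0 \
#         --file_store_path /mnt/shared/rendezvous
#
#   or rendezvous through Redis instead of a file store:
#     python imagenet_trainer.py --num_shards 2 --shard_id 1 \
#         --redis_host my-redis-host --redis_port 6379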
logging.basicConfig()
log = logging.getLogger("Imagenet_trainer")
log.setLevel(logging.DEBUG)
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:file_store_handler_ops')
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:redis_store_handler_ops')
def AddImageInput(
model,
reader,
batch_size,
img_size,
dtype,
is_test,
mean_per_channel=None,
std_per_channel=None,
):
'''
The image input operator loads image and label data from the reader and
applies transformations to the images (random cropping, mirroring, ...).
'''
data, label = brew.image_input(
model,
reader, ["data", "label"],
batch_size=batch_size,
output_type=dtype,
use_gpu_transform=True if core.IsGPUDeviceType(model._device_type) else False,
use_caffe_datum=True,
mean_per_channel=mean_per_channel,
std_per_channel=std_per_channel,
# mean_per_channel takes precedence over mean
mean=128.,
std=128.,
scale=256,
crop=img_size,
mirror=1,
is_test=is_test,
)
data = model.StopGradient(data, data)
def AddNullInput(model, reader, batch_size, img_size, dtype):
'''
The null input function uses a gaussian fill operator to emulate real image
input. A label blob is hardcoded to a single value. This is useful if you
want to test compute throughput or don't have a dataset available.
'''
suffix = "_fp16" if dtype == "float16" else ""
model.param_init_net.GaussianFill(
[],
["data" + suffix],
shape=[batch_size, 3, img_size, img_size],
)
if dtype == "float16":
model.param_init_net.FloatToHalf("data" + suffix, "data")
model.param_init_net.ConstantFill(
[],
["label"],
shape=[batch_size],
value=1,
dtype=core.DataType.INT32,
)
def SaveModel(args, train_model, epoch, use_ideep):
prefix = "[]_{}".format(train_model._device_prefix, train_model._devices[0])
predictor_export_meta = pred_exp.PredictorExportMeta(
predict_net=train_model.net.Proto(),
parameters=data_parallel_model.GetCheckpointParams(train_model),
inputs=[prefix + "/data"],
outputs=[prefix + "/softmax"],
shapes={
prefix + "/softmax": (1, args.num_labels),
prefix + "/data": (args.num_channels, args.image_size, args.image_size)
}
)
# save the train_model for the current epoch
model_path = "%s/%s_%d.mdl" % (
args.file_store_path,
args.save_model_name,
epoch,
)
# set db_type to be "minidb" instead of "log_file_db", which breaks
# the serialization in save_to_db. Need to switch back to log_file_db
# after migration
pred_exp.save_to_db(
db_type="minidb",
db_destination=model_path,
predictor_export_meta=predictor_export_meta,
use_ideep=use_ideep
)
def LoadModel(path, model, use_ideep):
'''
Load pretrained model from file
'''
log.info("Loading path: {}".format(path))
meta_net_def = pred_exp.load_from_db(path, 'minidb')
init_net = core.Net(pred_utils.GetNet(
meta_net_def, predictor_constants.GLOBAL_INIT_NET_TYPE))
predict_init_net = core.Net(pred_utils.GetNet(
meta_net_def, predictor_constants.PREDICT_INIT_NET_TYPE))
if use_ideep:
predict_init_net.RunAllOnIDEEP()
else:
predict_init_net.RunAllOnGPU()
if use_ideep:
init_net.RunAllOnIDEEP()
else:
init_net.RunAllOnGPU()
assert workspace.RunNetOnce(predict_init_net)
assert workspace.RunNetOnce(init_net)
# Hack: fix iteration counter which is in CUDA context after load model
itercnt = workspace.FetchBlob("optimizer_iteration")
workspace.FeedBlob(
"optimizer_iteration",
itercnt,
device_option=core.DeviceOption(caffe2_pb2.CPU, 0)
)
def RunEpoch(
args,
epoch,
train_model,
test_model,
total_batch_size,
num_shards,
expname,
explog,
):
'''
Run one epoch of the trainer.
TODO: add checkpointing here.
'''
# TODO: add loading from checkpoint
log.info("Starting epoch {}/{}".format(epoch, args.num_epochs))
epoch_iters = int(args.epoch_size / total_batch_size / num_shards)
test_epoch_iters = int(args.test_epoch_size / total_batch_size / num_shards)
for i in range(epoch_iters):
# This timeout is required (temporarily) since CUDA-NCCL
# operators might deadlock when synchronizing between GPUs.
timeout = args.first_iter_timeout if i == 0 else args.timeout
with timeout_guard.CompleteInTimeOrDie(timeout):
t1 = time.time()
workspace.RunNet(train_model.net.Proto().name)
t2 = time.time()
dt = t2 - t1
fmt = "Finished iteration {}/{} of epoch {} ({:.2f} images/sec)"
log.info(fmt.format(i + 1, epoch_iters, epoch, total_batch_size / dt))
prefix = "{}_{}".format(
train_model._device_prefix,
train_model._devices[0])
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
train_fmt = "Training loss: {}, accuracy: {}"
log.info(train_fmt.format(loss, accuracy))
num_images = epoch * epoch_iters * total_batch_size
prefix = "{}_{}".format(train_model._device_prefix, train_model._devices[0])
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
learning_rate = workspace.FetchBlob(
data_parallel_model.GetLearningRateBlobNames(train_model)[0]
)
test_accuracy = 0
test_accuracy_top5 = 0
if test_model is not None:
# Run 100 iters of testing
ntests = 0
for _ in range(test_epoch_iters):
workspace.RunNet(test_model.net.Proto().name)
for g in test_model._devices:
test_accuracy += np.asscalar(workspace.FetchBlob(
"{}_{}".format(test_model._device_prefix, g) + '/accuracy'
))
test_accuracy_top5 += np.asscalar(workspace.FetchBlob(
"{}_{}".format(test_model._device_prefix, g) + '/accuracy_top5'
))
ntests += 1
test_accuracy /= ntests
test_accuracy_top5 /= ntests
else:
test_accuracy = (-1)
test_accuracy_top5 = (-1)
explog.log(
input_count=num_images,
batch_count=(i + epoch * epoch_iters),
additional_values={
'accuracy': accuracy,
'loss': loss,
'learning_rate': learning_rate,
'epoch': epoch,
'top1_test_accuracy': test_accuracy,
'top5_test_accuracy': test_accuracy_top5,
}
)
assert loss < 40, "Exploded gradients :("
# TODO: add checkpointing
return epoch + 1
def Train(args):
if args.model == "resnext":
model_name = "resnext" + str(args.num_layers)
elif args.model == "shufflenet":
model_name = "shufflenet"
# Either use specified device list or generate one
if args.gpus is not None:
gpus = [int(x) for x in args.gpus.split(',')]
num_gpus = len(gpus)
else:
gpus = list(range(args.num_gpus))
num_gpus = args.num_gpus
log.info("Running on GPUs: {}".format(gpus))
# Verify valid batch size
total_batch_size = args.batch_size
batch_per_device = total_batch_size // num_gpus
assert \
total_batch_size % num_gpus == 0, \
"Number of GPUs must divide batch size"
# Verify valid image mean/std per channel
if args.image_mean_per_channel:
assert \
len(args.image_mean_per_channel) == args.num_channels, \
"The number of channels of image mean doesn't match input"
if args.image_std_per_channel:
assert \
len(args.image_std_per_channel) == args.num_channels, \
"The number of channels of image std doesn't match input"
# Round down epoch size to closest multiple of batch size across machines
global_batch_size = total_batch_size * args.num_shards
epoch_iters = int(args.epoch_size / global_batch_size)
assert \
epoch_iters > 0, \
"Epoch size must be larger than batch size times shard count"
args.epoch_size = epoch_iters * global_batch_size
log.info("Using epoch size: {}".format(args.epoch_size))
# Create ModelHelper object
if args.use_ideep:
train_arg_scope = {
'use_cudnn': False,
'cudnn_exhaustive_search': False,
'training_mode': 1
}
else:
train_arg_scope = {
'order': 'NCHW',
'use_cudnn': True,
'cudnn_exhaustive_search': True,
'ws_nbytes_limit': (args.cudnn_workspace_limit_mb * 1024 * 1024),
}
train_model = model_helper.ModelHelper(
name=model_name, arg_scope=train_arg_scope
)
num_shards = args.num_shards
shard_id = args.shard_id
# Expect interfaces to be comma separated.
# Use of multiple network interfaces is not yet complete,
# so simply use the first one in the list.
interfaces = args.distributed_interfaces.split(",")
# Rendezvous using MPI when run with mpirun
if os.getenv("OMPI_COMM_WORLD_SIZE") is not None:
num_shards = int(os.getenv("OMPI_COMM_WORLD_SIZE", 1))
shard_id = int(os.getenv("OMPI_COMM_WORLD_RANK", 0))
if num_shards > 1:
rendezvous = dict(
kv_handler=None,
num_shards=num_shards,
shard_id=shard_id,
engine="GLOO",
transport=args.distributed_transport,
interface=interfaces[0],
mpi_rendezvous=True,
exit_nets=None)
elif num_shards > 1:
# Create rendezvous for distributed computation
store_handler = "store_handler"
if args.redis_host is not None:
# Use Redis for rendezvous if Redis host is specified
workspace.RunOperatorOnce(
core.CreateOperator(
"RedisStoreHandlerCreate", [], [store_handler],
host=args.redis_host,
port=args.redis_port,
prefix=args.run_id,
)
)
else:
# Use filesystem for rendezvous otherwise
workspace.RunOperatorOnce(
core.CreateOperator(
"FileStoreHandlerCreate", [], [store_handler],
path=args.file_store_path,
prefix=args.run_id,
)
)
rendezvous = dict(
kv_handler=store_handler,
shard_id=shard_id,
num_shards=num_shards,
engine="GLOO",
transport=args.distributed_transport,
interface=interfaces[0],
exit_nets=None)
else:
rendezvous = None
# Model building functions
def create_resnext_model_ops(model, loss_scale):
initializer = (PseudoFP16Initializer if args.dtype == 'float16'
else Initializer)
with brew.arg_scope([brew.conv, brew.fc],
WeightInitializer=initializer,
BiasInitializer=initializer,
enable_tensor_core=args.enable_tensor_core,
float16_compute=args.float16_compute):
pred = resnet.create_resnext(
model,
"data",
num_input_channels=args.num_channels,
num_labels=args.num_labels,
num_layers=args.num_layers,
num_groups=args.resnext_num_groups,
num_width_per_group=args.resnext_width_per_group,
no_bias=True,
no_loss=True,
)
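        # In fp16 mode, cast the predictions back to fp32 so the softmax/loss
        # below are computed in full precision.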
if args.dtype == 'float16':
pred = model.net.HalfToFloat(pred, pred + '_fp32')
softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
['softmax', 'loss'])
loss = model.Scale(loss, scale=loss_scale)
brew.accuracy(model, [softmax, "label"], "accuracy", top_k=1)
brew.accuracy(model, [softmax, "label"], "accuracy_top5", top_k=5)
return [loss]
def create_shufflenet_model_ops(model, loss_scale):
initializer = (PseudoFP16Initializer if args.dtype == 'float16'
else Initializer)
with brew.arg_scope([brew.conv, brew.fc],
WeightInitializer=initializer,
BiasInitializer=initializer,
enable_tensor_core=args.enable_tensor_core,
float16_compute=args.float16_compute):
pred = shufflenet.create_shufflenet(
model,
"data",
num_input_channels=args.num_channels,
num_labels=args.num_labels,
no_loss=True,
)
if args.dtype == 'float16':
pred = model.net.HalfToFloat(pred, pred + '_fp32')
softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
['softmax', 'loss'])
loss = model.Scale(loss, scale=loss_scale)
brew.accuracy(model, [softmax, "label"], "accuracy", top_k=1)
brew.accuracy(model, [softmax, "label"], "accuracy_top5", top_k=5)
return [loss]
def add_optimizer(model):
stepsz = int(30 * args.epoch_size / total_batch_size / num_shards)
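        # With the "step" LR policy used below, the learning rate is multiplied
        # by gamma (0.1) every `stepsz` iterations, i.e. roughly every 30 epochs.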
if args.float16_compute:
# TODO: merge with multi-precision optimizer
opt = optimizer.build_fp16_sgd(
model,
args.base_learning_rate,
momentum=0.9,
nesterov=1,
weight_decay=args.weight_decay, # weight decay included
policy="step",
stepsize=stepsz,
gamma=0.1
)
else:
optimizer.add_weight_decay(model, args.weight_decay)
opt = optimizer.build_multi_precision_sgd(
model,
args.base_learning_rate,
momentum=0.9,
nesterov=1,
policy="step",
stepsize=stepsz,
gamma=0.1
)
return opt
# Define add_image_input function.
# Depends on the "train_data" argument.
    # Note that the reader will be shared between all GPUs.
if args.train_data == "null":
def add_image_input(model):
AddNullInput(
model,
None,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
)
else:
reader = train_model.CreateDB(
"reader",
db=args.train_data,
db_type=args.db_type,
num_shards=num_shards,
shard_id=shard_id,
)
def add_image_input(model):
AddImageInput(
model,
reader,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
is_test=False,
mean_per_channel=args.image_mean_per_channel,
std_per_channel=args.image_std_per_channel,
)
def add_post_sync_ops(model):
"""Add ops applied after initial parameter sync."""
for param_info in model.GetOptimizationParamInfo(model.GetParams()):
if param_info.blob_copy is not None:
model.param_init_net.HalfToFloat(
param_info.blob,
param_info.blob_copy[core.DataType.FLOAT]
)
data_parallel_model.Parallelize(
train_model,
input_builder_fun=add_image_input,
forward_pass_builder_fun=create_resnext_model_ops
if args.model == "resnext" else create_shufflenet_model_ops,
optimizer_builder_fun=add_optimizer,
post_sync_builder_fun=add_post_sync_ops,
devices=gpus,
rendezvous=rendezvous,
optimize_gradient_memory=False,
use_nccl=args.use_nccl,
cpu_device=args.use_cpu,
ideep=args.use_ideep,
shared_model=args.use_cpu,
combine_spatial_bn=args.use_cpu,
)
data_parallel_model.OptimizeGradientMemory(train_model, {}, set(), False)
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net)
# Add test model, if specified
test_model = None
if (args.test_data is not None):
log.info("----- Create test net ----")
if args.use_ideep:
test_arg_scope = {
'use_cudnn': False,
'cudnn_exhaustive_search': False,
}
else:
test_arg_scope = {
'order': "NCHW",
'use_cudnn': True,
'cudnn_exhaustive_search': True,
}
test_model = model_helper.ModelHelper(
name=model_name + "_test",
arg_scope=test_arg_scope,
init_params=False,
)
test_reader = test_model.CreateDB(
"test_reader",
db=args.test_data,
db_type=args.db_type,
)
def test_input_fn(model):
AddImageInput(
model,
test_reader,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
is_test=True,
mean_per_channel=args.image_mean_per_channel,
std_per_channel=args.image_std_per_channel,
)
data_parallel_model.Parallelize(
test_model,
input_builder_fun=test_input_fn,
forward_pass_builder_fun=create_resnext_model_ops
if args.model == "resnext" else create_shufflenet_model_ops,
post_sync_builder_fun=add_post_sync_ops,
param_update_builder_fun=None,
devices=gpus,
use_nccl=args.use_nccl,
cpu_device=args.use_cpu,
)
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net)
epoch = 0
# load the pre-trained model and reset epoch
if args.load_model_path is not None:
LoadModel(args.load_model_path, train_model, args.use_ideep)
# Sync the model params
data_parallel_model.FinalizeAfterCheckpoint(train_model)
# reset epoch. load_model_path should end with *_X.mdl,
# where X is the epoch number
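        # e.g. a checkpoint named "<save_model_name>_7.mdl" resumes at epoch 7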
last_str = args.load_model_path.split('_')[-1]
if last_str.endswith('.mdl'):
epoch = int(last_str[:-4])
log.info("Reset epoch to {}".format(epoch))
else:
log.warning("The format of load_model_path doesn't match!")
expname = "%s_gpu%d_b%d_L%d_lr%.2f_v2" % (
model_name,
args.num_gpus,
total_batch_size,
args.num_labels,
args.base_learning_rate,
)
explog = experiment_util.ModelTrainerLog(expname, args)
# Run the training one epoch a time
while epoch < args.num_epochs:
epoch = RunEpoch(
args,
epoch,
train_model,
test_model,
total_batch_size,
num_shards,
expname,
explog
)
# Save the model for each epoch
SaveModel(args, train_model, epoch, args.use_ideep)
model_path = "%s/%s_" % (
args.file_store_path,
args.save_model_name
)
# remove the saved model from the previous epoch if it exists
if os.path.isfile(model_path + str(epoch - 1) + ".mdl"):
os.remove(model_path + str(epoch - 1) + ".mdl")
def main():
# TODO: use argv
parser = argparse.ArgumentParser(
description="Caffe2: ImageNet Trainer"
)
parser.add_argument("--train_data", type=str, default=None, required=True,
help="Path to training data (or 'null' to simulate)")
parser.add_argument("--num_layers", type=int, default=50,
help="The number of layers in ResNe(X)t model")
parser.add_argument("--resnext_num_groups", type=int, default=1,
help="The cardinality of resnext")
parser.add_argument("--resnext_width_per_group", type=int, default=64,
help="The cardinality of resnext")
parser.add_argument("--test_data", type=str, default=None,
help="Path to test data")
parser.add_argument("--image_mean_per_channel", type=float, nargs='+',
help="The per channel mean for the images")
parser.add_argument("--image_std_per_channel", type=float, nargs='+',
help="The per channel standard deviation for the images")
parser.add_argument("--test_epoch_size", type=int, default=50000,
help="Number of test images")
parser.add_argument("--db_type", type=str, default="lmdb",
help="Database type (such as lmdb or leveldb)")
parser.add_argument("--gpus", type=str,
help="Comma separated list of GPU devices to use")
parser.add_argument("--num_gpus", type=int, default=1,
help="Number of GPU devices (instead of --gpus)")
parser.add_argument("--num_channels", type=int, default=3,
help="Number of color channels")
parser.add_argument("--image_size", type=int, default=224,
help="Input image size (to crop to)")
parser.add_argument("--num_labels", type=int, default=1000,
help="Number of labels")
parser.add_argument("--batch_size", type=int, default=32,
help="Batch size, total over all GPUs")
parser.add_argument("--epoch_size", type=int, default=1500000,
help="Number of images/epoch, total over all machines")
parser.add_argument("--num_epochs", type=int, default=1000,
help="Num epochs.")
parser.add_argument("--base_learning_rate", type=float, default=0.1,
help="Initial learning rate.")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help="Weight decay (L2 regularization)")
parser.add_argument("--cudnn_workspace_limit_mb", type=int, default=64,
help="CuDNN workspace limit in MBs")
parser.add_argument("--num_shards", type=int, default=1,
help="Number of machines in distributed run")
parser.add_argument("--shard_id", type=int, default=0,
help="Shard id.")
parser.add_argument("--run_id", type=str,
help="Unique run identifier (e.g. uuid)")
parser.add_argument("--redis_host", type=str,
help="Host of Redis server (for rendezvous)")
parser.add_argument("--redis_port", type=int, default=6379,
help="Port of Redis server (for rendezvous)")
parser.add_argument("--file_store_path", type=str, default="/tmp",
help="Path to directory to use for rendezvous")
parser.add_argument("--save_model_name", type=str, default="resnext_model",
help="Save the trained model to a given name")
parser.add_argument("--load_model_path", type=str, default=None,
help="Load previously saved model to continue training")
parser.add_argument("--use_cpu", action="store_true",
help="Use CPU instead of GPU")
parser.add_argument("--use_nccl", action="store_true",
help="Use nccl for inter-GPU collectives")
parser.add_argument("--use_ideep", type=bool, default=False,
help="Use ideep")
parser.add_argument('--dtype', default='float',
choices=['float', 'float16'],
help='Data type used for training')
parser.add_argument('--float16_compute', action='store_true',
help="Use float 16 compute, if available")
parser.add_argument('--enable_tensor_core', action='store_true',
help='Enable Tensor Core math for Conv and FC ops')
parser.add_argument("--distributed_transport", type=str, default="tcp",
help="Transport to use for distributed run [tcp|ibverbs]")
parser.add_argument("--distributed_interfaces", type=str, default="",
help="Network interfaces to use for distributed run")
parser.add_argument("--first_iter_timeout", type=int, default=1200,
help="Timeout (secs) of the first iteration "
"(default: %(default)s)")
parser.add_argument("--timeout", type=int, default=60,
help="Timeout (secs) of each (except the first) iteration "
"(default: %(default)s)")
parser.add_argument("--model",
default="resnext", const="resnext", nargs="?",
choices=["shufflenet", "resnext"],
help="List of models which can be run")
args = parser.parse_args()
Train(args)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
main()
| 37.513661
| 88
| 0.605353
|
794d0699485b201c5b1dc6dff70c4bd91f3d3998
| 327
|
py
|
Python
|
tests/test_app/library/loans/views.py
|
Pijuli/django-jazzmin
|
e3f9d45183d58f78bf4c6793969490631a84681d
|
[
"MIT"
] | 972
|
2020-05-12T19:51:01.000Z
|
2022-03-31T20:18:33.000Z
|
tests/test_app/library/loans/views.py
|
Pijuli/django-jazzmin
|
e3f9d45183d58f78bf4c6793969490631a84681d
|
[
"MIT"
] | 290
|
2020-05-12T17:35:21.000Z
|
2022-03-31T15:18:59.000Z
|
tests/test_app/library/loans/views.py
|
Pijuli/django-jazzmin
|
e3f9d45183d58f78bf4c6793969490631a84681d
|
[
"MIT"
] | 166
|
2020-06-11T10:50:47.000Z
|
2022-03-24T12:19:00.000Z
|
from django.views.generic import TemplateView
from django.contrib.admin.sites import site
class CustomView(TemplateView):
template_name = "loans/custom.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx.update(site.each_context(self.request))
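        # Merge in the admin site's shared context (site_header, available_apps,
        # etc.) so this custom page renders inside the admin chrome.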
return ctx
| 27.25
| 51
| 0.721713
|
794d06b118f73f84830493463806b2128c9f87dc
| 15,417
|
py
|
Python
|
kubernetes/test/test_io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster import IoXK8sClusterInfrastructureV1alpha4AWSCluster # noqa: E501
from kubernetes.client.rest import ApiException
class TestIoXK8sClusterInfrastructureV1alpha4AWSCluster(unittest.TestCase):
"""IoXK8sClusterInfrastructureV1alpha4AWSCluster unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test IoXK8sClusterInfrastructureV1alpha4AWSCluster
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kubernetes.client.models.io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster.IoXK8sClusterInfrastructureV1alpha4AWSCluster() # noqa: E501
if include_optional :
return IoXK8sClusterInfrastructureV1alpha4AWSCluster(
api_version = '0',
kind = '0',
metadata = kubernetes.client.models.v1/object_meta_v2.v1.ObjectMeta_v2(
annotations = {
'key' : '0'
},
cluster_name = '0',
creation_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
deletion_grace_period_seconds = 56,
deletion_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
finalizers = [
'0'
],
generate_name = '0',
generation = 56,
labels = {
'key' : '0'
},
managed_fields = [
kubernetes.client.models.v1/managed_fields_entry.v1.ManagedFieldsEntry(
api_version = '0',
fields_type = '0',
fields_v1 = kubernetes.client.models.fields_v1.fieldsV1(),
manager = '0',
operation = '0',
time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
],
name = '0',
namespace = '0',
owner_references = [
kubernetes.client.models.v1/owner_reference_v2.v1.OwnerReference_v2(
api_version = '0',
block_owner_deletion = True,
controller = True,
kind = '0',
name = '0',
uid = '0', )
],
resource_version = '0',
self_link = '0',
uid = '0', ),
spec = kubernetes.client.models.io_x_k8s_cluster_infrastructure_v1alpha4_aws_cluster_spec.io_x_k8s_cluster_infrastructure_v1alpha4_AWSCluster_spec(
additional_tags = {
'key' : '0'
},
bastion = kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_spec_bastion.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_spec_bastion(
allowed_cidr_blocks = [
'0'
],
ami = '0',
disable_ingress_rules = True,
enabled = True,
instance_type = '0', ),
control_plane_endpoint = kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_spec_control_plane_endpoint.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_spec_controlPlaneEndpoint(
host = '0',
port = 56, ),
control_plane_load_balancer = kubernetes.client.models.io_x_k8s_cluster_infrastructure_v1alpha3_aws_cluster_spec_control_plane_load_balancer.io_x_k8s_cluster_infrastructure_v1alpha3_AWSCluster_spec_controlPlaneLoadBalancer(
additional_security_groups = [
'0'
],
cross_zone_load_balancing = True,
scheme = 'internet-facing',
subnets = [
'0'
], ),
identity_ref = kubernetes.client.models.io_x_k8s_cluster_infrastructure_v1alpha3_aws_cluster_spec_identity_ref.io_x_k8s_cluster_infrastructure_v1alpha3_AWSCluster_spec_identityRef(
kind = 'AWSClusterControllerIdentity',
name = '0', ),
image_lookup_base_os = '0',
image_lookup_format = '0',
image_lookup_org = '0',
network = kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_spec_network_spec.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_spec_networkSpec(
cni = kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_spec_network_spec_cni.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_spec_networkSpec_cni(
cni_ingress_rules = [
kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_spec_network_spec_cni_cni_ingress_rules.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_spec_networkSpec_cni_cniIngressRules(
description = '0',
from_port = 56,
protocol = '0',
to_port = 56, )
], ),
security_group_overrides = {
'key' : '0'
},
vpc = kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_spec_network_spec_vpc.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_spec_networkSpec_vpc(
availability_zone_selection = 'Ordered',
availability_zone_usage_limit = 1,
cidr_block = '0',
id = '0',
internet_gateway_id = '0',
tags = {
'key' : '0'
}, ), ),
region = '0',
ssh_key_name = '0', ),
status = kubernetes.client.models.io_x_k8s_cluster_infrastructure_v1alpha4_aws_cluster_status.io_x_k8s_cluster_infrastructure_v1alpha4_AWSCluster_status(
bastion = kubernetes.client.models.io_x_k8s_cluster_infrastructure_v1alpha4_aws_cluster_status_bastion.io_x_k8s_cluster_infrastructure_v1alpha4_AWSCluster_status_bastion(
addresses = [
kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_status_bastion_addresses.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_status_bastion_addresses(
address = '0',
type = '0', )
],
availability_zone = '0',
ebs_optimized = True,
ena_support = True,
iam_profile = '0',
id = '0',
image_id = '0',
instance_state = '0',
network_interfaces = [
'0'
],
non_root_volumes = [
kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha4_aws_managed_control_plane_status_bastion_non_root_volumes.io_x_k8s_cluster_controlplane_v1alpha4_AWSManagedControlPlane_status_bastion_nonRootVolumes(
device_name = '0',
encrypted = True,
encryption_key = '0',
iops = 56,
size = 8,
throughput = 56,
type = '0', )
],
private_ip = '0',
public_ip = '0',
root_volume = kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha4_aws_managed_control_plane_status_bastion_root_volume.io_x_k8s_cluster_controlplane_v1alpha4_AWSManagedControlPlane_status_bastion_rootVolume(
device_name = '0',
encrypted = True,
encryption_key = '0',
iops = 56,
size = 8,
throughput = 56,
type = '0', ),
security_group_ids = [
'0'
],
spot_market_options = kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_status_bastion_spot_market_options.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_status_bastion_spotMarketOptions(
max_price = '0', ),
ssh_key_name = '0',
subnet_id = '0',
tags = {
'key' : '0'
},
tenancy = '0',
type = '0',
user_data = '0',
volume_i_ds = [
'0'
], ),
conditions = [
kubernetes.client.models.io_x_k8s_cluster_addons_v1alpha3_cluster_resource_set_status_conditions.io_x_k8s_cluster_addons_v1alpha3_ClusterResourceSet_status_conditions(
last_transition_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
message = '0',
reason = '0',
severity = '0',
status = '0',
type = '0', )
],
failure_domains = {
'key' : kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_status_failure_domains.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_status_failureDomains(
attributes = {
'key' : '0'
},
control_plane = True, )
},
network_status = kubernetes.client.models.io_x_k8s_cluster_infrastructure_v1alpha4_aws_cluster_status_network_status.io_x_k8s_cluster_infrastructure_v1alpha4_AWSCluster_status_networkStatus(
api_server_elb = kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_status_network_api_server_elb.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_status_network_apiServerElb(
availability_zones = [
'0'
],
dns_name = '0',
health_checks = kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_status_network_api_server_elb_health_checks.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_status_network_apiServerElb_healthChecks(
healthy_threshold = 56,
interval = 56,
target = '0',
timeout = 56,
unhealthy_threshold = 56, ),
listeners = [
kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_status_network_api_server_elb_listeners.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_status_network_apiServerElb_listeners(
instance_port = 56,
instance_protocol = '0',
port = 56,
protocol = '0', )
],
name = '0',
scheme = '0',
subnet_ids = [
'0'
], ),
security_groups = {
'key' : kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_status_network_security_groups.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_status_network_securityGroups(
id = '0',
ingress_rule = [
kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_status_network_ingress_rule.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_status_network_ingressRule(
cidr_blocks = [
'0'
],
description = '0',
from_port = 56,
protocol = '0',
source_security_group_ids = [
'0'
],
to_port = 56, )
],
name = '0', )
}, ),
ready = True, )
)
else :
return IoXK8sClusterInfrastructureV1alpha4AWSCluster(
)
def testIoXK8sClusterInfrastructureV1alpha4AWSCluster(self):
"""Test IoXK8sClusterInfrastructureV1alpha4AWSCluster"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 59.755814
| 281
| 0.512746
|
794d070bb58d699ffe9286af979c96b85869f3e6
| 2,590
|
py
|
Python
|
src/sql/interpretor.py
|
tsellam/syllabus
|
112d9d47715a85181bd4afb19acfdd17a895eaad
|
[
"MIT"
] | null | null | null |
src/sql/interpretor.py
|
tsellam/syllabus
|
112d9d47715a85181bd4afb19acfdd17a895eaad
|
[
"MIT"
] | null | null | null |
src/sql/interpretor.py
|
tsellam/syllabus
|
112d9d47715a85181bd4afb19acfdd17a895eaad
|
[
"MIT"
] | 1
|
2021-04-15T04:51:05.000Z
|
2021-04-15T04:51:05.000Z
|
from collections import *
try:
import instabase.notebook.ipython.utils as ib
ib.import_pyfile('./ops.py', 'ops')
except:
pass
from ops import *
def run_op(op, f=lambda t:t):
"""
This function interprets the current operator and constructs an
appropriate callback function to send to the parent operator
@op current operator to execute
@f the function to call for every output tuple of this operator (op)
"""
klass = op.__class__.__name__
if klass == "Print":
def print_f(tup):
            print(tup)
run_op(op.p, print_f)
elif klass == "Scan":
for tup in op.data:
if f(tup) == False:
break
elif klass == "Join":
def outer_loop(left):
def inner_loop(right):
if op.cond(left, right):
newtup = dict()
newtup.update(left)
newtup.update(right)
f(newtup)
run_op(op.r, inner_loop)
run_op(op.l, outer_loop)
elif klass == "Limit":
        # Wrap the loop counter in a tiny object: a plain int in the enclosing
        # scope cannot be rebound from the nested closure below.
class I(object):
def __init__(self):
self.i = 0
def __f__(i):
def limit_f(tup):
if i.i >= op.limit:
return False
i.i += 1
f(tup)
return limit_f
run_op(op.p, __f__(I()))
elif klass == "GroupBy":
hashtable = defaultdict(lambda: [None, None, []])
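        # hashtable maps each group key to [key tuple, last tuple seen for the
        # group, list of all tuples in the group].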
def group_f(tup):
key = tuple([e(tup) for e in op.group_exprs])
hashtable[key][0] = key
hashtable[key][1] = tup
hashtable[key][2].append(tup)
run_op(op.p, group_f)
        for _, (key, tup, group) in hashtable.items():
tup = dict(tup)
tup["__key__"] = key
tup["__group__"] = group
f(tup)
elif klass == "OrderBy":
tup_buffer = []
def order_f(tup):
tup_buffer.append(tup)
run_op(op.p, order_f)
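        # tup_buffer is only filled here; sorting and emitting the buffered
        # tuples is left unimplemented in this operator.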
elif klass == "Filter":
def where_f(tup):
if op.cond(tup):
f(tup)
run_op(op.p, where_f)
elif klass == "Project":
def project_f(tup):
ret = dict()
for exp, alias in zip(op.exprs, op.aliases):
ret[alias] = exp(tup)
f(ret)
run_op(op.p, project_f)
if __name__ == "__main__":
o = Print(
Limit(
Project(
Filter(
Join(
Scan("data.csv"),
Project(Scan("data.csv"), ["a", "b", "c"], ["x", "y", "z"]),
"a = x"),
"a <= x"),
["a*2", "c-a"]
),
5
)
)
s = Scan("data.csv")
g = GroupBy(s, ["a"])
p = Project(g, ["avg(b)", "avg(a)", "a"], ["avg", "avg2", "a"])
run_op(Print(p))
| 22.920354
| 76
| 0.535907
|
794d07798c5f1b6841aca064b6aed00ca4aa7b7c
| 894
|
py
|
Python
|
population/config/urls.py
|
Zachary-Jackson/population-statistical-visualization
|
6d29119e8f7d181786293b6bd3f0c1900a1fa6b7
|
[
"BSD-3-Clause"
] | null | null | null |
population/config/urls.py
|
Zachary-Jackson/population-statistical-visualization
|
6d29119e8f7d181786293b6bd3f0c1900a1fa6b7
|
[
"BSD-3-Clause"
] | null | null | null |
population/config/urls.py
|
Zachary-Jackson/population-statistical-visualization
|
6d29119e8f7d181786293b6bd3f0c1900a1fa6b7
|
[
"BSD-3-Clause"
] | null | null | null |
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
path('api/v1/', include('population.urls')),
url(r'^api-auth/', include('rest_framework.urls'))
]
| 35.76
| 77
| 0.704698
|
794d077b308646d88ea156c101b66acca5300cf4
| 1,197
|
py
|
Python
|
azure_blob_loadgen.py
|
lawrencegripper/airflow-testing
|
e059cd2491a1507c5b759861b6fa7d540be78d19
|
[
"MIT"
] | null | null | null |
azure_blob_loadgen.py
|
lawrencegripper/airflow-testing
|
e059cd2491a1507c5b759861b6fa7d540be78d19
|
[
"MIT"
] | null | null | null |
azure_blob_loadgen.py
|
lawrencegripper/airflow-testing
|
e059cd2491a1507c5b759861b6fa7d540be78d19
|
[
"MIT"
] | null | null | null |
from datetime import timedelta, datetime
import airflow
from airflow import DAG
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(2),
}
dag = DAG(
dag_id="azure_blob_loadgen",
default_args=args,
schedule_interval="@once",
)
image_url = "https://proxy.duckduckgo.com/iu/?u=http%3A%2F%2Fwww.gpwebsolutions-host.co.uk%2F1400%2Ffiles%2F2014%2F02%2Fprescription1.jpg&f=1"
def start_image_processing(**context):
print("Start load gen")
    for x in range(0, 100):
def trigger_processing_dag(context, dag_run_obj):
dag_run_obj.payload = {
"image_url": image_url,
}
return dag_run_obj
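        # Legacy (Airflow 1.x-style) pattern: the python_callable receives the
        # proposed DagRun, and the payload it sets is handed to the triggered
        # "image_processing" DAG; calling execute() below fires one trigger per
        # loop iteration.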
TriggerDagRunOperator(
task_id="trigger_processing",
trigger_dag_id="image_processing",
python_callable=trigger_processing_dag,
dag=dag
).execute(context)
print("Finish load gen")
python_task = PythonOperator(
task_id='start',
python_callable=start_image_processing,
dag=dag,
)
| 28.5
| 142
| 0.675021
|
794d07f03002987e9d5736620f67c6413858f67f
| 143
|
py
|
Python
|
example/validator/development_settings.py
|
thekashifmalik/dynaconf
|
2cb4ad53e9dff7ed2582a48ec975fa86780ab911
|
[
"MIT"
] | null | null | null |
example/validator/development_settings.py
|
thekashifmalik/dynaconf
|
2cb4ad53e9dff7ed2582a48ec975fa86780ab911
|
[
"MIT"
] | null | null | null |
example/validator/development_settings.py
|
thekashifmalik/dynaconf
|
2cb4ad53e9dff7ed2582a48ec975fa86780ab911
|
[
"MIT"
] | null | null | null |
EXAMPLE = True
MYSQL_HOST = 'development.com'
VERSION = 1
AGE = 15
NAME = 'MIKE'
IMAGE_1 = 'aaa'
IMAGE_2 = 'bbb'
IMAGE_4 = 'a'
IMAGE_5 = 'b'
| 11.916667
| 30
| 0.643357
|
794d0805d7c47b1105492ab28e5f72a701e9f5c8
| 76,090
|
py
|
Python
|
transiter_ny_mta/transiter_ny_mta/proto/subwaytrips_pb2/transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2.py
|
Pizza-Ratz/transiter-ny
|
40091d3ff0c1b9e046b0d3ca708acb81df5019c6
|
[
"MIT"
] | 1
|
2021-01-25T16:02:14.000Z
|
2021-01-25T16:02:14.000Z
|
transiter_ny_mta/transiter_ny_mta/proto/subwaytrips_pb2/transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2.py
|
Pizza-Ratz/transiter-ny
|
40091d3ff0c1b9e046b0d3ca708acb81df5019c6
|
[
"MIT"
] | null | null | null |
transiter_ny_mta/transiter_ny_mta/proto/subwaytrips_pb2/transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2.py
|
Pizza-Ratz/transiter-ny
|
40091d3ff0c1b9e046b0d3ca708acb81df5019c6
|
[
"MIT"
] | 1
|
2021-07-02T14:34:04.000Z
|
2021-07-02T14:34:04.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: transiter-ny-mta-subwaytrips-gtfs-rt-base.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="transiter-ny-mta-subwaytrips-gtfs-rt-base.proto",
package="transiter_ny_mta_subwaytrips",
syntax="proto2",
serialized_options=_b("\n'com.github.transiter-ny-mta.subwaytrips"),
serialized_pb=_b(
'\n/transiter-ny-mta-subwaytrips-gtfs-rt-base.proto\x12\x1ctransiter_ny_mta_subwaytrips"\x91\x01\n\x0b\x46\x65\x65\x64Message\x12\x38\n\x06header\x18\x01 \x02(\x0b\x32(.transiter_ny_mta_subwaytrips.FeedHeader\x12\x38\n\x06\x65ntity\x18\x02 \x03(\x0b\x32(.transiter_ny_mta_subwaytrips.FeedEntity*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N"\xe3\x01\n\nFeedHeader\x12\x1d\n\x15gtfs_realtime_version\x18\x01 \x02(\t\x12]\n\x0eincrementality\x18\x02 \x01(\x0e\x32\x37.transiter_ny_mta_subwaytrips.FeedHeader.Incrementality:\x0c\x46ULL_DATASET\x12\x11\n\ttimestamp\x18\x03 \x01(\x04"4\n\x0eIncrementality\x12\x10\n\x0c\x46ULL_DATASET\x10\x00\x12\x10\n\x0c\x44IFFERENTIAL\x10\x01*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N"\xf6\x01\n\nFeedEntity\x12\n\n\x02id\x18\x01 \x02(\t\x12\x19\n\nis_deleted\x18\x02 \x01(\x08:\x05\x66\x61lse\x12=\n\x0btrip_update\x18\x03 \x01(\x0b\x32(.transiter_ny_mta_subwaytrips.TripUpdate\x12>\n\x07vehicle\x18\x04 \x01(\x0b\x32-.transiter_ny_mta_subwaytrips.VehiclePosition\x12\x32\n\x05\x61lert\x18\x05 \x01(\x0b\x32#.transiter_ny_mta_subwaytrips.Alert*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N"\x8b\x06\n\nTripUpdate\x12:\n\x04trip\x18\x01 \x02(\x0b\x32,.transiter_ny_mta_subwaytrips.TripDescriptor\x12@\n\x07vehicle\x18\x03 \x01(\x0b\x32/.transiter_ny_mta_subwaytrips.VehicleDescriptor\x12Q\n\x10stop_time_update\x18\x02 \x03(\x0b\x32\x37.transiter_ny_mta_subwaytrips.TripUpdate.StopTimeUpdate\x12\x11\n\ttimestamp\x18\x04 \x01(\x04\x12\r\n\x05\x64\x65lay\x18\x05 \x01(\x05\x1aQ\n\rStopTimeEvent\x12\r\n\x05\x64\x65lay\x18\x01 \x01(\x05\x12\x0c\n\x04time\x18\x02 \x01(\x03\x12\x13\n\x0buncertainty\x18\x03 \x01(\x05*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N\x1a\xa6\x03\n\x0eStopTimeUpdate\x12\x15\n\rstop_sequence\x18\x01 \x01(\r\x12\x0f\n\x07stop_id\x18\x04 \x01(\t\x12G\n\x07\x61rrival\x18\x02 \x01(\x0b\x32\x36.transiter_ny_mta_subwaytrips.TripUpdate.StopTimeEvent\x12I\n\tdeparture\x18\x03 \x01(\x0b\x32\x36.transiter_ny_mta_subwaytrips.TripUpdate.StopTimeEvent\x12v\n\x15schedule_relationship\x18\x05 \x01(\x0e\x32L.transiter_ny_mta_subwaytrips.TripUpdate.StopTimeUpdate.ScheduleRelationship:\tSCHEDULED"P\n\x14ScheduleRelationship\x12\r\n\tSCHEDULED\x10\x00\x12\x0b\n\x07SKIPPED\x10\x01\x12\x0b\n\x07NO_DATA\x10\x02\x12\x0f\n\x0bUNSCHEDULED\x10\x03*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N"\xce\x07\n\x0fVehiclePosition\x12:\n\x04trip\x18\x01 \x01(\x0b\x32,.transiter_ny_mta_subwaytrips.TripDescriptor\x12@\n\x07vehicle\x18\x08 \x01(\x0b\x32/.transiter_ny_mta_subwaytrips.VehicleDescriptor\x12\x38\n\x08position\x18\x02 \x01(\x0b\x32&.transiter_ny_mta_subwaytrips.Position\x12\x1d\n\x15\x63urrent_stop_sequence\x18\x03 \x01(\r\x12\x0f\n\x07stop_id\x18\x07 \x01(\t\x12\x66\n\x0e\x63urrent_status\x18\x04 \x01(\x0e\x32?.transiter_ny_mta_subwaytrips.VehiclePosition.VehicleStopStatus:\rIN_TRANSIT_TO\x12\x11\n\ttimestamp\x18\x05 \x01(\x04\x12W\n\x10\x63ongestion_level\x18\x06 \x01(\x0e\x32=.transiter_ny_mta_subwaytrips.VehiclePosition.CongestionLevel\x12W\n\x10occupancy_status\x18\t \x01(\x0e\x32=.transiter_ny_mta_subwaytrips.VehiclePosition.OccupancyStatus\x12\x1c\n\x14occupancy_percentage\x18\n 
\x01(\r"G\n\x11VehicleStopStatus\x12\x0f\n\x0bINCOMING_AT\x10\x00\x12\x0e\n\nSTOPPED_AT\x10\x01\x12\x11\n\rIN_TRANSIT_TO\x10\x02"}\n\x0f\x43ongestionLevel\x12\x1c\n\x18UNKNOWN_CONGESTION_LEVEL\x10\x00\x12\x14\n\x10RUNNING_SMOOTHLY\x10\x01\x12\x0f\n\x0bSTOP_AND_GO\x10\x02\x12\x0e\n\nCONGESTION\x10\x03\x12\x15\n\x11SEVERE_CONGESTION\x10\x04"\xaf\x01\n\x0fOccupancyStatus\x12\t\n\x05\x45MPTY\x10\x00\x12\x18\n\x14MANY_SEATS_AVAILABLE\x10\x01\x12\x17\n\x13\x46\x45W_SEATS_AVAILABLE\x10\x02\x12\x16\n\x12STANDING_ROOM_ONLY\x10\x03\x12\x1e\n\x1a\x43RUSHED_STANDING_ROOM_ONLY\x10\x04\x12\x08\n\x04\x46ULL\x10\x05\x12\x1c\n\x18NOT_ACCEPTING_PASSENGERS\x10\x06*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N"\xf8\t\n\x05\x41lert\x12>\n\ractive_period\x18\x01 \x03(\x0b\x32\'.transiter_ny_mta_subwaytrips.TimeRange\x12\x45\n\x0finformed_entity\x18\x05 \x03(\x0b\x32,.transiter_ny_mta_subwaytrips.EntitySelector\x12G\n\x05\x63\x61use\x18\x06 \x01(\x0e\x32).transiter_ny_mta_subwaytrips.Alert.Cause:\rUNKNOWN_CAUSE\x12J\n\x06\x65\x66\x66\x65\x63t\x18\x07 \x01(\x0e\x32*.transiter_ny_mta_subwaytrips.Alert.Effect:\x0eUNKNOWN_EFFECT\x12;\n\x03url\x18\x08 \x01(\x0b\x32..transiter_ny_mta_subwaytrips.TranslatedString\x12\x43\n\x0bheader_text\x18\n \x01(\x0b\x32..transiter_ny_mta_subwaytrips.TranslatedString\x12H\n\x10\x64\x65scription_text\x18\x0b \x01(\x0b\x32..transiter_ny_mta_subwaytrips.TranslatedString\x12G\n\x0ftts_header_text\x18\x0c \x01(\x0b\x32..transiter_ny_mta_subwaytrips.TranslatedString\x12L\n\x14tts_description_text\x18\r \x01(\x0b\x32..transiter_ny_mta_subwaytrips.TranslatedString\x12[\n\x0eseverity_level\x18\x0e \x01(\x0e\x32\x31.transiter_ny_mta_subwaytrips.Alert.SeverityLevel:\x10UNKNOWN_SEVERITY"\xd8\x01\n\x05\x43\x61use\x12\x11\n\rUNKNOWN_CAUSE\x10\x01\x12\x0f\n\x0bOTHER_CAUSE\x10\x02\x12\x15\n\x11TECHNICAL_PROBLEM\x10\x03\x12\n\n\x06STRIKE\x10\x04\x12\x11\n\rDEMONSTRATION\x10\x05\x12\x0c\n\x08\x41\x43\x43IDENT\x10\x06\x12\x0b\n\x07HOLIDAY\x10\x07\x12\x0b\n\x07WEATHER\x10\x08\x12\x0f\n\x0bMAINTENANCE\x10\t\x12\x10\n\x0c\x43ONSTRUCTION\x10\n\x12\x13\n\x0fPOLICE_ACTIVITY\x10\x0b\x12\x15\n\x11MEDICAL_EMERGENCY\x10\x0c"\xdd\x01\n\x06\x45\x66\x66\x65\x63t\x12\x0e\n\nNO_SERVICE\x10\x01\x12\x13\n\x0fREDUCED_SERVICE\x10\x02\x12\x16\n\x12SIGNIFICANT_DELAYS\x10\x03\x12\n\n\x06\x44\x45TOUR\x10\x04\x12\x16\n\x12\x41\x44\x44ITIONAL_SERVICE\x10\x05\x12\x14\n\x10MODIFIED_SERVICE\x10\x06\x12\x10\n\x0cOTHER_EFFECT\x10\x07\x12\x12\n\x0eUNKNOWN_EFFECT\x10\x08\x12\x0e\n\nSTOP_MOVED\x10\t\x12\r\n\tNO_EFFECT\x10\n\x12\x17\n\x13\x41\x43\x43\x45SSIBILITY_ISSUE\x10\x0b"H\n\rSeverityLevel\x12\x14\n\x10UNKNOWN_SEVERITY\x10\x01\x12\x08\n\x04INFO\x10\x02\x12\x0b\n\x07WARNING\x10\x03\x12\n\n\x06SEVERE\x10\x04*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N"7\n\tTimeRange\x12\r\n\x05start\x18\x01 \x01(\x04\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x04*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N"q\n\x08Position\x12\x10\n\x08latitude\x18\x01 \x02(\x02\x12\x11\n\tlongitude\x18\x02 \x02(\x02\x12\x0f\n\x07\x62\x65\x61ring\x18\x03 \x01(\x02\x12\x10\n\x08odometer\x18\x04 \x01(\x01\x12\r\n\x05speed\x18\x05 \x01(\x02*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N"\xc9\x02\n\x0eTripDescriptor\x12\x0f\n\x07trip_id\x18\x01 \x01(\t\x12\x10\n\x08route_id\x18\x05 \x01(\t\x12\x14\n\x0c\x64irection_id\x18\x06 \x01(\r\x12\x12\n\nstart_time\x18\x02 \x01(\t\x12\x12\n\nstart_date\x18\x03 \x01(\t\x12`\n\x15schedule_relationship\x18\x04 
\x01(\x0e\x32\x41.transiter_ny_mta_subwaytrips.TripDescriptor.ScheduleRelationship"d\n\x14ScheduleRelationship\x12\r\n\tSCHEDULED\x10\x00\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x01\x12\x0f\n\x0bUNSCHEDULED\x10\x02\x12\x0c\n\x08\x43\x41NCELED\x10\x03\x12\x13\n\x0bREPLACEMENT\x10\x05\x1a\x02\x08\x01*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N"U\n\x11VehicleDescriptor\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05label\x18\x02 \x01(\t\x12\x15\n\rlicense_plate\x18\x03 \x01(\t*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N"\xbc\x01\n\x0e\x45ntitySelector\x12\x11\n\tagency_id\x18\x01 \x01(\t\x12\x10\n\x08route_id\x18\x02 \x01(\t\x12\x12\n\nroute_type\x18\x03 \x01(\x05\x12:\n\x04trip\x18\x04 \x01(\x0b\x32,.transiter_ny_mta_subwaytrips.TripDescriptor\x12\x0f\n\x07stop_id\x18\x05 \x01(\t\x12\x14\n\x0c\x64irection_id\x18\x06 \x01(\r*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N"\xb2\x01\n\x10TranslatedString\x12O\n\x0btranslation\x18\x01 \x03(\x0b\x32:.transiter_ny_mta_subwaytrips.TranslatedString.Translation\x1a=\n\x0bTranslation\x12\x0c\n\x04text\x18\x01 \x02(\t\x12\x10\n\x08language\x18\x02 \x01(\t*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90N*\x06\x08\xe8\x07\x10\xd0\x0f*\x06\x08\xa8\x46\x10\x90NB)\n\'com.github.transiter-ny-mta.subwaytrips'
),
)
_FEEDHEADER_INCREMENTALITY = _descriptor.EnumDescriptor(
name="Incrementality",
full_name="transiter_ny_mta_subwaytrips.FeedHeader.Incrementality",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="FULL_DATASET", index=0, number=0, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="DIFFERENTIAL", index=1, number=1, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=389,
serialized_end=441,
)
_sym_db.RegisterEnumDescriptor(_FEEDHEADER_INCREMENTALITY)
_TRIPUPDATE_STOPTIMEUPDATE_SCHEDULERELATIONSHIP = _descriptor.EnumDescriptor(
name="ScheduleRelationship",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.StopTimeUpdate.ScheduleRelationship",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="SCHEDULED", index=0, number=0, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SKIPPED", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="NO_DATA", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="UNSCHEDULED", index=3, number=3, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=1376,
serialized_end=1456,
)
_sym_db.RegisterEnumDescriptor(_TRIPUPDATE_STOPTIMEUPDATE_SCHEDULERELATIONSHIP)
_VEHICLEPOSITION_VEHICLESTOPSTATUS = _descriptor.EnumDescriptor(
name="VehicleStopStatus",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.VehicleStopStatus",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="INCOMING_AT", index=0, number=0, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="STOPPED_AT", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="IN_TRANSIT_TO", index=2, number=2, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=2073,
serialized_end=2144,
)
_sym_db.RegisterEnumDescriptor(_VEHICLEPOSITION_VEHICLESTOPSTATUS)
_VEHICLEPOSITION_CONGESTIONLEVEL = _descriptor.EnumDescriptor(
name="CongestionLevel",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.CongestionLevel",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="UNKNOWN_CONGESTION_LEVEL",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="RUNNING_SMOOTHLY",
index=1,
number=1,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="STOP_AND_GO", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CONGESTION", index=3, number=3, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SEVERE_CONGESTION",
index=4,
number=4,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=2146,
serialized_end=2271,
)
_sym_db.RegisterEnumDescriptor(_VEHICLEPOSITION_CONGESTIONLEVEL)
_VEHICLEPOSITION_OCCUPANCYSTATUS = _descriptor.EnumDescriptor(
name="OccupancyStatus",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.OccupancyStatus",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="EMPTY", index=0, number=0, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="MANY_SEATS_AVAILABLE",
index=1,
number=1,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="FEW_SEATS_AVAILABLE",
index=2,
number=2,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="STANDING_ROOM_ONLY",
index=3,
number=3,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="CRUSHED_STANDING_ROOM_ONLY",
index=4,
number=4,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="FULL", index=5, number=5, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="NOT_ACCEPTING_PASSENGERS",
index=6,
number=6,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=2274,
serialized_end=2449,
)
_sym_db.RegisterEnumDescriptor(_VEHICLEPOSITION_OCCUPANCYSTATUS)
_ALERT_CAUSE = _descriptor.EnumDescriptor(
name="Cause",
full_name="transiter_ny_mta_subwaytrips.Alert.Cause",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="UNKNOWN_CAUSE", index=0, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="OTHER_CAUSE", index=1, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="TECHNICAL_PROBLEM",
index=2,
number=3,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="STRIKE", index=3, number=4, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="DEMONSTRATION", index=4, number=5, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ACCIDENT", index=5, number=6, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="HOLIDAY", index=6, number=7, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="WEATHER", index=7, number=8, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="MAINTENANCE", index=8, number=9, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CONSTRUCTION", index=9, number=10, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="POLICE_ACTIVITY",
index=10,
number=11,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="MEDICAL_EMERGENCY",
index=11,
number=12,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=3210,
serialized_end=3426,
)
_sym_db.RegisterEnumDescriptor(_ALERT_CAUSE)
_ALERT_EFFECT = _descriptor.EnumDescriptor(
name="Effect",
full_name="transiter_ny_mta_subwaytrips.Alert.Effect",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="NO_SERVICE", index=0, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REDUCED_SERVICE",
index=1,
number=2,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="SIGNIFICANT_DELAYS",
index=2,
number=3,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="DETOUR", index=3, number=4, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ADDITIONAL_SERVICE",
index=4,
number=5,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="MODIFIED_SERVICE",
index=5,
number=6,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="OTHER_EFFECT", index=6, number=7, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="UNKNOWN_EFFECT", index=7, number=8, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="STOP_MOVED", index=8, number=9, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="NO_EFFECT", index=9, number=10, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ACCESSIBILITY_ISSUE",
index=10,
number=11,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=3429,
serialized_end=3650,
)
_sym_db.RegisterEnumDescriptor(_ALERT_EFFECT)
_ALERT_SEVERITYLEVEL = _descriptor.EnumDescriptor(
name="SeverityLevel",
full_name="transiter_ny_mta_subwaytrips.Alert.SeverityLevel",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="UNKNOWN_SEVERITY",
index=0,
number=1,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="INFO", index=1, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="WARNING", index=2, number=3, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SEVERE", index=3, number=4, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=3652,
serialized_end=3724,
)
_sym_db.RegisterEnumDescriptor(_ALERT_SEVERITYLEVEL)
_TRIPDESCRIPTOR_SCHEDULERELATIONSHIP = _descriptor.EnumDescriptor(
name="ScheduleRelationship",
full_name="transiter_ny_mta_subwaytrips.TripDescriptor.ScheduleRelationship",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="SCHEDULED", index=0, number=0, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ADDED", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="UNSCHEDULED", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CANCELED", index=3, number=3, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REPLACEMENT",
index=4,
number=5,
serialized_options=_b("\010\001"),
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=4128,
serialized_end=4228,
)
_sym_db.RegisterEnumDescriptor(_TRIPDESCRIPTOR_SCHEDULERELATIONSHIP)
_FEEDMESSAGE = _descriptor.Descriptor(
name="FeedMessage",
full_name="transiter_ny_mta_subwaytrips.FeedMessage",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="header",
full_name="transiter_ny_mta_subwaytrips.FeedMessage.header",
index=0,
number=1,
type=11,
cpp_type=10,
label=2,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="entity",
full_name="transiter_ny_mta_subwaytrips.FeedMessage.entity",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=82,
serialized_end=227,
)
_FEEDHEADER = _descriptor.Descriptor(
name="FeedHeader",
full_name="transiter_ny_mta_subwaytrips.FeedHeader",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="gtfs_realtime_version",
full_name="transiter_ny_mta_subwaytrips.FeedHeader.gtfs_realtime_version",
index=0,
number=1,
type=9,
cpp_type=9,
label=2,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="incrementality",
full_name="transiter_ny_mta_subwaytrips.FeedHeader.incrementality",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="timestamp",
full_name="transiter_ny_mta_subwaytrips.FeedHeader.timestamp",
index=2,
number=3,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_FEEDHEADER_INCREMENTALITY,],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=230,
serialized_end=457,
)
_FEEDENTITY = _descriptor.Descriptor(
name="FeedEntity",
full_name="transiter_ny_mta_subwaytrips.FeedEntity",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="id",
full_name="transiter_ny_mta_subwaytrips.FeedEntity.id",
index=0,
number=1,
type=9,
cpp_type=9,
label=2,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="is_deleted",
full_name="transiter_ny_mta_subwaytrips.FeedEntity.is_deleted",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="trip_update",
full_name="transiter_ny_mta_subwaytrips.FeedEntity.trip_update",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="vehicle",
full_name="transiter_ny_mta_subwaytrips.FeedEntity.vehicle",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="alert",
full_name="transiter_ny_mta_subwaytrips.FeedEntity.alert",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=460,
serialized_end=706,
)
_TRIPUPDATE_STOPTIMEEVENT = _descriptor.Descriptor(
name="StopTimeEvent",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.StopTimeEvent",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="delay",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.StopTimeEvent.delay",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="time",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.StopTimeEvent.time",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="uncertainty",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.StopTimeEvent.uncertainty",
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=966,
serialized_end=1047,
)
_TRIPUPDATE_STOPTIMEUPDATE = _descriptor.Descriptor(
name="StopTimeUpdate",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.StopTimeUpdate",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="stop_sequence",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.StopTimeUpdate.stop_sequence",
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="stop_id",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.StopTimeUpdate.stop_id",
index=1,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="arrival",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.StopTimeUpdate.arrival",
index=2,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="departure",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.StopTimeUpdate.departure",
index=3,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="schedule_relationship",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.StopTimeUpdate.schedule_relationship",
index=4,
number=5,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_TRIPUPDATE_STOPTIMEUPDATE_SCHEDULERELATIONSHIP,],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=1050,
serialized_end=1472,
)
_TRIPUPDATE = _descriptor.Descriptor(
name="TripUpdate",
full_name="transiter_ny_mta_subwaytrips.TripUpdate",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="trip",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.trip",
index=0,
number=1,
type=11,
cpp_type=10,
label=2,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="vehicle",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.vehicle",
index=1,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="stop_time_update",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.stop_time_update",
index=2,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="timestamp",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.timestamp",
index=3,
number=4,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="delay",
full_name="transiter_ny_mta_subwaytrips.TripUpdate.delay",
index=4,
number=5,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_TRIPUPDATE_STOPTIMEEVENT, _TRIPUPDATE_STOPTIMEUPDATE,],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=709,
serialized_end=1488,
)
_VEHICLEPOSITION = _descriptor.Descriptor(
name="VehiclePosition",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="trip",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.trip",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="vehicle",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.vehicle",
index=1,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="position",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.position",
index=2,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="current_stop_sequence",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.current_stop_sequence",
index=3,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="stop_id",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.stop_id",
index=4,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="current_status",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.current_status",
index=5,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=2,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="timestamp",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.timestamp",
index=6,
number=5,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="congestion_level",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.congestion_level",
index=7,
number=6,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="occupancy_status",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.occupancy_status",
index=8,
number=9,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="occupancy_percentage",
full_name="transiter_ny_mta_subwaytrips.VehiclePosition.occupancy_percentage",
index=9,
number=10,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[
_VEHICLEPOSITION_VEHICLESTOPSTATUS,
_VEHICLEPOSITION_CONGESTIONLEVEL,
_VEHICLEPOSITION_OCCUPANCYSTATUS,
],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=1491,
serialized_end=2465,
)
_ALERT = _descriptor.Descriptor(
name="Alert",
full_name="transiter_ny_mta_subwaytrips.Alert",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="active_period",
full_name="transiter_ny_mta_subwaytrips.Alert.active_period",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="informed_entity",
full_name="transiter_ny_mta_subwaytrips.Alert.informed_entity",
index=1,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cause",
full_name="transiter_ny_mta_subwaytrips.Alert.cause",
index=2,
number=6,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="effect",
full_name="transiter_ny_mta_subwaytrips.Alert.effect",
index=3,
number=7,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=8,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="url",
full_name="transiter_ny_mta_subwaytrips.Alert.url",
index=4,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="header_text",
full_name="transiter_ny_mta_subwaytrips.Alert.header_text",
index=5,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="description_text",
full_name="transiter_ny_mta_subwaytrips.Alert.description_text",
index=6,
number=11,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="tts_header_text",
full_name="transiter_ny_mta_subwaytrips.Alert.tts_header_text",
index=7,
number=12,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="tts_description_text",
full_name="transiter_ny_mta_subwaytrips.Alert.tts_description_text",
index=8,
number=13,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="severity_level",
full_name="transiter_ny_mta_subwaytrips.Alert.severity_level",
index=9,
number=14,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_ALERT_CAUSE, _ALERT_EFFECT, _ALERT_SEVERITYLEVEL,],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=2468,
serialized_end=3740,
)
_TIMERANGE = _descriptor.Descriptor(
name="TimeRange",
full_name="transiter_ny_mta_subwaytrips.TimeRange",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="start",
full_name="transiter_ny_mta_subwaytrips.TimeRange.start",
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="end",
full_name="transiter_ny_mta_subwaytrips.TimeRange.end",
index=1,
number=2,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=3742,
serialized_end=3797,
)
_POSITION = _descriptor.Descriptor(
name="Position",
full_name="transiter_ny_mta_subwaytrips.Position",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="latitude",
full_name="transiter_ny_mta_subwaytrips.Position.latitude",
index=0,
number=1,
type=2,
cpp_type=6,
label=2,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="longitude",
full_name="transiter_ny_mta_subwaytrips.Position.longitude",
index=1,
number=2,
type=2,
cpp_type=6,
label=2,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="bearing",
full_name="transiter_ny_mta_subwaytrips.Position.bearing",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="odometer",
full_name="transiter_ny_mta_subwaytrips.Position.odometer",
index=3,
number=4,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="speed",
full_name="transiter_ny_mta_subwaytrips.Position.speed",
index=4,
number=5,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=3799,
serialized_end=3912,
)
_TRIPDESCRIPTOR = _descriptor.Descriptor(
name="TripDescriptor",
full_name="transiter_ny_mta_subwaytrips.TripDescriptor",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="trip_id",
full_name="transiter_ny_mta_subwaytrips.TripDescriptor.trip_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="route_id",
full_name="transiter_ny_mta_subwaytrips.TripDescriptor.route_id",
index=1,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="direction_id",
full_name="transiter_ny_mta_subwaytrips.TripDescriptor.direction_id",
index=2,
number=6,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="start_time",
full_name="transiter_ny_mta_subwaytrips.TripDescriptor.start_time",
index=3,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="start_date",
full_name="transiter_ny_mta_subwaytrips.TripDescriptor.start_date",
index=4,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="schedule_relationship",
full_name="transiter_ny_mta_subwaytrips.TripDescriptor.schedule_relationship",
index=5,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_TRIPDESCRIPTOR_SCHEDULERELATIONSHIP,],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=3915,
serialized_end=4244,
)
_VEHICLEDESCRIPTOR = _descriptor.Descriptor(
name="VehicleDescriptor",
full_name="transiter_ny_mta_subwaytrips.VehicleDescriptor",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="id",
full_name="transiter_ny_mta_subwaytrips.VehicleDescriptor.id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="label",
full_name="transiter_ny_mta_subwaytrips.VehicleDescriptor.label",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="license_plate",
full_name="transiter_ny_mta_subwaytrips.VehicleDescriptor.license_plate",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=4246,
serialized_end=4331,
)
_ENTITYSELECTOR = _descriptor.Descriptor(
name="EntitySelector",
full_name="transiter_ny_mta_subwaytrips.EntitySelector",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="agency_id",
full_name="transiter_ny_mta_subwaytrips.EntitySelector.agency_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="route_id",
full_name="transiter_ny_mta_subwaytrips.EntitySelector.route_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="route_type",
full_name="transiter_ny_mta_subwaytrips.EntitySelector.route_type",
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="trip",
full_name="transiter_ny_mta_subwaytrips.EntitySelector.trip",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="stop_id",
full_name="transiter_ny_mta_subwaytrips.EntitySelector.stop_id",
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="direction_id",
full_name="transiter_ny_mta_subwaytrips.EntitySelector.direction_id",
index=5,
number=6,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=4334,
serialized_end=4522,
)
_TRANSLATEDSTRING_TRANSLATION = _descriptor.Descriptor(
name="Translation",
full_name="transiter_ny_mta_subwaytrips.TranslatedString.Translation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="text",
full_name="transiter_ny_mta_subwaytrips.TranslatedString.Translation.text",
index=0,
number=1,
type=9,
cpp_type=9,
label=2,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="language",
full_name="transiter_ny_mta_subwaytrips.TranslatedString.Translation.language",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=4626,
serialized_end=4687,
)
_TRANSLATEDSTRING = _descriptor.Descriptor(
name="TranslatedString",
full_name="transiter_ny_mta_subwaytrips.TranslatedString",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="translation",
full_name="transiter_ny_mta_subwaytrips.TranslatedString.translation",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_TRANSLATEDSTRING_TRANSLATION,],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[(1000, 2000), (9000, 10000),],
oneofs=[],
serialized_start=4525,
serialized_end=4703,
)
_FEEDMESSAGE.fields_by_name["header"].message_type = _FEEDHEADER
_FEEDMESSAGE.fields_by_name["entity"].message_type = _FEEDENTITY
_FEEDHEADER.fields_by_name["incrementality"].enum_type = _FEEDHEADER_INCREMENTALITY
_FEEDHEADER_INCREMENTALITY.containing_type = _FEEDHEADER
_FEEDENTITY.fields_by_name["trip_update"].message_type = _TRIPUPDATE
_FEEDENTITY.fields_by_name["vehicle"].message_type = _VEHICLEPOSITION
_FEEDENTITY.fields_by_name["alert"].message_type = _ALERT
_TRIPUPDATE_STOPTIMEEVENT.containing_type = _TRIPUPDATE
_TRIPUPDATE_STOPTIMEUPDATE.fields_by_name[
"arrival"
].message_type = _TRIPUPDATE_STOPTIMEEVENT
_TRIPUPDATE_STOPTIMEUPDATE.fields_by_name[
"departure"
].message_type = _TRIPUPDATE_STOPTIMEEVENT
_TRIPUPDATE_STOPTIMEUPDATE.fields_by_name[
"schedule_relationship"
].enum_type = _TRIPUPDATE_STOPTIMEUPDATE_SCHEDULERELATIONSHIP
_TRIPUPDATE_STOPTIMEUPDATE.containing_type = _TRIPUPDATE
_TRIPUPDATE_STOPTIMEUPDATE_SCHEDULERELATIONSHIP.containing_type = (
_TRIPUPDATE_STOPTIMEUPDATE
)
_TRIPUPDATE.fields_by_name["trip"].message_type = _TRIPDESCRIPTOR
_TRIPUPDATE.fields_by_name["vehicle"].message_type = _VEHICLEDESCRIPTOR
_TRIPUPDATE.fields_by_name["stop_time_update"].message_type = _TRIPUPDATE_STOPTIMEUPDATE
_VEHICLEPOSITION.fields_by_name["trip"].message_type = _TRIPDESCRIPTOR
_VEHICLEPOSITION.fields_by_name["vehicle"].message_type = _VEHICLEDESCRIPTOR
_VEHICLEPOSITION.fields_by_name["position"].message_type = _POSITION
_VEHICLEPOSITION.fields_by_name[
"current_status"
].enum_type = _VEHICLEPOSITION_VEHICLESTOPSTATUS
_VEHICLEPOSITION.fields_by_name[
"congestion_level"
].enum_type = _VEHICLEPOSITION_CONGESTIONLEVEL
_VEHICLEPOSITION.fields_by_name[
"occupancy_status"
].enum_type = _VEHICLEPOSITION_OCCUPANCYSTATUS
_VEHICLEPOSITION_VEHICLESTOPSTATUS.containing_type = _VEHICLEPOSITION
_VEHICLEPOSITION_CONGESTIONLEVEL.containing_type = _VEHICLEPOSITION
_VEHICLEPOSITION_OCCUPANCYSTATUS.containing_type = _VEHICLEPOSITION
_ALERT.fields_by_name["active_period"].message_type = _TIMERANGE
_ALERT.fields_by_name["informed_entity"].message_type = _ENTITYSELECTOR
_ALERT.fields_by_name["cause"].enum_type = _ALERT_CAUSE
_ALERT.fields_by_name["effect"].enum_type = _ALERT_EFFECT
_ALERT.fields_by_name["url"].message_type = _TRANSLATEDSTRING
_ALERT.fields_by_name["header_text"].message_type = _TRANSLATEDSTRING
_ALERT.fields_by_name["description_text"].message_type = _TRANSLATEDSTRING
_ALERT.fields_by_name["tts_header_text"].message_type = _TRANSLATEDSTRING
_ALERT.fields_by_name["tts_description_text"].message_type = _TRANSLATEDSTRING
_ALERT.fields_by_name["severity_level"].enum_type = _ALERT_SEVERITYLEVEL
_ALERT_CAUSE.containing_type = _ALERT
_ALERT_EFFECT.containing_type = _ALERT
_ALERT_SEVERITYLEVEL.containing_type = _ALERT
_TRIPDESCRIPTOR.fields_by_name[
"schedule_relationship"
].enum_type = _TRIPDESCRIPTOR_SCHEDULERELATIONSHIP
_TRIPDESCRIPTOR_SCHEDULERELATIONSHIP.containing_type = _TRIPDESCRIPTOR
_ENTITYSELECTOR.fields_by_name["trip"].message_type = _TRIPDESCRIPTOR
_TRANSLATEDSTRING_TRANSLATION.containing_type = _TRANSLATEDSTRING
_TRANSLATEDSTRING.fields_by_name[
"translation"
].message_type = _TRANSLATEDSTRING_TRANSLATION
DESCRIPTOR.message_types_by_name["FeedMessage"] = _FEEDMESSAGE
DESCRIPTOR.message_types_by_name["FeedHeader"] = _FEEDHEADER
DESCRIPTOR.message_types_by_name["FeedEntity"] = _FEEDENTITY
DESCRIPTOR.message_types_by_name["TripUpdate"] = _TRIPUPDATE
DESCRIPTOR.message_types_by_name["VehiclePosition"] = _VEHICLEPOSITION
DESCRIPTOR.message_types_by_name["Alert"] = _ALERT
DESCRIPTOR.message_types_by_name["TimeRange"] = _TIMERANGE
DESCRIPTOR.message_types_by_name["Position"] = _POSITION
DESCRIPTOR.message_types_by_name["TripDescriptor"] = _TRIPDESCRIPTOR
DESCRIPTOR.message_types_by_name["VehicleDescriptor"] = _VEHICLEDESCRIPTOR
DESCRIPTOR.message_types_by_name["EntitySelector"] = _ENTITYSELECTOR
DESCRIPTOR.message_types_by_name["TranslatedString"] = _TRANSLATEDSTRING
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FeedMessage = _reflection.GeneratedProtocolMessageType(
"FeedMessage",
(_message.Message,),
dict(
DESCRIPTOR=_FEEDMESSAGE,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.FeedMessage)
),
)
_sym_db.RegisterMessage(FeedMessage)
FeedHeader = _reflection.GeneratedProtocolMessageType(
"FeedHeader",
(_message.Message,),
dict(
DESCRIPTOR=_FEEDHEADER,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.FeedHeader)
),
)
_sym_db.RegisterMessage(FeedHeader)
FeedEntity = _reflection.GeneratedProtocolMessageType(
"FeedEntity",
(_message.Message,),
dict(
DESCRIPTOR=_FEEDENTITY,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.FeedEntity)
),
)
_sym_db.RegisterMessage(FeedEntity)
TripUpdate = _reflection.GeneratedProtocolMessageType(
"TripUpdate",
(_message.Message,),
dict(
StopTimeEvent=_reflection.GeneratedProtocolMessageType(
"StopTimeEvent",
(_message.Message,),
dict(
DESCRIPTOR=_TRIPUPDATE_STOPTIMEEVENT,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.TripUpdate.StopTimeEvent)
),
),
StopTimeUpdate=_reflection.GeneratedProtocolMessageType(
"StopTimeUpdate",
(_message.Message,),
dict(
DESCRIPTOR=_TRIPUPDATE_STOPTIMEUPDATE,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.TripUpdate.StopTimeUpdate)
),
),
DESCRIPTOR=_TRIPUPDATE,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.TripUpdate)
),
)
_sym_db.RegisterMessage(TripUpdate)
_sym_db.RegisterMessage(TripUpdate.StopTimeEvent)
_sym_db.RegisterMessage(TripUpdate.StopTimeUpdate)
VehiclePosition = _reflection.GeneratedProtocolMessageType(
"VehiclePosition",
(_message.Message,),
dict(
DESCRIPTOR=_VEHICLEPOSITION,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.VehiclePosition)
),
)
_sym_db.RegisterMessage(VehiclePosition)
Alert = _reflection.GeneratedProtocolMessageType(
"Alert",
(_message.Message,),
dict(
DESCRIPTOR=_ALERT,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.Alert)
),
)
_sym_db.RegisterMessage(Alert)
TimeRange = _reflection.GeneratedProtocolMessageType(
"TimeRange",
(_message.Message,),
dict(
DESCRIPTOR=_TIMERANGE,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.TimeRange)
),
)
_sym_db.RegisterMessage(TimeRange)
Position = _reflection.GeneratedProtocolMessageType(
"Position",
(_message.Message,),
dict(
DESCRIPTOR=_POSITION,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.Position)
),
)
_sym_db.RegisterMessage(Position)
TripDescriptor = _reflection.GeneratedProtocolMessageType(
"TripDescriptor",
(_message.Message,),
dict(
DESCRIPTOR=_TRIPDESCRIPTOR,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.TripDescriptor)
),
)
_sym_db.RegisterMessage(TripDescriptor)
VehicleDescriptor = _reflection.GeneratedProtocolMessageType(
"VehicleDescriptor",
(_message.Message,),
dict(
DESCRIPTOR=_VEHICLEDESCRIPTOR,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.VehicleDescriptor)
),
)
_sym_db.RegisterMessage(VehicleDescriptor)
EntitySelector = _reflection.GeneratedProtocolMessageType(
"EntitySelector",
(_message.Message,),
dict(
DESCRIPTOR=_ENTITYSELECTOR,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.EntitySelector)
),
)
_sym_db.RegisterMessage(EntitySelector)
TranslatedString = _reflection.GeneratedProtocolMessageType(
"TranslatedString",
(_message.Message,),
dict(
Translation=_reflection.GeneratedProtocolMessageType(
"Translation",
(_message.Message,),
dict(
DESCRIPTOR=_TRANSLATEDSTRING_TRANSLATION,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.TranslatedString.Translation)
),
),
DESCRIPTOR=_TRANSLATEDSTRING,
__module__="transiter_ny_mta_subwaytrips_gtfs_rt_base_pb2"
# @@protoc_insertion_point(class_scope:transiter_ny_mta_subwaytrips.TranslatedString)
),
)
_sym_db.RegisterMessage(TranslatedString)
_sym_db.RegisterMessage(TranslatedString.Translation)
DESCRIPTOR._options = None
_TRIPDESCRIPTOR_SCHEDULERELATIONSHIP.values_by_name["REPLACEMENT"]._options = None
# @@protoc_insertion_point(module_scope)
| 35.129271
| 8,003
| 0.619635
|
794d08eff63a69dc1d8843339c4b288181abe2e2
| 2,566
|
py
|
Python
|
Frontend/PULP_node.py
|
jmihali/dory
|
dedfb693a887a46d96e8e8a0d28979417a1f8344
|
[
"Apache-2.0"
] | null | null | null |
Frontend/PULP_node.py
|
jmihali/dory
|
dedfb693a887a46d96e8e8a0d28979417a1f8344
|
[
"Apache-2.0"
] | null | null | null |
Frontend/PULP_node.py
|
jmihali/dory
|
dedfb693a887a46d96e8e8a0d28979417a1f8344
|
[
"Apache-2.0"
] | null | null | null |
# should work even without -*-
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
# PULP_node.py
# Alessio Burrello <alessio.burrello@unibo.it>
#
# Copyright (C) 2019-2020 University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
class node_element():
# A node allocated in the PULP_Graph
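    # Fields default to the 'Not-initialized' placeholder and are filled in later,
    # typically through add_parameter() / add_dict_parameter().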
def __init__(self):
self.name = 'Not-initialized'
self.kernel_shape = 'Not-initialized' # fH x fW
self.ch_in = 'Not-initialized'
self.ch_out = 'Not-initialized'
self.input_index = 'Not-initialized'
self.output_index = 'Not-initialized'
self.input_dim = 'Not-initialized' # H x W
self.output_dim = 'Not-initialized' # H x W
self.pads = 'Not-initialized' # Top, Left, Bottom, Right
self.branch_out = 0
self.branch_in = 0
self.branch_change = 0
self.branch_last = 0
self.input_activation_dimensions_L3 = 0
self.output_activation_dimensions_L3 = 0
self.inmul1 = 'empty'
self.inmul2 = 'empty'
self.weight_bits = 8
self.out_activation_bits = 8
self.input_activation_bits = 8
self.outshift = 0
self.out_add = 0 # used for pool nodes
def log_parameters(self):
for parameter in self.__dict__:
if parameter not in ['weights', 'k', 'lambda']:
logging.debug(parameter + ': ' + str(self.__dict__[parameter]))
else:
logging.debug(parameter + ': Present')
def print_parameters(self):
for parameter in self.__dict__:
if parameter not in ['weights', 'k', 'lambda']:
print(parameter + ': ' + str(self.__dict__[parameter]))
else:
print(parameter + ': Present')
def add_parameter(self, name, value):
self.__dict__[name] = value
def add_dict_parameter(self, dict_parameters):
for key, value in dict_parameters.items():
self.__dict__[key] = value
def get_parameter(self, name):
return self.__dict__[name]
| 34.213333
| 79
| 0.636399
|
794d0935d418bc64e08a73e6441521e398790f74
| 1,240
|
py
|
Python
|
Python/StationX/1. Python Basics/5functions.py
|
aguswake1/Courses_and_stuff
|
841e2a8316c976488c3e40866c336dcbdb57cce2
|
[
"Apache-2.0"
] | null | null | null |
Python/StationX/1. Python Basics/5functions.py
|
aguswake1/Courses_and_stuff
|
841e2a8316c976488c3e40866c336dcbdb57cce2
|
[
"Apache-2.0"
] | null | null | null |
Python/StationX/1. Python Basics/5functions.py
|
aguswake1/Courses_and_stuff
|
841e2a8316c976488c3e40866c336dcbdb57cce2
|
[
"Apache-2.0"
] | null | null | null |
#Functions DRY = Don't Repeat Yourself
"""
def say_hi(name = 'Dean', nickname = "puppy"): # Default value
    # Doc string: function says hi. What does this function do or why does it exist?
print("hi {}! {}".format(name, nickname))
say_hi()
say_hi("Sam", "moose")
help(say_hi) # type q to exit the help screen
# even = par odd = impar
"""
# Exercise 1
def get_word(word_type):
"""Get a word from a user and return that word."""
    if word_type == 'adjective':
is_a_an = 'an'
else:
is_a_an = 'a'
return input("Enter a word that is {0} {1}: ".format(is_a_an, word_type))
def fill_in_the_blanks(noun, verb, adjective):
"""Fills in the blanks and returns a completed story."""
story = "In this course you will learn how to {1}. It’s so easy even a {0} can do it. Trust me, it will be very {2}.".format(noun, verb, adjective)
return story
def display_story(story):
"""Displays a story."""
print(story)
def create_story():
"""Creates a story by capturing the input and displaying a finished story."""
noun = get_word('noun')
verb = get_word('verb')
    adjective = get_word('adjective')
    display_story(fill_in_the_blanks(noun, verb, adjective))
create_story()
| 27.555556
| 153
| 0.649194
|
794d093aab7757df27819baa843442744d595760
| 3,290
|
py
|
Python
|
scrapers/scraper_golea.py
|
do5562/SLEDIMedO
|
1abba7e5454b251244213abe3cd8cdadd1c94475
|
[
"MIT"
] | null | null | null |
scrapers/scraper_golea.py
|
do5562/SLEDIMedO
|
1abba7e5454b251244213abe3cd8cdadd1c94475
|
[
"MIT"
] | null | null | null |
scrapers/scraper_golea.py
|
do5562/SLEDIMedO
|
1abba7e5454b251244213abe3cd8cdadd1c94475
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup as bs
import hashlib
from database.dbExecutor import dbExecutor
import datetime
base_url = 'https://www.golea.si'
full_url = 'https://www.golea.si/aktualno/page/' #dodaj se stevilo strani - prva stran je 0
headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'}
def make_hash(title, date):
return hashlib.sha1((title + date).encode('utf-8')).hexdigest()
def is_article_new(hash):
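    # Looks the hash up in article_list.txt (created on first run) and records it
    # when unseen, so each article is scraped and stored only once.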
is_new = False
try:
f = open('article_list.txt', 'r+')
except FileNotFoundError:
f = open('article_list.txt', 'a+')
if hash not in f.read().split():
is_new = True
f.write(hash + '\n')
print('new article found')
f.close()
return is_new
def get_title(soup):
title = soup.find('h3', class_='post-title')
if title:
return title.text
print('title not found, update select() method')
return 'title not found'
def get_date(soup):
raw_date = soup.find('div', class_='meta-data')
if raw_date:
date = raw_date.text
date = date.split()
return formatDate(''.join(date[1:]))
print('date not found')
return '1.1.1111' #code for date not found
def get_link(soup):
link = soup.find('a')
if link:
return link.get('href')
print('link not found')
return base_url #return base url to avoid exceptions
def get_content(soup):
content = soup.find('article', class_='post-content')
if content:
return content.text.strip()
print('content not found')
return 'content not found'
def get_articles_on_pages(num_pages_to_check, session):
articles = []
for n in range(num_pages_to_check):
r = session.get(full_url + str(n+1))
soup = bs(r.text, 'html.parser')
articles += soup.find('ul', class_='posts-listing').find_all('li')
return articles
def formatDate(date):
#format date for consistent database
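    # e.g. '5.3.2019' -> ['05', '03', '2019'] -> '2019-03-05'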
date = date.split('.')
for i in range(2):
if len(date[i]) == 1:
date[i] = '0'+date[i]
return '-'.join(reversed(date))
def main():
num_pages_to_check = 2
num_new_articles = 0
articles_checked = 0
with requests.Session() as session:
session.headers.update(headers)
articles = get_articles_on_pages(num_pages_to_check,session)
articles_checked = len(articles)
new_articles_tuples = []
for x in articles:
title = get_title(x)
date = get_date(x)
hash_str = make_hash(title, date)
if is_article_new(hash_str):
link = get_link(x)
r = requests.get(link)
soup = bs(r.text, 'html.parser')
content = get_content(soup)
print(link + '\n')
new_tup = (str(datetime.date.today()), title, content, date, hash_str, link, base_url)
new_articles_tuples.append(new_tup)
num_new_articles += 1
#add new articles to database
dbExecutor.insertMany(new_articles_tuples)
print(num_new_articles, 'new articles found,', articles_checked,'articles checked')
if __name__ == '__main__':
main()
| 28.362069
| 149
| 0.620061
|
794d099c7ec7c5044056b961b07b9fbf0aee6966
| 606
|
py
|
Python
|
blog/migrations/0006_auto_20201009_2136.py
|
JiajiaHuang/smonus
|
95ec209ae3562ea73ee9ce4c22a0d3a3f0975210
|
[
"Unlicense"
] | null | null | null |
blog/migrations/0006_auto_20201009_2136.py
|
JiajiaHuang/smonus
|
95ec209ae3562ea73ee9ce4c22a0d3a3f0975210
|
[
"Unlicense"
] | null | null | null |
blog/migrations/0006_auto_20201009_2136.py
|
JiajiaHuang/smonus
|
95ec209ae3562ea73ee9ce4c22a0d3a3f0975210
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 2.2.1 on 2020-10-09 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20201008_1805'),
]
operations = [
migrations.AlterField(
model_name='article',
name='clicked',
field=models.IntegerField(default=0, null=True, verbose_name='点击数'),
),
migrations.AlterField(
model_name='article',
name='comment_count',
field=models.IntegerField(default=0, null=True, verbose_name='评论数'),
),
]
| 25.25
| 80
| 0.594059
|
794d0a496f51c8e7d770352e916bf650f283270a
| 674
|
py
|
Python
|
tests/plugins/tasks/commands/test_remote_command.py
|
tejasmokashi1992/nornir
|
7933c7fcca3953037485a5c02b79b7cf15865a23
|
[
"Apache-2.0"
] | 1
|
2020-07-19T19:54:54.000Z
|
2020-07-19T19:54:54.000Z
|
tests/plugins/tasks/commands/test_remote_command.py
|
tejasmokashi1992/nornir
|
7933c7fcca3953037485a5c02b79b7cf15865a23
|
[
"Apache-2.0"
] | null | null | null |
tests/plugins/tasks/commands/test_remote_command.py
|
tejasmokashi1992/nornir
|
7933c7fcca3953037485a5c02b79b7cf15865a23
|
[
"Apache-2.0"
] | 1
|
2020-05-26T13:36:18.000Z
|
2020-05-26T13:36:18.000Z
|
from nornir.core.exceptions import CommandError
from nornir.plugins.tasks import commands
from tests import skip
pytestmark = skip
class Test(object):
def test_remote_command(self, nornir):
result = nornir.run(commands.remote_command, command="hostname")
assert result
for h, r in result.items():
assert h == r.stdout.strip()
def test_remote_command_error_generic(self, nornir):
result = nornir.run(commands.remote_command, command="ls /asdadsd")
processed = False
for r in result.values():
processed = True
assert isinstance(r.exception, CommandError)
assert processed
| 30.636364
| 75
| 0.676558
|
794d0a51d89645d03b7513ca368c452011957c47
| 7,902
|
py
|
Python
|
data_steward/common.py
|
ratuagga/curation
|
047b984f20643e21bf3ab1e309903abaf816ecd5
|
[
"MIT"
] | 1
|
2021-04-05T18:06:25.000Z
|
2021-04-05T18:06:25.000Z
|
data_steward/common.py
|
rfrancis1/curation
|
b4c0a71408a54b8af0723107fcbd5ce0cfa79438
|
[
"MIT"
] | null | null | null |
data_steward/common.py
|
rfrancis1/curation
|
b4c0a71408a54b8af0723107fcbd5ce0cfa79438
|
[
"MIT"
] | null | null | null |
# Python imports
import os
# Project imports
from constants.bq_utils import VALIDATION_DATASET_REGEX
from constants.validation.participants.identity_match import REPORT_DIRECTORY_REGEX
import jinja2
# AOU required PII tables
PII_WILDCARD = 'pii*'
PII_NAME = 'pii_name'
PII_EMAIL = 'pii_email'
PII_PHONE_NUMBER = 'pii_phone_number'
PII_ADDRESS = 'pii_address'
PII_MRN = 'pii_mrn'
PARTICIPANT_MATCH = 'participant_match'
PII_TABLES = [
PII_NAME, PII_EMAIL, PII_PHONE_NUMBER, PII_ADDRESS, PII_MRN,
PARTICIPANT_MATCH
]
# AOU required CDM tables
CARE_SITE = 'care_site'
CONDITION_OCCURRENCE = 'condition_occurrence'
DEATH = 'death'
DEVICE_EXPOSURE = 'device_exposure'
DRUG_EXPOSURE = 'drug_exposure'
FACT_RELATIONSHIP = 'fact_relationship'
LOCATION = 'location'
MEASUREMENT = 'measurement'
NOTE = 'note'
OBSERVATION = 'observation'
PERSON = 'person'
PROCEDURE_OCCURRENCE = 'procedure_occurrence'
PROVIDER = 'provider'
SPECIMEN = 'specimen'
VISIT_OCCURRENCE = 'visit_occurrence'
AOU_REQUIRED = [
CARE_SITE, CONDITION_OCCURRENCE, DEATH, DEVICE_EXPOSURE, DRUG_EXPOSURE,
FACT_RELATIONSHIP, LOCATION, MEASUREMENT, NOTE, OBSERVATION, PERSON,
PROCEDURE_OCCURRENCE, PROVIDER, SPECIMEN, VISIT_OCCURRENCE
]
# Standardized clinical data tables in OMOP. All should contain a person_id column. See
# https://github.com/OHDSI/CommonDataModel/wiki/Standardized-Clinical-Data-Tables
# Clinical tables which have a corresponding mapping table.
MAPPED_CLINICAL_DATA_TABLES = [
VISIT_OCCURRENCE, CONDITION_OCCURRENCE, DRUG_EXPOSURE, MEASUREMENT,
PROCEDURE_OCCURRENCE, OBSERVATION, DEVICE_EXPOSURE, SPECIMEN
]
# Clinical tables which do not have a corresponding mapping table.
UNMAPPED_CLINICAL_DATA_TABLES = [DEATH]
# All clinical tables.
CLINICAL_DATA_TABLES = MAPPED_CLINICAL_DATA_TABLES + UNMAPPED_CLINICAL_DATA_TABLES
# other CDM tables
ATTRIBUTE_DEFINITION = 'attribute_definition'
COHORT_DEFINITION = 'cohort_definition'
CONDITION_ERA = 'condition_era'
DRUG_ERA = 'drug_era'
DOSE_ERA = 'dose_era'
DRUG_COST = 'drug_cost'
VISIT_COST = 'visit_cost'
DEVICE_COST = 'device_cost'
PROCEDURE_COST = 'procedure_cost'
OBSERVATION_PERIOD = 'observation_period'
PAYER_PLAN_PERIOD = 'payer_plan_period'
OTHER_CDM_TABLES = [
ATTRIBUTE_DEFINITION, COHORT_DEFINITION, CONDITION_ERA, DRUG_ERA, DOSE_ERA,
DRUG_COST, VISIT_COST, DEVICE_COST, PROCEDURE_COST, OBSERVATION_PERIOD,
PAYER_PLAN_PERIOD
]
CDM_TABLES = AOU_REQUIRED + OTHER_CDM_TABLES
AOU_REQUIRED_FILES = [table + '.csv' for table in AOU_REQUIRED]
PII_FILES = [table + '.csv' for table in PII_TABLES]
SUBMISSION_FILES = AOU_REQUIRED_FILES + PII_FILES
RESULTS_HTML = 'results.html'
PROCESSED_TXT = 'processed.txt'
LOG_JSON = 'log.json'
ACHILLES_HEEL_REPORT = 'achillesheel'
PERSON_REPORT = 'person'
DATA_DENSITY_REPORT = 'datadensity'
ALL_REPORTS = [ACHILLES_HEEL_REPORT, PERSON_REPORT, DATA_DENSITY_REPORT]
ALL_REPORT_FILES = [report + '.json' for report in ALL_REPORTS]
# Wearables
ACTIVITY_SUMMARY = 'activity_summary'
HEART_RATE_MINUTE_LEVEL = 'heart_rate_minute_level'
HEART_RATE_SUMMARY = 'heart_rate_summary'
STEPS_INTRADAY = 'steps_intraday'
FITBIT_TABLES = [
ACTIVITY_SUMMARY, HEART_RATE_MINUTE_LEVEL, HEART_RATE_SUMMARY,
STEPS_INTRADAY
]
# Vocabulary
CONCEPT = 'concept'
CONCEPT_ANCESTOR = 'concept_ancestor'
CONCEPT_CLASS = 'concept_class'
CONCEPT_RELATIONSHIP = 'concept_relationship'
CONCEPT_SYNONYM = 'concept_synonym'
DOMAIN = 'domain'
DRUG_STRENGTH = 'drug_strength'
RELATIONSHIP = 'relationship'
SOURCE_TO_CONCEPT_MAP = 'source_to_concept_map'
VOCABULARY = 'vocabulary'
VOCABULARY_TABLES = [
CONCEPT, CONCEPT_ANCESTOR, CONCEPT_CLASS, CONCEPT_RELATIONSHIP,
CONCEPT_SYNONYM, DOMAIN, DRUG_STRENGTH, RELATIONSHIP, VOCABULARY
]
# Achilles
ACHILLES_ANALYSIS = 'achilles_analysis'
ACHILLES_RESULTS = 'achilles_results'
ACHILLES_RESULTS_DIST = 'achilles_results_dist'
ACHILLES_TABLES = [ACHILLES_ANALYSIS, ACHILLES_RESULTS, ACHILLES_RESULTS_DIST]
ACHILLES_HEEL_RESULTS = 'achilles_heel_results'
ACHILLES_RESULTS_DERIVED = 'achilles_results_derived'
ACHILLES_HEEL_TABLES = [ACHILLES_HEEL_RESULTS, ACHILLES_RESULTS_DERIVED]
REQUIRED_TABLES = ['person']
REQUIRED_FILES = [table + '.csv' for table in REQUIRED_TABLES]
ACHILLES_EXPORT_PREFIX_STRING = "curation_report/data/"
IGNORE_STRING_LIST = [ACHILLES_EXPORT_PREFIX_STRING]
ACHILLES_EXPORT_DATASOURCES_JSON = ACHILLES_EXPORT_PREFIX_STRING + 'datasources.json'
# latest vocabulary dataset name in test and prod
VOCABULARY_DATASET = os.environ.get('VOCABULARY_DATASET')
CLINICAL = 'clinical'
ACHILLES = 'achilles'
CDM_COMPONENTS = [CLINICAL, VOCABULARY, ACHILLES]
UNKNOWN_FILE = 'Unknown file'
# fact relationship id constants
MEASUREMENT_DOMAIN_CONCEPT_ID = 21
OBSERVATION_DOMAIN_CONCEPT_ID = 27
PERSON_DOMAIN_CONCEPT_ID = 56
# ID Spaces
#
# The following constants are added to values in all ID (or "primary key") fields to prevent
# collisions during union/combine phases
# Values for ID fields for each HPO are summed with a factor of ID_CONSTANT_FACTOR
ID_CONSTANT_FACTOR = 1000000000000000
# Added to value in all ID fields in records coming from the RDR
RDR_ID_CONSTANT = ID_CONSTANT_FACTOR
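# Illustration: a record with local id 123 from the HPO assigned offset factor n
# becomes n * ID_CONSTANT_FACTOR + 123 after the union, while the RDR copy of
# id 123 becomes RDR_ID_CONSTANT + 123.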
PARTICIPANT_DIR = 'participant/'
IGNORE_DIRECTORIES = [
PARTICIPANT_DIR,
REPORT_DIRECTORY_REGEX,
VALIDATION_DATASET_REGEX,
]
OBSERVATION_TO_MEASUREMENT_CONCEPT_ID = 581410
MEASUREMENT_TO_OBSERVATION_CONCEPT_ID = 581411
PARENT_TO_CHILD_MEASUREMENT_CONCEPT_ID = 581436
CHILD_TO_PARENT_MEASUREMENT_CONCEPT_ID = 581437
DIASTOLIC_TO_SYSTOLIC_CONCEPT_ID = 46233682
SYSTOLIC_TO_DIASTOLIC_CONCEPT_ID = 46233683
LATEST_REPORTS_JSON = 'latest_reports.json'
LATEST_RESULTS_JSON = 'latest_results.json'
REPORT_FOR_ACHILLES = 'achilles'
REPORT_FOR_RESULTS = 'results'
LOG_YEAR = '2019'
DELIMITER = '\t'
LINE_TERMINATOR = '\n'
TRANSFORM_FILES = 'transform_files'
APPEND_VOCABULARY = 'append_vocabulary'
APPEND_CONCEPTS = 'append_concepts'
ADD_AOU_VOCABS = 'add_aou_vocabs'
ERRORS = 'errors'
AOU_GEN_ID = 'AoU_General'
AOU_GEN_NAME = 'AoU_General'
AOU_GEN_VOCABULARY_CONCEPT_ID = '2000000000'
AOU_GEN_VOCABULARY_REFERENCE = 'https://docs.google.com/document/d/10Gji9VW5-RTysM-yAbRa77rXqVfDfO2li2U4LxUQH9g'
AOU_CUSTOM_ID = 'AoU_Custom'
AOU_CUSTOM_NAME = 'AoU_Custom'
AOU_CUSTOM_VOCABULARY_CONCEPT_ID = '2100000000'
AOU_CUSTOM_VOCABULARY_REFERENCE = 'https://precisionmedicineinitiative.atlassian.net/browse/DC-618'
OMOP_VOCABULARY_CONCEPT_ID = '44819096'
ERROR_APPENDING = 'Appending to {in_path} which already contains rows for {vocab_id}'
VERSION_TEMPLATE = 'insert version info here'
VOCABULARY_UPDATES = {
AOU_GEN_ID: [
AOU_GEN_ID, AOU_GEN_NAME, AOU_GEN_VOCABULARY_REFERENCE,
VERSION_TEMPLATE, AOU_GEN_VOCABULARY_CONCEPT_ID
],
AOU_CUSTOM_ID: [
AOU_CUSTOM_ID, AOU_CUSTOM_NAME, AOU_CUSTOM_VOCABULARY_REFERENCE,
VERSION_TEMPLATE, AOU_CUSTOM_VOCABULARY_CONCEPT_ID
]
}
COMBINED = 'combined'
UNIONED_EHR = 'unioned_ehr'
DEID = 'deid'
EHR = 'ehr'
RDR = 'rdr'
RELEASE = 'release'
OTHER = 'other'
MAPPING = 'mapping'
MAPPING_PREFIX = '_mapping_'
EXT = 'ext'
EXT_SUFFIX = '_ext'
DEID_MAP = '_deid_map'
MAX_DEID_DATE_SHIFT = 364
PID_RID_MAPPING = 'pid_rid_mapping'
PRIMARY_PID_RID_MAPPING = 'primary_pid_rid_mapping'
SITE_MASKING_TABLE_ID = 'site_maskings'
PIPELINE_TABLES = 'pipeline_tables'
COPE_SURVEY_MAP = 'cope_survey_semantic_version_map'
ZIP3_LOOKUP = 'zip3_lookup'
# Participant Summary
DRC_OPS = 'drc_ops'
PS_API_VALUES = 'ps_api_values'
# JINJA
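# With the delimiters below, SQL templates can carry inline Jinja comments,
# e.g. text wrapped as -- like this -- is stripped when the template is rendered.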
JINJA_ENV = jinja2.Environment(
# block tags on their own lines
# will not cause extra white space
trim_blocks=True,
lstrip_blocks=True,
# syntax highlighting should be better
# with these comment delimiters
comment_start_string='--',
comment_end_string=' --',
# in jinja2 autoescape is for html; jinjasql supports autoescape for sql
# TODO Look into jinjasql for sql templating
autoescape=False)
| 32.385246
| 112
| 0.802835
|
794d0aaa64db85bc24d6fdf70948726225287d3f
| 2,605
|
py
|
Python
|
papers/CS-F-LTR/src/decision_tree_semi.py
|
mindspore-ai/contrib
|
85dccac7a2ba6e962092ecd51aefd962d7f2aeac
|
[
"Apache-2.0"
] | 2
|
2021-11-10T06:16:55.000Z
|
2022-02-22T11:30:04.000Z
|
papers/CS-F-LTR/src/decision_tree_semi.py
|
mindspore-ai/contrib
|
85dccac7a2ba6e962092ecd51aefd962d7f2aeac
|
[
"Apache-2.0"
] | null | null | null |
papers/CS-F-LTR/src/decision_tree_semi.py
|
mindspore-ai/contrib
|
85dccac7a2ba6e962092ecd51aefd962d7f2aeac
|
[
"Apache-2.0"
] | 1
|
2022-03-22T06:03:15.000Z
|
2022-03-22T06:03:15.000Z
|
"""[summary]
"""
import pickle
import os
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from utils import evaluation
from scipy.stats import mode
class DecisionTreeSemi:
"""[summary]
"""
def __init__(self, train_relevance_labels, train_features,
test_relevance_labels, test_features, test_query_ids, train_features_u):
"""[summary]
Args:
train_relevance_labels ([type]): [description]
train_features ([type]): [description]
test_relevance_labels ([type]): [description]
test_features ([type]): [description]
test_query_ids ([type]): [description]
train_features_u ([type]): [description]
"""
self.y_labeled2 = train_relevance_labels
self.x_labeled = train_features
self.x_unlabeled = train_features_u
self.test_labels = test_relevance_labels
self.test_features = test_features
self.test_ids = test_query_ids
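        # shuffle the labeled features and their labels together (seeded for reproducibility)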
x = self.x_labeled
y = self.y_labeled2.reshape(-1, 1)
x_y = np.concatenate((x, y), axis=1)
np.random.seed(1)
np.random.shuffle(x_y)
self.x_labeled = x_y[:, :-1]
self.y_labeled2 = x_y[:, -1].reshape(-1,)
def fit(self, fed_num, file_path):
"""[summary]
Args:
fed_num ([type]): [description]
file_path ([type]): [description]
"""
clfs = []
for i in range(fed_num):
clfs.append(
pickle.load(
open(
os.path.join(
file_path,
"decision_tree%d" %
i),
"rb")))
res = np.zeros([fed_num, len(self.x_unlabeled)])
for i in range(fed_num):
res[i] = clfs[i].predict(self.x_unlabeled)
# for i in range(len(self.x_unlabeled)):
# res[i] = res[i] // fed_num
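        # majority vote across the federated trees yields pseudo-labels for the unlabeled pool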
res = mode(res)[0][0]
print(res)
x_aug = np.concatenate((self.x_labeled, self.x_unlabeled))
y_aug = np.concatenate((self.y_labeled2, res))
clf = DecisionTreeClassifier().fit(x_aug, y_aug)
result = clf.predict(self.test_features)
# avg_err, avg_ndcg, avg_full_ndcg, avg_map, avg_auc = \
_, _, _, _, _ = \
evaluation(
result,
self.test_labels,
self.test_ids,
self.test_features)
# pickle.dump(clf, open(os.path.join(file_path, "decision_tree%d" % fed_id), "wb"))
| 33.831169
| 91
| 0.54856
|
794d0b22d99afa9075fe32c6a957c3565c066b46
| 1,540
|
py
|
Python
|
samples/generated_samples/dialogflow_generated_dialogflowcx_v3beta1_deployments_list_deployments_sync.py
|
nicain/python-dialogflow-cx
|
2292ff540aea24c3c831a5ffe1604c2c022ccb82
|
[
"Apache-2.0"
] | null | null | null |
samples/generated_samples/dialogflow_generated_dialogflowcx_v3beta1_deployments_list_deployments_sync.py
|
nicain/python-dialogflow-cx
|
2292ff540aea24c3c831a5ffe1604c2c022ccb82
|
[
"Apache-2.0"
] | null | null | null |
samples/generated_samples/dialogflow_generated_dialogflowcx_v3beta1_deployments_list_deployments_sync.py
|
nicain/python-dialogflow-cx
|
2292ff540aea24c3c831a5ffe1604c2c022ccb82
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListDeployments
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_generated_dialogflowcx_v3beta1_Deployments_ListDeployments_sync]
from google.cloud import dialogflowcx_v3beta1
def sample_list_deployments():
# Create a client
client = dialogflowcx_v3beta1.DeploymentsClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.ListDeploymentsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_deployments(request=request)
for response in page_result:
print(response)
# [END dialogflow_generated_dialogflowcx_v3beta1_Deployments_ListDeployments_sync]
| 34.222222
| 85
| 0.771429
|
794d0c085a26f65be9aa6736bace3173213bdac3
| 279
|
py
|
Python
|
bootcamp/account/signup/forms.py
|
alexandrelff/bootcamp
|
57f7dff02d1dfbfae25fb993ef42710b1b61b7ba
|
[
"MIT"
] | null | null | null |
bootcamp/account/signup/forms.py
|
alexandrelff/bootcamp
|
57f7dff02d1dfbfae25fb993ef42710b1b61b7ba
|
[
"MIT"
] | null | null | null |
bootcamp/account/signup/forms.py
|
alexandrelff/bootcamp
|
57f7dff02d1dfbfae25fb993ef42710b1b61b7ba
|
[
"MIT"
] | null | null | null |
from allauth.account.forms import SignupForm
from captcha.fields import ReCaptchaField
class UserSpamForm(SignupForm):
reCAPTCHA = ReCaptchaField()
field_order = [
"username",
"email",
"password1",
"password2",
"reCAPTCHA"
]
| 23.25
| 44
| 0.634409
|
794d0c4b0521e92018696b3ef0a2269ba019cd37
| 2,042
|
py
|
Python
|
malcolm/parts/ca/caactionpart.py
|
dls-controls/github-publish-test
|
25f6ce1af28eff9930f65e4f2c9fb0474e0b570c
|
[
"Apache-2.0"
] | null | null | null |
malcolm/parts/ca/caactionpart.py
|
dls-controls/github-publish-test
|
25f6ce1af28eff9930f65e4f2c9fb0474e0b570c
|
[
"Apache-2.0"
] | null | null | null |
malcolm/parts/ca/caactionpart.py
|
dls-controls/github-publish-test
|
25f6ce1af28eff9930f65e4f2c9fb0474e0b570c
|
[
"Apache-2.0"
] | null | null | null |
import cothread
from cothread import catools
from malcolm.core import Part, method_takes, REQUIRED, MethodMeta
from malcolm.core.vmetas import StringMeta, NumberMeta, BooleanMeta
from malcolm.controllers.defaultcontroller import DefaultController
@method_takes(
"name", StringMeta("name of the created method"), REQUIRED,
"description", StringMeta("desc of created method"), REQUIRED,
"pv", StringMeta("full pv to write to when method called"), REQUIRED,
"status_pv", StringMeta("Status pv to see if successful"), None,
"good_status", StringMeta("Good value for status pv"), "",
"value", NumberMeta("int32", "value to write to pv when method called"), 1,
"wait", BooleanMeta("Wait for caput callback?"), True)
class CAActionPart(Part):
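    # Exposes a single Method that caputs a fixed value to a PV when called and,
    # if a status PV is configured, checks that it reads back the expected good value.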
method = None
def create_methods(self):
# MethodMeta instance
self.method = MethodMeta(self.params.description)
yield self.params.name, self.method, self.caput
@DefaultController.Resetting
def connect_pvs(self, _):
# make the connection in cothread's thread
pvs = [self.params.pv]
if self.params.status_pv:
pvs.append(self.params.status_pv)
ca_values = cothread.CallbackResult(catools.caget, pvs)
# check connection is ok
for i, v in enumerate(ca_values):
assert v.ok, "CA connect failed with %s" % v.state_strings[v.state]
def caput(self):
if self.params.wait:
cmd = "caput -c -w 1000"
else:
cmd = "caput"
self.log_info("%s %s %s", cmd, self.params.pv, self.params.value)
cothread.CallbackResult(
catools.caput, self.params.pv, self.params.value,
wait=self.params.wait, timeout=None)
if self.params.status_pv:
value = cothread.CallbackResult(
catools.caget, self.params.status_pv,
datatype=catools.DBR_STRING)
assert value == self.params.good_status, \
"Action failed with status %r" % (value,)
| 40.039216
| 79
| 0.653281
|
794d0c84aa5e1a93487113f7d468fc8fea0c66cd
| 4,759
|
py
|
Python
|
predict_styleptb.py
|
yoonkim/neural-qcfg
|
c5a2ea05e3108f83e5833f8d0bc368638bab6c9a
|
[
"MIT"
] | 35
|
2021-09-03T01:47:47.000Z
|
2022-03-18T00:59:35.000Z
|
predict_styleptb.py
|
yoonkim/neural-qcfg
|
c5a2ea05e3108f83e5833f8d0bc368638bab6c9a
|
[
"MIT"
] | null | null | null |
predict_styleptb.py
|
yoonkim/neural-qcfg
|
c5a2ea05e3108f83e5833f8d0bc368638bab6c9a
|
[
"MIT"
] | 1
|
2021-12-09T11:59:57.000Z
|
2021-12-09T11:59:57.000Z
|
#!/usr/bin/env python3
import sys
import os
import argparse
import json
import random
import shutil
import copy
import pickle
import torch
from torch import cuda
import numpy as np
import time
import logging
from tokenizer import Tokenizer
from utils import *
from torch.nn.utils.rnn import pad_sequence
parser = argparse.ArgumentParser()
parser.add_argument('--data_file', default='data/StylePTB/ATP/test.tsv')
parser.add_argument('--out_file', default='styleptb-pred-atp.txt')
parser.add_argument('--model_path', default='')
parser.add_argument('--gpu', default=0, type=int, help='which gpu to use')
parser.add_argument('--num_samples', default=1000, type=int, help='samples')
parser.add_argument('--seed', default=3435, type=int, help='random seed')
def get_data(data_file):
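# Each input line is "source<TAB>target"; if the source additionally carries
# ";<word>", that word marks an emphasis token and an emphasis mask aligned
# with the source tokens is stored alongside the pair.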
data = []
for d in open(data_file):
src, tgt = d.split("\t")
if ";" in src:
src, emph = src.strip().split(";")
emph = emph.strip()
src = src.strip().split()
emph_mask = []
for w in src:
if w == emph:
emph_mask.append(1)
else:
emph_mask.append(0)
data.append({"src": src, "tgt": tgt.strip().split(), "emph_mask": emph_mask})
else:
data.append({"src": src.strip().split(), "tgt": tgt.strip().split()})
return data
def main(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
cuda.set_device(args.gpu)
device = torch.device("cuda:"+str(args.gpu))
data = get_data(args.data_file)
model_checkpoint = torch.load(args.model_path)
encoder = model_checkpoint["encoder"]
decoder = model_checkpoint["decoder"]
enc_parser = model_checkpoint["parser"]
tokenizer = model_checkpoint["tokenizer"]
model_args = model_checkpoint["args"]
encoder.to(device)
decoder.to(device)
enc_parser.to(device)
out = open(args.out_file, "w")
eval(data, encoder, decoder, enc_parser, device, tokenizer, model_args, out)
out.close()
def eval(data, encoder, decoder, enc_parser, device, tokenizer, model_args, out):
num_sents = 0
num_words_pred = 0
total_nll_pred = 0.
for d in data:
x = [d["src"]]
y = [d["tgt"]]
x_tensor, _, _ = tokenizer.convert_batch(x)
y_tensor, _, _ = tokenizer.convert_batch(y)
x_tensor, y_tensor = x_tensor.to(device), y_tensor.to(device)
emph_mask = torch.LongTensor(d["emph_mask"]).to(device) if "emph_mask" in d else None
x_lengths = torch.Tensor([len(d["src"])]).long().to(device)
y_lengths = torch.Tensor([len(d["tgt"])]).long().to(device)
_, x_spans, _, x_actions, _ = enc_parser(x_tensor, x_lengths)
with torch.no_grad():
node_features, node_spans = encoder(x_tensor, x_lengths, spans = x_spans,
token_type = emph_mask)
new_spans = []
for span, x_str in zip(node_spans, x):
new_span = []
for s in span:
new_span.append([s[0], s[1], x_str[s[0]:s[1]+1]])
new_spans.append(new_span)
node_spans = new_spans
y_preds = decoder.decode(node_features, node_spans, tokenizer,
num_samples = args.num_samples)
best_pred = None
best_nll = 1e5
best_length = None
best_ppl = 1e5
best_derivation = None
for k, y_pred in enumerate(y_preds[0]):
y_pred = [y_pred]
y_pred_tensor, _, _ = tokenizer.convert_batch(y_pred)
y_pred_tensor = y_pred_tensor.to(device)
y_pred_lengths = torch.Tensor([len(y_pred[0])]).long().to(device)
with torch.no_grad():
if len(y_pred[0]) > 30 or len(y_pred[0]) < 2:
continue
pred_nll = decoder(y_pred_tensor, y_pred_lengths,
node_features, node_spans, argmax=False,
x_str = y_pred)
ppl = np.exp(pred_nll.item() / y_pred_lengths.sum().item())
# if pred_nll.item() < best_nll:
if ppl < best_ppl:
best_ppl = ppl
best_pred = y_pred[0]
best_nll = pred_nll.item()
best_length = y_pred_lengths.sum().item()
y_pred_tree, pred_all_spans, pred_all_spans_node = decoder(
y_pred_tensor, y_pred_lengths, node_features, node_spans,
x_str=y_pred, argmax=True)
num_words_pred += best_length
total_nll_pred += best_nll
print(np.exp(total_nll_pred/num_words_pred))
pred = " ".join(best_pred)
gold = " ".join(y[0])
src = " ".join(x[0])
out.write(pred + "\n")
x_parse = get_tree(x_actions[0], x[0])
print("X: %s" % x_parse)
print("SRC: %s\nPRED: %s\nGOLD: %s" % (" ".join(x[0]), pred, gold))
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| 34.992647
| 95
| 0.630595
|
794d0ddae18317df417802939150574e8eafd5e9
| 412
|
py
|
Python
|
tilecloud/store/wmts.py
|
wendymhelson/tilecloud
|
70f54d24d05e4e350317b12defd56e4eebe1f2c1
|
[
"Unlicense"
] | null | null | null |
tilecloud/store/wmts.py
|
wendymhelson/tilecloud
|
70f54d24d05e4e350317b12defd56e4eebe1f2c1
|
[
"Unlicense"
] | null | null | null |
tilecloud/store/wmts.py
|
wendymhelson/tilecloud
|
70f54d24d05e4e350317b12defd56e4eebe1f2c1
|
[
"Unlicense"
] | null | null | null |
from tilecloud.layout.wmts import WMTSTileLayout
from tilecloud.store.url import URLTileStore
class WMTSTileStore(URLTileStore):
def __init__(
self, url=None, layer=None, style=None, format=None, tile_matrix_set=None, tile_matrix=None, **kwargs
):
layout = WMTSTileLayout(url, layer, style, format, tile_matrix_set, tile_matrix)
URLTileStore.__init__(self, (layout,), **kwargs)
| 37.454545
| 109
| 0.735437
|
794d0e2ef3fbb74fb662a438dd3ec008f84f7a62
| 403
|
py
|
Python
|
facebook_lite/wsgi.py
|
mobolajijohnson13/facebook-all
|
e1d04694f9ddd7c50b63871aa9a39843d2977971
|
[
"MIT"
] | null | null | null |
facebook_lite/wsgi.py
|
mobolajijohnson13/facebook-all
|
e1d04694f9ddd7c50b63871aa9a39843d2977971
|
[
"MIT"
] | null | null | null |
facebook_lite/wsgi.py
|
mobolajijohnson13/facebook-all
|
e1d04694f9ddd7c50b63871aa9a39843d2977971
|
[
"MIT"
] | null | null | null |
"""
WSGI config for facebook_lite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'facebook_lite.settings')
application = get_wsgi_application()
| 23.705882
| 78
| 0.791563
|
794d0e492cbbab665290c66ad10631e29f150508
| 1,724
|
py
|
Python
|
Exscript/util/collections.py
|
saveshodhan/exscript
|
72718eee3e87b345d5a5255be9824e867e42927b
|
[
"MIT"
] | 226
|
2015-01-20T19:59:06.000Z
|
2022-01-02T11:13:01.000Z
|
Exscript/util/collections.py
|
saveshodhan/exscript
|
72718eee3e87b345d5a5255be9824e867e42927b
|
[
"MIT"
] | 155
|
2015-01-02T07:56:27.000Z
|
2022-01-09T20:56:19.000Z
|
Exscript/util/collections.py
|
saveshodhan/exscript
|
72718eee3e87b345d5a5255be9824e867e42927b
|
[
"MIT"
] | 114
|
2015-01-03T11:48:17.000Z
|
2022-01-26T02:50:43.000Z
|
from __future__ import unicode_literals, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import chr
from builtins import range
from builtins import object
import copy
from collections import OrderedDict, Callable, defaultdict
class OrderedDefaultDict(OrderedDict):
"""
A fusion of Python's defaultdict and Python's OrderedDict.
"""
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not isinstance(default_factory, Callable)):
raise TypeError('first argument must be callable')
OrderedDict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'OrderedDefaultDict(%s, %s)' % (self.default_factory,
OrderedDict.__repr__(self))
| 31.345455
| 74
| 0.642691
|
794d0ed375dacffa47ccd1e6871e98f506fcfc90
| 1,385
|
py
|
Python
|
lweutils.py
|
lucidworks/citibike-demo
|
57c0ff44e28673e1ebb3dadebbdcd8758e98cbb4
|
[
"Apache-2.0"
] | 2
|
2017-01-18T00:12:21.000Z
|
2018-10-04T13:31:33.000Z
|
lweutils.py
|
lucidworks/citibike-demo
|
57c0ff44e28673e1ebb3dadebbdcd8758e98cbb4
|
[
"Apache-2.0"
] | null | null | null |
lweutils.py
|
lucidworks/citibike-demo
|
57c0ff44e28673e1ebb3dadebbdcd8758e98cbb4
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
import httplib2
HTTP_CLIENT = httplib2.Http()
##
def get_env(key, default=None):
if key in os.environ:
return os.environ[key]
return default
##
def parse_opts(args):
data = {}
for arg in args:
key, val = arg.split('=', 1)
# make a list out of any key specified more then once
if key in data:
oldval = data[key]
if type(oldval) is list:
oldval.append(val)
else:
data[key] = [oldval, val]
else:
data[key] = val
return data
##
def json_http(url, method='GET', data=None):
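# Sends the request with an optional JSON-encoded body, raises on any non-2xx
# status (embedding a pretty-printed error body when the response parses as
# JSON), returns None for 204/empty responses, otherwise the decoded JSON.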
body = None
if data: body = json.dumps(data)
resp, content = HTTP_CLIENT.request(
url, method=method, body=body,
headers={'Content-Type':'application/json'})
# fail if not status 2xx
if 0 != str(resp.status).find('2'):
err = content
try:
err = pretty_json(json.loads(content))
except:
# IGNORE, use the raw error instead
pass
raise Exception(method+' '+url+' => '+str(resp.status)+"\n"+err)
if 204 == resp.status: return None
if content:
return json.loads(content)
else:
return None
##
def pretty_json(data, indent=''):
pretty = json.dumps(data,indent=2)
return pretty.replace("\n","\n"+indent)
##
| 21.640625
| 72
| 0.555957
|
794d0f18af99b3e0fb307f54e58251adb3517805
| 9,437
|
py
|
Python
|
frappe/app.py
|
stephenBDT/frappe
|
4756b70812973d38725405aa342a4b436239c296
|
[
"MIT"
] | null | null | null |
frappe/app.py
|
stephenBDT/frappe
|
4756b70812973d38725405aa342a4b436239c296
|
[
"MIT"
] | 2
|
2021-04-25T22:12:37.000Z
|
2021-04-30T13:31:35.000Z
|
frappe/app.py
|
ahashad/frappe-1
|
3fb0af4c2d99f4365c37734abbbc3715a9afd755
|
[
"MIT"
] | 1
|
2021-11-13T10:27:20.000Z
|
2021-11-13T10:27:20.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import os
from six import iteritems
import logging
from werkzeug.local import LocalManager
from werkzeug.wrappers import Request, Response
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.middleware.profiler import ProfilerMiddleware
from werkzeug.middleware.shared_data import SharedDataMiddleware
import frappe
import frappe.handler
import frappe.auth
import frappe.api
import frappe.utils.response
import frappe.website.render
from frappe.utils import get_site_name, sanitize_html
from frappe.middlewares import StaticDataMiddleware
from frappe.utils.error import make_error_snapshot
from frappe.core.doctype.comment.comment import update_comments_in_parent_after_request
from frappe import _
import frappe.recorder
import frappe.monitor
import frappe.rate_limiter
local_manager = LocalManager([frappe.local])
_site = None
_sites_path = os.environ.get("SITES_PATH", ".")
class RequestContext(object):
def __init__(self, environ):
self.request = Request(environ)
def __enter__(self):
init_request(self.request)
def __exit__(self, type, value, traceback):
frappe.destroy()
@Request.application
def application(request):
response = None
try:
rollback = True
init_request(request)
frappe.recorder.record()
frappe.monitor.start()
frappe.rate_limiter.apply()
frappe.api.validate_auth()
if request.method == "OPTIONS":
response = Response()
elif frappe.form_dict.cmd:
response = frappe.handler.handle()
elif request.path.startswith("/api/"):
response = frappe.api.handle()
elif request.path.startswith('/backups'):
response = frappe.utils.response.download_backup(request.path)
elif request.path.startswith('/private/files/'):
response = frappe.utils.response.download_private_file(request.path)
elif request.method in ('GET', 'HEAD', 'POST'):
response = frappe.website.render.render()
else:
raise NotFound
except HTTPException as e:
return e
except frappe.SessionStopped as e:
response = frappe.utils.response.handle_session_stopped()
except Exception as e:
response = handle_exception(e)
else:
rollback = after_request(rollback)
finally:
if request.method in ("POST", "PUT") and frappe.db and rollback:
frappe.db.rollback()
frappe.rate_limiter.update()
frappe.monitor.stop(response)
frappe.recorder.dump()
if hasattr(frappe.local, 'conf') and frappe.local.conf.enable_frappe_logger:
frappe.logger("frappe.web", allow_site=frappe.local.site).info({
"site": get_site_name(request.host),
"remote_addr": getattr(request, "remote_addr", "NOTFOUND"),
"base_url": getattr(request, "base_url", "NOTFOUND"),
"full_path": getattr(request, "full_path", "NOTFOUND"),
"method": getattr(request, "method", "NOTFOUND"),
"scheme": getattr(request, "scheme", "NOTFOUND"),
"http_status_code": getattr(response, "status_code", "NOTFOUND")
})
process_response(response)
frappe.destroy()
return response
def init_request(request):
frappe.local.request = request
frappe.local.is_ajax = frappe.get_request_header("X-Requested-With")=="XMLHttpRequest"
site = _site or request.headers.get('X-Frappe-Site-Name') or get_site_name(request.host)
frappe.init(site=site, sites_path=_sites_path)
if not (frappe.local.conf and frappe.local.conf.db_name):
# site does not exist
raise NotFound
if frappe.local.conf.get('maintenance_mode'):
frappe.connect()
raise frappe.SessionStopped('Session Stopped')
else:
frappe.connect(set_admin_as_user=False)
make_form_dict(request)
if request.method != "OPTIONS":
frappe.local.http_request = frappe.auth.HTTPRequest()
def process_response(response):
if not response:
return
# set cookies
if hasattr(frappe.local, 'cookie_manager'):
frappe.local.cookie_manager.flush_cookies(response=response)
# rate limiter headers
if hasattr(frappe.local, 'rate_limiter'):
response.headers.extend(frappe.local.rate_limiter.headers())
# CORS headers
if hasattr(frappe.local, 'conf') and frappe.conf.allow_cors:
set_cors_headers(response)
def set_cors_headers(response):
origin = frappe.request.headers.get('Origin')
allow_cors = frappe.conf.allow_cors
if not (origin and allow_cors):
return
if allow_cors != "*":
if not isinstance(allow_cors, list):
allow_cors = [allow_cors]
if origin not in allow_cors:
return
response.headers.extend({
'Access-Control-Allow-Origin': origin,
'Access-Control-Allow-Credentials': 'true',
'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, OPTIONS',
'Access-Control-Allow-Headers': ('Authorization,DNT,X-Mx-ReqToken,'
'Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,'
'Cache-Control,Content-Type')
})
def make_form_dict(request):
import json
request_data = request.get_data(as_text=True)
if 'application/json' in (request.content_type or '') and request_data:
args = json.loads(request_data)
else:
args = request.form or request.args
if not isinstance(args, dict):
frappe.throw("Invalid request arguments")
try:
frappe.local.form_dict = frappe._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \
for k, v in iteritems(args) })
except IndexError:
frappe.local.form_dict = frappe._dict(args)
if "_" in frappe.local.form_dict:
# _ is passed by $.ajax so that the request is not cached by the browser. So, remove _ from form_dict
frappe.local.form_dict.pop("_")
def handle_exception(e):
response = None
http_status_code = getattr(e, "http_status_code", 500)
return_as_message = False
accept_header = frappe.get_request_header("Accept") or ""
respond_as_json = (
frappe.get_request_header('Accept')
and (frappe.local.is_ajax or 'application/json' in accept_header)
or (
frappe.local.request.path.startswith("/api/") and not accept_header.startswith("text")
)
)
if frappe.conf.get('developer_mode'):
# don't fail silently
print(frappe.get_traceback())
if respond_as_json:
# handle ajax responses first
# if the request is ajax, send back the trace or error message
response = frappe.utils.response.report_error(http_status_code)
elif (http_status_code==500
and (frappe.db and isinstance(e, frappe.db.InternalError))
and (frappe.db and (frappe.db.is_deadlocked(e) or frappe.db.is_timedout(e)))):
http_status_code = 508
elif http_status_code==401:
frappe.respond_as_web_page(_("Session Expired"),
_("Your session has expired, please login again to continue."),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code==403:
frappe.respond_as_web_page(_("Not Permitted"),
_("You do not have enough permissions to complete the action"),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code==404:
frappe.respond_as_web_page(_("Not Found"),
_("The resource you are looking for is not available"),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code == 429:
response = frappe.rate_limiter.respond()
else:
traceback = "<pre>" + sanitize_html(frappe.get_traceback()) + "</pre>"
# disable traceback in production if flag is set
if frappe.local.flags.disable_traceback and not frappe.local.dev_server:
traceback = ""
frappe.respond_as_web_page("Server Error",
traceback, http_status_code=http_status_code,
indicator_color='red', width=640)
return_as_message = True
if e.__class__ == frappe.AuthenticationError:
if hasattr(frappe.local, "login_manager"):
frappe.local.login_manager.clear_cookies()
if http_status_code >= 500:
make_error_snapshot(e)
if return_as_message:
response = frappe.website.render.render("message",
http_status_code=http_status_code)
return response
def after_request(rollback):
if (frappe.local.request.method in ("POST", "PUT") or frappe.local.flags.commit) and frappe.db:
if frappe.db.transaction_writes:
frappe.db.commit()
rollback = False
# update session
if getattr(frappe.local, "session_obj", None):
updated_in_db = frappe.local.session_obj.update()
if updated_in_db:
frappe.db.commit()
rollback = False
update_comments_in_parent_after_request()
return rollback
application = local_manager.make_middleware(application)
def serve(port=8000, profile=False, no_reload=False, no_threading=False, site=None, sites_path='.'):
global application, _site, _sites_path
_site = site
_sites_path = sites_path
from werkzeug.serving import run_simple
if profile:
application = ProfilerMiddleware(application, sort_by=('cumtime', 'calls'))
if not os.environ.get('NO_STATICS'):
application = SharedDataMiddleware(application, {
str('/assets'): str(os.path.join(sites_path, 'assets'))
})
application = StaticDataMiddleware(application, {
str('/files'): str(os.path.abspath(sites_path))
})
application.debug = True
application.config = {
'SERVER_NAME': 'localhost:8000'
}
log = logging.getLogger('werkzeug')
log.propagate = False
in_test_env = os.environ.get('CI')
if in_test_env:
log.setLevel(logging.ERROR)
run_simple('0.0.0.0', int(port), application,
use_reloader=False if in_test_env else not no_reload,
use_debugger=not in_test_env,
use_evalex=not in_test_env,
threaded=not no_threading)
| 28.859327
| 103
| 0.751828
|
794d0f90924c7a7aa4c9c4fc29622ea7e2257f2c
| 3,710
|
py
|
Python
|
models/ciee/weather_model.py
|
phgupta/XBOS
|
acc59f33600943569d62c145dae11a1775296b44
|
[
"BSD-2-Clause"
] | 27
|
2016-04-26T17:26:56.000Z
|
2021-08-22T15:11:55.000Z
|
models/ciee/weather_model.py
|
phgupta/XBOS
|
acc59f33600943569d62c145dae11a1775296b44
|
[
"BSD-2-Clause"
] | 75
|
2017-02-17T18:00:37.000Z
|
2019-06-20T04:12:08.000Z
|
models/ciee/weather_model.py
|
vishalbelsare/XBOS
|
1fea0b024d97ae142d97b3a94510403928ed44b7
|
[
"BSD-2-Clause"
] | 20
|
2017-07-28T14:50:04.000Z
|
2020-01-16T05:04:54.000Z
|
from xbos import get_client
from xbos.services.mdal import *
from xbos.services.hod import HodClient
import pandas as pd
import pytz
from sklearn.metrics import mean_squared_error
from dateutil import rrule
from datetime import datetime, timedelta
# data clients
mdal = MDALClient("xbos/mdal")
hod = HodClient("xbos/hod")
# temporal parameters
SITE = "ciee"
def predict_day(targetday="2018-01-30 00:00:00 PST", WINDOW="30m", N_DAYS=10):
T0 = "2017-09-18 00:00:00 PST"
day = datetime.strptime(targetday, "%Y-%m-%d %H:%M:%S %Z")
day = pytz.timezone('US/Pacific').localize(day)
T1 = (day - timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S %Z")
tomorrow = (day + timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S %Z")
today_start = targetday
today_end = (day + timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S %Z")
print(today_start, today_end)
# retrieve data
weather_query = {
"Composition": ["1c467b79-b314-3c1e-83e6-ea5e7048c37b"],
"Variables": [],
"Selectors": [MEAN],
"Time": {
"T0": T0, "T1": T1,
"WindowSize": WINDOW,
"Aligned": True,
}
}
resp = mdal.do_query(weather_query)
df = resp['df']
weather_today_sofar = {
"Composition": ["1c467b79-b314-3c1e-83e6-ea5e7048c37b"],
"Variables": [],
"Selectors": [MEAN],
"Time": {
"T0": today_start,
"T1": today_end,
"WindowSize": WINDOW,
"Aligned": True,
}
}
resp = mdal.do_query(weather_today_sofar)
sample = resp['df']
# Similarity-based estimation implementation
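# Sketch of the approach: slide over each historical day, compare its weather
# profile against today's partial profile via mean squared error, keep the
# N_DAYS most similar days, and return their mean profile (re-indexed onto the
# target day) as the prediction.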
begin = df.index[0].to_pydatetime()
# convert to midnight of the next day
begin = datetime(begin.year,begin.month,begin.day, tzinfo=begin.tzinfo) + timedelta(days=1)
end = df.index[-1].to_pydatetime()
# convert to midnight of previous day
end = datetime(end.year, end.month, end.day, tzinfo=end.tzinfo)
weather = df.columns[0]
hop = rrule.DAILY
hop_day = 1
errors = []
for dt in rrule.rrule(hop, dtstart=begin, until=end):
# data for the current day
day_weatherdata = df[dt:dt+timedelta(days=hop_day)]
# avoids indexing errors by making sure the # of data points aligns
num_sample = len(sample)
num_weatherdata = len(day_weatherdata)
num_use = min(num_sample, num_weatherdata)
today_data = sample.copy()[:num_use]
use_weather = day_weatherdata[:num_use]
today_data.index = use_weather.index # move them onto the same day to aid subtraction
sample_weather = today_data.columns[0]
# compare MSE error of today compared with the historical day
use_weather.dropna(inplace=True)
today_data[sample_weather] = today_data[sample_weather].dropna()
common = use_weather.join(today_data[sample_weather], how='inner', lsuffix='_').index
if len(common) == 0:
continue
mse = mean_squared_error(today_data[sample_weather].ix[common], use_weather.ix[common])
errors.append(mse)
d = pd.DataFrame(errors)
# sort errors ascending and take first 10 values
best_10_days = d.sort_values(0, ascending=True).head(N_DAYS)
# use the index of the value to figure out how many days since the first date ("start", above)
best_10_days_dates = [begin+timedelta(days=hop_day*x) for x in best_10_days.index]
# grab the daily data for each of those days and put it into a new dataframe
best_10_days_data = [df[weather][x:x+timedelta(days=hop_day)].values for x in best_10_days_dates]
predictor_days_df = pd.DataFrame(best_10_days_data)
predicted_day = predictor_days_df.mean(axis=0)
predicted_day.index = pd.date_range(targetday, tomorrow, freq="30min")
return predicted_day
if __name__ == '__main__':
print(predict_day("2017-10-06 00:00:00 PST"))
| 34.672897
| 101
| 0.687871
|
794d104d5919bdffde44dbd4facd472c2810fae2
| 13,654
|
py
|
Python
|
Lib/test/test_peepholer.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 854
|
2017-09-11T16:42:28.000Z
|
2022-03-27T14:17:09.000Z
|
Lib/test/test_peepholer.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 164
|
2017-09-24T20:40:32.000Z
|
2021-10-30T01:35:05.000Z
|
Lib/test/test_peepholer.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 73
|
2017-09-13T18:07:48.000Z
|
2022-03-17T13:02:29.000Z
|
import dis
import unittest
from test.bytecode_helper import BytecodeTestCase
def count_instr_recursively(f, opname):
count = 0
for instr in dis.get_instructions(f):
if instr.opname == opname:
count += 1
if hasattr(f, '__code__'):
f = f.__code__
for c in f.co_consts:
if hasattr(c, 'co_code'):
count += count_instr_recursively(c, opname)
return count
class TestTranforms(BytecodeTestCase):
def test_unot(self):
# UNARY_NOT POP_JUMP_IF_FALSE --> POP_JUMP_IF_TRUE'
def unot(x):
if not x == 2:
del x
self.assertNotInBytecode(unot, 'UNARY_NOT')
self.assertNotInBytecode(unot, 'POP_JUMP_IF_FALSE')
self.assertInBytecode(unot, 'POP_JUMP_IF_TRUE')
def test_elim_inversion_of_is_or_in(self):
for line, cmp_op in (
('not a is b', 'is not',),
('not a in b', 'not in',),
('not a is not b', 'is',),
('not a not in b', 'in',),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'COMPARE_OP', cmp_op)
def test_global_as_constant(self):
# LOAD_GLOBAL None/True/False --> LOAD_CONST None/True/False
def f():
x = None
x = None
return x
def g():
x = True
return x
def h():
x = False
return x
for func, elem in ((f, None), (g, True), (h, False)):
self.assertNotInBytecode(func, 'LOAD_GLOBAL')
self.assertInBytecode(func, 'LOAD_CONST', elem)
def f():
'Adding a docstring made this test fail in Py2.5.0'
return None
self.assertNotInBytecode(f, 'LOAD_GLOBAL')
self.assertInBytecode(f, 'LOAD_CONST', None)
def test_while_one(self):
# Skip over: LOAD_CONST trueconst POP_JUMP_IF_FALSE xx
def f():
while 1:
pass
return list
for elem in ('LOAD_CONST', 'POP_JUMP_IF_FALSE'):
self.assertNotInBytecode(f, elem)
for elem in ('JUMP_ABSOLUTE',):
self.assertInBytecode(f, elem)
def test_pack_unpack(self):
for line, elem in (
('a, = a,', 'LOAD_CONST',),
('a, b = a, b', 'ROT_TWO',),
('a, b, c = a, b, c', 'ROT_THREE',),
):
code = compile(line,'','single')
self.assertInBytecode(code, elem)
self.assertNotInBytecode(code, 'BUILD_TUPLE')
self.assertNotInBytecode(code, 'UNPACK_TUPLE')
def test_folding_of_tuples_of_constants(self):
for line, elem in (
('a = 1,2,3', (1, 2, 3)),
('("a","b","c")', ('a', 'b', 'c')),
('a,b,c = 1,2,3', (1, 2, 3)),
('(None, 1, None)', (None, 1, None)),
('((1, 2), 3, 4)', ((1, 2), 3, 4)),
):
code = compile(line,'','single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertNotInBytecode(code, 'BUILD_TUPLE')
# Long tuples should be folded too.
code = compile(repr(tuple(range(10000))),'','single')
self.assertNotInBytecode(code, 'BUILD_TUPLE')
# One LOAD_CONST for the tuple, one for the None return value
load_consts = [instr for instr in dis.get_instructions(code)
if instr.opname == 'LOAD_CONST']
self.assertEqual(len(load_consts), 2)
# Bug 1053819: Tuple of constants misidentified when presented with:
# . . . opcode_with_arg 100 unary_opcode BUILD_TUPLE 1 . . .
# The following would segfault upon compilation
def crater():
(~[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
],)
def test_folding_of_lists_of_constants(self):
for line, elem in (
# in/not in constants with BUILD_LIST should be folded to a tuple:
('a in [1,2,3]', (1, 2, 3)),
('a not in ["a","b","c"]', ('a', 'b', 'c')),
('a in [None, 1, None]', (None, 1, None)),
('a not in [(1, 2), 3, 4]', ((1, 2), 3, 4)),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertNotInBytecode(code, 'BUILD_LIST')
def test_folding_of_sets_of_constants(self):
for line, elem in (
# in/not in constants with BUILD_SET should be folded to a frozenset:
('a in {1,2,3}', frozenset({1, 2, 3})),
('a not in {"a","b","c"}', frozenset({'a', 'c', 'b'})),
('a in {None, 1, None}', frozenset({1, None})),
('a not in {(1, 2), 3, 4}', frozenset({(1, 2), 3, 4})),
('a in {1, 2, 3, 3, 2, 1}', frozenset({1, 2, 3})),
):
code = compile(line, '', 'single')
self.assertNotInBytecode(code, 'BUILD_SET')
self.assertInBytecode(code, 'LOAD_CONST', elem)
# Ensure that the resulting code actually works:
def f(a):
return a in {1, 2, 3}
def g(a):
return a not in {1, 2, 3}
self.assertTrue(f(3))
self.assertTrue(not f(4))
self.assertTrue(not g(3))
self.assertTrue(g(4))
def test_folding_of_binops_on_constants(self):
for line, elem in (
('a = 2+3+4', 9), # chained fold
('"@"*4', '@@@@'), # check string ops
('a="abc" + "def"', 'abcdef'), # check string ops
('a = 3**4', 81), # binary power
('a = 3*4', 12), # binary multiply
('a = 13//4', 3), # binary floor divide
('a = 14%4', 2), # binary modulo
('a = 2+3', 5), # binary add
('a = 13-4', 9), # binary subtract
('a = (12,13)[1]', 13), # binary subscr
('a = 13 << 2', 52), # binary lshift
('a = 13 >> 2', 3), # binary rshift
('a = 13 & 7', 5), # binary and
('a = 13 ^ 7', 10), # binary xor
('a = 13 | 7', 15), # binary or
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('BINARY_'))
# Verify that unfoldables are skipped
code = compile('a=2+"b"', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 2)
self.assertInBytecode(code, 'LOAD_CONST', 'b')
# Verify that large sequences do not result from folding
code = compile('a="x"*10000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 10000)
self.assertNotIn("x"*10000, code.co_consts)
code = compile('a=1<<1000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 1000)
self.assertNotIn(1<<1000, code.co_consts)
code = compile('a=2**1000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 1000)
self.assertNotIn(2**1000, code.co_consts)
def test_binary_subscr_on_unicode(self):
# valid code get optimized
code = compile('"foo"[0]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 'f')
self.assertNotInBytecode(code, 'BINARY_SUBSCR')
code = compile('"\u0061\uffff"[1]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', '\uffff')
self.assertNotInBytecode(code,'BINARY_SUBSCR')
# With PEP 393, non-BMP char get optimized
code = compile('"\U00012345"[0]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', '\U00012345')
self.assertNotInBytecode(code, 'BINARY_SUBSCR')
# invalid code doesn't get optimized
# out of range
code = compile('"fuu"[10]', '', 'single')
self.assertInBytecode(code, 'BINARY_SUBSCR')
def test_folding_of_unaryops_on_constants(self):
for line, elem in (
('-0.5', -0.5), # unary negative
('-0.0', -0.0), # -0.0
('-(1.0-1.0)', -0.0), # -0.0 after folding
('-0', 0), # -0
('~-2', 1), # unary invert
('+1', 1), # unary positive
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('UNARY_'))
# Check that -0.0 works after marshaling
def negzero():
return -(1.0-1.0)
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('UNARY_'))
# Verify that unfoldables are skipped
for line, elem, opname in (
('-"abc"', 'abc', 'UNARY_NEGATIVE'),
('~"abc"', 'abc', 'UNARY_INVERT'),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertInBytecode(code, opname)
def test_elim_extra_return(self):
# RETURN LOAD_CONST None RETURN --> RETURN
def f(x):
return x
self.assertNotInBytecode(f, 'LOAD_CONST', None)
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 1)
def test_elim_jump_to_return(self):
# JUMP_FORWARD to RETURN --> RETURN
def f(cond, true_value, false_value):
return true_value if cond else false_value
self.assertNotInBytecode(f, 'JUMP_FORWARD')
self.assertNotInBytecode(f, 'JUMP_ABSOLUTE')
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 2)
def test_elim_jump_after_return1(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
if cond1: return 1
if cond2: return 2
while 1:
return 3
while 1:
if cond1: return 4
return 5
return 6
self.assertNotInBytecode(f, 'JUMP_FORWARD')
self.assertNotInBytecode(f, 'JUMP_ABSOLUTE')
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertLessEqual(len(returns), 6)
def test_elim_jump_after_return2(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
while 1:
if cond1: return 4
self.assertNotInBytecode(f, 'JUMP_FORWARD')
# There should be one jump for the while loop.
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'JUMP_ABSOLUTE']
self.assertEqual(len(returns), 1)
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertLessEqual(len(returns), 2)
def test_make_function_doesnt_bail(self):
def f():
def g()->1+1:
pass
return g
self.assertNotInBytecode(f, 'BINARY_ADD')
def test_constant_folding(self):
# Issue #11244: aggressive constant folding.
exprs = [
'3 * -5',
'-3 * 5',
'2 * (3 * 4)',
'(2 * 3) * 4',
'(-1, 2, 3)',
'(1, -2, 3)',
'(1, 2, -3)',
'(1, 2, -3) * 6',
'lambda x: x in {(3 * -5) + (-1 - 6), (1, -2, 3) * 2, None}',
]
for e in exprs:
code = compile(e, '', 'single')
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('UNARY_'))
self.assertFalse(instr.opname.startswith('BINARY_'))
self.assertFalse(instr.opname.startswith('BUILD_'))
def test_in_literal_list(self):
def containtest():
return x in [a, b]
self.assertEqual(count_instr_recursively(containtest, 'BUILD_LIST'), 0)
def test_iterate_literal_list(self):
def forloop():
for x in [a, b]:
pass
self.assertEqual(count_instr_recursively(forloop, 'BUILD_LIST'), 0)
class TestBuglets(unittest.TestCase):
def test_bug_11510(self):
# folded constant set optimization was commingled with the tuple
# unpacking optimization which would fail if the set had duplicate
# elements so that the set length was unexpected
def f():
x, y = {1, 1}
return x, y
with self.assertRaises(ValueError):
f()
if __name__ == "__main__":
unittest.main()
| 38.570621
| 81
| 0.508056
|
794d1177cae6c33f0bc2ec787b8a58029b889893
| 379
|
py
|
Python
|
python/testData/inspections/PyMethodParametersInspectionInitSubclass/test.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyMethodParametersInspectionInitSubclass/test.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyMethodParametersInspectionInitSubclass/test.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class QuestBase:
def __init_subclass__(cls, swallow, **kwargs):
cls.swallow = swallow
super().__init_subclass__(**kwargs)
class QuestBase:
def __init_subclass__(<weak_warning descr="Usually first parameter of such methods is named 'cls'">self</weak_warning>, swallow, **kwargs):
self.swallow = swallow
super().__init_subclass__(**kwargs)
| 37.9
| 143
| 0.699208
|
794d11823cd260f3694a72b71aab70719c3081bb
| 2,821
|
py
|
Python
|
deeplytough/datasets/custom.py
|
truatpasteurdotfr/DeeplyTough
|
fd4737b464c5724312a97654548bcf9cb3b2e258
|
[
"FTL",
"Xnet",
"Net-SNMP"
] | 105
|
2019-04-03T20:39:32.000Z
|
2022-03-25T01:24:46.000Z
|
deeplytough/datasets/custom.py
|
truatpasteurdotfr/DeeplyTough
|
fd4737b464c5724312a97654548bcf9cb3b2e258
|
[
"FTL",
"Xnet",
"Net-SNMP"
] | 11
|
2020-01-10T17:16:57.000Z
|
2022-02-21T12:55:39.000Z
|
deeplytough/datasets/custom.py
|
truatpasteurdotfr/DeeplyTough
|
fd4737b464c5724312a97654548bcf9cb3b2e258
|
[
"FTL",
"Xnet",
"Net-SNMP"
] | 32
|
2019-04-07T12:18:58.000Z
|
2022-02-06T21:51:18.000Z
|
import os
from misc.utils import htmd_featurizer
class Custom:
""" An arbitrary user dataset
Assumes that the dataset is placed in `$STRUCTURE_DATA_DIR/relpath`, containing
a bunch of protein and pocket structures, which are referred to in `pairs.csv`. This
file contains a quadruplet on each line indicating matches to evaluate:
relative_path_to_pdbA, relative_path_to_pocketA, relative_path_to_pdbB, relative_path_to_pocketB
"""
def __init__(self, relpath='custom'):
self.relpath = relpath
def preprocess_once(self):
""" Computes featurization """
htmd_featurizer(self.get_structures(), skip_existing=True)
def get_structures(self):
""" Get list of PDB structures with metainfo """
root = os.path.join(os.environ.get('STRUCTURE_DATA_DIR'), self.relpath)
npz_root = os.path.join(os.environ.get('STRUCTURE_DATA_DIR'), 'processed/htmd', self.relpath)
custom_pdbs = set()
with open(os.path.join(root, 'pairs.csv')) as f:
for i, line in enumerate(f.readlines()):
tokens = line.split(',')
assert len(tokens)==4, 'pairs.csv is expected to have four columns.'
custom_pdbs.add((tokens[0].strip(), tokens[1].strip()))
custom_pdbs.add((tokens[2].strip(), tokens[3].strip()))
entries = []
for pdb, pocket in custom_pdbs:
pdb1 = pdb if os.path.splitext(pdb)[1] != '' else pdb + '.pdb'
pocket1 = pocket if os.path.splitext(pocket)[1] != '' else pocket + '.pdb'
entries.append({'protein': os.path.join(root, pdb1),
'pocket': os.path.join(root, pocket1),
'protein_htmd': os.path.join(npz_root, pdb1.replace('.pdb', '.npz')),
'key': pdb + ',' + pocket})
return entries
def evaluate_matching(self, descriptor_entries, matcher):
"""
Compute pocket matching scores on the custom dataset.
:param descriptor_entries: List of entries
:param matcher: PocketMatcher instance
:return:
"""
target_dict = {d['key']: d for d in descriptor_entries}
root = os.path.join(os.environ.get('STRUCTURE_DATA_DIR'), self.relpath)
pairs = []
with open(os.path.join(root, 'pairs.csv')) as f:
for i, line in enumerate(f.readlines()):
tokens = line.split(',')
assert len(tokens)==4, 'pairs.csv is expected to have four columns.'
key1 = tokens[0].strip() + ',' + tokens[1].strip()
key2 = tokens[2].strip() + ',' + tokens[3].strip()
pairs.append((target_dict[key1], target_dict[key2]))
scores = matcher.pair_match(pairs)
return {'pairs': pairs, 'scores': scores}
| 40.884058
| 101
| 0.601914
|
794d12373a8b3f25a31cfb2d92eef859882ba71e
| 7,588
|
py
|
Python
|
app/myBasicAsyncioPubSub.py
|
elsampsa/aws-iot-testbed
|
0668d3df9b2d9d937001ccf321464e7c4a6470fc
|
[
"WTFPL"
] | null | null | null |
app/myBasicAsyncioPubSub.py
|
elsampsa/aws-iot-testbed
|
0668d3df9b2d9d937001ccf321464e7c4a6470fc
|
[
"WTFPL"
] | null | null | null |
app/myBasicAsyncioPubSub.py
|
elsampsa/aws-iot-testbed
|
0668d3df9b2d9d937001ccf321464e7c4a6470fc
|
[
"WTFPL"
] | null | null | null |
"""Connecting, subscribing, receiving and sending messages with MQTT client
This version uses the callback version of the API (connectAsync, publishAsync, etc.)
..but we turn & twist that version into asyncio (async/await) code that starts
to look pretty nice.
API reference for the original library:
https://s3.amazonaws.com/aws-iot-device-sdk-python-docs/html/index.html
- client id is "myPubSub"
- topic is "kikkelis/kokkelis"
In order for this to work, you need to set correct permissions, so head to
``AWS IoT => Secure => Policies``
And append your thing's policies as follows:
::
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"iot:Publish",
"iot:Receive",
"iot:RetainPublish"
],
"Resource": [
...
"arn:aws:iot:us-west-2:263211xxxxxx:topic/kikkelis/kokkelis"
]
},
{
"Effect": "Allow",
"Action": "iot:Subscribe",
"Resource": [
...
"arn:aws:iot:us-west-2:263211xxxxxx:topicfilter/kikkelis/kokkelis"
]
},
{
"Effect": "Allow",
"Action": "iot:Connect",
"Resource": [
...
"arn:aws:iot:us-west-2:263211xxxxxx:client/myPubSub"
]
}
]
}
Send some messages from the ``test_publish_mqtt.py`` notebook and see if you can receive them here
"""
# https://github.com/aws/aws-iot-device-sdk-python
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import time, json, sys
import asyncio
# certificates for this IoT "thing"
# created at the AWS IoT console
cert_file ="/app/cert/thing1.cert.pem"
ca_cert_file ="/app/cert/root-CA.crt"
cert_pk_file="/app/cert/thing1.private.key"
"""AWS IoT data endpoint for messaging
Can be obtained like this:
::
iot_client = boto3.client('iot')
response = iot_client.describe_endpoint(
endpointType='iot:Data-ATS'
)
print(response["endpointAddress"])
end_point_adr = response["endpointAddress"]
"""
host = "a20qyadf9v1i2e-ats.iot.us-west-2.amazonaws.com"
class Namespace:
pass
globals = Namespace() # this hack allows us to access event loop in messageCallback
class AWSIoTMQTTClientAsync(AWSIoTMQTTClient):
def saveEventLoop(self):
global globals
self.event_loop = asyncio.get_event_loop()
globals.event_loop = self.event_loop
async def connectAio(self, keepAliveIntervalSecond=3, timeout=None):
def func(mid, data, event, loop, ns: Namespace):
"""AWSIoTMQTTClient seems to call this ....er from another thread
so we must use loop.call_soon_threadsafe
see: https://stackoverflow.com/questions/47673104/asyncio-event-wait-function-does-not-continue-after-event-has-been-stored
parameters, namespace
"""
print("connect_cb", mid, data)
ns.result = "newval"
# print("setting event")
loop.call_soon_threadsafe(event.set)
event.set()
# print("event set")
event = asyncio.Event()
ns = Namespace()
# loop = asyncio.get_event_loop()
self.connectAsync(
keepAliveIntervalSecond=keepAliveIntervalSecond,
ackCallback = lambda mid, data: func(mid, data, event, self.event_loop, ns)
)
print("awaiting event")
await asyncio.wait_for(event.wait(), timeout = timeout)
print("event awaited")
# now ns contains results.. could contain exceptions as well..?
# return ns.result
async def subscribeAio(self, topic, Qos, messageCallback: callable, timeout = None):
def func(mid, data, event, loop, ns: Namespace):
print("subs_cb", mid, data)
ns.result = "newval"
loop.call_soon_threadsafe(event.set)
event.set()
print("event set")
event = asyncio.Event()
ns = Namespace()
# loop = asyncio.get_event_loop()
self.subscribeAsync(
topic,
Qos,
ackCallback = lambda mid, data: func(mid, data, event, self.event_loop, ns),
messageCallback = messageCallback
)
await asyncio.wait_for(event.wait(), timeout = timeout)
return ns.result
async def publishAio(self, topic, payload, Qos, timeout = None):
def func(mid, event, loop, ns: Namespace):
print("pub_cb", mid)
ns.result = "newval"
loop.call_soon_threadsafe(event.set)
event.set()
print("event set")
event = asyncio.Event()
ns = Namespace()
# loop = asyncio.get_event_loop()
self.publishAsync(
topic,
payload,
Qos,
ackCallback = lambda mid: func(mid, event, self.event_loop, ns)
)
await asyncio.wait_for(event.wait(), timeout= timeout)
return ns.result
async def main():
def messageCallback(client, userdata, message):
"""WARNING: this is called from another thread
"""
# print("client>", client) # deprecated!
# print("userdata>", userdata) # deprecated!
# event_loop = client.event_loop
# global event_loop # only way to pass information into here!
# event_loop = asyncio.get_event_loop() # nopes
# global global_queue
global globals
print("messageCallback: event_loop", globals.event_loop)
print("Received a new message: ")
print(message.payload)
print("from topic: ")
print(message.topic)
print("--------------\n\n")
globals.event_loop.call_soon_threadsafe(
globals.queue.put_nowait,
message
)
port=8883
clientId="myPubSub"
topic = "kikkelis/kokkelis"
# also send messages?
# send = False
send = True
client = AWSIoTMQTTClientAsync(clientId)
client.configureEndpoint(host, port)
client.configureCredentials(ca_cert_file, cert_pk_file, cert_file)
client.configureAutoReconnectBackoffTime(1, 32, 20)
client.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
client.configureDrainingFrequency(2) # Draining: 2 Hz
client.configureConnectDisconnectTimeout(10) # 10 sec
client.configureMQTTOperationTimeout(5) # 5 sec
print("main thread event loop:", asyncio.get_event_loop())
client.saveEventLoop() # saves main thread's event loop to globals.event_loop
global globals
globals.queue = asyncio.Queue()
print("connecting")
res = await client.connectAio()
print("connect got", res)
res = await client.subscribeAio(topic, 1, messageCallback)
print("subscribe got", res)
print("waiting 60 secs")
count = 0
while True:
print("still listening to topic", topic)
await asyncio.sleep(3)
if send:
message = {}
message['message'] = "message from IoT device"
message['sequence'] = count
messageJson = json.dumps(message)
print('publishing topic %s: %s\n' % (topic, messageJson))
res = await client.publishAio(topic, messageJson, 1)
print("publish got result", res)
item = await globals.queue.get()
print("got from queue", item.payload)
count += 1
asyncio.get_event_loop().run_until_complete(main())
| 30.845528
| 135
| 0.60622
|
794d125939ff0c264838a6e9d76b6c99a692c07c
| 22,714
|
py
|
Python
|
meta_pseudo_labels/training_utils.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
meta_pseudo_labels/training_utils.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
meta_pseudo_labels/training_utils.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=logging-format-interpolation
# pylint: disable=unused-import
# pylint: disable=protected-access
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-long-lambda
r"""Docs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import os
import sys
import time
import traceback
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from meta_pseudo_labels import common_utils
from meta_pseudo_labels import data_utils
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding
from tensorflow.python.tpu import tpu_feed
MODEL_SCOPE = 'model'
def eval_step_fn(params, model):
"""Build `step_fn` for eval."""
dtypes = [tf.bfloat16 if params.use_bfloat16 else tf.float32,
tf.float32, tf.float32]
batch_size = params.eval_batch_size // params.num_replicas
image_size = (params.eval_image_size if 'eval_image_size' in params
else params.image_size)
shapes = [[batch_size, image_size, image_size, 3],
[batch_size, params.num_classes],
[batch_size]]
if params.use_xla_sharding and params.num_cores_per_replica > 1:
q = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=3,
host_id=0,
input_partition_dims=[[1, 1, params.num_cores_per_replica, 1],
[1, 1], [1]],
device_assignment=params.device_assignment)
q.set_tuple_types(dtypes)
q.set_tuple_shapes(shapes)
images, labels, mask = q.generate_dequeue_op()
images = xla_sharding.split(images, 2, params.num_cores_per_replica)
else:
with tf.device(tf.tpu.core(0)):
images, labels, mask = tf.raw_ops.InfeedDequeueTuple(dtypes=dtypes,
shapes=shapes)
if len(labels.shape) > 1: # `labels` is one_hot. turn it to `int32`
labels = tf.argmax(labels, axis=-1, output_type=tf.int32)
labels = tf.expand_dims(labels, axis=-1)
_ = tf.train.get_or_create_global_step()
with tf.variable_scope(MODEL_SCOPE):
logits = model(images, training=False)
logits = tf.cast(logits, tf.float32)
return logits, labels, mask
class Supervised(object):
"""Supervised information."""
def __init__(self):
step_info = collections.OrderedDict()
self.step_info = step_info
def outfeed_signature(self):
"""Returns the sigature of `step_info` as returned by `step_fn`."""
return self.step_info
def step_fn(self, params, model):
"""A single step for supervised learning."""
batch_size = params.train_batch_size // params.num_replicas
dtypes = [tf.bfloat16 if params.use_bfloat16 else tf.float32, tf.float32]
shapes = [[batch_size, params.image_size, params.image_size, 3],
[batch_size, params.num_classes]]
if params.use_xla_sharding and params.num_cores_per_replica > 1:
q = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=2,
host_id=0,
input_partition_dims=[[1, 1, params.num_cores_per_replica, 1],
[1, 1]],
device_assignment=params.device_assignment)
q.set_tuple_types(dtypes)
q.set_tuple_shapes(shapes)
images, labels = q.generate_dequeue_op()
images = xla_sharding.split(images, 2, params.num_cores_per_replica)
else:
with tf.device(tf.tpu.core(0)):
images, labels = tf.raw_ops.InfeedDequeueTuple(dtypes=dtypes,
shapes=shapes)
if labels.dtype == tf.int32:
labels = tf.one_hot(labels, depth=params.num_classes, dtype=tf.float32)
global_step = tf.train.get_or_create_global_step()
train_batch_size = tf.cast(params.train_batch_size, tf.float32)
num_replicas = tf.cast(params.num_replicas, tf.float32)
with tf.variable_scope(MODEL_SCOPE):
logits = model(images, training=True)
if 'noisy_student' in params.dataset_name.lower():
cross_entropy = labels * tf.nn.log_softmax(logits, axis=-1)
cross_entropy = tf.reduce_sum(-cross_entropy) / train_batch_size
else:
cross_entropy = tf.losses.softmax_cross_entropy(
onehot_labels=labels, logits=logits,
label_smoothing=params.label_smoothing,
reduction=tf.losses.Reduction.SUM) / train_batch_size
l2_reg_rate = tf.cast(params.weight_decay / params.num_replicas, tf.float32)
weight_dec = common_utils.get_l2_loss()
total_loss = cross_entropy + weight_dec * l2_reg_rate
variables = tf.trainable_variables()
gradients = tf.gradients(total_loss, variables)
gradients = [tf.tpu.cross_replica_sum(g) for g in gradients]
gradients, grad_norm = tf.clip_by_global_norm(gradients, params.grad_bound)
learning_rate, optimizer = common_utils.get_optimizer(params)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = tf.cond(
tf.math.is_finite(grad_norm),
lambda: optimizer.apply_gradients(zip(gradients, variables),
global_step=global_step),
tf.no_op)
with tf.control_dependencies(update_ops + [train_op]):
ema_train_op = common_utils.setup_ema(params,
f'{MODEL_SCOPE}/{model.name}')
with tf.control_dependencies([ema_train_op]):
logs = collections.OrderedDict()
logs['global_step'] = tf.cast(global_step, tf.float32)
logs['loss/total'] = total_loss
logs['loss/weight_decay'] = weight_dec / num_replicas
logs['loss/cross_entropy'] = cross_entropy
logs['loss/lr'] = tf.identity(learning_rate) / num_replicas
logs['loss/grad_norm'] = grad_norm / num_replicas
tensors = [tf.expand_dims(t, axis=0) for t in logs.values()]
self.step_info = {k: [tf.float32, [1]] for k in logs.keys()}
outfeed_enqueue_op = tf.cond(
common_utils.should_log(params),
lambda: tf.raw_ops.OutfeedEnqueueTuple(inputs=tensors), tf.no_op)
return outfeed_enqueue_op
class UDA(object):
"""UDA (https://arxiv.org/abs/1904.12848)."""
def __init__(self):
self.step_info = collections.OrderedDict()
def outfeed_signature(self):
"""Returns the sigature of `step_info` as returned by `step_fn`."""
return self.step_info
@staticmethod
def build_uda_cross_entropy(params, model, all_images, l_labels):
"""Compute the UDA loss."""
train_batch_size = params.train_batch_size
num_replicas = params.num_replicas
uda_data = params.uda_data
batch_size = train_batch_size // num_replicas
labels = {}
if l_labels.dtype == tf.int32: # l_labels is sparse. turn into one_hot
labels['l'] = tf.one_hot(l_labels, params.num_classes, dtype=tf.float32)
else:
labels['l'] = l_labels
global_step = tf.train.get_or_create_global_step()
masks = {}
logits = {}
cross_entropy = {}
all_logits = model(all_images, training=True)
logits['l'], logits['u_ori'], logits['u_aug'] = tf.split(
all_logits, [batch_size, batch_size*uda_data, batch_size*uda_data], 0)
# sup loss
cross_entropy['l'] = tf.losses.softmax_cross_entropy(
onehot_labels=labels['l'],
logits=logits['l'],
label_smoothing=params.label_smoothing,
reduction=tf.losses.Reduction.NONE)
probs = tf.nn.softmax(logits['l'], axis=-1)
correct_probs = tf.reduce_sum(labels['l']*probs, axis=-1)
r = tf.cast(global_step, tf.float32) / float(params.num_train_steps)
l_threshold = r * (1. - 1./params.num_classes) + 1. / params.num_classes
masks['l'] = tf.less_equal(correct_probs, l_threshold)
masks['l'] = tf.cast(masks['l'], tf.float32)
masks['l'] = tf.stop_gradient(masks['l'])
cross_entropy['l'] = tf.reduce_sum(cross_entropy['l']) / float(
train_batch_size)
# unsup loss
labels['u_ori'] = tf.nn.softmax(logits['u_ori'] / params.uda_temp, axis=-1)
labels['u_ori'] = tf.stop_gradient(labels['u_ori'])
cross_entropy['u'] = (labels['u_ori'] *
tf.nn.log_softmax(logits['u_aug'], axis=-1))
largest_probs = tf.reduce_max(labels['u_ori'], axis=-1, keepdims=True)
masks['u'] = tf.greater_equal(largest_probs, params.uda_threshold)
masks['u'] = tf.cast(masks['u'], tf.float32)
masks['u'] = tf.stop_gradient(masks['u'])
cross_entropy['u'] = tf.reduce_sum(-cross_entropy['u']*masks['u']) / float(
train_batch_size*uda_data)
return logits, labels, masks, cross_entropy
def step_fn(self, params, model):
"""Separate implementation."""
train_batch_size = params.train_batch_size
num_replicas = params.num_replicas
batch_size = train_batch_size // num_replicas
dtypes = [
tf.bfloat16 if params.use_bfloat16 else tf.float32,
tf.float32,
tf.bfloat16 if params.use_bfloat16 else tf.float32,
tf.bfloat16 if params.use_bfloat16 else tf.float32]
shapes = [
[batch_size, params.image_size, params.image_size, 3],
[batch_size, params.num_classes],
[batch_size*params.uda_data, params.image_size, params.image_size, 3],
[batch_size*params.uda_data, params.image_size, params.image_size, 3]]
if params.use_xla_sharding and params.num_cores_per_replica > 1:
q = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=4,
host_id=0,
input_partition_dims=[[1, 1, params.num_cores_per_replica, 1],
[1, 1],
[1, 1, params.num_cores_per_replica, 1],
[1, 1, params.num_cores_per_replica, 1],],
device_assignment=params.device_assignment)
q.set_tuple_types(dtypes)
q.set_tuple_shapes(shapes)
l_images, l_labels, u_images_ori, u_images_aug = q.generate_dequeue_op()
l_images = xla_sharding.split(l_images, 2,
params.num_cores_per_replica)
u_images_ori = xla_sharding.split(u_images_ori, 2,
params.num_cores_per_replica)
u_images_aug = xla_sharding.split(u_images_aug, 2,
params.num_cores_per_replica)
else:
with tf.device(tf.tpu.core(0)):
(l_images, l_labels, u_images_ori,
u_images_aug) = tf.raw_ops.InfeedDequeueTuple(dtypes=dtypes,
shapes=shapes)
all_images = tf.concat([l_images, u_images_ori, u_images_aug], axis=0)
global_step = tf.train.get_or_create_global_step()
num_replicas = tf.cast(params.num_replicas, tf.float32)
with tf.variable_scope(MODEL_SCOPE, reuse=tf.AUTO_REUSE):
_, _, masks, cross_entropy = UDA.build_uda_cross_entropy(
params, model, all_images, l_labels)
l2_reg_rate = tf.cast(params.weight_decay / params.num_replicas, tf.float32)
weight_dec = common_utils.get_l2_loss()
uda_weight = params.uda_weight * tf.minimum(
1., tf.cast(global_step, tf.float32) / float(params.uda_steps))
total_loss = (cross_entropy['u'] * uda_weight +
cross_entropy['l'] +
weight_dec * l2_reg_rate)
variables = tf.trainable_variables()
gradients = tf.gradients(total_loss, variables)
gradients = [tf.tpu.cross_replica_sum(g) for g in gradients]
gradients, grad_norm = tf.clip_by_global_norm(gradients, params.grad_bound)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
learning_rate, optimizer = common_utils.get_optimizer(params)
with tf.control_dependencies(update_ops):
train_op = optimizer.apply_gradients(zip(gradients, variables),
global_step=global_step)
with tf.control_dependencies([train_op]):
ema_train_op = common_utils.setup_ema(
params, f'{MODEL_SCOPE}/{model.name}')
with tf.control_dependencies([ema_train_op]):
logs = collections.OrderedDict()
logs['global_step'] = tf.cast(global_step, tf.float32)
logs['loss/total'] = total_loss
logs['loss/cross_entropy'] = cross_entropy['l']
logs['loss/lr'] = tf.identity(learning_rate) / num_replicas
logs['loss/grad_norm'] = tf.identity(grad_norm) / num_replicas
logs['loss/weight_dec'] = weight_dec / num_replicas
logs['uda/cross_entropy'] = cross_entropy['u']
logs['uda/u_ratio'] = tf.reduce_mean(masks['u']) / num_replicas
logs['uda/l_ratio'] = tf.reduce_mean(masks['l']) / num_replicas
logs['uda/weight'] = uda_weight / num_replicas
tensors = [tf.expand_dims(t, axis=0) for t in logs.values()]
self.step_info = {k: [tf.float32, [1]] for k in logs.keys()}
outfeed_enqueue_op = tf.cond(
common_utils.should_log(params),
lambda: tf.raw_ops.OutfeedEnqueueTuple(inputs=tensors), tf.no_op)
return outfeed_enqueue_op
class MPL(object):
"""Meta Pseudo Labels."""
def __init__(self):
self.step_info = collections.OrderedDict()
def outfeed_signature(self):
"""Returns the sigature of `step_info` as returned by `step_fn`."""
return self.step_info
def step_fn(self, params, model):
"""Separate implementation."""
train_batch_size = params.train_batch_size
num_replicas = params.num_replicas
uda_data = params.uda_data
batch_size = train_batch_size // num_replicas
dtypes = [
tf.bfloat16 if params.use_bfloat16 else tf.float32,
tf.float32,
tf.bfloat16 if params.use_bfloat16 else tf.float32,
tf.bfloat16 if params.use_bfloat16 else tf.float32]
shapes = [
[batch_size, params.image_size, params.image_size, 3],
[batch_size, params.num_classes],
[batch_size*params.uda_data, params.image_size, params.image_size, 3],
[batch_size*params.uda_data, params.image_size, params.image_size, 3]]
if params.use_xla_sharding and params.num_cores_per_replica > 1:
q = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=4,
host_id=0,
input_partition_dims=[[1, 1, params.num_cores_per_replica, 1],
[1, 1],
[1, 1, params.num_cores_per_replica, 1],
[1, 1, params.num_cores_per_replica, 1],],
device_assignment=params.device_assignment)
q.set_tuple_types(dtypes)
q.set_tuple_shapes(shapes)
l_images, l_labels, u_images_ori, u_images_aug = q.generate_dequeue_op()
l_images = xla_sharding.split(l_images, 2,
params.num_cores_per_replica)
u_images_ori = xla_sharding.split(u_images_ori, 2,
params.num_cores_per_replica)
u_images_aug = xla_sharding.split(u_images_aug, 2,
params.num_cores_per_replica)
else:
with tf.device(tf.tpu.core(0)):
(l_images, l_labels, u_images_ori,
u_images_aug) = tf.raw_ops.InfeedDequeueTuple(dtypes=dtypes,
shapes=shapes)
global_step = tf.train.get_or_create_global_step()
num_replicas = tf.cast(params.num_replicas, tf.float32)
all_images = tf.concat([l_images, u_images_ori, u_images_aug], axis=0)
# all calls to teacher
with tf.variable_scope('teacher', reuse=tf.AUTO_REUSE):
logits, labels, masks, cross_entropy = UDA.build_uda_cross_entropy(
params, model, all_images, l_labels)
# 1st call to student
with tf.variable_scope(MODEL_SCOPE):
u_aug_and_l_images = tf.concat([u_images_aug, l_images], axis=0)
logits['s_on_u_aug_and_l'] = model(u_aug_and_l_images, training=True)
logits['s_on_u'], logits['s_on_l_old'] = tf.split(
logits['s_on_u_aug_and_l'],
[u_images_aug.shape[0].value, l_images.shape[0].value], axis=0)
# for backprop
cross_entropy['s_on_u'] = tf.losses.softmax_cross_entropy(
onehot_labels=tf.stop_gradient(tf.nn.softmax(logits['u_aug'], -1)),
logits=logits['s_on_u'],
label_smoothing=params.label_smoothing,
reduction=tf.losses.Reduction.NONE)
cross_entropy['s_on_u'] = tf.reduce_sum(cross_entropy['s_on_u']) / float(
train_batch_size*uda_data)
# for Taylor
cross_entropy['s_on_l_old'] = tf.losses.softmax_cross_entropy(
onehot_labels=labels['l'],
logits=logits['s_on_l_old'],
reduction=tf.losses.Reduction.SUM)
cross_entropy['s_on_l_old'] = tf.tpu.cross_replica_sum(
cross_entropy['s_on_l_old']) / float(train_batch_size)
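    # `shadow` caches the student's labeled cross entropy from before the
    # pseudo-label update; the fresh value computed after the update is
    # compared against it below to form the MPL signal for the teacher.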
shadow = tf.get_variable(
name='cross_entropy_old', shape=[], trainable=False, dtype=tf.float32)
shadow_update = tf.assign(shadow, cross_entropy['s_on_l_old'])
w_s = {}
g_s = {}
g_n = {}
lr = {}
optim = {}
w_s['s'] = [w for w in tf.trainable_variables()
if w.name.lower().startswith(MODEL_SCOPE)]
g_s['s_on_u'] = tf.gradients(cross_entropy['s_on_u'], w_s['s'])
# g_s['s_on_u'] = [tf.tpu.cross_replica_sum(g) for g in g_s['s_on_u']]
lr['s'] = common_utils.get_learning_rate(
params,
initial_lr=params.mpl_student_lr,
num_warmup_steps=params.mpl_student_lr_warmup_steps,
num_wait_steps=params.mpl_student_lr_wait_steps)
lr['s'], optim['s'] = common_utils.get_optimizer(
params, learning_rate=lr['s'])
optim['s']._create_slots(w_s['s']) # pylint: disable=protected-access
update_ops = [op for op in tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if op.name.startswith(f'train/{MODEL_SCOPE}/')]
with tf.control_dependencies(update_ops + [shadow_update]):
g_s['s_on_u'] = common_utils.add_weight_decay(
params, w_s['s'], g_s['s_on_u'])
g_s['s_on_u'], g_n['s_on_u'] = tf.clip_by_global_norm(
g_s['s_on_u'], params.grad_bound)
train_op = optim['s'].apply_gradients(zip(g_s['s_on_u'], w_s['s']))
with tf.control_dependencies([train_op]):
ema_train_op = common_utils.setup_ema(
params, name_scope=f'{MODEL_SCOPE}/{model.name}')
# 2nd call to student
with tf.control_dependencies([ema_train_op]):
with tf.variable_scope(MODEL_SCOPE, reuse=tf.AUTO_REUSE):
logits['s_on_l_new'] = model(l_images, training=True)
cross_entropy['s_on_l_new'] = tf.losses.softmax_cross_entropy(
onehot_labels=labels['l'],
logits=logits['s_on_l_new'],
reduction=tf.losses.Reduction.SUM)
cross_entropy['s_on_l_new'] = tf.tpu.cross_replica_sum(
cross_entropy['s_on_l_new']) / float(train_batch_size)
dot_product = cross_entropy['s_on_l_new'] - shadow
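    # dot_product measures how the student's labeled cross entropy changed
    # relative to the cached pre-update value; it is centered below with an
    # exponential moving average (decay 0.99) that acts as a baseline.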
# dot_product = tf.clip_by_value(
# dot_product,
# clip_value_min=-params.mpl_dot_product_bound,
# clip_value_max=params.mpl_dot_product_bound)
moving_dot_product = tf.get_variable(
'moving_dot_product', shape=[], trainable=False, dtype=tf.float32)
moving_dot_product_update = tf.assign_sub(
moving_dot_product, 0.01 * (moving_dot_product - dot_product))
with tf.control_dependencies([moving_dot_product_update]):
dot_product = dot_product - moving_dot_product
dot_product = tf.stop_gradient(dot_product)
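    # The MPL term is the cross entropy of the teacher's augmented-batch logits
    # against their own (stop-gradient) soft predictions; multiplied by the
    # baselined, stop-gradient dot product in the teacher loss below, it routes
    # the student's labeled-loss change back into the teacher's pseudo labels.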
cross_entropy['mpl'] = tf.losses.softmax_cross_entropy(
onehot_labels=tf.stop_gradient(tf.nn.softmax(logits['u_aug'], axis=-1)),
logits=logits['u_aug'],
reduction=tf.losses.Reduction.NONE)
cross_entropy['mpl'] = tf.reduce_sum(cross_entropy['mpl']) / float(
train_batch_size*uda_data)
# teacher train op
uda_weight = params.uda_weight * tf.minimum(
1., tf.cast(global_step, tf.float32) / float(params.uda_steps))
teacher_loss = (cross_entropy['u'] * uda_weight +
cross_entropy['l'] +
cross_entropy['mpl'] * dot_product)
w_s['t'] = [w for w in tf.trainable_variables() if 'teacher' in w.name]
g_s['t'] = tf.gradients(teacher_loss, w_s['t'])
g_s['t'] = common_utils.add_weight_decay(params, w_s['t'], g_s['t'])
g_s['t'], g_n['t'] = tf.clip_by_global_norm(g_s['t'], params.grad_bound)
lr['t'] = common_utils.get_learning_rate(
params,
initial_lr=params.mpl_teacher_lr,
num_warmup_steps=params.mpl_teacher_lr_warmup_steps)
lr['t'], optim['t'] = common_utils.get_optimizer(params,
learning_rate=lr['t'])
teacher_train_op = optim['t'].apply_gradients(zip(g_s['t'], w_s['t']),
global_step=global_step)
with tf.control_dependencies([teacher_train_op]):
logs = collections.OrderedDict()
logs['global_step'] = tf.cast(global_step, tf.float32)
logs['cross_entropy/student_on_u'] = cross_entropy['s_on_u']
logs['cross_entropy/student_on_l'] = (cross_entropy['s_on_l_new'] /
num_replicas)
logs['cross_entropy/teacher_on_u'] = cross_entropy['u']
logs['cross_entropy/teacher_on_l'] = cross_entropy['l']
logs['lr/student'] = tf.identity(lr['s']) / num_replicas
logs['lr/teacher'] = tf.identity(lr['t']) / num_replicas
logs['mpl/dot_product'] = dot_product / num_replicas
logs['mpl/moving_dot_product'] = moving_dot_product / num_replicas
logs['uda/u_ratio'] = tf.reduce_mean(masks['u']) / num_replicas
logs['uda/l_ratio'] = tf.reduce_mean(masks['l']) / num_replicas
logs['uda/weight'] = uda_weight / num_replicas
tensors = [tf.expand_dims(t, axis=0) for t in logs.values()]
self.step_info = {k: [tf.float32, [1]] for k in logs.keys()}
def outfeed(tensors):
with tf.device(tf.tpu.core(params.num_cores_per_replica-1)):
return tf.raw_ops.OutfeedEnqueueTuple(inputs=tensors)
outfeed_enqueue_op = tf.cond(
common_utils.should_log(params), lambda: outfeed(tensors), tf.no_op)
return outfeed_enqueue_op
| 42.219331
| 80
| 0.663511
|
794d137cd47d15940e210c1c23a33d6f90ffb0b7
| 3,566
|
py
|
Python
|
tests/integration/session/test_sign_out_and_exit.py
|
petechd/eq-questionnaire-runner
|
1c5b182a7f8bc878cfdd767ae080410fa679abd6
|
[
"MIT"
] | null | null | null |
tests/integration/session/test_sign_out_and_exit.py
|
petechd/eq-questionnaire-runner
|
1c5b182a7f8bc878cfdd767ae080410fa679abd6
|
[
"MIT"
] | null | null | null |
tests/integration/session/test_sign_out_and_exit.py
|
petechd/eq-questionnaire-runner
|
1c5b182a7f8bc878cfdd767ae080410fa679abd6
|
[
"MIT"
] | null | null | null |
from app.survey_config.business_config import BASE_URL
from tests.integration.integration_test_case import IntegrationTestCase
SIGN_OUT_URL_PATH = "/sign-out"
SIGNED_OUT_URL_PATH = "/signed-out"
ACCOUNT_SERVICE_LOG_OUT_URL = "http://localhost/logout"
ACCOUNT_SERVICE_LOG_OUT_URL_PATH = "/logout"
class TestSaveAndSignOut(IntegrationTestCase):
# Test the behaviour when using Hub/No Hub
def test_no_session_cookie_redirects_to_correct_location(self):
for schema in ["test_textfield", "test_hub_and_spoke"]:
with self.subTest(schema=schema):
self.get(SIGN_OUT_URL_PATH, follow_redirects=False)
self.assertInRedirect(BASE_URL)
def test_no_account_service_log_out_url_redirects_to_signed_out_page(self):
for schema in ["test_textfield", "test_hub_and_spoke"]:
with self.subTest(schema=schema):
self.launchSurvey(schema)
self.get(SIGN_OUT_URL_PATH)
self.assertInUrl(SIGNED_OUT_URL_PATH)
def test_redirects_to_account_service_log_out_url_when_present(self):
for schema in ["test_textfield", "test_hub_and_spoke"]:
with self.subTest(schema=schema):
self.launchSurvey(
schema, account_service_log_out_url=ACCOUNT_SERVICE_LOG_OUT_URL
)
self.get(SIGN_OUT_URL_PATH)
self.assertInUrl(ACCOUNT_SERVICE_LOG_OUT_URL_PATH)
def test_head_request_doesnt_sign_out(self):
self.launchSurvey("test_textfield")
self.head(SIGN_OUT_URL_PATH)
self.assertStatusCode(302)
self.get("/questionnaire/name-block")
self.assertStatusOK()
class TestExitPostSubmissionTestCase(IntegrationTestCase):
def _launch_and_submit_questionnaire(self, schema, **kwargs):
self.launchSurvey(schema, **kwargs)
self.post()
self.post()
self.assertInUrl("/thank-you")
class TestExitPostSubmissionWithFinalSummaryDefaultTheme(
TestExitPostSubmissionTestCase
):
def test_no_account_service_log_out_url_redirects_to_signed_out_page(self):
self._launch_and_submit_questionnaire(schema="test_textfield")
self.get(SIGN_OUT_URL_PATH)
self.assertInUrl(SIGNED_OUT_URL_PATH)
def test_redirects_to_account_service_log_out_url_when_present(self):
self._launch_and_submit_questionnaire(
schema="test_textfield",
account_service_log_out_url=ACCOUNT_SERVICE_LOG_OUT_URL,
)
self.get(SIGN_OUT_URL_PATH)
self.assertInUrl(ACCOUNT_SERVICE_LOG_OUT_URL_PATH)
class TestExitPostSubmissionWithHubDefaultTheme(IntegrationTestCase):
def _launch_and_submit_questionnaire(self, schema, **kwargs):
self.launchSurvey(schema, **kwargs)
self.post({"household-relationships-answer": "No"})
self.post()
self.assertInUrl("/thank-you")
def test_no_account_service_log_out_url_redirects_to_signed_out_page(self):
self._launch_and_submit_questionnaire(
schema="test_hub_section_required_and_enabled"
)
self.get(SIGN_OUT_URL_PATH, follow_redirects=False)
self.assertInUrl(SIGN_OUT_URL_PATH)
def test_redirects_to_account_service_log_out_url_when_present(self):
self._launch_and_submit_questionnaire(
schema="test_hub_section_required_and_enabled",
account_service_log_out_url=ACCOUNT_SERVICE_LOG_OUT_URL,
)
self.get(SIGN_OUT_URL_PATH)
self.assertInUrl(ACCOUNT_SERVICE_LOG_OUT_URL_PATH)
| 40.067416
| 83
| 0.727145
|
794d13ce8f3ebeb2ae5d1ab85599214871e13dfd
| 10,197
|
py
|
Python
|
corehq/apps/smsbillables/interface.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/smsbillables/interface.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | 1
|
2021-06-02T04:45:16.000Z
|
2021-06-02T04:45:16.000Z
|
corehq/apps/smsbillables/interface.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db.models.aggregates import Count
from couchexport.models import Format
from dimagi.utils.dates import DateSpan
from corehq.apps.accounting.filters import DateCreatedFilter
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.smsbillables.dispatcher import SMSAdminInterfaceDispatcher
from corehq.apps.smsbillables.filters import (
CountryCodeFilter,
DateSentFilter,
DirectionFilter,
DomainFilter,
GatewayTypeFilter,
HasGatewayFeeFilter,
ShowBillablesFilter,
SpecificGateway,
)
from corehq.apps.smsbillables.models import (
SmsBillable,
SmsGatewayFee,
SmsGatewayFeeCriteria,
)
class SMSBillablesInterface(GenericTabularReport):
base_template = "accounting/report_filter_actions.html"
section_name = "Accounting"
dispatcher = SMSAdminInterfaceDispatcher
name = "SMS Billables"
description = "List of all SMS Billables"
slug = "sms_billables"
ajax_pagination = True
exportable = True
exportable_all = True
export_format_override = Format.UNZIPPED_CSV
fields = [
'corehq.apps.smsbillables.interface.DateSentFilter',
'corehq.apps.accounting.interface.DateCreatedFilter',
'corehq.apps.smsbillables.interface.ShowBillablesFilter',
'corehq.apps.smsbillables.interface.DomainFilter',
'corehq.apps.smsbillables.interface.HasGatewayFeeFilter',
'corehq.apps.smsbillables.interface.GatewayTypeFilter',
]
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn("Date of Message"),
DataTablesColumn("Project Space"),
DataTablesColumn("Direction"),
DataTablesColumn("SMS parts"),
DataTablesColumn("Gateway", sortable=False),
DataTablesColumn("Gateway Charge", sortable=False),
DataTablesColumn("Usage Charge", sortable=False),
DataTablesColumn("Total Charge", sortable=False),
DataTablesColumn("Message Log ID", sortable=False),
DataTablesColumn("Is Valid?", sortable=False),
DataTablesColumn("Date Created"),
)
@property
def sort_field(self):
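        # Map the DataTables sort column index (iSortCol_0) to an ORM field;
        # None marks columns that cannot be sorted server-side.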
sort_fields = [
'date_sent',
'domain',
'direction',
'multipart_count',
None,
None,
None,
None,
None,
'date_created',
]
sort_index = int(self.request.GET.get('iSortCol_0', 1))
field = sort_fields[sort_index]
sort_descending = self.request.GET.get('sSortDir_0', 'asc') == 'desc'
return field if not sort_descending else '-{0}'.format(field)
@property
def shared_pagination_GET_params(self):
return DateSentFilter.shared_pagination_GET_params(self.request) + \
DateCreatedFilter.shared_pagination_GET_params(self.request) + [
{
'name': DateCreatedFilter.optional_filter_slug(),
'value': DateCreatedFilter.optional_filter_string_value(self.request)
},
{
'name': ShowBillablesFilter.slug,
'value': ShowBillablesFilter.get_value(self.request, self.domain)
},
{
'name': DomainFilter.slug,
'value': DomainFilter.get_value(self.request, self.domain)
},
{
'name': HasGatewayFeeFilter.slug,
'value': HasGatewayFeeFilter.get_value(self.request, self.domain)
},
{
'name': GatewayTypeFilter.slug,
'value': GatewayTypeFilter.get_value(self.request, self.domain)
},
]
@property
def get_all_rows(self):
query = self.sms_billables
query = query.order_by(self.sort_field)
return self._format_billables(query)
@property
def total_records(self):
query = self.sms_billables
return query.aggregate(Count('id'))['id__count']
@property
def rows(self):
query = self.sms_billables
query = query.order_by(self.sort_field)
sms_billables = query[self.pagination.start:(self.pagination.start + self.pagination.count)]
return self._format_billables(sms_billables)
def _format_billables(self, sms_billables):
return [
[
sms_billable.date_sent,
sms_billable.domain,
{
INCOMING: "Incoming",
OUTGOING: "Outgoing",
}.get(sms_billable.direction, ""),
sms_billable.multipart_count,
sms_billable.gateway_fee.criteria.backend_api_id if sms_billable.gateway_fee else "",
sms_billable.gateway_charge,
sms_billable.usage_charge,
sms_billable.gateway_charge + sms_billable.usage_charge,
sms_billable.log_id,
sms_billable.is_valid,
sms_billable.date_created,
]
for sms_billable in sms_billables
]
@property
def sms_billables(self):
datespan = DateSpan(DateSentFilter.get_start_date(self.request), DateSentFilter.get_end_date(self.request))
selected_billables = SmsBillable.objects.filter(
date_sent__gte=datespan.startdate,
date_sent__lt=datespan.enddate_adjusted,
)
if DateCreatedFilter.use_filter(self.request):
date_span = DateSpan(
DateCreatedFilter.get_start_date(self.request), DateCreatedFilter.get_end_date(self.request)
)
selected_billables = selected_billables.filter(
date_created__gte=date_span.startdate,
date_created__lt=date_span.enddate_adjusted,
)
show_billables = ShowBillablesFilter.get_value(
self.request, self.domain)
if show_billables:
selected_billables = selected_billables.filter(
is_valid=(show_billables == ShowBillablesFilter.VALID),
)
domain = DomainFilter.get_value(self.request, self.domain)
if domain:
selected_billables = selected_billables.filter(
domain=domain,
)
has_gateway_fee = HasGatewayFeeFilter.get_value(
self.request, self.domain
)
if has_gateway_fee:
if has_gateway_fee == HasGatewayFeeFilter.YES:
selected_billables = selected_billables.exclude(
gateway_fee=None
)
else:
selected_billables = selected_billables.filter(
gateway_fee=None
)
gateway_type = GatewayTypeFilter.get_value(self.request, self.domain)
if gateway_type:
selected_billables = selected_billables.filter(
gateway_fee__criteria__backend_api_id=gateway_type,
)
return selected_billables
class SMSGatewayFeeCriteriaInterface(GenericTabularReport):
base_template = "accounting/report_filter_actions.html"
section_name = "Accounting"
dispatcher = SMSAdminInterfaceDispatcher
name = "SMS Gateway Fee Criteria"
description = "List of all SMS Gateway Fee Criteria"
slug = "sms_gateway_fee_criteria"
exportable = True
exportable_all = True
fields = [
'corehq.apps.smsbillables.interface.GatewayTypeFilter',
'corehq.apps.smsbillables.interface.SpecificGateway',
'corehq.apps.smsbillables.interface.DirectionFilter',
'corehq.apps.smsbillables.interface.CountryCodeFilter',
]
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn("Gateway Type"),
DataTablesColumn("Specific Gateway"),
DataTablesColumn("Direction"),
DataTablesColumn("Country Code"),
DataTablesColumn("Prefix"),
DataTablesColumn("Fee (Amount, Currency)"),
DataTablesColumn("Is Active"),
)
@property
def get_all_rows(self):
return self.rows
@property
def rows(self):
rows = []
for criteria in self.sms_gateway_fee_criteria:
gateway_fee = SmsGatewayFee.get_by_criteria_obj(criteria)
rows.append([
criteria.backend_api_id,
(criteria.backend_instance
if criteria.backend_instance is not None else "Any"),
criteria.direction,
(criteria.country_code
if criteria.country_code is not None else "Any"),
criteria.prefix or "Any",
"%(amount)s %(currency)s" % {
'amount': str(gateway_fee.amount),
'currency': gateway_fee.currency.code,
},
criteria.is_active,
])
return rows
@property
def sms_gateway_fee_criteria(self):
selected_criteria = SmsGatewayFeeCriteria.objects.filter()
gateway_type = GatewayTypeFilter.get_value(self.request, self.domain)
if gateway_type:
selected_criteria = selected_criteria.filter(
backend_api_id=gateway_type,
)
specific_gateway = SpecificGateway.get_value(self.request, self.domain)
if specific_gateway:
selected_criteria = selected_criteria.filter(
backend_instance=specific_gateway,
)
direction = DirectionFilter.get_value(self.request, self.domain)
if direction:
selected_criteria = selected_criteria.filter(
direction=direction,
)
country_code = CountryCodeFilter.get_value(self.request, self.domain)
if country_code:
selected_criteria = selected_criteria.filter(
country_code=int(country_code),
)
return selected_criteria
| 37.488971
| 115
| 0.620771
|
794d13df2cc88c7ef5554a121f8cb1abfa053606
| 1,004
|
py
|
Python
|
beacon/nn/models/sequential.py
|
dusanerdeljan/beacon
|
be0bfc324ed2def9fb5d39be39f346b914b73686
|
[
"MIT"
] | 10
|
2020-04-17T20:26:22.000Z
|
2021-10-14T03:17:53.000Z
|
beacon/nn/models/sequential.py
|
dusanerdeljan/beacon
|
be0bfc324ed2def9fb5d39be39f346b914b73686
|
[
"MIT"
] | null | null | null |
beacon/nn/models/sequential.py
|
dusanerdeljan/beacon
|
be0bfc324ed2def9fb5d39be39f346b914b73686
|
[
"MIT"
] | 2
|
2020-10-15T12:05:08.000Z
|
2022-01-23T15:42:36.000Z
|
from beacon.nn.module import Module
class Sequential(Module):
def __init__(self, *layers):
"""
Sequential model.
## Parameters
layers: `tuple(Module)` - List of modules
## Example usage
```python
        from beacon.nn.models import Sequential
from beacon.nn import Linear
from beacon.nn.activations import Sigmoid
model = Sequential(
Linear(2,4),
Sigmoid(),
Linear(4,4),
Sigmoid(),
Linear(4,1),
Sigmoid()
)
layers = [Linear(2,4),Sigmoid(),Linear(4,4),Sigmoid(),Linear(4,1),Sigmoid()]
model2 = Sequential(*layers)
```
"""
self.layers = layers
for layer in self.layers:
if not isinstance(layer, Module):
raise RuntimeError("Invalid arguments to sequential model.")
def forward(self, x):
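        """
        Passes the input through each layer in order and returns the output
        of the last layer.
        """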
for layer in self.layers:
x = layer(x)
return x
| 26.421053
| 84
| 0.531873
|
794d143ac832a69b52404f6643e4698173c3e6b1
| 98,213
|
py
|
Python
|
tests/components/mqtt/test_cover.py
|
learn-home-automation/core
|
c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
tests/components/mqtt/test_cover.py
|
learn-home-automation/core
|
c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/mqtt/test_cover.py
|
learn-home-automation/core
|
c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""The tests for the MQTT cover platform."""
from unittest.mock import patch
import pytest
from homeassistant.components import cover
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_CURRENT_TILT_POSITION,
ATTR_POSITION,
ATTR_TILT_POSITION,
)
from homeassistant.components.mqtt import CONF_STATE_TOPIC
from homeassistant.components.mqtt.cover import (
CONF_GET_POSITION_TEMPLATE,
CONF_GET_POSITION_TOPIC,
CONF_SET_POSITION_TEMPLATE,
CONF_SET_POSITION_TOPIC,
CONF_TILT_COMMAND_TEMPLATE,
CONF_TILT_COMMAND_TOPIC,
CONF_TILT_STATUS_TEMPLATE,
CONF_TILT_STATUS_TOPIC,
MQTT_COVER_ATTRIBUTES_BLOCKED,
MqttCover,
)
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ENTITY_ID,
CONF_VALUE_TEMPLATE,
SERVICE_CLOSE_COVER,
SERVICE_CLOSE_COVER_TILT,
SERVICE_OPEN_COVER,
SERVICE_OPEN_COVER_TILT,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_STOP_COVER,
SERVICE_TOGGLE,
SERVICE_TOGGLE_COVER_TILT,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
DEFAULT_CONFIG = {
cover.DOMAIN: {"platform": "mqtt", "name": "test", "state_topic": "test-topic"}
}
async def test_state_via_state_topic(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", STATE_CLOSED)
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async_fire_mqtt_message(hass, "state-topic", STATE_OPEN)
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async def test_opening_and_closing_state_via_custom_state_payload(hass, mqtt_mock):
"""Test the controlling opening and closing state via a custom payload."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"state_opening": "34",
"state_closing": "--43",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "34")
state = hass.states.get("cover.test")
assert state.state == STATE_OPENING
async_fire_mqtt_message(hass, "state-topic", "--43")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSING
async_fire_mqtt_message(hass, "state-topic", STATE_CLOSED)
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async def test_open_closed_state_from_position_optimistic(hass, mqtt_mock):
"""Test the state after setting the position using optimistic mode."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "position-topic",
"set_position_topic": "set-position-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"optimistic": True,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 0},
blocking=True,
)
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 100},
blocking=True,
)
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_position_via_position_topic(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"position_open": 100,
"position_closed": 0,
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "get-position-topic", "0")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async_fire_mqtt_message(hass, "get-position-topic", "100")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async def test_state_via_template(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"value_template": "\
{% if (value | multiply(0.01) | int) == 0 %}\
closed\
{% else %}\
open\
{% endif %}",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
async_fire_mqtt_message(hass, "state-topic", "10000")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(hass, "state-topic", "99")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async def test_state_via_template_and_entity_id(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"value_template": '\
{% if value == "open" or value == "closed" %}\
{{ value }}\
{% else %}\
{{ states(entity_id) }}\
{% endif %}',
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
async_fire_mqtt_message(hass, "state-topic", "open")
async_fire_mqtt_message(hass, "state-topic", "invalid")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(hass, "state-topic", "closed")
async_fire_mqtt_message(hass, "state-topic", "invalid")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async def test_state_via_template_with_json_value(hass, mqtt_mock, caplog):
"""Test the controlling state via topic with JSON value."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"value_template": "{{ value_json.Var1 }}",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
async_fire_mqtt_message(hass, "state-topic", '{ "Var1": "open", "Var2": "other" }')
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(
hass, "state-topic", '{ "Var1": "closed", "Var2": "other" }'
)
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async_fire_mqtt_message(hass, "state-topic", '{ "Var2": "other" }')
assert (
"Template variable warning: 'dict object' has no attribute 'Var1' when rendering"
) in caplog.text
async def test_position_via_template_and_entity_id(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"qos": 0,
"position_template": '\
{% if state_attr(entity_id, "current_position") == None %}\
{{ value }}\
{% else %}\
{{ state_attr(entity_id, "current_position") + value | int }}\
{% endif %}',
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
async_fire_mqtt_message(hass, "get-position-topic", "10")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 10
async_fire_mqtt_message(hass, "get-position-topic", "10")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 20
@pytest.mark.parametrize(
"config, assumed_state",
[
({"command_topic": "abc"}, True),
({"command_topic": "abc", "state_topic": "abc"}, False),
# ({"set_position_topic": "abc"}, True), - not a valid configuration
({"set_position_topic": "abc", "position_topic": "abc"}, False),
({"tilt_command_topic": "abc"}, True),
({"tilt_command_topic": "abc", "tilt_status_topic": "abc"}, False),
],
)
async def test_optimistic_flag(hass, mqtt_mock, config, assumed_state):
"""Test assumed_state is set correctly."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{cover.DOMAIN: {**config, "platform": "mqtt", "name": "test", "qos": 0}},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
if assumed_state:
assert ATTR_ASSUMED_STATE in state.attributes
else:
assert ATTR_ASSUMED_STATE not in state.attributes
async def test_optimistic_state_change(hass, mqtt_mock):
"""Test changing state optimistically."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"qos": 0,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
cover.DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
await hass.services.async_call(
cover.DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
await hass.services.async_call(
cover.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
await hass.services.async_call(
cover.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 0, False)
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async def test_optimistic_state_change_with_position(hass, mqtt_mock):
"""Test changing state optimistically."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"optimistic": True,
"command_topic": "command-topic",
"position_topic": "position-topic",
"qos": 0,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ASSUMED_STATE)
assert state.attributes.get(ATTR_CURRENT_POSITION) is None
await hass.services.async_call(
cover.DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
assert state.attributes.get(ATTR_CURRENT_POSITION) == 100
await hass.services.async_call(
cover.DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
assert state.attributes.get(ATTR_CURRENT_POSITION) == 0
await hass.services.async_call(
cover.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
assert state.attributes.get(ATTR_CURRENT_POSITION) == 100
await hass.services.async_call(
cover.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 0, False)
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
assert state.attributes.get(ATTR_CURRENT_POSITION) == 0
async def test_send_open_cover_command(hass, mqtt_mock):
"""Test the sending of open_cover."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 2,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
await hass.services.async_call(
cover.DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 2, False)
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
async def test_send_close_cover_command(hass, mqtt_mock):
"""Test the sending of close_cover."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 2,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
await hass.services.async_call(
cover.DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 2, False)
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
async def test_send_stop_cover_command(hass, mqtt_mock):
"""Test the sending of stop_cover."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 2,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
await hass.services.async_call(
cover.DOMAIN, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "STOP", 2, False)
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
async def test_current_cover_position(hass, mqtt_mock):
"""Test the current cover position."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"position_open": 100,
"position_closed": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
state_attributes_dict = hass.states.get("cover.test").attributes
assert ATTR_CURRENT_POSITION not in state_attributes_dict
assert ATTR_CURRENT_TILT_POSITION not in state_attributes_dict
assert 4 & hass.states.get("cover.test").attributes["supported_features"] != 4
async_fire_mqtt_message(hass, "get-position-topic", "0")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 0
async_fire_mqtt_message(hass, "get-position-topic", "50")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 50
async_fire_mqtt_message(hass, "get-position-topic", "non-numeric")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 50
async_fire_mqtt_message(hass, "get-position-topic", "101")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 100
async def test_current_cover_position_inverted(hass, mqtt_mock):
"""Test the current cover position."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"position_open": 0,
"position_closed": 100,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
state_attributes_dict = hass.states.get("cover.test").attributes
assert ATTR_CURRENT_POSITION not in state_attributes_dict
assert ATTR_CURRENT_TILT_POSITION not in state_attributes_dict
assert 4 & hass.states.get("cover.test").attributes["supported_features"] != 4
async_fire_mqtt_message(hass, "get-position-topic", "100")
current_percentage_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_percentage_cover_position == 0
assert hass.states.get("cover.test").state == STATE_CLOSED
async_fire_mqtt_message(hass, "get-position-topic", "0")
current_percentage_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_percentage_cover_position == 100
assert hass.states.get("cover.test").state == STATE_OPEN
async_fire_mqtt_message(hass, "get-position-topic", "50")
current_percentage_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_percentage_cover_position == 50
assert hass.states.get("cover.test").state == STATE_OPEN
async_fire_mqtt_message(hass, "get-position-topic", "non-numeric")
current_percentage_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_percentage_cover_position == 50
assert hass.states.get("cover.test").state == STATE_OPEN
async_fire_mqtt_message(hass, "get-position-topic", "101")
current_percentage_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_percentage_cover_position == 0
assert hass.states.get("cover.test").state == STATE_CLOSED
async def test_optimistic_position(hass, mqtt_mock):
"""Test optimistic position is not supported."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"set_position_topic": "set-position-topic",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state is None
async def test_position_update(hass, mqtt_mock):
"""Test cover position update from received MQTT message."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"set_position_topic": "set-position-topic",
"position_open": 100,
"position_closed": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
state_attributes_dict = hass.states.get("cover.test").attributes
assert ATTR_CURRENT_POSITION not in state_attributes_dict
assert ATTR_CURRENT_TILT_POSITION not in state_attributes_dict
assert 4 & hass.states.get("cover.test").attributes["supported_features"] == 4
async_fire_mqtt_message(hass, "get-position-topic", "22")
state_attributes_dict = hass.states.get("cover.test").attributes
assert ATTR_CURRENT_POSITION in state_attributes_dict
assert ATTR_CURRENT_TILT_POSITION not in state_attributes_dict
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 22
@pytest.mark.parametrize(
"pos_template,pos_call,pos_message",
[("{{position-1}}", 43, "42"), ("{{100-62}}", 100, "38")],
)
async def test_set_position_templated(
hass, mqtt_mock, pos_template, pos_call, pos_message
):
"""Test setting cover position via template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"position_open": 100,
"position_closed": 0,
"set_position_topic": "set-position-topic",
"set_position_template": pos_template,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: pos_call},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"set-position-topic", pos_message, 0, False
)
async def test_set_position_templated_and_attributes(hass, mqtt_mock):
"""Test setting cover position via template and using entities attributes."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"position_open": 100,
"position_closed": 0,
"set_position_topic": "set-position-topic",
"set_position_template": '\
{% if position > 99 %}\
{% if state_attr(entity_id, "current_position") == None %}\
{{ 5 }}\
{% else %}\
{{ 23 }}\
{% endif %}\
{% else %}\
{{ 42 }}\
{% endif %}',
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 100},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("set-position-topic", "5", 0, False)
async def test_set_tilt_templated(hass, mqtt_mock):
"""Test setting cover tilt position via template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"tilt_command_topic": "tilt-command-topic",
"position_open": 100,
"position_closed": 0,
"set_position_topic": "set-position-topic",
"set_position_template": "{{position-1}}",
"tilt_command_template": "{{tilt_position+1}}",
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_TILT_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 41},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "42", 0, False
)
async def test_set_tilt_templated_and_attributes(hass, mqtt_mock):
"""Test setting cover tilt position via template and using entities attributes."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"tilt_command_topic": "tilt-command-topic",
"position_open": 100,
"position_closed": 0,
"set_position_topic": "set-position-topic",
"set_position_template": "{{position-1}}",
"tilt_command_template": '\
{% if state_attr(entity_id, "friendly_name") != "test" %}\
{{ 5 }}\
{% else %}\
{{ 23 }}\
{% endif %}',
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_TILT_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 99},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "23", 0, False
)
async def test_set_position_untemplated(hass, mqtt_mock):
"""Test setting cover position via template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "state-topic",
"command_topic": "command-topic",
"set_position_topic": "position-topic",
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 62},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("position-topic", "62", 0, False)
async def test_set_position_untemplated_custom_percentage_range(hass, mqtt_mock):
"""Test setting cover position via template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "state-topic",
"command_topic": "command-topic",
"set_position_topic": "position-topic",
"position_open": 0,
"position_closed": 100,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 38},
blocking=True,
)
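    # With position_open=0 and position_closed=100 the scale is inverted, so a
    # Home Assistant position of 38 is published as 62 to the device.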
mqtt_mock.async_publish.assert_called_once_with("position-topic", "62", 0, False)
async def test_no_command_topic(hass, mqtt_mock):
"""Test with no command topic."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command",
"tilt_status_topic": "tilt-status",
}
},
)
await hass.async_block_till_done()
assert hass.states.get("cover.test").attributes["supported_features"] == 240
async def test_no_payload_close(hass, mqtt_mock):
"""Test with no close payload."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": None,
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
assert hass.states.get("cover.test").attributes["supported_features"] == 9
async def test_no_payload_open(hass, mqtt_mock):
"""Test with no open payload."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"qos": 0,
"payload_open": None,
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
assert hass.states.get("cover.test").attributes["supported_features"] == 10
async def test_no_payload_stop(hass, mqtt_mock):
"""Test with no stop payload."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": None,
}
},
)
await hass.async_block_till_done()
assert hass.states.get("cover.test").attributes["supported_features"] == 3
async def test_with_command_topic_and_tilt(hass, mqtt_mock):
"""Test with command topic and tilt config."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"command_topic": "test",
"platform": "mqtt",
"name": "test",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command",
"tilt_status_topic": "tilt-status",
}
},
)
await hass.async_block_till_done()
assert hass.states.get("cover.test").attributes["supported_features"] == 251
async def test_tilt_defaults(hass, mqtt_mock):
"""Test the defaults."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command",
"tilt_status_topic": "tilt-status",
}
},
)
await hass.async_block_till_done()
state_attributes_dict = hass.states.get("cover.test").attributes
assert ATTR_CURRENT_TILT_POSITION in state_attributes_dict
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_position == STATE_UNKNOWN
async def test_tilt_via_invocation_defaults(hass, mqtt_mock):
"""Test tilt defaults on close/open."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_OPEN_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "100", 0, False
)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_CLOSE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", "0", 0, False)
mqtt_mock.async_publish.reset_mock()
# Close tilt status would be received from device when non-optimistic
async_fire_mqtt_message(hass, "tilt-status-topic", "0")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 0
await hass.services.async_call(
cover.DOMAIN,
SERVICE_TOGGLE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "100", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Open tilt status would be received from device when non-optimistic
async_fire_mqtt_message(hass, "tilt-status-topic", "100")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 100
await hass.services.async_call(
cover.DOMAIN,
SERVICE_TOGGLE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", "0", 0, False)
async def test_tilt_given_value(hass, mqtt_mock):
"""Test tilting to a given value."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_opened_value": 80,
"tilt_closed_value": 25,
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_OPEN_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "80", 0, False
)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_CLOSE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "25", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Close tilt status would be received from device when non-optimistic
async_fire_mqtt_message(hass, "tilt-status-topic", "25")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 25
await hass.services.async_call(
cover.DOMAIN,
SERVICE_TOGGLE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "80", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Open tilt status would be received from device when non-optimistic
async_fire_mqtt_message(hass, "tilt-status-topic", "80")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 80
await hass.services.async_call(
cover.DOMAIN,
SERVICE_TOGGLE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "25", 0, False
)
async def test_tilt_given_value_optimistic(hass, mqtt_mock):
"""Test tilting to a given value."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_opened_value": 80,
"tilt_closed_value": 25,
"tilt_optimistic": True,
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_OPEN_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 80
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "80", 0, False
)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_TILT_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 50},
blocking=True,
)
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 50
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "50", 0, False
)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_CLOSE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 25
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "25", 0, False
)
async def test_tilt_given_value_altered_range(hass, mqtt_mock):
"""Test tilting to a given value."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_opened_value": 25,
"tilt_closed_value": 0,
"tilt_min": 0,
"tilt_max": 50,
"tilt_optimistic": True,
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_OPEN_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
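    # With tilt_min=0 and tilt_max=50, the opened value 25 is half of the
    # device range, so Home Assistant reports 50 % while publishing the raw 25.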
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 50
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "25", 0, False
)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_CLOSE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 0
mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", "0", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_TOGGLE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 50
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "25", 0, False
)
async def test_tilt_via_topic(hass, mqtt_mock):
"""Test tilt by updating status via MQTT."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tilt-status-topic", "0")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 0
async_fire_mqtt_message(hass, "tilt-status-topic", "50")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 50
async def test_tilt_via_topic_template(hass, mqtt_mock):
"""Test tilt by updating status via MQTT and template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_status_template": "{{ (value | multiply(0.01)) | int }}",
"tilt_opened_value": 400,
"tilt_closed_value": 125,
}
},
)
await hass.async_block_till_done()
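    # The status template scales the raw payload by 0.01 and truncates,
    # so "99" renders as 0 and "5000" renders as 50.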
async_fire_mqtt_message(hass, "tilt-status-topic", "99")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 0
async_fire_mqtt_message(hass, "tilt-status-topic", "5000")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 50
async def test_tilt_via_topic_template_json_value(hass, mqtt_mock, caplog):
"""Test tilt by updating status via MQTT and template with JSON value."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_status_template": "{{ value_json.Var1 }}",
"tilt_opened_value": 400,
"tilt_closed_value": 125,
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tilt-status-topic", '{"Var1": 9, "Var2": 30}')
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 9
async_fire_mqtt_message(hass, "tilt-status-topic", '{"Var1": 50, "Var2": 10}')
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 50
async_fire_mqtt_message(hass, "tilt-status-topic", '{"Var2": 10}')
assert (
"Template variable warning: 'dict object' has no attribute 'Var1' when rendering"
) in caplog.text
async def test_tilt_via_topic_altered_range(hass, mqtt_mock):
"""Test tilt status via MQTT with altered tilt range."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_min": 0,
"tilt_max": 50,
}
},
)
await hass.async_block_till_done()
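    # Raw device values are rescaled from the 0-50 device range to 0-100%,
    # so a raw 25 is reported as a 50% tilt position.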
async_fire_mqtt_message(hass, "tilt-status-topic", "0")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 0
async_fire_mqtt_message(hass, "tilt-status-topic", "50")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 100
async_fire_mqtt_message(hass, "tilt-status-topic", "25")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 50
async def test_tilt_status_out_of_range_warning(hass, caplog, mqtt_mock):
"""Test tilt status via MQTT tilt out of range warning message."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_min": 0,
"tilt_max": 50,
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tilt-status-topic", "60")
assert (
"Payload '60' is out of range, must be between '0' and '50' inclusive"
) in caplog.text
async def test_tilt_status_not_numeric_warning(hass, caplog, mqtt_mock):
"""Test tilt status via MQTT tilt not numeric warning message."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_min": 0,
"tilt_max": 50,
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tilt-status-topic", "abc")
assert ("Payload 'abc' is not numeric") in caplog.text
async def test_tilt_via_topic_altered_range_inverted(hass, mqtt_mock):
"""Test tilt status via MQTT with altered tilt range and inverted tilt position."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_min": 50,
"tilt_max": 0,
}
},
)
await hass.async_block_till_done()
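    # Swapping tilt_min/tilt_max inverts the scale: a raw 0 now means fully
    # open (100%) and a raw 50 means fully closed (0%).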
async_fire_mqtt_message(hass, "tilt-status-topic", "0")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 100
async_fire_mqtt_message(hass, "tilt-status-topic", "50")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 0
async_fire_mqtt_message(hass, "tilt-status-topic", "25")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 50
async def test_tilt_via_topic_template_altered_range(hass, mqtt_mock):
"""Test tilt status via MQTT and template with altered tilt range."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_status_template": "{{ (value | multiply(0.01)) | int }}",
"tilt_opened_value": 400,
"tilt_closed_value": 125,
"tilt_min": 0,
"tilt_max": 50,
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tilt-status-topic", "99")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 0
async_fire_mqtt_message(hass, "tilt-status-topic", "5000")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 100
async_fire_mqtt_message(hass, "tilt-status-topic", "2500")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 50
async def test_tilt_position(hass, mqtt_mock):
"""Test tilt via method invocation."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_TILT_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 50},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "50", 0, False
)
async def test_tilt_position_templated(hass, mqtt_mock):
"""Test tilt position via template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_command_template": "{{100-32}}",
}
},
)
await hass.async_block_till_done()
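    # The command template here is a constant expression, so whatever tilt
    # position is requested, "68" (100 - 32) is published.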
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_TILT_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 100},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "68", 0, False
)
async def test_tilt_position_altered_range(hass, mqtt_mock):
"""Test tilt via method invocation with altered range."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_opened_value": 400,
"tilt_closed_value": 125,
"tilt_min": 0,
"tilt_max": 50,
}
},
)
await hass.async_block_till_done()
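    # A requested 50% tilt is projected back onto the 0-50 device range,
    # hence the published payload of "25".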
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_TILT_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 50},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"tilt-command-topic", "25", 0, False
)
async def test_find_percentage_in_range_defaults(hass, mqtt_mock):
"""Test find percentage in range with default range."""
mqtt_cover = MqttCover(
hass,
{
"name": "cover.test",
"state_topic": "state-topic",
"get_position_topic": None,
"command_topic": "command-topic",
"availability_topic": None,
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"qos": 0,
"retain": False,
"state_open": "OPEN",
"state_closed": "CLOSE",
"position_open": 100,
"position_closed": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"payload_available": None,
"payload_not_available": None,
"optimistic": False,
"value_template": None,
"tilt_open_position": 100,
"tilt_closed_position": 0,
"tilt_min": 0,
"tilt_max": 100,
"tilt_optimistic": False,
"set_position_topic": None,
"set_position_template": None,
"unique_id": None,
"device_config": None,
},
None,
None,
)
assert mqtt_cover.find_percentage_in_range(44) == 44
assert mqtt_cover.find_percentage_in_range(44, "cover") == 44
async def test_find_percentage_in_range_altered(hass, mqtt_mock):
"""Test find percentage in range with altered range."""
mqtt_cover = MqttCover(
hass,
{
"name": "cover.test",
"state_topic": "state-topic",
"get_position_topic": None,
"command_topic": "command-topic",
"availability_topic": None,
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"qos": 0,
"retain": False,
"state_open": "OPEN",
"state_closed": "CLOSE",
"position_open": 180,
"position_closed": 80,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"payload_available": None,
"payload_not_available": None,
"optimistic": False,
"value_template": None,
"tilt_open_position": 180,
"tilt_closed_position": 80,
"tilt_min": 80,
"tilt_max": 180,
"tilt_optimistic": False,
"set_position_topic": None,
"set_position_template": None,
"unique_id": None,
"device_config": None,
},
None,
None,
)
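    # (120 - 80) / (180 - 80) * 100 == 40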
assert mqtt_cover.find_percentage_in_range(120) == 40
assert mqtt_cover.find_percentage_in_range(120, "cover") == 40
async def test_find_percentage_in_range_defaults_inverted(hass, mqtt_mock):
"""Test find percentage in range with default range but inverted."""
mqtt_cover = MqttCover(
hass,
{
"name": "cover.test",
"state_topic": "state-topic",
"get_position_topic": None,
"command_topic": "command-topic",
"availability_topic": None,
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"qos": 0,
"retain": False,
"state_open": "OPEN",
"state_closed": "CLOSE",
"position_open": 0,
"position_closed": 100,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"payload_available": None,
"payload_not_available": None,
"optimistic": False,
"value_template": None,
"tilt_open_position": 100,
"tilt_closed_position": 0,
"tilt_min": 100,
"tilt_max": 0,
"tilt_optimistic": False,
"set_position_topic": None,
"set_position_template": None,
"unique_id": None,
"device_config": None,
},
None,
None,
)
assert mqtt_cover.find_percentage_in_range(44) == 56
assert mqtt_cover.find_percentage_in_range(44, "cover") == 56
async def test_find_percentage_in_range_altered_inverted(hass, mqtt_mock):
"""Test find percentage in range with altered range and inverted."""
mqtt_cover = MqttCover(
hass,
{
"name": "cover.test",
"state_topic": "state-topic",
"get_position_topic": None,
"command_topic": "command-topic",
"availability_topic": None,
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"qos": 0,
"retain": False,
"state_open": "OPEN",
"state_closed": "CLOSE",
"position_open": 80,
"position_closed": 180,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"payload_available": None,
"payload_not_available": None,
"optimistic": False,
"value_template": None,
"tilt_open_position": 180,
"tilt_closed_position": 80,
"tilt_min": 180,
"tilt_max": 80,
"tilt_optimistic": False,
"set_position_topic": None,
"set_position_template": None,
"unique_id": None,
"device_config": None,
},
None,
None,
)
assert mqtt_cover.find_percentage_in_range(120) == 60
assert mqtt_cover.find_percentage_in_range(120, "cover") == 60
async def test_find_in_range_defaults(hass, mqtt_mock):
"""Test find in range with default range."""
mqtt_cover = MqttCover(
hass,
{
"name": "cover.test",
"state_topic": "state-topic",
"get_position_topic": None,
"command_topic": "command-topic",
"availability_topic": None,
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"qos": 0,
"retain": False,
"state_open": "OPEN",
"state_closed": "CLOSE",
"position_open": 100,
"position_closed": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"payload_available": None,
"payload_not_available": None,
"optimistic": False,
"value_template": None,
"tilt_open_position": 100,
"tilt_closed_position": 0,
"tilt_min": 0,
"tilt_max": 100,
"tilt_optimistic": False,
"set_position_topic": None,
"set_position_template": None,
"unique_id": None,
"device_config": None,
},
None,
None,
)
assert mqtt_cover.find_in_range_from_percent(44) == 44
assert mqtt_cover.find_in_range_from_percent(44, "cover") == 44
async def test_find_in_range_altered(hass, mqtt_mock):
"""Test find in range with altered range."""
mqtt_cover = MqttCover(
hass,
{
"name": "cover.test",
"state_topic": "state-topic",
"get_position_topic": None,
"command_topic": "command-topic",
"availability_topic": None,
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"qos": 0,
"retain": False,
"state_open": "OPEN",
"state_closed": "CLOSE",
"position_open": 180,
"position_closed": 80,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"payload_available": None,
"payload_not_available": None,
"optimistic": False,
"value_template": None,
"tilt_open_position": 180,
"tilt_closed_position": 80,
"tilt_min": 80,
"tilt_max": 180,
"tilt_optimistic": False,
"set_position_topic": None,
"set_position_template": None,
"unique_id": None,
"device_config": None,
},
None,
None,
)
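    # 80 + 40% of (180 - 80) == 120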
assert mqtt_cover.find_in_range_from_percent(40) == 120
assert mqtt_cover.find_in_range_from_percent(40, "cover") == 120
async def test_find_in_range_defaults_inverted(hass, mqtt_mock):
"""Test find in range with default range but inverted."""
mqtt_cover = MqttCover(
hass,
{
"name": "cover.test",
"state_topic": "state-topic",
"get_position_topic": None,
"command_topic": "command-topic",
"availability_topic": None,
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"qos": 0,
"retain": False,
"state_open": "OPEN",
"state_closed": "CLOSE",
"position_open": 0,
"position_closed": 100,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"payload_available": None,
"payload_not_available": None,
"optimistic": False,
"value_template": None,
"tilt_open_position": 100,
"tilt_closed_position": 0,
"tilt_min": 100,
"tilt_max": 0,
"tilt_optimistic": False,
"set_position_topic": None,
"set_position_template": None,
"unique_id": None,
"device_config": None,
},
None,
None,
)
assert mqtt_cover.find_in_range_from_percent(56) == 44
assert mqtt_cover.find_in_range_from_percent(56, "cover") == 44
async def test_find_in_range_altered_inverted(hass, mqtt_mock):
"""Test find in range with altered range and inverted."""
mqtt_cover = MqttCover(
hass,
{
"name": "cover.test",
"state_topic": "state-topic",
"get_position_topic": None,
"command_topic": "command-topic",
"availability_topic": None,
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"qos": 0,
"retain": False,
"state_open": "OPEN",
"state_closed": "CLOSE",
"position_open": 80,
"position_closed": 180,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"payload_available": None,
"payload_not_available": None,
"optimistic": False,
"value_template": None,
"tilt_open_position": 180,
"tilt_closed_position": 80,
"tilt_min": 180,
"tilt_max": 80,
"tilt_optimistic": False,
"set_position_topic": None,
"set_position_template": None,
"unique_id": None,
"device_config": None,
},
None,
None,
)
assert mqtt_cover.find_in_range_from_percent(60) == 120
assert mqtt_cover.find_in_range_from_percent(60, "cover") == 120
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_valid_device_class(hass, mqtt_mock):
"""Test the setting of a valid sensor class."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"device_class": "garage",
"state_topic": "test-topic",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.attributes.get("device_class") == "garage"
async def test_invalid_device_class(hass, mqtt_mock):
"""Test the setting of an invalid sensor class."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"device_class": "abc123",
"state_topic": "test-topic",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state is None
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_blocked_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG, MQTT_COVER_ATTRIBUTES_BLOCKED
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_json(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique_id option only creates one cover per id."""
config = {
cover.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"state_topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, cover.DOMAIN, config)
async def test_discovery_removal_cover(hass, mqtt_mock, caplog):
"""Test removal of discovered cover."""
data = '{ "name": "test", "command_topic": "test_topic" }'
await help_test_discovery_removal(hass, mqtt_mock, caplog, cover.DOMAIN, data)
async def test_discovery_update_cover(hass, mqtt_mock, caplog):
"""Test update of discovered cover."""
config1 = {"name": "Beer", "command_topic": "test_topic"}
config2 = {"name": "Milk", "command_topic": "test_topic"}
await help_test_discovery_update(
hass, mqtt_mock, caplog, cover.DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_cover(hass, mqtt_mock, caplog):
"""Test update of discovered cover."""
data1 = '{ "name": "Beer", "command_topic": "test_topic" }'
with patch(
"homeassistant.components.mqtt.cover.MqttCover.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, cover.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer", "command_topic": "test_topic#" }'
data2 = '{ "name": "Milk", "command_topic": "test_topic" }'
await help_test_discovery_broken(
hass, mqtt_mock, caplog, cover.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT cover device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT cover device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
)
async def test_state_and_position_topics_state_not_set_via_position_topic(
hass, mqtt_mock
):
"""Test state is not set via position topic when both state and position topics are set."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"position_topic": "get-position-topic",
"position_open": 100,
"position_closed": 0,
"state_open": "OPEN",
"state_closed": "CLOSE",
"command_topic": "command-topic",
"qos": 0,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "OPEN")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(hass, "get-position-topic", "0")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(hass, "get-position-topic", "100")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(hass, "state-topic", "CLOSE")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async_fire_mqtt_message(hass, "get-position-topic", "0")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async_fire_mqtt_message(hass, "get-position-topic", "100")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async def test_set_state_via_position_using_stopped_state(hass, mqtt_mock):
"""Test the controlling state via position topic using stopped state."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"position_topic": "get-position-topic",
"position_open": 100,
"position_closed": 0,
"state_open": "OPEN",
"state_closed": "CLOSE",
"state_stopped": "STOPPED",
"command_topic": "command-topic",
"qos": 0,
}
},
)
await hass.async_block_till_done()
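    # When the stopped payload arrives, the cover state is derived from the
    # most recently received position: 0 -> closed, 100 -> open.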
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "OPEN")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(hass, "get-position-topic", "0")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(hass, "state-topic", "STOPPED")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async_fire_mqtt_message(hass, "get-position-topic", "100")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async_fire_mqtt_message(hass, "state-topic", "STOPPED")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async def test_position_via_position_topic_template(hass, mqtt_mock):
"""Test position by updating status via position template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"set_position_topic": "set-position-topic",
"position_topic": "get-position-topic",
"position_template": "{{ (value | multiply(0.01)) | int }}",
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "get-position-topic", "99")
current_cover_position_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position_position == 0
async_fire_mqtt_message(hass, "get-position-topic", "5000")
current_cover_position_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position_position == 50
async def test_position_via_position_topic_template_json_value(hass, mqtt_mock, caplog):
"""Test position by updating status via position template with a JSON value."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"set_position_topic": "set-position-topic",
"position_topic": "get-position-topic",
"position_template": "{{ value_json.Var1 }}",
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "get-position-topic", '{"Var1": 9, "Var2": 60}')
current_cover_position_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position_position == 9
async_fire_mqtt_message(hass, "get-position-topic", '{"Var1": 50, "Var2": 10}')
current_cover_position_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position_position == 50
async_fire_mqtt_message(hass, "get-position-topic", '{"Var2": 60}')
assert (
"Template variable warning: 'dict object' has no attribute 'Var1' when rendering"
) in caplog.text
async def test_position_template_with_entity_id(hass, mqtt_mock):
"""Test position by updating status via position template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"set_position_topic": "set-position-topic",
"position_topic": "get-position-topic",
"position_template": '\
{% if state_attr(entity_id, "current_position") != None %}\
{{ value | int + state_attr(entity_id, "current_position") }} \
{% else %} \
{{ value }} \
{% endif %}',
}
},
)
await hass.async_block_till_done()
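    # The template adds the incoming value to the entity's current position,
    # so two consecutive "10" payloads yield 10 and then 20.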
async_fire_mqtt_message(hass, "get-position-topic", "10")
current_cover_position_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position_position == 10
async_fire_mqtt_message(hass, "get-position-topic", "10")
current_cover_position_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position_position == 20
async def test_position_via_position_topic_template_return_json(hass, mqtt_mock):
"""Test position by updating status via position template and returning json."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"set_position_topic": "set-position-topic",
"position_topic": "get-position-topic",
"position_template": '{{ {"position" : value} | tojson }}',
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "get-position-topic", "55")
current_cover_position_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position_position == 55
async def test_position_via_position_topic_template_return_json_warning(
hass, caplog, mqtt_mock
):
"""Test position by updating status via position template returning json without position attribute."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"set_position_topic": "set-position-topic",
"position_topic": "get-position-topic",
"position_template": '{{ {"pos" : value} | tojson }}',
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "get-position-topic", "55")
assert (
"Template (position_template) returned JSON without position attribute"
in caplog.text
)
async def test_position_and_tilt_via_position_topic_template_return_json(
hass, mqtt_mock
):
"""Test position and tilt by updating the position via position template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"set_position_topic": "set-position-topic",
"position_topic": "get-position-topic",
"position_template": '\
{{ {"position" : value, "tilt_position" : (value | int / 2)| int } | tojson }}',
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "get-position-topic", "0")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
current_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_position == 0 and current_tilt_position == 0
async_fire_mqtt_message(hass, "get-position-topic", "99")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
current_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_position == 99 and current_tilt_position == 49
async def test_position_via_position_topic_template_all_variables(hass, mqtt_mock):
"""Test position by updating status via position template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"set_position_topic": "set-position-topic",
"position_topic": "get-position-topic",
"tilt_command_topic": "tilt-command-topic",
"position_open": 99,
"position_closed": 1,
"tilt_min": 11,
"tilt_max": 22,
"position_template": "\
{% if value | int < tilt_max %}\
{{ tilt_min }}\
{% endif %}\
{% if value | int > position_closed %}\
{{ position_open }}\
{% endif %}",
}
},
)
await hass.async_block_till_done()
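    # The template can reference tilt_min/tilt_max and position_open/closed:
    # "0" renders tilt_min (11), which is ~10% of the 1-99 range, while "55"
    # renders position_open (99), i.e. 100%.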
async_fire_mqtt_message(hass, "get-position-topic", "0")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 10
async_fire_mqtt_message(hass, "get-position-topic", "55")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 100
async def test_set_state_via_stopped_state_no_position_topic(hass, mqtt_mock):
"""Test the controlling state via stopped state when no position topic."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"state_open": "OPEN",
"state_closed": "CLOSE",
"state_stopped": "STOPPED",
"state_opening": "OPENING",
"state_closing": "CLOSING",
"command_topic": "command-topic",
"qos": 0,
"optimistic": False,
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "state-topic", "OPEN")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(hass, "state-topic", "OPENING")
state = hass.states.get("cover.test")
assert state.state == STATE_OPENING
async_fire_mqtt_message(hass, "state-topic", "STOPPED")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(hass, "state-topic", "CLOSING")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSING
async_fire_mqtt_message(hass, "state-topic", "STOPPED")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async def test_position_via_position_topic_template_return_invalid_json(
hass, caplog, mqtt_mock
):
"""Test position by updating status via position template and returning invalid json."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"set_position_topic": "set-position-topic",
"position_topic": "get-position-topic",
"position_template": '{{ {"position" : invalid_json} }}',
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "get-position-topic", "55")
assert ("Payload '{'position': Undefined}' is not numeric") in caplog.text
async def test_set_position_topic_without_get_position_topic_error(
hass, caplog, mqtt_mock
):
"""Test error when set_position_topic is used without position_topic."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"set_position_topic": "set-position-topic",
"value_template": "{{100-62}}",
}
},
)
await hass.async_block_till_done()
assert (
f"'{CONF_SET_POSITION_TOPIC}' must be set together with '{CONF_GET_POSITION_TOPIC}'."
) in caplog.text
async def test_value_template_without_state_topic_error(hass, caplog, mqtt_mock):
"""Test error when value_template is used and state_topic is missing."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"value_template": "{{100-62}}",
}
},
)
await hass.async_block_till_done()
assert (
f"'{CONF_VALUE_TEMPLATE}' must be set together with '{CONF_STATE_TOPIC}'."
) in caplog.text
async def test_position_template_without_position_topic_error(hass, caplog, mqtt_mock):
"""Test error when position_template is used and position_topic is missing."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"position_template": "{{100-52}}",
}
},
)
await hass.async_block_till_done()
assert (
f"'{CONF_GET_POSITION_TEMPLATE}' must be set together with '{CONF_GET_POSITION_TOPIC}'."
in caplog.text
)
async def test_set_position_template_without_set_position_topic(
hass, caplog, mqtt_mock
):
"""Test error when set_position_template is used and set_position_topic is missing."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"set_position_template": "{{100-42}}",
}
},
)
await hass.async_block_till_done()
assert (
f"'{CONF_SET_POSITION_TEMPLATE}' must be set together with '{CONF_SET_POSITION_TOPIC}'."
in caplog.text
)
async def test_tilt_command_template_without_tilt_command_topic(
hass, caplog, mqtt_mock
):
"""Test error when tilt_command_template is used and tilt_command_topic is missing."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"tilt_command_template": "{{100-32}}",
}
},
)
await hass.async_block_till_done()
assert (
f"'{CONF_TILT_COMMAND_TEMPLATE}' must be set together with '{CONF_TILT_COMMAND_TOPIC}'."
in caplog.text
)
async def test_tilt_status_template_without_tilt_status_topic(
hass, caplog, mqtt_mock
):
"""Test error when tilt_status_template is used and tilt_status_topic is missing."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"tilt_status_template": "{{100-22}}",
}
},
)
await hass.async_block_till_done()
assert (
f"'{CONF_TILT_STATUS_TEMPLATE}' must be set together with '{CONF_TILT_STATUS_TOPIC}'."
in caplog.text
)
| 32.095752
| 107
| 0.591123
|
794d1639433e46b61c5a932940ac1015c7fa9ee0
| 593
|
py
|
Python
|
tomolab/DataSources/PET/__init__.py
|
TomographyLab/TomoLab
|
86b9a5894ef1660d7f4de39f560f1f92024b40f8
|
[
"Apache-2.0"
] | 5
|
2019-06-01T13:16:00.000Z
|
2022-03-02T10:21:59.000Z
|
tomolab/DataSources/PET/__init__.py
|
TomographyLab/TomoLab
|
86b9a5894ef1660d7f4de39f560f1f92024b40f8
|
[
"Apache-2.0"
] | null | null | null |
tomolab/DataSources/PET/__init__.py
|
TomographyLab/TomoLab
|
86b9a5894ef1660d7f4de39f560f1f92024b40f8
|
[
"Apache-2.0"
] | 1
|
2019-06-01T13:19:18.000Z
|
2019-06-01T13:19:18.000Z
|
# -*- coding: utf-8 -*-
# tomolab
# Michele Scipioni
# Harvard University, Martinos Center for Biomedical Imaging
# University of Pisa
__all__ = ['convert_listmode_dicom_to_interfile',
'import_interfile_projection', 'export_interfile_projection', 'import_h5f_projection',
'import_interfile_volume', 'export_interfile_volume']
from .PET_listmode import convert_listmode_dicom_to_interfile
from .PET_sinogram import import_interfile_projection, export_interfile_projection, import_h5f_projection
from .PET_volume import import_interfile_volume, export_interfile_volume
| 37.0625
| 105
| 0.812816
|
794d1659c3149283d507df6f36c3a4dd468bc07d
| 1,525
|
py
|
Python
|
analyzer/migrations/0001_initial.py
|
awwong1/semscrape
|
7e7184a35d6cc96bad9a2e64ab7211b20df4dad6
|
[
"Apache-2.0"
] | null | null | null |
analyzer/migrations/0001_initial.py
|
awwong1/semscrape
|
7e7184a35d6cc96bad9a2e64ab7211b20df4dad6
|
[
"Apache-2.0"
] | null | null | null |
analyzer/migrations/0001_initial.py
|
awwong1/semscrape
|
7e7184a35d6cc96bad9a2e64ab7211b20df4dad6
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.3 on 2020-11-06 19:19
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
("crawler", "0004_rssentry_dl_html"),
]
operations = [
migrations.CreateModel(
name="Article",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("title", models.CharField(default=None, max_length=2048, null=True)),
(
"keywords",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=1024),
default=list,
size=None,
),
),
("author", models.CharField(default=None, max_length=2048, null=True)),
("body", models.TextField(default=None, null=True)),
(
"rss_entry",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to="crawler.rssentry",
),
),
],
),
]
| 29.901961
| 87
| 0.443934
|
794d167a257241b4ad475e04e3c5f64a99e7b07b
| 2,008
|
py
|
Python
|
tenable/nessus/permissions.py
|
tecnobabble/pyTenable
|
29e7ee65cdee70fbaaade295a9a5055c4d80b17f
|
[
"MIT"
] | 1
|
2022-03-01T17:17:19.000Z
|
2022-03-01T17:17:19.000Z
|
tenable/nessus/permissions.py
|
tecnobabble/pyTenable
|
29e7ee65cdee70fbaaade295a9a5055c4d80b17f
|
[
"MIT"
] | null | null | null |
tenable/nessus/permissions.py
|
tecnobabble/pyTenable
|
29e7ee65cdee70fbaaade295a9a5055c4d80b17f
|
[
"MIT"
] | 1
|
2022-03-01T17:17:30.000Z
|
2022-03-01T17:17:30.000Z
|
'''
Permissions
===========
Methods described in this section relate to the permissions API.
These methods can be accessed at ``Nessus.permissions``.
.. rst-class:: hide-signature
.. autoclass:: PermissionsAPI
:members:
'''
from typing import Dict, Optional, List
from typing_extensions import Literal
from tenable.base.endpoint import APIEndpoint
class PermissionsAPI(APIEndpoint):
_path = 'permissions'
def details(self,
object_type: Literal['scanner'],
object_id: int
) -> List[Dict]:
'''
Retrieves the access control list for the specified object
Args:
object_type (str): The type of permissions object
object_id (int): The unique id of the object to retrieve
Returns:
List:
List of ACL objects.
Example:
>>> nessus.permissions.details('scanner', 1)
'''
return self._get(f'{object_type}/{object_id}')
def edit(self,
object_type: Literal['scanner'],
object_id: int,
acls: List[Dict]
) -> None:
'''
Updates the permissions for the specified object
Args:
object_type (str): The type of object to modify
object_id (int): The unique id of the object to modify
acls (list[dict]): The list of access control objects to apply
Example:
>>> nessus.permissions.edit('scanner', 1, acls=[
... {
... 'type': 'default',
... 'permissions': 16
... }, {
... 'type': 'user',
... 'permissions': 64,
... 'name': 'admin',
... 'id': 1,
... 'owner': 1
            ...     }])
'''
self._put(f'{object_type}/{object_id}', json={'acls': acls})
| 29.529412
| 74
| 0.49502
|
794d168ef4b9a1b8ff2aafa438c8aee846acfaa2
| 3,375
|
py
|
Python
|
chemreg/lists/tests/factories.py
|
Chemical-Curation/chemcurator
|
bcd7fab84e407f06502e6873c38820724d4e54e7
|
[
"MIT"
] | 1
|
2020-10-05T18:02:24.000Z
|
2020-10-05T18:02:24.000Z
|
chemreg/lists/tests/factories.py
|
Chemical-Curation/chemcurator_django
|
bcd7fab84e407f06502e6873c38820724d4e54e7
|
[
"MIT"
] | 207
|
2020-01-30T19:17:44.000Z
|
2021-02-24T19:45:29.000Z
|
chemreg/lists/tests/factories.py
|
Chemical-Curation/chemcurator_django
|
bcd7fab84e407f06502e6873c38820724d4e54e7
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import factory
from chemreg.common.factory import ControlledVocabularyFactory, DjangoSerializerFactory
from chemreg.lists.serializers import (
AccessibilityTypeSerializer,
ExternalContactSerializer,
IdentifierTypeSerializer,
ListSerializer,
ListTypeSerializer,
RecordIdentifierSerializer,
RecordSerializer,
)
from chemreg.substance.tests.factories import SubstanceFactory
class AccessibilityTypeFactory(DjangoSerializerFactory, ControlledVocabularyFactory):
"""Manufactures `AccessibilityType` models and serializers."""
class Meta:
model = AccessibilityTypeSerializer
class ExternalContactFactory(DjangoSerializerFactory):
"""Manufactures `ExternalContact` models and serializers."""
name = factory.Faker("text", max_nb_chars=49)
email = factory.Faker("text", max_nb_chars=49)
phone = factory.Faker("text", max_nb_chars=15)
class Meta:
model = ExternalContactSerializer
class IdentifierTypeFactory(DjangoSerializerFactory, ControlledVocabularyFactory):
"""Manufactures `IdentifierType` models."""
class Meta:
model = IdentifierTypeSerializer
class ListTypeFactory(DjangoSerializerFactory, ControlledVocabularyFactory):
"""Manufactures `ListType` models and serializers."""
class Meta:
model = ListTypeSerializer
class ListFactory(DjangoSerializerFactory):
"""Manufactures `List` models and serializers."""
name = factory.Sequence(lambda n: f"{factory.Faker('slug').generate()}-{n}")
label = factory.Faker("text", max_nb_chars=255)
short_description = factory.Faker("sentence")
long_description = factory.Faker("sentence")
source_url = factory.Faker("text", max_nb_chars=500)
source_reference = factory.Faker("text", max_nb_chars=500)
source_doi = factory.Faker("text", max_nb_chars=500)
date_of_source_collection = datetime.now()
# Related Factories
list_accessibility = factory.SubFactory(AccessibilityTypeFactory)
external_contact = factory.SubFactory(ExternalContactFactory)
class Meta:
model = ListSerializer
@factory.post_generation
def owners(self, create, extracted, **kwargs):
if not create:
return
if extracted:
self.owners.add(extracted)
@factory.post_generation
def types(self, create, extracted, **kwargs):
if not create:
return
if extracted:
self.types.add(extracted)
class RecordFactory(DjangoSerializerFactory):
"""Manufactures `Record` models and serializers."""
external_id = factory.Sequence(lambda n: n)
score = factory.Faker("pyfloat")
message = factory.Faker("text", max_nb_chars=500)
is_validated = factory.Faker("pybool")
# Related Factories
list = factory.SubFactory(ListFactory)
substance = factory.SubFactory(SubstanceFactory)
class Meta:
model = RecordSerializer
class RecordIdentifierFactory(DjangoSerializerFactory):
"""Manufactures `RecordIdentifier` models and serializers."""
identifier = factory.Faker("text")
identifier_label = factory.Faker("text", max_nb_chars=100)
# Related Factories
record = factory.SubFactory(RecordFactory)
identifier_type = factory.SubFactory(IdentifierTypeFactory)
class Meta:
model = RecordIdentifierSerializer
| 30.133929
| 87
| 0.730667
|
794d173ea68a6bca97c13d15b53a7a43930d651f
| 10,423
|
py
|
Python
|
skimage/measure/tests/test_fit.py
|
blink1073/scikit-image
|
46a8df9c32c5b79d38bc3a1f75dd4fbfeddf98f7
|
[
"BSD-3-Clause"
] | null | null | null |
skimage/measure/tests/test_fit.py
|
blink1073/scikit-image
|
46a8df9c32c5b79d38bc3a1f75dd4fbfeddf98f7
|
[
"BSD-3-Clause"
] | null | null | null |
skimage/measure/tests/test_fit.py
|
blink1073/scikit-image
|
46a8df9c32c5b79d38bc3a1f75dd4fbfeddf98f7
|
[
"BSD-3-Clause"
] | 1
|
2019-12-13T15:25:06.000Z
|
2019-12-13T15:25:06.000Z
|
import numpy as np
from numpy.testing import assert_equal, assert_raises, assert_almost_equal
from skimage.measure import LineModelND, CircleModel, EllipseModel, ransac
from skimage.transform import AffineTransform
from skimage.measure.fit import _dynamic_max_trials
from skimage._shared._warnings import expected_warnings
def test_line_model_invalid_input():
assert_raises(ValueError, LineModelND().estimate, np.empty((1, 3)))
def test_line_model_predict():
model = LineModelND()
model.params = ((0, 0), (1, 1))
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_model_estimate():
# generate original data without noise
model0 = LineModelND()
model0.params = ((0, 0), (1, 1))
x0 = np.arange(-100, 100)
y0 = model0.predict_y(x0)
data = np.column_stack([x0, y0])
# estimate parameters of noisy data
model_est = LineModelND()
model_est.estimate(data)
# test whether estimated parameters almost equal original parameters
x = np.random.rand(100, 2)
assert_almost_equal(model0.predict(x), model_est.predict(x), 1)
def test_line_model_residuals():
model = LineModelND()
model.params = (np.array([0, 0]), np.array([0, 1]))
assert_equal(model.residuals(np.array([[0, 0]])), 0)
assert_equal(model.residuals(np.array([[0, 10]])), 0)
assert_equal(model.residuals(np.array([[10, 0]])), 10)
model.params = (np.array([-2, 0]), np.array([1, 1]) / np.sqrt(2))
assert_equal(model.residuals(np.array([[0, 0]])), np.sqrt(2))
assert_almost_equal(model.residuals(np.array([[-4, 0]])), np.sqrt(2))
def test_line_model_under_determined():
data = np.empty((1, 2))
assert_raises(ValueError, LineModelND().estimate, data)
def test_line_modelND_invalid_input():
assert_raises(ValueError, LineModelND().estimate, np.empty((5, 1)))
def test_line_modelND_predict():
model = LineModelND()
model.params = (np.array([0, 0]), np.array([0.2, 0.98]))
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_modelND_estimate():
# generate original data without noise
model0 = LineModelND()
model0.params = (np.array([0,0,0], dtype='float'),
np.array([1,1,1], dtype='float')/np.sqrt(3))
# we scale the unit vector with a factor 10 when generating points on the
# line in order to compensate for the scale of the random noise
data0 = (model0.params[0] +
10 * np.arange(-100,100)[...,np.newaxis] * model0.params[1])
# add gaussian noise to data
np.random.seed(1234)
data = data0 + np.random.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = LineModelND()
model_est.estimate(data)
# test whether estimated parameters are correct
# we use the following geometric property: two aligned vectors have
# a cross-product equal to zero
# test if direction vectors are aligned
assert_almost_equal(np.linalg.norm(np.cross(model0.params[1],
model_est.params[1])), 0, 1)
# test if origins are aligned with the direction
a = model_est.params[0] - model0.params[0]
if np.linalg.norm(a) > 0:
a /= np.linalg.norm(a)
assert_almost_equal(np.linalg.norm(np.cross(model0.params[1], a)), 0, 1)
def test_line_modelND_residuals():
model = LineModelND()
model.params = (np.array([0, 0, 0]), np.array([0, 0, 1]))
assert_equal(abs(model.residuals(np.array([[0, 0, 0]]))), 0)
assert_equal(abs(model.residuals(np.array([[0, 0, 1]]))), 0)
assert_equal(abs(model.residuals(np.array([[10, 0, 0]]))), 10)
def test_line_modelND_under_determined():
data = np.empty((1, 3))
assert_raises(ValueError, LineModelND().estimate, data)
def test_circle_model_invalid_input():
assert_raises(ValueError, CircleModel().estimate, np.empty((5, 3)))
def test_circle_model_predict():
model = CircleModel()
r = 5
model.params = (0, 0, r)
t = np.arange(0, 2 * np.pi, np.pi / 2)
xy = np.array(((5, 0), (0, 5), (-5, 0), (0, -5)))
assert_almost_equal(xy, model.predict_xy(t))
def test_circle_model_estimate():
# generate original data without noise
model0 = CircleModel()
model0.params = (10, 12, 3)
t = np.linspace(0, 2 * np.pi, 1000)
data0 = model0.predict_xy(t)
# add gaussian noise to data
np.random.seed(1234)
data = data0 + np.random.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = CircleModel()
model_est.estimate(data)
# test whether estimated parameters almost equal original parameters
assert_almost_equal(model0.params, model_est.params, 1)
def test_circle_model_residuals():
model = CircleModel()
model.params = (0, 0, 5)
assert_almost_equal(abs(model.residuals(np.array([[5, 0]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[6, 6]]))),
np.sqrt(2 * 6**2) - 5)
assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 5)
def test_ellipse_model_invalid_input():
assert_raises(ValueError, EllipseModel().estimate, np.empty((5, 3)))
def test_ellipse_model_predict():
model = EllipseModel()
    r = 5
    model.params = (0, 0, r, 10, 0)
t = np.arange(0, 2 * np.pi, np.pi / 2)
xy = np.array(((5, 0), (0, 10), (-5, 0), (0, -10)))
assert_almost_equal(xy, model.predict_xy(t))
def test_ellipse_model_estimate():
# generate original data without noise
model0 = EllipseModel()
model0.params = (10, 20, 15, 25, 0)
t = np.linspace(0, 2 * np.pi, 100)
data0 = model0.predict_xy(t)
# add gaussian noise to data
np.random.seed(1234)
data = data0 + np.random.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = EllipseModel()
model_est.estimate(data)
# test whether estimated parameters almost equal original parameters
assert_almost_equal(model0.params, model_est.params, 0)
def test_ellipse_model_residuals():
model = EllipseModel()
# vertical line through origin
model.params = (0, 0, 10, 5, 0)
assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[0, 5]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[0, 10]]))), 5)
def test_ransac_shape():
np.random.seed(1)
# generate original data without noise
model0 = CircleModel()
model0.params = (10, 12, 3)
t = np.linspace(0, 2 * np.pi, 1000)
data0 = model0.predict_xy(t)
# add some faulty data
outliers = (10, 30, 200)
data0[outliers[0], :] = (1000, 1000)
data0[outliers[1], :] = (-50, 50)
data0[outliers[2], :] = (-100, -10)
# estimate parameters of corrupted data
model_est, inliers = ransac(data0, CircleModel, 3, 5)
# test whether estimated parameters equal original parameters
assert_equal(model0.params, model_est.params)
for outlier in outliers:
assert outlier not in inliers
def test_ransac_geometric():
np.random.seed(1)
# generate original data without noise
src = 100 * np.random.random((50, 2))
model0 = AffineTransform(scale=(0.5, 0.3), rotation=1,
translation=(10, 20))
dst = model0(src)
# add some faulty data
outliers = (0, 5, 20)
dst[outliers[0]] = (10000, 10000)
dst[outliers[1]] = (-100, 100)
dst[outliers[2]] = (50, 50)
# estimate parameters of corrupted data
model_est, inliers = ransac((src, dst), AffineTransform, 2, 20)
# test whether estimated parameters equal original parameters
assert_almost_equal(model0.params, model_est.params)
assert np.all(np.nonzero(inliers == False)[0] == outliers)
def test_ransac_is_data_valid():
np.random.seed(1)
is_data_valid = lambda data: data.shape[0] > 2
model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
is_data_valid=is_data_valid)
assert_equal(model, None)
assert_equal(inliers, None)
def test_ransac_is_model_valid():
np.random.seed(1)
def is_model_valid(model, data):
return False
model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
is_model_valid=is_model_valid)
assert_equal(model, None)
assert_equal(inliers, None)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
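    # The expected trial count presumably follows the standard RANSAC relation
    #   N = log(1 - p) / log(1 - w ** min_samples),  with w = n_inliers / n_samples,
    # which reproduces the hand-calculated values asserted below.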
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 5
assert_equal(_dynamic_max_trials(1, 100, 5, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 5, 1), np.inf)
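# The hand-calculated expectations above follow the standard RANSAC bound
# N = ceil(log(1 - p) / log(1 - w**m)), where w = n_inliers / n_samples is the
# inlier ratio, m the minimal sample size and p the desired probability of
# drawing at least one outlier-free sample.  A minimal sketch of that formula
# (illustrative only; the real _dynamic_max_trials additionally handles the
# degenerate cases w == 1, p == 0 and p == 1 exercised above):
def _ransac_trials_formula(n_inliers, n_samples, min_samples, probability):
    w = n_inliers / float(n_samples)
    return int(np.ceil(np.log(1 - probability) / np.log(1 - w ** min_samples)))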
def test_ransac_invalid_input():
assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2,
residual_threshold=0, max_trials=-1)
assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2,
residual_threshold=0, stop_probability=-1)
assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2,
residual_threshold=0, stop_probability=1.01)
def test_deprecated_params_attribute():
model = LineModelND()
model.params = ((0, 0), (1, 1))
x = np.arange(-10, 10)
y = model.predict_y(x)
with expected_warnings(['`_params`']):
assert_equal(model.params, model._params)
if __name__ == "__main__":
np.testing.run_module_suite()
| 33.088889
| 77
| 0.655473
|
794d17db7957e3659bb203ff3714f3a1b33655eb
| 374
|
py
|
Python
|
timeweb/timewebapp/migrations/0012_auto_20210416_2315.py
|
snapsnap123/TimeWeb
|
9e3a3459fa71f6edf730addf3c7e15d87b6c80b5
|
[
"Apache-2.0"
] | 1
|
2021-11-29T18:37:02.000Z
|
2021-11-29T18:37:02.000Z
|
timeweb/timewebapp/migrations/0012_auto_20210416_2315.py
|
renardtnt1/TimeWeb
|
3684bb2ba368b55a4d50995e7e0e3b9405cba004
|
[
"Apache-2.0"
] | null | null | null |
timeweb/timewebapp/migrations/0012_auto_20210416_2315.py
|
renardtnt1/TimeWeb
|
3684bb2ba368b55a4d50995e7e0e3b9405cba004
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.8 on 2021-04-17 06:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('timewebapp', '0011_auto_20210416_2312'),
]
operations = [
migrations.RenameField(
model_name='settingsmodel',
old_name='defnwd',
new_name='def_nwd',
),
]
| 19.684211
| 50
| 0.596257
|
794d17e2c844c24ebee9cb94a448a6c8ad243842
| 1,702
|
py
|
Python
|
odex/section/header.py
|
callmejacob/dexfactory
|
2de996927ee9f036b2c7fc6cb04f43ac790f35af
|
[
"BSD-2-Clause"
] | 7
|
2018-06-14T10:40:47.000Z
|
2021-05-18T08:55:34.000Z
|
odex/section/header.py
|
callmejacob/dexfactory
|
2de996927ee9f036b2c7fc6cb04f43ac790f35af
|
[
"BSD-2-Clause"
] | 1
|
2020-05-28T08:59:50.000Z
|
2020-05-28T08:59:50.000Z
|
odex/section/header.py
|
callmejacob/dexfactory
|
2de996927ee9f036b2c7fc6cb04f43ac790f35af
|
[
"BSD-2-Clause"
] | 3
|
2018-02-28T02:08:06.000Z
|
2018-09-12T03:09:18.000Z
|
# -*- coding: utf-8 -*-
from tool import *
class OdexHeader(object):
"""
    Header structure, fixed size 0x28
    struct header {
        u1 magic[8];   // magic number, must be 'dey\n036\n'
        u4 dex_off;    // byte offset of the dex section
        u4 dex_size;   // byte length of the dex section
        u4 deps_off;   // byte offset of the deps section
        u4 deps_size;  // byte length of the deps section
        u4 opt_off;    // byte offset of the opt section
        u4 opt_size;   // byte length of the opt section
        u4 flags;      // flag bits
        u4 checksum;   // overall checksum
}
"""
def __init__(self, bytes):
self.magic = bytes[0x00:0x08]
self.dex_off = convertBytesToInt(bytes[0x08:0x0c])
self.dex_size = convertBytesToInt(bytes[0x0c:0x10])
self.deps_off = convertBytesToInt(bytes[0x10:0x14])
self.deps_size = convertBytesToInt(bytes[0x14:0x18])
self.opt_off = convertBytesToInt(bytes[0x18:0x1c])
self.opt_size = convertBytesToInt(bytes[0x1c:0x20])
self.flags = convertBytesToInt(bytes[0x20:0x24])
self.checksum = convertBytesToInt(bytes[0x24:0x28])
self.bytes = bytes[0x00:0x28]
def getBytesSize(self):
return len(self.bytes)
def tostring(self):
string = 'header: {\n'
string += ' ' * 4 + 'magic: [%s]\n' % convertBytesToHexStr(self.magic)
string += ' ' * 4 + 'dex_off: %.4x\n' % self.dex_off
string += ' ' * 4 + 'dex_size: %.4x\n' % self.dex_size
string += ' ' * 4 + 'deps_off: %.4x\n' % self.deps_off
string += ' ' * 4 + 'deps_size: %.4x\n' % self.deps_size
string += ' ' * 4 + 'opt_off: %.4x\n' % self.opt_off
string += ' ' * 4 + 'opt_size: %.4x\n' % self.opt_size
string += ' ' * 4 + 'flags: %.4x\n' % self.flags
string += ' ' * 4 + 'checksum: %.4x\n' % self.checksum
string += '}\n'
return string
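# Hypothetical usage sketch (assumes `convertBytesToInt` from `tool` decodes the
# little-endian integers used by the odex format):
#
#     with open('classes.odex', 'rb') as f:
#         header = OdexHeader(f.read(0x28))
#     print(header.tostring())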
| 31.518519
| 79
| 0.582844
|
794d185553b96ff1311ceba62539896445d54115
| 28,290
|
py
|
Python
|
salt/cloud/clouds/proxmox.py
|
johngrasty/salt
|
f66b18d8ee52eb988c3dbd8ba3ff6a6173c8aea5
|
[
"Apache-2.0"
] | null | null | null |
salt/cloud/clouds/proxmox.py
|
johngrasty/salt
|
f66b18d8ee52eb988c3dbd8ba3ff6a6173c8aea5
|
[
"Apache-2.0"
] | 1
|
2015-09-02T12:49:48.000Z
|
2015-09-02T19:22:58.000Z
|
salt/cloud/clouds/proxmox.py
|
johngrasty/salt
|
f66b18d8ee52eb988c3dbd8ba3ff6a6173c8aea5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Proxmox Cloud Module
======================
.. versionadded:: 2014.7.0
The Proxmox cloud module is used to control access to cloud providers using
the Proxmox system (KVM / OpenVZ).
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/proxmox.conf``:
.. code-block:: yaml
my-proxmox-config:
# Proxmox account information
user: myuser@pam or myuser@pve
password: mypassword
url: hypervisor.domain.tld
driver: proxmox
verify_ssl: True
:maintainer: Frank Klaassen <frank@cloudright.nl>
:depends: requests >= 2.2.1
:depends: IPy >= 0.81
'''
# Import python libs
from __future__ import absolute_import
import time
import pprint
import logging
# Import salt libs
import salt.ext.six as six
import salt.utils
# Import salt cloud libs
import salt.utils.cloud
import salt.config as config
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
from IPy import IP
HAS_IPY = True
except ImportError:
HAS_IPY = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'proxmox'
def __virtual__():
'''
Check for PROXMOX configurations
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('user',)
)
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
deps = {
'requests': HAS_REQUESTS,
'IPy': HAS_IPY
}
return config.check_driver_dependencies(
__virtualname__,
deps
)
url = None
ticket = None
csrf = None
verify_ssl = None
def _authenticate():
'''
Retrieve CSRF and API tickets for the Proxmox API
'''
global url, ticket, csrf, verify_ssl
url = config.get_cloud_config_value(
'url', get_configured_provider(), __opts__, search_global=False
)
    username = config.get_cloud_config_value(
        'user', get_configured_provider(), __opts__, search_global=False
    )
passwd = config.get_cloud_config_value(
'password', get_configured_provider(), __opts__, search_global=False
)
verify_ssl = config.get_cloud_config_value(
'verify_ssl', get_configured_provider(), __opts__, search_global=False
)
if verify_ssl is None:
verify_ssl = True
connect_data = {'username': username, 'password': passwd}
full_url = 'https://{0}:8006/api2/json/access/ticket'.format(url)
returned_data = requests.post(
full_url, verify=verify_ssl, data=connect_data).json()
ticket = {'PVEAuthCookie': returned_data['data']['ticket']}
csrf = str(returned_data['data']['CSRFPreventionToken'])
def query(conn_type, option, post_data=None):
'''
Execute the HTTP request to the API
'''
if ticket is None or csrf is None or url is None:
log.debug('Not authenticated yet, doing that now..')
_authenticate()
full_url = 'https://{0}:8006/api2/json/{1}'.format(url, option)
log.debug('{0}: {1} ({2})'.format(conn_type, full_url, post_data))
httpheaders = {'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': 'salt-cloud-proxmox'}
if conn_type == 'post':
httpheaders['CSRFPreventionToken'] = csrf
response = requests.post(full_url, verify=verify_ssl,
data=post_data,
cookies=ticket,
headers=httpheaders)
elif conn_type == 'put':
httpheaders['CSRFPreventionToken'] = csrf
response = requests.put(full_url, verify=verify_ssl,
data=post_data,
cookies=ticket,
headers=httpheaders)
elif conn_type == 'delete':
httpheaders['CSRFPreventionToken'] = csrf
response = requests.delete(full_url, verify=verify_ssl,
data=post_data,
cookies=ticket,
headers=httpheaders)
elif conn_type == 'get':
response = requests.get(full_url, verify=verify_ssl,
cookies=ticket)
response.raise_for_status()
try:
returned_data = response.json()
if 'data' not in returned_data:
raise SaltCloudExecutionFailure
return returned_data['data']
except Exception:
log.error('Error in trying to process JSON')
log.error(response)
def _getVmByName(name, allDetails=False):
'''
    Since Proxmox works based on IDs rather than names as identifiers, this
    requires some filtering to retrieve the required information.
'''
vms = get_resources_vms(includeConfig=allDetails)
if name in vms:
return vms[name]
log.info('VM with name "{0}" could not be found.'.format(name))
return False
def _getVmById(vmid, allDetails=False):
'''
Retrieve a VM based on the ID.
'''
for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)):
if str(vm_details['vmid']) == str(vmid):
return vm_details
log.info('VM with ID "{0}" could not be found.'.format(vmid))
return False
def _get_next_vmid():
'''
Proxmox allows the use of alternative ids instead of autoincrementing.
    Because of that it is required to query what the first available ID is.
'''
return int(query('get', 'cluster/nextid'))
def _check_ip_available(ip_addr):
'''
Proxmox VMs refuse to start when the IP is already being used.
This function can be used to prevent VMs being created with duplicate
    IPs or to generate a warning.
'''
for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)):
vm_config = vm_details['config']
if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr:
log.debug('IP "{0}" is already defined'.format(ip_addr))
return False
log.debug('IP \'{0}\' is available to be defined'.format(ip_addr))
return True
def _parse_proxmox_upid(node, vm_=None):
'''
Upon requesting a task that runs for a longer period of time a UPID is given.
This includes information about the job and can be used to lookup information in the log.
'''
ret = {}
upid = node
# Parse node response
node = node.split(':')
if node[0] == 'UPID':
ret['node'] = str(node[1])
ret['pid'] = str(node[2])
ret['pstart'] = str(node[3])
ret['starttime'] = str(node[4])
ret['type'] = str(node[5])
ret['vmid'] = str(node[6])
ret['user'] = str(node[7])
# include the upid again in case we'll need it again
ret['upid'] = str(upid)
if vm_ is not None and 'technology' in vm_:
ret['technology'] = str(vm_['technology'])
return ret
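# For reference, a UPID is a colon-separated string along the lines of the
# (hypothetical) example below, which the parser above maps to
# node/pid/pstart/starttime/type/vmid/user:
#     UPID:node1:00002F9D:000DEEA1:5A2A4EB1:vzcreate:103:root@pam: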
def _lookup_proxmox_task(upid):
'''
Retrieve the (latest) logs and retrieve the status for a UPID.
This can be used to verify whether a task has completed.
'''
log.debug('Getting creation status for upid: {0}'.format(upid))
tasks = query('get', 'cluster/tasks')
if tasks:
for task in tasks:
if task['upid'] == upid:
log.debug('Found upid task: {0}'.format(task))
return task
return False
def get_resources_nodes(call=None, resFilter=None):
'''
Retrieve all hypervisors (nodes) available on this environment
CLI Example:
.. code-block:: bash
salt-cloud -f get_resources_nodes my-proxmox-config
'''
log.debug('Getting resource: nodes.. (filter: {0})'.format(resFilter))
resources = query('get', 'cluster/resources')
ret = {}
for resource in resources:
if 'type' in resource and resource['type'] == 'node':
name = resource['node']
ret[name] = resource
if resFilter is not None:
log.debug('Filter given: {0}, returning requested '
'resource: nodes'.format(resFilter))
return ret[resFilter]
log.debug('Filter not given: {0}, returning all resource: nodes'.format(ret))
return ret
def get_resources_vms(call=None, resFilter=None, includeConfig=True):
'''
Retrieve all VMs available on this environment
CLI Example:
.. code-block:: bash
salt-cloud -f get_resources_vms my-proxmox-config
'''
log.debug('Getting resource: vms.. (filter: {0})'.format(resFilter))
resources = query('get', 'cluster/resources')
ret = {}
for resource in resources:
if 'type' in resource and resource['type'] in ['openvz', 'qemu']:
name = resource['name']
ret[name] = resource
if includeConfig:
# Requested to include the detailed configuration of a VM
ret[name]['config'] = get_vmconfig(
ret[name]['vmid'],
ret[name]['node'],
ret[name]['type']
)
if resFilter is not None:
        log.debug('Filter given: {0}, returning requested '
                  'resource: vms'.format(resFilter))
        return ret[resFilter]
    log.debug('Filter not given: {0}, returning all resource: vms'.format(ret))
return ret
def script(vm_):
'''
Return the script deployment object
'''
script_name = config.get_cloud_config_value('script', vm_, __opts__)
if not script_name:
script_name = 'bootstrap-salt'
return salt.utils.cloud.os_script(
script_name,
vm_,
__opts__,
salt.utils.cloud.salt_config_to_yaml(
salt.utils.cloud.minion_config(__opts__, vm_)
)
)
def avail_locations(call=None):
'''
Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages
CLI Example:
.. code-block:: bash
salt-cloud --list-locations my-proxmox-config
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
# could also use the get_resources_nodes but speed is ~the same
nodes = query('get', 'nodes')
ret = {}
for node in nodes:
name = node['node']
ret[name] = node
return ret
def avail_images(call=None, location='local'):
'''
Return a list of the images that are on the provider
CLI Example:
.. code-block:: bash
salt-cloud --list-images my-proxmox-config
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
ret = {}
for host_name, host_details in six.iteritems(avail_locations()):
for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)):
ret[item['volid']] = item
return ret
def list_nodes(call=None):
'''
Return a list of the VMs that are managed by the provider
CLI Example:
.. code-block:: bash
salt-cloud -Q my-proxmox-config
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
ret = {}
for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)):
log.debug('VM_Name: {0}'.format(vm_name))
log.debug('vm_details: {0}'.format(vm_details))
# Limit resultset on what Salt-cloud demands:
ret[vm_name] = {}
ret[vm_name]['id'] = str(vm_details['vmid'])
ret[vm_name]['image'] = str(vm_details['vmid'])
ret[vm_name]['size'] = str(vm_details['disk'])
ret[vm_name]['state'] = str(vm_details['status'])
# Figure out which is which to put it in the right column
private_ips = []
public_ips = []
if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-':
ips = vm_details['config']['ip_address'].split(' ')
for ip_ in ips:
if IP(ip_).iptype() == 'PRIVATE':
private_ips.append(str(ip_))
else:
public_ips.append(str(ip_))
ret[vm_name]['private_ips'] = private_ips
ret[vm_name]['public_ips'] = public_ips
return ret
def list_nodes_full(call=None):
'''
Return a list of the VMs that are on the provider
CLI Example:
.. code-block:: bash
salt-cloud -F my-proxmox-config
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
return get_resources_vms(includeConfig=True)
def list_nodes_select(call=None):
'''
Return a list of the VMs that are on the provider, with select fields
CLI Example:
.. code-block:: bash
salt-cloud -S my-proxmox-config
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(), __opts__['query.selection'], call,
)
def create(vm_):
'''
Create a single VM from a data dict
CLI Example:
.. code-block:: bash
salt-cloud -p proxmox-ubuntu vmhostname
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'proxmox',
vm_['profile']) is False:
return False
except AttributeError:
pass
# Since using "provider: <provider-engine>" is deprecated, alias provider
# to use driver: "driver: <provider-engine>"
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
ret = {}
salt.utils.cloud.fire_event(
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
{
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['driver'],
},
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
if 'use_dns' in vm_ and 'ip_address' not in vm_:
use_dns = vm_['use_dns']
if use_dns:
from socket import gethostbyname, gaierror
try:
ip_address = gethostbyname(str(vm_['name']))
except gaierror:
log.debug('Resolving of {hostname} failed'.format(hostname=str(vm_['name'])))
else:
vm_['ip_address'] = str(ip_address)
try:
data = create_node(vm_)
except Exception as exc:
log.error(
'Error creating {0} on PROXMOX\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], str(exc)
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
ret['creation_data'] = data
name = vm_['name'] # hostname which we know
vmid = data['vmid'] # vmid which we have received
host = data['node'] # host which we have received
nodeType = data['technology'] # VM tech (Qemu / OpenVZ)
# Determine which IP to use in order of preference:
if 'ip_address' in vm_:
ip_address = str(vm_['ip_address'])
elif 'public_ips' in data:
ip_address = str(data['public_ips'][0]) # first IP
elif 'private_ips' in data:
ip_address = str(data['private_ips'][0]) # first IP
else:
raise SaltCloudExecutionFailure # err.. not a good idea i reckon
log.debug('Using IP address {0}'.format(ip_address))
# wait until the vm has been created so we can start it
if not wait_for_created(data['upid'], timeout=300):
return {'Error': 'Unable to create {0}, command timed out'.format(name)}
# VM has been created. Starting..
if not start(name, vmid, call='action'):
log.error('Node {0} ({1}) failed to start!'.format(name, vmid))
raise SaltCloudExecutionFailure
# Wait until the VM has fully started
log.debug('Waiting for state "running" for vm {0} on {1}'.format(vmid, host))
if not wait_for_state(vmid, 'running'):
return {'Error': 'Unable to start {0}, command timed out'.format(name)}
ssh_username = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, default='root'
)
ssh_password = config.get_cloud_config_value(
'password', vm_, __opts__,
)
ret['ip_address'] = ip_address
ret['username'] = ssh_username
ret['password'] = ssh_password
vm_['ssh_host'] = ip_address
vm_['password'] = ssh_password
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
# Report success!
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
)
salt.utils.cloud.fire_event(
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
{
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['driver'],
},
)
return ret
def create_node(vm_):
'''
    Build and submit the request data to create a new node
'''
newnode = {}
if 'technology' not in vm_:
vm_['technology'] = 'openvz' # default virt tech if none is given
if vm_['technology'] not in ['qemu', 'openvz']:
# Wrong VM type given
raise SaltCloudExecutionFailure
if 'host' not in vm_:
# Use globally configured/default location
vm_['host'] = config.get_cloud_config_value(
'default_host', get_configured_provider(), __opts__, search_global=False
)
if vm_['host'] is None:
# No location given for the profile
log.error('No host given to create this VM on')
raise SaltCloudExecutionFailure
# Required by both OpenVZ and Qemu (KVM)
vmhost = vm_['host']
newnode['vmid'] = _get_next_vmid()
for prop in ('cpuunits', 'description', 'memory', 'onboot'):
if prop in vm_: # if the property is set, use it for the VM request
newnode[prop] = vm_[prop]
if vm_['technology'] == 'openvz':
# OpenVZ related settings, using non-default names:
newnode['hostname'] = vm_['name']
newnode['ostemplate'] = vm_['image']
# optional VZ settings
for prop in ('cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage'):
if prop in vm_: # if the property is set, use it for the VM request
newnode[prop] = vm_[prop]
elif vm_['technology'] == 'qemu':
# optional Qemu settings
for prop in ('acpi', 'cores', 'cpu', 'pool', 'storage'):
if prop in vm_: # if the property is set, use it for the VM request
newnode[prop] = vm_[prop]
# The node is ready. Lets request it to be added
salt.utils.cloud.fire_event(
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
{'kwargs': newnode},
)
log.debug('Preparing to generate a node using these parameters: {0} '.format(
newnode))
node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode)
return _parse_proxmox_upid(node, vm_)
def show_instance(name, call=None):
'''
Show the details from Proxmox concerning an instance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
nodes = list_nodes_full()
salt.utils.cloud.cache_node(nodes[name], __active_provider_name__, __opts__)
return nodes[name]
def get_vmconfig(vmid, node=None, node_type='openvz'):
'''
Get VM configuration
'''
if node is None:
# We need to figure out which node this VM is on.
for host_name, host_details in six.iteritems(avail_locations()):
for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)):
if item['vmid'] == vmid:
node = host_name
# If we reached this point, we have all the information we need
data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid))
return data
def wait_for_created(upid, timeout=300):
'''
    Wait until the VM has been created successfully
'''
start_time = time.time()
info = _lookup_proxmox_task(upid)
if not info:
log.error('wait_for_created: No task information '
'retrieved based on given criteria.')
raise SaltCloudExecutionFailure
while True:
if 'status' in info and info['status'] == 'OK':
log.debug('Host has been created!')
return True
time.sleep(3) # Little more patience, we're not in a hurry
if time.time() - start_time > timeout:
log.debug('Timeout reached while waiting for host to be created')
return False
info = _lookup_proxmox_task(upid)
def wait_for_state(vmid, state, timeout=300):
'''
Wait until a specific state has been reached on a node
'''
start_time = time.time()
node = get_vm_status(vmid=vmid)
if not node:
log.error('wait_for_state: No VM retrieved based on given criteria.')
raise SaltCloudExecutionFailure
while True:
if node['status'] == state:
log.debug('Host {0} is now in "{1}" state!'.format(
node['name'], state
))
return True
time.sleep(1)
if time.time() - start_time > timeout:
log.debug('Timeout reached while waiting for {0} to '
'become {1}'.format(node['name'], state))
return False
node = get_vm_status(vmid=vmid)
log.debug('State for {0} is: "{1}" instead of "{2}"'.format(
node['name'], node['status'], state))
def destroy(name, call=None):
'''
Destroy a node.
CLI Example:
.. code-block:: bash
salt-cloud --destroy mymachine
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
salt.utils.cloud.fire_event(
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
{'name': name},
transport=__opts__['transport']
)
vmobj = _getVmByName(name)
if vmobj is not None:
# stop the vm
if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped':
stop(name, vmobj['vmid'], 'action')
# wait until stopped
if not wait_for_state(vmobj['vmid'], 'stopped'):
return {'Error': 'Unable to stop {0}, command timed out'.format(name)}
query('delete', 'nodes/{0}/{1}'.format(
vmobj['node'], vmobj['id']
))
salt.utils.cloud.fire_event(
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
{'name': name},
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
return {'Destroyed': '{0} was destroyed.'.format(name)}
def set_vm_status(status, name=None, vmid=None):
'''
Convenience function for setting VM status
'''
log.debug('Set status to {0} for {1} ({2})'.format(status, name, vmid))
if vmid is not None:
log.debug('set_vm_status: via ID - VMID {0} ({1}): {2}'.format(
vmid, name, status))
vmobj = _getVmById(vmid)
else:
log.debug('set_vm_status: via name - VMID {0} ({1}): {2}'.format(
vmid, name, status))
vmobj = _getVmByName(name)
if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj:
log.error('Unable to set status {0} for {1} ({2})'.format(
status, name, vmid))
raise SaltCloudExecutionTimeout
log.debug("VM_STATUS: Has desired info ({0}). Setting status..".format(vmobj))
data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format(
vmobj['node'], vmobj['type'], vmobj['vmid'], status))
result = _parse_proxmox_upid(data, vmobj)
if result is not False and result is not None:
log.debug('Set_vm_status action result: {0}'.format(result))
return True
return False
def get_vm_status(vmid=None, name=None):
'''
Get the status for a VM, either via the ID or the hostname
'''
if vmid is not None:
log.debug('get_vm_status: VMID {0}'.format(vmid))
vmobj = _getVmById(vmid)
elif name is not None:
log.debug('get_vm_status: name {0}'.format(name))
vmobj = _getVmByName(name)
else:
log.debug("get_vm_status: No ID or NAME given")
raise SaltCloudExecutionFailure
log.debug('VM found: {0}'.format(vmobj))
if vmobj is not None and 'node' in vmobj:
log.debug("VM_STATUS: Has desired info. Retrieving.. ({0})".format(
vmobj['name']))
data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format(
vmobj['node'], vmobj['type'], vmobj['vmid']))
return data
log.error('VM or requested status not found..')
return False
def start(name, vmid=None, call=None):
'''
Start a node.
CLI Example:
.. code-block:: bash
salt-cloud -a start mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The start action must be called with -a or --action.'
)
log.debug('Start: {0} ({1}) = Start'.format(name, vmid))
if not set_vm_status('start', name, vmid=vmid):
log.error('Unable to bring VM {0} ({1}) up..'.format(name, vmid))
raise SaltCloudExecutionFailure
# xxx: TBD: Check here whether the status was actually changed to 'started'
return {'Started': '{0} was started.'.format(name)}
def stop(name, vmid=None, call=None):
'''
Stop a node ("pulling the plug").
CLI Example:
.. code-block:: bash
salt-cloud -a stop mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
if not set_vm_status('stop', name, vmid=vmid):
log.error('Unable to bring VM {0} ({1}) down..'.format(name, vmid))
raise SaltCloudExecutionFailure
# xxx: TBD: Check here whether the status was actually changed to 'stopped'
return {'Stopped': '{0} was stopped.'.format(name)}
def shutdown(name=None, vmid=None, call=None):
'''
Shutdown a node via ACPI.
CLI Example:
.. code-block:: bash
salt-cloud -a shutdown mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The shutdown action must be called with -a or --action.'
)
if not set_vm_status('shutdown', name, vmid=vmid):
log.error('Unable to shut VM {0} ({1}) down..'.format(name, vmid))
raise SaltCloudExecutionFailure
# xxx: TBD: Check here whether the status was actually changed to 'stopped'
return {'Shutdown': '{0} was shutdown.'.format(name)}
| 29.841772
| 107
| 0.596783
|
794d186f38322d8e4be5910f8bb9db43181109f8
| 12,561
|
py
|
Python
|
audio/edison/mfcc/mfcc_utils.py
|
noah95/edison
|
652af8ed4c99c11000a6c0136d770583a5318d8e
|
[
"Apache-2.0"
] | 3
|
2020-09-16T18:43:40.000Z
|
2022-03-24T12:09:05.000Z
|
audio/edison/mfcc/mfcc_utils.py
|
noah95/edison
|
652af8ed4c99c11000a6c0136d770583a5318d8e
|
[
"Apache-2.0"
] | 13
|
2020-11-13T18:50:47.000Z
|
2022-03-12T00:29:36.000Z
|
audio/edison/mfcc/mfcc_utils.py
|
noah95/edison
|
652af8ed4c99c11000a6c0136d770583a5318d8e
|
[
"Apache-2.0"
] | 2
|
2020-09-16T18:43:46.000Z
|
2021-04-14T06:33:53.000Z
|
# -*- coding: utf-8 -*-
# @Author: Noah Huetter
# @Date: 2020-04-16 16:23:59
# @Last Modified by: Noah Huetter
# @Last Modified time: 2020-05-27 16:31:39
import numpy as np
from scipy.fftpack import dct
from tqdm import tqdm
# mel freq. constants -> https://en.wikipedia.org/wiki/Mel_scale
from config import *
def frames(data, frame_length=3, frame_step=1):
"""
    Split a data vector into (possibly overlapping) frames
    frame_length: length of each frame
    frame_step: how many samples to advance the frame each step
"""
n_frames = 1 + (data.shape[0] - frame_length) // frame_step
out = np.zeros((n_frames,frame_length))
for i in range(n_frames):
out[i] = data[i*frame_step:i*frame_step+frame_length]
return out
def hertz_to_mel(frequencies_hertz):
"""
Converts frequencies in `frequencies_hertz` in Hertz to the mel scale.
"""
return MEL_HIGH_FREQUENCY_Q * np.log(1.0 + (frequencies_hertz / MEL_BREAK_FREQUENCY_HERTZ))
def gen_mel_weight_matrix(num_mel_bins=20, num_spectrogram_bins=129, sample_rate=8000, \
lower_edge_hertz=125.0, upper_edge_hertz=3800.0):
"""
    Generate mel weight matrix from linear frequency spectrum, inspired by
https://www.tensorflow.org/api_docs/python/tf/signal/linear_to_mel_weight_matrix
"""
nyquist_hertz = sample_rate / 2.0
# excludes DC spectrogram bin
n_bands_to_zero = 1
linear_frequencies = np.linspace(0, nyquist_hertz, num_spectrogram_bins)[n_bands_to_zero:]
# convert linear frequency vector to mel scale
spectrogram_bins_mel = np.expand_dims( hertz_to_mel(linear_frequencies), 1)
# Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
# center of each band is the lower and upper edge of the adjacent bands.
# Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into
# num_mel_bins + 2 pieces.
band_edges_mel = frames(
np.linspace(hertz_to_mel(lower_edge_hertz), hertz_to_mel(upper_edge_hertz), num_mel_bins + 2),
frame_length=3, frame_step=1)
# Split the triples up and reshape them into [1, num_mel_bins] vectors, one vector for
    # lower edges, one for centers and one for upper edges
lower_edge_mel, center_mel, upper_edge_mel = tuple(np.reshape( t, [1, num_mel_bins] ) for t in np.split(band_edges_mel, 3, axis=1))
# Calculate lower and upper slopes for every spectrogram bin. Line segments are
# linear in the mel domain, not Hertz.
lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (
center_mel - lower_edge_mel)
upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (
upper_edge_mel - center_mel)
# Intersect the line segments with each other and zero.
mel_weights_matrix = np.maximum(0, np.minimum(lower_slopes, upper_slopes))
# Re-add the zeroed lower bins we sliced out above
return np.pad(mel_weights_matrix, [[n_bands_to_zero, 0], [0, 0]])
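# For instance (illustrative values only), a 512-point FFT at 16 kHz gives 257
# spectrogram bins, so
#     mel_mtx = gen_mel_weight_matrix(num_mel_bins=20, num_spectrogram_bins=257,
#                                     sample_rate=16000, lower_edge_hertz=80.0,
#                                     upper_edge_hertz=7600.0)
# returns a (257, 20) matrix and the mel spectrum is `spectrogram @ mel_mtx`.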
def batch_mfcc(data, \
fs, nSamples, frame_len, frame_step, frame_count, \
fft_len, \
mel_nbins, mel_lower_hz, mel_upper_hz):
"""
    Runs windowed mfcc on a stream of data
data input data fo shape [..., samples]
fs input sample rate
nSamples number of samples in input
frame_len length of each frame
frame_step how many samples to advance the frame
frame_count how many frames to compute
fft_len length of FFT, ideally frame_len
mel_nbins number of mel filter banks to create
mel_lower_hz lowest frequency of mel bank
mel_upper_hz highest frequency of mel bank
"""
if frame_count == 0:
frame_count = 1 + (nSamples - frame_len) // frame_step
print("Running mfcc for %d frames with %d step on %d samples" % (frame_count, frame_step, data.shape[0]))
# will return a list with a dict for each frame
output = np.zeros((data.shape[0], frame_count, mel_nbins))
for sampleCtr in tqdm(range(data.shape[0])):
for frame_ctr in range(frame_count):
# get chunk of data
chunk = data[sampleCtr][frame_ctr*frame_step : frame_ctr*frame_step+frame_len]
# calculate FFT
stfft = np.fft.fft(chunk)[:frame_len//2]
            # calculate spectrogram
spectrogram = np.abs(stfft)
num_spectrogram_bins = len(spectrogram)
# calculate mel weights
mel_weight_matrix = gen_mel_weight_matrix(num_mel_bins=mel_nbins,
num_spectrogram_bins=num_spectrogram_bins, sample_rate=fs,
lower_edge_hertz=mel_lower_hz, upper_edge_hertz=mel_upper_hz)
# dot product of spectrum and mel matrix to get mel spectrogram
mel_spectrogram = np.dot(spectrogram, mel_weight_matrix)
# take log of mel spectrogram
log_mel_spectrogram = np.log(mel_spectrogram + 1e-6)
# calculate DCT-II
mfcc = dct(log_mel_spectrogram, type=2) / np.sqrt(2*mel_nbins)
frame = np.array(mfcc)
# Add frame to output list
output[sampleCtr, frame_ctr, ...] = frame
return output
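# Hypothetical call for a batch of 1 s clips at 16 kHz, 1024-sample frames with
# 50 % overlap and 32 mel bands (values are illustrative only):
#     feats = batch_mfcc(x, fs=16000, nSamples=16000, frame_len=1024,
#                        frame_step=512, frame_count=0, fft_len=1024,
#                        mel_nbins=32, mel_lower_hz=80.0, mel_upper_hz=7600.0)
#     # feats.shape == (x.shape[0], 30, 32)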
def mfcc(data, \
fs, nSamples, frame_len, frame_step, frame_count, \
fft_len, \
mel_nbins, mel_lower_hz, mel_upper_hz, dummy=None):
"""
    Runs windowed mfcc on a stream of data
data input data
fs input sample rate
nSamples number of samples in input
frame_len length of each frame
frame_step how many samples to advance the frame
frame_count how many frames to compute
fft_len length of FFT, ideally frame_len
mel_nbins number of mel filter banks to create
mel_lower_hz lowest frequency of mel bank
mel_upper_hz highest frequency of mel bank
"""
if frame_count == 0:
frame_count = 1 + (nSamples - frame_len) // frame_step
# print("Running mfcc for %d frames with %d step" % (frame_count, frame_step))
# will return a list with a dict for each frame
output = []
for frame_ctr in range(frame_count):
frame = {}
frame['t_start'] = frame_ctr*frame_step/fs
frame['t_end'] = (frame_ctr*frame_step+frame_len)/fs
# print("frame %d start %f end %f"%(frame_ctr, frame['t_start'],frame['t_end']))
# get chunk of data
chunk = data[frame_ctr*frame_step : frame_ctr*frame_step+frame_len]
# calculate FFT
frame['fft'] = np.fft.fft(chunk)[:frame_len//2]
        # calculate spectrogram
spectrogram = np.abs(frame['fft'])
frame['spectrogram'] = spectrogram
num_spectrogram_bins = len(frame['spectrogram'])
# calculate mel weights
mel_weight_matrix = gen_mel_weight_matrix(num_mel_bins=mel_nbins,
num_spectrogram_bins=num_spectrogram_bins, sample_rate=fs,
lower_edge_hertz=mel_lower_hz, upper_edge_hertz=mel_upper_hz)
frame['mel_weight_matrix'] = mel_weight_matrix
# dot product of spectrum and mel matrix to get mel spectrogram
mel_spectrogram = np.dot(spectrogram, mel_weight_matrix)
frame['mel_spectrogram'] = mel_spectrogram
# take log of mel spectrogram
log_mel_spectrogram = np.log(mel_spectrogram + 1e-6)
frame['log_mel_spectrogram'] = log_mel_spectrogram
# calculate DCT-II
mfcc = dct(log_mel_spectrogram, type=2) / np.sqrt(2*mel_nbins)
frame['mfcc'] = mfcc
# Add frame to output list
output.append(frame)
return output
def mfcc_tf(data, \
fs, nSamples, frame_len, frame_step, frame_count, \
fft_len, \
mel_nbins, mel_lower_hz, mel_upper_hz, unused=None):
"""
Calculate same mfcc using tensor flow functions
"""
import tensorflow as tf
if tf.__version__.startswith('1'):
tf.enable_eager_execution()
# sess = tf.InteractiveSession()
framed = frames(data, frame_length=frame_len, frame_step=frame_step)
# pack data into a tensor of [1, nFrames, frame_len] so we compute only 1 sample
tensor = tf.convert_to_tensor(framed.reshape((1,framed.shape[0], framed.shape[1])), dtype=tf.float32)
# stfts has shape [..., frames, fft_unique_bins], here [1, nFrames, 1, fft_len/2+1)
stfts = tf.signal.stft(tensor, frame_length=frame_len, frame_step=frame_step, fft_length=fft_len)
spectrograms = tf.abs(stfts)
# reshape spectrograms to [1, nFrames, fft_len/2+1)
spectrograms = tf.reshape(spectrograms, (spectrograms.shape[0],spectrograms.shape[1],-1))
num_spectrogram_bins = stfts.shape[-1]
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
mel_nbins, num_spectrogram_bins, fs, mel_lower_hz,
mel_upper_hz)
# mel_spectrograms has shape [1, nFrames, mel_nbins]
mel_spectrograms = tf.tensordot(spectrograms, linear_to_mel_weight_matrix, 1)
# log_mel_spectrograms has shape [1, nFrames, mel_nbins]
log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6)
# mfccs has shape [1, nFrames, mel_nbins]
mfccs = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrograms)[..., :mel_nbins]
# fill in same structure as own mfcc implementation
# for all spectrograms, cut first element corresponding to the DC component
if frame_count == 0:
frame_count = 1 + (nSamples - frame_len) // frame_step
output = []
for frame_ctr in range(frame_count):
frame = {}
frame['t_start'] = frame_ctr*frame_step/fs
frame['t_end'] = (frame_ctr*frame_step+frame_len)/fs
frame['fft'] = tf.reshape(stfts, (stfts.shape[0],stfts.shape[1],-1))[0, frame_ctr, 1:]
frame['spectrogram'] = spectrograms[0, frame_ctr, 1:].numpy()
# strip DC component from weights matrix
frame['mel_weight_matrix'] = linear_to_mel_weight_matrix[1:,...].numpy()
frame['mel_spectrogram'] = mel_spectrograms[0, frame_ctr, ...]
frame['log_mel_spectrogram'] = log_mel_spectrograms[0, frame_ctr, ...].numpy()
frame['mfcc'] = mfccs[0, frame_ctr, ...].numpy()
output.append(frame)
return output
def mfcc_mcu(data, \
fs, nSamples, frame_len, frame_step, frame_count, \
fft_len, \
mel_nbins, mel_lower_hz, mel_upper_hz, mel_mtx_scale, use_log=False):
"""
    Runs windowed mfcc on a stream of data, with similar calculation to MCU and scaled to match
output of MCU
data input data
fs input sample rate
nSamples number of samples in input
frame_len length of each frame
frame_step how many samples to advance the frame
frame_count how many frames to compute
fft_len length of FFT, ideally frame_len
mel_nbins number of mel filter banks to create
mel_lower_hz lowest frequency of mel bank
mel_upper_hz highest frequency of mel bank
"""
# Calculate number of frames
if frame_count == 0:
frame_count = 1 + (nSamples - frame_len) // frame_step
output = []
# calculate mel matrix
mel_weight_matrix = mel_mtx_scale*gen_mel_weight_matrix(num_mel_bins=mel_nbins,
num_spectrogram_bins=frame_len//2+1, sample_rate=fs,
lower_edge_hertz=mel_lower_hz, upper_edge_hertz=mel_upper_hz)
# Iterate over each frame of data
for frame_ctr in range(frame_count):
frame = {}
frame['t_start'] = frame_ctr*frame_step/fs
frame['t_end'] = (frame_ctr*frame_step+frame_len)/fs
# get chunk of data
chunk = np.array(data[frame_ctr*frame_step : frame_ctr*frame_step+frame_len])
sample_size = chunk.shape[0]
# calculate FFT
frame['fft'] = 1.0/1024*np.fft.fft(chunk)
        # calculate spectrogram
spectrogram = 1.0/np.sqrt(2)*np.abs(frame['fft'])
frame['spectrogram'] = spectrogram
num_spectrogram_bins = len(frame['spectrogram'])
# calculate mel weights
frame['mel_weight_matrix'] = mel_weight_matrix
# dot product of spectrum and mel matrix to get mel spectrogram
mel_spectrogram = np.dot(spectrogram[:(sample_size//2)+1], mel_weight_matrix)
mel_spectrogram /= mel_mtx_scale
frame['mel_spectrogram'] = mel_spectrogram
        # log(x) is intentionally left out to save computation resources
if use_log:
mel_spectrogram = np.log(mel_spectrogram+1e-6)
frame['log_mel_spectrogram'] = mel_spectrogram
# calculate DCT-II
mfcc = 1.0/64*dct(mel_spectrogram, type=2)
frame['mfcc'] = mfcc
# Add frame to output list
output.append(frame)
return output
def dct2Makhoul(x):
"""
    Calculate DCT-II using an N-point FFT as in "A Fast Cosine Transform in One and Two Dimensions" - Makhoul 1980
Source: https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft
"""
N = x.shape[0]
k = np.arange(N)
v = np.empty_like(x)
v[:(N-1)//2+1] = x[::2]
if N % 2: # odd length
v[(N-1)//2+1:] = x[-2::-2]
else: # even length
v[(N-1)//2+1:] = x[::-2]
V = np.fft.fft(v)
Vr = V * 2 * np.exp(-1j*np.pi*k/(2*N))
return Vr.real, v, V
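if __name__ == '__main__':
    # Quick self-check (illustrative): the Makhoul construction above should
    # match scipy's unnormalised type-II DCT for both even and odd lengths.
    for n in (8, 9):
        x = np.random.randn(n)
        expected = dct(x, type=2)
        got, _, _ = dct2Makhoul(x)
        assert np.allclose(got, expected), (got, expected)
    print('dct2Makhoul matches scipy.fftpack.dct(..., type=2)')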
| 36.835777
| 133
| 0.699944
|
794d1874242c368798f7f24525a407f9eb8dbe45
| 95
|
py
|
Python
|
test.py
|
davidcoe/arvsearth
|
72e1d938a93eecf0424585d545aa2939d7c075fd
|
[
"Unlicense"
] | null | null | null |
test.py
|
davidcoe/arvsearth
|
72e1d938a93eecf0424585d545aa2939d7c075fd
|
[
"Unlicense"
] | null | null | null |
test.py
|
davidcoe/arvsearth
|
72e1d938a93eecf0424585d545aa2939d7c075fd
|
[
"Unlicense"
] | null | null | null |
import arvsearth
if __name__ == '__main__':
import doctest
doctest.testmod(arvsearth)
| 15.833333
| 30
| 0.726316
|
794d188e1cf2569448b86adc3865abef9bd8bcf8
| 1,290
|
py
|
Python
|
supervisor/bus.py
|
agners/hassio
|
42d993575ba94159046112d5ab16fac82ff5fe57
|
[
"Apache-2.0"
] | 584
|
2020-01-31T18:53:10.000Z
|
2022-03-29T21:12:15.000Z
|
supervisor/bus.py
|
agners/hassio
|
42d993575ba94159046112d5ab16fac82ff5fe57
|
[
"Apache-2.0"
] | 1,056
|
2020-01-30T09:59:44.000Z
|
2022-03-31T10:15:32.000Z
|
supervisor/bus.py
|
agners/hassio
|
42d993575ba94159046112d5ab16fac82ff5fe57
|
[
"Apache-2.0"
] | 295
|
2020-02-03T11:30:42.000Z
|
2022-03-31T18:53:14.000Z
|
"""Bus event system."""
from __future__ import annotations
import logging
from typing import Any, Awaitable, Callable
import attr
from .const import BusEvent
from .coresys import CoreSys, CoreSysAttributes
_LOGGER: logging.Logger = logging.getLogger(__name__)
class Bus(CoreSysAttributes):
"""Handle Bus event system."""
def __init__(self, coresys: CoreSys):
"""Initialize bus backend."""
self.coresys = coresys
self._listeners: dict[BusEvent, list[EventListener]] = {}
def register_event(
self, event: BusEvent, callback: Callable[[Any], Awaitable[None]]
) -> EventListener:
"""Register callback for an event."""
listener = EventListener(event, callback)
self._listeners.setdefault(event, []).append(listener)
return listener
def fire_event(self, event: BusEvent, reference: Any) -> None:
"""Fire an event to the bus."""
_LOGGER.debug("Fire event '%s' with '%s'", event, reference)
for listener in self._listeners.get(event, []):
self.sys_create_task(listener.callback(reference))
@attr.s(slots=True, frozen=True)
class EventListener:
"""Event listener."""
event_type: BusEvent = attr.ib()
callback: Callable[[Any], Awaitable[None]] = attr.ib()
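# Hypothetical usage sketch (event name and coresys wiring are placeholders,
# not part of this module):
#
#     async def _on_change(reference) -> None:
#         ...
#
#     listener = coresys.bus.register_event(BusEvent.SOME_EVENT, _on_change)
#     coresys.bus.fire_event(BusEvent.SOME_EVENT, reference)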
| 29.318182
| 73
| 0.670543
|
794d1a5db76da527d03c25e4deb2280d8b45cbda
| 1,889
|
py
|
Python
|
djangoProject/assignments/forms.py
|
swasthikcnayak/team-sankalp-scl-maxo
|
4b8d66e5866ac954cbc6434da4c68d3e08a146c5
|
[
"MIT"
] | 1
|
2020-12-27T15:12:06.000Z
|
2020-12-27T15:12:06.000Z
|
djangoProject/assignments/forms.py
|
swasthikcnayak/team-sankalp-scl-maxo
|
4b8d66e5866ac954cbc6434da4c68d3e08a146c5
|
[
"MIT"
] | 3
|
2020-12-12T18:00:45.000Z
|
2021-01-06T09:52:32.000Z
|
djangoProject/assignments/forms.py
|
swasthikcnayak/team-sankalp-scl-maxo
|
4b8d66e5866ac954cbc6434da4c68d3e08a146c5
|
[
"MIT"
] | 4
|
2020-12-10T13:01:53.000Z
|
2020-12-29T19:14:09.000Z
|
from django import forms
from assignments.models import Assignment, Submission
#Creating assignments
class AssignmentCreationForm(forms.ModelForm):
class Meta:
model = Assignment
fields = ['assignment_name', 'start_time', 'end_time', 'maximum_marks', 'description', 'question']
widgets = {
'assignment_name': forms.TextInput(attrs={'placeholder': 'Assignment Name'}),
'description': forms.Textarea(attrs={'placeholder': 'Give some description', 'cols': '20', 'rows': '5'}),
'maximum_marks': forms.NumberInput(attrs={'placeholder': 'Maximum Marks'}),
}
def __init__(self, *args, **kwargs):
super(AssignmentCreationForm, self).__init__(*args, **kwargs)
self.fields['description'].required = False
self.fields['assignment_name'].label = "Assignment name"
self.fields['description'].label = "Provide some details for student"
self.fields['maximum_marks'].label = "Maximum marks possible"
self.fields['start_time'].label = "Assignment starts at"
self.fields['end_time'].label = "Assignment ends at"
self.fields['question'].label = "Upload question paper"
#Updating marks
class MarksUpdateForm(forms.ModelForm):
class Meta:
model = Submission
fields = ['student', 'marks_obtained']
def __init__(self, *args, **kwargs):
super(MarksUpdateForm, self).__init__(*args, **kwargs)
self.fields['student'].label = "Select the student"
self.fields['marks_obtained'].label = "Set the marks"
#submitting new assignment
class AssignmentSubmissionForm(forms.ModelForm):
class Meta:
model = Submission
fields = ['answer']
def __init__(self, *args, **kwargs):
super(AssignmentSubmissionForm, self).__init__(*args, **kwargs)
self.fields['answer'].label = "Upload answer script"
| 41.977778
| 117
| 0.663843
|
794d1b16ac75e4c9977e6b246051ef02897283a9
| 3,932
|
py
|
Python
|
PYTHONG/GUI/Builder.py
|
roadkillsanta/JAPY
|
1a5383d7fc3a1c08c689f609cdcbb7d58fb84956
|
[
"Apache-2.0"
] | 1
|
2016-10-03T23:00:44.000Z
|
2016-10-03T23:00:44.000Z
|
PYTHONG/GUI/Builder.py
|
roadkillsanta/JAPY
|
1a5383d7fc3a1c08c689f609cdcbb7d58fb84956
|
[
"Apache-2.0"
] | null | null | null |
PYTHONG/GUI/Builder.py
|
roadkillsanta/JAPY
|
1a5383d7fc3a1c08c689f609cdcbb7d58fb84956
|
[
"Apache-2.0"
] | null | null | null |
import Tkinter as tk
from Tkinter import *
import init, time, os, sys
global gridx
global gridy
root = tk.Tk()
def textBox(text, lines, chars, row, column, rowspan=1, columnspan=1, ipadx=0, ipady=0, padx=0, pady=0, sticky=None, self=None):
    self = Text(root, height=lines, width=chars)
    self.grid(row=row, column=column, rowspan=rowspan, columnspan=columnspan, ipadx=ipadx, ipady=ipady, padx=padx,
              pady=pady, sticky=sticky)
    self.insert(END, text)
    return self
def entryBox(row, column, rowspan=1, columnspan=1, ipadx=0, ipady=0, padx=0, pady=0, sticky=None):
    bo = Entry(root)
    bo.grid(row=row, column=column, rowspan=rowspan, columnspan=columnspan, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=sticky)
    return bo
def Label(text, row=0, column=0, rowspan=1, columnspan=1, ipadx=0, ipady=0, padx=0, pady=0, sticky=None):
    # use tk.Label explicitly so this wrapper does not call itself recursively
    self = tk.Label(root, text=text)
    self.grid(row=row, column=column, rowspan=rowspan, columnspan=columnspan, ipadx=ipadx, ipady=ipady, padx=padx,
              pady=pady, sticky=sticky)
    return self
def Button(text, command, row, column, rowspan=1, columnspan=1, ipadx=0, ipady=0, padx=0, pady=0, sticky=None, self=None):
    self = tk.Button(self or root, text=text, command=command)
    self.grid(row=row, column=column, rowspan=rowspan, columnspan=columnspan, ipadx=ipadx, ipady=ipady, padx=padx,
              pady=pady, sticky=sticky)
    return self
def clock(set=False):
if set == True:
def setDate():
def setTime():
setting = True
while setting:
                    jkl = Label("Please select Timezone:", 0, 0)
                    jkl.grid(row=0, column=0)
                    mla = entryBox(1, 0)
                    mla.get()
def wid_countdown(count, gridx, gridy):
    for i in range(count):
        textBox(count, 1, 99999999999, gridx, gridy)
#gr and gc are starting cell of grid place
def wid_Login(gr, gc):
ubc = gc + 1
pl1 = gr + 1
pl2 = gc + 1
sbr = gr + 2
usernameL=Label("Username:", gr, gc)
usernameBox=entryBox(gr, ubc)
passwordL=Label("Password:", pl1, gc)
passwordBox=entryBox(pl1, pl2)
def getUsername():
usernameBox.get()
def getPassword():
passwordBox.get()
def checkValues():
username=usernameBox.get()
password=passwordBox.get()
loginint=init.login(username, password)
if(loginint==0):
unload()
lo1=Label("Login Successful", gr, gc, 1, 2)
time.sleep(5)
lo1.grid_remove()
userwelcome="Welcome "+username+"!"
lo2=Label(userwelcome, gr, gc, 1, 2)
loginButton=Button("Login", checkValues(), sbr, pl2)
registerButton=Button("Register", init.register(), sbr, gc)
def unload():
usernameL.grid_remove()
usernameBox.grid_remove()
passwordL.grid_remove()
passwordBox.grid_remove()
loginButton.grid_remove()
registerButton.grid_remove()
def Menu():
console = Text(root, height=16)
console.grid(row=1, column=0, rowspan=4, columnspan=3)
console.see(END)
consoleL = Label("Console:")
consoleL.grid(row=0, column=1)
def redirect(out=sys.stdout.write):
console.insert(INSERT, out)
inpt = Entry(root)
inpt.grid(row=5, column=0, columnspan=2)
    def getInp(): return inpt.get()
    global input
    input = getInp()
enterSign = "⏎"
enterCommand = Button(enterSign, input, 4, 1, 1, 1, 0, 0, 0, 0)
clock = clock()
wid_clock = Button(clock, clock.setTime(), 8, 12)
quit = Button(quit, "Exit ⇨", init.exit(), 0, 12)
def unload():
consoleL.grid_remove()
console.grid_remove()
inpt.grid_remove()
enterCommand.grid_remove()
wid_clock.grid_remove()
quit.grid_remove()
def mustLogin(self):
Label(parent, text=input()).pack()
entry = Entry(parent, **options)
if width:
entry.config(width=width)
entry.grid()
return entry
def renderStuffs(screen):
    if screen == 0:
        Menu()
| 29.787879
| 138
| 0.623093
|
794d1ded48b8c8cefd36c4052d8bb72b409ef5a3
| 1,491
|
py
|
Python
|
backend/prompt/urls.py
|
yashpatel7025/django-prompt
|
07fef6967d6bfe1d3bb4f663470e7b26c1aa25c2
|
[
"MIT"
] | null | null | null |
backend/prompt/urls.py
|
yashpatel7025/django-prompt
|
07fef6967d6bfe1d3bb4f663470e7b26c1aa25c2
|
[
"MIT"
] | null | null | null |
backend/prompt/urls.py
|
yashpatel7025/django-prompt
|
07fef6967d6bfe1d3bb4f663470e7b26c1aa25c2
|
[
"MIT"
] | null | null | null |
"""prompt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from project.views import *
feedback_request_list = FeedbackRequestViewSet.as_view({
'get': 'get_list'
})
feedback_request_detail = FeedbackRequestViewSet.as_view({
'get': 'get_detail',
'patch': 'patch_pick_up_request'
})
urlpatterns = [
path('admin/', admin.site.urls),
path('', HomeView.as_view(), name='home'),
path('login/', LoginView.as_view(), name='user-login'),
path('logout/', LogoutView.as_view(), name='user-logout'),
path('platform/', PlatformView.as_view(), name='platform'),
path('api/feedback-request/',feedback_request_list, name='feedback-request-list'),
path('api/feedback-request/<int:pk>/',feedback_request_detail, name='feedback-request-detail'),
path('api/feedback-request/comment/', CommentView.as_view(), name='feedback-request-comment'),
]
| 36.365854
| 99
| 0.715627
|
794d1ea78efbba4cd5017f22273c660e0c444f0b
| 11,321
|
py
|
Python
|
src/onevision/cv/imgproc/filtering/filter.py
|
phlong3105/onevision
|
90552b64df7213e7fbe23c80ffd8a89583289433
|
[
"MIT"
] | 2
|
2022-03-28T09:46:38.000Z
|
2022-03-28T14:12:32.000Z
|
src/onevision/cv/imgproc/filtering/filter.py
|
phlong3105/onevision
|
90552b64df7213e7fbe23c80ffd8a89583289433
|
[
"MIT"
] | null | null | null |
src/onevision/cv/imgproc/filtering/filter.py
|
phlong3105/onevision
|
90552b64df7213e7fbe23c80ffd8a89583289433
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import annotations
import torch
import torch.nn.functional as F
from torch import Tensor
from onevision.cv.imgproc.filtering.kernels import normalize_kernel2d
__all__ = [
"filter2d",
"filter2d_separable",
"filter3d",
]
# MARK: - Functional
def _compute_padding(kernel_size: list[int]) -> list[int]:
"""Compute padding tuple.
https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad
kernel_size (list[int]):
4 or 6 ints (padding_left, padding_right, padding_top, padding_bottom).
"""
if len(kernel_size) < 2:
raise ValueError(f"Length of `kernel_size` must >= 2. But got: {kernel_size}.")
computed = [k // 2 for k in kernel_size]
# For even kernels we need to do asymmetric padding :(
out_padding = 2 * len(kernel_size) * [0]
for i in range(len(kernel_size)):
computed_tmp = computed[-(i + 1)]
if kernel_size[i] % 2 == 0:
padding = computed_tmp - 1
else:
padding = computed_tmp
out_padding[2 * i + 0] = padding
out_padding[2 * i + 1] = computed_tmp
return out_padding
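# e.g. _compute_padding([3, 3]) == [1, 1, 1, 1]; even kernel sizes are padded
# asymmetrically, e.g. _compute_padding([4, 4]) == [1, 2, 1, 2].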
def filter2d(
image : Tensor,
kernel : Tensor,
border_type: str = "reflect",
normalized : bool = False,
padding : str = "same"
) -> Tensor:
"""Convolve an image with a 2d kernel.
Function applies a given kernel to an image. Kernel is applied independently
at each depth channel of the image. Before applying the kernel, the function
applies padding according to the specified mode so that the output remains
in the same shape.
Args:
image (Tensor[B, C, H, W]):
Input image.
kernel (Tensor[B, kH, kW]):
Kernel to be convolved with the input image.
border_type (str):
Padding mode to be applied before convolving.
One of: [`constant`, `reflect`, `replicate`, or `circular`].
normalized (bool):
If `True`, kernel will be L1 normalized.
padding (str):
This defines the type of padding. One of: [`same`, `valid`].
Return:
out (Tensor[B, C, H, W]):
Convolved image of same size and numbers of channels as the
input with shape [B, C, H, W].
Example:
>>> input = torch.tensor([[[
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 5., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],]]])
>>> kernel = torch.ones(1, 3, 3)
>>> filter2d(input, kernel, padding='same')
image([[[[0., 0., 0., 0., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 0., 0., 0., 0.]]]])
"""
if not isinstance(image, Tensor):
raise TypeError(f"`image` must be a `Tensor`. But got: {type(image)}.")
if not isinstance(kernel, Tensor):
raise TypeError(f"`kernel` must be a `Tensor`. But got: {type(kernel)}.")
if not isinstance(border_type, str):
raise TypeError(f"`border_type` must be a `str`. But got: {type(border_type)}.")
if border_type not in ["constant", "reflect", "replicate", "circular"]:
raise ValueError(f"`border_type` must be one of: `constant`, `reflect`, "
f"`replicate`, `circular`. But got: {border_type}.")
if not isinstance(padding, str):
raise TypeError(f"`padding` must be a `str`. But got: {type(padding)}.")
if padding not in ["valid", "same"]:
raise ValueError(f"`padding` must be `valid` or `same`. But got: {padding}.")
if not image.ndim == 4:
raise ValueError(f"`image` must have the shape of [B, C, H, W]. "
f"But got: {image.shape}.")
if ((not len(kernel.shape) == 3) and
not ((kernel.shape[0] == 0) or (kernel.shape[0] == image.shape[0]))):
raise ValueError(f"`kernel` must have the shape of [1, H. W] or "
f"[B, H, W]. But got: {kernel.shape}.")
# Prepare kernel
b, c, h, w = image.shape
tmp_kernel = kernel.unsqueeze(1).to(image)
if normalized:
tmp_kernel = normalize_kernel2d(tmp_kernel)
tmp_kernel = tmp_kernel.expand(-1, c, -1, -1)
height, width = tmp_kernel.shape[-2:]
# Pad the input image
if padding == "same":
padding_shape = _compute_padding([height, width])
image = F.pad(image, padding_shape, mode=border_type)
# Kernel and input image reshape to align element-wise or batch-wise params
tmp_kernel = tmp_kernel.reshape(-1, 1, height, width)
image = image.view(-1, tmp_kernel.size(0), image.size(-2), image.size(-1))
# Convolve the image with the kernel.
output = F.conv2d(image, tmp_kernel, groups=tmp_kernel.size(0), padding=0,
stride=1)
if padding == "same":
out = output.view(b, c, h, w)
else:
out = output.view(b, c, h - height + 1, w - width + 1)
return out
def filter2d_separable(
image : Tensor,
kernel_x : Tensor,
kernel_y : Tensor,
border_type: str = "reflect",
normalized : bool = False,
padding : str = "same"
) -> Tensor:
"""Convolve an image with two 1d kernels, in x and y directions.
    Function applies a given kernel to an image. Kernel is applied independently
at each depth channel of the image. Before applying the kernel, the function
applies padding according to the specified mode so that the output remains
in the same shape.
Args:
image (Tensor[B, C, H, W]):
Input image.
kernel_x (Tensor[B, kW]):
Kernel to be convolved with the input image.
kernel_y (Tensor[B, kH]):
Kernel to be convolved with the input image.
border_type (str):
Padding mode to be applied before convolving.
One of: [`constant`, `reflect`, `replicate`, or `circular`].
normalized (bool):
If `True`, kernel will be L1 normalized.
padding (str):
This defines the type of padding. One of: [`same`, `valid`].
Return:
out (Tensor[B, C, H, W]):
Convolved image of same size and numbers of channels as the input.
Example:
>>> input = torch.tensor([[[
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 5., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],]]])
>>> kernel = torch.ones(1, 3)
>>> filter2d_separable(input, kernel, kernel, padding='same')
image([[[[0., 0., 0., 0., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 0., 0., 0., 0.]]]])
"""
out_x = filter2d(image, kernel_x.unsqueeze(0), border_type, normalized,
padding)
out = filter2d(out_x, kernel_y.unsqueeze(-1), border_type, normalized,
padding)
return out
def filter3d(
image : Tensor,
kernel : Tensor,
border_type: str = "replicate",
normalized : bool = False
) -> Tensor:
"""Convolve an image with a 3d kernel.
Function applies a given kernel to an image. Kernel is applied independently
at each depth channel of the image. Before applying the kernel, the function
applies padding according to the specified mode so that the output remains
in the same shape.
Args:
image (Tensor[B, C, D, H, W]):
Input image.
kernel (Tensor[B, kD, kH, kW]):
Kernel to be convolved with the input image.
border_type (str):
Padding mode to be applied before convolving.
One of: [`constant`, `replicate`, or `circular`].
normalized (bool):
If `True`, kernel will be L1 normalized.
Return:
        out (Tensor[B, C, D, H, W]):
Convolved image of same size and numbers of channels as the input.
Example:
>>> input = torch.tensor([[[
... [[0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.]],
... [[0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 5., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.]],
... [[0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.]]
... ]]])
>>> kernel = torch.ones(1, 3, 3, 3)
>>> filter3d(input, kernel)
        tensor([[[[[0., 0., 0., 0., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 0., 0., 0., 0.]],
        <BLANKLINE>
                  [[0., 0., 0., 0., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 0., 0., 0., 0.]],
        <BLANKLINE>
                  [[0., 0., 0., 0., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 0., 0., 0., 0.]]]]])
"""
    if not isinstance(image, Tensor):
        raise TypeError(f"`image` must be a `Tensor`. But got: {type(image)}.")
    if not isinstance(kernel, Tensor):
        raise TypeError(f"`kernel` must be a `Tensor`. But got: {type(kernel)}.")
    if not isinstance(border_type, str):
        raise TypeError(f"`border_type` must be a `str`. "
                        f"But got: {type(border_type)}.")
if not len(image.shape) == 5:
raise ValueError(f"Invalid input shape, we expect [B, C, D, H, W]. "
f"Got: {image.shape}")
if not len(kernel.shape) == 4 and kernel.shape[0] != 1:
raise ValueError(f"Invalid kernel shape, we expect [1, D, H, W]. "
f"Got: {kernel.shape}")
# Prepare kernel
b, c, d, h, w = image.shape
tmp_kernel = kernel.unsqueeze(1).to(image)
if normalized:
bk, dk, hk, wk = kernel.shape
tmp_kernel = normalize_kernel2d(
tmp_kernel.view(bk, dk, hk * wk)
).view_as(tmp_kernel)
tmp_kernel = tmp_kernel.expand(-1, c, -1, -1, -1)
# Pad the input image
depth, height, width = tmp_kernel.shape[-3:]
padding_shape = _compute_padding([depth, height, width])
input_pad = F.pad(image, padding_shape, mode=border_type)
# Kernel and input image reshape to align element-wise or batch-wise params
tmp_kernel = tmp_kernel.reshape(-1, 1, depth, height, width)
input_pad = input_pad.view(
-1, tmp_kernel.size(0), input_pad.size(-3), input_pad.size(-2),
input_pad.size(-1)
)
# Convolve the image with the kernel.
output = F.conv3d(
input_pad, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1
)
return output.view(b, c, d, h, w)
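# Editor's sketch: usage mirrors the 2d case above; a normalized 3x3x3 box kernel
# averages each 3x3x3 neighbourhood of the volume:
#
#     >>> vol = torch.zeros(1, 1, 3, 5, 5)
#     >>> vol[0, 0, 1, 2, 2] = 27.0
#     >>> kernel = torch.ones(1, 3, 3, 3)
#     >>> filter3d(vol, kernel, normalized=True)[0, 0, 1, 2, 2]
#     tensor(1.)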
| 35.712934
| 88
| 0.514177
|
794d1f0bac4c187350a0638abb0682a58cc74e6f
| 8,119
|
py
|
Python
|
evap/evaluation/tests/test_auth.py
|
PonderKoKo/EvaP
|
3e44eb660eec244846fdf10a1460f2e32dcfdef0
|
[
"MIT"
] | null | null | null |
evap/evaluation/tests/test_auth.py
|
PonderKoKo/EvaP
|
3e44eb660eec244846fdf10a1460f2e32dcfdef0
|
[
"MIT"
] | null | null | null |
evap/evaluation/tests/test_auth.py
|
PonderKoKo/EvaP
|
3e44eb660eec244846fdf10a1460f2e32dcfdef0
|
[
"MIT"
] | null | null | null |
import urllib
from unittest.mock import patch
from django.conf import settings
from django.contrib.auth.models import Group
from django.core import mail
from django.test import override_settings
from django.urls import reverse
from model_bakery import baker
from evap.evaluation import auth
from evap.evaluation.models import Contribution, Evaluation, UserProfile
from evap.evaluation.tests.tools import WebTest
@override_settings(PASSWORD_HASHERS=["django.contrib.auth.hashers.MD5PasswordHasher"])
class LoginTests(WebTest):
csrf_checks = False
@classmethod
def setUpTestData(cls):
cls.external_user = baker.make(UserProfile, email="extern@extern.com")
cls.external_user.ensure_valid_login_key()
cls.inactive_external_user = baker.make(UserProfile, email="inactive@extern.com", is_active=False)
cls.inactive_external_user.ensure_valid_login_key()
evaluation = baker.make(Evaluation, state=Evaluation.State.PUBLISHED)
baker.make(
Contribution,
evaluation=evaluation,
contributor=iter([cls.external_user, cls.inactive_external_user]),
role=Contribution.Role.EDITOR,
textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
_quantity=2,
_bulk_create=True,
)
@override_settings(PAGE_URL="https://example.com")
def test_login_url_generation(self):
generated_url = self.external_user.login_url
self.assertEqual(generated_url, f"https://example.com/key/{self.external_user.login_key}")
reversed_url = reverse("evaluation:login_key_authentication", args=[self.external_user.login_key])
self.assertEqual(reversed_url, f"/key/{self.external_user.login_key}")
def test_login_url_works(self):
self.assertRedirects(self.app.get(reverse("contributor:index")), "/?next=/contributor/")
url_with_key = reverse("evaluation:login_key_authentication", args=[self.external_user.login_key])
old_login_key = self.external_user.login_key
old_login_key_valid_until = self.external_user.login_key_valid_until
page = self.app.get(url_with_key)
self.external_user.refresh_from_db()
self.assertEqual(old_login_key, self.external_user.login_key)
self.assertEqual(old_login_key_valid_until, self.external_user.login_key_valid_until)
self.assertContains(page, "Login")
self.assertContains(page, self.external_user.full_name)
page = self.app.post(url_with_key).follow().follow()
self.assertContains(page, "Logout")
self.assertContains(page, self.external_user.full_name)
def test_login_key_valid_only_once(self):
page = self.app.get(reverse("evaluation:login_key_authentication", args=[self.external_user.login_key]))
self.assertContains(page, self.external_user.full_name)
url_with_key = reverse("evaluation:login_key_authentication", args=[self.external_user.login_key])
page = self.app.post(url_with_key).follow().follow()
self.assertContains(page, "Logout")
page = self.app.get(reverse("django-auth-logout")).follow()
self.assertNotContains(page, "Logout")
page = self.app.get(url_with_key).follow()
self.assertContains(page, "The login URL is not valid anymore.")
self.assertEqual(len(mail.outbox), 1) # a new login key was sent
new_key = UserProfile.objects.get(id=self.external_user.id).login_key
page = self.app.post(reverse("evaluation:login_key_authentication", args=[new_key])).follow().follow()
self.assertContains(page, self.external_user.full_name)
def test_inactive_external_users_can_not_login(self):
page = self.app.get(
reverse("evaluation:login_key_authentication", args=[self.inactive_external_user.login_key])
).follow()
self.assertContains(page, "Inactive users are not allowed to login")
self.assertNotContains(page, "Logout")
def test_login_key_resend_if_still_valid(self):
old_key = self.external_user.login_key
page = self.app.post("/", params={"submit_type": "new_key", "email": self.external_user.email}).follow()
new_key = UserProfile.objects.get(id=self.external_user.id).login_key
self.assertEqual(old_key, new_key)
self.assertEqual(len(mail.outbox), 1) # a login key was sent
self.assertContains(page, "We sent you an email with a one-time login URL. Please check your inbox.")
@override_settings(
OIDC_OP_AUTHORIZATION_ENDPOINT="https://oidc.example.com/auth",
ACTIVATE_OPEN_ID_LOGIN=True,
)
def test_oidc_login(self):
# This should send them to /oidc/authenticate
page = self.app.get("/").click("Login")
        # which should then redirect them to OIDC_OP_AUTHORIZATION_ENDPOINT
location = page.headers["location"]
self.assertIn(settings.OIDC_OP_AUTHORIZATION_ENDPOINT, location)
parse_result = urllib.parse.urlparse(location)
parsed_query = urllib.parse.parse_qs(parse_result.query)
self.assertIn("email", parsed_query["scope"][0].split(" "))
self.assertIn("/oidc/callback/", parsed_query["redirect_uri"][0])
state = parsed_query["state"][0]
user = baker.make(UserProfile)
# usually, the browser would now open that page and login. Then, they'd be redirected to /oidc/callback
with patch.object(auth.OIDCAuthenticationBackend, "authenticate", return_value=user, __name__="authenticate"):
page = self.app.get(f"/oidc/callback/?code=secret-code&state={state}")
# The oidc module will now send a request to the oidc provider, asking whether the code is valid.
# We've mocked the method that does that and will just return a UserProfile.
# Thus, at this point, the user should be logged in and be redirected back to the start page.
location = page.headers["location"]
parse_result = urllib.parse.urlparse(location)
self.assertEqual(parse_result.path, "/")
page = self.app.get(location)
        # A GET here should then redirect to the user's real start page.
        # This should be a 403 since the user is external and has no course participation.
page = page.follow(status=403)
# user should see the Logout button then.
self.assertIn("Logout", page.body.decode())
@override_settings(PASSWORD_HASHERS=["django.contrib.auth.hashers.MD5PasswordHasher"])
class LoginTestsWithCSRF(WebTest):
@classmethod
def setUpTestData(cls):
cls.staff_user = baker.make(
UserProfile, email="staff@institution.example.com", groups=[Group.objects.get(name="Manager")]
)
cls.staff_user_password = "staff"
cls.staff_user.set_password(cls.staff_user_password)
cls.staff_user.save()
def test_entering_staff_mode_after_logout_and_login(self):
"""
Asserts that managers can enter the staff mode after logging out and logging in again.
Regression test for #1530.
"""
page = self.app.get(reverse("evaluation:index"))
form = page.forms["email-login-form"]
form["email"] = self.staff_user.email
form["password"] = self.staff_user_password
page = form.submit().follow().follow()
# staff user should now be logged in and see the logout button
self.assertContains(page, "Logout")
# log out user
page = self.app.get(reverse("django-auth-logout")).follow()
self.assertNotContains(page, "Logout")
# log user in again
page = self.app.get(reverse("evaluation:index"))
form = page.forms["email-login-form"]
form["email"] = self.staff_user.email
form["password"] = self.staff_user_password
page = form.submit().follow().follow()
# enter staff mode
page = page.forms["enter-staff-mode-form"].submit().follow().follow()
self.assertTrue("staff_mode_start_time" in self.app.session)
self.assertContains(page, "Users")
| 45.61236
| 118
| 0.6975
|
794d20b6c2b3ee1594a307763cfab85c129b5a81
| 2,093
|
py
|
Python
|
aliyun-python-sdk-das/aliyunsdkdas/request/v20200116/GetQueryOptimizeExecErrorSampleRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-das/aliyunsdkdas/request/v20200116/GetQueryOptimizeExecErrorSampleRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-das/aliyunsdkdas/request/v20200116/GetQueryOptimizeExecErrorSampleRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdas.endpoint import endpoint_data
class GetQueryOptimizeExecErrorSampleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'DAS', '2020-01-16', 'GetQueryOptimizeExecErrorSample','das')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SqlId(self):
return self.get_query_params().get('SqlId')
def set_SqlId(self,SqlId):
self.add_query_param('SqlId',SqlId)
def get_ConsoleContext(self):
return self.get_query_params().get('ConsoleContext')
def set_ConsoleContext(self,ConsoleContext):
self.add_query_param('ConsoleContext',ConsoleContext)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_Engine(self):
return self.get_query_params().get('Engine')
def set_Engine(self,Engine):
self.add_query_param('Engine',Engine)
def get_Time(self):
return self.get_query_params().get('Time')
def set_Time(self,Time):
self.add_query_param('Time',Time)
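# Editor's note: a minimal usage sketch, kept as a comment because it needs real
# DAS credentials and a reachable endpoint; all parameter values below are
# placeholders, not actual IDs:
#
#     from aliyunsdkcore.client import AcsClient
#
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = GetQueryOptimizeExecErrorSampleRequest()
#     request.set_InstanceId('<instance-id>')
#     request.set_SqlId('<sql-id>')
#     request.set_Engine('MySQL')
#     request.set_Time('<timestamp-in-ms>')
#     response = client.do_action_with_exception(request)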
| 33.758065
| 90
| 0.760153
|
794d21fb1c97d16a25443f88ec5dccad5d4ec07a
| 1,365
|
py
|
Python
|
test/test_skill_entry.py
|
NeonDaniel/ovos_skill_manager
|
4be30da451c5620f12f5029874414a4503981388
|
[
"Apache-2.0"
] | null | null | null |
test/test_skill_entry.py
|
NeonDaniel/ovos_skill_manager
|
4be30da451c5620f12f5029874414a4503981388
|
[
"Apache-2.0"
] | null | null | null |
test/test_skill_entry.py
|
NeonDaniel/ovos_skill_manager
|
4be30da451c5620f12f5029874414a4503981388
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from ovos_skills_manager.skill_entry import SkillEntry
# TODO setup a test skill repo, since a random url can simply vanish or be
# modified
class TestGithubBranchParsing(unittest.TestCase):
def test_url(self):
branch_from_json = "https://github.com/JarbasSkills/skill-bandcamp"
branch_from_tree = "https://github.com/JarbasSkills/skill-ddg/tree/v0.1.0"
commit_from_blob = "https://github.com/OpenVoiceOS/OVOS-skills-store/blob" \
"/f4ab4ea00e47955798c9906c8c03807391bc20f0/skill-icanhazdadjokes.json"
branch_from_git = "https://github.com/NeonGeckoCom/caffeinewiz.neon@dev"
# should get branch defined in https://github.com/JarbasSkills/skill-ddg/blob/master/res/desktop/skill.json
entry = SkillEntry.from_github_url(branch_from_json)
self.assertEqual(entry.branch, "v0.3.1")
# should match commit pinned in url
entry = SkillEntry.from_github_url(commit_from_blob)
self.assertEqual(entry.branch, "f4ab4ea00e47955798c9906c8c03807391bc20f0")
# dev branch implicit in url
entry = SkillEntry.from_github_url(branch_from_git)
self.assertEqual(entry.branch, "dev")
# github release implicit in url
entry = SkillEntry.from_github_url(branch_from_tree)
self.assertEqual(entry.branch, "v0.1.0")
| 40.147059
| 115
| 0.721612
|
794d22138aff2d55c69b99da4853b8f10a1802d9
| 20
|
py
|
Python
|
test/__init__.py
|
superkerokero/hinabe
|
9fa00d1cbdee2b046426c8ed0c7c269556125337
|
[
"MIT"
] | 26
|
2018-02-17T02:47:44.000Z
|
2021-12-30T16:31:56.000Z
|
test/__init__.py
|
superkerokero/hinabe
|
9fa00d1cbdee2b046426c8ed0c7c269556125337
|
[
"MIT"
] | 7
|
2018-10-09T14:09:10.000Z
|
2022-02-17T07:25:54.000Z
|
test/__init__.py
|
superkerokero/hinabe
|
9fa00d1cbdee2b046426c8ed0c7c269556125337
|
[
"MIT"
] | 3
|
2021-09-13T02:20:07.000Z
|
2022-02-22T06:43:20.000Z
|
"""
Test module
"""
| 5
| 11
| 0.5
|
794d2233d344593821b4bd386d38fcb40a2c9dfd
| 1,305
|
py
|
Python
|
minecraft/backend/trace_player_other.py
|
Utmost-Happiness-Planet/pyCraft-UHP
|
dad580e8114e904bbb052fc0d27f1ff8a93dd953
|
[
"Apache-2.0"
] | 5
|
2022-01-17T05:45:48.000Z
|
2022-02-19T03:33:02.000Z
|
minecraft/backend/trace_player_other.py
|
Utmost-Happiness-Planet/pyCraft-UHP
|
dad580e8114e904bbb052fc0d27f1ff8a93dd953
|
[
"Apache-2.0"
] | 1
|
2022-01-15T15:05:23.000Z
|
2022-01-21T06:02:49.000Z
|
minecraft/backend/trace_player_other.py
|
Utmost-Happiness-Planet/pyCraft-UHP
|
dad580e8114e904bbb052fc0d27f1ff8a93dd953
|
[
"Apache-2.0"
] | null | null | null |
from minecraft.networking.connection import Connection
from minecraft.networking.packets.clientbound.play import SpawnPlayerPacket, EntityPositionDeltaPacket
from .Player import Player, PlayerSelf
def register_connection(c: Connection, player: PlayerSelf):
connection = c
"""
TODO:
    Track other players' position info in real time and submit it to player_list.
"""
@connection.listener(SpawnPlayerPacket)
def player_set(_p):
p = Player()
p.set_id(_p.entity_id)
p.set_uuid(_p.player_UUID)
p.set_position([_p.x, _p.y, _p.z])
p.set_rotation([_p.yaw, _p.pitch])
c.add_player(p)
print(f"玩家 {p.uuid} 出现,位置:x={_p.x} y={_p.y} z={_p.z} yaw={_p.yaw} pitch={_p.pitch}")
print(_p.entity_id)
    # TODO: coordinates cannot yet be aligned with the server
# @connection.listener(EntityPositionDeltaPacket)
# def get_player_position(_p):
# if _p.delta_x_float != 0 and _p.delta_y_float != 0 and _p.delta_z_float != 0:
# for i in c.player_list:
# if c.player_list[i].get_id() == _p.entity_id:
# c.player_list[i].set_position(
# c.player_list[i] + [_p.delta_x_float, _p.delta_y_float, _p.delta_z_float])
# print(f"{c.player_list[i].get_uuid()} - {[_p.delta_x_float, _p.delta_y_float, _p.delta_z_float]}")
| 38.382353
| 120
| 0.634483
|
794d226d11aeb72089609a2493b953cd6462a253
| 34,893
|
py
|
Python
|
jupyter/.build/config/jupyter_notebook_config.py
|
sdwalker62/Log-Diagnostics-Archive
|
50f898435901e130d9f78059a6fc243a51ad8701
|
[
"MIT"
] | 25
|
2019-09-10T02:27:51.000Z
|
2022-03-16T05:28:31.000Z
|
jupyter/.build/config/jupyter_notebook_config.py
|
sdwalker62/Log-Diagnostics-Archive
|
50f898435901e130d9f78059a6fc243a51ad8701
|
[
"MIT"
] | 15
|
2021-05-07T15:07:32.000Z
|
2022-03-08T17:32:36.000Z
|
jupyter/.build/config/jupyter_notebook_config.py
|
sdwalker62/Log-Diagnostics-Archive
|
50f898435901e130d9f78059a6fc243a51ad8701
|
[
"MIT"
] | 12
|
2019-11-05T12:36:41.000Z
|
2022-02-09T20:26:49.000Z
|
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Allow password to be changed at login for the notebook server.
#
# While logging in with a token, the notebook server UI will give the opportunity
# to the user to enter a new password at the same time that will replace the
# token login mechanism.
#
# This can be set to false to prevent changing password from the UI/API.
#c.NotebookApp.allow_password_change = True
## Allow requests where the Host header doesn't point to a local server
#
# By default, requests get a 403 forbidden response if the 'Host' header shows
# that the browser thinks it's on a non-local domain. Setting this option to
# True disables this check.
#
# This protects against 'DNS rebinding' attacks, where a remote web server
# serves you a page and then changes its DNS to send later requests to a local
# IP, bypassing same-origin checks.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local, along
# with hostnames configured in local_hostnames.
#c.NotebookApp.allow_remote_access = False
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## Override URL shown to users.
#
# Replace actual URL, including protocol, address, port and base URL, with the
# given value when displaying URL to the users. Do not change the actual
# connection URL. If authentication token is enabled, the token is added to the
# custom URL automatically.
#
# This option is intended to be used when the URL to display to the user cannot
# be determined reliably by the Jupyter notebook server (proxified or
# containerized setups for example).
#c.NotebookApp.custom_display_url = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## handlers that should be loaded at higher priority than the default services
#c.NotebookApp.extra_services = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Extra keyword arguments to pass to `get_secure_cookie`. See tornado's
# get_secure_cookie docs for details.
#c.NotebookApp.get_secure_cookie_kwargs = {}
## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which stream output can be sent on iopub before
# they are limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
#c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## Hostnames to allow as local when allow_remote_access is False.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted as
# local as well.
#c.NotebookApp.local_hostnames = ['localhost']
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Sets the maximum allowed size of the client request body, specified in the
# Content-Length request header field. If the size in a request exceeds the
# configured value, a malformed HTTP message is returned to the client.
#
# Note: max_body_size is applied even in streaming mode.
#c.NotebookApp.max_body_size = 536870912
## Gets or sets the maximum amount of memory, in bytes, that is allocated for
# use by the buffer manager.
#c.NotebookApp.max_buffer_size = 536870912
## Dict of Python modules to load as notebook server extensions. Entry values can
# be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = ''
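## Example (editor's sketch): generate a hash with
#
#   python -c "from notebook.auth import passwd; print(passwd())"
#
# and paste the resulting 'type:salt:hashed-password' string, e.g.:
#
#c.NotebookApp.password = 'sha1:<salt>:<hash>'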
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## If True, display a button in the dashboard to quit (shutdown the notebook
# server).
#c.NotebookApp.quit_button = True
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Shut down the server after N seconds with no kernels or terminals running and
# no activity. This can be used together with culling idle kernels
# (MappingKernelManager.cull_idle_timeout) to shutdown the notebook server when
# it's not in use. This is not precisely timed: it may shut down up to a minute
# later. 0 (the default) disables this automatic shutdown.
#c.NotebookApp.shutdown_no_activity_timeout = 0
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
c.NotebookApp.terminado_settings = {'shell_command': ['/bin/bash']}
## Set to False to disable terminals.
#
# This does *not* make the notebook server more secure by itself. Anything the
# user can do in a terminal, they can also do in a notebook.
#
# Terminals may also be automatically disabled if the terminado package is not
# available.
#c.NotebookApp.terminals_enabled = True
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## Specify where to open the notebook on startup. This is the `new` argument
# passed to the standard library method `webbrowser.open`. The behaviour is not
# guaranteed, but depends on browser support. Valid values are:
#
# - 2 opens a new tab,
# - 1 opens a new window,
# - 0 opens in an existing window.
#
# See the `webbrowser.open` documentation for details.
#c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
#c.NotebookApp.websocket_compression_options = None
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# LabApp(NotebookApp) configuration
#------------------------------------------------------------------------------
## The app directory to launch JupyterLab from.
#c.LabApp.app_dir = '/usr/local/share/jupyter/lab'
## Whether to start the app in core mode. In this mode, JupyterLab will run using
# the JavaScript assets that are within the installed JupyterLab Python package.
# In core mode, third party extensions are disabled. The `--dev-mode` flag is an
# alias to this to be used when the Python package itself is installed in
# development mode (`pip install -e .`).
#c.LabApp.core_mode = False
## The default URL to redirect to from `/`
#c.LabApp.default_url = '/lab'
## Whether to start the app in dev mode. Uses the unpublished local JavaScript
# packages in the `dev_mode` folder. In this case JupyterLab will show a red
# stripe at the top of the page. It can only be used if JupyterLab is installed
# as `pip install -e .`.
#c.LabApp.dev_mode = False
## The override url for static lab assets, typically a CDN.
#c.LabApp.override_static_url = ''
## The override url for static lab theme assets, typically a CDN.
#c.LabApp.override_theme_url = ''
## The directory for user settings.
#c.LabApp.user_settings_dir = '/root/.jupyter/lab/user-settings'
## Whether to serve the app in watch mode
#c.LabApp.watch = False
## The directory for workspaces
#c.LabApp.workspaces_dir = '/root/.jupyter/lab/workspaces'
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
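## Example (editor's sketch): to use msgpack for serialization, as mentioned in
# the Session docstring above (requires the msgpack package to be installed):
#
#c.Session.packer = 'msgpack.packb'
#c.Session.unpacker = 'msgpack.unpackb'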
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## White list of allowed kernel message types. When the list is empty, all
# message types are allowed.
#c.MappingKernelManager.allowed_message_types = []
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
#
# When True (default), messages are buffered and replayed on reconnect, avoiding
# lost messages due to interrupted connectivity.
#
# Disable if long-running kernels will produce too much output while no
# frontends are connected.
#c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout > 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout > 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. Very short timeouts may result
# in kernels being culled for users with poor network connections.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
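## Example (editor's sketch): cull kernels that have been idle for an hour,
# checking every five minutes; busy or connected kernels are still spared unless
# the two flags above are also enabled:
#
#c.MappingKernelManager.cull_idle_timeout = 3600
#c.MappingKernelManager.cull_interval = 300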
## Timeout for giving up on a kernel (in seconds).
#
# On starting and restarting kernels, we check whether the kernel is running and
# responsive by sending kernel_info_requests. This sets the timeout in seconds
# for how long the kernel can take before being presumed dead. This affects the
# MappingKernelManager (which handles kernel restarts) and the
# ZMQChannelsHandler (which handles the startup).
#c.MappingKernelManager.kernel_info_timeout = 60
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
## Allow access to hidden files
#c.ContentsManager.allow_hidden = False
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API, which may be
# inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass, which
# will be much more efficient.
#
# Access to these files should be Authenticated.
#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a `path` argument specifying
# the root directory from which to serve files.
#c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved on disk on a temporary file and then if
# successfully written, it replaces the old ones. This procedure, namely
# 'atomic_writing', causes some bugs on file system without operation order
# enforcement (like some networked fs). If set to False, the new notebook is
# written directly on the old one, which could fail (e.g. full filesystem or quota).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## If True (default), deleting files will send them to the platform's
# trash/recycle bin, where they can be recovered. If False, deleting files
# really deletes them.
#c.FileContentsManager.delete_to_trash = True
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# GatewayKernelManager(MappingKernelManager) configuration
#------------------------------------------------------------------------------
## Kernel manager that supports remote kernels hosted by Jupyter Kernel or
# Enterprise Gateway.
#------------------------------------------------------------------------------
# GatewayKernelSpecManager(KernelSpecManager) configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# GatewayClient(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This class manages the configuration. It's its own singleton class so that we
# can share these values across all objects. It also contains some helper methods
# to build request arguments out of the various config options.
## The authorization token used in the HTTP headers. (JUPYTER_GATEWAY_AUTH_TOKEN
# env var)
#c.GatewayClient.auth_token = None
## The filename of CA certificates or None to use defaults.
# (JUPYTER_GATEWAY_CA_CERTS env var)
#c.GatewayClient.ca_certs = None
## The filename for client SSL certificate, if any. (JUPYTER_GATEWAY_CLIENT_CERT
# env var)
#c.GatewayClient.client_cert = None
## The filename for client SSL key, if any. (JUPYTER_GATEWAY_CLIENT_KEY env var)
#c.GatewayClient.client_key = None
## The time allowed for HTTP connection establishment with the Gateway server.
# (JUPYTER_GATEWAY_CONNECT_TIMEOUT env var)
#c.GatewayClient.connect_timeout = 60.0
## A comma-separated list of environment variable names that will be included,
# along with their values, in the kernel startup request. The corresponding
# `env_whitelist` configuration value must also be set on the Gateway server -
# since that configuration value indicates which environmental values to make
# available to the kernel. (JUPYTER_GATEWAY_ENV_WHITELIST env var)
#c.GatewayClient.env_whitelist = ''
## Additional HTTP headers to pass on the request. This value will be converted
# to a dict. (JUPYTER_GATEWAY_HEADERS env var)
#c.GatewayClient.headers = '{}'
## The password for HTTP authentication. (JUPYTER_GATEWAY_HTTP_PWD env var)
#c.GatewayClient.http_pwd = None
## The username for HTTP authentication. (JUPYTER_GATEWAY_HTTP_USER env var)
#c.GatewayClient.http_user = None
## The gateway API endpoint for accessing kernel resources
# (JUPYTER_GATEWAY_KERNELS_ENDPOINT env var)
#c.GatewayClient.kernels_endpoint = '/api/kernels'
## The gateway API endpoint for accessing kernelspecs
# (JUPYTER_GATEWAY_KERNELSPECS_ENDPOINT env var)
#c.GatewayClient.kernelspecs_endpoint = '/api/kernelspecs'
## The gateway endpoint for accessing kernelspecs resources
# (JUPYTER_GATEWAY_KERNELSPECS_RESOURCE_ENDPOINT env var)
#c.GatewayClient.kernelspecs_resource_endpoint = '/kernelspecs'
## The time allowed for HTTP request completion. (JUPYTER_GATEWAY_REQUEST_TIMEOUT
# env var)
#c.GatewayClient.request_timeout = 60.0
## The url of the Kernel or Enterprise Gateway server where kernel specifications
# are defined and kernel management takes place. If defined, this Notebook
# server acts as a proxy for all kernel management and kernel specification
# retrieval. (JUPYTER_GATEWAY_URL env var)
#c.GatewayClient.url = None
## For HTTPS requests, determines if server's certificate should be validated or
# not. (JUPYTER_GATEWAY_VALIDATE_CERT env var)
#c.GatewayClient.validate_cert = True
## The websocket url of the Kernel or Enterprise Gateway server. If not
# provided, this value will correspond to the value of the Gateway url with 'ws'
# in place of 'http'. (JUPYTER_GATEWAY_WS_URL env var)
#c.GatewayClient.ws_url = None
| 38.986592
| 103
| 0.701
|
794d236580930323b9d39b0f32440e15b29271d8
| 865
|
py
|
Python
|
byceps/services/email/transfer/models.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 23
|
2015-08-03T23:28:54.000Z
|
2018-12-12T20:11:45.000Z
|
byceps/services/email/transfer/models.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 1
|
2018-09-30T18:18:24.000Z
|
2018-09-30T18:18:24.000Z
|
byceps/services/email/transfer/models.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 9
|
2015-08-06T16:41:36.000Z
|
2018-09-25T11:17:31.000Z
|
"""
byceps.services.email.transfer.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from dataclasses import dataclass
from email.utils import formataddr
from typing import Optional
from ....typing import BrandID
@dataclass(frozen=True)
class NameAndAddress:
name: Optional[str]
address: str
def format(self):
"""Format the name and address as a string value suitable for an
e-mail header.
"""
return formataddr((self.name, self.address))
@dataclass(frozen=True)
class EmailConfig:
brand_id: BrandID
sender: NameAndAddress
contact_address: str
@dataclass(frozen=True)
class Message:
sender: NameAndAddress
recipients: list[str]
subject: str
body: str
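# Editor's note: a minimal usage sketch (values are illustrative only):
#
#     sender = NameAndAddress(name='BYCEPS', address='noreply@example.com')
#     sender.format()  # -> 'BYCEPS <noreply@example.com>'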
| 20.595238
| 72
| 0.685549
|
794d23f6922189c75b7751aa5aa16ae22ec0df1c
| 1,123
|
py
|
Python
|
yt/data_objects/tests/test_clone.py
|
Carreau/yt
|
d7e1cf22a8349b8a62b9c569017643ee233d9c4f
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/data_objects/tests/test_clone.py
|
Carreau/yt
|
d7e1cf22a8349b8a62b9c569017643ee233d9c4f
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/data_objects/tests/test_clone.py
|
Carreau/yt
|
d7e1cf22a8349b8a62b9c569017643ee233d9c4f
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
from yt.testing import assert_array_equal, assert_equal, fake_random_ds
def test_clone_sphere():
    # Test that cloning a sphere yields an independent data object whose field
    # data matches the original once both are read.
fields = ("density", "velocity_x", "velocity_y", "velocity_z")
units = ("g/cm**3", "cm/s", "cm/s", "cm/s")
# Get the first sphere
ds = fake_random_ds(16, fields=fields, units=units)
sp0 = ds.sphere(ds.domain_center, 0.25)
assert_equal(list(sp0.keys()), [])
sp1 = sp0.clone()
sp0[("gas", "density")]
    assert_equal(list(sp0.keys()), [("gas", "density")])
assert_equal(list(sp1.keys()), [])
sp1[("gas", "density")]
assert_array_equal(sp0[("gas", "density")], sp1[("gas", "density")])
def test_clone_cut_region():
fields = ("density", "temperature")
units = ("g/cm**3", "K")
ds = fake_random_ds(64, nprocs=4, fields=fields, units=units)
dd = ds.all_data()
reg1 = dd.cut_region(
["obj['gas', 'temperature'] > 0.5", "obj['gas', 'density'] < 0.75"]
)
reg2 = reg1.clone()
assert_array_equal(reg1[("gas", "density")], reg2[("gas", "density")])
| 32.085714
| 76
| 0.609973
|
794d2434c2ad823274f70638e6da883a50cd1ff1
| 2,963
|
py
|
Python
|
pra_request_tracker/pra_request_tracker/record_requests/tests/test_models.py
|
SeattleDSA/pra-request-tracker
|
0c4c628d249efc579faaa8f1d352fa0d120a510c
|
[
"MIT"
] | 1
|
2021-09-30T20:25:49.000Z
|
2021-09-30T20:25:49.000Z
|
pra_request_tracker/pra_request_tracker/record_requests/tests/test_models.py
|
SeattleDSA/pra-request-tracker
|
0c4c628d249efc579faaa8f1d352fa0d120a510c
|
[
"MIT"
] | 14
|
2021-06-29T18:04:09.000Z
|
2021-07-23T20:15:46.000Z
|
pra_request_tracker/pra_request_tracker/record_requests/tests/test_models.py
|
SeattleDSA/pra-request-tracker
|
0c4c628d249efc579faaa8f1d352fa0d120a510c
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from ..models import Agency, RecordRequest, RecordRequestFile
from .factories import (
AgencyFactory,
CorrespondenceFactory,
RecordRequestFactory,
RecordRequestFileFactory,
)
class AgencyTestCase(TestCase):
def test_agency_request_count(self):
agency = AgencyFactory.create()
for _ in range(2):
RecordRequestFactory.create(agency=agency)
self.assertEqual(agency.request_count, 2)
def test_agency_delete(self):
agency = AgencyFactory.create()
pk = agency.pk
with self.assertRaises(NotImplementedError):
agency.delete()
self.assertIsNotNone(Agency.objects.filter(pk=pk).first())
class RecordRequestTestCase(TestCase):
def test_record_request_status_label(self):
status = RecordRequest.Status.INSTALLMENTS
record_request = RecordRequestFactory.create(status=str(status))
self.assertEqual(record_request.status_label, status.label)
    def test_record_request_delete(self):
record_request = RecordRequestFactory.create()
pk = record_request.pk
with self.assertRaises(NotImplementedError):
record_request.delete()
self.assertIsNotNone(RecordRequest.objects.filter(pk=pk).first())
def test_record_request_files(self):
record_request = RecordRequestFactory.create()
files = []
for _ in range(3):
files.append(RecordRequestFileFactory.create(request=record_request))
self.assertEqual(files, list(record_request.files))
def test_record_request_correspondences(self):
record_request = RecordRequestFactory.create()
correspondences = []
for _ in range(3):
correspondences.append(CorrespondenceFactory.create(request=record_request))
self.assertEqual(correspondences, list(record_request.correspondences))
def test_record_request_correspondences_prefetch_files(self):
record_request = RecordRequestFactory.create()
for _ in range(3):
correspondence = CorrespondenceFactory.create(request=record_request)
for _ in range(2):
RecordRequestFileFactory.create(correspondence=correspondence)
with self.assertNumQueries(2):
record_request.correspondences[0].recordrequestfile_set.all()
record_request.correspondences[1].recordrequestfile_set.all()
record_request.correspondences[2].recordrequestfile_set.all()
class RecordRequestFileTestCase(TestCase):
def test_record_request_file_delete(self):
file = RecordRequestFileFactory.create()
pk = file.pk
file.delete()
self.assertIsNone(RecordRequestFile.objects.filter(pk=pk).first())
def test_auto_name(self):
file = RecordRequestFileFactory.create()
file.title = None
file.save()
self.assertEqual(file.file.name, file.title)
| 32.206522
| 88
| 0.703341
|
794d25035da68882bb0e87718b5181168167842e
| 21,726
|
py
|
Python
|
docker/third-party/nnUNet/nnunet/network_architecture/generic_UNet.py
|
LucasFidon/trustworthy-ai-fetal-brain-segmentation
|
84959da54d8c2fb156da2b06cca30fa31a1c926d
|
[
"BSD-3-Clause"
] | null | null | null |
docker/third-party/nnUNet/nnunet/network_architecture/generic_UNet.py
|
LucasFidon/trustworthy-ai-fetal-brain-segmentation
|
84959da54d8c2fb156da2b06cca30fa31a1c926d
|
[
"BSD-3-Clause"
] | null | null | null |
docker/third-party/nnUNet/nnunet/network_architecture/generic_UNet.py
|
LucasFidon/trustworthy-ai-fetal-brain-segmentation
|
84959da54d8c2fb156da2b06cca30fa31a1c926d
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
import torch
import numpy as np
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
import torch.nn.functional
class ConvDropoutNormNonlin(nn.Module):
"""
fixes a bug in ConvDropoutNormNonlin where lrelu was used regardless of nonlin. Bad.
"""
def __init__(self, input_channels, output_channels,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None):
super(ConvDropoutNormNonlin, self).__init__()
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)
if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[
'p'] > 0:
self.dropout = self.dropout_op(**self.dropout_op_kwargs)
else:
self.dropout = None
self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
self.lrelu = self.nonlin(**self.nonlin_kwargs)
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.lrelu(self.instnorm(x))
class ConvDropoutNonlinNorm(ConvDropoutNormNonlin):
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.instnorm(self.lrelu(x))
class StackedConvLayers(nn.Module):
def __init__(self, input_feature_channels, output_feature_channels, num_convs,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None, basic_block=ConvDropoutNormNonlin):
'''
stacks ConvDropoutNormLReLU layers. initial_stride will only be applied to first layer in the stack. The other parameters affect all layers
:param input_feature_channels:
:param output_feature_channels:
:param num_convs:
:param dilation:
:param kernel_size:
:param padding:
:param dropout:
:param initial_stride:
:param conv_op:
:param norm_op:
:param dropout_op:
:param inplace:
:param neg_slope:
:param norm_affine:
:param conv_bias:
'''
self.input_channels = input_feature_channels
self.output_channels = output_feature_channels
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
if first_stride is not None:
self.conv_kwargs_first_conv = deepcopy(conv_kwargs)
self.conv_kwargs_first_conv['stride'] = first_stride
else:
self.conv_kwargs_first_conv = conv_kwargs
super(StackedConvLayers, self).__init__()
self.blocks = nn.Sequential(
*([basic_block(input_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs_first_conv,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs)] +
[basic_block(output_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)]))
def forward(self, x):
return self.blocks(x)
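# Illustrative sketch (not part of the original file): a StackedConvLayers block with two
# convolutions where only the first one downsamples, as used in the encoder when
# convolutional pooling is enabled. The channel counts and input size are assumptions.
#
#   block = StackedConvLayers(32, 64, num_convs=2, conv_op=nn.Conv2d,
#                             norm_op=nn.InstanceNorm2d, first_stride=(2, 2))
#   y = block(torch.randn(1, 32, 64, 64))   # -> torch.Size([1, 64, 32, 32])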
def print_module_training_status(module):
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Conv3d) or isinstance(module, nn.Dropout3d) or \
isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout) or isinstance(module, nn.InstanceNorm3d) \
or isinstance(module, nn.InstanceNorm2d) or isinstance(module, nn.InstanceNorm1d) \
or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or isinstance(module,
nn.BatchNorm1d):
print(str(module), module.training)
class Upsample(nn.Module):
def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False):
super(Upsample, self).__init__()
self.align_corners = align_corners
self.mode = mode
self.scale_factor = scale_factor
self.size = size
def forward(self, x):
return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode,
align_corners=self.align_corners)
class Generic_UNet(SegmentationNetwork):
DEFAULT_BATCH_SIZE_3D = 2
DEFAULT_PATCH_SIZE_3D = (64, 192, 160)
SPACING_FACTOR_BETWEEN_STAGES = 2
BASE_NUM_FEATURES_3D = 30
MAX_NUMPOOL_3D = 999
MAX_NUM_FILTERS_3D = 320
DEFAULT_PATCH_SIZE_2D = (256, 256)
BASE_NUM_FEATURES_2D = 30
DEFAULT_BATCH_SIZE_2D = 50
MAX_NUMPOOL_2D = 999
MAX_FILTERS_2D = 480
use_this_for_batch_size_computation_2D = 19739648
use_this_for_batch_size_computation_3D = 520000000 # 505789440
def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2,
feat_map_mul_on_downscale=2, conv_op=nn.Conv2d,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,
final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,
conv_kernel_sizes=None,
upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False,
max_num_features=None, basic_block=ConvDropoutNormNonlin,
seg_output_use_bias=False):
"""
basically more flexible than v1, architecture is the same
Does this look complicated? Nah bro. Functionality > usability
This does everything you need, including world peace.
Questions? -> f.isensee@dkfz.de
"""
super(Generic_UNet, self).__init__()
self.convolutional_upsampling = convolutional_upsampling
self.convolutional_pooling = convolutional_pooling
self.upscale_logits = upscale_logits
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}
self.nonlin = nonlin
self.nonlin_kwargs = nonlin_kwargs
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.weightInitializer = weightInitializer
self.conv_op = conv_op
self.norm_op = norm_op
self.dropout_op = dropout_op
self.num_classes = num_classes
self.final_nonlin = final_nonlin
self._deep_supervision = deep_supervision
self.do_ds = deep_supervision
if conv_op == nn.Conv2d:
upsample_mode = 'bilinear'
pool_op = nn.MaxPool2d
transpconv = nn.ConvTranspose2d
if pool_op_kernel_sizes is None:
pool_op_kernel_sizes = [(2, 2)] * num_pool
if conv_kernel_sizes is None:
conv_kernel_sizes = [(3, 3)] * (num_pool + 1)
elif conv_op == nn.Conv3d:
upsample_mode = 'trilinear'
pool_op = nn.MaxPool3d
transpconv = nn.ConvTranspose3d
if pool_op_kernel_sizes is None:
pool_op_kernel_sizes = [(2, 2, 2)] * num_pool
if conv_kernel_sizes is None:
conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0, dtype=np.int64)
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.conv_kernel_sizes = conv_kernel_sizes
self.conv_pad_sizes = []
for krnl in self.conv_kernel_sizes:
self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])
if max_num_features is None:
if self.conv_op == nn.Conv3d:
self.max_num_features = self.MAX_NUM_FILTERS_3D
else:
self.max_num_features = self.MAX_FILTERS_2D
else:
self.max_num_features = max_num_features
self.conv_blocks_context = []
self.conv_blocks_localization = []
self.td = []
self.tu = []
self.seg_outputs = []
output_features = base_num_features
input_features = input_channels
for d in range(num_pool):
# determine the first stride
if d != 0 and self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[d - 1]
else:
first_stride = None
self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]
self.conv_kwargs['padding'] = self.conv_pad_sizes[d]
# add convolutions
self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,
self.conv_op, self.conv_kwargs, self.norm_op,
self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,
first_stride, basic_block=basic_block))
if not self.convolutional_pooling:
self.td.append(pool_op(pool_op_kernel_sizes[d]))
input_features = output_features
output_features = int(np.round(output_features * feat_map_mul_on_downscale))
output_features = min(output_features, self.max_num_features)
# now the bottleneck.
# determine the first stride
if self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[-1]
else:
first_stride = None
# the output of the last conv must match the number of features from the skip connection if we are not using
# convolutional upsampling. If we use convolutional upsampling then the reduction in feature maps will be
# done by the transposed conv
if self.convolutional_upsampling:
final_num_features = output_features
else:
final_num_features = self.conv_blocks_context[-1].output_channels
self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]
self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]
self.conv_blocks_context.append(nn.Sequential(
StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, first_stride, basic_block=basic_block),
StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, basic_block=basic_block)))
# if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here
if not dropout_in_localization:
old_dropout_p = self.dropout_op_kwargs['p']
self.dropout_op_kwargs['p'] = 0.0
# now lets build the localization pathway
for u in range(num_pool):
nfeatures_from_down = final_num_features
nfeatures_from_skip = self.conv_blocks_context[
-(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2
n_features_after_tu_and_concat = nfeatures_from_skip * 2
# the first conv reduces the number of features to match those of skip
# the following convs work on that number of features
# if not convolutional upsampling then the final conv reduces the num of features again
if u != num_pool - 1 and not self.convolutional_upsampling:
final_num_features = self.conv_blocks_context[-(3 + u)].output_channels
else:
final_num_features = nfeatures_from_skip
if not self.convolutional_upsampling:
self.tu.append(Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode))
else:
self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)],
pool_op_kernel_sizes[-(u + 1)], bias=False))
self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[- (u + 1)]
self.conv_kwargs['padding'] = self.conv_pad_sizes[- (u + 1)]
self.conv_blocks_localization.append(nn.Sequential(
StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1,
self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block),
StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs, basic_block=basic_block)
))
for ds in range(len(self.conv_blocks_localization)):
# Last convolution operation(s) to obtain the labels score maps before softmax/argmax
self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes,
1, 1, 0, 1, 1, seg_output_use_bias))
self.upscale_logits_ops = []
cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
for usl in range(num_pool - 1):
if self.upscale_logits:
self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),
mode=upsample_mode))
else:
self.upscale_logits_ops.append(lambda x: x)
if not dropout_in_localization:
self.dropout_op_kwargs['p'] = old_dropout_p
# register all modules properly
self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
self.td = nn.ModuleList(self.td)
self.tu = nn.ModuleList(self.tu)
self.seg_outputs = nn.ModuleList(self.seg_outputs)
if self.upscale_logits:
self.upscale_logits_ops = nn.ModuleList(
self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here
if self.weightInitializer is not None:
self.apply(self.weightInitializer)
# self.apply(print_module_training_status)
def forward(self, x):
skips = []
seg_outputs = []
# Encoder
for d in range(len(self.conv_blocks_context) - 1):
x = self.conv_blocks_context[d](x)
skips.append(x)
if not self.convolutional_pooling:
x = self.td[d](x)
x = self.conv_blocks_context[-1](x)
# Decoder
for u in range(len(self.tu)):
x = self.tu[u](x)
x = torch.cat((x, skips[-(u + 1)]), dim=1)
x = self.conv_blocks_localization[u](x)
seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))
if self._deep_supervision and self.do_ds:
return tuple([seg_outputs[-1]] + [i(j) for i, j in
zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])
else:
return seg_outputs[-1]
def embedding(self, x):
# return the last embedding map (before last conv + softmax)
skips = []
# Encoder
for d in range(len(self.conv_blocks_context) - 1):
x = self.conv_blocks_context[d](x)
skips.append(x)
if not self.convolutional_pooling:
x = self.td[d](x)
x = self.conv_blocks_context[-1](x)
# Decoder
for u in range(len(self.tu)):
x = self.tu[u](x)
x = torch.cat((x, skips[-(u + 1)]), dim=1)
x = self.conv_blocks_localization[u](x)
return x
@staticmethod
def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,
num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,
conv_per_stage=2):
"""
This only applies for num_conv_per_stage and convolutional_upsampling=True
not real vram consumption. just a constant term to which the vram consumption will be approx proportional
(+ offset for parameter storage)
:param deep_supervision:
:param patch_size:
:param num_pool_per_axis:
:param base_num_features:
:param max_num_features:
:param num_modalities:
:param num_classes:
:param pool_op_kernel_sizes:
:return:
"""
if not isinstance(num_pool_per_axis, np.ndarray):
num_pool_per_axis = np.array(num_pool_per_axis)
npool = len(pool_op_kernel_sizes)
map_size = np.array(patch_size)
tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +
num_modalities * np.prod(map_size, dtype=np.int64) +
num_classes * np.prod(map_size, dtype=np.int64))
num_feat = base_num_features
for p in range(npool):
for pi in range(len(num_pool_per_axis)):
map_size[pi] /= pool_op_kernel_sizes[p][pi]
num_feat = min(num_feat * 2, max_num_features)
num_blocks = (conv_per_stage * 2 + 1) if p < (npool - 1) else conv_per_stage # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv
tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat
if deep_supervision and p < (npool - 2):
tmp += np.prod(map_size, dtype=np.int64) * num_classes
# print(p, map_size, num_feat, tmp)
return tmp
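# Minimal usage sketch (not part of nnU-Net): build a small 3D U-Net and run one forward
# pass on random data. The channel counts, pooling depth and input size below are
# illustrative assumptions, not values taken from an nnU-Net plan file.
if __name__ == "__main__":
    net = Generic_UNet(input_channels=1, base_num_features=16, num_classes=3, num_pool=3,
                       conv_op=nn.Conv3d, norm_op=nn.InstanceNorm3d, dropout_op=nn.Dropout3d,
                       deep_supervision=False, convolutional_pooling=True,
                       convolutional_upsampling=True)
    with torch.no_grad():
        # each spatial dimension must be divisible by 2**num_pool = 8
        seg = net(torch.randn(1, 1, 32, 64, 64))
    print(seg.shape)  # expected: torch.Size([1, 3, 32, 64, 64])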
| 46.324094
| 180
| 0.626715
|
794d2577c8dc7b5115f2c003d637576be8634a86
| 1,956
|
py
|
Python
|
server/db_util.py
|
sherryx8/Shortcut
|
4ccc3fef7965406eb72205c707f46998af1fdd6a
|
[
"Apache-2.0"
] | null | null | null |
server/db_util.py
|
sherryx8/Shortcut
|
4ccc3fef7965406eb72205c707f46998af1fdd6a
|
[
"Apache-2.0"
] | null | null | null |
server/db_util.py
|
sherryx8/Shortcut
|
4ccc3fef7965406eb72205c707f46998af1fdd6a
|
[
"Apache-2.0"
] | null | null | null |
from texttable import Texttable
def pretty_print_results(cur, rv):
t = Texttable()
colnames = [col[0] for col in cur.description]
t.add_rows([colnames] + rv)
    print(t.draw())
def make_dicts(cursor, row):
"""
Turn query results into dictionaries keyed by column name
"""
colnames = [col[0] for col in cursor.description]
fmtrow = {}
for idx, value in enumerate(row):
fmtrow[colnames[idx]] = value
return fmtrow
def connect_to_db():
import vertica_python
import re
import os
try:
DB_NAME = os.environ['DB_NAME']
    except Exception:
DB_NAME = 'test'
try:
DB_USER = os.environ['DB_USER']
    except Exception:
DB_USER = 'dbadmin'
DB_PASSWORD = ''
DB_HOST = os.environ['DB_HOST']
conn_info = {'host': DB_HOST,
'port': 5433,
'user': DB_USER,
'password': '',
'database': DB_NAME,
# 10 minutes timeout on queries
'read_timeout': 600,
# default throw error on invalid UTF-8 results
'unicode_error': 'strict',
# SSL is disabled by default
'ssl': False}
db = vertica_python.connect(**conn_info)
return db
def query_db(query, args=(), one=False, db = None, pretty_print=False):
print "Query string: " + query
if not db:
db = connect_to_db()
cur = db.cursor()
try:
cur.execute(query, args)
rv = cur.fetchall()
if rv and pretty_print:
pretty_print_results(cur, rv)
# Turn into colname->val dict representation of tuple
# this isn't very efficient but will suffice for now
rv = [make_dicts(cur, row) for row in rv]
    except Exception as e:
        print(e)
rv = [{'error': e}]
cur.close()
return (rv[0] if rv else None) if one else rv
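# Minimal usage sketch (illustrative): assumes DB_HOST (and optionally DB_NAME/DB_USER)
# are set in the environment; the table name below is an assumption, not part of this module.
if __name__ == '__main__':
    rows = query_db("SELECT * FROM shortcuts LIMIT 10", pretty_print=True)
    for row in rows:
        print(row)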
| 24.45
| 71
| 0.557771
|
794d258be85f140b8af1b9f6438ea4b7f067e9d8
| 13,931
|
py
|
Python
|
viadot/flows/supermetrics_to_adls.py
|
PanczykowskiK/viadot
|
44e269790b3debb02318ff4c4f07638b3a37d800
|
[
"MIT"
] | null | null | null |
viadot/flows/supermetrics_to_adls.py
|
PanczykowskiK/viadot
|
44e269790b3debb02318ff4c4f07638b3a37d800
|
[
"MIT"
] | null | null | null |
viadot/flows/supermetrics_to_adls.py
|
PanczykowskiK/viadot
|
44e269790b3debb02318ff4c4f07638b3a37d800
|
[
"MIT"
] | null | null | null |
import json
import os
import shutil
from pathlib import Path
from typing import Any, Dict, List, Union
import pandas as pd
import pendulum
import prefect
from prefect import Flow, Task, apply_map, task
from prefect.backend import set_key_value
from prefect.tasks.secrets import PrefectSecret
from prefect.utilities import logging
from visions.functional import infer_type
from visions.typesets.complete_set import CompleteSet
from ..task_utils import (
add_ingestion_metadata_task,
df_get_data_types_task,
df_mapp_mixed_dtypes_for_parquet,
update_dtypes_dict,
)
from ..tasks import (
AzureDataLakeUpload,
DownloadGitHubFile,
RunGreatExpectationsValidation,
SupermetricsToDF,
)
logger = logging.get_logger(__name__)
supermetrics_to_df_task = SupermetricsToDF()
download_github_file_task = DownloadGitHubFile()
validation_task = RunGreatExpectationsValidation()
file_to_adls_task = AzureDataLakeUpload()
json_to_adls_task = AzureDataLakeUpload()
@task
def write_to_json(dict_, path):
logger = prefect.context.get("logger")
if os.path.isfile(path):
logger.warning(f"File {path} already exists. Overwriting...")
else:
logger.debug(f"Writing to {path}...")
# create parent directories if they don't exist
Path(path).parent.mkdir(parents=True, exist_ok=True)
with open(path, mode="w") as f:
json.dump(dict_, f)
logger.debug(f"Successfully wrote to {path}.")
@task
def union_dfs_task(dfs: List[pd.DataFrame]):
return pd.concat(dfs, ignore_index=True)
@task
def dtypes_to_json_task(dtypes_dict, local_json_path: str):
with open(local_json_path, "w") as fp:
json.dump(dtypes_dict, fp)
@task
def df_to_parquet_task(df, path: str, if_exists: str = "replace"):
if if_exists == "append":
if os.path.isfile(path):
parquet_df = pd.read_parquet(path)
out_df = pd.concat([parquet_df, df])
else:
out_df = df
elif if_exists == "replace":
out_df = df
out_df.to_parquet(path, index=False)
@task
def df_to_csv_task(df, path: str, if_exists: str = "replace"):
if if_exists == "append":
if os.path.isfile(path):
csv_df = pd.read_csv(path)
out_df = pd.concat([csv_df, df])
else:
out_df = df
elif if_exists == "replace":
out_df = df
out_df.to_csv(path, index=False)
@task
def cleanup_validation_clutter(expectations_path):
ge_project_path = Path(expectations_path).parent
shutil.rmtree(ge_project_path)
class SupermetricsToADLS(Flow):
def __init__(
self,
name: str,
ds_id: str,
ds_accounts: List[str],
fields: List[str],
ds_user: str = None,
ds_segments: List[str] = None,
date_range_type: str = None,
start_date: str = None,
end_date: str = None,
settings: Dict[str, Any] = None,
filter: str = None,
max_rows: int = 1000000,
max_columns: int = None,
order_columns: str = None,
expectation_suite: dict = None,
evaluation_parameters: dict = None,
keep_validation_output: bool = False,
output_file_extension: str = ".parquet",
local_file_path: str = None,
adls_dir_path: str = None,
overwrite_adls: bool = True,
if_empty: str = "warn",
if_exists: str = "replace",
adls_sp_credentials_secret: str = None,
max_download_retries: int = 5,
supermetrics_task_timeout: int = 60 * 30,
parallel: bool = True,
tags: List[str] = ["extract"],
vault_name: str = None,
*args: List[any],
**kwargs: Dict[str, Any],
):
"""
Flow for downloading data from different marketing APIs to a local CSV
using Supermetrics API, then uploading it to Azure Data Lake.
Args:
name (str): The name of the flow.
            ds_id (str): A query parameter passed to the SupermetricsToDF task
            ds_accounts (List[str]): A query parameter passed to the SupermetricsToDF task
            ds_user (str): A query parameter passed to the SupermetricsToDF task
            fields (List[str]): A query parameter passed to the SupermetricsToDF task
            ds_segments (List[str], optional): A query parameter passed to the SupermetricsToDF task. Defaults to None.
            date_range_type (str, optional): A query parameter passed to the SupermetricsToDF task. Defaults to None.
            start_date (str, optional): A query parameter to pass the start date to the date range filter. Defaults to None.
            end_date (str, optional): A query parameter to pass the end date to the date range filter. Defaults to None.
            settings (Dict[str, Any], optional): A query parameter passed to the SupermetricsToDF task. Defaults to None.
            filter (str, optional): A query parameter passed to the SupermetricsToDF task. Defaults to None.
            max_rows (int, optional): A query parameter passed to the SupermetricsToDF task. Defaults to 1000000.
            max_columns (int, optional): A query parameter passed to the SupermetricsToDF task. Defaults to None.
            order_columns (str, optional): A query parameter passed to the SupermetricsToDF task. Defaults to None.
            expectation_suite (dict, optional): The Great Expectations suite used to validate the data. Defaults to None.
            evaluation_parameters (dict, optional): A dictionary containing evaluation parameters for the validation. Defaults to None.
keep_validation_output (bool, optional): Whether to keep the output files generated by the Great Expectations task. Defaults to False.
            local_file_path (str, optional): Local destination path. Defaults to None.
            output_file_extension (str, optional): Output file extension - to allow selection of .csv for data which is not easy to handle with parquet. Defaults to ".parquet".
            adls_dir_path (str, optional): Azure Data Lake destination folder/catalog path. Defaults to None.
overwrite_adls (bool, optional): Whether to overwrite the file in ADLS. Defaults to True.
if_empty (str, optional): What to do if the Supermetrics query returns no data. Defaults to "warn".
adls_sp_credentials_secret (str, optional): The name of the Azure Key Vault secret containing a dictionary with
ACCOUNT_NAME and Service Principal credentials (TENANT_ID, CLIENT_ID, CLIENT_SECRET) for the Azure Data Lake.
Defaults to None.
max_download_retries (int, optional): How many times to retry the download. Defaults to 5.
supermetrics_task_timeout (int, optional): The timeout for the download task. Defaults to 60*30.
parallel (bool, optional): Whether to parallelize the downloads. Defaults to True.
tags (List[str], optional): Flow tags to use, eg. to control flow concurrency. Defaults to ["extract"].
vault_name (str, optional): The name of the vault from which to obtain the secrets. Defaults to None.
"""
if not ds_user:
try:
ds_user = PrefectSecret("SUPERMETRICS_DEFAULT_USER").run()
except ValueError as e:
msg = "Neither 'ds_user' parameter nor 'SUPERMETRICS_DEFAULT_USER' secret were not specified"
raise ValueError(msg) from e
# SupermetricsToDF
self.ds_id = ds_id
self.ds_accounts = ds_accounts
self.ds_segments = ds_segments
self.ds_user = ds_user
self.fields = fields
self.date_range_type = date_range_type
self.start_date = start_date
self.end_date = end_date
self.settings = settings
self.filter = filter
self.max_rows = max_rows
self.max_columns = max_columns
self.order_columns = order_columns
self.if_exists = if_exists
self.output_file_extension = output_file_extension
# RunGreatExpectationsValidation
self.expectation_suite = expectation_suite
self.expectations_path = "/home/viadot/tmp/expectations"
self.expectation_suite_name = expectation_suite["expectation_suite_name"]
self.evaluation_parameters = evaluation_parameters
self.keep_validation_output = keep_validation_output
# AzureDataLakeUpload
self.local_file_path = (
local_file_path or self.slugify(name) + self.output_file_extension
)
self.local_json_path = self.slugify(name) + ".json"
self.now = str(pendulum.now("utc"))
self.adls_dir_path = adls_dir_path
self.adls_file_path = os.path.join(
adls_dir_path, self.now + self.output_file_extension
)
self.adls_schema_file_dir_file = os.path.join(
adls_dir_path, "schema", self.now + ".json"
)
self.overwrite_adls = overwrite_adls
self.if_empty = if_empty
self.adls_sp_credentials_secret = adls_sp_credentials_secret
# Global
self.max_download_retries = max_download_retries
self.supermetrics_task_timeout = supermetrics_task_timeout
self.parallel = parallel
self.tags = tags
self.vault_name = vault_name
super().__init__(*args, name=name, **kwargs)
self.gen_flow()
@staticmethod
def slugify(name):
return name.replace(" ", "_").lower()
def gen_supermetrics_task(
self, ds_accounts: Union[str, List[str]], flow: Flow = None
) -> Task:
t = supermetrics_to_df_task.bind(
ds_id=self.ds_id,
ds_accounts=ds_accounts,
ds_segments=self.ds_segments,
ds_user=self.ds_user,
fields=self.fields,
date_range_type=self.date_range_type,
start_date=self.start_date,
end_date=self.end_date,
settings=self.settings,
filter=self.filter,
max_rows=self.max_rows,
max_columns=self.max_columns,
order_columns=self.order_columns,
if_empty=self.if_empty,
max_retries=self.max_download_retries,
timeout=self.supermetrics_task_timeout,
flow=flow,
)
return t
def gen_flow(self) -> Flow:
if self.parallel:
# generate a separate task for each account
dfs = apply_map(self.gen_supermetrics_task, self.ds_accounts, flow=self)
df = union_dfs_task.bind(dfs, flow=self)
else:
df = self.gen_supermetrics_task(ds_accounts=self.ds_accounts, flow=self)
write_json = write_to_json.bind(
dict_=self.expectation_suite,
path=os.path.join(
self.expectations_path, self.expectation_suite_name + ".json"
),
flow=self,
)
validation = validation_task.bind(
df=df,
expectations_path=self.expectations_path,
expectation_suite_name=self.expectation_suite_name,
evaluation_parameters=self.evaluation_parameters,
keep_output=self.keep_validation_output,
flow=self,
)
if not self.keep_validation_output:
validation_cleanup = cleanup_validation_clutter.bind(
expectations_path=self.expectations_path, flow=self
)
validation_cleanup.set_upstream(validation, flow=self)
validation_upstream = validation_cleanup
else:
validation_upstream = validation
df_with_metadata = add_ingestion_metadata_task.bind(df, flow=self)
dtypes_dict = df_get_data_types_task.bind(df_with_metadata, flow=self)
df_to_be_loaded = df_mapp_mixed_dtypes_for_parquet(
df_with_metadata, dtypes_dict, flow=self
)
if self.output_file_extension == ".parquet":
df_to_file = df_to_parquet_task.bind(
df=df_to_be_loaded,
path=self.local_file_path,
if_exists=self.if_exists,
flow=self,
)
else:
df_to_file = df_to_csv_task.bind(
df=df_with_metadata,
path=self.local_file_path,
if_exists=self.if_exists,
flow=self,
)
file_to_adls_task.bind(
from_path=self.local_file_path,
to_path=self.adls_file_path,
overwrite=self.overwrite_adls,
sp_credentials_secret=self.adls_sp_credentials_secret,
vault_name=self.vault_name,
flow=self,
)
dtypes_updated = update_dtypes_dict(dtypes_dict, flow=self)
dtypes_to_json_task.bind(
dtypes_dict=dtypes_updated, local_json_path=self.local_json_path, flow=self
)
json_to_adls_task.bind(
from_path=self.local_json_path,
to_path=self.adls_schema_file_dir_file,
overwrite=self.overwrite_adls,
sp_credentials_secret=self.adls_sp_credentials_secret,
vault_name=self.vault_name,
flow=self,
)
write_json.set_upstream(df, flow=self)
validation.set_upstream(write_json, flow=self)
df_with_metadata.set_upstream(validation_upstream, flow=self)
df_to_be_loaded.set_upstream(dtypes_dict, flow=self)
dtypes_dict.set_upstream(df_with_metadata, flow=self)
dtypes_to_json_task.set_upstream(dtypes_updated, flow=self)
file_to_adls_task.set_upstream(df_to_file, flow=self)
json_to_adls_task.set_upstream(dtypes_to_json_task, flow=self)
set_key_value(key=self.adls_dir_path, value=self.adls_file_path)
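# Minimal usage sketch (illustrative): the data source id, account, fields, ADLS path and
# the expectation suite below are assumptions, not values shipped with viadot. Running it
# also assumes the SUPERMETRICS_DEFAULT_USER Prefect secret (or an explicit ds_user) and
# valid ADLS credentials are available.
if __name__ == "__main__":
    example_suite = {"expectation_suite_name": "supermetrics_suite", "expectations": []}
    flow = SupermetricsToADLS(
        name="Google Ads extract",
        ds_id="AW",
        ds_accounts=["1234567890"],
        fields=["Date", "Campaignname", "Clicks"],
        date_range_type="last_month",
        expectation_suite=example_suite,
        adls_dir_path="raw/supermetrics/google_ads",
    )
    flow.run()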
| 39.916905
| 177
| 0.658316
|
794d25a7c42e13c36332e8c16e8fd9fe705bba82
| 14,628
|
py
|
Python
|
docs/conf.py
|
appsembler/LingoX
|
70c3e0080fda72443161d9c5d4927405cbe14919
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
appsembler/LingoX
|
70c3e0080fda72443161d9c5d4927405cbe14919
|
[
"MIT"
] | 8
|
2018-05-23T15:00:40.000Z
|
2019-02-26T07:19:45.000Z
|
docs/conf.py
|
appsembler/LingoX
|
70c3e0080fda72443161d9c5d4927405cbe14919
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""
localizerx documentation build configuration file.
This file is execfile()d with the current directory set to its
containing dir.
Note that not all possible configuration values are present in this
autogenerated file.
All configuration values have a default; values that are commented out
serve to show the default.
"""
from __future__ import absolute_import, unicode_literals
import os
import re
import sys
from subprocess import check_call
import edx_theme
import django
from django.conf import settings
from django.utils import six
def get_version(*file_paths):
"""
Extract the version string from the file at the given relative path fragments.
"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read() # pylint: disable=open-builtin
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
REPO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(REPO_ROOT)
VERSION = get_version('../localizerx', '__init__.py')
# Configure Django for autodoc usage
settings.configure()
django.setup()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'edx_theme',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon'
]
# A list of warning types to suppress arbitrary warning messages.
suppress_warnings = [
'image.nonlocal_uri',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'localizerx'
copyright = edx_theme.COPYRIGHT # pylint: disable=redefined-builtin
author = edx_theme.AUTHOR
project_title = 'LocalizerX'
documentation_title = "{project_title}".format(project_title=project_title)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'edx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [edx_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'localizerx v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = '{project_name}doc'.format(project_name=project)
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_target = '{project}.tex'.format(project=project)
latex_documents = [
(master_doc, latex_target, documentation_title,
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, project_title, documentation_title,
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project_title, documentation_title,
author, project_title, 'An app to enforce a default language for the edX Platform regardless of browser language.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3.6', None),
'django': ('https://docs.djangoproject.com/en/1.11/', 'https://docs.djangoproject.com/en/1.11/_objects/'),
'model_utils': ('https://django-model-utils.readthedocs.io/en/latest/', None),
}
def on_init(app): # pylint: disable=unused-argument
"""
Run sphinx-apidoc after Sphinx initialization.
Read the Docs won't run tox or custom shell commands, so we need this to
avoid checking in the generated reStructuredText files.
"""
docs_path = os.path.abspath(os.path.dirname(__file__))
root_path = os.path.abspath(os.path.join(docs_path, '..'))
apidoc_path = 'sphinx-apidoc'
if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv
# If we are, assemble the path manually
bin_path = os.path.abspath(os.path.join(sys.prefix, 'bin'))
apidoc_path = os.path.join(bin_path, apidoc_path)
check_call([apidoc_path, '-o', docs_path, os.path.join(root_path, 'localizerx'),
os.path.join(root_path, 'localizerx/migrations')])
def setup(app):
"""Sphinx extension: run sphinx-apidoc."""
event = 'builder-inited' if six.PY3 else b'builder-inited'
app.connect(event, on_init)
| 28.852071
| 120
| 0.708641
|
794d26dc5503dd677feb335d2df7d3116df66b62
| 185
|
py
|
Python
|
project/api/serializers/city.py
|
hlystovea/BBBS
|
7164ef67615e45d750e965bf958af229b56d49e3
|
[
"BSD-3-Clause"
] | null | null | null |
project/api/serializers/city.py
|
hlystovea/BBBS
|
7164ef67615e45d750e965bf958af229b56d49e3
|
[
"BSD-3-Clause"
] | 2
|
2021-06-07T14:06:05.000Z
|
2021-06-18T16:27:29.000Z
|
project/api/serializers/city.py
|
hlystovea/BBBS
|
7164ef67615e45d750e965bf958af229b56d49e3
|
[
"BSD-3-Clause"
] | 2
|
2021-07-27T20:40:18.000Z
|
2021-09-12T16:48:19.000Z
|
from rest_framework import serializers
from ..models import City
class CitySerializer(serializers.ModelSerializer):
class Meta:
exclude = ['region']
model = City
| 18.5
| 50
| 0.708108
|
794d27cee69624dda6864e452cd6acfa7d81f3e2
| 1,637
|
py
|
Python
|
core/tests/performance_tests/exploration_player_test.py
|
bching/oppia
|
9e9b6d756859b8bc1e46f88a1be8736f8398a8d8
|
[
"Apache-2.0"
] | 3
|
2021-06-26T09:45:19.000Z
|
2021-11-17T11:11:39.000Z
|
core/tests/performance_tests/exploration_player_test.py
|
bching/oppia
|
9e9b6d756859b8bc1e46f88a1be8736f8398a8d8
|
[
"Apache-2.0"
] | 7
|
2019-08-20T08:30:43.000Z
|
2022-02-12T18:47:57.000Z
|
core/tests/performance_tests/exploration_player_test.py
|
bching/oppia
|
9e9b6d756859b8bc1e46f88a1be8736f8398a8d8
|
[
"Apache-2.0"
] | 1
|
2021-08-04T13:03:16.000Z
|
2021-08-04T13:03:16.000Z
|
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance tests for the exploration player."""
from core.tests.performance_tests import base
from core.tests.performance_tests import test_config
class ExplorationPlayerPerformanceTest(base.TestBase):
"""Performance tests for the exploration player."""
PAGE_KEY = test_config.PAGE_KEY_EXPLORATION_PLAYER
def setUp(self):
super(ExplorationPlayerPerformanceTest, self).setUp()
page_config = test_config.TEST_DATA[self.PAGE_KEY]
self._set_page_config(page_config)
self._initialize_data_fetcher()
self._load_page_to_cache_server_resources()
def test_page_size_under_specified_limit(self):
self._test_total_page_size()
def test_page_size_under_specified_limit_for_cached_session(self):
self._test_total_page_size_for_cached_session()
def test_page_loads_under_specified_limit(self):
self._test_page_load_time()
def test_page_loads_under_specified_limit_cached_session(self):
self._test_page_load_time_for_cached_session()
| 36.377778
| 74
| 0.772755
|
794d28850ddc86cf5e794ac63077d54bdf11f3a3
| 1,983
|
py
|
Python
|
autotest/gdrivers/dipex.py
|
mihadyuk/gdal
|
d4627981715b82ff368547ef00ef26e0b9207048
|
[
"MIT"
] | null | null | null |
autotest/gdrivers/dipex.py
|
mihadyuk/gdal
|
d4627981715b82ff368547ef00ef26e0b9207048
|
[
"MIT"
] | null | null | null |
autotest/gdrivers/dipex.py
|
mihadyuk/gdal
|
d4627981715b82ff368547ef00ef26e0b9207048
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test DIPEx driver
# Author: Even Rouault, <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2009, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Test a fake DIPex dataset
def dipex_1():
tst = gdaltest.GDALTest( 'DIPEx', 'fakedipex.dat', 1, 1 )
return tst.testOpen()
gdaltest_list = [
dipex_1 ]
if __name__ == '__main__':
gdaltest.setup_run( 'dipex' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| 35.410714
| 79
| 0.621281
|
794d29a5de5b42198864b009ff5c65ea111b8408
| 1,087
|
py
|
Python
|
bin/updatenotable.py
|
doksu/TA-updatenotable
|
b6f017cacff417a2612703cb439946a748f8b857
|
[
"MIT"
] | 1
|
2020-12-10T16:47:10.000Z
|
2020-12-10T16:47:10.000Z
|
bin/updatenotable.py
|
doksu/TA-updatenotable
|
b6f017cacff417a2612703cb439946a748f8b857
|
[
"MIT"
] | null | null | null |
bin/updatenotable.py
|
doksu/TA-updatenotable
|
b6f017cacff417a2612703cb439946a748f8b857
|
[
"MIT"
] | null | null | null |
import splunk.Intersplunk, splunk.rest
# The following is derived from Luke Murphey's post: https://www.splunk.com/en_us/blog/tips-and-tricks/how-to-edit-notable-events-in-es-programatically.html
keywords, options = splunk.Intersplunk.getKeywordsAndOptions()
results, dummyresults, settings = splunk.Intersplunk.getOrganizedResults()
sessionKey = settings.get("sessionKey")
for result in results:
args = {'ruleUIDs': result['event_id']}
if 'comment' in result:
args['comment'] = result['comment']
if 'status' in result:
args['status'] = result['status']
if 'urgency' in result:
args['urgency'] = result['urgency']
if 'owner' in result:
args['newOwner'] = result['owner']
serverResponse, serverContent = splunk.rest.simpleRequest('/services/notable_update', sessionKey=sessionKey, method='POST', postargs=args)
if serverResponse['status'] != '200':
splunk.Intersplunk.generateErrorResults(serverContent)
else:
result['updatenotable_result'] = serverContent
splunk.Intersplunk.outputResults(results)
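# Example invocation from an Enterprise Security search (illustrative; assumes this script
# is registered as the `updatenotable` custom command in commands.conf). Each result must
# carry an event_id; comment/status/urgency/owner fields are applied when present:
#
#   `notable` | search status_label="New" src="10.0.0.5"
#   | eval comment="Triaged automatically", urgency="low"
#   | updatenotable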
| 35.064516
| 156
| 0.713891
|
794d2b267d0bbcfba91f58c57c889ca84c4c0baf
| 3,417
|
py
|
Python
|
utilities/test/test_postgres_util.py
|
diego-hermida/ClimateChangeApp
|
576d49ec5b76f709cc86874ffb03f4a38dbbbbfd
|
[
"MIT"
] | 2
|
2018-07-01T20:36:46.000Z
|
2019-11-01T22:47:06.000Z
|
utilities/test/test_postgres_util.py
|
diego-hermida/ClimateChangeApp
|
576d49ec5b76f709cc86874ffb03f4a38dbbbbfd
|
[
"MIT"
] | 1
|
2021-06-10T20:28:53.000Z
|
2021-06-10T20:28:53.000Z
|
utilities/test/test_postgres_util.py
|
diego-hermida/ClimateChangeApp
|
576d49ec5b76f709cc86874ffb03f4a38dbbbbfd
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
from unittest import TestCase, mock
from unittest.mock import Mock
import utilities.postgres_util
_GLOBAL_CONFIG = deepcopy(utilities.postgres_util.GLOBAL_CONFIG)
_GLOBAL_CONFIG['POSTGRES_DATABASE'] = _GLOBAL_CONFIG['POSTGRES_DATABASE'] + '_test'
_GLOBAL_CONFIG['POSTGRES_USERNAME'] = _GLOBAL_CONFIG['POSTGRES_USERNAME'] + '_test'
@mock.patch('utilities.postgres_util.GLOBAL_CONFIG', _GLOBAL_CONFIG)
class TestPostgresUtil(TestCase):
def test_create_application_database(self):
connection = utilities.postgres_util.get_connection()
utilities.postgres_util.create_application_database(connection)
c = connection.cursor()
c.execute('SELECT EXISTS(SELECT 1 FROM pg_database WHERE DATNAME = %(dname)s)',
{'dname': _GLOBAL_CONFIG['POSTGRES_DATABASE']})
        exists = c.fetchone()[0]
c.close()
connection.close()
self.assertTrue(exists)
def test_get_connection(self):
connection = utilities.postgres_util.get_connection()
self.assertEqual(0, connection.closed)
connection.close()
self.assertNotEqual(0, connection.closed)
def test_create_application_user(self):
connection = utilities.postgres_util.get_connection()
utilities.postgres_util.create_application_user(connection)
c = connection.cursor()
c.execute('SELECT EXISTS(SELECT 1 FROM pg_roles WHERE ROLNAME = %(usrname)s)',
{'usrname': _GLOBAL_CONFIG['POSTGRES_USERNAME']})
        exists = c.fetchone()[0]
c.close()
connection.close()
self.assertTrue(exists)
@mock.patch('utilities.postgres_util.connect')
def test_ping_database_ok(self, mock_connect):
utilities.postgres_util.ping_database(close_after=True)
self.assertEqual(1, mock_connect.call_count)
self.assertEqual(1, mock_connect.return_value.close.call_count)
@mock.patch('utilities.postgres_util.connect', Mock(side_effect=Exception('Test error (this is OK).')))
def test_ping_database_failure(self):
with self.assertRaises(EnvironmentError):
utilities.postgres_util.ping_database(close_after=True)
def test_remove_database(self):
connection = utilities.postgres_util.get_connection()
utilities.postgres_util.drop_application_database(connection)
c = connection.cursor()
c.execute('SELECT EXISTS(SELECT 1 FROM pg_database WHERE DATNAME = %(dname)s)',
{'dname': _GLOBAL_CONFIG['POSTGRES_DATABASE']})
exists = c.fetchone()[0]
c.close()
connection.close()
self.assertFalse(exists)
def test_normalize_query(self):
self.assertEqual([], utilities.postgres_util.normalize_query(' " " '))
self.assertEqual(['foo', 'baz'], utilities.postgres_util.normalize_query(' foo " baz "'))
def test_get_query(self):
def get_all_terms(q):
return [get_all_terms(x) for x in q.children] if isinstance(q, utilities.postgres_util.Q) else q
q = utilities.postgres_util.get_query(['foo', 'baz'], ['name', 'description'])
all_terms = [item for sublist in get_all_terms(q) for item in sublist]
self.assertListEqual([('name__icontains', 'foo'), ('description__icontains', 'foo'), ('name__icontains', 'baz'),
('description__icontains', 'baz')], all_terms)
| 44.376623
| 120
| 0.689494
|
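A sketch of normalize_query and get_query implementations consistent with the expectations in test_normalize_query and test_get_query above; the actual utilities.postgres_util module may be written differently, and the assumption here is that its Q class is Django's Q.
import re
from django.db.models import Q  # assumption: the module's Q is Django's Q
def normalize_query(query_string):
    # Split into unquoted words and double-quoted phrases, dropping empty terms.
    terms = re.findall(r'"([^"]*)"|(\S+)', query_string)
    return [t[0].strip() or t[1].strip() for t in terms if t[0].strip() or t[1].strip()]
def get_query(terms, search_fields):
    # AND the terms together; OR each term across the searchable fields.
    query = None
    for term in terms:
        or_query = None
        for field in search_fields:
            q = Q(**{'%s__icontains' % field: term})
            or_query = q if or_query is None else or_query | q
        query = or_query if query is None else query & or_query
    return query
print(normalize_query(' foo " baz "'))  # ['foo', 'baz']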
794d2bb5660160e76ef13703312b94dd3ec4d9b7
| 1,398
|
py
|
Python
|
epsilonGreedy/greedyOptimistic.py
|
JackSchaible/sulphur
|
1d054131cfc427c0e962d95a32203be075cf730c
|
[
"MIT"
] | null | null | null |
epsilonGreedy/greedyOptimistic.py
|
JackSchaible/sulphur
|
1d054131cfc427c0e962d95a32203be075cf730c
|
[
"MIT"
] | null | null | null |
epsilonGreedy/greedyOptimistic.py
|
JackSchaible/sulphur
|
1d054131cfc427c0e962d95a32203be075cf730c
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import bandit
class greedyOptimistic:
def __init__(self):
self.payoutModifier1 = 1.0
self.payoutModifier2 = 2.0
self.payoutModifier3 = 3.0
self.iterations = 10000
self.means = [10, 10, 10]
self.bandits = [bandit.Bandit(self.payoutModifier1), bandit.Bandit(self.payoutModifier2), bandit.Bandit(self.payoutModifier3)]
self.data = np.empty(self.iterations)
def run(self):
for i in range(self.iterations):
selectedMachine = np.argmax(self.means)
result = self.bandits[selectedMachine].pull()
n = i + 1
self.means[selectedMachine] = (1 - 1.0/n)*self.bandits[selectedMachine].payoutModifier + 1.0/n*result
# for the plot
self.data[i] = result
cumulative_average = np.cumsum(self.data) / (np.arange(self.iterations) + 1)
# plot moving average ctr
plt.plot(cumulative_average)
plt.plot(np.ones(self.iterations) * 1)
plt.plot(np.ones(self.iterations) * 2)
plt.plot(np.ones(self.iterations) * 3)
plt.title('Greedy Optimistic')
plt.xscale('log')
plt.xlabel('Iteration')
plt.ylabel('Cumulative Average')
plt.show()
for result in self.means:
print(result)
return cumulative_average
| 31.066667
| 134
| 0.612303
|
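A standalone worked sketch of the incremental sample-mean update that optimistic initialization relies on: each arm's estimate starts above any plausible payout and is updated as mean_n = (1 - 1/n) * mean_(n-1) + (1/n) * x_n with a per-arm pull counter n. This is a generic illustration with a made-up payout, not part of the class above.
import random
optimistic_mean = 10.0   # deliberately higher than any plausible payout
n = 0
true_payout = 2.0        # hypothetical arm, sampled with unit-variance noise
for _ in range(1000):
    x = random.gauss(true_payout, 1.0)
    n += 1
    optimistic_mean = (1 - 1.0 / n) * optimistic_mean + (1.0 / n) * x
print(round(optimistic_mean, 2))  # drifts from 10.0 towards ~2.0 as n grows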
794d2ca5430bb4706bc35727ca97fade2867c566
| 551
|
py
|
Python
|
manage.py
|
alternativebackend/alternative_backend
|
d27ea4c10eb039e79080be2ced34f22e2558bdf7
|
[
"MIT"
] | null | null | null |
manage.py
|
alternativebackend/alternative_backend
|
d27ea4c10eb039e79080be2ced34f22e2558bdf7
|
[
"MIT"
] | null | null | null |
manage.py
|
alternativebackend/alternative_backend
|
d27ea4c10eb039e79080be2ced34f22e2558bdf7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "alternative_backend.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34.4375
| 83
| 0.693285
|
794d2d03334a3dfcb866f33008c4e5055e29500e
| 1,012
|
py
|
Python
|
vars/generateAccounts.py
|
awalende/workshop-image
|
3b219733c2a9b28e423bad66ed08f2d789e1b3c0
|
[
"Apache-2.0"
] | null | null | null |
vars/generateAccounts.py
|
awalende/workshop-image
|
3b219733c2a9b28e423bad66ed08f2d789e1b3c0
|
[
"Apache-2.0"
] | null | null | null |
vars/generateAccounts.py
|
awalende/workshop-image
|
3b219733c2a9b28e423bad66ed08f2d789e1b3c0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import argparse
import sys
import random
import string
import hashlib
import crypt
from passlib.hash import sha512_crypt
def randomString(stringLength=6):
"""Generate a random string of letters and digits """
lettersAndDigits = string.ascii_lowercase + string.digits
return ''.join(random.choice(lettersAndDigits) for i in range(stringLength))
if len(sys.argv) != 2:
print("Wrong input")
sys.exit(-1)
account_list = []
with open('main.yml', 'w') as the_file:
the_file.write("Accounts:\n")
for i in range(1, int(sys.argv[1])):
pw = randomString()
        pw_hash = sha512_crypt.using(rounds=5000).hash(pw)
the_file.write(" - name: " + 'user' + str(i) + "\n")
the_file.write(" password: " + pw_hash + "\n")
the_file.write(" pw_clr: " + pw + "\n")
account_list.append(('user' + str(i), pw))
for account in account_list:
print(account[0])
print(account[1])
print("")
| 25.3
| 80
| 0.619565
|
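A small sketch showing that a hash produced the way generateAccounts.py above produces it can be checked later with passlib; the cleartext value here stands in for a hypothetical randomString() output.
from passlib.hash import sha512_crypt
pw = 'a1b2c3'                                      # hypothetical generated password
pw_hash = sha512_crypt.using(rounds=5000).hash(pw)
# verify() re-hashes the candidate with the salt embedded in pw_hash and compares.
print(sha512_crypt.verify(pw, pw_hash))            # True
print(sha512_crypt.verify('wrong-pass', pw_hash))  # False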
794d2d31817e82a43a2daaa31b9a081e86171541
| 16,954
|
py
|
Python
|
benchmarks/benchmark.py
|
Dotnester/estee
|
55c0834db3d7da407b7c37d46fa41b5b563e2bbe
|
[
"MIT"
] | 4
|
2018-09-26T07:57:36.000Z
|
2021-05-13T10:35:34.000Z
|
benchmarks/benchmark.py
|
Dotnester/estee
|
55c0834db3d7da407b7c37d46fa41b5b563e2bbe
|
[
"MIT"
] | 2
|
2018-11-06T21:01:14.000Z
|
2019-03-21T22:06:52.000Z
|
benchmarks/benchmark.py
|
Dotnester/estee
|
55c0834db3d7da407b7c37d46fa41b5b563e2bbe
|
[
"MIT"
] | 4
|
2018-10-02T20:42:05.000Z
|
2019-02-19T14:49:57.000Z
|
import collections
import itertools
import multiprocessing
import os
import random
import re
import signal
import sys
import threading
import time
import traceback
import click
import numpy
import pandas as pd
from tqdm import tqdm
from estee.common import imode
from estee.schedulers import WorkStealingScheduler
from estee.schedulers.basic import AllOnOneScheduler, RandomAssignScheduler
from estee.schedulers.camp import Camp2Scheduler
from estee.schedulers.clustering import LcScheduler
from estee.schedulers.genetic import GeneticScheduler
from estee.schedulers.others import BlevelScheduler, DLSScheduler, ETFScheduler, MCPGTScheduler, \
MCPScheduler, TlevelScheduler
from estee.schedulers.queue import BlevelGtScheduler, RandomGtScheduler, TlevelGtScheduler
from estee.serialization.dask_json import json_deserialize, json_serialize
from estee.simulator import MaxMinFlowNetModel, SimpleNetModel
from estee.simulator import Simulator, Worker
from estee.simulator.trace import FetchEndTraceEvent
def generate_seed():
seed = os.getpid() * time.time()
for b in os.urandom(4):
seed *= b
seed = int(seed) % 2**32
random.seed(seed)
numpy.random.seed(seed)
generate_seed()
SCHEDULERS = {
"single": AllOnOneScheduler,
"blevel": BlevelScheduler,
"blevel-gt": BlevelGtScheduler,
"tlevel": TlevelScheduler,
"tlevel-gt": TlevelGtScheduler,
"random": RandomAssignScheduler,
"random-gt": RandomGtScheduler,
"dls": DLSScheduler,
"etf": ETFScheduler,
"mcp": MCPScheduler,
"mcp-gt": MCPGTScheduler,
"genetic": GeneticScheduler,
"camp2": lambda: Camp2Scheduler(5000),
"lc": LcScheduler,
"ws": WorkStealingScheduler
}
NETMODELS = {
"simple": SimpleNetModel,
"maxmin": MaxMinFlowNetModel
}
CLUSTERS = {
"2x8": [{"cpus": 8}] * 2,
"4x4": [{"cpus": 4}] * 4,
"8x4": [{"cpus": 4}] * 8,
"16x4": [{"cpus": 4}] * 16,
"32x4": [{"cpus": 4}] * 32,
"8x8": [{"cpus": 8}] * 8,
"16x8": [{"cpus": 8}] * 16,
"stairs16": [{"cpus": i} for i in range(1, 6)] + [{"cpus": 1}],
"32x16": [{"cpus": 16}] * 32,
"64x16": [{"cpus": 16}] * 64,
"128x16": [{"cpus": 16}] * 128,
"256x16": [{"cpus": 16}] * 256,
}
BANDWIDTHS = {
"8G": 8192,
"2G": 2048,
"512M": 512,
"128M": 128,
"32M": 32
}
IMODES = {
"exact": imode.process_imode_exact,
"blind": imode.process_imode_blind,
"mean": imode.process_imode_mean,
"user": imode.process_imode_user,
}
SCHED_TIMINGS = {
# min_sched_interval, sched_time
"0/0": (0, 0),
"0.1/0.05": (0.1, 0.05),
"0.4/0.05": (0.4, 0.05),
"1.6/0.05": (1.6, 0.05),
"6.4/0.05": (6.4, 0.05)
}
COLUMNS = ["graph_set",
"graph_name",
"graph_id",
"cluster_name",
"bandwidth",
"netmodel",
"scheduler_name",
"imode",
"min_sched_interval",
"sched_time",
"time",
"execution_time",
"total_transfer"]
Instance = collections.namedtuple("Instance",
("graph_set", "graph_name", "graph_id", "graph",
"cluster_name", "bandwidth", "netmodel",
"scheduler_name", "imode", "min_sched_interval", "sched_time",
"count"))
class BenchmarkConfig:
graph_cache = {}
def __init__(self, graph_frame, schedulers, clusters, netmodels, bandwidths,
imodes, sched_timings, count):
self.graph_frame = graph_frame
self.schedulers = schedulers
self.clusters = clusters
self.netmodels = netmodels
self.bandwidths = bandwidths
self.imodes = imodes
self.sched_timings = sched_timings
self.count = count
def generate_instances(self):
def calculate_imodes(graph, graph_id):
if graph_id not in BenchmarkConfig.graph_cache:
BenchmarkConfig.graph_cache[graph_id] = {}
for mode in IMODES:
g = json_deserialize(graph)
IMODES[mode](g)
BenchmarkConfig.graph_cache[graph_id][mode] = json_serialize(g)
for graph_def, cluster_name, bandwidth, netmodel, scheduler_name, mode, sched_timing \
in itertools.product(self.graph_frame.iterrows(), self.clusters, self.bandwidths,
self.netmodels, self.schedulers, self.imodes,
self.sched_timings):
g = graph_def[1]
calculate_imodes(g["graph"], g["graph_id"])
graph = BenchmarkConfig.graph_cache[g["graph_id"]][mode]
(min_sched_interval, sched_time) = SCHED_TIMINGS[sched_timing]
instance = Instance(
g["graph_set"], g["graph_name"], g["graph_id"], graph,
cluster_name, BANDWIDTHS[bandwidth], netmodel,
scheduler_name,
mode,
min_sched_interval, sched_time,
self.count)
yield instance
def __repr__(self):
return """
============ Config ========================
scheduler : {schedulers}
cluster : {clusters}
netmodel : {netmodels}
bandwidths: {bandwidths}
imode : {imodes}
timings : {timings}
REPEAT : {repeat}
============================================
""".format(
schedulers=", ".join(self.schedulers),
clusters=", ".join(self.clusters),
netmodels=", ".join(self.netmodels),
bandwidths=", ".join(self.bandwidths),
imodes=", ".join(self.imodes),
timings=", ".join(self.sched_timings),
repeat=self.count
)
def run_single_instance(instance):
time.sleep(1)
inf = 2**32
def create_worker(wargs):
if instance.netmodel == "simple":
return Worker(**wargs, max_downloads=inf, max_downloads_per_worker=inf)
return Worker(**wargs)
begin_time = time.monotonic()
workers = [create_worker(wargs) for wargs in CLUSTERS[instance.cluster_name]]
netmodel = NETMODELS[instance.netmodel](instance.bandwidth)
scheduler = SCHEDULERS[instance.scheduler_name]()
simulator = Simulator(instance.graph, workers, scheduler, netmodel, trace=True)
try:
sim_time = simulator.run()
runtime = time.monotonic() - begin_time
transfer = 0
for e in simulator.trace_events:
if isinstance(e, FetchEndTraceEvent):
transfer += e.output.size
return sim_time, runtime, transfer
except Exception:
traceback.print_exc()
print("ERROR INSTANCE: {}".format(instance), file=sys.stderr)
return None, None, None
def benchmark_scheduler(instance):
return [run_single_instance(instance)
for _ in range(instance.count)]
def process_multiprocessing(instance):
instance = instance._replace(graph=json_deserialize(instance.graph))
return benchmark_scheduler(instance)
def run_multiprocessing(pool, instances):
return pool.imap(process_multiprocessing, instances)
def process_dask(conf):
(graph, instance) = conf
instance = instance._replace(graph=json_deserialize(graph))
return benchmark_scheduler(instance)
def run_dask(instances, cluster):
from dask.distributed import Client
client = Client(cluster)
graphs = {}
instance_to_graph = {}
instances = list(instances)
for (i, instance) in enumerate(instances):
if instance.graph not in graphs:
graphs[instance.graph] = client.scatter([instance.graph], broadcast=True)[0]
inst = instance._replace(graph=None)
instance_to_graph[inst] = graphs[instance.graph]
instances[i] = inst
results = client.map(process_dask, ((instance_to_graph[i], i) for i in instances))
return client.gather(results)
def init_worker():
signal.signal(signal.SIGINT, signal.SIG_IGN)
def compute(instances, timeout=0, dask_cluster=None):
rows = []
if not instances:
return rows
if dask_cluster:
iterator = run_dask(instances, dask_cluster)
else:
pool = multiprocessing.Pool(initializer=init_worker)
iterator = run_multiprocessing(pool, instances)
if timeout:
print("Timeout set to {} seconds".format(timeout))
def run():
counter = 0
try:
for instance, result in tqdm(zip(instances, iterator), total=len(instances)):
counter += 1
for r_time, r_runtime, r_transfer in result:
if r_time is not None:
rows.append((
instance.graph_set,
instance.graph_name,
instance.graph_id,
instance.cluster_name,
instance.bandwidth,
instance.netmodel,
instance.scheduler_name,
instance.imode,
instance.min_sched_interval,
instance.sched_time,
r_time,
r_runtime,
r_transfer
))
except:
print("Benchmark interrupted, iterated {} instances. Writing intermediate results"
.format(counter))
if timeout:
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
thread.join(timeout)
if thread.is_alive():
print("Timeout reached")
else:
run()
return rows
def run_benchmark(configs, oldframe, resultfile, skip_completed, timeout=0, dask_cluster=None):
for config in configs:
print(config)
instances = create_instances(configs, oldframe, skip_completed, 5)
rows = compute(instances, timeout, dask_cluster)
if not rows:
print("No results were computed")
return
frame = pd.DataFrame(rows, columns=COLUMNS)
print(frame.groupby(["graph_name", "graph_id", "cluster_name",
"bandwidth", "netmodel", "imode", "min_sched_interval", "sched_time",
"scheduler_name"]).mean())
if len(frame) > 0:
base, ext = os.path.splitext(resultfile)
path = "{}.backup{}".format(base, ext)
print("Creating backup of old results to '{}'".format(path))
write_resultfile(oldframe, path)
newframe = pd.concat([oldframe, frame], ignore_index=True)
write_resultfile(newframe, resultfile)
print("{} entries in new '{}'".format(newframe["time"].count(), resultfile))
def skip_completed_instances(instances, frame, repeat, columns, batch):
skipped_output = 0
skipped_batch = 0
counts = frame.groupby(columns).size()
result = []
for instance in instances:
hashed = tuple(getattr(instance, col) for col in columns)
existing_count = 0
if hashed in counts:
count = counts.loc[hashed]
skipped_output += count
existing_count += count
if hashed in batch:
count = batch[hashed]
skipped_batch += count
existing_count += count
if existing_count == 0:
result.append(instance)
elif existing_count < repeat:
result.append(instance._replace(count=repeat - existing_count))
if skipped_output or skipped_batch:
print("Skipping {} instances from output, {} from batch, {} left".format(skipped_output,
skipped_batch,
len(result)))
return result
def limit_max_count(instances, max_count):
result = []
for instance in instances:
if instance.count > max_count:
remaining = instance.count
while remaining > 0:
count = min(max_count, remaining)
remaining -= count
result.append(instance._replace(count=count))
else:
result.append(instance)
return result
def create_instances(configs, frame, skip_completed, max_count):
total_instances = []
columns = ["graph_id",
"cluster_name",
"bandwidth",
"netmodel",
"scheduler_name",
"imode",
"min_sched_interval",
"sched_time"]
batch = {}
for config in configs:
instances = list(config.generate_instances())
if skip_completed:
instances = skip_completed_instances(instances, frame, config.count, columns, batch)
instances = limit_max_count(instances, max_count)
for instance in instances:
hashed = tuple(getattr(instance, col) for col in columns)
batch[hashed] = instance.count + batch.get(hashed, 0)
total_instances += instances
return total_instances
def load_resultfile(resultfile, append):
if os.path.isfile(resultfile):
if not append:
print("Result file '{}' already exists\n"
"Remove --no-append to append results to it".format(resultfile),
file=sys.stderr)
exit(1)
print("Appending to result file '{}'".format(resultfile))
oldframe = pd.read_csv(resultfile)
assert list(oldframe.columns) == COLUMNS
else:
print("Creating result file '{}'".format(resultfile))
oldframe = pd.DataFrame([], columns=COLUMNS)
return oldframe
def write_resultfile(frame, resultfile):
frame.to_csv(resultfile, compression='zip', index=False)
def load_graphs(graphsets, graph_names=None):
frame = pd.DataFrame()
for path in graphsets:
graph = pd.read_json(path)
graph.insert(loc=0, column='graph_set', value=os.path.splitext(path)[0])
frame = pd.concat([frame, graph], ignore_index=True)
if graph_names:
frame = frame[frame["graph_name"].isin(graph_names)].reset_index()
return frame
def generate_help(keys):
return "all,{}".format(",".join(keys))
def parse_timeout(timeout):
if not timeout:
return 0
match = re.match(r"^(\d{2}):(\d{2}):(\d{2})$", timeout)
if not match:
print("Wrong timeout format. Enter timeout as hh:mm:ss.")
exit(1)
return int(match.group(1)) * 3600 + int(match.group(2)) * 60 + int(match.group(3))
@click.command()
@click.argument("graphset")
@click.argument("resultfile")
@click.option("--scheduler", default="all", help=generate_help(SCHEDULERS))
@click.option("--cluster", default="all", help=generate_help(CLUSTERS))
@click.option("--bandwidth", default="all", help=generate_help(BANDWIDTHS))
@click.option("--netmodel", default="all", help=generate_help(NETMODELS))
@click.option("--imode", default="all", help=generate_help(IMODES))
@click.option("--sched-timing", default="all", help=generate_help(SCHED_TIMINGS))
@click.option("--repeat", default=1)
@click.option("--append/--no-append", default=True, help="Exit if the resultfile already exists.")
@click.option("--skip-completed/--no-skip_completed", default=True,
help="Skip already computed instances found in the resultfile.")
@click.option("--graphs", help="Comma separated list of graphs to be used from the input graphset")
@click.option("--timeout", help="Timeout for the computation. Format hh:mm:ss.")
@click.option("--dask-cluster", help="Address of Dask scheduler.")
def compute_cmd(graphset, resultfile, scheduler, cluster, bandwidth,
netmodel, imode, sched_timing, repeat, append, skip_completed,
graphs, timeout, dask_cluster):
def parse_option(value, keys):
if value == "all":
return list(keys)
value = [v.strip() for v in value.split(",")]
assert all(v in keys for v in value)
return value
graphsets = graphset.split(",")
schedulers = parse_option(scheduler, SCHEDULERS)
clusters = parse_option(cluster, CLUSTERS)
bandwidths = parse_option(bandwidth, BANDWIDTHS)
netmodels = parse_option(netmodel, NETMODELS)
imodes = parse_option(imode, IMODES)
sched_timings = parse_option(sched_timing, SCHED_TIMINGS)
timeout = parse_timeout(timeout)
graph_frame = load_graphs(graphsets, None if graphs is None else graphs.split(","))
if len(graph_frame) == 0:
print("No graphs selected")
exit()
config = BenchmarkConfig(graph_frame, schedulers, clusters, netmodels, bandwidths, imodes,
sched_timings, repeat)
frame = load_resultfile(resultfile, append)
run_benchmark([config], frame, resultfile, skip_completed, timeout, dask_cluster)
if __name__ == "__main__":
compute_cmd()
| 33.374016
| 99
| 0.608411
|
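A standalone check of the hh:mm:ss arithmetic used by parse_timeout in benchmark.py above; the timeout string is an arbitrary example.
import re
match = re.match(r"^(\d{2}):(\d{2}):(\d{2})$", "02:15:30")
seconds = int(match.group(1)) * 3600 + int(match.group(2)) * 60 + int(match.group(3))
print(seconds)  # 8130 = 2*3600 + 15*60 + 30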
794d2d4831f01310526020161cca56628c08fa26
| 3,065
|
py
|
Python
|
spotijjjy/abc_client.py
|
maxtarasuik/Spotijjjy
|
c08abedcec129afab29eaa58c04243cbf8600dd9
|
[
"MIT"
] | null | null | null |
spotijjjy/abc_client.py
|
maxtarasuik/Spotijjjy
|
c08abedcec129afab29eaa58c04243cbf8600dd9
|
[
"MIT"
] | null | null | null |
spotijjjy/abc_client.py
|
maxtarasuik/Spotijjjy
|
c08abedcec129afab29eaa58c04243cbf8600dd9
|
[
"MIT"
] | null | null | null |
import json
import requests
from datetime import datetime, timedelta
import difflib
from datetime import datetime
from spotijjjy import SongService
##### ABC Config #####
__ABC_URL__ = "https://music.abcradio.net.au/api/v1/plays/search.json?"
__STATIONS__ = ["doublej", "triplej", "jazz"]
__JJJ_SEARCH_LIMIT__ = 100
class ABCClient(SongService):
def __init__(self, ranges=None, station_id="triplej"):
        # If ranges is None, use the previous day's evening and morning sessions as the default
self._station_id = station_id
if ranges is None:
ranges = self.__get_previous_days_ranges()
        self.__urls = self.__get_station_urls(ranges)
def __get_previous_days_ranges(self):
"""
        Gets ranges for the previous day's evening and morning sessions, used as the default when no ranges are given
        """
        # We are sort of assuming this is run at midday local time
d = datetime.today() - timedelta(days=1)
# Get days
day = d.isoformat()[:11]
return [(day + "20:00:00", day + "23:00:00"), (day + "05:00:00", day + "07:30:00")]
    def __get_station_urls(self, ranges):
        """
        ranges - date ranges, array of tuples of ISO to/from - [(from,to),(from,to)]
        returns - list of URLs associated with the ranges
        e.g. https://music.abcradio.net.au/api/v1/plays/search.json?station=triplej&from=2017-09-13T05:00:00&to=2017-09-13T07:30:00&limit=100&order=asc
        """
limit = "&" + "limit=" + str(__JJJ_SEARCH_LIMIT__)
station = "station=" + self._station_id
__FULL_URL__ = __ABC_URL__ + station
urls = []
for ran in ranges:
dates = "&from=" + ran[0] + "&to=" + ran[1]
url = __FULL_URL__ + dates + limit
urls.append(url)
return urls
def get_songs(self):
"""
Returns a list of song - artist tuples for given urls
"""
# Query ABC urls for songs
songs = []
for url in self.__urls:
resp = requests.get(url=url)
resp.raise_for_status()
data = json.loads(resp.text)
songs.extend(data["items"])
# Convert songs to tuple
# we are using a set in case the same song is played in multiple URLs
song_pairs = set()
for song in songs:
title = song['recording']['title'].lower()
artists_json = song['recording']['artists']
artists = set()
if type(artists_json) is list:
for artist in artists_json:
artists.add(artist['name'].lower())
elif type(artists_json) is dict:
for artist in artists_json.values():
artists.add(artist['name'].lower())
else:
raise IOError("Type of artist is not dict or array")
            song_pairs.add((title, frozenset(artists)))  # Potentially this could be a list; there shouldn't be duplicate artists
return list(song_pairs)
| 38.3125
| 153
| 0.575204
|
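A usage sketch for the ABCClient above; the import path and date range are assumptions, and get_songs() issues live HTTP requests to the ABC API, so this is illustrative rather than something to run in tests.
from spotijjjy.abc_client import ABCClient  # assumed import path for the module above
ranges = [('2017-09-13T20:00:00', '2017-09-13T23:00:00')]   # hypothetical evening session
client = ABCClient(ranges=ranges, station_id='triplej')
for title, artists in client.get_songs():
    print(title, '-', ', '.join(sorted(artists)))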
794d2debbfc3a19eb39de114c9e136c439ee98e0
| 59
|
py
|
Python
|
features/__init__.py
|
Fumiya-Matsumoto/telecom_customer
|
18d2109c327765155ea82e746a3791185f10f8fb
|
[
"RSA-MD"
] | null | null | null |
features/__init__.py
|
Fumiya-Matsumoto/telecom_customer
|
18d2109c327765155ea82e746a3791185f10f8fb
|
[
"RSA-MD"
] | null | null | null |
features/__init__.py
|
Fumiya-Matsumoto/telecom_customer
|
18d2109c327765155ea82e746a3791185f10f8fb
|
[
"RSA-MD"
] | null | null | null |
from .base import Feature, get_arguments, generate_features
| 59
| 59
| 0.864407
|
794d2e8d8ccab1c7abe9145e49cec52e6e31be28
| 31,869
|
py
|
Python
|
geoist/catalog/QCmulti.py
|
irxat/geoist
|
658aadab8074bffcbc6b3861671d35b3012502e9
|
[
"MIT"
] | 53
|
2018-11-17T03:29:55.000Z
|
2022-03-18T02:36:25.000Z
|
geoist/catalog/QCmulti.py
|
irxat/geoist
|
658aadab8074bffcbc6b3861671d35b3012502e9
|
[
"MIT"
] | 3
|
2018-11-28T11:37:51.000Z
|
2019-01-30T01:52:45.000Z
|
geoist/catalog/QCmulti.py
|
irxat/geoist
|
658aadab8074bffcbc6b3861671d35b3012502e9
|
[
"MIT"
] | 35
|
2018-11-17T03:29:57.000Z
|
2022-03-23T17:57:06.000Z
|
#!/usr/bin/env python
"""Code for creating figures comparing two catalogs spanning the same time
frame. Run `QCmulti.py -h` for command line options.
"""
import os
import sys
import errno
import argparse
import time
import shutil
from datetime import datetime
from math import sqrt, degrees, radians, sin, cos, atan2, pi, ceil
import markdown
from scipy import stats
import numpy as np
import pandas as pd
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from obspy.geodetics.base import gps2dist_azimuth
# Python 2
try:
from urllib2 import urlopen, HTTPError
# Python 3
except ImportError:
from urllib.request import urlopen, HTTPError
from . import QCutils as qcu
from .decorators import retry, printstatus
###############################################################################
###############################################################################
###############################################################################
@printstatus('Creating basic catalog summary')
def basic_cat_sum(catalog, catname, dirname):
"""Gather basic catalog summary statistics."""
lines = []
lines.append('目录名称: %s\n\n' % catname.upper())
lines.append('起始日期: %s\n' % catalog['time'].min())
lines.append('截止日期: %s\n\n' % catalog['time'].max())
lines.append('事件总数: %s\n\n' % len(catalog))
    lines.append('最小纬度: %s\n' % catalog['latitude'].min())
    lines.append('最大纬度: %s\n' % catalog['latitude'].max())
    lines.append('最小经度: %s\n' % catalog['longitude'].min())
    lines.append('最大经度: %s\n\n' % catalog['longitude'].max())
lines.append('最小深度: %s\n' % catalog['depth'].min())
lines.append('最大深度: %s\n' % catalog['depth'].max())
lines.append('0 km深度事件数: %s\n'
% len(catalog[catalog['depth'] == 0]))
lines.append('NaN 深度事件数: %s\n\n'
% len(catalog[pd.isnull(catalog['depth'])]))
lines.append('最小震级: %s\n' % catalog['mag'].min())
lines.append('最大震级: %s\n' % catalog['mag'].max())
lines.append('0 震级事件数: %s\n'
% len(catalog[catalog['mag'] == 0]))
lines.append('NaN 震级事件数: %s'
% len(catalog[pd.isnull(catalog['mag'])]))
with open('%s_summary.txt' % catname, 'w') as sumfile:
for line in lines:
sumfile.write(line)
@printstatus('Creating summary of comparison criteria and statistics')
def comp_criteria(cat1, cat1name, cat1mids, cat2, cat2name, cat2mids, dirname,
otwindow=16, distwindow=100):
"""Trim catalogs and summarize comparison criteria/statistics."""
lines = []
nummatches = len(cat1mids)
unq1 = len(cat1) - len(cat1mids)
unq2 = len(cat2) - len(cat2mids)
if nummatches > 0:
newcat1 = cat1[cat1['id'].isin(cat1mids)].reset_index(drop=True)
newcat2 = cat2[cat2['id'].isin(cat2mids)].reset_index(drop=True)
mintime = min(newcat1['time'].min(), newcat2['time'].min())
mintime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(mintime))
maxtime = max(newcat1['time'].max(), newcat2['time'].max())
maxtime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(maxtime))
lines.append('重叠时间段: %s to %s\n\n'
% (mintime, maxtime))
lines.append('-- 匹配准则 --\n')
lines.append('时间窗口: %s s\n' % otwindow)
lines.append('距离窗口: %s km\n\n' % distwindow)
lines.append('-- 匹配结果 --\n')
lines.append('关联事件数: %s\n' % nummatches)
lines.append(' %s 未关联事件数: %s\n'
% (cat1name, unq1))
lines.append(' %s 未关联事件数: %s\n\n'
% (cat2name, unq2))
        lines.append('最小匹配纬度: %s\n' %
                     min(newcat1['latitude'].min(), newcat2['latitude'].min()))
        lines.append('最大匹配纬度: %s\n' %
                     max(newcat1['latitude'].max(), newcat2['latitude'].max()))
        lines.append('最小匹配经度: %s\n' %
                     min(newcat1['longitude'].min(),
                         newcat2['longitude'].min()))
        lines.append('最大匹配经度: %s\n\n' %
                     max(newcat1['longitude'].max(),
                         newcat2['longitude'].max()))
lines.append('最小匹配深度: %s\n' %
min(newcat1['depth'].min(), newcat2['depth'].min()))
lines.append('最大匹配深度: %s\n\n' %
max(newcat1['depth'].max(), newcat2['depth'].max()))
lines.append('最小匹配震级: %s\n' %
min(newcat1['mag'].min(), newcat2['mag'].min()))
lines.append('最大匹配震级: %s' %
max(newcat1['mag'].max(), newcat2['mag'].max()))
else:
lines.append('-- 匹配准则 --\n')
        lines.append('时间窗口: %s s\n' % otwindow)
lines.append('距离窗口: %s km\n\n' % distwindow)
lines.append('-- 匹配结果 --\n')
lines.append('没有匹配事件发现')
with open('%s_comparisoncriteria.txt' % dirname, 'w') as compfile:
for line in lines:
compfile.write(line)
@printstatus('Matching events')
def match_events(cat1, cat2, dirname, otwindow=16, distwindow=100):
"""Match events within two catalogs."""
cat1ids, cat2ids = [], []
matchlines = [('使用 %ss 时间阈值和 %skm 距离阈值'
'匹配的事件\n') % (otwindow, distwindow),
'***********************\n']
pcolumns = ['convtime', 'id', 'latitude', 'longitude', 'depth', 'mag']
sep = '-----------------------\n'
for i in range(len(cat1)):
cat2ix = cat2[cat2['time'].between(cat1.ix[i]['time'] - otwindow,
cat1.ix[i]['time'] + otwindow)].index.values
if len(cat2ix) != 0:
dists = np.array([gps2dist_azimuth(cat1.ix[i]['latitude'],
cat1.ix[i]['longitude'], cat2.ix[x]['latitude'],
cat2.ix[x]['longitude'])[0] / 1000. for x in cat2ix])
dtimes = np.array([abs(cat1.ix[i]['time'] - cat2.ix[x]['time'])
for x in cat2ix])
carr = dists + 5*dtimes
ind = np.argmin(carr)
if (dists[ind] < distwindow) and (dtimes[ind] < otwindow):
cat1event = cat1.ix[i][pcolumns]
cat2event = cat2.ix[cat2ix[ind]][pcolumns]
dmag = cat1event['mag'] - cat2event['mag']
diffs = map('{:.2f}'.format, [dists[ind], dtimes[ind], dmag])
mline1 = ' '.join([str(x) for x in cat1event[:]]) + ' ' +\
' '.join(diffs) + '\n'
mline2 = ' '.join([str(x) for x in cat2event[:]]) + '\n'
matchlines.extend((sep, mline1, mline2))
cat1ids.append(cat1event['id'])
cat2ids.append(cat2event['id'])
cat1matched = cat1[cat1['id'].isin(cat1ids)].reset_index(drop=True)
cat2matched = cat2[cat2['id'].isin(cat2ids)].reset_index(drop=True)
with open('%s_matches.txt' % dirname, 'w') as matchfile:
for mline in matchlines:
matchfile.write(mline)
return cat1ids, cat2ids, cat1matched, cat2matched
@printstatus('Finding closest unassociated events')
def find_closest(cat1, cat1name, cat1mids, cat2, dirname):
"""Find closest event for unassociated events."""
cat1un = cat1[~cat1['id'].isin(cat1mids)].reset_index(drop=True)
clines = ['Closest unassociated events for %s events\n' % cat1name,
'***********************\n'
'date time id latitude longitude depth magnitude '
'(distance) (Δ time) (Δ magnitude)\n']
pcolumns = ['convtime', 'id', 'latitude', 'longitude', 'depth', 'mag']
sep = '-----------------------\n'
for i in range(len(cat1un)):
cat2ix = cat2[cat2['time'].between(cat1un.ix[i]['time'],
cat1.ix[i]['time'] + 300)].index.values
x = 600
while len(cat2ix) == 0:
cat2ix = cat2[cat2['time'].between(cat1un.ix[i]['time'],
cat1.ix[i]['time'] + x)].index.values
x += 6000
dists = np.array([gps2dist_azimuth(cat1un.ix[i]['latitude'],
cat1un.ix[i]['longitude'], cat2.ix[x]['latitude'],
cat2.ix[x]['longitude'])[0] / 1000. for x in cat2ix])
dtimes = np.array([abs(cat1un.ix[i]['time'] - cat2.ix[x]['time'])
for x in cat2ix])
carr = dists + 5*dtimes
ind = np.argmin(carr)
cat1event = cat1un.ix[i][pcolumns]
cat2event = cat2.ix[cat2ix[ind]][pcolumns]
dmag = cat1event['mag'] - cat2event['mag']
diffs = map('{:.2f}'.format, [dists[ind], dtimes[ind], dmag])
cline1 = ' '.join([str(x) for x in cat1event[:]]) + '\n'
cline2 = ' '.join([str(x) for x in cat2event[:]]) + ' ' +\
' '.join(diffs) + '\n'
clines.extend((sep, cline1, cline2))
with open('%s_closestunassociated.txt' % dirname, 'w') as unfile:
for cline in clines:
unfile.write(cline)
@printstatus('Mapping events from both catalogs')
def map_events(cat1, cat1name, cat2, cat2name, cat1mids, cat2mids, dirname):
"""Map matching events between catalogs."""
if len(cat1mids) == 0:
return
lllat, lllon, urlat, urlon, _, _, _, clon = qcu.get_map_bounds(cat1, cat2)
cat1lons, cat1lats, cat2lons, cat2lats = [], [], [], []
for i, mid in enumerate(cat1mids):
cat1lons.append(cat1[cat1['id'] == mid]['longitude'].get_values()[0])
cat1lats.append(cat1[cat1['id'] == mid]['latitude'].get_values()[0])
cat2lons.append(cat2[cat2['id'] == cat2mids[i]]['longitude'
].get_values()[0])
cat2lats.append(cat2[cat2['id'] == cat2mids[i]]['latitude'
].get_values()[0])
plt.figure(figsize=(12, 7))
mplmap = plt.axes(projection=ccrs.PlateCarree(central_longitude=clon))
mplmap.set_extent([lllon, urlon, lllat, urlat], ccrs.PlateCarree())
mplmap.coastlines('50m')
mplmap.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
for i, lat in enumerate(cat1lats):
mplmap.plot([cat1lons[i], cat2lons[i]], [lat, cat2lats[i]],
color='k', transform=ccrs.PlateCarree())
mplmap.scatter(cat1lons, cat1lats, color='b', s=2, zorder=4,
transform=ccrs.PlateCarree(), label=cat1name)
mplmap.scatter(cat2lons, cat2lats, color='r', s=2, zorder=4,
transform=ccrs.PlateCarree(), label=cat2name)
mplmap.add_feature(cfeature.NaturalEarthFeature('cultural',
'admin_1_states_provinces_lines', '50m', facecolor='none',
edgecolor='k', zorder=9))
mplmap.add_feature(cfeature.BORDERS)
plt.legend()
plt.savefig('%s_mapmatcheddetecs.png' % dirname, dpi=300)
plt.close()
@printstatus('Mapping unassociated events')
def map_unique_events(cat, catname, mids):
"""Map unassociated events from a catalog."""
if len(mids) == len(cat):
return
cat = cat[~cat['id'].isin(mids)].reset_index(drop=True)
lllat, lllon, urlat, urlon, _, _, _, clon = qcu.get_map_bounds(cat)
plt.figure(figsize=(12, 7))
mplmap = plt.axes(projection=ccrs.PlateCarree(central_longitude=clon))
mplmap.coastlines('50m')
mplmap.scatter(cat['longitude'].tolist(), cat['latitude'].tolist(),
color='r', s=2, zorder=4, transform=ccrs.PlateCarree())
mplmap.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
mplmap.add_feature(cfeature.NaturalEarthFeature('cultural',
'admin_1_states_provinces_lines', '50m', facecolor='none',
edgecolor='k', zorder=9))
mplmap.add_feature(cfeature.BORDERS)
plt.title('%s unassociated events' % catname, fontsize=20, y=1.08)
#print(catname)
plt.savefig('%s_uniquedetecs.png' % catname, dpi=300)
plt.close()
@printstatus('Graphing polar histogram of azimuths and distances')
def make_az_dist(cat1, cat1name, cat2, cat2name, cat1mids, cat2mids, dirname,
distwindow=100, numbins=16):
"""Make polar scatter/histogram of azimuth vs. distance."""
if len(cat1mids) == 0:
return
azimuths, distances = qcu.get_azs_and_dists(cat1, cat2, cat1mids, cat2mids)
width = 2*pi / numbins
razimuths = list(map(radians, azimuths))
bins = np.linspace(0, 2*pi, numbins+1)
azhist = np.histogram(razimuths, bins=bins)[0]
hist = (float(distwindow)/max(azhist)) * azhist
bins = (bins + width/2)[:-1]
plt.figure(figsize=(6, 6))
ax1 = plt.subplot(111, projection='polar')
ax1.scatter(razimuths, distances, color='b', s=10)
bars = ax1.bar(bins, hist, width=width)
ax1.set_theta_zero_location('N')
ax1.set_rmax(distwindow)
ax1.set_theta_direction(-1)
ax1.set_rlabel_position(112.5)
ax1.set_title('%s location relative to %s' % (cat1name, cat2name),
fontsize=20)
for _, hbar in list(zip(hist, bars)):
hbar.set_facecolor('b')
hbar.set_alpha(0.2)
plt.subplots_adjust(left=0.1, right=0.95, top=0.9, bottom=0.11)
plt.savefig('%s_polarazimuth.png' % dirname, dpi=300)
plt.close()
@printstatus('Comparing parameters of matched events')
def compare_params(cat1, cat1name, cat2, cat2name, cat1mids, cat2mids, param,
dirname):
"""Compare parameters of matched events."""
if len(cat1mids) == 0:
return
cat1params, cat2params = [], []
for ix, eid in enumerate(cat1mids):
param1 = float(cat1[cat1['id'] == eid][param])
param2 = float(cat2[cat2['id'] == cat2mids[ix]][param].get_values()[0])
cat1params.append(param1)
cat2params.append(param2)
minparam = min(min(cat1params), min(cat2params))
maxparam = max(max(cat1params), max(cat2params))
xes = range(int(minparam), ceil(maxparam))
mval, bval, rval, _, _ = stats.linregress(cat1params, cat2params)
linegraph = [mval*x + bval for x in xes]
r2val = rval*rval
aparam = param if param != 'mag' else 'magnitude'
tparam = aparam.capitalize()
plt.figure(figsize=(8, 8))
plt.scatter(cat1params, cat2params, edgecolor='b', facecolor=None)
plt.plot(xes, linegraph, c='r', linewidth=1, label='best fit')
plt.plot(xes, xes, c='k', linewidth=1, label='m = 1')
plt.legend(loc='upper left')
plt.xlim(minparam, maxparam)
plt.ylim(minparam, maxparam)
plt.xlabel('%s %s' % (cat1name, aparam), fontsize=14)
plt.ylabel('%s %s' % (cat2name, aparam), fontsize=14)
plt.axes().set_aspect('equal', 'box')
plt.title('%s correlation' % tparam, fontsize=20)
plt.savefig('%s_compare%s.png' % (dirname, param), dpi=300)
plt.close()
@printstatus('Graphing parameter differences between matched events')
def make_diff_hist(cat1, cat2, cat1mids, cat2mids, param, binsize, dirname,
title='', xlabel=''):
"""Make histogram of parameter differences between matched detections."""
if len(cat1mids) == 0:
return
paramdiffs = []
for idx, eid in enumerate(cat1mids):
c1mask = cat1['id'] == eid
c2mask = cat2['id'] == cat2mids[idx]
if param == 'distance':
cat1lat = cat1[c1mask]['latitude'].values[0]
cat1lon = cat1[c1mask]['longitude'].values[0]
cat2lat = cat2[c2mask]['latitude'].values[0]
cat2lon = cat2[c2mask]['longitude'].values[0]
pardiff = gps2dist_azimuth(cat1lat, cat1lon, cat2lat, cat2lon
)[0] / 1000.
paramdiffs.append(pardiff)
else:
cat1param = cat1[c1mask][param].values[0]
cat2param = cat2[c2mask][param].values[0]
if np.isnan(cat1param) or np.isnan(cat2param):
continue
pardiff = cat1param - cat2param
paramdiffs.append(pardiff)
minpardiff, maxpardiff = min(paramdiffs), max(paramdiffs)
pardiffdown = qcu.round2bin(minpardiff, binsize, 'down')
pardiffup = qcu.round2bin(maxpardiff, binsize, 'up')
numbins = int((pardiffup-pardiffdown) / binsize)
pardiffbins = np.linspace(pardiffdown, pardiffup+binsize,
numbins+2) - binsize/2.
plt.figure(figsize=(12, 6))
plt.title(title, fontsize=20)
plt.xlabel(xlabel, fontsize=14)
plt.ylabel('Count', fontsize=14)
plt.hist(paramdiffs, pardiffbins, alpha=1, color='b', edgecolor='k')
plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.11)
plt.tick_params(labelsize=12)
plt.xlim(pardiffdown-binsize/2., pardiffup+binsize/2.)
plt.ylim(0)
plt.savefig('%s_%sdiffs.png' % (dirname, param), dpi=300)
plt.close()
###############################################################################
def create_figures_new(db = None, catalog_file = None,
startyear = 2010 , endyear = 2012,
minmag = -5 , maxmag = 12, pathname = None, dhrs = 0):
"""Generate and save all relevant figures and text files."""
if db is None:
cat1 = catalog_file[0].lower()
cat2 = catalog_file[1].lower()
dirname = '%s-%s%s-%s' % (cat1, cat2, startyear, endyear)
datadf1 = qcu.get_local_data(cat1, dirname, startyear=startyear,
endyear=endyear, minmag=minmag, maxmag=maxmag)
datadf2 = qcu.get_local_data(cat2, dirname, startyear=startyear,
endyear=endyear, minmag=minmag, maxmag=maxmag, dhrs = dhrs)
cat1, cat2 = cat1.upper(), cat2.upper()
else:
db1 = db[0]
db2 = db[1]
dirname = '%s-%s%s-%s' % (db1.Header['Name'].lower(), db2.Header['Name'].lower(), startyear, endyear)
print(dirname)
datadf1 = qcu.get_db_data(db1, dirname, startyear=startyear,
endyear=endyear, minmag=minmag, maxmag=maxmag)
datadf2 = qcu.get_db_data(db2, dirname, startyear=startyear,
endyear=endyear, minmag=minmag, maxmag=maxmag, dhrs = dhrs)
cat1, cat2 = db1.Header['Name'].upper(), db2.Header['Name'].upper()
os.chdir(pathname)
#os.chdir(dirname)
basic_cat_sum(datadf1, cat1, dirname)
basic_cat_sum(datadf2, cat2, dirname)
datadf1.loc[:, 'convtime'] = [' '.join(x.split('T')) for x in
datadf1['time'].tolist()]
datadf1.loc[:, 'convtime'] = datadf1['convtime'].astype('datetime64[ns]')
datadf1.loc[:, 'time'] = [qcu.to_epoch(x) for x in datadf1['time']]
datadf2.loc[:, 'convtime'] = [' '.join(x.split('T')) for x in
datadf2['time'].tolist()]
datadf2.loc[:, 'convtime'] = datadf2['convtime'].astype('datetime64[ns]')
datadf2.loc[:, 'time'] = [qcu.to_epoch(x) for x in datadf2['time']]
datadf1, datadf2 = qcu.trim_times(datadf1, datadf2)
cat1ids, cat2ids, newcat1, newcat2 = match_events(datadf1, datadf2,
dirname, otwindow = 60, distwindow=100)
if len(cat1ids) == 0:
sys.stdout.write('*** No matching events found ***\n')
comp_criteria(datadf1, cat1, cat1ids, datadf2, cat2, cat2ids, dirname,
otwindow = 60, distwindow = 100)
#find_closest(datadf1, cat1, cat1ids, datadf2, dirname)
map_events(newcat1, cat1, newcat2, cat2, cat1ids, cat2ids, dirname)
print(cat1)
map_unique_events(datadf1, cat1, cat1ids)
print(cat2)
map_unique_events(datadf2, cat2, cat2ids)
make_az_dist(newcat1, cat1, newcat2, cat2, cat1ids, cat2ids, dirname)
compare_params(newcat1, cat1, newcat2, cat2, cat1ids, cat2ids, 'mag',
dirname)
compare_params(newcat1, cat1, newcat2, cat2, cat1ids, cat2ids, 'depth',
dirname)
make_diff_hist(newcat1, newcat2, cat1ids, cat2ids, 'time', 0.5, dirname,
xlabel='%s-%s time differences (sec)' % (cat1.upper(),
cat2.upper()))
make_diff_hist(newcat1, newcat2, cat1ids, cat2ids, 'mag', 0.1, dirname,
xlabel='%s-%s magnitude differences' % (cat1.upper(),
cat2.upper()))
make_diff_hist(newcat1, newcat2, cat1ids, cat2ids, 'depth', 2, dirname,
xlabel='%s-%s depth differences (km)' % (cat1.upper(),
cat2.upper()))
make_diff_hist(newcat1, newcat2, cat1ids, cat2ids, 'distance', 2, dirname,
xlabel='%s-%s distances (km)' % (cat1.upper(),
cat2.upper()))
return dirname
def create_figures():
"""Generate and save all relevant figures and text files."""
parser = argparse.ArgumentParser()
parser.add_argument('catalog1', nargs='?', type=str,
help='pick first catalog to download data from; to \
download data from all catalogs, use "preferred"; if \
using -sf, give catalog name')
parser.add_argument('catalog2', nargs='?', type=str,
help='pick second catalog to download data from; to \
download data from all catalogs, use "preferred"; if \
using -sf, give catalog name')
parser.add_argument('startyear', nargs='?', type=int,
help='pick starting year; if using -sf, give first \
year in catalog')
parser.add_argument('endyear', nargs='?', type=int,
help='pick end year (to get a single year of data, \
enter same year as startyear); if using -sf, give \
last year in catalog')
parser.add_argument('-mr', '--magrange', nargs=2, type=float,
default=[-5, 12],
help='give the magnitude range for downloading data \
(default range is from -5 to 12)')
parser.add_argument('-sf', '--specifyfiles', nargs=2, type=str,
help='specify two existing .csv files to use')
parser.add_argument('-fd', '--forcedownload', action='store_true',
help='forces downloading of data even if .csv file \
exists')
parser.add_argument('-nm', '--nomatches', action='store_false',
help='do not include list of matching events in HTML \
report')
args = parser.parse_args()
minmag, maxmag = args.magrange
if args.specifyfiles is None:
if not args.catalog1:
sys.stdout.write('No first catalog specified. Exiting...\n')
sys.exit()
elif not args.catalog2:
sys.stdout.write('No second catalog specified. Exiting...\n')
sys.exit()
elif not args.startyear:
sys.stdout.write('No starting year specified. Exiting...\n')
sys.exit()
elif not args.endyear:
sys.stdout.write('No ending year specified. Exiting...\n')
sys.exit()
cat1, cat2 = args.catalog1.lower(), args.catalog2.lower()
startyear, endyear = map(int, [args.startyear, args.endyear])
download = args.forcedownload
dirname = '%s-%s%s-%s' % (cat1, cat2, startyear, endyear)
if download:
try:
os.makedirs(dirname)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
datadf1 = qcu.get_data(cat1, dirname, startyear=startyear,
endyear=endyear, minmag=minmag, maxmag=maxmag)
datadf2 = qcu.get_data(cat2, dirname, startyear=startyear,
endyear=endyear, minmag=minmag, maxmag=maxmag)
else:
# Python 2
try:
try:
datadf1 = pd.read_csv('%s/%s%s-%s.csv' %
(dirname, cat1, startyear, endyear))
datadf2 = pd.read_csv('%s/%s%s-%s.csv' %
(dirname, cat2, startyear, endyear))
except IOError:
try:
os.makedirs(dirname)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
datadf1 = qcu.get_data(cat1, dirname, startyear=startyear,
endyear=endyear, minmag=minmag, maxmag=maxmag)
datadf2 = qcu.get_data(cat2, dirname, startyear=startyear,
endyear=endyear, minmag=minmag, maxmag=maxmag)
# Python 3
except:
try:
datadf1 = pd.read_csv('%s/%s%s-%s.csv' %
(dirname, cat1, startyear, endyear))
datadf2 = pd.read_csv('%s/%s%s-%s.csv' %
(dirname, cat2, startyear, endyear))
except FileNotFoundError:
try:
os.makedirs(dirname)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
datadf1 = qcu.get_data(cat1, dirname, startyear=startyear,
endyear=endyear, minmag=minmag, maxmag=maxmag)
datadf2 = qcu.get_data(cat2, dirname, startyear=startyear,
endyear=endyear, minmag=minmag, maxmag=maxmag)
else:
from shutil import copy2
sfcat1, sfcat2 = args.specifyfiles
cat1, cat2 = args.catalog1, args.catalog2
dirname = '%s-%s%s-%s' % (cat1, cat2, args.startyear, args.endyear)
try:
os.makedirs(dirname)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
datadf1, datadf2 = pd.read_csv(sfcat1), pd.read_csv(sfcat2)
try:
copy2(sfcat1, dirname)
copy2(sfcat2, dirname)
except shutil.SameFileError:
pass
if len(datadf1) == 0:
sys.stdout.write(('%s catalog has no data available for that time '
'period. Quitting...\n') % cat1.upper())
sys.exit()
if len(datadf2) == 0:
sys.stdout.write(('%s catalog has no data available for that time '
'period. Quitting...\n') % cat2.upper())
sys.exit()
cat1, cat2 = cat1.upper(), cat2.upper()
os.chdir(dirname)
basic_cat_sum(datadf1, cat1, dirname)
basic_cat_sum(datadf2, cat2, dirname)
datadf1.loc[:, 'convtime'] = [' '.join(x.split('T')) for x in
datadf1['time'].tolist()]
datadf1.loc[:, 'convtime'] = datadf1['convtime'].astype('datetime64[ns]')
datadf1.loc[:, 'time'] = [qcu.to_epoch(x) for x in datadf1['time']]
datadf2.loc[:, 'convtime'] = [' '.join(x.split('T')) for x in
datadf2['time'].tolist()]
datadf2.loc[:, 'convtime'] = datadf2['convtime'].astype('datetime64[ns]')
datadf2.loc[:, 'time'] = [qcu.to_epoch(x) for x in datadf2['time']]
datadf1, datadf2 = qcu.trim_times(datadf1, datadf2)
cat1ids, cat2ids, newcat1, newcat2 = match_events(datadf1, datadf2,
dirname)
if len(cat1ids) == 0:
sys.stdout.write('*** No matching events found ***\n')
comp_criteria(datadf1, cat1, cat1ids, datadf2, cat2, cat2ids, dirname)
#find_closest(datadf1, cat1, cat1ids, datadf2, dirname)
map_events(newcat1, cat1, newcat2, cat2, cat1ids, cat2ids, dirname)
map_unique_events(datadf1, cat1, cat1ids)
map_unique_events(datadf2, cat2, cat2ids)
make_az_dist(newcat1, cat1, newcat2, cat2, cat1ids, cat2ids, dirname)
compare_params(newcat1, cat1, newcat2, cat2, cat1ids, cat2ids, 'mag',
dirname)
compare_params(newcat1, cat1, newcat2, cat2, cat1ids, cat2ids, 'depth',
dirname)
make_diff_hist(newcat1, newcat2, cat1ids, cat2ids, 'time', 0.5, dirname,
xlabel='%s-%s time differences (sec)' % (cat1.upper(),
cat2.upper()))
make_diff_hist(newcat1, newcat2, cat1ids, cat2ids, 'mag', 0.1, dirname,
xlabel='%s-%s magnitude differences' % (cat1.upper(),
cat2.upper()))
make_diff_hist(newcat1, newcat2, cat1ids, cat2ids, 'depth', 2, dirname,
xlabel='%s-%s depth differences (km)' % (cat1.upper(),
cat2.upper()))
make_diff_hist(newcat1, newcat2, cat1ids, cat2ids, 'distance', 2, dirname,
xlabel='%s-%s distances (km)' % (cat1.upper(),
cat2.upper()))
return dirname, args.nomatches
def generate_html(dirname, matchbool):
"""Generate an HTML file containing all of the generated images and test
files."""
catalog1 = dirname.split('-')[0].upper()
catalog2 = dirname.split('-')[1][:-4].upper()
startyear = dirname.split('-')[1][-4:]
endyear = dirname.split('-')[-1]
#print(catalog1, catalog2)
with open('{0}_summary.txt'.format(catalog1)) as sum1file:
cat1sum = '\t\t' + '\t\t'.join(sum1file.readlines())
with open('{0}_summary.txt'.format(catalog2)) as sum2file:
cat2sum = '\t\t' + '\t\t'.join(sum2file.readlines())
with open('{0}_comparisoncriteria.txt'.format(dirname)) as compfile:
compcrit = '\t\t' + '\t\t'.join(compfile.readlines())
with open('{0}_matches.txt'.format(dirname)) as matchfile:
matches = '\t\t' + '\t\t'.join(matchfile.readlines())
if matchbool:
tocm = '- [匹配事件汇总信息](#matches)\n'
strm = ('### Summary of Matching Events <a name="matches"></a>\n---\n'
'{0}\n').format(matches)
else:
tocm, strm = '', ''
toc = ('## 地震目录 {1} 和 {2} 从 {3} 至 {4} 对比报告\n'
'### 内容\n'
'- [目录统计信息](#catstats)\n'
' - [目录 1](#cat1stats)\n'
' - [目录 2](#cat2stats)\n'
'- [对比方法](#compcrit)\n'
'- [匹配事件震中](#matchh)\n'
'- [匹配事件震级](#matchm)\n'
'- [未匹配事件](#missevs)\n'
'{0}---\n').format(tocm, catalog1, catalog2, startyear, endyear)
mdstring = ('## Basic Catalog Statistics <a name="catstats"></a>\n'
'### Catalog 1 <a name="cat1stats"></a>\n---\n'
'{5}\n'
'### Catalog 2 <a name="cat2stats"></a>\n---\n'
'{6}\n'
'### Comparison Criteria <a name="compcrit"></a>\n---\n'
'{7}\n'
'### Matching Event Hypocenters <a name="matchh"></a>\n---\n'
'<img width="80%" src="{0}_mapmatcheddetecs.png">\n'
'<img width="60%" src="{0}_polarazimuth.png">\n'
'<img width="80%" src="{0}_distancediffs.png">\n'
'<img width="80%" src="{0}_depthdiffs.png">\n'
'<img width="80%" src="{0}_comparedepth.png">\n'
'<img width="80%" src="{0}_timediffs.png">\n'
'### Matching Event Magnitudes <a name="matchm"></a>\n---\n'
'<img width="80%" src="{0}_magdiffs.png">\n'
'<img width="80%" src="{0}_comparemag.png">\n'
'### Unassociated Events <a name="missevs"></a>\n---\n'
'<img width="80%" src="{1}_uniquedetecs.png">\n'
'<img width="80%" src="{2}_uniquedetecs.png">\n'
'{8}'
).format(dirname, catalog1, catalog2, startyear, endyear,
cat1sum, cat2sum, compcrit, strm)
html = markdown.markdown(toc + mdstring)
with open('{0}_report.html'.format(dirname), 'w') as htmlfile:
htmlfile.write(html)
if __name__ == '__main__':
try:
dirname, matches = create_figures()
generate_html(dirname, matches)
except (KeyboardInterrupt, SystemError):
sys.stdout.write('\nProgram canceled. Exiting...\n')
sys.exit()
| 41.281088
| 109
| 0.561204
|
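A standalone sketch of the combined matching score used by match_events in QCmulti.py above: candidates inside the origin-time window are ranked by distance_km + 5 * |Δt|, and the winner is kept only if it also beats both thresholds; the candidate values here are made up.
import numpy as np
dists = np.array([12.0, 80.0, 40.0])   # km to each candidate event
dtimes = np.array([9.0, 1.0, 2.0])     # |origin-time difference| in seconds
score = dists + 5 * dtimes             # 57, 85, 50
ind = np.argmin(score)                 # -> 2
otwindow, distwindow = 16, 100
print(ind, bool((dists[ind] < distwindow) and (dtimes[ind] < otwindow)))  # 2 True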
794d2f440a4a094a0bdf3b48ee2135d957481b0d
| 1,950
|
py
|
Python
|
webapps/nfs/rFile/NFPportable/spinal/pyscripts/go_association_experimental.py
|
xuwenjian85/NFPscanner-webserver
|
456208dbc526a7ce59b41e68051a3a28475c74f2
|
[
"Apache-2.0"
] | null | null | null |
webapps/nfs/rFile/NFPportable/spinal/pyscripts/go_association_experimental.py
|
xuwenjian85/NFPscanner-webserver
|
456208dbc526a7ce59b41e68051a3a28475c74f2
|
[
"Apache-2.0"
] | null | null | null |
webapps/nfs/rFile/NFPportable/spinal/pyscripts/go_association_experimental.py
|
xuwenjian85/NFPscanner-webserver
|
456208dbc526a7ce59b41e68051a3a28475c74f2
|
[
"Apache-2.0"
] | null | null | null |
import sys
class GoAssociation:
def __init__(self, id_filename, go_filename, out_filename):
outfile = open(out_filename, "w")
idtoname = open(id_filename)
gofile = open(go_filename)
M1 = {}
for line in idtoname:
cols = line.split()
M1[cols[0]] = []
for name in cols:
M1[cols[0]].append(name)
Go1 = {}
for key in M1:
Go1[key] = set()
lc = 0
for line in gofile:
lc = lc+1
if lc % 1000 == 0:
print lc
if line[0] == "!":
continue
cols = line.split("\t")
#if high-level category skip
if (cols[4] == "GO:0008150" or cols[4] == "GO:0005575" or cols[4] == "GO:0003674"):
continue
#if non-experimental skip
if (cols[6] != "IPI" and cols[6] != "IGI" and cols[6] != "IMP" and cols[6] != "IDA" and \
cols[6] != "IEP" and cols[6] != "TAS" and cols[6] != "IC"):
continue
found_key = False
for key in M1:
for name in M1[key]:
#check object symbol at cols[2] or object name at cols[9]
if (name == cols[2]) or (name == cols[9]):
found_key = True
break
#check object synonyms at cols[10] separated by "|"
synonyms = cols[10].split("|")
for synonym in synonyms:
if name == synonym:
found_key = True
break
if found_key == True:
break
if found_key == True:
#GO ID is at cols[4]
Go1[key].add(cols[4])
break
for key in Go1:
outfile.write('|'.join(M1[key]) + " ")
outfile.write('|'.join(list(Go1[key])) + "\n")
outfile.close()
if __name__ == "__main__":
#id_to_name file format:
#dm10840 CG6612 DIP:22971N
#at each line where dm10840 is the name used in the ppi network and the rest are assigned names
#sample run:
#python pyscripts/go_association.py data/scere_idtoname.txt data/gene_association_scere.sgd data/scere_gene_to_go.txt
#while in the main folder
inputdata = GoAssociation(sys.argv[1], sys.argv[2], sys.argv[3])
| 27.857143
| 118
| 0.598974
|
794d2fe9e536cf8b2ae96e4539cf9a964fa94fe5
| 798
|
py
|
Python
|
src_dataset/dataset_parallel.py
|
jamesrchen/Combinatorial-3D-Shape-Generation
|
0f2dc9a4c6f5844cbce53fe4f2b7244cadf6231e
|
[
"MIT"
] | null | null | null |
src_dataset/dataset_parallel.py
|
jamesrchen/Combinatorial-3D-Shape-Generation
|
0f2dc9a4c6f5844cbce53fe4f2b7244cadf6231e
|
[
"MIT"
] | null | null | null |
src_dataset/dataset_parallel.py
|
jamesrchen/Combinatorial-3D-Shape-Generation
|
0f2dc9a4c6f5844cbce53fe4f2b7244cadf6231e
|
[
"MIT"
] | null | null | null |
from geometric_primitives import brick
from geometric_primitives import bricks
from geometric_primitives import rules
import dataset_common
if __name__ == '__main__':
list_rules = rules.LIST_RULES_2_4
print(list_rules)
list_bricks_ = []
for rule in list_rules:
bricks_ = bricks.Bricks(100)
brick_ = brick.Brick()
brick_.set_position([0, 0, 0])
brick_.set_direction(0)
bricks_.add(brick_)
if rule[1][0] == 0:
brick_next = brick.Brick()
brick_next.set_position([rule[1][1][0], rule[1][1][1], 1])
brick_next.set_direction(rule[1][0])
bricks_.add(brick_next)
list_bricks_.append(bricks_)
dataset_common.create_bricks(list_bricks_, dataset_common.STR_LABEL_PARALLEL)
| 26.6
| 81
| 0.657895
|
794d30287bc2ece6b4a0e1666e47022d2e658a34
| 87,953
|
py
|
Python
|
virtual/lib/python3.6/site-packages/numpy/core/tests/test_regression.py
|
jameskomo/coffee-data-visualization
|
6b0812c8791c9cbcb264bafae0cf1d02a6ea30b8
|
[
"MIT"
] | 1
|
2019-09-20T04:38:37.000Z
|
2019-09-20T04:38:37.000Z
|
virtual/lib/python3.6/site-packages/numpy/core/tests/test_regression.py
|
jameskomo/coffee-data-visualization
|
6b0812c8791c9cbcb264bafae0cf1d02a6ea30b8
|
[
"MIT"
] | 1
|
2021-06-02T00:41:48.000Z
|
2021-06-02T00:41:48.000Z
|
virtual/lib/python3.6/site-packages/numpy/core/tests/test_regression.py
|
jameskomo/coffee-data-visualization
|
6b0812c8791c9cbcb264bafae0cf1d02a6ea30b8
|
[
"MIT"
] | null | null | null |
from __future__ import division, absolute_import, print_function
import copy
import sys
import gc
import tempfile
import pytest
from os import path
from io import BytesIO
from itertools import chain
import numpy as np
from numpy.testing import (
assert_, assert_equal, IS_PYPY, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_raises,
assert_raises_regex, assert_warns, suppress_warnings,
_assert_valid_refcount, HAS_REFCOUNT,
)
from numpy.compat import asbytes, asunicode, long, pickle
try:
RecursionError
except NameError:
RecursionError = RuntimeError # python < 3.5
class TestRegression(object):
def test_invalid_round(self):
# Ticket #3
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
def test_mem_empty(self):
# Ticket #7
np.empty((1,), dtype=[('x', np.int64)])
def test_pickle_transposed(self):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
f = BytesIO()
pickle.dump(a, f, protocol=proto)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a, b)
def test_typeNA(self):
# Issue gh-515
with suppress_warnings() as sup:
sup.filter(np.VisibleDeprecationWarning)
assert_equal(np.typeNA[np.int64], 'Int64')
assert_equal(np.typeNA[np.uint64], 'UInt64')
def test_dtype_names(self):
# Ticket #35
# Should succeed
np.dtype([(('name', 'label'), np.int32, 3)])
def test_reduce(self):
# Ticket #40
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
def test_zeros_order(self):
# Ticket #43
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_asarray_with_order(self):
# Check that nothing is done when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
def test_ravel_with_order(self):
# Check that ravel works when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
def test_sort_bigendian(self):
# Ticket #47
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self):
# Ticket #49
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self):
# Ticket #50
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
f = BytesIO()
pickle.dump(ca, f, protocol=proto)
f.seek(0)
ca = np.load(f, allow_pickle=True)
f.close()
def test_noncontiguous_fill(self):
# Ticket #58.
a = np.zeros((5, 3))
b = a[:, :2,]
def rs():
b.shape = (10,)
assert_raises(AttributeError, rs)
def test_bool(self):
# Ticket #60
np.bool_(1) # Should succeed
def test_indexing1(self):
# Ticket #64
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self):
# Ticket #65
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self):
# Ticket #67
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self):
# Trac Ticket #72
# https://github.com/numpy/numpy/issues/565
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
assert_(a[1] == 'auto')
assert_(a[0] != 'auto')
b = np.linspace(0, 10, 11)
# This should return true for now, but will eventually raise an error:
with suppress_warnings() as sup:
sup.filter(FutureWarning)
assert_(b != 'auto')
assert_(b[0] != 'auto')
def test_unicode_swapping(self):
# Ticket #79
ulen = 1
ucs_value = u'\U0010FFFF'
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua.newbyteorder() # Should succeed.
def test_object_array_fill(self):
# Ticket #86
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self):
# Ticket #93
assert_raises(TypeError, np.dtype,
{'names':['a'], 'formats':['foo']}, align=1)
def test_endian_bool_indexing(self):
# Ticket #105
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
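        # where() and mask.nonzero() must agree for both byte orders.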
xa = np.where((a > 2) & (a < 6))
xb = np.where((b > 2) & (b < 6))
ya = ((a > 2) & (a < 6))
yb = ((b > 2) & (b < 6))
assert_array_almost_equal(xa, ya.nonzero())
assert_array_almost_equal(xb, yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_endian_where(self):
# GitHub issue #369
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
net[2] = 0.605202
max_net = net.max()
test = np.where(net <= 0., max_net, net)
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
def test_endian_recarray(self):
# Ticket #2185
dt = np.dtype([
('head', '>u4'),
('data', '>u4', 2),
])
buf = np.recarray(1, dtype=dt)
buf[0]['head'] = 1
buf[0]['data'][:] = [1, 1]
h = buf[0]['head']
d = buf[0]['data'][0]
buf[0]['head'] = h
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
def test_mem_dot(self):
# Ticket #106
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
# Dummy array to detect bad memory access:
_z = np.ones(10)
_dummy = np.empty((0, 10))
z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
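        # z is a zero-sized (0, 10) view into _z's buffer; a buggy dot()
        # writing past it would clobber _z and trip the checks below.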
np.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
# Do the same for the built-in dot:
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
def test_arange_endian(self):
# Ticket #111
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
assert_array_equal(ref, x)
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
def test_arange_inf_step(self):
ref = np.arange(0, 1, 10)
x = np.arange(0, 1, np.inf)
assert_array_equal(ref, x)
ref = np.arange(0, 1, -10)
x = np.arange(0, 1, -np.inf)
assert_array_equal(ref, x)
ref = np.arange(0, -1, -10)
x = np.arange(0, -1, -np.inf)
assert_array_equal(ref, x)
ref = np.arange(0, -1, 10)
x = np.arange(0, -1, np.inf)
assert_array_equal(ref, x)
def test_arange_underflow_stop_and_step(self):
finfo = np.finfo(np.float64)
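        # A step so large that (stop - start) / step underflows must still
        # produce the single element that a finite oversized step would.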
ref = np.arange(0, finfo.eps, 2 * finfo.eps)
x = np.arange(0, finfo.eps, finfo.max)
assert_array_equal(ref, x)
ref = np.arange(0, finfo.eps, -2 * finfo.eps)
x = np.arange(0, finfo.eps, -finfo.max)
assert_array_equal(ref, x)
ref = np.arange(0, -finfo.eps, -2 * finfo.eps)
x = np.arange(0, -finfo.eps, -finfo.max)
assert_array_equal(ref, x)
ref = np.arange(0, -finfo.eps, 2 * finfo.eps)
x = np.arange(0, -finfo.eps, finfo.max)
assert_array_equal(ref, x)
def test_argmax(self):
# Ticket #119
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
a.argmax(i) # Should succeed
def test_mem_divmod(self):
# Ticket #126
for i in range(10):
divmod(np.array([i])[0], 10)
def test_hstack_invalid_dims(self):
# Ticket #128
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
assert_raises(ValueError, np.hstack, (x, y))
def test_squeeze_type(self):
# Ticket #133
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self):
# Ticket #143
assert_equal(0, np.add.identity)
def test_numpy_float_python_long_addition(self):
# Check that numpy float and python longs can be added correctly.
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
def test_binary_repr_0(self):
# Ticket #151
assert_equal('0', np.binary_repr(0))
def test_rec_iterate(self):
# Ticket #160
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
(2, 2.2, '2.0')], dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self):
# Ticket #190
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
def test_tobytes_FORTRANORDER_discontiguous(self):
# Fix in r2836
# Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.frombuffer(x.tobytes()))
def test_flat_assignment(self):
# Correct behaviour of ticket #194
x = np.empty((3, 1))
x.flat = np.arange(3)
assert_array_almost_equal(x, [[0], [1], [2]])
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
def test_broadcast_flat_assignment(self):
# Ticket #194
x = np.empty((3, 1))
def bfa():
x[:] = np.arange(3)
def bfb():
x[:] = np.arange(3, dtype=float)
assert_raises(ValueError, bfa)
assert_raises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for non-array assignment
# and equivalent unsafe casted array assignment
a = np.arange(10)
b = np.ones(10, dtype=bool)
r = np.arange(10)
def assign(a, b, c):
a[b] = c
assert_raises(ValueError, assign, a, b, np.nan)
a[b] = np.array(np.nan) # but not this.
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
def test_unpickle_dtype_with_object(self):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
f = BytesIO()
pickle.dump(dt, f, protocol=proto)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self):
# Ticket #196
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
assert_raises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
def test_recarray_single_element(self):
# Ticket #202
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])
assert_array_equal(a, b)
assert_equal(a, r[0][0])
def test_zero_sized_array_indexing(self):
# Ticket #205
tmp = np.array([])
def index_tmp():
tmp[np.array(10)]
assert_raises(IndexError, index_tmp)
def test_chararray_rstrip(self):
# Ticket #222
x = np.chararray((1,), 5)
x[0] = b'a '
x = x.rstrip()
assert_equal(x[0], b'a')
def test_object_array_shape(self):
# Ticket #239
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))
assert_equal(np.array([], dtype=object).shape, (0,))
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
def test_mem_around(self):
# Ticket #243
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
def test_character_array_strip(self):
# Ticket #246
x = np.char.array(("x", "x ", "x "))
for c in x:
assert_equal(c, "x")
def test_lexsort(self):
# Lexsort memory error
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
def test_lexsort_invalid_sequence(self):
# Issue gh-4123
class BuggySequence(object):
def __len__(self):
return 4
def __getitem__(self, key):
raise KeyError
assert_raises(KeyError, np.lexsort, BuggySequence())
def test_lexsort_zerolen_custom_strides(self):
# Ticket #14228
xs = np.array([], dtype='i8')
assert xs.strides == (8,)
assert np.lexsort((xs,)).shape[0] == 0 # Works
xs.strides = (16,)
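        # The stride is irrelevant for a zero-length array and must not
        # trip up lexsort.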
assert np.lexsort((xs,)).shape[0] == 0 # Was: MemoryError
def test_lexsort_zerolen_custom_strides_2d(self):
xs = np.array([], dtype='i8')
xs.shape = (0, 2)
xs.strides = (16, 16)
assert np.lexsort((xs,), axis=0).shape[0] == 0
xs.shape = (2, 0)
xs.strides = (16, 16)
assert np.lexsort((xs,), axis=0).shape[0] == 2
def test_lexsort_zerolen_element(self):
dt = np.dtype([]) # a void dtype with no fields
xs = np.empty(4, dt)
assert np.lexsort((xs,)).shape[0] == xs.shape[0]
def test_pickle_py2_bytes_encoding(self):
# Check that arrays and scalars pickled on Py2 are
# unpickleable on Py3 using encoding='bytes'
test_data = [
# (original, py2_pickle)
(np.unicode_('\u6f2c'),
b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
b"I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n."),
(np.array([9e123], dtype=np.float64),
b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
b"p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
b"p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
b"I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb."),
(np.array([(9e123,)], dtype=[('name', float)]),
b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
b"(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
b"(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
b"(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
b"I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
b"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb."),
]
if sys.version_info[:2] >= (3, 4):
# encoding='bytes' was added in Py3.4
for original, data in test_data:
result = pickle.loads(data, encoding='bytes')
assert_equal(result, original)
if isinstance(result, np.ndarray) and result.dtype.names is not None:
for name in result.dtype.names:
assert_(isinstance(name, str))
def test_pickle_dtype(self):
# Ticket #251
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
pickle.dumps(float, protocol=proto)
def test_swap_real(self):
# Ticket #265
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
def test_object_array_from_list(self):
# Ticket #270
assert_(np.array([1, 'A', None]).shape == (3,))
def test_multiple_assign(self):
# Ticket #273
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
def test_empty_array_type(self):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
def test_method_args(self):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8, 7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
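            # In-place methods such as sort() return None; in that case the
            # mutated array itself is what should match the function result.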
if res1 is None:
res1 = arr
if res1.dtype.kind in 'uib':
assert_((res1 == res2).all(), func)
else:
assert_(abs(res1-res2).max() < 1e-8, func)
for func in funcs2:
arr1 = np.random.rand(8, 7)
arr2 = np.random.rand(8, 7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
def test_mem_lexsort_strings(self):
# Ticket #298
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
def test_fancy_index(self):
# Ticket #302
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
def test_recarray_copy(self):
# Ticket #312
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
def test_rec_fromarray(self):
# Ticket #322
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
def test_ndmin_float64(self):
# Ticket #324
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
def test_ndmin_order(self):
# Issue #465 and related checks
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
def test_mem_axis_minimization(self):
# Ticket #327
data = np.arange(5)
data = np.add.outer(data, data)
def test_mem_float_imag(self):
# Ticket #330
np.float64(1.0).imag
def test_dtype_tuple(self):
# Ticket #334
assert_(np.dtype('i4') == np.dtype(('i4', ())))
def test_dtype_posttuple(self):
# Ticket #335
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self):
# Ticket #341
assert_equal(np.array(['X'], 'c'), b'X')
def test_string_array_size(self):
# Ticket #342
assert_raises(ValueError,
np.array, [['X'], ['X', 'X', 'X']], '|S1')
def test_dtype_repr(self):
# Ticket #344
dt1 = np.dtype(('uint32', 2))
dt2 = np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self):
# Make sure reshape order works.
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
def test_reshape_zero_strides(self):
# Issue #380, test reshaping of zero strided arrays
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
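        # All five elements alias the same underlying value (stride 0), and
        # reshaping must preserve the zero stride.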
assert_(a.reshape(5, 1).strides[0] == 0)
def test_reshape_zero_size(self):
# GitHub Issue #2700, setting shape failed for 0-sized arrays
a = np.ones((0, 2))
a.shape = (-1, 2)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
@pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
reason="Using relaxed stride checking")
def test_reshape_trailing_ones_strides(self):
# GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
strides_c = (16, 8, 8, 8)
strides_f = (8, 24, 48, 48)
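        # a views every other int32, so the element stride is 8 bytes, not 4;
        # the strides of the trailing length-1 axes must be computed from it.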
assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
def test_repeat_discont(self):
# Ticket #352
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
def test_array_index(self):
# Make sure optimization is not called in this case.
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
def test_object_argmax(self):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self):
# Ticket #372
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)]),
np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
def test_random_shuffle(self):
# Ticket #374
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
def test_refcount_vdot(self):
# Changeset #3443
_assert_valid_refcount(np.vdot)
def test_startswith(self):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
def test_noncommutative_reduce_accumulate(self):
# Ticket #413
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
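        # reduce folds left to right: ((((0-1)-2)-3)-4) = -10 and
        # (2.0 / 0.5) / 0.25 = 16.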
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self):
# Convolve should raise an error for empty input array.
assert_raises(ValueError, np.convolve, [], [1])
assert_raises(ValueError, np.convolve, [1], [])
def test_multidim_byteswap(self):
# Ticket #449
r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
def test_string_NULL(self):
# Changeset 3557
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self):
# Ticket #483
r = np.array([[b'abc']], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == b'abc')
def test_take_output(self):
# Ensure that 'take' honours output parameter.
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
b = np.zeros_like(a)
np.take(x, [0, 2], axis=1, out=b)
assert_array_equal(a, b)
def test_take_object_fail(self):
# Issue gh-3001
d = 123.
a = np.array([d, 1], dtype=object)
if HAS_REFCOUNT:
ref_d = sys.getrefcount(d)
try:
a.take([0, 100])
except IndexError:
pass
if HAS_REFCOUNT:
assert_(ref_d == sys.getrefcount(d))
def test_array_str_64bit(self):
# Ticket #501
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
np.array_str(s) # Should succeed
def test_frompyfunc_endian(self):
# Ticket #503
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self):
# Ticket #514
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s))
def test_arr_transpose(self):
# Ticket #516
x = np.random.rand(*(2,)*16)
x.transpose(list(range(16))) # Should succeed
def test_string_mergesort(self):
# Ticket #540
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self):
# Ticket #546
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
def test_rand_seed(self):
# Ticket #555
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self):
# Ticket #562
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
def test_dot_negative_stride(self):
# Ticket #588
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
z = y[::-1].copy()
y2 = y[::-1]
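        # The negatively strided view y2 must give the same product as its
        # contiguous copy z.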
assert_equal(np.dot(x, z), np.dot(x, y2))
def test_object_casting(self):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484, 286])
y = np.zeros([484, 286])
x |= y
assert_raises(TypeError, rs)
def test_unicode_scalar(self):
# Ticket #600
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
new = pickle.loads(pickle.dumps(el, protocol=proto))
assert_equal(new, el)
def test_arange_non_native_dtype(self):
# Ticket #616
for T in ('>f4', '<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0, dtype=dt).dtype, dt)
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
def test_bool_flat_indexing_invalid_nr_elements(self):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v):
x[(s > 0)] = v
assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
# Old special case (different code path):
assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
def test_mem_scalar_indexing(self):
# Ticket #603
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
def test_binary_repr_0_width(self):
assert_equal(np.binary_repr(0, width=3), '000')
def test_fromstring(self):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
def test_searchsorted_variable_length(self):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
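        # 'd' and 'e' sort after every entry of x, so both insertion points
        # are len(x) == 3.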
assert_equal(x.searchsorted(y), [3, 3])
def test_string_argsort_with_zeros(self):
# Check argsort for strings containing zeros.
x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
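        # x holds the 2-byte strings b'\x00\x02' and b'\x00\x01'; the latter
        # sorts first, so both sort kinds must return [1, 0].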
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
def test_string_sort_with_zeros(self):
# Check sort for strings containing zeros.
x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
def test_flat_byteorder(self):
# Ticket #657
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
def test_sign_bit(self):
x = np.array([0, -0.0, 0])
assert_equal(str(np.abs(x)), '[0. 0. 0.]')
def test_flat_index_byteswap(self):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
reason="Using relaxed stride checking")
def test_copy_detection_corner_case2(self):
# Ticket #771: strides are not set correctly when reshaping 0-sized
# arrays
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self):
# Ticket #633
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1-d broadcast slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1-d -> 2-d broadcast slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:, :] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:, 0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
# -- 2-d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:, 0] = a
arr[:, 1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:, 0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
tmp = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
tmp = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
tmp = arr1.take([1, 2, 3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0], [1], [0], [1], [1]], int)
tmp = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
del tmp # Avoid pyflakes unused variable warning
def test_mem_custom_float_to_array(self):
# Ticket 702
class MyFloat(object):
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp.astype(float) # Should succeed
def test_object_array_refcount_self_assign(self):
# Ticket #711
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert_(not arr[0].deleted)
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self):
x = [1, 2, 3]
assert_raises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self):
# Ticket #713
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self):
# Ticket #714
np.zeros(10)[np.array(0)]
def test_nonnative_endian_fill(self):
# Non-native endian arrays were incorrectly filled with scalars
# before r5034.
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self):
# Test for ticket #551, changeset r5140
x = np.zeros((30, 40))
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
y = pickle.loads(pickle.dumps(x, protocol=proto))
            # y is now typically not aligned on an 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self):
# Ticket #788, changeset r5155
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
f = open(filename, 'rb')
xp = pickle.load(f, encoding='latin1')
f.close()
else:
f = open(filename)
xp = pickle.load(f)
f.close()
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
def test_compress_small_type(self):
# Ticket #789, changeset 5217.
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype=np.single)
try:
a.compress([True, False], axis=1, out=b)
raise AssertionError("compress with an out which cannot be "
"safely casted should not return "
"successfully")
except TypeError:
pass
def test_attributes(self):
# Ticket #791
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
assert_(dat.info == 'jubba')
dat.resize((4, 2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2, 3, 4], [6, 3, 4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32, 0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2, 7).info == 'jubba')
assert_(dat.compress([0, 1, 1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0], 'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32, 0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2, 4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0, 1).info == 'jubba')
assert_(dat.take([2, 3, 5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
# These methods do not preserve subclasses
assert_(type(dat.nonzero()[0]) is np.ndarray)
assert_(type(dat.nonzero()[1]) is np.ndarray)
def test_recarray_tolist(self):
# Ticket #793, changeset r5215
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
def test_nonscalar_item_method(self):
        # Make sure that .item() fails gracefully when it should
a = np.arange(5)
assert_raises(ValueError, a.item)
def test_char_array_creation(self):
a = np.array('123', dtype='c')
b = np.array([b'1', b'2', b'3'])
assert_equal(a, b)
def test_unaligned_unicode_access(self):
# Ticket #825
for i in range(1, 9):
msg = 'unicode offset: %d chars' % i
t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])
x = np.array([(b'a', u'b')], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self):
# Ticket 794.
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self):
# Ticket #674
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self):
# Ticket #816
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4, 5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
# Ticket #840 - order should be an invalid keyword.
x = np.array([[0, 1], [2, 3]])
assert_raises(TypeError, x.resize, (2, 2), order='C')
def test_for_zero_length_in_choose(self):
"Ticket #882"
a = np.array(1)
assert_raises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
assert_raises(ValueError, lambda: np.array([1], ndmin=33))
def test_void_scalar_with_titles(self):
# No ticket
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_void_scalar_constructor(self):
        # Issue #1550
        # Create test string data, construct void scalar from data and assert
        # that void scalar contains original data.
test_string = np.array("test")
test_string_void_scalar = np.core.multiarray.scalar(
np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
        # Create record scalar, construct from data and assert that
        # reconstructed scalar is correct.
test_record = np.ones((), "i,i")
test_record_void_scalar = np.core.multiarray.scalar(
test_record.dtype, test_record.tobytes())
assert_(test_record_void_scalar == test_record)
# Test pickle and unpickle of void and record scalars
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_(pickle.loads(
pickle.dumps(test_string, protocol=proto)) == test_string)
assert_(pickle.loads(
pickle.dumps(test_record, protocol=proto)) == test_record)
def test_blasdot_uninitialized_memory(self):
# Ticket #950
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
if IS_PYPY:
x.resize((m, 0), refcheck=False)
else:
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
if IS_PYPY:
y.resize((0, n), refcheck=False)
else:
y.resize((0, n))
# `dot` should just return zero (m, n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
def test_zeros(self):
# Regression test for #1061.
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
with assert_raises_regex(ValueError,
'Maximum allowed dimension exceeded'):
np.empty(sz)
def test_huge_arange(self):
# Regression test for #1062.
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
with assert_raises_regex(ValueError,
'Maximum allowed size exceeded'):
np.arange(sz)
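            # The assertion below is never reached: np.arange raised above
            # and the context manager swallowed the exception.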
assert_(np.size == sz)
def test_fromiter_bytes(self):
# Ticket #1058
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
# Ticket #1078: segfaults when creating an array with a sequence of
# 0d arrays.
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
# Ticket #1081: weird array with strange input...
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
# Ticket #1080.
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
# Ticket #1106.
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(TypeError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
# Ticket #1217, don't segfault.
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=int))
def test_object_array_to_fixed_string(self):
# Ticket #1235.
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
# Ticket #1240.
a = np.array([[u'abc', u'\u03a3'],
[u'asdf', u'erw']],
dtype='U')
assert_raises(UnicodeEncodeError, np.array, a, 'S4')
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', u'123'])
assert_(a.itemsize == 16)
a = np.array([u'123', '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', u'123', '12345'])
assert_(a.itemsize == 20)
a = np.array([u'123', '1234', u'12345'])
assert_(a.itemsize == 20)
a = np.array([u'123', '1234', u'1234'])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
# Ticket #1198 and #1267
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
# Ticket #1267
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
# Ticket #1267
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
# Ticket #1259 and gh-441
for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
z = np.array([2.2-1.1j], dtype)
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder()))
else:
# big-endian machine
assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype))
# double check real and imaginary parts:
assert_equal(x.real, y.real.byteswap())
assert_equal(x.imag, y.imag.byteswap())
def test_structured_arrays_with_objects1(self):
# Ticket #1299
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0, 1] == x[0, 0])
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_structured_arrays_with_objects2(self):
# Ticket #1299 second test
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
# Ticket #1254
dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
assert_raises(ValueError, np.dtype, dtspec)
def test_signed_integer_division_overflow(self):
# Ticket #1317.
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
with np.errstate(divide="ignore"):
for t in (np.int8, np.int16, np.int32, np.int64, int, np.long):
test_type(t)
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(b'aa, aa, 1.0', sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], np.bool_) # not x[0] because it is unordered
failures = []
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls, i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1, 2, 3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=float)
a.shape = (4, 4)
lut = np.ones((5 + 3, 4), float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
        # On Python 3, tofile/fromfile used to get the underlying Python
        # file handle out of sync (#1610)
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tobytes())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, b"\x01\x02\x03")
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning)
assert_equal(float(x), float(x.real))
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_complex_boolean_cast(self):
# Ticket #2218
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
assert_(np.any(x))
assert_(np.all(x[1:]))
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
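        # Reinterpreted as float32, 0x80000000 is -0.0 (zero) and 0x00000080
        # is a tiny subnormal, so only index 1 is nonzero.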
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([], ['?', '?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
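        # In-place multiplication over an empty slice must be a no-op.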
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
# The second use of itemsize was throwing an exception because in
# ctors.c, discover_itemsize was calling PyObject_Length without
# checking the return code. This failed to get the length of the
# number 2, and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_squeeze_contiguous(self):
# Similar to GitHub issue #387
a = np.zeros((1, 2)).squeeze()
b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze()
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
def test_squeeze_axis_handling(self):
# Issue #10779
# Ensure proper handling of objects
# that don't support axis specification
# when squeezing
class OldSqueeze(np.ndarray):
def __new__(cls,
input_array):
obj = np.asarray(input_array).view(cls)
return obj
# it is perfectly reasonable that prior
# to numpy version 1.7.0 a subclass of ndarray
# might have been created that did not expect
# squeeze to have an axis argument
# NOTE: this example is somewhat artificial;
# it is designed to simulate an old API
# expectation to guard against regression
def squeeze(self):
return super(OldSqueeze, self).squeeze()
oldsqueeze = OldSqueeze(np.array([[1],[2],[3]]))
# if no axis argument is specified the old API
# expectation should give the correct result
assert_equal(np.squeeze(oldsqueeze),
np.array([1,2,3]))
# likewise, axis=None should work perfectly well
# with the old API expectation
assert_equal(np.squeeze(oldsqueeze, axis=None),
np.array([1,2,3]))
# however, specification of any particular axis
# should raise a TypeError in the context of the
# old API specification, even when using a valid
# axis specification like 1 for this array
with assert_raises(TypeError):
# this would silently succeed for array
# subclasses / objects that did not support
# squeeze axis argument handling before fixing
# Issue #10779
np.squeeze(oldsqueeze, axis=1)
# check for the same behavior when using an invalid
# axis specification -- in this case axis=0 does not
# have size 1, but the priority should be to raise
# a TypeError for the axis argument and NOT a
# ValueError for squeezing a non-empty dimension
with assert_raises(TypeError):
np.squeeze(oldsqueeze, axis=0)
# the new API knows how to handle the axis
# argument and will return a ValueError if
# attempting to squeeze an axis that is not
# of length 1
with assert_raises(ValueError):
np.squeeze(np.array([[1],[2],[3]]), axis=0)
def test_reduce_contiguous(self):
# GitHub issue #387
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
b = np.add.reduce(np.zeros((2, 1, 2)), 1)
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.c_contiguous)
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(RecursionError, int, a)
assert_raises(RecursionError, long, a)
assert_raises(RecursionError, float, a)
if sys.version_info.major == 2:
            # in python 3, this falls back on operator.index, which
            # fails on dtype=object
assert_raises(RecursionError, oct, a)
assert_raises(RecursionError, hex, a)
a[()] = None
def test_object_array_circular_reference(self):
# Test the same for a circular reference.
a = np.array(0, dtype=object)
b = np.array(0, dtype=object)
a[()] = b
b[()] = a
assert_raises(RecursionError, int, a)
# NumPy has no tp_traverse currently, so circular references
# cannot be detected. So resolve it:
a[()] = None
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_object_array_nested(self):
# but is fine with a reference to a different array
a = np.array(0, dtype=object)
b = np.array(0, dtype=object)
a[()] = b
assert_equal(int(a), int(0))
assert_equal(long(a), long(0))
assert_equal(float(a), float(0))
if sys.version_info.major == 2:
            # in python 3, this falls back on operator.index, which
            # fails on dtype=object
assert_equal(oct(a), oct(0))
assert_equal(hex(a), hex(0))
def test_object_array_self_copy(self):
# An object array being copied into itself DECREF'ed before INCREF'ing
# causing segmentation faults (gh-3787)
a = np.array(object(), dtype=object)
np.copyto(a, a)
if HAS_REFCOUNT:
assert_(sys.getrefcount(a[()]) == 2)
a[()].__class__ # will segfault if object was deleted
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
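        # x[:-1, 0] is empty, so accumulate must return an empty result.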
assert_equal(np.add.accumulate(x[:-1, 0]), [])
def test_objectarray_setfield(self):
# Setfield should not overwrite Object fields with non-Object data
x = np.array([1, 2, 3], dtype=object)
assert_raises(TypeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = b"hello1"
s2 = b"hello2"
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = b'black'
s2 = b'white'
s3 = b'other'
a = np.array([[s1], [s2], [s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
# Ticket #1756
s = b'0123456789abcdef'
a = np.array([s]*5)
for i in range(1, 17):
a1 = np.array(a, "|S%d" % i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"gh-2355"
r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
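        # Slicing and field access must commute: r[0:3:2]['f1'] and
        # r['f1'][0:3:2] have to describe the same elements and strides.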
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_alignment_update(self):
# Check that alignment flag is updated on stride setting
a = np.arange(10)
assert_(a.flags.aligned)
a.strides = 3
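        # A 3-byte stride is not a multiple of the itemsize, so the elements
        # are no longer aligned and the flag must reflect that.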
assert_(not a.flags.aligned)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except Exception:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1, 2], [3, 4]]).T
np.array(x.flat)
assert_equal(x, [[1, 3], [2, 4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_pickle_bytes_overwrite(self):
if sys.version_info[0] >= 3:
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
data = np.array([1], dtype='b')
data = pickle.loads(pickle.dumps(data, protocol=proto))
data[0] = 0xdd
bytestring = "\x01 ".encode('ascii')
assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
b"p13\ntp14\nb.")
if sys.version_info[0] >= 3:
# This should work:
result = pickle.loads(data, encoding='latin1')
assert_array_equal(result, np.array([129], dtype='b'))
# Should not segfault:
assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
def test_pickle_py2_scalar_latin1_hack(self):
# Check that scalar unpickling hack in Py3 that supports
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(...)
datas = [
# (original, python2_pickle, koi8r_validity)
(np.unicode_('\u6bd2'),
(b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
'invalid'),
(np.float64(9e123),
(b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
'invalid'),
(np.bytes_(b'\x9c'), # different 8-bit code point in KOI8-R vs latin1
(b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
b"tp8\nRp9\n."),
'different'),
]
if sys.version_info[0] >= 3:
for original, data, koi8r_validity in datas:
result = pickle.loads(data, encoding='latin1')
assert_equal(result, original)
                # Decoding under a non-latin1 encoding (e.g. KOI8-R) can
# produce bad results, but should not segfault.
if koi8r_validity == 'different':
# Unicode code points happen to lie within latin1,
                    # but are different in koi8-r, resulting in silent
# bogus results
result = pickle.loads(data, encoding='koi8-r')
assert_(result != original)
elif koi8r_validity == 'invalid':
                    # Unicode code points outside latin1, so decoding
                    # raises an encoding exception
assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
else:
raise ValueError(koi8r_validity)
def test_structured_type_to_object(self):
a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0, 1)
a_obj[1] = (3, 2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2, 2), dtype=object)
a[...] = [[1, 2]]
assert_equal(a, [[1, 2], [1, 2]])
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr in [asunicode, asbytes]:
b = np.array([val, tostr('xx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val])
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val])
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
        # Ticket #2081. On a Python build with two-byte unicode, strings
        # can be truncated if the itemsize is not properly adjusted for
        # NumPy's four-byte unicode.
if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
a = np.array([u'abcd'])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
# Ticket #2063 must always choose stable sort for argsort to
# get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
        # Ticket #1578: the mismatch only showed up when running a debug
        # build of Python >= 2.7, where it surfaced as a core dump and
        # error message.
a = np.array(['abc'], dtype=np.unicode)[0]
del a
def test_refcount_error_in_clip(self):
# Ticket #1588
a = np.zeros((2,), dtype='>i2').clip(min=0)
x = a + a
# This used to segfault:
y = str(x)
# Check the final string:
assert_(y == "[0 0]")
def test_searchsorted_wrong_dtype(self):
# Ticket #2189, it used to segfault, so we check that it raises the
# proper exception.
a = np.array([('a', 1)], dtype='S1, int')
assert_raises(TypeError, np.searchsorted, a, 1.2)
# Ticket #2066, similar problem:
dtype = np.format_parser(['i4', 'i4'], [], [])
a = np.recarray((2, ), dtype)
assert_raises(TypeError, np.searchsorted, a, 1)
def test_complex64_alignment(self):
# Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
dtt = np.complex64
arr = np.arange(10, dtype=dtt)
# 2D array
arr2 = np.reshape(arr, (2, 5))
# Fortran write followed by (C or F) read caused bus error
data_str = arr2.tobytes('F')
data_back = np.ndarray(arr2.shape,
arr2.dtype,
buffer=data_str,
order='F')
assert_array_equal(arr2, data_back)
def test_structured_count_nonzero(self):
arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
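        # The single remaining record is all zeros, so nothing counts as
        # nonzero.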
count = np.count_nonzero(arr)
assert_equal(count, 0)
def test_copymodule_preserves_f_contiguity(self):
a = np.empty((2, 2), order='F')
b = copy.copy(a)
c = copy.deepcopy(a)
assert_(b.flags.fortran)
assert_(b.flags.f_contiguous)
assert_(c.flags.fortran)
assert_(c.flags.f_contiguous)
def test_fortran_order_buffer(self):
import numpy as np
a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'],
[u'F', u'o', u'o', u'b', u'']]])
assert_array_equal(arr, arr2)
def test_assign_from_sequence_error(self):
# Ticket #4024.
arr = np.array([1, 2, 3])
assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
arr.__setitem__(slice(None), [9])
assert_equal(arr, [9, 9, 9])
def test_format_on_flex_array_element(self):
# Ticket #4369.
dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
arr = np.array([('2000-01-01', 1)], dt)
formatted = '{0}'.format(arr[0])
assert_equal(formatted, str(arr[0]))
def test_deepcopy_on_0d_array(self):
# Ticket #3311.
arr = np.array(3)
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
assert_(arr is not arr_cp)
assert_(isinstance(arr_cp, type(arr)))
def test_deepcopy_F_order_object_array(self):
# Ticket #6456.
a = {'a': 1}
b = {'b': 2}
arr = np.array([[a, b], [a, b]], order='F')
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_(arr is not arr_cp)
# Ensure that we have actually copied the item.
assert_(arr[0, 1] is not arr_cp[1, 1])
# Ensure we are allowed to have references to the same object.
assert_(arr[0, 1] is arr[1, 1])
# Check the references hold for the copied objects.
assert_(arr_cp[0, 1] is arr_cp[1, 1])
def test_deepcopy_empty_object_array(self):
# Ticket #8536.
# Deepcopy should succeed
a = np.array([], dtype=object)
b = copy.deepcopy(a)
assert_(a.shape == b.shape)
def test_bool_subscript_crash(self):
# gh-4494
c = np.rec.array([(1, 2, 3), (4, 5, 6)])
masked = c[np.array([True, False])]
base = masked.base
del masked, c
base.dtype
def test_richcompare_crash(self):
# gh-4613
import operator as op
# dummy class where __array__ throws exception
class Foo(object):
__array_priority__ = 1002
def __array__(self, *args, **kwargs):
raise Exception()
rhs = Foo()
lhs = np.array(1)
for f in [op.lt, op.le, op.gt, op.ge]:
if sys.version_info[0] >= 3:
assert_raises(TypeError, f, lhs, rhs)
elif not sys.py3kwarning:
# With -3 switch in python 2, DeprecationWarning is raised
# which we are not interested in
f(lhs, rhs)
assert_(not op.eq(lhs, rhs))
assert_(op.ne(lhs, rhs))
def test_richcompare_scalar_and_subclass(self):
# gh-4709
class Foo(np.ndarray):
def __eq__(self, other):
return "OK"
x = np.array([1, 2, 3]).view(Foo)
assert_equal(10 == x, "OK")
assert_equal(np.int32(10) == x, "OK")
assert_equal(np.array([10]) == x, "OK")
def test_pickle_empty_string(self):
# gh-3926
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
test_string = np.string_('')
assert_equal(pickle.loads(
pickle.dumps(test_string, protocol=proto)), test_string)
def test_frompyfunc_many_args(self):
# gh-5672
def passer(*args):
pass
assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
def test_repeat_broadcasting(self):
# gh-5743
a = np.arange(60).reshape(3, 4, 5)
for axis in chain(range(-a.ndim, a.ndim), [None]):
assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
def test_frompyfunc_nout_0(self):
# gh-2014
def f(x):
x[0], x[-1] = x[-1], x[0]
uf = np.frompyfunc(f, 1, 0)
a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])
assert_equal(uf(a), ())
assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_leak_in_structured_dtype_comparison(self):
# gh-6250
recordtype = np.dtype([('a', np.float64),
('b', np.int32),
('d', (str, 5))])
# Simple case
a = np.zeros(2, dtype=recordtype)
for i in range(100):
a == a
assert_(sys.getrefcount(a) < 10)
# The case in the bug report.
before = sys.getrefcount(a)
u, v = a[0], a[1]
u == v
del u, v
gc.collect()
after = sys.getrefcount(a)
assert_equal(before, after)
def test_empty_percentile(self):
# gh-6530 / gh-6553
assert_array_equal(np.percentile(np.arange(10), []), np.array([]))
def test_void_compare_segfault(self):
# gh-6922. The following should not segfault
a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
a.sort()
def test_reshape_size_overflow(self):
# gh-7455
a = np.ones(20)[::2]
if np.dtype(np.intp).itemsize == 8:
# 64 bit. The following are the prime factors of 2**63 + 5,
# plus a leading 2, so when multiplied together as int64,
# the result overflows to a total size of 10.
new_shape = (2, 13, 419, 691, 823, 2977518503)
else:
# 32 bit. The following are the prime factors of 2**31 + 5,
# plus a leading 2, so when multiplied together as int32,
# the result overflows to a total size of 10.
new_shape = (2, 7, 7, 43826197)
assert_raises(ValueError, a.reshape, new_shape)
def test_invalid_structured_dtypes(self):
# gh-2865
# mapping python objects to other dtypes
assert_raises(ValueError, np.dtype, ('O', [('name', 'i8')]))
assert_raises(ValueError, np.dtype, ('i8', [('name', 'O')]))
assert_raises(ValueError, np.dtype,
('i8', [('name', [('name', 'O')])]))
assert_raises(ValueError, np.dtype, ([('a', 'i4'), ('b', 'i4')], 'O'))
assert_raises(ValueError, np.dtype, ('i8', 'O'))
# wrong number/type of tuple elements in dict
assert_raises(ValueError, np.dtype,
('i', {'name': ('i', 0, 'title', 'oops')}))
assert_raises(ValueError, np.dtype,
('i', {'name': ('i', 'wrongtype', 'title')}))
# disallowed as of 1.13
assert_raises(ValueError, np.dtype,
([('a', 'O'), ('b', 'O')], [('c', 'O'), ('d', 'O')]))
# allowed as a special case due to existing use, see gh-2798
a = np.ones(1, dtype=('O', [('name', 'O')]))
assert_equal(a[0], 1)
def test_correct_hash_dict(self):
# gh-8887 - __hash__ would be None despite tp_hash being set
all_types = set(np.typeDict.values()) - {np.void}
for t in all_types:
val = t()
try:
hash(val)
except TypeError as e:
assert_equal(t.__hash__, None)
else:
assert_(t.__hash__ != None)
def test_scalar_copy(self):
scalar_types = set(np.sctypeDict.values())
values = {
np.void: b"a",
np.bytes_: b"a",
np.unicode_: "a",
np.datetime64: "2017-08-25",
}
for sctype in scalar_types:
item = sctype(values.get(sctype, 1))
item2 = copy.copy(item)
assert_equal(item, item2)
def test_void_item_memview(self):
va = np.zeros(10, 'V4')
x = va[:1].item()
va[0] = b'\xff\xff\xff\xff'
del va
assert_equal(x, b'\x00\x00\x00\x00')
def test_void_getitem(self):
# Test fix for gh-11668.
assert_(np.array([b'a'], 'V1').astype('O') == b'a')
assert_(np.array([b'ab'], 'V2').astype('O') == b'ab')
assert_(np.array([b'abc'], 'V3').astype('O') == b'abc')
assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd')
def test_structarray_title(self):
# The following used to segfault on pypy, due to NPY_TITLE_KEY
# not working properly and resulting to double-decref of the
# structured array field items:
# See: https://bitbucket.org/pypy/pypy/issues/2789
for j in range(5):
structure = np.array([1], dtype=[(('x', 'X'), np.object_)])
structure[0]['x'] = np.array([2])
gc.collect()
def test_dtype_scalar_squeeze(self):
# gh-11384
values = {
'S': b"a",
'M': "2018-06-20",
}
for ch in np.typecodes['All']:
if ch in 'O':
continue
sctype = np.dtype(ch).type
scvalue = sctype(values.get(ch, 3))
for axis in [None, ()]:
squeezed = scvalue.squeeze(axis=axis)
assert_equal(squeezed, scvalue)
assert_equal(type(squeezed), type(scvalue))
def test_field_access_by_title(self):
# gh-11507
s = 'Some long field name'
if HAS_REFCOUNT:
base = sys.getrefcount(s)
t = np.dtype([((s, 'f1'), np.float64)])
data = np.zeros(10, t)
for i in range(10):
str(data[['f1']])
if HAS_REFCOUNT:
assert_(base <= sys.getrefcount(s))
@pytest.mark.parametrize('val', [
# arrays and scalars
np.ones((10, 10), dtype='int32'),
np.uint64(10),
])
@pytest.mark.parametrize('protocol',
range(2, pickle.HIGHEST_PROTOCOL + 1)
)
def test_pickle_module(self, protocol, val):
# gh-12837
s = pickle.dumps(val, protocol)
assert b'_multiarray_umath' not in s
if protocol == 5 and len(val.shape) > 0:
# unpickling ndarray goes through _frombuffer for protocol 5
assert b'numpy.core.numeric' in s
else:
assert b'numpy.core.multiarray' in s
def test_object_casting_errors(self):
# gh-11993
arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
assert_raises(TypeError, arr.astype, 'c8')
def test_eff1d_casting(self):
# gh-12711
x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])
assert_raises(ValueError, np.ediff1d, x, to_begin=(1<<20))
assert_raises(ValueError, np.ediff1d, x, to_end=(1<<20))
def test_pickle_datetime64_array(self):
# gh-12745 (would fail with pickle5 installed)
d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
arr = np.array([d])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(arr, protocol=proto)
assert_equal(pickle.loads(dumped), arr)
def test_bad_array_interface(self):
class T(object):
__array_interface__ = {}
np.array([T()])
| 35.42207
| 93
| 0.543654
|
794d30d10afb6110883e21e1cb40fee08c400786
| 825
|
py
|
Python
|
cotidia/crm/menu.py
|
guillaumepiot/cotidia-crm
|
9f46d3b4fb1bc84f553c649e121b945e68a0bdab
|
[
"BSD-3-Clause"
] | null | null | null |
cotidia/crm/menu.py
|
guillaumepiot/cotidia-crm
|
9f46d3b4fb1bc84f553c649e121b945e68a0bdab
|
[
"BSD-3-Clause"
] | null | null | null |
cotidia/crm/menu.py
|
guillaumepiot/cotidia-crm
|
9f46d3b4fb1bc84f553c649e121b945e68a0bdab
|
[
"BSD-3-Clause"
] | null | null | null |
from django.urls import reverse
from cotidia.crm.conf import settings
def admin_menu(context):
menu_list = []
if settings.CRM_ENABLE_ACTION:
menu_list.append({
"text": "Actions",
"url": reverse("crm-admin:action-list"),
"permissions": ["crm.add_action", "crm.change_action"],
})
if settings.CRM_ENABLE_CONTACT:
menu_list.append({
"text": "Contacts",
"url": reverse("crm-admin:contact-list"),
"permissions": ["crm.add_contact", "crm.change_contact"],
})
if settings.CRM_ENABLE_ENQUIRY:
menu_list.append({
"text": "Enquiries",
"url": reverse("crm-admin:enquiry-list"),
"permissions": ["crm.add_enquiry", "crm.change_enquiry"],
})
return menu_list
| 29.464286
| 69
| 0.578182
|
794d3190aba1765f850c7f6a0334b66a011ebcd5
| 13,420
|
py
|
Python
|
utils/eulerangles.py
|
jinyier/ai_pointnet_attack
|
4ef16a898f99e825c445ebc7aad7ba1fd953f8f0
|
[
"MIT"
] | 4
|
2020-04-23T01:26:19.000Z
|
2022-03-09T08:04:35.000Z
|
utils/eulerangles.py
|
jinyier/ai_pointnet_attack
|
4ef16a898f99e825c445ebc7aad7ba1fd953f8f0
|
[
"MIT"
] | 1
|
2021-12-26T23:56:38.000Z
|
2021-12-26T23:56:38.000Z
|
utils/eulerangles.py
|
jinyier/ai_pointnet_attack
|
4ef16a898f99e825c445ebc7aad7ba1fd953f8f0
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
''' Module implementing Euler angle rotations and their conversions
See:
* http://en.wikipedia.org/wiki/Rotation_matrix
* http://en.wikipedia.org/wiki/Euler_angles
* http://mathworld.wolfram.com/EulerAngles.html
See also: *Representing Attitude with Euler Angles and Quaternions: A
Reference* (2006) by James Diebel. A cached PDF link last found here:
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.110.5134
Euler's rotation theorem tells us that any rotation in 3D can be
described by 3 angles. Let's call the 3 angles the *Euler angle vector*
and call the angles in the vector :math:`alpha`, :math:`beta` and
:math:`gamma`. The vector is [ :math:`alpha`,
:math:`beta`, :math:`gamma` ] and, in this description, the order of the
parameters specifies the order in which the rotations occur (so the
rotation corresponding to :math:`alpha` is applied first).
In order to specify the meaning of an *Euler angle vector* we need to
specify the axes around which each of the rotations corresponding to
:math:`alpha`, :math:`beta` and :math:`gamma` will occur.
There are therefore three axes for the rotations :math:`alpha`,
:math:`beta` and :math:`gamma`; let's call them :math:`i` :math:`j`,
:math:`k`.
Let us express the rotation :math:`alpha` around axis `i` as a 3 by 3
rotation matrix `A`. Similarly :math:`beta` around `j` becomes 3 x 3
matrix `B` and :math:`gamma` around `k` becomes matrix `G`. Then the
whole rotation expressed by the Euler angle vector [ :math:`alpha`,
:math:`beta`, :math:`gamma` ], `R` is given by::
R = np.dot(G, np.dot(B, A))
See http://mathworld.wolfram.com/EulerAngles.html
The order :math:`G B A` expresses the fact that the rotations are
performed in the order of the vector (:math:`alpha` around axis `i` =
`A` first).
To convert a given Euler angle vector to a meaningful rotation, and a
rotation matrix, we need to define:
* the axes `i`, `j`, `k`
* whether a rotation matrix should be applied on the left of a vector to
be transformed (vectors are column vectors) or on the right (vectors
are row vectors).
* whether the rotations move the axes as they are applied (intrinsic
rotations) - compared to the situation where the axes stay fixed and the
vectors move within the axis frame (extrinsic)
* the handedness of the coordinate system
See: http://en.wikipedia.org/wiki/Rotation_matrix#Ambiguities
We are using the following conventions:
* axes `i`, `j`, `k` are the `z`, `y`, and `x` axes respectively. Thus
an Euler angle vector [ :math:`alpha`, :math:`beta`, :math:`gamma` ]
in our convention implies a :math:`alpha` radian rotation around the
`z` axis, followed by a :math:`beta` rotation around the `y` axis,
followed by a :math:`gamma` rotation around the `x` axis.
* the rotation matrix applies on the left, to column vectors on the
right, so if `R` is the rotation matrix, and `v` is a 3 x N matrix
with N column vectors, the transformed vector set `vdash` is given by
``vdash = np.dot(R, v)``.
* extrinsic rotations - the axes are fixed, and do not move with the
rotations.
* a right-handed coordinate system
The convention of rotation around ``z``, followed by rotation around
``y``, followed by rotation around ``x``, is known (confusingly) as
"xyz", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles.
'''
import sys
import math
if sys.version_info >= (3, 0):
from functools import reduce
import numpy as np
_FLOAT_EPS_4 = np.finfo(float).eps * 4.0
def euler2mat(z=0, y=0, x=0):
''' Return matrix for rotations around z, y and x axes
Uses the z, then y, then x convention above
Parameters
----------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Returns
-------
M : array shape (3,3)
Rotation matrix giving same rotation as for given angles
Examples
--------
>>> zrot = 1.3 # radians
>>> yrot = -0.1
>>> xrot = 0.2
>>> M = euler2mat(zrot, yrot, xrot)
>>> M.shape == (3, 3)
True
The output rotation matrix is equal to the composition of the
individual rotations
>>> M1 = euler2mat(zrot)
>>> M2 = euler2mat(0, yrot)
>>> M3 = euler2mat(0, 0, xrot)
>>> composed_M = np.dot(M3, np.dot(M2, M1))
>>> np.allclose(M, composed_M)
True
You can specify rotations by named arguments
>>> np.all(M3 == euler2mat(x=xrot))
True
    When applying M to a vector, the vector should be a column vector to the
right of M. If the right hand side is a 2D array rather than a
vector, then each column of the 2D array represents a vector.
>>> vec = np.array([1, 0, 0]).reshape((3,1))
>>> v2 = np.dot(M, vec)
>>> vecs = np.array([[1, 0, 0],[0, 1, 0]]).T # giving 3x2 array
>>> vecs2 = np.dot(M, vecs)
Rotations are counter-clockwise.
>>> zred = np.dot(euler2mat(z=np.pi/2), np.eye(3))
>>> np.allclose(zred, [[0, -1, 0],[1, 0, 0], [0, 0, 1]])
True
>>> yred = np.dot(euler2mat(y=np.pi/2), np.eye(3))
>>> np.allclose(yred, [[0, 0, 1],[0, 1, 0], [-1, 0, 0]])
True
>>> xred = np.dot(euler2mat(x=np.pi/2), np.eye(3))
>>> np.allclose(xred, [[1, 0, 0],[0, 0, -1], [0, 1, 0]])
True
Notes
-----
The direction of rotation is given by the right-hand rule (orient
the thumb of the right hand along the axis around which the rotation
occurs, with the end of the thumb at the positive end of the axis;
curl your fingers; the direction your fingers curl is the direction
of rotation). Therefore, the rotations are counterclockwise if
looking along the axis of rotation from positive to negative.
'''
Ms = []
if z:
cosz = math.cos(z)
sinz = math.sin(z)
Ms.append(np.array(
[[cosz, -sinz, 0],
[sinz, cosz, 0],
[0, 0, 1]]))
if y:
cosy = math.cos(y)
siny = math.sin(y)
Ms.append(np.array(
[[cosy, 0, siny],
[0, 1, 0],
[-siny, 0, cosy]]))
if x:
cosx = math.cos(x)
sinx = math.sin(x)
Ms.append(np.array(
[[1, 0, 0],
[0, cosx, -sinx],
[0, sinx, cosx]]))
if Ms:
return reduce(np.dot, Ms[::-1])
return np.eye(3)
def mat2euler(M, cy_thresh=None):
''' Discover Euler angle vector from 3x3 matrix
Uses the conventions above.
Parameters
----------
M : array-like, shape (3,3)
cy_thresh : None or scalar, optional
threshold below which to give up on straightforward arctan for
estimating x rotation. If None (default), estimate from
precision of input.
Returns
-------
z : scalar
y : scalar
x : scalar
Rotations in radians around z, y, x axes, respectively
Notes
-----
If there was no numerical error, the routine could be derived using
Sympy expression for z then y then x rotation matrix, which is::
[ cos(y)*cos(z), -cos(y)*sin(z), sin(y)],
[cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)],
[sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)]
with the obvious derivations for z, y, and x
z = atan2(-r12, r11)
y = asin(r13)
x = atan2(-r23, r33)
Problems arise when cos(y) is close to zero, because both of::
z = atan2(cos(y)*sin(z), cos(y)*cos(z))
x = atan2(cos(y)*sin(x), cos(x)*cos(y))
will be close to atan2(0, 0), and highly unstable.
The ``cy`` fix for numerical instability below is from: *Graphics
Gems IV*, Paul Heckbert (editor), Academic Press, 1994, ISBN:
0123361559. Specifically it comes from EulerAngles.c by Ken
Shoemake, and deals with the case where cos(y) is close to zero:
See: http://www.graphicsgems.org/
The code appears to be licensed (from the website) as "can be used
without restrictions".
'''
M = np.asarray(M)
if cy_thresh is None:
try:
cy_thresh = np.finfo(M.dtype).eps * 4
except ValueError:
cy_thresh = _FLOAT_EPS_4
r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
    # cy: sqrt((cos(x)*cos(y))**2 + (cos(y)*sin(x))**2) == |cos(y)|
cy = math.sqrt(r33 * r33 + r23 * r23)
if cy > cy_thresh: # cos(y) not close to zero, standard form
z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z))
y = math.atan2(r13, cy) # atan2(sin(y), cy)
x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y))
else: # cos(y) (close to) zero, so x -> 0.0 (see above)
# so r21 -> sin(z), r22 -> cos(z) and
z = math.atan2(r21, r22)
y = math.atan2(r13, cy) # atan2(sin(y), cy)
x = 0.0
return z, y, x
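# A minimal round-trip sketch (illustration only, not part of the original
# module): for angles away from the cos(y) ~ 0 singularity, mat2euler recovers
# the arguments passed to euler2mat.
def _mat2euler_roundtrip_demo():
    z, y, x = 0.3, -0.2, 0.5
    assert np.allclose(mat2euler(euler2mat(z, y, x)), (z, y, x))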
def euler2quat(z=0, y=0, x=0):
''' Return quaternion corresponding to these Euler angles
Uses the z, then y, then x convention above
Parameters
----------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Returns
-------
quat : array shape (4,)
Quaternion in w, x, y z (real, then vector) format
Notes
-----
We can derive this formula in Sympy using:
1. Formula giving quaternion corresponding to rotation of theta radians
about arbitrary axis:
http://mathworld.wolfram.com/EulerParameters.html
2. Generated formulae from 1.) for quaternions corresponding to
theta radians rotations about ``x, y, z`` axes
3. Apply quaternion multiplication formula -
http://en.wikipedia.org/wiki/Quaternions#Hamilton_product - to
formulae from 2.) to give formula for combined rotations.
'''
z = z / 2.0
y = y / 2.0
x = x / 2.0
cz = math.cos(z)
sz = math.sin(z)
cy = math.cos(y)
sy = math.sin(y)
cx = math.cos(x)
sx = math.sin(x)
return np.array([
cx * cy * cz - sx * sy * sz,
cx * sy * sz + cy * cz * sx,
cx * cz * sy - sx * cy * sz,
cx * cy * sz + sx * cz * sy])
def quat2euler(q):
''' Return Euler angles corresponding to quaternion `q`
Parameters
----------
q : 4 element sequence
w, x, y, z of quaternion
Returns
-------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Notes
-----
It's possible to reduce the amount of calculation a little, by
combining parts of the ``quat2mat`` and ``mat2euler`` functions, but
the reduction in computation is small, and the code repetition is
large.
'''
# delayed import to avoid cyclic dependencies
import nibabel.quaternions as nq
return mat2euler(nq.quat2mat(q))
def euler2angle_axis(z=0, y=0, x=0):
''' Return angle, axis corresponding to these Euler angles
Uses the z, then y, then x convention above
Parameters
----------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Returns
-------
theta : scalar
angle of rotation
vector : array shape (3,)
axis around which rotation occurs
Examples
--------
>>> theta, vec = euler2angle_axis(0, 1.5, 0)
>>> print(theta)
1.5
>>> np.allclose(vec, [0, 1, 0])
True
'''
# delayed import to avoid cyclic dependencies
import nibabel.quaternions as nq
return nq.quat2angle_axis(euler2quat(z, y, x))
def angle_axis2euler(theta, vector, is_normalized=False):
''' Convert angle, axis pair to Euler angles
Parameters
----------
theta : scalar
angle of rotation
vector : 3 element sequence
vector specifying axis for rotation.
is_normalized : bool, optional
True if vector is already normalized (has norm of 1). Default
False
Returns
-------
z : scalar
y : scalar
x : scalar
Rotations in radians around z, y, x axes, respectively
Examples
--------
>>> z, y, x = angle_axis2euler(0, [1, 0, 0])
>>> np.allclose((z, y, x), 0)
True
Notes
-----
It's possible to reduce the amount of calculation a little, by
combining parts of the ``angle_axis2mat`` and ``mat2euler``
functions, but the reduction in computation is small, and the code
repetition is large.
'''
# delayed import to avoid cyclic dependencies
import nibabel.quaternions as nq
M = nq.angle_axis2mat(theta, vector, is_normalized)
return mat2euler(M)
| 32.02864
| 99
| 0.613189
|
794d3383b89171f69db008f51626552173254cae
| 370
|
py
|
Python
|
wxpusher/tests/config.py
|
hnauto/wxpusher-sdk-python
|
6787c0bdd7bef673b1a3f56984f6587c07bb0546
|
[
"Apache-2.0"
] | null | null | null |
wxpusher/tests/config.py
|
hnauto/wxpusher-sdk-python
|
6787c0bdd7bef673b1a3f56984f6587c07bb0546
|
[
"Apache-2.0"
] | null | null | null |
wxpusher/tests/config.py
|
hnauto/wxpusher-sdk-python
|
6787c0bdd7bef673b1a3f56984f6587c07bb0546
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unittest configuration sample.
File: config.sample.py
Author: huxuan
Email: i(at)huxuan.org
"""
# the `appToken` for test.
TOKEN = ''
# the `uids` for test, note that it should be a list.
UIDS = [
'',
]
# the `topic_ids` for test, note that it should be a list.
TOPIC_IDS = [
'',
]
| 17.619048
| 58
| 0.637838
|
794d33882ffc01806d6ca32b4d020e2736af1b0a
| 65
|
py
|
Python
|
utils/hrnet/__init__.py
|
wufanyou/Traffic4Cast-2020-TLab
|
5226bb1d2db40badb33c6b0ffe659fc6e9dca544
|
[
"Apache-2.0"
] | 3
|
2020-11-03T16:04:22.000Z
|
2021-05-22T15:38:24.000Z
|
utils/hrnet/__init__.py
|
wufanyou/Traffic4Cast-2020-TLab
|
5226bb1d2db40badb33c6b0ffe659fc6e9dca544
|
[
"Apache-2.0"
] | null | null | null |
utils/hrnet/__init__.py
|
wufanyou/Traffic4Cast-2020-TLab
|
5226bb1d2db40badb33c6b0ffe659fc6e9dca544
|
[
"Apache-2.0"
] | null | null | null |
from .config import get_cfg
from .seg_hrnet import get_seg_model
| 21.666667
| 36
| 0.846154
|
794d342da46bb612a35842b1ed51c3d6d5287f97
| 2,376
|
py
|
Python
|
src/chat/consumers.py
|
ckz8780/django_channels
|
63518d7b9362f73e7222a519a1ae0dd4a0544793
|
[
"MIT"
] | null | null | null |
src/chat/consumers.py
|
ckz8780/django_channels
|
63518d7b9362f73e7222a519a1ae0dd4a0544793
|
[
"MIT"
] | null | null | null |
src/chat/consumers.py
|
ckz8780/django_channels
|
63518d7b9362f73e7222a519a1ae0dd4a0544793
|
[
"MIT"
] | null | null | null |
import asyncio
import json
from django.contrib.auth import get_user_model
from channels.consumer import AsyncConsumer
from channels.db import database_sync_to_async
from .models import Thread, ChatMessage
class ChatConsumer(AsyncConsumer):
async def websocket_connect(self, event):
# When the socket connects
print('Connected', event)
other_user = self.scope['url_route']['kwargs']['username']
me = self.scope['user']
thread_obj = await self.get_thread(me, other_user)
chat_room = f'thread_{thread_obj.id}'
self.chat_room = chat_room
self.thread_obj = thread_obj
await self.channel_layer.group_add(chat_room, self.channel_name)
await self.send({
'type': 'websocket.accept'
})
async def websocket_receive(self, event):
# When a message is received from the websocket
print('Received', event)
front_text = event.get('text', None)
if front_text is not None:
loaded_dict_data = json.loads(front_text)
msg = loaded_dict_data.get('message')
user = self.scope['user']
my_response = {
'message': msg,
'username': user.username if user.is_authenticated else 'Anonymous'
}
await self.create_chat_message(msg)
# Broadcast the message event to be sent out
await self.channel_layer.group_send(
self.chat_room,
{
'type': 'chat_message',
'text': json.dumps(my_response)
}
)
async def chat_message(self, event):
print('message', event)
# Send the message out to the chat room participants
await self.send({
'type': 'websocket.send',
'text': event['text']
})
async def websocket_disconnect(self, event):
# When the socket disconnects
print('Disconnected', event)
@database_sync_to_async
def get_thread(self, user, other_username):
return Thread.objects.get_or_new(user, other_username)[0]
@database_sync_to_async
def create_chat_message(self, message):
thread_obj = self.thread_obj
me = self.scope['user']
return ChatMessage.objects.create(thread=thread_obj, user=me, message=message)
| 33
| 86
| 0.617424
|
794d351df84fc9304e962f21f1b81b28f13b1aa0
| 3,106
|
py
|
Python
|
tests/ut/python/metrics/test_cosine_similarity.py
|
GuoSuiming/mindspore
|
48afc4cfa53d970c0b20eedfb46e039db2a133d5
|
[
"Apache-2.0"
] | 3,200
|
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
tests/ut/python/metrics/test_cosine_similarity.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 176
|
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
tests/ut/python/metrics/test_cosine_similarity.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 621
|
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test cosine_similarity"""
import pytest
import numpy as np
from sklearn.metrics import pairwise
from mindspore.nn.metrics import CosineSimilarity
def test_cosine_similarity():
"""test_cosine_similarity"""
test_data = np.array([[5, 8, 3, 2], [5, 8, 3, 2], [4, 2, 3, 4]])
metric = CosineSimilarity()
metric.clear()
metric.update(test_data)
square_matrix = metric.eval()
assert np.allclose(square_matrix, np.array([[0, 1, 0.78229315], [1, 0, 0.78229315], [0.78229315, 0.78229315, 0]]))
def test_cosine_similarity_compare():
"""test_cosine_similarity_compare"""
test_data = np.array([[5, 8, 3, 2], [5, 8, 3, 2], [4, 2, 3, 4]])
metric = CosineSimilarity(similarity='cosine', reduction='none', zero_diagonal=False)
metric.clear()
metric.update(test_data)
ms_square_matrix = metric.eval()
def sklearn_cosine_similarity(test_data, similarity, reduction):
"""sklearn_cosine_similarity"""
metric_func = {'cosine': pairwise.cosine_similarity,
'dot': pairwise.linear_kernel}[similarity]
square_matrix = metric_func(test_data, test_data)
if reduction == 'mean':
return square_matrix.mean(axis=-1)
if reduction == 'sum':
return square_matrix.sum(axis=-1)
return square_matrix
sk_square_matrix = sklearn_cosine_similarity(test_data, similarity='cosine', reduction='none')
assert np.allclose(sk_square_matrix, ms_square_matrix)
def test_cosine_similarity_init1():
"""test_cosine_similarity_init1"""
with pytest.raises(ValueError):
CosineSimilarity(similarity="4")
def test_cosine_similarity_init2():
"""test_cosine_similarity_init2"""
with pytest.raises(TypeError):
CosineSimilarity(similarity=4)
def test_cosine_similarity_init3():
"""test_cosine_similarity_init3"""
with pytest.raises(TypeError):
CosineSimilarity(reduction=2)
def test_cosine_similarity_init4():
"""test_cosine_similarity_init4"""
with pytest.raises(ValueError):
CosineSimilarity(reduction="1")
def test_cosine_similarity_init5():
"""test_cosine_similarity_init5"""
with pytest.raises(TypeError):
CosineSimilarity(zero_diagonal=3)
def test_cosine_similarity_runtime():
"""test_cosine_similarity_runtime"""
metric = CosineSimilarity()
metric.clear()
with pytest.raises(RuntimeError):
metric.eval()
| 32.354167
| 118
| 0.690277
|
794d358ea6ad1bc78020915df6010de98c8872f6
| 547
|
py
|
Python
|
manage.py
|
lixiaolongxl/DBlog
|
7de45c9d571499109f46c1e05be764062e7554f9
|
[
"MIT"
] | null | null | null |
manage.py
|
lixiaolongxl/DBlog
|
7de45c9d571499109f46c1e05be764062e7554f9
|
[
"MIT"
] | 11
|
2020-03-24T17:45:37.000Z
|
2022-03-12T00:04:10.000Z
|
manage.py
|
lixiaolongxl/DBlog
|
7de45c9d571499109f46c1e05be764062e7554f9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'eastnotes.settings.local')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34.1875
| 79
| 0.689214
|
794d361ffaeae35d1cd85a3f772c74710c6c991b
| 9,267
|
py
|
Python
|
ur3_man_bringup/script/robotiq_2f_85_driver.py
|
I-Quotient-Robotics/ur3_man
|
0d87f553c3f5680289094ed3262b224f80f032a6
|
[
"MIT"
] | 1
|
2019-12-07T11:35:36.000Z
|
2019-12-07T11:35:36.000Z
|
ur3_man_bringup/script/robotiq_2f_85_driver.py
|
I-Quotient-Robotics/ur3_man
|
0d87f553c3f5680289094ed3262b224f80f032a6
|
[
"MIT"
] | null | null | null |
ur3_man_bringup/script/robotiq_2f_85_driver.py
|
I-Quotient-Robotics/ur3_man
|
0d87f553c3f5680289094ed3262b224f80f032a6
|
[
"MIT"
] | 1
|
2019-12-07T12:01:13.000Z
|
2019-12-07T12:01:13.000Z
|
#!/usr/bin/env python
"""--------------------------------------------------------------------
COPYRIGHT 2015 Stanley Innovation Inc.
Software License Agreement:
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\file robotiq_85_driver.py
\brief Driver for Robotiq 85 communication
\Platform: Linux/ROS Indigo
--------------------------------------------------------------------"""
from robotiq_85.robotiq_85_gripper import Robotiq85Gripper
from robotiq_85_msgs.msg import GripperCmd, GripperStat
from sensor_msgs.msg import JointState
import numpy as np
import rospy
class Robotiq85Driver:
def __init__(self):
gripper_prefix = rospy.get_param('~gripper_prefix','1A')
self._num_grippers = rospy.get_param('~num_grippers',1)
self._comport = rospy.get_param('~comport','/dev/ttyUSB0')
self._baud = rospy.get_param('~baud','115200')
self._gripper = Robotiq85Gripper(self._num_grippers,self._comport,self._baud)
if not self._gripper.init_success:
rospy.logerr("Unable to open commport to %s" % self._comport)
return
if (self._num_grippers == 1):
rospy.Subscriber(gripper_prefix+"/gripper/cmd", GripperCmd, self._update_gripper_cmd, queue_size=10)
self._gripper_pub = rospy.Publisher(gripper_prefix+'/gripper/stat', GripperStat, queue_size=10)
self._gripper_joint_state_pub = rospy.Publisher(gripper_prefix+'/gripper/joint_states', JointState, queue_size=10)
elif (self._num_grippers == 2):
rospy.Subscriber("/left_gripper/cmd", GripperCmd, self._update_gripper_cmd, queue_size=10)
self._left_gripper_pub = rospy.Publisher('/left_gripper/stat', GripperStat, queue_size=10)
self._left_gripper_joint_state_pub = rospy.Publisher('/left_gripper/joint_states', JointState, queue_size=10)
rospy.Subscriber("/right_gripper/cmd", GripperCmd, self._update_right_gripper_cmd, queue_size=10)
self._right_gripper_pub = rospy.Publisher('/right_gripper/stat', GripperStat, queue_size=10)
self._right_gripper_joint_state_pub = rospy.Publisher('/right_gripper/joint_states', JointState, queue_size=10)
else:
rospy.logerr("Number of grippers not supported (needs to be 1 or 2)")
return
self._seq = [0] * self._num_grippers
self._prev_js_pos = [0.0] * self._num_grippers
self._prev_js_time = [rospy.get_time()] * self._num_grippers
self._driver_state = 0
self._driver_ready = False
success = True
for i in range(self._num_grippers):
success &= self._gripper.process_stat_cmd(i)
if not success:
bad_gripper = i
if not success:
rospy.logerr("Failed to contact gripper %d....ABORTING"%bad_gripper)
return
self._run_driver(gripper_prefix)
def _clamp_cmd(self,cmd,lower,upper):
if (cmd < lower):
return lower
elif (cmd > upper):
return upper
else:
return cmd
def _update_gripper_cmd(self,cmd):
if (True == cmd.emergency_release):
self._gripper.activate_emergency_release(open_gripper=cmd.emergency_release_dir)
return
else:
self._gripper.deactivate_emergency_release()
if (True == cmd.stop):
self._gripper.stop()
else:
pos = self._clamp_cmd(cmd.position,0.0,0.085)
vel = self._clamp_cmd(cmd.speed,0.013,0.1)
force = self._clamp_cmd(cmd.force,5.0,220.0)
self._gripper.goto(dev=0,pos=pos,vel=vel,force=force)
def _update_right_gripper_cmd(self,cmd):
if (True == cmd.emergency_release):
self._gripper.activate_emergency_release(dev=1,open_gripper=cmd.emergency_release_dir)
return
else:
self._gripper.deactivate_emergency_release(dev=1)
if (True == cmd.stop):
self._gripper.stop(dev=1)
else:
pos = self._clamp_cmd(cmd.position,0.0,0.085)
vel = self._clamp_cmd(cmd.speed,0.013,0.1)
force = self._clamp_cmd(cmd.force,5.0,220.0)
self._gripper.goto(dev=1,pos=pos,vel=vel,force=force)
def _update_gripper_stat(self,dev=0):
stat = GripperStat()
stat.header.stamp = rospy.get_rostime()
stat.header.seq = self._seq[dev]
stat.is_ready = self._gripper.is_ready(dev)
stat.is_reset = self._gripper.is_reset(dev)
stat.is_moving = self._gripper.is_moving(dev)
stat.obj_detected = self._gripper.object_detected(dev)
stat.fault_status = self._gripper.get_fault_status(dev)
stat.position = self._gripper.get_pos(dev)
stat.requested_position = self._gripper.get_req_pos(dev)
stat.current = self._gripper.get_current(dev)
self._seq[dev]+=1
return stat
def _update_gripper_joint_state(self,gripper_prefix,dev=0):
js = JointState()
js.header.frame_id = ''
js.header.stamp = rospy.get_rostime()
js.header.seq = self._seq[dev]
js.name = [gripper_prefix+'_gripper_finger1_joint']
pos = np.clip(0.8 - ((0.8/0.085) * self._gripper.get_pos(dev)), 0., 0.8)
js.position = [pos]
dt = rospy.get_time() - self._prev_js_time[dev]
self._prev_js_time[dev] = rospy.get_time()
js.velocity = [(pos-self._prev_js_pos[dev])/dt]
self._prev_js_pos[dev] = pos
return js
def _run_driver(self,gripper_prefix):
last_time = rospy.get_time()
r = rospy.Rate(100)
while not rospy.is_shutdown():
dt = rospy.get_time() - last_time
if (0 == self._driver_state):
for i in range(self._num_grippers):
if (dt < 0.5):
self._gripper.deactivate_gripper(i)
else:
self._driver_state = 1
elif (1 == self._driver_state):
grippers_activated = True
for i in range(self._num_grippers):
self._gripper.activate_gripper(i)
grippers_activated &= self._gripper.is_ready(i)
if (grippers_activated):
self._driver_state = 2
elif (2 == self._driver_state):
self._driver_ready = True
for i in range(self._num_grippers):
success = True
success &= self._gripper.process_act_cmd(i)
success &= self._gripper.process_stat_cmd(i)
if not success:
rospy.logerr("Failed to contact gripper %d"%i)
else:
stat = GripperStat()
js = JointState()
stat = self._update_gripper_stat(i)
js = self._update_gripper_joint_state(gripper_prefix,i)
if (1 == self._num_grippers):
self._gripper_pub.publish(stat)
self._gripper_joint_state_pub.publish(js)
else:
if (i == 0):
self._left_gripper_pub.publish(stat)
self._left_gripper_joint_state_pub.publish(js)
else:
self._right_gripper_pub.publish(stat)
self._right_gripper_joint_state_pub.publish(js)
r.sleep()
self._gripper.shutdown()
if __name__ == "__main__":
"""
Initialize the node
"""
rospy.init_node('robotiq_85_driver')
driver = Robotiq85Driver()
| 43.303738
| 134
| 0.616704
|
794d3669b73656690d9655403d17fe8f79925a67
| 2,245
|
py
|
Python
|
rsa/txx.py
|
userElaina/hg8
|
235dbeca3d58b94e1378ac4240ed8424791ae561
|
[
"MIT"
] | null | null | null |
rsa/txx.py
|
userElaina/hg8
|
235dbeca3d58b94e1378ac4240ed8424791ae561
|
[
"MIT"
] | null | null | null |
rsa/txx.py
|
userElaina/hg8
|
235dbeca3d58b94e1378ac4240ed8424791ae561
|
[
"MIT"
] | null | null | null |
import math
import sympy
from Crypto.Util.number import *
e = 65537
def get_p():
x = 11124440021748127159092076861405454814981575144744508857178576572929321435002942998531420985771090167262256877805902135304112271641074498386662361391760451
y = 11124440021748127159092076861405454814981575144744508857178576572929321435002942998531420985771090167262256877805902135304112271641074498386662361391661439
    value_p = sympy.nextprime((math.factorial(y)) % x)  # Hint: computing this directly will overflow; look closely at how x and y relate
return value_p
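# Hedged side note on the hint above: math.factorial(y) cannot be computed
# directly (y has roughly 155 digits), but by inspection x - y == 99012, so,
# assuming x is prime (as the challenge construction suggests), Wilson's
# theorem ((x-1)! = -1 mod x) gives y! = -inverse((y+1)*(y+2)*...*(x-1)) mod x.
# The helper below is an illustrative sketch of that idea, not original code.
def factorial_mod_via_wilson(y, x):
    prod = 1
    for i in range(y + 1, x):  # only x - y - 1 == 99011 factors are needed
        prod = prod * i % x
    # modular inverse by Fermat's little theorem, valid when x is prime
    return (-pow(prod, x - 2, x)) % x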
def get_q():
value = [getPrime(256)]
for i in range(1, 10):
value.append(sympy.nextprime(value[i - 1]))
print("value[-1] = ", value[-1])
# value[-1] = 80096058210213458444437404275177554701604739094679033012396452382975889905967
n = 1
for i in range(10):
n = n * value[i]
q = getPrime(512)
value_q = pow(q, e, n)
print("value_q = ", value_q)
# value_q = 5591130088089053683141520294620171646179623062803708281023766040254675625012293743465254007970358536660934858789388093688621793201658889399155357407224541324547522479617669812322262372851929223461622559971534394847970366311206823328200747893961649255426063204482192349202005330622561575868946656570678176047822163692259375233925446556338917358118222905050574458037965803154233167594946713038301249145097770337253930655681648299249481985768272321820718607757023350742647019762122572886601905212830744868048802864679734428398229280780215896045509020793530842541217790352661324630048261329493088812057300480085895399922301827190211956061083460036781018660201163819104150988531352228650991733072010425499238731811243310625701946882701082178190402011133439065106720309788819
return sympy.nextprime(q)
# this destroys the rsa cryptosystem
p = get_p()
q = get_q()
m = int.from_bytes(open("flag.txt", "rb").read(), "big")
c = pow(m, e, p * q)
print("c = ", c)
# c = 110644875422336073350488613774418819991169603750711465190260581119043921549811353108399064284589038384540018965816137286856268590507418636799746759551009749004176545414118128330198437101472882906564195341277423007542422286760940374859966152871273887950174522820162832774361714668826122465471705166574184367478
| 59.078947
| 786
| 0.838753
|
794d36e4400fb05c98982de2626e267a68185fd3
| 1,178
|
py
|
Python
|
embedding/_fasttext/_emb_matrix_fasttext.py
|
vd1371/CBSA
|
f2b3f03c91ccd9ec02c2331f43573d7d6e72fd47
|
[
"MIT"
] | null | null | null |
embedding/_fasttext/_emb_matrix_fasttext.py
|
vd1371/CBSA
|
f2b3f03c91ccd9ec02c2331f43573d7d6e72fd47
|
[
"MIT"
] | null | null | null |
embedding/_fasttext/_emb_matrix_fasttext.py
|
vd1371/CBSA
|
f2b3f03c91ccd9ec02c2331f43573d7d6e72fd47
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
from multiprocessing import current_process
if current_process().name == "MainProcess":
from tensorflow.keras.preprocessing.text import Tokenizer
from ._load_embedding import _load_embedding
from DataLoader import load_unique_words
def emb_matrix_fasttext(X, **params):
emb_dimension = params.get("emb_dimension")
vocab_size = len(load_unique_words()) + 1
print('creating embedding matrix')
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X)
index_dict = tokenizer.word_index
# create a weight matrix for words in training docs
embedding_matrix = np.zeros((vocab_size, emb_dimension))
word_embedding = _load_embedding(**params)
for word, index in index_dict.items():
		# word is the key and index is the value in the tokenizer.word_index dictionary
embedding_vector = word_embedding[emb_dimension].get(word)
if index < vocab_size:
if embedding_vector is not None:
#words not found in embedding index will be all-zeros
embedding_matrix[index] = embedding_vector
return index_dict, vocab_size, embedding_matrix
| 31
| 86
| 0.716469
|
794d373d4b031511167633d77764cf229ee06434
| 5,143
|
py
|
Python
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/virtual_machine_scale_set_os_profile.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/virtual_machine_scale_set_os_profile.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 2
|
2016-09-30T21:40:24.000Z
|
2017-11-10T18:16:18.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/virtual_machine_scale_set_os_profile.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetOSProfile(Model):
"""Describes a virtual machine scale set OS profile.
:param computer_name_prefix: Specifies the computer name prefix for all of
the virtual machines in the scale set. Computer name prefixes must be 1 to
15 characters long.
:type computer_name_prefix: str
:param admin_username: Specifies the name of the administrator account.
<br><br> **Windows-only restriction:** Cannot end in "." <br><br>
**Disallowed values:** "administrator", "admin", "user", "user1", "test",
"user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm",
"admin2", "aspnet", "backup", "console", "david", "guest", "john",
"owner", "root", "server", "sql", "support", "support_388945a0", "sys",
"test2", "test3", "user4", "user5". <br><br> **Minimum-length (Linux):** 1
character <br><br> **Max-length (Linux):** 64 characters <br><br>
**Max-length (Windows):** 20 characters <br><br><li> For root access to
the Linux VM, see [Using root privileges on Linux virtual machines in
Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)<br><li>
For a list of built-in system users on Linux that should not be used in
this field, see [Selecting User Names for Linux on
Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
:type admin_username: str
:param admin_password: Specifies the password of the administrator
account. <br><br> **Minimum-length (Windows):** 8 characters <br><br>
**Minimum-length (Linux):** 6 characters <br><br> **Max-length
(Windows):** 123 characters <br><br> **Max-length (Linux):** 72 characters
<br><br> **Complexity requirements:** 3 out of 4 conditions below need to
be fulfilled <br> Has lower characters <br>Has upper characters <br> Has a
digit <br> Has a special character (Regex match [\\W_]) <br><br>
**Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123",
"Pa$$word", "pass@word1", "Password!", "Password1", "Password22",
"iloveyou!" <br><br> For resetting the password, see [How to reset the
Remote Desktop service or its login password in a Windows
VM](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-reset-rdp?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> For resetting root password, see [Manage users, SSH, and check or
repair disks on Azure Linux VMs using the VMAccess
Extension](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-vmaccess-extension?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#reset-root-password)
:type admin_password: str
:param custom_data: A base-64 encoded string of custom data.
:type custom_data: str
:param windows_configuration: The Windows Configuration of the OS profile.
:type windows_configuration:
~azure.mgmt.compute.v2016_04_30_preview.models.WindowsConfiguration
:param linux_configuration: The Linux Configuration of the OS profile.
:type linux_configuration:
~azure.mgmt.compute.v2016_04_30_preview.models.LinuxConfiguration
:param secrets: The List of certificates for addition to the VM.
:type secrets:
list[~azure.mgmt.compute.v2016_04_30_preview.models.VaultSecretGroup]
"""
_attribute_map = {
'computer_name_prefix': {'key': 'computerNamePrefix', 'type': 'str'},
'admin_username': {'key': 'adminUsername', 'type': 'str'},
'admin_password': {'key': 'adminPassword', 'type': 'str'},
'custom_data': {'key': 'customData', 'type': 'str'},
'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
}
def __init__(self, computer_name_prefix=None, admin_username=None, admin_password=None, custom_data=None, windows_configuration=None, linux_configuration=None, secrets=None):
super(VirtualMachineScaleSetOSProfile, self).__init__()
self.computer_name_prefix = computer_name_prefix
self.admin_username = admin_username
self.admin_password = admin_password
self.custom_data = custom_data
self.windows_configuration = windows_configuration
self.linux_configuration = linux_configuration
self.secrets = secrets
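# Illustrative usage sketch with placeholder values (not part of the generated
# SDK file): the keyword arguments mirror the constructor defined above.
def _example_os_profile():
    return VirtualMachineScaleSetOSProfile(
        computer_name_prefix='web',
        admin_username='azureuser',
        admin_password='<password meeting the complexity rules above>',
    )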
| 60.505882
| 185
| 0.685009
|
794d374c7379fa7888f12f42eec83ee89db77904
| 391
|
py
|
Python
|
canvas.py
|
emile-bernard/OpenCV-Mp4-HSV
|
c73b4338f5c38fc1027380c3f545f33ace432e2c
|
[
"MIT"
] | 1
|
2020-07-23T14:13:39.000Z
|
2020-07-23T14:13:39.000Z
|
canvas.py
|
emile-bernard/OpenCV_Real_Time_RBG_Histogram
|
a40fe620b6e6d0e53647d40f95ba743e01342c40
|
[
"MIT"
] | null | null | null |
canvas.py
|
emile-bernard/OpenCV_Real_Time_RBG_Histogram
|
a40fe620b6e6d0e53647d40f95ba743e01342c40
|
[
"MIT"
] | null | null | null |
import tkinter as tk
class Canvas:
def __init__(self, parent, videoCapture):
self.canvas = tk.Canvas(parent, width = videoCapture.getWidth(), height = videoCapture.getHeight())
self.draw()
def draw(self):
self.canvas.pack(side = "left")
def createImage(self, x, y, image, anchor):
self.canvas.create_image(x, y, image = image, anchor = anchor)
| 30.076923
| 107
| 0.652174
|
794d3823e703a9efb3834939777f06af7e8c6d00
| 4,665
|
py
|
Python
|
utils/lecture_utils.py
|
Hermes-Berkeley-Org/hermes-resource-server
|
ff8ac3a72e29db385c7def0c107e2d9d1d8aa9ed
|
[
"MIT"
] | 1
|
2020-05-25T04:58:00.000Z
|
2020-05-25T04:58:00.000Z
|
utils/lecture_utils.py
|
Hermes-Berkeley-Org/hermes-resource-server
|
ff8ac3a72e29db385c7def0c107e2d9d1d8aa9ed
|
[
"MIT"
] | 21
|
2018-09-01T17:53:18.000Z
|
2021-04-30T20:36:30.000Z
|
utils/lecture_utils.py
|
Hermes-Berkeley-Org/hermes-resource-server
|
ff8ac3a72e29db385c7def0c107e2d9d1d8aa9ed
|
[
"MIT"
] | null | null | null |
import requests
from requests.exceptions import RequestException, ConnectionError
from urllib.parse import urlparse, parse_qs
from utils.errors import (
InvalidLectureLinkError, VideoParseError, NoCourseFoundError, YoutubeError,
LectureAlreadyExists
)
from utils.youtube_client import YoutubeClient
from utils.db_utils import insert, create_reference, encode_url
from utils.db_utils import Course, Lecture, Video, Transcript
def create_lecture(course_ok_id, db, lecture_title,
date, link, youtube_access_token):
"""Executes full lecture creation process, which includes:
- Handling playlists and single videos
- Creates and stores a Lecture object in the DB, with a lookup key to a Course
- Creates and stores Video objects for each video in the YouTube link given,
with a lookup key to a Lecture and Course
- Creates and stores Transcript objects for each video,
with a lookup key to a Course, Lecture, and Video
"""
course = db[Course.collection].find_one({'course_ok_id': course_ok_id})
if not course:
raise NoCourseFoundError(
'Course associated with OK ID {0} does not exist in the database'
.format(course_ok_id)
)
# check for duplicate title
lecture_url_name = encode_url(lecture_title)
if db[Lecture.collection].find_one(
{
'lecture_url_name': lecture_url_name,
'course_ok_id': course_ok_id
}
):
raise LectureAlreadyExists(
'A lecture with title {0} in course OK ID {1} has already been created'
.format(lecture_title, course_ok_id)
)
lecture_index = course['num_lectures']
youtube_url = get_final_youtube_url(link)
youtube_client = YoutubeClient(youtube_access_token)
youtube_ids = get_youtube_ids(youtube_url, youtube_client)
# populate data first, so that on error objects aren't created
video_titles = []
videos = []
transcripts = []
no_transcript_videos = []
for video_index, youtube_id in enumerate(youtube_ids):
title, duration = youtube_client.get_video_metadata(youtube_id)
videos.append(
Video(
title=title,
duration=duration,
youtube_id=youtube_id,
course_ok_id=course_ok_id,
lecture_url_name=lecture_url_name,
video_index=video_index,
num_vitamins=0,
num_resources=0
)
)
video_titles.append(title)
try:
transcript = youtube_client.get_transcript(youtube_id)
transcripts.append(
Transcript(
transcript=transcript,
course_ok_id=course_ok_id,
lecture_url_name=lecture_url_name,
video_index=video_index
)
)
except YoutubeError as e:
# support for videos without a transcript
no_transcript_videos.append(video_index)
for video in videos:
insert(video, db)
for transcript in transcripts:
insert(transcript, db)
lecture = Lecture(
name=lecture_title,
date=date,
lecture_url_name=lecture_url_name,
lecture_index=lecture_index,
course_ok_id=course_ok_id,
video_titles=video_titles,
lecture_piazza_id=""
)
insert(lecture, db)
db[Course.collection].update_one(
{'course_ok_id': course_ok_id},
{
'$set': {
'num_lectures': course['num_lectures'] + 1
}
}
)
return {
'no_transcript_videos': no_transcript_videos
}, lecture_url_name
def get_final_youtube_url(link):
"""Checks if YouTube link is a valid URL and gets the final redirected
link (e.g. youtu.be --> youtube.com)
"""
ses = requests.Session()
if not link.startswith('http'):
link = 'http://{0}'.format(link)
try:
return ses.head(link, allow_redirects=True).url
    except (RequestException, ConnectionError) as e:
raise InvalidLectureLinkError('Lecture YouTube link invalid')
def get_youtube_ids(youtube_url, youtube_client):
"""Retrieves YouTube IDs (youtube.com/watch?v=<youtube_id>) associated
with a YouTube URL
"""
metadata = youtube_client.get_link_metadata(youtube_url)
if metadata.get('playlist_id'):
return youtube_client.get_playlist_video_ids(metadata['playlist_id'])
elif metadata.get('video_id'):
return [metadata['video_id']]
raise VideoParseError('Cannot get videos from lecture link')
| 36.732283
| 83
| 0.650375
|
794d3846effd44b4d6eb167fac2bc8769bfb543d
| 215
|
py
|
Python
|
distrib/package.py
|
Bauer-C/opengl-exercises
|
1660347844b6703dd3317d9b8f9344028e94de3e
|
[
"WTFPL"
] | 3
|
2017-11-21T08:05:12.000Z
|
2021-09-23T12:39:53.000Z
|
distrib/package.py
|
Bauer-C/opengl-exercises
|
1660347844b6703dd3317d9b8f9344028e94de3e
|
[
"WTFPL"
] | 1
|
2022-02-02T22:03:19.000Z
|
2022-02-08T00:11:18.000Z
|
distrib/package.py
|
Bauer-C/opengl-exercises
|
1660347844b6703dd3317d9b8f9344028e94de3e
|
[
"WTFPL"
] | 5
|
2017-11-28T08:04:15.000Z
|
2022-01-13T23:27:46.000Z
|
from utils import *
HgUpdate33()
version = raw_input("Enter version number (ex : 0003) :")
Package("OpenGL-tutorial_v"+version+"_33.zip");
HgUpdate21()
Package("OpenGL-tutorial_v"+version+"_21.zip");
HgUpdate33()
| 23.888889
| 57
| 0.730233
|
794d38502552460bfbdab3734e3dd6dc86cd4196
| 1,695
|
py
|
Python
|
terngrad/inception/pruning_common.py
|
feifeibear/dist-tensorflow
|
af6ae012f1454aff2c58d26808705e01ed2f1376
|
[
"Apache-2.0"
] | null | null | null |
terngrad/inception/pruning_common.py
|
feifeibear/dist-tensorflow
|
af6ae012f1454aff2c58d26808705e01ed2f1376
|
[
"Apache-2.0"
] | null | null | null |
terngrad/inception/pruning_common.py
|
feifeibear/dist-tensorflow
|
af6ae012f1454aff2c58d26808705e01ed2f1376
|
[
"Apache-2.0"
] | 2
|
2019-06-04T07:28:27.000Z
|
2020-11-01T22:20:29.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def pruning_gradients(grads_and_vars, percent, residual_grads):
"""
pruning grads according to the percent.
"""
gradients, variables = zip(*grads_and_vars)
pruned_gradients = []
new_residual_grads = []
current_start_pos = 0
for gradient in gradients:
if gradient is None:
pruned_gradients.append(None)
continue
# find the top percent largest value.
gradient_shape = tf.shape(gradient)
gradient_flat = tf.reshape(gradient, [-1])
grad_size = gradient_flat.shape.as_list()[0]
print('FJR DEBUG in pruning_common grad_size ', grad_size)
residual_grad = residual_grads[current_start_pos : current_start_pos + grad_size]
current_start_pos = current_start_pos + grad_size
gradient_flat = tf.add(gradient_flat, residual_grad)
#size = tf.size(gradient_flat)
k = int(grad_size * percent)
#print(k)
values,_ = tf.nn.top_k( gradient_flat, k=k, sorted=True, name=None )
# set the values less than threshold in tensor to 0.
threshold = values[-1]
#print(threshold)
zeros = tf.zeros(shape=tf.shape(gradient), dtype=tf.float32)
where_cond = tf.reshape( tf.less(threshold, gradient), gradient_shape )
pruned_gradient = tf.where(where_cond, gradient, zeros)
pruned_gradients.append(pruned_gradient)
new_residual_grads.append(tf.reshape(tf.subtract(gradient, pruned_gradient), [-1]))
return list(zip(pruned_gradients, variables)), new_residual_grads
| 40.357143
| 91
| 0.688496
|