hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c94dda0976d23139bf577b23ee4cbcdf42f78e0 | 843 | py | Python | inout/las/reader/ui/notepad.py | adriangrepo/qreservoir | 20fba1b1fd1a42add223d9e8af2d267665bec493 | [
"MIT"
] | 2 | 2019-10-04T13:54:51.000Z | 2021-05-21T19:36:15.000Z | inout/las/reader/ui/notepad.py | adriangrepo/qreservoir | 20fba1b1fd1a42add223d9e8af2d267665bec493 | [
"MIT"
] | 3 | 2019-11-19T17:06:09.000Z | 2020-01-18T20:39:54.000Z | inout/las/reader/ui/notepad.py | adriangrepo/qreservoir | 20fba1b1fd1a42add223d9e8af2d267665bec493 | [
"MIT"
] | 2 | 2020-07-02T13:20:48.000Z | 2020-11-11T00:18:51.000Z | import sys
import os
import logging
from PyQt4 import QtGui
logger = logging.getLogger('console')
class Notepad(QtGui.QDialog):
    """Simple read-only file viewer dialog (PyQt4)."""

    def __init__(self, parent, filename):
        super(Notepad, self).__init__(parent)
        self.filename = filename
        self.initUI()
        self.showFile()

    def initUI(self):
        # A single text widget filling the whole dialog.
        hbox = QtGui.QHBoxLayout()
        self.text = QtGui.QTextEdit(self)
        hbox.addWidget(self.text)
        self.setGeometry(600, 600, 600, 600)
        self.setWindowTitle(str(self.filename))
        self.setLayout(hbox)

    def showFile(self):
        # Load the file contents into the text widget; log on failure.
        # ``with`` guarantees the handle is closed even if read() fails.
        try:
            with open(self.filename, 'r') as f:
                self.text.setText(f.read())
        except IOError:
            # BUG FIX: the original referenced the undefined local name
            # ``filename`` here, raising NameError instead of logging.
            logger.error("Cannot open file " + str(self.filename))
| 24.085714 | 61 | 0.578885 |
370999ec182ade9b348240c5fe5b6a0d69df3428 | 1,550 | py | Python | desktop/core/ext-py/eventlet-0.24.1/setup.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/eventlet-0.24.1/setup.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/eventlet-0.24.1/setup.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | #!/usr/bin/env python
import os
import setuptools
# Presumably asks eventlet/__init__.py to expose only version metadata on
# import (so the version can be read before dependencies are installed)
# -- TODO confirm against eventlet's __init__.
os.environ.setdefault('EVENTLET_IMPORT_VERSION_ONLY', '1')
import eventlet

# Standard setuptools packaging metadata for the eventlet distribution.
setuptools.setup(
    name='eventlet',
    version=eventlet.__version__,
    description='Highly concurrent networking library',
    author='Linden Lab',
    author_email='eventletdev@lists.secondlife.com',
    url='http://eventlet.net',
    packages=setuptools.find_packages(exclude=['benchmarks', 'tests', 'tests.*']),
    install_requires=(
        'dnspython >= 1.15.0',
        'enum34;python_version<"3.4"',
        'greenlet >= 0.3',
        'monotonic >= 1.4',
        'six >= 1.10.0',
    ),
    zip_safe=False,
    # Long description comes from the README shipped next to this setup.py.
    long_description=open(
        os.path.join(
            os.path.dirname(__file__),
            'README.rst'
        )
    ).read(),
    test_suite='nose.collector',
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python",
        "Topic :: Internet",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ]
)
| 31 | 82 | 0.591613 |
1bbedd9da2f3ae8727794785e7ae01600bb03d00 | 1,575 | py | Python | scripts/extract/extractor/extract_keras.py | liuyihuicaicloud/klever-model-registry | 02cf87fa9313c1ed7bfbf39bd65ae5bb4b8ecf38 | [
"Apache-2.0"
] | 73 | 2020-07-16T03:37:25.000Z | 2022-03-24T07:07:14.000Z | scripts/extract/extractor/extract_keras.py | liuyihuicaicloud/klever-model-registry | 02cf87fa9313c1ed7bfbf39bd65ae5bb4b8ecf38 | [
"Apache-2.0"
] | 215 | 2020-07-15T06:53:02.000Z | 2021-08-21T14:44:32.000Z | scripts/extract/extractor/extract_keras.py | liuyihuicaicloud/klever-model-registry | 02cf87fa9313c1ed7bfbf39bd65ae5bb4b8ecf38 | [
"Apache-2.0"
] | 21 | 2020-07-23T10:18:34.000Z | 2022-03-18T14:17:14.000Z | import os
import json
import collections
from tensorflow import keras
from .base_extract import BaseExtrctor
MODEL_TYPE = 'Keras'
EXTENSION = '.h5'
class KerasExtractor(BaseExtrctor):
    """Extract I/O tensor descriptions and op counts from a Keras .h5 model."""

    @staticmethod
    def _describe_tensors(tensors):
        """Summarise each tensor as {name, dType, size}.

        ``size`` uses -1 for unknown dimensions.  Shared by input and
        output extraction, which were previously duplicated loops.
        """
        descriptions = []
        for tensor in tensors:
            descriptions.append({
                'name': tensor.name.split(':')[0],
                'dType': tensor.dtype.as_numpy_dtype.__name__,
                # NOTE(review): ``i.value`` is the TF1 Dimension API --
                # confirm the targeted TensorFlow version before upgrading.
                'size': [i.value if i.value else -1 for i in tensor.shape],
            })
        return descriptions

    def _extract_inputs(self):
        """Describe the model's input tensors."""
        return self._describe_tensors(self.models.inputs)

    def _extract_outputs(self):
        """Describe the model's output tensors."""
        return self._describe_tensors(self.models.outputs)

    def _extract_ops(self):
        """Count occurrences of each op type in the captured graph def."""
        origin_ops = [node.op for node in self.graph.node]
        return collections.Counter(origin_ops)

    def _load_model(self):
        """Locate the .h5 file, load it with Keras, and capture its graph def.

        Raises:
            IOError: if the model file cannot be read or parsed.
        """
        path = self._find_with_extension(EXTENSION)
        try:
            self.models = keras.models.load_model(path)
        except Exception as e:
            raise IOError('Cannot read file %s: %s.' % (path, str(e)))
        with keras.backend.get_session() as sess:
            self.graph = sess.graph_def
| 30.288462 | 74 | 0.599365 |
bf2cac3b6fa8d78a28d7cfbc6ddfa4b6019af121 | 3,565 | py | Python | cervmongo/vars.py | antcer1213/cervmongo | d0267cc43d8ecd073f4e503ff0f8d23d65f211d1 | [
"MIT"
] | 1 | 2020-08-06T05:30:05.000Z | 2020-08-06T05:30:05.000Z | cervmongo/vars.py | antcer1213/cervmongo | d0267cc43d8ecd073f4e503ff0f8d23d65f211d1 | [
"MIT"
] | null | null | null | cervmongo/vars.py | antcer1213/cervmongo | d0267cc43d8ecd073f4e503ff0f8d23d65f211d1 | [
"MIT"
] | null | null | null | # vars.py
#
# Copyright 2020 Anthony "antcer1213" Cervantes <anthony.cervantes@cerver.info>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
import typing
from enum import Enum
import datetime
from bson.objectid import ObjectId
from pymongo import MongoClient
from dateutil.parser import parse as dateparse
# INFO: Objects
# Prefer pydantic's BaseModel for data models; fall back to a stdlib
# dataclass when pydantic is not installed.
try:
    from pydantic import BaseConfig, BaseModel
    MODEL = typing.NewType("DataModel", BaseModel)
except:
    from dataclasses import dataclass
    MODEL = typing.NewType("DataModel", dataclass)
# INFO: Custom Types
# Documentation-only aliases for common value kinds.
MONGODB_URI = typing.NewType("MongoDB URI", str)
YAML = typing.NewType("YAML Document", str)
JSON = typing.NewType("JSON Document", str)
ENUM = typing.NewType("Enum", Enum)
# INFO: Static
# MongoDB sort directions (match pymongo.ASCENDING / pymongo.DESCENDING).
ASCENDING = 1
DESCENDING = -1
# INFO: Fields
OBJECT_ID = OBJ_ID = ObjectId  # aliases for bson.objectid.ObjectId
DOC_ID = typing.NewType("Document ID", OBJ_ID)
DETAILS = typing.NewType("Meta Details", dict)
# Enum bases whose members are also usable as str / int values.
class StringEnum(str, Enum): pass
class IntEnum(int, Enum): pass
# NOTE: defaults to recommended fields; overwrite depending on your schema, use utils.generate_enum
PAGINATION_SORT_FIELDS = Enum(value="Pagination Sort Fields", names=[(item, item) for item in ("_id", "created_datetime", "updated_datetime")])
class ObjectIdStr(str):
    """String type whose pydantic validator accepts only bson ObjectId values."""

    @classmethod
    def __get_validators__(cls):
        # pydantic hook: yields the validators to run for this type.
        yield cls.validate

    @classmethod
    def validate(cls, v):
        # Accept an ObjectId and hand back its string form; reject anything else.
        if isinstance(v, ObjectId):
            return str(v)
        raise ValueError("Not a valid ObjectId")
def str2bool(v):
    """Return True when *v* (stringified, case-insensitively) spells a truthy word."""
    truthy_words = ("yes", "true", "t", "1")
    return str(v).lower() in truthy_words
def str2datetime(v):
    """Return *v* unchanged if it is already a date/datetime, else parse it.

    BUG FIX: the original isinstance tuple was
    ``(datetime.date, datetime,datetime)`` -- the second element was the
    ``datetime`` *module*, not the ``datetime.datetime`` class, so any
    non-date input made isinstance itself raise TypeError before the
    string could ever be parsed.
    """
    if isinstance(v, (datetime.date, datetime.datetime)):
        return v
    # dateutil's parser handles most human-readable date strings.
    return dateparse(v)
# Mapping from canonical type keys to the callables used to coerce values.
TYPES = {}
TYPES["str"] = str
TYPES["float"] = float
TYPES["int"] = int
TYPES["abs"] = abs
TYPES["dict"] = dict
TYPES["oid"] = ObjectId
# BUG FIX: the original also assigned ``TYPES["bool"] = ObjectId`` here,
# which was clearly wrong and was immediately overwritten by the str2bool
# assignment below; the stray assignment has been removed (final mapping
# is unchanged).
TYPES["date"] = TYPES["datetime"] = str2datetime
TYPES["bool"] = str2bool

# Aliases normalising the many accepted schema spellings to TYPES keys.
SCHEMA_TYPES = {}
SCHEMA_TYPES["str"] = SCHEMA_TYPES["string"] = SCHEMA_TYPES["text"] = "str"
SCHEMA_TYPES["number"] = SCHEMA_TYPES["num"] = SCHEMA_TYPES["decimal"] = SCHEMA_TYPES["float"] = "float"
SCHEMA_TYPES["int"] = SCHEMA_TYPES["integer"] = "int"
SCHEMA_TYPES["absolute"] = SCHEMA_TYPES["abs"] = "abs"
SCHEMA_TYPES["object"] = SCHEMA_TYPES["dict"] = SCHEMA_TYPES["obj"] = "dict"
SCHEMA_TYPES["oid"] = SCHEMA_TYPES["objectid"] = SCHEMA_TYPES["object_id"] = "oid"
SCHEMA_TYPES["date"] = "date"
SCHEMA_TYPES["datetime"] = "datetime"
SCHEMA_TYPES["bool"] = SCHEMA_TYPES["boolean"] = "bool"
| 33.009259 | 143 | 0.720337 |
538ba6a00012a6bb9d5e7987a59289f1e81c8c34 | 3,857 | py | Python | src/cryptography/hazmat/primitives/smime.py | wellingtonf-souza/cryptography | 4e24d9b9f3d17bcd5c0fd9096a05f3d8f49bc963 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2021-01-07T00:45:45.000Z | 2021-05-16T23:47:52.000Z | src/cryptography/hazmat/primitives/smime.py | wellingtonf-souza/cryptography | 4e24d9b9f3d17bcd5c0fd9096a05f3d8f49bc963 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/cryptography/hazmat/primitives/smime.py | wellingtonf-souza/cryptography | 4e24d9b9f3d17bcd5c0fd9096a05f3d8f49bc963 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-01-07T00:45:46.000Z | 2021-01-07T00:45:46.000Z | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from enum import Enum
from cryptography import x509
from cryptography.hazmat.backends import _get_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec, rsa
from cryptography.utils import _check_byteslike
class SMIMESignatureBuilder(object):
    """Immutable builder that assembles data plus signers for an S/MIME signature.

    Each mutator returns a *new* builder; ``sign`` validates the combination
    of options/encoding and delegates the actual signing to the backend.
    """

    def __init__(self, data=None, signers=[]):
        # NOTE(review): the mutable default ``signers=[]`` is safe here only
        # because it is never mutated in place (mutators build new lists).
        self._data = data
        self._signers = signers

    def set_data(self, data):
        # Payload to sign; must be bytes-like and may be set exactly once.
        _check_byteslike("data", data)
        if self._data is not None:
            raise ValueError("data may only be set once")
        return SMIMESignatureBuilder(data, self._signers)

    def add_signer(self, certificate, private_key, hash_algorithm):
        # Only a fixed set of SHA digests is accepted for signing.
        if not isinstance(
            hash_algorithm,
            (
                hashes.SHA1,
                hashes.SHA224,
                hashes.SHA256,
                hashes.SHA384,
                hashes.SHA512,
            ),
        ):
            raise TypeError(
                "hash_algorithm must be one of hashes.SHA1, SHA224, "
                "SHA256, SHA384, or SHA512"
            )
        if not isinstance(certificate, x509.Certificate):
            raise TypeError("certificate must be a x509.Certificate")
        if not isinstance(
            private_key, (rsa.RSAPrivateKey, ec.EllipticCurvePrivateKey)
        ):
            raise TypeError("Only RSA & EC keys are supported at this time.")
        # Append the signer triple to a fresh builder (no in-place mutation).
        return SMIMESignatureBuilder(
            self._data,
            self._signers + [(certificate, private_key, hash_algorithm)],
        )

    def sign(self, encoding, options, backend=None):
        """Validate configuration/option combinations and produce the signature."""
        if len(self._signers) == 0:
            raise ValueError("Must have at least one signer")
        if self._data is None:
            raise ValueError("You must add data to sign")
        options = list(options)
        if not all(isinstance(x, SMIMEOptions) for x in options):
            raise ValueError("options must be from the SMIMEOptions enum")
        if (
            encoding is not serialization.Encoding.PEM
            and encoding is not serialization.Encoding.DER
        ):
            raise ValueError("Must be PEM or DER from the Encoding enum")
        # Text is a meaningless option unless it is accompanied by
        # DetachedSignature
        if (
            SMIMEOptions.Text in options
            and SMIMEOptions.DetachedSignature not in options
        ):
            raise ValueError(
                "When passing the Text option you must also pass "
                "DetachedSignature"
            )
        if (
            SMIMEOptions.Text in options
            and encoding is serialization.Encoding.DER
        ):
            raise ValueError(
                "The Text option does nothing when serializing to DER"
            )
        # No attributes implies no capabilities so we'll error if you try to
        # pass both.
        if (
            SMIMEOptions.NoAttributes in options
            and SMIMEOptions.NoCapabilities in options
        ):
            raise ValueError(
                "NoAttributes is a superset of NoCapabilities. Do not pass "
                "both values."
            )
        backend = _get_backend(backend)
        return backend.smime_sign(self, encoding, options)
class SMIMEOptions(Enum):
    """Flags controlling S/MIME signing output; values double as descriptions."""
    Text = "Add text/plain MIME type"
    Binary = "Don't translate input data into canonical MIME format"
    DetachedSignature = "Don't embed data in the PKCS7 structure"
    NoCapabilities = "Don't embed SMIME capabilities"
    NoAttributes = "Don't embed authenticatedAttributes"
| 35.063636 | 79 | 0.621727 |
8ee2289bcd50d8a7d14073e0cef1825172de6d43 | 728 | py | Python | src/collectors/vmsdoms/test/testvmsdoms.py | hermdog/Diamond | 0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47 | [
"MIT"
] | 1,795 | 2015-01-05T11:14:55.000Z | 2022-03-25T12:07:15.000Z | src/collectors/vmsdoms/test/testvmsdoms.py | hermdog/Diamond | 0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47 | [
"MIT"
] | 671 | 2015-01-02T05:57:27.000Z | 2022-03-29T22:39:05.000Z | src/collectors/vmsdoms/test/testvmsdoms.py | hermdog/Diamond | 0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47 | [
"MIT"
] | 793 | 2015-01-03T01:39:02.000Z | 2022-02-18T05:12:27.000Z | #!/usr/bin/python
# coding=utf-8
###############################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from vmsdoms import VMSDomsCollector
###############################################################################
class TestVMSDomsCollector(CollectorTestCase):
    """Smoke tests for VMSDomsCollector."""

    def setUp(self):
        # Build the collector with an empty configuration override.
        config = get_collector_config('VMSDomsCollector', {
        })
        self.collector = VMSDomsCollector(config, None)

    def test_import(self):
        # Importability check: the collector class exists and is truthy.
        self.assertTrue(VMSDomsCollector)
###############################################################################
if __name__ == "__main__":
unittest.main()
| 26.962963 | 79 | 0.487637 |
22eb6f0a9180d90f977b1672f2de365dbc693d69 | 5,523 | py | Python | databrowse/plugins/db_generic_WSGI_application/db_generic_WSGI_application.py | limatix/Databrowse | af33bc6cca930e59acc3762beeec2409d8fd8634 | [
"BSD-3-Clause"
] | 3 | 2016-09-20T07:04:09.000Z | 2018-07-17T17:31:21.000Z | databrowse/plugins/db_generic_WSGI_application/db_generic_WSGI_application.py | limatix/Databrowse | af33bc6cca930e59acc3762beeec2409d8fd8634 | [
"BSD-3-Clause"
] | 19 | 2016-10-25T07:05:28.000Z | 2018-08-07T23:18:16.000Z | databrowse/plugins/db_generic_WSGI_application/db_generic_WSGI_application.py | limatix/Databrowse | af33bc6cca930e59acc3762beeec2409d8fd8634 | [
"BSD-3-Clause"
] | 2 | 2016-10-28T00:12:42.000Z | 2016-10-28T00:18:03.000Z | #!/usr/bin/env python
###############################################################################
## Databrowse: An Extensible Data Management Platform ##
## Copyright (C) 2012-2016 Iowa State University Research Foundation, Inc. ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are ##
## met: ##
## 1. Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## 2. Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## 3. Neither the name of the copyright holder nor the names of its ##
## contributors may be used to endorse or promote products derived from ##
## this software without specific prior written permission. ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ##
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED ##
## TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A ##
## PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER ##
## OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ##
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ##
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ##
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ##
## LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ##
## NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
## ##
## This material is based on work supported by the Air Force Research ##
## Laboratory under Contract #FA8650-10-D-5210, Task Order #023 and ##
## performed at Iowa State University. ##
## ##
## DISTRIBUTION A. Approved for public release: distribution unlimited; ##
## 19 Aug 2016; 88ABW-2016-4051. ##
###############################################################################
""" plugins/renderers/db_wsgi_generic.py - Default Handler for WSGI Scripts """
import imp
import os
import os.path
import copy
from lxml import etree
from databrowse.support.renderer_support import renderer_class
class db_generic_WSGI_application(renderer_class):
    """ Default Renderer for WSGI Scripts - Simply Passes Everything Off To The Script """

    # Databrowse renderer registration metadata.
    _namespace_uri = "http://thermal.cnde.iastate.edu/databrowse/wsgigeneric"
    _namespace_local = "wsgigeneric"
    _default_content_mode = "full"
    _default_style_mode = "run_application"
    _default_recursion_depth = 2

    def dummy_start_response(self, status, headers, exc_info=None):
        # WSGI start_response stand-in: records status/headers on the request
        # instead of sending them, then forces the Content-Type to text/html.
        self._web_support.req.status = status
        for item in headers:
            self._web_support.req.response_headers[item[0]] = item[1]
            pass
        self._web_support.req.response_headers['Content-Type'] = 'text/html'
        pass

    def getContent(self):
        # Only respond when invoked by databrowse itself.
        if self._caller != "databrowse":
            return None
        else:
            if self._content_mode == "full":
                # Run the target WSGI script from its own directory so its
                # relative paths resolve, capturing output for XML embedding.
                savedCWD = os.getcwd()
                tempCWD = os.path.dirname(self._fullpath)
                os.chdir(tempCWD)
                modulename = os.path.splitext(os.path.basename(self._fullpath))[0]
                module = imp.load_source(modulename, self._fullpath)
                # Hand the script a copy of the environ with SCRIPT_FILENAME
                # rewritten to point at the script itself.
                environcopy = copy.copy(self._web_support.req.environ)
                environcopy['DATABROWSE_FILENAME'] = environcopy['SCRIPT_FILENAME']
                environcopy['SCRIPT_FILENAME'] = self._fullpath
                output = module.application(environcopy, self.dummy_start_response)
                os.chdir(savedCWD)
                del module
                del environcopy
                return etree.XML(output)
            elif self._content_mode == "raw":
                # Same as "full" but streams the script's raw response using
                # the real start_response and marks output as already done.
                savedCWD = os.getcwd()
                tempCWD = os.path.dirname(self._fullpath)
                os.chdir(tempCWD)
                modulename = os.path.splitext(os.path.basename(self._fullpath))[0]
                module = imp.load_source(modulename, self._fullpath)
                environcopy = copy.copy(self._web_support.req.environ)
                environcopy['DATABROWSE_FILENAME'] = environcopy['SCRIPT_FILENAME']
                environcopy['SCRIPT_FILENAME'] = self._fullpath
                output = module.application(environcopy, self._web_support.req.start_response)
                os.chdir(savedCWD)
                self._web_support.req.output_done = True
                del module
                del environcopy
                return output
            else:
                raise self.RendererException("Invalid Content Mode")
        pass
| 54.683168 | 94 | 0.579395 |
044cb8f7a656e23c2e49a4ca31a95877029b18d3 | 29,985 | py | Python | twitch_onair_neopixel.py | europaYuu/RaspiTwitchONAIR | 8fa7a5f457a297e9b06fb946e89c33a0d49e335f | [
"MIT"
] | null | null | null | twitch_onair_neopixel.py | europaYuu/RaspiTwitchONAIR | 8fa7a5f457a297e9b06fb946e89c33a0d49e335f | [
"MIT"
] | null | null | null | twitch_onair_neopixel.py | europaYuu/RaspiTwitchONAIR | 8fa7a5f457a297e9b06fb946e89c33a0d49e335f | [
"MIT"
] | null | null | null | # Get Stream Status from Twitch
#
# Requires registration of a Twitch Application. For more information, go to https://dev.twitch.tv/docs/api/ "Getting Started with the Twitch API". Leaving OAuth Redirect URL to http://localhost seems to work, but please let me know on twitter if this is bad practice.
# This script uses the OAuth Client Credentials Flow, which doesn't require a UI to authenticate
# Remember to set User (doesn't have to be the same as the dev account), client_id, and client_secret in config/twitch_onair_config.json (Or use the webserver to do so)
print('\n/////////////////////////////////')
print('Starting Twitch ON AIR Service...')
print('/////////////////////////////////')
print(' ')
##### This is to try to catch boot neopixel errors - didn't work so it's commented out for now
#import RPi.GPIO as GPIO
#GPIO.cleanup()
import os
import requests #For making cURL requests
import datetime
import pandas as pd #Simple datetime formatting for checking if tokens are stale
import json
import time
# graphics
import random
import math
import colorsys
import threading
from threading import Thread
###### store PID so it can be killed easily by the webserver
import pid #store PID in file so webserver can kill if needed
###### neopixels
import board
import neopixel
######## Connect to OLED Service
import rpyc
oled_service_connected = False
try:
c = rpyc.connect("localhost", 18861)
oled_service_connected = True
except:
pass
def tryOLedMessage(text, displayTime=1.0):
    """Best-effort: (re)connect to the local OLED RPC service and show *text*.

    Failures are deliberately ignored -- the OLED display is optional
    hardware and the ON AIR light must keep running without it.
    """
    global c
    global oled_service_connected
    try:
        c = rpyc.connect("localhost", 18861)
        oled_service_connected = True
        c.root.ExDrawTextBorder(text, displayTime)
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; Exception keeps the best-effort
        # behaviour without masking interpreter shutdown.
        pass
#######################################
############ CONFIGURATION ############
#######################################
# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
pixel_pin = board.D18
# The number of NeoPixels
num_pixels = 24
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
ORDER = neopixel.GRB
###### Time and Date
# get the current datetime time and format it - created here just in case we forget to call updateTime() at run
a_datetime = datetime.datetime.now()
formatted_datetime = a_datetime.isoformat()
json_datetime = json.dumps(formatted_datetime)
#define here to make it global scope; filled in after OAuth handshake
app_access_token = ''
# Catching "Connection Refused" Errors
# How many seconds to wait if server refuses our connection?
CONNREFUSE_WAIT_TIME = 30
CONNREFUSE_MAX_LOOPS = 10
CONNREFUSE_Loop = 0 #Initialize the loop count
###### Defaults ######
# Placeholders overwritten by config/twitch_onair_config.json at startup.
client_id = 'CLIENT_ID'
client_secret = 'CLIENT_SECRET'
# default twitch token age is 60 days
# NOTE(review): the comment above says 60 days but the default is 30 -- confirm intent.
token_stale_age = 30
# default update interval
update_interval = 30
#streamer to watch
user_login = 'europayuu'
#default light color when live
live_color = (255,255,255)
#default light color when off
off_color = (0,0,0)
#max brightness to limit power consumption and heat - match with twitch_onair_webserver.py
MAX_HARDWARE_BRIGHTNESS = 0.7
#default configurable brightness (scaled by MAX_HARDWARE_BRIGHTNESS on load)
led_brightness = 0.3
# Used by some pixel effects
num_rows = 3
num_columns = 8
TARGET_FRAMERATE = 20 # For effects that take a time input
# Debug Log. set to True if you want debug file output
def tryMakeLogDir():
    """Create a ./logs directory under the CWD if it does not already exist."""
    path = os.path.join(os.getcwd(), 'logs')
    try:
        # exist_ok avoids treating an already-present directory as an error
        # (the original bare ``except: pass`` hid *every* failure).
        os.makedirs(path, mode=0o777, exist_ok=True)
    except OSError:
        # Best effort only: the log directory is optional (e.g. read-only FS).
        pass
# Ensure the logs directory exists before the debug log is configured.
tryMakeLogDir()
# Debug Log. set to True if you want debug file output
ENABLE_DEBUG_LOG = False
DEBUG_LOG_FILENAME = 'logs/twitch_onair_neopixel_log.txt'
# Initial NeoPixel strip built from the defaults above; it is rebuilt by
# tryLoadConfig() whenever the configuration file changes.
pixels = neopixel.NeoPixel(
    pixel_pin,
    num_pixels,
    brightness=led_brightness,
    auto_write=False,
    pixel_order=ORDER
)
def tryMakeConfigDir():
    """Create a ./config directory under the CWD if it does not already exist."""
    path = os.path.join(os.getcwd(), 'config')
    try:
        # exist_ok avoids treating an already-present directory as an error
        # (the original bare ``except: pass`` hid *every* failure).
        os.makedirs(path, mode=0o777, exist_ok=True)
    except OSError:
        # Best effort only: config loading handles a missing directory later.
        pass
#######################################
########## END CONFIGURATION ##########
#######################################
# State Machine
first_loop = True
last_config_file_time = "-1"
ASYNC_LED_STATE = 'IDLE'
######## DEBUG LOG ########
if ENABLE_DEBUG_LOG:
import logging
logging.basicConfig(filename=DEBUG_LOG_FILENAME, level=logging.DEBUG)
def timesStamp():
    """Return the current local time rendered as a "[timestamp] " log prefix."""
    now = datetime.datetime.now()
    return "[{}] ".format(now)
def printLog(message='', alsoprint=True, level='debug', include_timestamp=True):
    """Write *message* to the debug log (when enabled) and/or to stdout."""
    if include_timestamp:
        message = timesStamp() + message
    if ENABLE_DEBUG_LOG:
        # Route the message to the matching logging level; anything
        # unrecognised is treated as a warning.
        if level == 'debug':
            logging.debug(message)
        elif level == 'info':
            logging.info(message)
        else:
            logging.warning(message)
    if alsoprint:
        print(message)
if ENABLE_DEBUG_LOG:
separator = "********" + "\n"
printLog("\n" + separator + 'twitch_onair_neopixel.py debug Log enabled, writing to file ' + DEBUG_LOG_FILENAME + "\n" + separator )
########
######## Math
########
def clamp(n, smallest, largest):
    """Restrict *n* to [smallest, largest] (smallest wins if bounds invert)."""
    upper_bounded = min(n, largest)
    return max(smallest, upper_bounded)


def saturate(n):
    """Clamp *n* to the 0-255 byte range."""
    return clamp(n, 0, 255)  # I miss HLSL


def lerp(a=1.0, b=1.0, f=0.5):
    """Linearly interpolate from *a* to *b* by fraction *f*."""
    return (a * (1.0 - f)) + (b * f)
def distance2D(vec1=(0.0, 0.0), vec2=(0.0, 0.0)):
    """Euclidean distance between two 2-D points (floored at 0.01)."""
    dx = vec2[0] - vec1[0]
    dy = vec2[1] - vec1[1]
    squared = dx ** 2.0 + dy ** 2.0
    # Preserve the original floor of 1e-4 on the squared distance, which
    # keeps the result >= 0.01 and avoids returning 0 for coincident points.
    return math.sqrt(max(squared, 0.0001))
def hsv2rgb(h, s, v):
    """Convert HSV components (0-1 floats) to an (R, G, B) tuple of 0-255 ints."""
    r, g, b = colorsys.hsv_to_rgb(h, s, v)
    return (round(r * 255), round(g * 255), round(b * 255))
def rgb2hsv(r, g, b):
    """Convert 0-255 RGB components to a colorsys HSV tuple of 0-1 floats."""
    normalized = tuple(float(component) / 255.0 for component in (r, g, b))
    return colorsys.rgb_to_hsv(*normalized)
def isEven(num):
    """True for even *num* -- used because wiring alternates direction per pixel row."""
    return num % 2 == 0
def getTickLength(framerateDivider=1.0):
    """Seconds per animation frame at TARGET_FRAMERATE / framerateDivider fps."""
    effective_framerate = float(TARGET_FRAMERATE) / framerateDivider
    return 1.0 / effective_framerate
# Refresh the shared "current time" snapshot used across the script.
def updateTime():
    """Update module globals a_datetime / formatted_datetime / json_datetime."""
    global a_datetime, formatted_datetime, json_datetime
    a_datetime = datetime.datetime.now()
    formatted_datetime = a_datetime.isoformat()
    json_datetime = json.dumps(formatted_datetime)
###### Load configuration File ######
def tryLoadConfig():
    """Reload config/twitch_onair_config.json into the module globals.

    The reload is skipped when the file's mtime is unchanged since the last
    successful load.  Each key is read independently, so a missing or
    malformed key keeps its default instead of aborting the whole load.

    BUG FIX: ``num_rows`` and ``num_columns`` were previously assigned
    without a ``global`` declaration, so the configured values only bound
    locals and never took effect.
    """
    global user_login, client_id, client_secret, token_stale_age
    global update_interval, num_pixels, live_color, off_color
    global led_brightness, pixels, num_rows, num_columns
    global last_config_file_time

    config_path = 'config/twitch_onair_config.json'
    tryMakeConfigDir()
    if not os.path.isfile(config_path):
        printLog('ERROR: Configuration file not found, using default parameters. Will most likely break')
        return

    # Did the file change from last time?  st_mtime is index 8 of os.stat.
    config_moddate = os.stat(config_path)[8]
    timestamp = datetime.datetime.fromtimestamp(config_moddate).strftime('%Y-%m-%dT%H:%M:%S')
    if timestamp == last_config_file_time:
        printLog('No changes in configuration file. Config Load Skipped.')
        return

    printLog('Configuration file found. Loading Config. Modified: ' + timestamp.replace("T", " "))
    with open(config_path) as json_config_file:
        configData = json.load(json_config_file)

    json_read_error = 'Error reading key value. Default key value used for '
    # NOTE(review): eval() executes arbitrary expressions from the config
    # file; acceptable only because the file is locally controlled.
    # Consider ast.literal_eval for the color/brightness values.
    evaluate_str = lambda v: eval(str(v))
    # (config key, module global name, converter) -- one entry per setting.
    settings = (
        ('user', 'user_login', lambda v: v),
        ('client_id', 'client_id', lambda v: v),
        ('client_secret', 'client_secret', lambda v: v),
        ('token_stale_age', 'token_stale_age', int),
        ('update_interval', 'update_interval', int),
        ('num_pixels', 'num_pixels', int),
        ('live_color', 'live_color', evaluate_str),
        ('off_color', 'off_color', evaluate_str),
        ('led_brightness', 'led_brightness', lambda v: eval(v)),
        ('num_rows', 'num_rows', int),
        ('num_columns', 'num_columns', int),
    )
    for key, global_name, convert in settings:
        try:
            globals()[global_name] = convert(configData[key])
        except Exception:
            printLog(json_read_error + key)

    # Scale and cap the user brightness to protect power budget / heat.
    led_brightness = clamp((led_brightness * MAX_HARDWARE_BRIGHTNESS), 0, MAX_HARDWARE_BRIGHTNESS)
    # Rebuild the strip object so the new pixel count / brightness apply.
    pixels = neopixel.NeoPixel(
        pixel_pin, num_pixels, brightness=led_brightness, auto_write=False, pixel_order=ORDER
    )
    last_config_file_time = timestamp
tryLoadConfig()
########
######## LED Graphics
########
def pixelFlood(color):
    """Set every pixel on the strip to *color* and push the update."""
    pixels.fill(color)
    pixels.show()


def pixelClear():
    """Turn the whole strip off (flood with black)."""
    pixelFlood((0, 0, 0))
### Convert screenspace to strip number ###
def screenPixelInRange(pos=(0.0, 0.0)):
    """True when both normalized coordinates of *pos* lie within [0, 1]."""
    x, y = pos
    return (0.0 <= x <= 1.0) and (0.0 <= y <= 1.0)
def pixelScrToStrip( screenspace_pos=(0.0 ,0.0) ): #Converts normalized screen-space coordinates to nearest strip pixel. Check documentation for wiring setup.
    """Map a normalized (x, y) screen position to a strip pixel index.

    Returns -1 when the position lies outside the unit square.  The strip is
    wired serpentine: even-numbered rows run right-to-left, which is why the
    column is mirrored for those rows below.
    """
    global num_pixels
    global num_rows
    global num_columns
    if screenPixelInRange(screenspace_pos):
        # int() truncation vs round() differs for even/odd column counts --
        # presumably to centre the mapping; TODO confirm against the wiring docs.
        if isEven(num_columns):
            column = clamp(
                int( screenspace_pos[0] * float(num_columns) ),
                0,
                num_columns - 1
            )
        else:
            column = clamp(
                round( screenspace_pos[0] * float(num_columns) ),
                0,
                num_columns - 1
            )
        if isEven(num_rows): #I haven't actually tested this
            row = int( screenspace_pos[1] * float(num_rows) )
        else:
            row = round( screenspace_pos[1] * float(num_rows) )
        # Number of pixels in the rows above; rows 0 and 1 share offset 0.
        rowOffset = clamp( (row - 1), 0, num_rows ) * num_columns
        if isEven(row) and (row > 0):
            # Even rows are wired right-to-left, so mirror the column.
            nearest_pixel = rowOffset + ( num_columns - column ) - 1
        else:
            nearest_pixel = rowOffset + column
        nearest_pixel = clamp( (nearest_pixel), 0, num_pixels - 1 )
        #print( 'row: ' + str(row) + ' rowOffset: ' + str(rowOffset) + ' column: ' + str(column) + ' nearest_pixel: ' + str(nearest_pixel) + 'isEven(): ' + str(isEven(row)) + ' row > 0: ' + str(row > 0) ) #uncomment this for debug
        return nearest_pixel
    else:
        return -1
def drawToScreen(color=(255, 255, 255), pos=(0.0, 0.0)):
    """Light the strip pixel nearest the normalized screen position *pos*."""
    target = pixelScrToStrip(screenspace_pos=pos)
    if target < 0:
        return  # position was off-screen; nothing to draw
    try:
        pixels[target] = color
        pixels.show()
    except IndexError:
        # The mapping can exceed the physical strip length; ignore quietly.
        pass
### Pixel Index to Screen UV
# Returns normalized screen space coordinates from a pixel strip ID input
def stripToUV(pixel=0):
    """Inverse of pixelScrToStrip: strip pixel index -> normalized (x, y).

    Accounts for the serpentine wiring by mirroring x on even rows.
    """
    global num_pixels
    global num_rows
    global num_columns
    fpixel = float(pixel)
    fnum_rows = float(num_rows)
    fnum_columns = float(num_columns)
    # The n/(n-1) scale factors stretch the grid so the last row/column
    # maps to exactly 1.0.
    posy = ( fnum_rows / (fnum_rows - 1.0) ) * ( float( int(fpixel / num_columns) ) / fnum_rows )
    posx = ( ( fnum_columns / (fnum_columns - 1.0) ) * ( fpixel % fnum_columns ) ) / fnum_columns
    row = int( int(posy * fnum_rows) )
    if row != num_rows:
        row += 1
    if isEven( row ):
        # Even rows are wired right-to-left; mirror x to compensate.
        posx = 1.0 - posx
    return (posx,posy)
def drawScreenUVs():
    """Debug pattern: paint every pixel with its own UV coordinate
    (red channel = x, green channel = y)."""
    global num_pixels
    for idx in range(num_pixels):
        u, v = stripToUV(pixel=idx)
        pixels[idx] = [
            saturate( (int( u * float(255)) ) ),
            clamp((int( v * float(255) )),0,255),
            0
        ]
    pixels.show()
### Draw rainbow across the matrix. direction: 0 = horizontal, 1 = vertical.
def drawRainbow(offset=0.0,scale=1.0, direction=0):
    """Fill the matrix with an HSV rainbow along the chosen axis."""
    global num_pixels
    global num_rows
    direction = clamp(direction, 0, 1) #never trust the users, they are evil
    for idx in range(num_pixels):
        coord = stripToUV(idx)[direction]
        # Compensate for the UV stretch, then apply scale and phase offset.
        hue = ( coord / ( 1 + (1 / num_rows) ) ) * scale + offset
        pixels[idx] = hsv2rgb(hue,1.0,1.0)
    pixels.show()
### Scrolling Rainbow
def drawAnimateRainbow(length=1.0, framerateDivider=1.0, scale=1.0, reverse=False, direction=0):
    """Scroll the rainbow across the matrix for `length` seconds."""
    tick = getTickLength(framerateDivider=framerateDivider)
    frames = int( length / tick)
    for frame in range(frames):
        phase = float(frame) / float(frames)
        if reverse:
            phase = 1 - phase
        drawRainbow(offset=phase, scale=scale, direction=direction)
        time.sleep(tick)
### color cycle
def drawColorCycle(length=8.0, framerateDivider=1.0, reverse=False, offset=0.0):
    """Fade the whole array through the hue wheel over `length` seconds."""
    tick = getTickLength(framerateDivider=framerateDivider)
    frames = int( length / tick )
    for frame in range(frames):
        hue = float(frame) / float(frames) + offset
        if reverse:
            hue = 1 - hue
        pixelFlood(hsv2rgb(hue,1.0,1.0))
        time.sleep(tick)
### Draw Circle
### Todo: add / subtract / multiply blend modes
### Todo: allow aspect ratio adjustment
def drawCircle(color=(255,255,255), radius=0.2, origin=(0.5,0.5), invert=False, power=6.0):
    """Render a soft-edged circle at `origin` using a per-pixel distance
    field. `power` sharpens the falloff; `invert` draws the outside instead.
    Small radii are additionally dimmed so ripples fade in from black."""
    global num_pixels
    fadeRadius = 0.5
    # Below fadeRadius, overall brightness scales linearly with radius.
    if radius <= fadeRadius:
        fade = radius * 1/fadeRadius
    else:
        fade = 1.0
    radius = clamp(radius, 0.0001, (radius * 2) )
    for x in range(num_pixels):
        uv = stripToUV(x)
        # Signed-distance-style falloff: distance from origin, biased by radius.
        sdf = distance2D( vec1=origin, vec2=(uv[0],uv[1]) )
        sdfClampBiasScale = clamp( (sdf * radius), 0, 1.0) * ( 1 / clamp(radius, 0.0001, radius))
        if not invert:
            sdfClampBiasScale = 1.0 - sdfClampBiasScale
        # Sharpen the edge, then apply the small-radius fade.
        sdfClampBiasScale = clamp( ( (sdfClampBiasScale ** power) * power ) , 0, 1)
        sdfClampBiasScale = sdfClampBiasScale * fade
        colorResult = (
            saturate( int( sdfClampBiasScale * float(color[0]) ) ),
            saturate( int( sdfClampBiasScale * float(color[1]) ) ),
            saturate( int( sdfClampBiasScale * float(color[2]) ) )
        )
        pixels[x] = colorResult
    pixels.show()
### Animates circle growing bigger in size
def drawRipple(color=(255,255,255), startRadius=0.0, endRadius=4.0, length=1.0, framerateDivider=1.0, reverse=False):
    """Animate a circle growing (or shrinking when `reverse`) between the
    two radii over `length` seconds."""
    tick = getTickLength(framerateDivider=framerateDivider)
    frames = int( length / tick )
    for frame in range(frames):
        t = float(frame) / float(frames)
        if reverse:
            t = 1 - t
        drawCircle(color=color, radius=lerp(a=startRadius,b=endRadius,f=t))
        time.sleep(tick)
# Smooth fades
def pixelFadeIn(color,length):
    """Ramp the whole array from black up to `color` in 16 brightness steps."""
    for step in range(0,256,16):
        scaled = tuple( saturate( int( (step*ch) / 255 ) ) for ch in color )
        pixels.fill((scaled))
        pixels.show()
        time.sleep(length/TARGET_FRAMERATE)
    # Land exactly on the target color.
    pixels.fill((color))
    pixels.show()
def pixelFadeOut(color,length):
    """Ramp the whole array from `color` down to black in 16 steps."""
    for step in range(256,0,-16):
        scaled = tuple( saturate( int( (step*ch) / 255 ) ) for ch in color )
        pixels.fill((scaled))
        pixels.show()
        time.sleep(length/TARGET_FRAMERATE)
    # Finish fully dark.
    pixels.fill((0,0,0))
    pixels.show()
# Flash entire array
def pixelFlash(color=(255,255,255), numFlashes=4, onTime=0.1, offTime=0.1):
    """Blink the entire array on/off `numFlashes` times."""
    for _ in range(numFlashes):
        pixels.fill(color)
        pixels.show()
        time.sleep(onTime)
        pixels.fill((0,0,0))
        pixels.show()
        time.sleep(offTime)
# Random flashing
def pixelRandom( color=(255,255,255 ), numIterations=8, flashDots= 3, onTime=0.05, offTime=0.1 ):
    """Flash `flashDots` randomly-chosen pixels, repeated `numIterations` times."""
    flashDots = clamp(flashDots, 1, num_pixels-1)
    for _ in range(numIterations):
        for dot in random.sample( range( 0, (num_pixels) ), flashDots):
            try:
                pixels[dot] = color
            except IndexError:
                pass
        pixels.show()
        time.sleep(onTime)
        pixelClear()
        time.sleep(offTime)
#sequential with a soft tail
def pixelSequential(color=(255,98,0), length=2.0, fadeLength=4, reverse=False, clearPrevious=True, hold=False):
    """Sweep one bright pixel with a trailing fade across the strip over
    `length` seconds. `reverse` runs end-to-start; `hold` keeps the final
    frame instead of clearing."""
    # Overscan so the tail can fully enter and leave the strip.
    padding = fadeLength * 2
    start = 0 - padding
    stop = num_pixels + padding
    step = 1
    #Loop
    for x in range(start,stop,step):
        if reverse:
            x = ( num_pixels - x ) - 1
        else:
            pass
        fadeLength = clamp(fadeLength, 1, fadeLength)
        #Fade
        if fadeLength > 1:
            for y in range(fadeLength):
                # Tail brightness falls off with distance behind the head.
                brightnessScalar = 1.0 - ( float(y) / float(fadeLength) ) ** 0.5
                try:
                    colorResult = [
                        int( clamp( ( float( color[0] ) * brightnessScalar ), 0.0, 256.0 ) ),
                        int( clamp( ( float( color[1] ) * brightnessScalar ), 0.0, 256.0 ) ),
                        int( clamp( ( float( color[2] ) * brightnessScalar ), 0.0, 256.0 ) )
                    ]
                    if not reverse:
                        if 0 <= ( x - y ) < num_pixels:
                            pixels[ x - y ] = colorResult
                    else:
                        # NOTE(review): bound-checks x but indexes x + y — asymmetric
                        # with the forward branch; negative x would also wrap to the
                        # strip's far end in Python. Confirm intended.
                        if 0 <= x <= num_pixels:
                            pixels[ x + y ] = colorResult
                except IndexError:
                    pass
        else:
            pass
        #Brightest Pixel - the Fade also sets this so this overwrites that value
        try:
            if 0 <= x <= num_pixels:
                pixels[x] = color
            else:
                pass
        except IndexError:
            pass
        #Clear Previous
        if clearPrevious:
            try:
                # Blank the pixel the tail has just moved past.
                if not reverse:
                    pixels[ x - fadeLength] = (0,0,0)
                else:
                    pixels[ x + fadeLength] = (0,0,0)
            except IndexError:
                pass
        else:
            pass
        pixels.show()
        # Per-step delay so the whole sweep takes roughly `length` seconds.
        time.sleep( ( 1 / num_pixels) * length )
    if not hold:
        pixelClear()
# Draw a single column
def pixelDrawColumn(color=(255,255,255), posX=0.0 ):
    """Paint every row of the column at normalized x position `posX`."""
    for row in range(num_rows):
        drawToScreen(color=color, pos=(posX, ( float(row) / float(num_rows-1) ) ))
# Horizontal Wipe
def pixelHorizontalWipe(color=(255,98,0), length=1.0, fadeLength=0.0, reverse=False, clearPrevious=False, hold=True): #I couldn't get fade length to work properly for this... maybe because it's converting floating point screenspace to integer and I'm not sure how python does this intrinsically?
    """Sweep a bright column (with optional trailing fade) horizontally
    across the matrix. `fadeLength` is in normalized screen units."""
    global num_columns
    global num_rows
    fadeLength = clamp(fadeLength, 0.01, fadeLength)
    # Overscan in columns so the fade can fully enter and leave the screen.
    padding = int((fadeLength * float(num_columns) )) * 2
    start = 0 - padding
    stop = num_columns + padding
    step = 1
    for x in range(start, stop, step):
        # Normalized head position for this frame.
        x2 = float(x) / float(num_columns-1)
        if reverse:
            x2 = 1 - x2
        else:
            pass
        if fadeLength > 0.01:
            for y in range( int( fadeLength + float(num_columns) ) ):
                brightnessScalar = 1.0 - ( (float(y) / num_columns) / fadeLength ) ** 0.1
                colorResult = [
                    int( clamp( ( float( color[0] ) * brightnessScalar ), 0.0, 256.0 ) ),
                    int( clamp( ( float( color[1] ) * brightnessScalar ), 0.0, 256.0 ) ),
                    int( clamp( ( float( color[2] ) * brightnessScalar ), 0.0, 256.0 ) )
                ]
                # NOTE(review): passes a raw column index (x - y) where
                # pixelDrawColumn expects a normalized posX — likely related to
                # the fade problem mentioned in the header comment.
                if not reverse:
                    pixelDrawColumn( color=colorResult, posX=(x - y))
                else:
                    pixelDrawColumn( color=colorResult, posX=(x + y))
        else:
            pass
        #Brightest Column - the Fade also sets this so this overwrites that value
        pixelDrawColumn(color=color, posX=x2)
        #Clear Previous
        if clearPrevious:
            if not reverse:
                pixelDrawColumn( color=(0,0,0), posX=( x2 - (fadeLength+(1/num_columns))) )
                # NOTE(review): duplicated call — appears redundant; confirm.
                pixelDrawColumn( color=(0,0,0), posX=( x2 - (fadeLength+(1/num_columns))) )
            else:
                pixelDrawColumn( color=(0,0,0), posX=( x2 + (fadeLength+(1/num_columns))) )
                # NOTE(review): duplicated call — appears redundant; confirm.
                pixelDrawColumn( color=(0,0,0), posX=( x2 + (fadeLength+(1/num_columns))) )
        else:
            pass
        time.sleep( (1 / (num_columns * 2) ) * length ) #this is probably wrong, length should be the length of the total animation but my brain is fried
    if not hold:
        pixelClear()
def pixelError():
    """Drop the async LED thread back to idle, then flash red as an error cue."""
    global ASYNC_LED_STATE
    ASYNC_LED_STATE = 'IDLE'
    time.sleep(1.5)
    pixelFlash(color=(255,0,0), numFlashes=6, onTime=0.25, offTime=0.1)
# Attempt to authenticate using Client ID and secret to obtain a token
def pixelAuth():
    """Flag authentication-in-progress; the AsyncLED thread renders the
    actual purple ripple animation while this state is set."""
    global ASYNC_LED_STATE
    ASYNC_LED_STATE = 'AUTH'
    #pixelFlash((148,0,255),3,0.2,0.2) #Old
def pixelAuthSuccess(wait=0.0):
    """After `wait` seconds, leave the AUTH animation and flash green."""
    global ASYNC_LED_STATE
    time.sleep(wait)
    ASYNC_LED_STATE = 'IDLE'
    time.sleep(0.5)
    tryOLedMessage('CONNECTED', 0.5)
    pixelFlash(color=(0,255,0), numFlashes=4, onTime=0.1, offTime=0.1)
# Stream went ONLINE but previously was offline
def pixelLiveChanged():
    """Celebrate going live: random sparkle, pause, then fade to live color."""
    pixelRandom( color=live_color, numIterations=8, flashDots=5, onTime=0.025, offTime=0.025 )
    time.sleep(0.5)
    pixelFadeIn( live_color, 1.0)
# Stream went OFFLINE but previously was online
def pixelOffChanged():
    """Play a shrinking ripple in the live color, then blank the array."""
    #pixelFlash(live_color, 1, 0.05, 0.5) #Old
    #pixelFadeOut( live_color, 1.0) #Old
    drawRipple(color=live_color, startRadius=0.0, endRadius=4.0,
               length=1.0, framerateDivider=1.0, reverse=True)
    pixelClear()
# Start sequence = the Fadein/Out acts as a full array self-test
def pixelStart():
    """Startup animation; the white fade in/out doubles as a self-test."""
    pixelRandom( color=(255,98,0), numIterations=4, flashDots=6, onTime=0.15, offTime=0.3 )
    time.sleep(0.5)
    pixelFadeIn( (255,255,255),1.0 )
    pixelFadeOut( (255,255,255),1.0 )
########
######## Twitch API
######## The meat of the API calls / response parsing happens here
# does token exist?
def tokenFileExist():
    """Return True if a cached app access token file is present on disk."""
    token_path = 'config/twitch_appaccesstoken.json'
    return os.path.isfile(token_path)
# open the token file
def openTokenFile(return_token):
    """Read the cached token file. When `return_token` is truthy, return the
    token string; otherwise return the token's age as a pandas Timedelta.

    NOTE(review): the function returns inside the loop, so only the first
    entry in data['tokens'] is ever examined — confirm that's intended.
    """
    updateTime()
    with open('config/twitch_appaccesstoken.json') as json_file:
        data = json.load(json_file)
        for p in data['tokens']:
            # time difference
            current_time = pd.to_datetime(formatted_datetime)
            unformatted_stored_time = p['time']
            # errors='coerce' turns an unparseable stamp into NaT rather than raising.
            stored_time = pd.to_datetime(unformatted_stored_time,infer_datetime_format=True, errors='coerce')
            difference = current_time - stored_time
            #printLog( 'App Access Token: ' + p['token'])
            #printLog( 'Stored Time: ' + str(stored_time) )
            #printLog( 'Current Time: ' + str(current_time) )
            #printLog( 'Time Since Token: ' + str(difference) )
            #printLog( 'Days Since Token: ' + str(difference.days) )
            if return_token:
                return p['token']
            else:
                #return int(difference.days)
                return difference
# request a new token from Twitch API using client_id and client_secret, then store the token in config/twitch_appaccesstoken.json
def createTokenFile():
    """Request a fresh app access token via the OAuth client-credentials
    flow and cache it (with a timestamp) in the token JSON file. Failures
    are logged; the caller re-checks tokenFileExist() afterwards."""
    pixelAuth()
    updateTime()
    data = {
        'client_id': client_id,
        'client_secret': client_secret,
        'grant_type': 'client_credentials',
    }
    response = requests.post('https://id.twitch.tv/oauth2/token', data=data)
    ResponseJson = response.json()
    if 'access_token' in ResponseJson:
        ResponseToken = ResponseJson['access_token']
        # Mask most of the token before logging it.
        ResponseTokenObfuscated = ResponseToken.replace( ( str(ResponseToken) )[0:26], 'xxxxxxxxxxxxxxxxxxxxxxxxxx' )
        printLog('Token fetched: ' + ResponseTokenObfuscated)
        #store the current token and date into file
        # construct our data json
        data = {}
        data['tokens'] = []
        data['tokens'].append({
            'token': ResponseToken,
            'time': json_datetime.strip('\"'),
        })
        with open('config/twitch_appaccesstoken.json', 'w') as outfile:
            json.dump(data, outfile)
        pixelAuthSuccess(wait=2.5)
    else:
        printLog(str(ResponseJson))
        printLog('createTokenFile(): Error Creating Token')
        #tryOLedMessage('Error Creating Token', 0.25)
        #pixelError() main loop already calls this
        time.sleep(0.5) #Just in case, probably not necessary
def checkConfigUpdate():
    """Return True (and consume the marker file) if a config-updated flag
    was written since the last check."""
    marker = 'temp/twitch_onair_config_updated.txt'
    changed = os.path.isfile(marker)
    if changed:
        os.remove(marker)
    return changed
# Checks if user_login is live using Get Streams example on twitch API docs
# More info: https://dev.twitch.tv/docs/api/reference#get-streams
# Returns 1 if user_login is online, 0 if user_login is offline, and -1 if there was an authentication error
def isLive(user_login):
    """Return 1 if `user_login` is live, 0 if offline, -1 on an API/auth
    error, and -2 when the request itself could not be completed.

    Refreshes the cached app access token first when it is missing or stale.
    """
    global first_loop
    # Fix: CONNREFUSE_Loop is incremented below; without this declaration the
    # assignment made it function-local and reading it raised UnboundLocalError.
    global CONNREFUSE_Loop
    app_access_token = None  # guard against use-before-assignment below
    updateTime()
    token_file_exists = tokenFileExist()
    if token_file_exists:
        token_age_verbose = openTokenFile(0)
        token_age = int (token_age_verbose.days)
        if token_age <= token_stale_age:
            printLog('Access token is valid. Age: ' + str(token_age) + ' days. Verbose token age: [' + str(token_age_verbose) +']' )
            app_access_token = openTokenFile(1)
            if first_loop:
                pixelAuthSuccess()
                first_loop = False
        else:
            printLog('Token is stale, fetching new access token. age: ' + str(token_age) + ' days. Verbose token age: [' + str(token_age_verbose) +']' )
            tryOLedMessage('Fetch New Token')
            createTokenFile()
            if tokenFileExist():
                app_access_token = openTokenFile(1)
                first_loop = False
    else:
        printLog('Token doesn\'t exist. fetching new access token')
        tryOLedMessage('Fetch New Token')
        createTokenFile()
        if tokenFileExist():
            app_access_token = openTokenFile(1)
            first_loop = False
    if tokenFileExist() and app_access_token is not None:
        headers = {
            'Authorization': 'Bearer ' + app_access_token,
            'Client-Id': client_id,
        }
        url = 'https://api.twitch.tv/helix/streams?user_login=' + user_login
        try:
            response = requests.get(url, headers=headers)
            ResponseJson = response.json()
            if 'data' in ResponseJson:
                # Helix returns one entry per live stream; empty list = offline.
                return len(ResponseJson['data'])
            else:
                return (-1)
        except Exception:
            if CONNREFUSE_Loop < CONNREFUSE_MAX_LOOPS:
                # Fix: these settings are numbers — concatenating them to the
                # message raised TypeError before the log could be written.
                printLog("Connection Refused by the server... Sleeping for " + str(CONNREFUSE_WAIT_TIME) + "seconds. This is attempt " + str(CONNREFUSE_Loop) + "/" + str(CONNREFUSE_MAX_LOOPS) + ". Neopixel service will restart when CONNREFUSE_MAX_LOOPS is reached.")
                CONNREFUSE_Loop += 1
                # Extra careful here just in case user injects a bad time into this part
                wait_time = (CONNREFUSE_WAIT_TIME - clamp( update_interval, 0.5, update_interval+0.5 ))
                wait_time_clamped = clamp(wait_time, 0.5, wait_time)
                time.sleep( wait_time_clamped )
                printLog("Continuing with next connection...")
                return (-2)
            else:
                # Too many consecutive failures: relaunch the whole service.
                os.system('python3 twitch_onair_neopixel.py')
                return (-2)  # fix: previously fell through returning None
    else:
        return (-2)
########
######## State Machine
########
# Last-known live status and the previous poll's value; comparing the two
# detects online/offline transitions in the Main loop.
live = 0
previous_live = 0
########
######## Debug Functions
########
def debugLive(user_login):
    """Query the live status once and log the result (terminal debug aid)."""
    global live
    status = isLive(user_login)
    live = status
    if status == 1:
        printLog(user_login + ' is live')
    elif status == 0:
        printLog(user_login + ' is offline')
    else:
        printLog('main: Authentication Error')
        tryOLedMessage('Authentication Error')
###### Stop
def stopLive():
    """Shutdown hook: log the stop message and blank the LEDs."""
    for message in ('', 'Stopping Twitch ON AIR Service...'):
        printLog(message)
    pixelClear()
###### debug
# Uncomment below if you just want to print to terminal
#debugLive(twitch_user_login)
########
######## Startup
########
# Only allow a single instance of this
def tryKillNeopixelService():
    """Stop an already-running neopixel systemd service so only one process
    drives the strip, playing a reverse wipe as visual feedback."""
    print('twitch_onair_webserver: Killing Neopixel Service...')
    try:
        os.system('sudo systemctl stop twitch_onair_neopixel_service.service')
        pixelSequential(length=1.0, reverse=True)
        pixelClear()
    except:
        # Best effort: the service may simply not be running.
        pass
#tryKillNeopixelService()
# Record this process's PID so the web server can find/kill us later.
pid.writePID('neopixel') #filename is referenced by twitch_onair_webserver - make sure these are synchronized
####### Launch OLED Service
def kill_oled_service():
    """Kill any running oled.py helper process.

    Bug fix: the old code treated os.system's return value (a shell exit
    status) as a PID and then passed the literal string 'oled_pid' to
    pkill, so it never matched the real process. Match the script name
    directly instead.
    """
    try:
        # pgrep exits with status 0 when at least one matching process exists.
        if os.system('pgrep -f oled.py') == 0:
            os.system('pkill -9 -f oled.py')
    except:
        pass
#kill_oled_service()
# Self-starting daemon thread that launches the external OLED display script.
class LaunchOLED(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.daemon = True
        # Start immediately on construction.
        self.start()
    def run(self):
        # Blocks this thread for as long as the OLED script runs.
        os.system('sudo python3 /home/pi/oled.py')
# Startup animation + OLED notice, run once at launch.
pixelStart()
tryOLedMessage('Neopixels Started')
########
######## Main Loop
########
class Main(Thread):
    """Worker thread: polls Twitch on a fixed interval and drives the LED
    state transitions (online / offline / auth error / config reload)."""
    def __init__(self):
        Thread.__init__(self)
        self.daemon = True
        # Start polling immediately on construction.
        self.start()
    def run(self):
        global previous_live
        global live
        global user_login
        global update_interval
        global ASYNC_LED_STATE
        while True:
            try:
                live = isLive(user_login)
                # Web UI wrote a config-change marker: show reload feedback.
                if checkConfigUpdate():
                    pixelClear()
                    tryOLedMessage('Config Updated')
                    pixelHorizontalWipe(color=live_color,length=0.25)
                    time.sleep(0.2)
                    pixelFadeOut(color=live_color,length=0.25)
                    time.sleep(0.5)
                if live >= 1:
                    printLog(user_login + ' is live')
                    if previous_live != live: #Did our live status change from the last check?
                        printLog('Live status has changed, calling pixelLiveChanged()')
                        tryOLedMessage('Stream Online')
                        pixelLiveChanged()
                    else:
                        # Still live: keep the array solid in the live color.
                        pixelFlood(live_color)
                elif live == 0:
                    printLog(user_login + ' is offline')
                    if previous_live != live: #Did our live status change from the last check?
                        printLog('Live Status changed, calling PixelOffChanged()')
                        tryOLedMessage('Stream Offline')
                        pixelOffChanged()
                        time.sleep(0.5)
                        pixelFadeIn(off_color, 0.5)
                    else:
                        #pixelClear()
                        pixelFlood(off_color)
                else:
                    # Negative status: authentication / request failure.
                    printLog('main(): Authentication Error')
                    tryOLedMessage('Authentication Error')
                    pixelError()
                previous_live = live
                update_interval = clamp(update_interval, 0.5, update_interval+0.5 ) # Clamp minimum to not kill CPU
            except:
                printLog('Exception Occurred in Main(). Will try again next update')
            time.sleep(update_interval) # Delay in the loop to not kill CPU
            #printLog('Update Interval: ' + str(update_interval) + ' seconds')
            # Re-read config each cycle so web-UI changes take effect live.
            tryLoadConfig();
            print('Heartbeat, PID: ' + str(os.getpid())) # Debugging
########
######## Async LED Thread
########
class AsyncLED(Thread):
    """Daemon thread that animates state-driven effects (currently the AUTH
    ripple) without blocking the Main polling loop."""
    def __init__(self):
        Thread.__init__(self)
        self.daemon = True
        # Start animating immediately on construction.
        self.start()
    def run(self):
        global ASYNC_LED_STATE
        while True:
            try:
                # Authenticating
                if ASYNC_LED_STATE == 'AUTH':
                    tryOLedMessage('AUTHENTICATING', 0.5)
                    drawRipple(color=(148,0,255), startRadius=0.0, endRadius=4.0, length=0.2, framerateDivider=1.0, reverse=False)
                    # Re-check the state between steps so the animation
                    # bails out quickly once auth completes.
                    if ASYNC_LED_STATE == 'AUTH': time.sleep(0.1)
                    else: pass
                    if ASYNC_LED_STATE == 'AUTH': drawRipple(color=(148,0,255), startRadius=0.0, endRadius=4.0, length=0.2, framerateDivider=1.0, reverse=True)
                    else: pass
                    pixelClear()
                    time.sleep(0.1)
                else:
                    # Idle: nothing to animate, just poll the state flag.
                    time.sleep(0.2)
            except:
                printLog('Exception occured in AsyncLED(). Will try again next tick')
                time.sleep(0.5)
#LaunchOLED()
# Spin up the worker threads, then park the main thread forever; both
# threads are daemons, so the process exits when this loop is killed.
Main()
AsyncLED()
while True:
    time.sleep(0.5)
dd6d712ed28f972c5124634fc5f7cb3d787ec074 | 10,391 | py | Python | morpho_segm/prefix.py | GiulioZhou/Morpho_Zero-Shot_NMT | be9eb5915569030de763cee1aa20e663ebdc201c | [
"MIT"
] | 1 | 2019-07-22T19:18:32.000Z | 2019-07-22T19:18:32.000Z | morpho_segm/prefix.py | GiulioZhou/Morpho_Zero-Shot_NMT | be9eb5915569030de763cee1aa20e663ebdc201c | [
"MIT"
] | null | null | null | morpho_segm/prefix.py | GiulioZhou/Morpho_Zero-Shot_NMT | be9eb5915569030de763cee1aa20e663ebdc201c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from nltk.corpus import stopwords
tok_p = "##"
special = "?!\"(=:;,.\'[{"
#https://en.wiktionary.org/wiki/Category:Italian_prefixes
ita_prefix = ['epistolo', 'spermato', 'allergo', 'antropo', 'carcino',
'cherato', 'cinesio', 'cistico', 'cortico', 'dattilo', 'dermato',
'deutero', 'elettro', 'elminto', 'enantio', 'estesio', 'faringo',
'farmaco', 'galatto', 'galvano', 'geronto', 'guardia', 'idrossi',
'laringo', 'magneto', 'maxillo', 'meccano', 'meteoro', 'mirmeco',
'oftalmo', 'parteno', 'spettro', 'stomato', 'strepto', 'talasso',
'tossico', 'tracheo', 'adreno', 'ampelo', 'archeo', 'attino',
'austro', 'belone', 'biblio', 'brachi', 'bronco', 'calori', 'cardio',
'cefalo', 'centro', 'cerato', 'chemio', 'cinesi', 'circon', 'circum',
'clepto', 'condro', 'contra', 'contro', 'cripto', 'cristo', 'critto',
'dacrio', 'dendro', 'destro', 'dodeca', 'dolico', 'echino', 'embrio',
'entero', 'entomo', 'eritro', 'esacis', 'franco', 'funghi', 'gastro',
'genito', 'gineco', 'glitto', 'glosso', 'glotto', 'guarda', 'immuno',
'ispano', 'istero', 'laparo', 'latino', 'malaco', 'megalo', 'musico',
'onfalo', 'ornito', 'palato', 'pleuro', 'pneumo', 'proteo', 'pseudo',
'quadri', 'quadru', 'seleno', 'sincro', 'speleo', 'stereo', 'strato',
'tanato', 'terato', 'toraco', 'vetero', 'adeno', 'adipo', 'amilo',
'andro', 'anemo', 'anglo', 'aorto', 'archi', 'artro', 'astro',
'audio', 'bleno', 'calco', 'calli', 'callo', 'carbo', 'cario',
'carpo', 'carto', 'centi', 'cheto', 'chilo', 'chiro', 'ciano',
'ciber', 'ciclo', 'cisti', 'cisto', 'clado', 'clino', 'cloro',
'coleo', 'colpo', 'copro', 'cosmo', 'criso', 'cromo', 'crono',
'derma', 'dermo', 'desmo', 'diplo', 'econo', 'elaio', 'emato',
'entro', 'epato', 'eroto', 'estra', 'etero', 'extra', 'fanta',
'femto', 'ferri', 'ferro', 'fisio', 'flebo', 'fosfo', 'freno',
'frigo', 'fungi', 'fuori', 'gallo', 'gastr', 'glico', 'gonio',
'grafo', 'gravi', 'greco', 'iatro', 'icono', 'infra', 'inter',
'intra', 'intro', 'italo', 'ittio', 'labio', 'latto', 'leuco',
'linfo', 'lombo', 'macro', 'masto', 'medio', 'mezzo', 'micro',
'milli', 'miria', 'molti', 'morfo', 'multi', 'narco', 'necro',
'nefro', 'neuro', 'nevro', 'nipio', 'nitro', 'normo', 'oligo',
'oltre', 'onico', 'osteo', 'paleo', 'palin', 'penta', 'petro',
'picro', 'piezo', 'plani', 'pluri', 'polio', 'porno', 'porta',
'prano', 'proto', 'pseud', 'psico', 'quadr', 'quasi', 'radio',
'retro', 'retto', 'sapro', 'scafo', 'scato', 'scoto', 'siero',
'simil', 'sismo', 'socio', 'sopra', 'sotto', 'sovra', 'spiro',
'sporo', 'steno', 'stilo', 'super', 'tachi', 'tardo', 'tecno',
'terio', 'termo', 'tetra', 'tossi', 'trans', 'tribo', 'trico',
'ultra', 'vibro', 'video', 'xanto', 'acro', 'aden', 'aero', 'afro',
'agio', 'agri', 'agro', 'algo', 'ambi', 'ammo', 'anfi', 'ante', 'anti',
'anto', 'aplo', 'arci', 'areo', 'auri', 'auro', 'auto', 'avan', 'avio',
'bari', 'baro', 'bati', 'bato', 'bene', 'brio', 'caco', 'calo', 'capo',
'cata', 'ceno', 'cent', 'cine', 'cino', 'cito', 'crio', 'cris', 'deca',
'deci', 'demo', 'dici', 'dopo', 'ecto', 'eleo', 'elio', 'endo', 'enna',
'ento', 'epta', 'equi', 'ergo', 'erio', 'etno', 'etta', 'etto', 'euro',
'ezio', 'fago', 'feno', 'fico', 'filo', 'fito', 'fono', 'foto', 'geno',
'giga', 'gine', 'giro', 'gono', 'ideo', 'idio', 'idro', 'iero', 'igro',
'indo', 'iper', 'ipno', 'ippo', 'ipso', 'isco', 'isto', 'kilo', 'lava',
'lipo', 'liso', 'lito', 'logo', 'maxi', 'mega', 'meno', 'meso', 'meta',
'mico', 'mini', 'miso', 'mono', 'moto', 'nano', 'naso', 'nord', 'noso',
'oclo', 'ofio', 'oleo', 'olig', 'oltr', 'omeo', 'onni', 'opto', 'orto',
'ossi', 'otta', 'para', 'pato', 'pedo', 'peri', 'piro', 'poli', 'post',
'rino', 'rodo', 'scia', 'semi', 'silo', 'sino', 'sito', 'sopr', 'sott',
'stra', 'tele', 'tipo', 'tomo', 'tono', 'topo', 'toss', 'tras', 'vice',
'xeno', 'xero', 'xilo', 'zigo', 'zimo', 'alo', 'ana', 'apo', 'ben',
'bin', 'bio', 'bis', 'cis', 'dis', 'duo', 'eco', 'ego', 'emi', 'emo',
'eno', 'epi', 'esa', 'eso', 'eto', 'geo', 'ipo', 'iso', 'mal', 'mio',
'neo', 'olo', 'omo', 'oro', 'oto', 'ovi', 'ovo', 'pan', 'pio', 'pre',
'pro', 'reo', 'rin', 'sub', 'sud', 'teo', 'tio', 'tra', 'tri', 'udo',
'uni', 'uro', 'zoo', 'bi', 'de', 'di', 'ex', 'il', 'im', 'in', 'ir',
'oo', 're', 'ri']
# https://en.wikipedia.org/wiki/English_prefix
eng_prefix = ['counter', 'electro', 'circum', 'contra', 'contro', 'crypto',
'deuter', 'franco', 'hetero', 'megalo', 'preter', 'pseudo', 'after',
'under', 'amphi', 'anglo', 'astro', 'extra', 'hydro', 'hyper',
'infra', 'inter', 'intra', 'micro', 'multi', 'multi', 'ortho',
'paleo', 'photo', 'proto', 'quasi', 'retro', 'socio', 'super',
'supra', 'trans', 'ultra', 'anti', 'back', 'down', 'fore', 'hind',
'midi', 'mini', 'over', 'post', 'self', 'step', 'with', 'afro',
'ambi', 'anti', 'arch', 'auto', 'cryo', 'demi', 'demo', 'euro',
'gyro', 'hemi', 'homo', 'hypo', 'ideo', 'idio', 'indo', 'macr',
'maxi', 'mega', 'meta', 'mono', 'omni', 'para', 'peri', 'pleo',
'poly', 'post', 'pros', 'pyro', 'semi', 'tele', 'vice', 'dis',
'mid', 'mis', 'off', 'out', 'pre', 'pro', 'twi', 'ana', 'apo',
'bio', 'cis', 'con', 'com', 'col', 'cor', 'dia', 'dis', 'dif',
'duo', 'eco', 'epi', 'geo', 'iso', 'mal', 'mon', 'neo', 'non',
'pan', 'ped', 'per', 'pod', 'pre', 'pro', 'sub', 'sup', 'sur',
'syn', 'syl', 'sym', 'tri', 'uni', 'be', 'by', 'co', 'de', 'en',
'em', 'ex', 'on', 're', 'un', 'up', 'an', 'ap', 'bi', 'co', 'de',
'di', 'du', 'en', 'el', 'em', 'ep', 'ex', 'in', 'im', 'ir', 'ob', 'sy']
#https://en.wiktionary.org/wiki/Category:German_prefixes
de_prefix = ['auseinander', 'schwieger', 'anthropo', 'entgegen', 'herunter',
'zusammen', 'elektro', 'general', 'wegwerf', 'zurecht', 'achter',
'anheim', 'binnen', 'einzel', 'gastro', 'herauf', 'heraus', 'herein',
'hervor', 'hetero', 'hinauf', 'hinaus', 'hinein', 'hinter', 'kardio',
'kontra', 'küchen', 'nieder', 'riesen', 'scheiß', 'sonder', 'voraus',
'vorbei', 'vorder', 'vorher', 'wieder', 'zurück', 'after', 'aller',
'astro', 'außen', 'balto', 'durch', 'empor', 'extra', 'gegen',
'haupt', 'herab', 'heran', 'herum', 'hinzu', 'inter', 'intra',
'kyber', 'meist', 'melde', 'neben', 'nicht', 'nitro', 'paläo',
'phyto', 'porno', 'stein', 'stief', 'stock', 'unter', 'voran',
'wider', 'afro', 'ambi', 'anti', 'auto', 'ertz', 'fort', 'hypo',
'miss', 'myko', 'nach', 'nord', 'ober', 'onko', 'piko', 'post',
'quer', 'raus', 'rein', 'rück', 'theo', 'thio', 'über', 'vize',
'voll', 'zwie', 'alt', 'auf', 'aus', 'auß', 'bei', 'bey', 'bio',
'dar', 'ein', 'emp', 'ent', 'erz', 'her', 'hin', 'miß', 'mit',
'neo', 'neu', 'öko', 'pan', 'prä', 'sau', 'sym', 'ver', 'vor',
'weg', 'zer', 'zur', 'ab', 'an', 'be', 'da', 'er', 'ge', 'ob',
'um', 'un', 'ur', 'ze', 'zu']
#https://en.wiktionary.org/wiki/Category:Dutch_prefixes
nl_prefix = ['betovergroot', 'lievelings', 'mannetjes', 'overgroot',
'vrouwtjes', 'mercapto', 'carcino', 'elektro', 'hydroxy', 'noorder',
'spectro', 'vooruit', 'achter', 'binnen', 'buiten', 'cardio',
'chloor', 'chromo', 'contra', 'hetero', 'kanker', 'kinesi',
'middel', 'midden', 'ooster', 'psycho', 'strato', 'tering',
'thermo', 'tussen', 'wester', 'zuider', 'aarts', 'aller',
'amino', 'astro', 'bloed', 'boven', 'broom', 'centi', 'chemo',
'cyano', 'cyclo', 'fluor', 'fysio', 'groot', 'hecto', 'homeo',
'hoofd', 'infra', 'klote', 'kunst', 'kwasi', 'micro', 'milli',
'multi', 'neven', 'onder', 'opeen', 'opper', 'petro', 'quasi',
'radio', 'reuze', 'snert', 'steno', 'stief', 'super', 'tegen',
'terug', 'trans', 'tyfus', 'voort', 'yocto', 'yotta', 'zepto',
'zetta', 'acro', 'anti', 'atmo', 'atto', 'cyto', 'deca', 'deci',
'door', 'filo', 'fono', 'foto', 'giga', 'hept', 'homo', 'hypo',
'jood', 'kilo', 'mede', 'mega', 'meth', 'mono', 'nano', 'niet',
'octo', 'over', 'pedo', 'pent', 'peri', 'peta', 'pico', 'poly',
'post', 'prop', 'pyro', 'tele', 'tera', 'theo', 'thio', 'vice',
'voor', 'weer', 'xylo', 'zelf', 'aan', 'apo', 'avi', 'bij', 'bio',
'but', 'con', 'dec', 'des', 'dis', 'dys', 'eco', 'epi', 'eth', 'exa',
'geo', 'her', 'hex', 'kei', 'kut', 'min', 'mis', 'non', 'oct', 'oer',
'ont', 'oor', 'oud', 'pan', 'rot', 'sub', 'syn', 'toe', 'tri', 'uit',
'ver', 'wan', 'wel', 'af', 'al', 'an', 'be', 'de', 'di', 'er', 'et',
'ge', 'in', 'na', 'on', 'op', 'te']
#https://ro.wiktionary.org/wiki/Categorie:Prefixe_%C3%AEn_rom%C3%A2n%C4%83
ro_prefix = ['parapara', 'pluspoli', 'politico', 'portpost', 'antropo',
'electro', 'infanti', 'balneo', 'cardio', 'câteși', 'contra',
'ftizio', 'mecano', 'medico', 'pseudo', 'sexsex', 'simili', 'vavice',
'aceto', 'adeno', 'audio', 'carpo', 'centi', 'cromo', 'extra',
'ftori', 'helio', 'hidro', 'hiper', 'infra', 'inter', 'intra',
'între', 'lacto', 'macro', 'micro', 'moldo', 'multi', 'paleo',
'pluri', 'proto', 'radio', 'servo', 'silvo', 'super', 'supra',
'tehno', 'termo', 'tetra', 'trans', 'ultra', 'umidi', 'video',
'aero', 'agro', 'ante', 'anti', 'arhi', 'atot', 'auto', 'deca',
'echi', 'filo', 'fono', 'foto', 'hexa', 'hipo', 'kilo', 'loco',
'logo', 'mega', 'meta', 'mini', 'mono', 'moto', 'omni', 'orto',
'peri', 'pico', 'prea', 'semi', 'stră', 'tele', 'tera', 'topo',
'zimo', 'ana', 'bio', 'con', 'des', 'neo', 'non', 'pan', 'pre',
'răs', 'reo', 'sub', 'tri', 'tus', 'uni', 'bi', 'ex', 'im', 'in'
, 'în', 'ne', 're']
prefixes = {
"italian": ita_prefix,
"english": eng_prefix,
"german": de_prefix,
"dutch": nl_prefix,
"romanian": ro_prefix
}
def prefix_segmenter(lang, word):
    """Split up to two known prefixes off the front of `word`, marking each
    with the `tok_p` separator. Leading punctuation is preserved and
    stopwords are returned unchanged."""
    punctuations = ""
    result = ""
    #Remove punctuations and return if stopword
    while word:
        if word[0] in special:
            punctuations = punctuations + word[0]
            word = word[1:]
        else:
            break
    if word.lower().rstrip(" ") in list(stopwords.words(lang)):
        return punctuations + word
    # How many iterations? (at most two prefixes are stripped)
    for i in range(2):
        for prefix in prefixes[lang]:
            # word[:-len(prefix)] is non-empty iff word is longer than the
            # prefix, so indexing word[len(prefix)] below cannot go out of range.
            if word.lower().startswith(prefix) and word[:-len(prefix)] and word[len(prefix)] != " ":
                if not word.startswith(prefix): #uppercase
                    # Match was case-insensitive: re-capitalize the prefix.
                    prefix = prefix.title()
                result = result + " " + prefix + tok_p
                word = word[len(prefix):]
                break
    return " " + punctuations + (result + " " + word).lstrip(" ")
#
# if __name__ == "__main__":
# import sys
#
# word = sys.argv[1]
# print (prefix_segmenter(sys.argv[2], word))
| 53.287179 | 91 | 0.534212 |
49db462d664583435f0f84f33e94817c42c81810 | 461 | py | Python | db/seed-cassandra/seedCassandra.py | stephaniesara/highlights | a3a610c656d8cdae582377f53a2ca4b526b69751 | [
"MIT"
] | null | null | null | db/seed-cassandra/seedCassandra.py | stephaniesara/highlights | a3a610c656d8cdae582377f53a2ca4b526b69751 | [
"MIT"
] | null | null | null | db/seed-cassandra/seedCassandra.py | stephaniesara/highlights | a3a610c656d8cdae582377f53a2ca4b526b69751 | [
"MIT"
] | 1 | 2018-04-11T19:32:03.000Z | 2018-04-11T19:32:03.000Z | # UNABLE TO CONNECT
import csv
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
pool = ConnectionPool('test', ['127.0.0.1:9042'])
cf = ColumnFamily(pool, "testtable")
with open('test.csv', 'rb') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
print str(row)
key = row['id']
del row['id']
cf.insert(key, row)
pool.dispose()
# TO RUN
# $ python
# python shell > python seedCassandra.py | 20.954545 | 49 | 0.694143 |
979374f69ed4c98d92b19a9c66e6b3281ce3738b | 2,207 | py | Python | Scripts/simulation/narrative/narrative_environment_support.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/narrative/narrative_environment_support.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/narrative/narrative_environment_support.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\narrative\narrative_environment_support.py
# Compiled at: 2018-10-30 03:32:06
# Size of source mod 2**32: 2634 bytes
from event_testing.test_variants import RegionTest
from narrative.narrative_enums import NarrativeEnvironmentParams
from sims4.tuning.tunable import AutoFactoryInit, HasTunableSingletonFactory, OptionalTunable, TunableMapping, TunableEnumEntry, TunableTuple, TunableSimMinute, Tunable
from weather.weather_loot_ops import WeatherSetOverrideForecastLootOp
class NarrativeEnvironmentOverride(HasTunableSingletonFactory, AutoFactoryInit):
    """Tunable bundle describing environment changes (weather forecast and
    interpolated environment parameters) applied while a narrative is
    active, optionally restricted to specific regions."""
    FACTORY_TUNABLES = {'supported_regions':OptionalTunable(description='\n If set, this override is only applicable in the specified regions.\n ',
      tunable=RegionTest.TunableFactory(locked_args={'tooltip':None,  'subject':None})),
     'weather_forecast_override':WeatherSetOverrideForecastLootOp.TunableFactory(description="\n If Seasons pack is installed, this forecast is used to override \n the affected region's weather.\n "),
     'narrative_environment_params':TunableMapping(description='\n The various parameters to set when the narrative is enabled.\n ',
      key_type=TunableEnumEntry(description='\n The parameter that we wish to change.\n ',
      tunable_type=NarrativeEnvironmentParams,
      default=None),
      value_type=TunableTuple(interpolation_time=TunableSimMinute(description='\n The time over which to transition to the new value,\n if this occurs during simulation.\n ',
      minimum=0.0,
      default=15.0),
      value=Tunable(description='\n The value that we will set this parameter to.\n ',
      tunable_type=float,
      default=1.0)))}

    def should_apply(self):
        # Applies everywhere unless a region test is tuned; otherwise defer
        # to the tuned RegionTest.
        if self.supported_regions is not None:
            return self.supported_regions()
        return True
bb2d186060adcd7ad177172d005fd2cacc951f81 | 592 | py | Python | hrsalespipes/contacts/migrations/0021_auto_20200326_0301.py | hanztura/hrsalespipes | 77accf3132726ced05d84fa2a41891b841f310b8 | [
"Apache-2.0"
] | 3 | 2020-03-26T12:43:43.000Z | 2021-05-10T14:35:51.000Z | hrsalespipes/contacts/migrations/0021_auto_20200326_0301.py | hanztura/hrsalespipes | 77accf3132726ced05d84fa2a41891b841f310b8 | [
"Apache-2.0"
] | 5 | 2021-04-08T21:15:15.000Z | 2022-02-10T11:03:12.000Z | hrsalespipes/contacts/migrations/0021_auto_20200326_0301.py | hanztura/hrsalespipes | 77accf3132726ced05d84fa2a41891b841f310b8 | [
"Apache-2.0"
] | 1 | 2022-01-30T19:24:48.000Z | 2022-01-30T19:24:48.000Z | # Generated by Django 2.2.10 on 2020-03-26 03:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (2020-03-26).

    Relaxes two Candidate fields to optional TextFields (blank=True) and
    gives ``current_previous_position`` the human-readable label
    'position'.
    """
    dependencies = [
        ('contacts', '0020_auto_20200325_2355'),
    ]
    operations = [
        # Allow blank values and relabel as 'position' in admin/forms.
        migrations.AlterField(
            model_name='candidate',
            name='current_previous_position',
            field=models.TextField(blank=True, verbose_name='position'),
        ),
        # Allow blank notice-period values.
        migrations.AlterField(
            model_name='candidate',
            name='notice_period',
            field=models.TextField(blank=True),
        ),
    ]
| 24.666667 | 72 | 0.603041 |
218e496bc3e603d339fbd6b2cfa3e7a2eca5e4d0 | 1,812 | py | Python | examples/intersecting_iterator/search.py | volmasoft/pyaccumulo | 8adcf535bb82ba69c749efce785c9efc487e85de | [
"Apache-2.0"
] | 7 | 2015-04-04T07:17:58.000Z | 2016-02-10T03:33:29.000Z | examples/intersecting_iterator/search.py | volmasoft/pyaccumulo | 8adcf535bb82ba69c749efce785c9efc487e85de | [
"Apache-2.0"
] | 6 | 2015-01-22T16:41:29.000Z | 2016-06-06T04:52:46.000Z | examples/intersecting_iterator/search.py | volmasoft/pyaccumulo | 8adcf535bb82ba69c749efce785c9efc487e85de | [
"Apache-2.0"
] | 6 | 2015-06-23T19:06:11.000Z | 2016-08-10T19:53:11.000Z | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python 2 example script: two-pass full-text search against an Accumulo
# table using the server-side IntersectingIterator.
# Usage: search.py <table> <term> [<term> ...]
from pyaccumulo import Accumulo, Mutation, Range
from pyaccumulo.iterators import *
from pyaccumulo.proxy.ttypes import IteratorSetting, IteratorScope
from examples.util import hashcode
import hashlib, re
import settings
import sys
# Connection parameters come from the local settings module.
conn = Accumulo(host=settings.HOST, port=settings.PORT, user=settings.USER, password=settings.PASSWORD)
table = sys.argv[1]
if not conn.table_exists(table):
    print "Table '%s' does not exist."%table
    sys.exit(1)
# Normalise terms to lowercase; terms of length <= 3 are dropped, and the
# intersecting iterator needs at least two terms to intersect.
search_terms = [term.lower() for term in sys.argv[2:] if len(term) > 3]
if len(search_terms) < 2:
    print "More than one term of length > 3 is required for this example"
    sys.exit(1)
# Pass 1: scan rows in ["s", "t") — presumably the index shard rows written
# by the companion ingest example (verify against that script) — with the
# IntersectingIterator; each hit's column qualifier carries a document UUID.
uuids = []
for e in conn.batch_scan(table, scanranges=[Range(srow="s", erow="t")], iterators=[IntersectingIterator(priority=21, terms=search_terms)]):
    uuids.append(e.cq)
# Pass 2: fetch each matching document by its UUID row and print its value.
if len(uuids) > 0:
    for doc in conn.batch_scan(table, scanranges=[Range(srow=uuid, erow=uuid) for uuid in uuids]):
        print doc.val
else:
    print "No results found"
conn.close()
| 36.24 | 139 | 0.748344 |
32c5e4552046dffc86137cbd7c3f976a3be731db | 2,026 | py | Python | dislib/model_selection/_validation.py | alexbarcelo/dislib | 989f81f235ae30b17410a8d805df258c7d931b38 | [
"Apache-2.0"
] | 36 | 2018-10-22T19:21:14.000Z | 2022-03-22T12:10:01.000Z | dislib/model_selection/_validation.py | alexbarcelo/dislib | 989f81f235ae30b17410a8d805df258c7d931b38 | [
"Apache-2.0"
] | 329 | 2018-11-22T18:04:57.000Z | 2022-03-18T01:26:55.000Z | dislib/model_selection/_validation.py | alexbarcelo/dislib | 989f81f235ae30b17410a8d805df258c7d931b38 | [
"Apache-2.0"
] | 21 | 2019-01-10T11:46:39.000Z | 2022-03-17T12:59:45.000Z | import numbers
import numpy as np
def fit_and_score(estimator, train_ds, validation_ds, scorer, parameters,
fit_params):
if parameters is not None:
estimator.set_params(**parameters)
x_train, y_train = train_ds
estimator.fit(x_train, y_train, **fit_params)
x_test, y_test = validation_ds
test_scores = _score(estimator, x_test, y_test, scorer)
return [test_scores]
def _score(estimator, x, y, scorers):
"""Return a dict of scores"""
scores = {}
for name, scorer in scorers.items():
score = scorer(estimator, x, y)
scores[name] = score
return scores
def validate_score(score, name):
    """Check that *score* is numeric and return it unchanged.

    Accepts any ``numbers.Number`` as well as zero-dimensional numpy
    arrays (scalar arrays); anything else raises ``ValueError`` naming
    the offending scorer.
    """
    is_number = isinstance(score, numbers.Number)
    is_scalar_array = isinstance(score, np.ndarray) and len(score.shape) == 0
    if is_number or is_scalar_array:
        return score
    raise ValueError("scoring must return a number, got %s (%s) "
                     "instead. (scorer=%s)"
                     % (str(score), type(score), name))
def aggregate_score_dicts(scores):
    """Merge a list of per-run score dicts into one dict of arrays.

    Every dict in *scores* is expected to share the keys of the first
    one.  The result maps each key to a numpy array of the collected
    values, in list order, e.g.::

        [{'a': 1, 'b': 10}, {'a': 2, 'b': 2}]  ->
        {'a': array([1, 2]), 'b': array([10, 2])}
    """
    aggregated = {}
    for key in scores[0]:
        aggregated[key] = np.asarray([entry[key] for entry in scores])
    return aggregated
def check_scorer(estimator, scorer):
    """Resolve *scorer* into a ``scorer(estimator, *args, **kwargs)`` callable.

    A callable *scorer* is returned as-is.  ``None`` falls back to the
    estimator's own ``score`` method, wrapped so the call signature
    matches.  Raises ``TypeError`` when *scorer* is None but the
    estimator has no ``score`` method, and ``ValueError`` when *scorer*
    is neither None nor callable.
    """
    if callable(scorer):
        return scorer
    if scorer is not None:
        raise ValueError("Invalid scorer %r" % scorer)
    if not hasattr(estimator, 'score'):
        raise TypeError(
            "If a scorer is None, the estimator passed should have a "
            "'score' method. The estimator %r does not." % estimator)

    def _passthrough_scorer(estimator, *args, **kwargs):
        """Function that wraps estimator.score"""
        return estimator.score(*args, **kwargs)

    return _passthrough_scorer
| 31.169231 | 74 | 0.581935 |
825b36df98febaac082c254729e2c258f9747936 | 2,390 | py | Python | cripts/email_addresses/email_address.py | lakiw/cripts | 43f62891a3724e1ec60629887d97c421fb302163 | [
"MIT"
] | 2 | 2017-04-06T12:26:11.000Z | 2018-11-05T19:17:15.000Z | cripts/email_addresses/email_address.py | lakiw/cripts | 43f62891a3724e1ec60629887d97c421fb302163 | [
"MIT"
] | 9 | 2016-09-28T10:19:10.000Z | 2017-02-24T17:58:43.000Z | cripts/email_addresses/email_address.py | lakiw/cripts | 43f62891a3724e1ec60629887d97c421fb302163 | [
"MIT"
] | null | null | null | import uuid
from mongoengine import Document, StringField, UUIDField, ListField
from django.conf import settings
from cripts.core.cripts_mongoengine import CriptsBaseAttributes, CriptsSourceDocument
from cripts.core.cripts_mongoengine import CriptsActionsDocument
class EmailAddress(CriptsBaseAttributes, CriptsSourceDocument, CriptsActionsDocument,
                   Document):
    """
    Top-level CRIPTs MongoDB document for a single e-mail address.

    Stores the full address plus its split local part and domain, the
    datasets it appeared in, and (via the mixin base classes) sources and
    actions.  The ``meta`` dict configures the MongoEngine collection,
    in-line schema documentation, and the jTable listing/detail options
    used by the web UI.
    """
    meta = {
        "collection": settings.COL_EMAIL_ADDRESSES,
        "cripts_type": 'EmailAddress',
        "latest_schema_version": 1,
        "schema_doc": {
            'address': 'Email address, eg: test@test.com',
            'datasets': ('List [] of datasets this email_address'
                        ' appeared in'),
            'domain': 'Domain of the e-mail address, eg test.com',
            'local_name': 'The front part of the e-mail address. Eg. "user" of user@test.com',
            'description': 'Description of the e-mail address',
            'source': ('List [] of sources who provided information about this'
                    ' email address')
        },
        "jtable_opts": {
            'details_url': 'cripts.email_addresses.views.email_address_detail',
            'details_url_key': "address",
            'default_sort': "address",
            'searchurl': 'cripts.email_addresses.views.email_addresses_listing',
            'fields': [ "address", "local_name", "domain", "created",
                       "source", "id"],
            'jtopts_fields': [ "address",
                              "local_name",
                              "domain",
                              "created",
                              "source",
                              "favorite",
                              "id"],
            'hidden_fields': [],
            'linked_fields': ["source","local_name","domain" ],
            'details_link': "address",
            'no_sort': []
        }
    }
    # Full address, e.g. "user@test.com".
    address = StringField(required=True)
    # Free-text description of the address.
    description = StringField(required=True)
    # Domain part, e.g. "test.com".
    domain = StringField(required=True)
    # Local part (before the '@'), e.g. "user".
    local_name = StringField(required=True)
    # Names of datasets this address appeared in.
    datasets = ListField(required=False)
| 41.929825 | 94 | 0.506276 |
b793b2a92123816b56d6a56290c56dce0a858fa0 | 914 | py | Python | pkgs/conda-env-2.4.5-py27_0/lib/python2.7/site-packages/conda_env/installers/conda.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/conda-env-2.4.5-py27_0/lib/python2.7/site-packages/conda_env/installers/conda.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/conda-env-2.4.5-py27_0/lib/python2.7/site-packages/conda_env/installers/conda.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from conda.cli import common
from conda import plan
def install(prefix, specs, args, env):
    """Install package *specs* into the conda environment at *prefix*.

    Builds an install plan against the package index for the
    environment's channels and executes it, with progress bars unless
    JSON output (or quiet mode) was requested.  Errors are reported and
    the process exited via ``common.exception_and_exit``.
    """
    # TODO: do we need this?
    common.check_specs(prefix, specs, json=args.json)
    # TODO: support all various ways this happens
    index = common.get_index_trap(channel_urls=env.channels)
    actions = plan.install_actions(prefix, index, specs)
    with common.json_progress_bars(json=args.json and not args.quiet):
        try:
            plan.execute_actions(actions, index, verbose=not args.quiet)
        except RuntimeError as e:
            # conda signals a held package-cache lock with a RuntimeError
            # whose message contains "LOCKERROR"; report that distinctly.
            if len(e.args) > 0 and "LOCKERROR" in e.args[0]:
                error_type = "AlreadyLocked"
            else:
                error_type = "RuntimeError"
            common.exception_and_exit(e, error_type=error_type, json=args.json)
        except SystemExit as e:
            common.exception_and_exit(e, json=args.json)
| 35.153846 | 79 | 0.667396 |
e9bc844593333749cae0c7e5f641429c09b73f87 | 1,651 | py | Python | file_handlers/fh_robotracine.py | tpmp-inra/ipapi | b0f6be8960a20dbf95ef9df96efdd22bd6e031c5 | [
"MIT"
] | 1 | 2020-06-30T06:53:36.000Z | 2020-06-30T06:53:36.000Z | file_handlers/fh_robotracine.py | tpmp-inra/ipapi | b0f6be8960a20dbf95ef9df96efdd22bd6e031c5 | [
"MIT"
] | null | null | null | file_handlers/fh_robotracine.py | tpmp-inra/ipapi | b0f6be8960a20dbf95ef9df96efdd22bd6e031c5 | [
"MIT"
] | null | null | null | import os
from datetime import datetime as dt
from ipso_phen.ipapi.file_handlers.fh_base import FileHandlerBase
import logging
logger = logging.getLogger(__name__)
class FileHandlerRobotRacine(FileHandlerBase):
    """File handler for "Robot Racine" images.

    Handled files are identified by an ``rr_`` filename prefix (see
    ``probe``).  Experiment and plant identifiers are parsed from the
    file name; the timestamp comes from the file's mtime.
    """

    def __init__(self, **kwargs):
        """Fill plant, date, time, experiment, camera and view_option from file data"""
        self._file_path = kwargs.get("file_path", "")
        if self._file_path:
            # Name is expected to look like "rr_<exp>_<...> (<plant>)":
            # the part after the space is the parenthesised plant id, the
            # middle "_"-separated token is the experiment id — TODO
            # confirm against actual Robot Racine file naming.
            tmp_str, self._plant = self.file_name_no_ext.split(" ")
            self._plant = "plant_" + self._plant.replace("(", "").replace(")", "")
            _, self._exp, _ = tmp_str.split("_")
            self._exp = "rr_" + self._exp
            # No timestamp in the name: fall back to the file's mtime,
            # or "now" if even that is unavailable.
            try:
                self._date_time = dt.fromtimestamp(os.path.getmtime(self.file_path))
            except Exception as e:
                logger.exception(f"Unable to extract date from file because: {repr(e)}")
                self._date_time = dt.now()
            self._date_time = self._date_time.replace(microsecond=0)
            self._camera = "pi_camera"
            # The file extension doubles as the view option.
            _, ext_ = os.path.splitext(self.file_name)
            self._view_option = ext_ if ext_ else "unknown"
        self.update(**kwargs)

    @classmethod
    def probe(cls, file_path, database):
        # Score 100 (claim the file) for existing files named "rr_*",
        # 0 otherwise.
        if not isinstance(file_path, str) or not os.path.isfile(file_path):
            return 0
        return 100 if cls.extract_file_name(file_path).lower().startswith("rr_") else 0

    @property
    def is_vis(self):
        # Robot Racine images are always visible-spectrum.
        return True

    @property
    def is_fluo(self):
        return False

    @property
    def is_nir(self):
        return False
| 33.693878 | 89 | 0.602665 |
d7ea7677b7f28f0a29a3a5713916e4419729d659 | 112 | py | Python | archivesapi/archives/admin.py | SubcityRadio/archives-api | a6e4fe6bda8ce4a37c37c4a648f8514ec7919a9f | [
"MIT"
] | null | null | null | archivesapi/archives/admin.py | SubcityRadio/archives-api | a6e4fe6bda8ce4a37c37c4a648f8514ec7919a9f | [
"MIT"
] | 3 | 2020-06-05T19:59:01.000Z | 2021-06-10T21:13:42.000Z | archivesapi/archives/admin.py | SubcityRadio/archives-api | a6e4fe6bda8ce4a37c37c4a648f8514ec7919a9f | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import ArchivePost, ArchiveYear
admin.site.register(ArchivePost) | 22.4 | 44 | 0.839286 |
307656bd63e432328e027969a57aa890cd054374 | 7,068 | py | Python | google/ads/google_ads/v2/proto/enums/real_estate_placeholder_field_pb2.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v2/proto/enums/real_estate_placeholder_field_pb2.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v2/proto/enums/real_estate_placeholder_field_pb2.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | 1 | 2020-03-13T00:14:31.000Z | 2020-03-13T00:14:31.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v2/proto/enums/real_estate_placeholder_field.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v2/proto/enums/real_estate_placeholder_field.proto',
package='google.ads.googleads.v2.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v2.enumsB\037RealEstatePlaceholderFieldProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v2/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V2.Enums\312\002\035Google\\Ads\\GoogleAds\\V2\\Enums\352\002!Google::Ads::GoogleAds::V2::Enums'),
serialized_pb=_b('\nGgoogle/ads/googleads_v2/proto/enums/real_estate_placeholder_field.proto\x12\x1dgoogle.ads.googleads.v2.enums\x1a\x1cgoogle/api/annotations.proto\"\xa9\x03\n\x1eRealEstatePlaceholderFieldEnum\"\x86\x03\n\x1aRealEstatePlaceholderField\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0e\n\nLISTING_ID\x10\x02\x12\x10\n\x0cLISTING_NAME\x10\x03\x12\r\n\tCITY_NAME\x10\x04\x12\x0f\n\x0b\x44\x45SCRIPTION\x10\x05\x12\x0b\n\x07\x41\x44\x44RESS\x10\x06\x12\t\n\x05PRICE\x10\x07\x12\x13\n\x0f\x46ORMATTED_PRICE\x10\x08\x12\r\n\tIMAGE_URL\x10\t\x12\x11\n\rPROPERTY_TYPE\x10\n\x12\x10\n\x0cLISTING_TYPE\x10\x0b\x12\x17\n\x13\x43ONTEXTUAL_KEYWORDS\x10\x0c\x12\x0e\n\nFINAL_URLS\x10\r\x12\x15\n\x11\x46INAL_MOBILE_URLS\x10\x0e\x12\x10\n\x0cTRACKING_URL\x10\x0f\x12\x14\n\x10\x41NDROID_APP_LINK\x10\x10\x12\x17\n\x13SIMILAR_LISTING_IDS\x10\x11\x12\x10\n\x0cIOS_APP_LINK\x10\x12\x12\x14\n\x10IOS_APP_STORE_ID\x10\x13\x42\xf4\x01\n!com.google.ads.googleads.v2.enumsB\x1fRealEstatePlaceholderFieldProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v2/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V2.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V2\\Enums\xea\x02!Google::Ads::GoogleAds::V2::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_REALESTATEPLACEHOLDERFIELDENUM_REALESTATEPLACEHOLDERFIELD = _descriptor.EnumDescriptor(
name='RealEstatePlaceholderField',
full_name='google.ads.googleads.v2.enums.RealEstatePlaceholderFieldEnum.RealEstatePlaceholderField',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LISTING_ID', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LISTING_NAME', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CITY_NAME', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DESCRIPTION', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADDRESS', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRICE', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FORMATTED_PRICE', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMAGE_URL', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROPERTY_TYPE', index=10, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LISTING_TYPE', index=11, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONTEXTUAL_KEYWORDS', index=12, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FINAL_URLS', index=13, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FINAL_MOBILE_URLS', index=14, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRACKING_URL', index=15, number=15,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANDROID_APP_LINK', index=16, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIMILAR_LISTING_IDS', index=17, number=17,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IOS_APP_LINK', index=18, number=18,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IOS_APP_STORE_ID', index=19, number=19,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=172,
serialized_end=562,
)
_sym_db.RegisterEnumDescriptor(_REALESTATEPLACEHOLDERFIELDENUM_REALESTATEPLACEHOLDERFIELD)
_REALESTATEPLACEHOLDERFIELDENUM = _descriptor.Descriptor(
name='RealEstatePlaceholderFieldEnum',
full_name='google.ads.googleads.v2.enums.RealEstatePlaceholderFieldEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_REALESTATEPLACEHOLDERFIELDENUM_REALESTATEPLACEHOLDERFIELD,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=137,
serialized_end=562,
)
_REALESTATEPLACEHOLDERFIELDENUM_REALESTATEPLACEHOLDERFIELD.containing_type = _REALESTATEPLACEHOLDERFIELDENUM
DESCRIPTOR.message_types_by_name['RealEstatePlaceholderFieldEnum'] = _REALESTATEPLACEHOLDERFIELDENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RealEstatePlaceholderFieldEnum = _reflection.GeneratedProtocolMessageType('RealEstatePlaceholderFieldEnum', (_message.Message,), dict(
DESCRIPTOR = _REALESTATEPLACEHOLDERFIELDENUM,
__module__ = 'google.ads.googleads_v2.proto.enums.real_estate_placeholder_field_pb2'
,
__doc__ = """Values for Real Estate placeholder fields. For more information about
dynamic remarketing feeds, see
https://support.google.com/google-ads/answer/6053288.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.enums.RealEstatePlaceholderFieldEnum)
))
_sym_db.RegisterMessage(RealEstatePlaceholderFieldEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 42.071429 | 1,255 | 0.765139 |
f89432ff01cf435be15b34bed84d187aec8743de | 8,933 | py | Python | tutorials/micro/micro_autotune.py | Light-of-Hers/tvm | dc2f70e3c8a9b14b9e414ecf768ad32e6c3c3960 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | tutorials/micro/micro_autotune.py | Light-of-Hers/tvm | dc2f70e3c8a9b14b9e414ecf768ad32e6c3c3960 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | tutorials/micro/micro_autotune.py | Light-of-Hers/tvm | dc2f70e3c8a9b14b9e414ecf768ad32e6c3c3960 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-micro-autotune:
Autotuning with micro TVM
=========================
**Author**: `Andrew Reusch <https://github.com/areusch>`_, `Mehrdad Hessar <https://github.com/mehrdadh>`
This tutorial explains how to autotune a model using the C runtime.
"""
import numpy as np
import subprocess
import pathlib
import tvm
####################
# Defining the model
####################
#
# To begin with, define a model in Relay to be executed on-device. Then create an IRModule from relay model and
# fill parameters with random numbers.
#
data_shape = (1, 3, 10, 10)  # NCHW: batch 1, 3 channels, 10x10 spatial
weight_shape = (6, 3, 5, 5)  # OIHW: 6 output channels, 3 input channels, 5x5 kernel
data = tvm.relay.var("data", tvm.relay.TensorType(data_shape, "float32"))
weight = tvm.relay.var("weight", tvm.relay.TensorType(weight_shape, "float32"))
# Single conv2d; padding of 2 with a 5x5 kernel preserves the 10x10
# spatial size, so the output is (1, 6, 10, 10).
y = tvm.relay.nn.conv2d(
    data,
    weight,
    padding=(2, 2),
    kernel_size=(5, 5),
    kernel_layout="OIHW",
    out_dtype="float32",
)
f = tvm.relay.Function([data, weight], y)
relay_mod = tvm.IRModule.from_expr(f)
relay_mod = tvm.relay.transform.InferType()(relay_mod)
# Random weights stand in for trained parameters in this tutorial.
weight_sample = np.random.rand(
    weight_shape[0], weight_shape[1], weight_shape[2], weight_shape[3]
).astype("float32")
params = {"weight": weight_sample}
#######################
# Defining the target #
#######################
# Now we define the TVM target that describes the execution environment. This looks very similar
# to target definitions from other microTVM tutorials.
#
# When running on physical hardware, choose a target and a board that
# describe the hardware. There are multiple hardware targets that could be selected from
# PLATFORM list in this tutorial. You can chose the platform by passing --platform argument when running
# this tutorial.
#
# Host-emulated micro target: runs via the C runtime on the build machine.
TARGET = tvm.target.target.micro("host")
# Compiling for physical hardware
# --------------------------------------------------------------------------
# When running on physical hardware, choose a TARGET and a BOARD that describe the hardware. The
# STM32L4R5ZI Nucleo target and board is chosen in the example below.
#
# TARGET = tvm.target.target.micro("stm32l4r5zi")
# BOARD = "nucleo_l4r5zi"
#########################
# Extracting tuning tasks
#########################
# Not all operators in the Relay program printed above can be tuned. Some are so trivial that only
# a single implementation is defined; others don't make sense as tuning tasks. Using
# `extract_from_program`, you can produce a list of tunable tasks.
#
# Because task extraction involves running the compiler, we first configure the compiler's
# transformation passes; we'll apply the same configuration later on during autotuning.
# Vectorization is disabled because the C runtime targets bare-metal-style
# single-threaded code; the same config is reused for the later builds.
pass_context = tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True})
with pass_context:
    tasks = tvm.autotvm.task.extract_from_program(relay_mod["main"], {}, TARGET)
# The conv2d above must yield at least one tunable task.
assert len(tasks) > 0
######################
# Configuring microTVM
######################
# Before autotuning, we need to define a module loader and then pass that to
# a `tvm.autotvm.LocalBuilder`. Then we create a `tvm.autotvm.LocalRunner` and use
# both builder and runner to generates multiple measurements for auto tunner.
#
# In this tutorial, we have the option to use x86 host as an example or use different targets
# from Zephyr RTOS. If you choose pass `--platform=host` to this tutorial it will uses x86. You can
# choose other options by choosing from `PLATFORM` list.
#
# Resolve the TVM repository root so the CRT host template project can be
# located relative to it.
repo_root = pathlib.Path(
    subprocess.check_output(["git", "rev-parse", "--show-toplevel"], encoding="utf-8").strip()
)
# The module loader flashes each candidate into a generated host project.
module_loader = tvm.micro.AutoTvmModuleLoader(
    template_project_dir=repo_root / "src" / "runtime" / "crt" / "host",
    project_options={},
)
# One build at a time; vectorization disabled to match the CRT constraints.
builder = tvm.autotvm.LocalBuilder(
    n_parallel=1,
    build_kwargs={"build_option": {"tir.disable_vectorize": True}},
    do_fork=True,
    build_func=tvm.micro.autotvm_build_func,
)
runner = tvm.autotvm.LocalRunner(number=1, repeat=1, timeout=100, module_loader=module_loader)
measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)
# Compiling for physical hardware
# --------------------------------------------------------------------------
# module_loader = tvm.micro.AutoTvmModuleLoader(
# template_project_dir=repo_root / "apps" / "microtvm" / "zephyr" / "template_project",
# project_options={
# "zephyr_board": BOARD,
# "west_cmd": "west",
# "verbose": 1,
# "project_type": "host_driven",
# },
# )
# builder = tvm.autotvm.LocalBuilder(
# n_parallel=1,
# build_kwargs={"build_option": {"tir.disable_vectorize": True}},
# do_fork=False,
# build_func=tvm.micro.autotvm_build_func,
# )
# runner = tvm.autotvm.LocalRunner(number=1, repeat=1, timeout=100, module_loader=module_loader)
# measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)
################
# Run Autotuning
################
# Now we can run autotuning separately on each extracted task.
# Kept small for tutorial runtime; real tuning typically uses far more trials.
num_trials = 10
for task in tasks:
    # Genetic-algorithm tuner; results are appended to the log file that
    # apply_history_best() consumes later.
    tuner = tvm.autotvm.tuner.GATuner(task)
    tuner.tune(
        n_trial=num_trials,
        measure_option=measure_option,
        callbacks=[
            tvm.autotvm.callback.log_to_file("microtvm_autotune.log.txt"),
            tvm.autotvm.callback.progress_bar(num_trials, si_prefix="M"),
        ],
        si_prefix="M",
    )
############################
# Timing the untuned program
############################
# For comparison, let's compile and run the graph without imposing any autotuning schedules. TVM
# will select a randomly-tuned implementation for each operator, which should not perform as well as
# the tuned operator.
# Build WITHOUT the tuning log: AutoTVM falls back to default schedules,
# giving the baseline measurement.
with pass_context:
    lowered = tvm.relay.build(relay_mod, target=TARGET, params=params)
temp_dir = tvm.contrib.utils.tempdir()
project = tvm.micro.generate_project(
    str(repo_root / "src" / "runtime" / "crt" / "host"), lowered, temp_dir / "project"
)
# Compiling for physical hardware
# --------------------------------------------------------------------------
# project = tvm.micro.generate_project(
#     str(repo_root / "apps" / "microtvm" / "zephyr" / "template_project"),
#     lowered,
#     temp_dir / "project",
#     {
#         "zephyr_board": BOARD,
#         "west_cmd": "west",
#         "verbose": 1,
#         "project_type": "host_driven",
#     },
# )
project.build()
project.flash()
# Run once under the debug executor, which prints per-operator timings.
with tvm.micro.Session(project.transport()) as session:
    debug_module = tvm.micro.create_local_debug_executor(
        lowered.get_graph_json(), session.get_system_lib(), session.device
    )
    debug_module.set_input(**lowered.get_params())
    print("########## Build without Autotuning ##########")
    debug_module.run()
    del debug_module
##########################
# Timing the tuned program
##########################
# Once autotuning completes, you can time execution of the entire program using the Debug Runtime:
# Rebuild with the best schedules recorded in the tuning log and time the
# result the same way for comparison.
with tvm.autotvm.apply_history_best("microtvm_autotune.log.txt"):
    with pass_context:
        lowered_tuned = tvm.relay.build(relay_mod, target=TARGET, params=params)
temp_dir = tvm.contrib.utils.tempdir()
project = tvm.micro.generate_project(
    str(repo_root / "src" / "runtime" / "crt" / "host"), lowered_tuned, temp_dir / "project"
)
# Compiling for physical hardware
# --------------------------------------------------------------------------
# project = tvm.micro.generate_project(
#     str(repo_root / "apps" / "microtvm" / "zephyr" / "template_project"),
#     lowered_tuned,
#     temp_dir / "project",
#     {
#         "zephyr_board": BOARD,
#         "west_cmd": "west",
#         "verbose": 1,
#         "project_type": "host_driven",
#     },
# )
project.build()
project.flash()
with tvm.micro.Session(project.transport()) as session:
    debug_module = tvm.micro.create_local_debug_executor(
        lowered_tuned.get_graph_json(), session.get_system_lib(), session.device
    )
    debug_module.set_input(**lowered_tuned.get_params())
    print("########## Build with Autotuning ##########")
    debug_module.run()
    del debug_module
| 35.589641 | 111 | 0.653196 |
44c3e791353670c5db679b0584f87612f36db544 | 847 | py | Python | apps/dr_tb_malancha/plots/__init__.py | malanchak/AuTuMN | 0cbd006d1f15da414d02eed44e48bb5c06f0802e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | apps/dr_tb_malancha/plots/__init__.py | malanchak/AuTuMN | 0cbd006d1f15da414d02eed44e48bb5c06f0802e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | apps/dr_tb_malancha/plots/__init__.py | malanchak/AuTuMN | 0cbd006d1f15da414d02eed44e48bb5c06f0802e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | """
Used to load model parameters from file
"""
import os
import yaml
from os import path
from autumn.constants import APPS_PATH
from autumn.tool_kit.utils import merge_dicts
def _load_yaml(yaml_path: str) -> dict:
    """Read a YAML file, returning {} when the file is empty."""
    with open(yaml_path, "r") as f:
        # safe_load returns None for an empty document; normalise to {}.
        return yaml.safe_load(f) or {}


def load_plot_config(region_name: str):
    """
    Load plot config for the requested region.

    Reads the shared ``base.yml`` and the region-specific
    ``<region_name>.yml`` from the app's ``plots`` directory and combines
    them with ``merge_dicts``.
    This is for loading only, please do not put any pre-processing in here.
    """
    plots_path = path.join(APPS_PATH, "dr_tb_malancha", "plots")
    # Bug fix: the base config previously lacked the `or {}` guard used for
    # the region config, so an empty base.yml produced None and broke the
    # merge below.
    base_plot_config = _load_yaml(path.join(plots_path, "base.yml"))
    region_plot_config = _load_yaml(path.join(plots_path, f"{region_name}.yml"))
    return merge_dicts(region_plot_config, base_plot_config)
| 28.233333 | 75 | 0.714286 |
2800062b0c7d2fabdc1d0dd80619a43098c75675 | 5,572 | py | Python | userbot/modules/updater.py | mkaraniya/PaperplaneExtended | 5df22fa54cfc432b01901d6313aacc7ecd2fb318 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5 | 2019-10-18T12:03:37.000Z | 2020-10-10T03:23:48.000Z | userbot/modules/updater.py | mkaraniya/PaperplaneExtended | 5df22fa54cfc432b01901d6313aacc7ecd2fb318 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/updater.py | mkaraniya/PaperplaneExtended | 5df22fa54cfc432b01901d6313aacc7ecd2fb318 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 42 | 2019-11-02T13:55:41.000Z | 2020-05-29T00:55:11.000Z | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
"""
This module updates the userbot based on Upstream revision
"""
from os import remove, execl, path
import asyncio
import sys
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from userbot import CMD_HELP, bot, HEROKU_MEMEZ, HEROKU_APIKEY, HEROKU_APPNAME, UPSTREAM_REPO_URL
from userbot.events import register
basedir = path.abspath(path.curdir)
async def gen_chlog(repo, diff):
    """Build a human-readable changelog for the commits selected by *diff*.

    One line per commit, formatted as ``•[dd/mm/yy]: summary <author>``.
    Returns the empty string when there are no commits.
    """
    date_format = "%d/%m/%y"
    lines = [
        f'•[{commit.committed_datetime.strftime(date_format)}]: '
        f'{commit.summary} <{commit.author}>\n'
        for commit in repo.iter_commits(diff)
    ]
    return ''.join(lines)
@register(outgoing=True, pattern="^.update(?: |$)(.*)")
async def upstream(ups):
"For .update command, check if the bot is up to date, update if specified"
await ups.edit("`Checking for updates, please wait....`")
conf = ups.pattern_match.group(1).lower()
try:
txt = "`Oops.. Updater cannot continue due to some problems occured`\n\n**LOGTRACE:**\n"
repo = Repo(basedir)
except NoSuchPathError as error:
await ups.edit(f'{txt}\n`directory {error} is not found`')
repo.__del__()
return
except GitCommandError as error:
await ups.edit(f'{txt}\n`Early failure! {error}`')
repo.__del__()
return
except InvalidGitRepositoryError:
repo = Repo.init(basedir)
origin = repo.create_remote('upstream', UPSTREAM_REPO_URL)
if not origin.exists():
await ups.edit(f'{txt}\n`The upstream remote is invalid.`')
repo.__del__()
return
origin.fetch()
repo.create_head('master', origin.refs.master)
repo.heads.master.checkout(True)
ac_br = repo.active_branch.name
if ac_br != "master":
await ups.edit(
f'**[UPDATER]:**` Looks like you are using your own custom branch ({ac_br}). \
in that case, Updater is unable to identify which branch is to be merged. \
please checkout to the official branch`')
return
try:
repo.create_remote('upstream', OFFICIAL_UPSTREAM_REPO)
except BaseException:
pass
ups_rem = repo.remote('upstream')
ups_rem.fetch(ac_br)
changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}')
if not changelog:
await ups.edit(f'`Your BOT is` **up-to-date** `with` **{ac_br}**')
return
if conf != "now":
changelog_str = f'**New UPDATE available for [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`'
if len(changelog_str) > 4096:
await ups.edit("`Changelog is too big, sending it as a file.`")
file = open("output.txt", "w+")
file.write(changelog_str)
file.close()
await ups.client.send_file(
ups.chat_id,
"output.txt",
reply_to=ups.id,
)
remove("output.txt")
else:
await ups.edit(changelog_str)
await ups.respond(
"`do \".update now\" to update`")
return
await ups.edit('`New update found, updating...`')
if HEROKU_MEMEZ:
if not HEROKU_APIKEY or not HEROKU_APPNAME:
await ups.edit(f'{txt}\n`Missing Heroku credentials for updating userbot dyno.`')
return
else:
import heroku3
heroku = heroku3.from_key(HEROKU_APIKEY)
heroku_app = None
heroku_applications = heroku.apps()
for app in heroku_applications:
if app.name == str(HEROKU_APPNAME):
heroku_app = app
break
for build in heroku_app.builds():
if build.status == "pending":
await ups.edit('`There seems to be an ongoing build for a previous update, please wait for it to finish.`')
return
heroku_git_url = f"https://api:{HEROKU_APIKEY}@git.heroku.com/{app.name}.git"
if "heroku" in repo.remotes:
repo.remotes['heroku'].set_url(heroku_git_url)
else:
repo.create_remote("heroku", heroku_git_url)
app.enable_feature('runtime-dyno-metadata')
await ups.edit(f"`[HEROKU MEMEZ] Dyno build in progress for app {HEROKU_APPNAME}`\
\nCheck build progress [here](https://dashboard.heroku.com/apps/{HEROKU_APPNAME}/activity).")
remote = repo.remotes['heroku']
try:
remote.push(refspec=f'{repo.active_branch.name}:master', force=True)
except GitCommandError as error:
await ups.edit(f"{txt}\n`Here's the error log: {error}`")
repo.__del__()
else:
repo.__del__()
ups_rem.fetch(ac_br)
repo.git.reset('--hard', 'FETCH_HEAD')
await ups.edit('`Successfully Updated!\n'
'Bot is restarting... Wait for a while!`')
await bot.disconnect()
# Spin a new instance of bot
execl(sys.executable, sys.executable, *sys.argv)
# Register the help text for the ".update" command in the bot's global
# command-help table (CMD_HELP is defined elsewhere in this module).
CMD_HELP.update({
    'update':
    ".update\
\nUsage: Checks if the main userbot repository has any updates and shows a changelog if so.\
\n\n.update now\
\nUsage: Updates your userbot, if there are any updates in the main userbot repository."
})
| 35.717949 | 127 | 0.600682 |
1a010b7675f9a5608c3809beb1b851f606daba40 | 11,084 | py | Python | Doc/api/conf.py | uci-ics-32/biopython | ff7d3703d442192a1f6d84c52e028d566d44ff1c | [
"BSD-3-Clause"
] | 1 | 2022-01-04T21:38:03.000Z | 2022-01-04T21:38:03.000Z | Doc/api/conf.py | uci-ics-32/biopython | ff7d3703d442192a1f6d84c52e028d566d44ff1c | [
"BSD-3-Clause"
] | 1 | 2019-02-24T18:24:56.000Z | 2019-02-27T02:31:56.000Z | Doc/api/conf.py | uci-ics-32/biopython | ff7d3703d442192a1f6d84c52e028d566d44ff1c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""Biopython Sphinx documentation build configuration file.
After generating ``*.rst`` files from the source code, this
file controls running ``sphinx-build`` to turn these into
human readable documentation.
"""
import os
import shutil
import sys
import tempfile
from sphinx.ext import autodoc
from Bio import __version__, Application
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
needs_sphinx = "1.8"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
# Don't want to include source code in the API docs
# 'sphinx.ext.viewcode',
"sphinx.ext.autosummary",
"numpydoc",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Biopython"
copyright = "1999-2020, The Biopython Contributors"
author = "The Biopython Contributors"
document = "Biopython API Documentation"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__ # TODO: Shorten this
# The full version, including alpha/beta/rc tags.
release = __version__
# Versions for versions.html:
# (this will break if we have version gaps)
# Derive the previous release number for the version-switcher links.
try:
    # Development builds have three dot-separated parts, e.g. "1.79.dev0".
    main_version, minor_version, _ = version.split(".")  # e.g. 1.79.dev0
    dev_version = True
except ValueError:
    # Stable releases have only two parts, e.g. "1.78".
    main_version, minor_version = version.split(".")  # e.g. 1.78
    dev_version = False
# For a dev build "1.X.dev0", release 1.(X-1) is not out yet, so the last
# *published* release is two minor numbers back; otherwise just one back.
prev_minor_version = int(minor_version) - (2 if dev_version else 1)
previous_version = f"{main_version}.{prev_minor_version}"
# (label, relative URL) pairs consumed by the theme's versions.html footer.
versions = [
    ("Previous", f"../../{previous_version}/api/"),
    ("Latest", "../../latest/api/"),
    ("Develop", "../../dev/api/"),
]
# NOTE(review): lexicographic string comparison — adequate for "1.7x" numbers.
if version < "1.75":  # 1.74 is the earliest Sphinx-generated api documentation
    del versions[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for autodoc --------------------------------------------------
# This requires Sphinx 1.8 or later:
autodoc_default_options = {
"members": None,
"undoc-members": None,
"special-members": None,
"show-inheritance": None,
"member-order": "bysource",
"exclude-members": "__dict__,__weakref__,__module__",
}
# To avoid import errors.
autodoc_mock_imports = ["MySQLdb", "Bio.Restriction.Restriction"]
if version > "1.77":
autodoc_mock_imports.append("Bio.Alphabet")
# -- Options for HTML output ----------------------------------------------
# Sphinx default was html_theme = "alabaster"
html_theme = "sphinx_rtd_theme"
# Sphinx Read The Docs theme settings, see
# https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html
html_theme_options = {
"prev_next_buttons_location": "both",
# Same a Hyde theme sidebar on biopython.org:
"style_nav_header_background": "#10100F",
# Since we have the Biopython logo via html_logo,
"logo_only": True,
}
# Based on:
# https://github.com/readthedocs/sphinx_rtd_theme/issues/231#issuecomment-126447493
html_context = {
"display_github": True, # Add 'Edit on Github' link instead of 'View page source'
"github_user": "biopython",
"github_repo": "biopython",
"github_version": "master",
"conf_py_path": "/Doc/api/",
# "source_suffix": source_suffix,
"theme_display_version": False,
# Biopython-specific values for version-footer (versions.html):
"display_version_footer": True,
"current_version": version,
"versions": versions,
"project_home_url": "https://biopython.org",
"project_github_url": "https://github.com/biopython/biopython",
}
html_logo = "../images/biopython_logo_white.png"
# The RST source is transient, don't need/want to include it
html_show_sourcelink = False
html_copy_source = False
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The following is not applicable to the Read-the-docs theme:
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
# "**": [
# "about.html",
# "navigation.html",
# "relations.html", # needs 'show_related': True theme option to display
# "searchbox.html",
# "donate.html",
# ]
# }
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "Biopython_doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, "Biopython_API.tex", document, author, "manual")]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "biopython", document, [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Biopython",
document,
author,
"Biopython",
"Collection of modules for dealing with biological data in Python.",
"Miscellaneous",
)
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = document # project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Options for numpydoc -------------------------------------------------
numpydoc_class_members_toctree = False
# Prevents the attributes and methods from being shown twice
numpydoc_show_class_members = False
# -- Magic to run sphinx-apidoc automatically -----------------------------
# See https://github.com/rtfd/readthedocs.org/issues/1139
# on which this is based.
def insert_github_link(filename):
    """Prepend ``:github_url:`` metadata so theme breadcrumbs link to GitHub.

    ``filename`` is a dotted-module RST file in the current directory
    (e.g. ``Bio.Seq.rst``).  The file is left untouched if it already
    carries the metadata, or if no matching Python source can be found
    (a warning is printed to stderr in that case).
    """
    assert "/" not in filename and filename.endswith(".rst")
    with open(filename) as rst:
        contents = rst.read()
    if ":github_url:" in contents:
        # Already annotated (e.g. on a previous run) — nothing to do.
        return
    # Map "Bio.Seq.rst" to "Bio/Seq/__init__.py" (package) or "Bio/Seq.py"
    # (plain module), checked against the repository root two levels up.
    dotted = filename[:-4]
    python = dotted.replace(".", "/") + "/__init__.py"
    if not os.path.isfile(os.path.join("../../", python)):
        python = dotted.replace(".", "/") + ".py"
        if not os.path.isfile(os.path.join("../../", python)):
            sys.stderr.write(
                "WARNING: Could not map %s to a Python file, e.g. %s\n" % (filename, python)
            )
            return
    header = ":github_url: https://github.com/%s/%s/blob/%s/%s\n\n" % (
        html_context["github_user"],
        html_context["github_repo"],
        html_context["github_version"],
        python,
    )
    with open(filename, "w") as rst:
        rst.write(header + contents)
def run_apidoc(_):
    """Run sphinx-apidoc over the Bio and BioSQL packages.

    Connected to Sphinx's "builder-inited" event; the (unused) argument
    is the Sphinx application object.  Generated ``*.rst`` stubs are
    staged in a temporary directory (our own index.rst/conf.py/Makefile
    must not be clobbered), moved next to this conf.py, and finally
    annotated with per-file GitHub links.
    """
    from sphinx.ext.apidoc import main as apidoc_main
    here = os.path.abspath(os.path.dirname(__file__))
    staging = tempfile.mkdtemp()
    # One apidoc run per top-level package, into the same staging area.
    for package in ("../../BioSQL", "../../Bio"):
        apidoc_main(["-e", "-F", "-o", staging, package])
    # We ship our own index.rst, so the generated one is discarded.
    os.remove(os.path.join(staging, "index.rst"))
    for name in os.listdir(staging):
        if name.endswith(".rst"):
            shutil.move(os.path.join(staging, name), os.path.join(here, name))
    shutil.rmtree(staging)
    for name in os.listdir(here):
        if name.startswith("Bio") and name.endswith(".rst"):
            insert_github_link(name)
class BioPythonAPI(autodoc.ClassDocumenter):
    """Custom Class Documenter for AbstractCommandline classes."""
    def import_object(self):
        """Import the documented class, instantiating AbstractCommandline subclasses.

        NOTE(review): instantiating the wrapper presumably triggers
        attribute setup that autodoc then picks up — TODO confirm against
        Bio.Application.AbstractCommandline.__init__.
        """
        ret = super().import_object()
        # Only AbstractCommandline subclasses get the extra instantiation step.
        if not issubclass(self.object, Application.AbstractCommandline):
            return ret
        try:
            # If the object is an AbstractCommandline we instantiate it.
            self.object()
        except TypeError:
            # Throws if the object is the base AbstractCommandline class
            pass
        return ret
def setup(app):
    """Sphinx extension hook: trigger apidoc and install our documenter."""
    def _register_documenter(sphinx_app, env, docnames):
        # Swap in the AbstractCommandline-aware class documenter.
        sphinx_app.add_autodocumenter(BioPythonAPI, True)
    app.connect("builder-inited", run_apidoc)
    app.add_css_file("biopython.css")
    app.connect("env-before-read-docs", _register_documenter)
| 31.759312 | 88 | 0.655179 |
13193f19af9bcecbaccdf223ed23b3e4f733945a | 1,760 | py | Python | core/migrations/0001_initial.py | vitorpvcampos/gomenu | e61d9d843a191dfca9c8ce0f587075cbd84319fe | [
"MIT"
] | 4 | 2020-08-13T12:07:33.000Z | 2022-02-20T11:18:54.000Z | core/migrations/0001_initial.py | vitorpvcampos/gomenu | e61d9d843a191dfca9c8ce0f587075cbd84319fe | [
"MIT"
] | 246 | 2020-06-30T14:28:22.000Z | 2022-03-27T14:51:58.000Z | core/migrations/0001_initial.py | vitorpvcampos/gomenu | e61d9d843a191dfca9c8ce0f587075cbd84319fe | [
"MIT"
] | 1 | 2020-08-13T12:06:50.000Z | 2020-08-13T12:06:50.000Z | # Generated by Django 3.0.7 on 2020-06-29 21:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: creates the Company and UserProfile tables.

    Auto-generated by Django 3.0.7; the field definitions below must stay
    in sync with the migration state Django has recorded, so only
    comments are added here.
    """
    initial = True
    dependencies = [
        # UserProfile links one-to-one to the project's configured user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Singleton-style table holding the company's public contact details
        # (verbose names are in Portuguese).
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='Nome')),
                ('address', models.CharField(max_length=100, verbose_name='Endereço')),
                ('phone', models.CharField(max_length=20, verbose_name='Telefone')),
                ('website', models.URLField(verbose_name='Website')),
                ('facebook', models.URLField(verbose_name='Facebook')),
                ('instagram', models.URLField(verbose_name='Instagram')),
            ],
            options={
                'verbose_name': 'Informações da Empresa',
                'verbose_name_plural': 'Informações da Empresa',
            },
        ),
        # Per-user profile with an optional avatar image; removed together
        # with its user via CASCADE.
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avatar', models.ImageField(blank=True, null=True, upload_to='profiles/')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Perfil',
                'verbose_name_plural': 'Profiles',
            },
        ),
    ]
| 38.26087 | 121 | 0.583523 |
d26e6e565b59a0e99d8b110031c2a5c6d24c53dc | 267 | py | Python | server/users/models.py | dmitrytk/horizon | 8fc130d9d619de0e7ad7aad24d1e6f457bc68df0 | [
"MIT"
] | null | null | null | server/users/models.py | dmitrytk/horizon | 8fc130d9d619de0e7ad7aad24d1e6f457bc68df0 | [
"MIT"
] | null | null | null | server/users/models.py | dmitrytk/horizon | 8fc130d9d619de0e7ad7aad24d1e6f457bc68df0 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.db import models
class Employee(models.Model):
    """Extra HR data attached one-to-one to Django's built-in User."""
    # Deleting the User cascades to this Employee row.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    department = models.CharField(max_length=255)
    position = models.CharField(max_length=255)

    def __str__(self):
        """Readable label for the admin and shell (Django best practice)."""
        return f"{self.user} ({self.position})"
| 29.666667 | 63 | 0.779026 |
05aa5b3775b0b70a1b82a56acbb6b31f9be13b4a | 17,837 | py | Python | Bio/motifs/__init__.py | phillord/biopython | c8dfe46f192d6ccfac94b156cef024776545638e | [
"PostgreSQL"
] | 1 | 2020-12-03T12:13:58.000Z | 2020-12-03T12:13:58.000Z | Bio/motifs/__init__.py | phillord/biopython | c8dfe46f192d6ccfac94b156cef024776545638e | [
"PostgreSQL"
] | null | null | null | Bio/motifs/__init__.py | phillord/biopython | c8dfe46f192d6ccfac94b156cef024776545638e | [
"PostgreSQL"
] | null | null | null | # Copyright 2003-2009 by Bartek Wilczynski. All rights reserved.
# Copyright 2012-2013 by Michiel JL de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tools for sequence motif analysis.
Bio.motifs contains the core Motif class containing various I/O methods
as well as methods for motif comparisons and motif searching in sequences.
It also includes functionality for parsing output from the AlignACE, MEME,
and MAST programs, as well as files in the TRANSFAC format.
Bio.motifs is replacing the older and now obsolete Bio.Motif module.
"""
from __future__ import print_function
import math
def create(instances, alphabet=None):
    """Build a Motif from an iterable of equal-length sequence instances."""
    return Motif(instances=Instances(instances, alphabet), alphabet=alphabet)
def parse(handle, format):
    """Parse an output file of a motif finding program.

    Currently supported formats (case is ignored):
     - AlignAce: AlignAce output file format
     - MEME: MEME output file motif
     - MAST: MAST output file motif
     - TRANSFAC: TRANSFAC database file format
     - pfm: JASPAR-style position-frequency matrix
     - jaspar: JASPAR-style multiple PFM format
     - sites: JASPAR-style sites file

    The pfm and sites formats hold a single motif per file, so for those
    Bio.motifs.read() is usually the more convenient entry point.

    For example:

    >>> from Bio import motifs
    >>> for m in motifs.parse(open("Motif/alignace.out"),"AlignAce"):
    ...     print(m.consensus)
    TCTACGATTGAG
    CTGCAGCTAGCTACGAGTGAG
    GTGCTCTAAGCATAGTAGGCG
    GCCACTAGCAGAGCAGGGGGC
    CGACTCAGAGGTT
    CCACGCTAAGAGAGGTGCCGGAG
    GCGCGTCGCTGAGCA
    GTCCATCGCAAAGCGTGGGGC
    GGGATCAGAGGGCCG
    TGGAGGCGGGG
    GACCAGAGCTTCGCATGGGGG
    GGCGTGCGTG
    GCTGGTTGCTGTTCATTAGG
    GCCGGCGGCAGCTAAAAGGG
    GAGGCCGGGGAT
    CGACTCGTGCTTAGAAGG
    """
    # Normalize once; importers are loaded lazily, per format, as before.
    fmt = format.lower()
    if fmt == "alignace":
        from Bio.motifs import alignace
        return alignace.read(handle)
    elif fmt == "meme":
        from Bio.motifs import meme
        return meme.read(handle)
    elif fmt == "mast":
        from Bio.motifs import mast
        return mast.read(handle)
    elif fmt == "transfac":
        from Bio.motifs import transfac
        return transfac.read(handle)
    elif fmt in ('pfm', 'sites', 'jaspar'):
        from Bio.motifs import jaspar
        return jaspar.read(handle, fmt)
    raise ValueError("Unknown format %s" % fmt)
def read(handle, format):
    """Read exactly one motif from a handle in the specified file format.

    Accepts the same formats as Bio.motifs.parse(), but only for files
    holding a single motif.  For example, a JASPAR-style pfm file:

    >>> from Bio import motifs
    >>> m = motifs.read(open("motifs/SRF.pfm"), "pfm")
    >>> m.consensus
    Seq('GCCCATATATGG', IUPACUnambiguousDNA())

    Or a single-motif MEME file,

    >>> from Bio import motifs
    >>> m = motifs.read(open("motifs/meme.out"),"meme")
    >>> m.consensus
    Seq('CTCAATCGTA', IUPACUnambiguousDNA())

    A handle with zero motifs, or with several, raises ValueError:

    >>> from Bio import motifs
    >>> motif = motifs.read(open("motifs/alignace.out"),"AlignAce")
    Traceback (most recent call last):
        ...
    ValueError: More than one motif found in handle

    To take just the first motif of a multi-motif file, parse and index:

    >>> from Bio import motifs
    >>> record = motifs.parse(open("motifs/alignace.out"),"alignace")
    >>> motif = record[0]
    >>> motif.consensus
    Seq('TCTACGATTGAG', IUPACUnambiguousDNA())

    Use the Bio.motifs.parse(handle, format) function if you want
    to read multiple records from the handle.
    """
    found = parse(handle, format.lower())
    if not found:
        raise ValueError("No motifs found in handle")
    if len(found) > 1:
        raise ValueError("More than one motif found in handle")
    return found[0]
class Instances(list):
    """A list of instances (occurrences) of a sequence motif.

    All instances must have the same length; plain strings are promoted
    to Seq objects sharing a single alphabet (DNA is assumed when no
    alphabet can be deduced).
    """
    def __init__(self, instances=None, alphabet=None):
        """Build from an iterable of equal-length strings or Seq objects.

        Raises ValueError if instances differ in length or carry
        conflicting alphabets.
        """
        from Bio.Alphabet import IUPAC
        from Bio.Seq import Seq
        if instances is None:
            # The original signature used a mutable default (instances=[]);
            # a None sentinel is the safe, behaviorally identical idiom.
            instances = []
        self.length = None
        for instance in instances:
            if self.length is None:
                self.length = len(instance)
            elif self.length != len(instance):
                message = "All instances should have the same length (%d found, %d expected)" % (len(instance), self.length)
                raise ValueError(message)
            try:
                a = instance.alphabet
            except AttributeError:
                # The instance is a plain string
                continue
            if alphabet is None:
                alphabet = a
            elif alphabet != a:
                raise ValueError("Alphabets are inconsistent")
        if alphabet is None or alphabet.letters is None:
            # If we didn't get a meaningful alphabet from the instances,
            # assume it is DNA.
            alphabet = IUPAC.unambiguous_dna
        for instance in instances:
            if not isinstance(instance, Seq):
                sequence = str(instance)
                instance = Seq(sequence, alphabet=alphabet)
            self.append(instance)
        self.alphabet = alphabet
    def __str__(self):
        """Return the instances, one per line."""
        text = ""
        for instance in self:
            text += str(instance) + "\n"
        return text
    def count(self):
        """Return a dict mapping each alphabet letter to per-position counts."""
        counts = {}
        for letter in self.alphabet.letters:
            counts[letter] = [0] * self.length
        for instance in self:
            for position, letter in enumerate(instance):
                counts[letter][position] += 1
        return counts
    def search(self, sequence):
        """Generate (position, instance) pairs for motif occurrences in sequence."""
        # range() instead of the Python 2-only xrange() so this method
        # also works on Python 3.
        for pos in range(0, len(sequence) - self.length + 1):
            for instance in self:
                if str(instance) == str(sequence[pos:pos + self.length]):
                    yield (pos, instance)
                    break  # no other instance will fit (we don't want to return multiple hits)
    def reverse_complement(self):
        """Return a new Instances object with every instance reverse-complemented."""
        instances = Instances(alphabet=self.alphabet)
        instances.length = self.length
        for instance in self:
            instance = instance.reverse_complement()
            instances.append(instance)
        return instances
class Motif(object):
    """A sequence motif: counts and/or instances plus derived PWM/PSSM matrices."""
    def __init__(self, alphabet=None, instances=None, counts=None):
        """Initialize from either instances or a counts matrix (not both)."""
        from . import matrix
        from Bio.Alphabet import IUPAC
        self.name = ""
        if counts is not None and instances is not None:
            # Bug fix: was "raise Exception(ValueError, ...)", which raised a
            # bare Exception carrying the ValueError *class* as payload.
            raise ValueError(
                "Specify either instances or counts, don't specify both")
        elif counts is not None:
            if alphabet is None:
                alphabet = IUPAC.unambiguous_dna
            self.instances = None
            self.counts = matrix.FrequencyPositionMatrix(alphabet, counts)
            self.length = self.counts.length
        elif instances is not None:
            self.instances = instances
            alphabet = self.instances.alphabet
            counts = self.instances.count()
            self.counts = matrix.FrequencyPositionMatrix(alphabet, counts)
            self.length = self.counts.length
        else:
            self.counts = None
            self.instances = None
            self.length = None
            if alphabet is None:
                alphabet = IUPAC.unambiguous_dna
        self.alphabet = alphabet
        self.pseudocounts = None
        self.background = None
        self.mask = None
    def __get_mask(self):
        return self.__mask
    def __set_mask(self, mask):
        """Validate and normalize the mask to a tuple of 0/1 ints."""
        if self.length is None:
            self.__mask = ()
        elif mask is None:
            self.__mask = (1,) * self.length
        elif len(mask) != self.length:
            # Bug fix: the message was never %-formatted (the args tuple was
            # passed as a second ValueError argument).
            raise ValueError(
                "The length (%d) of the mask is inconsistent with the length (%d) of the motif"
                % (len(mask), self.length))
        elif isinstance(mask, str):
            # A string mask uses '*' for kept positions and ' ' for masked ones.
            self.__mask = []
            for char in mask:
                if char == "*":
                    self.__mask.append(1)
                elif char == " ":
                    self.__mask.append(0)
                else:
                    raise ValueError("Mask should contain only '*' or ' ' and not a '%s'" % char)
            self.__mask = tuple(self.__mask)
        else:
            self.__mask = tuple(int(bool(c)) for c in mask)
    mask = property(__get_mask, __set_mask)
    del __get_mask
    del __set_mask
    def __get_pseudocounts(self):
        return self._pseudocounts
    def __set_pseudocounts(self, value):
        """Accept a per-letter dict or a single value applied to every letter."""
        self._pseudocounts = {}
        if isinstance(value, dict):
            self._pseudocounts = dict((letter, value[letter]) for letter in self.alphabet.letters)
        else:
            if value is None:
                value = 0.0
            self._pseudocounts = dict.fromkeys(self.alphabet.letters, value)
    pseudocounts = property(__get_pseudocounts, __set_pseudocounts)
    del __get_pseudocounts
    del __set_pseudocounts
    def __get_background(self):
        return self._background
    def __set_background(self, value):
        """Accept a per-letter dict, None (uniform), or a GC fraction (DNA only)."""
        if isinstance(value, dict):
            self._background = dict((letter, value[letter]) for letter in self.alphabet.letters)
        elif value is None:
            self._background = dict.fromkeys(self.alphabet.letters, 1.0)
        else:
            if sorted(self.alphabet.letters) != ["A", "C", "G", "T"]:
                # ValueError (was a bare Exception) — still caught by
                # any existing "except Exception" handlers.
                raise ValueError(
                    "Setting the background to a single value only works for DNA motifs (in which case the value is interpreted as the GC content")
            self._background['A'] = (1.0 - value) / 2.0
            self._background['C'] = value / 2.0
            self._background['G'] = value / 2.0
            self._background['T'] = (1.0 - value) / 2.0
        # Normalize so the background frequencies sum to 1.
        total = sum(self._background.values())
        for letter in self.alphabet.letters:
            self._background[letter] /= total
    background = property(__get_background, __set_background)
    del __get_background
    del __set_background
    @property
    def pwm(self):
        """Position weight matrix: counts normalized with the pseudocounts."""
        return self.counts.normalize(self._pseudocounts)
    @property
    def pssm(self):
        """Position-specific scoring matrix: log-odds of the PWM vs background."""
        return self.pwm.log_odds(self._background)
    def __str__(self, masked=False):
        """String representation of the motif (its instances, plus the mask if asked)."""
        text = ""
        if self.instances is not None:
            text += str(self.instances)
        if masked:
            # range() rather than Python 2's xrange() for Python 3 compatibility.
            for i in range(self.length):
                if self.__mask[i]:
                    text += "*"
                else:
                    text += " "
            text += "\n"
        return text
    def __len__(self):
        """Return the length of the motif (0 when undefined).

        Please use this method (i.e. invoke len(m)) instead of referring
        to m.length directly.
        """
        if self.length is None:
            return 0
        else:
            return self.length
    def reverse_complement(self):
        """Return the reverse complement of the motif as a new Motif."""
        alphabet = self.alphabet
        if self.instances is not None:
            instances = self.instances.reverse_complement()
            res = Motif(instances=instances, alphabet=alphabet)
        else:  # has counts
            # NOTE(review): res.counts becomes a plain dict here, not a
            # FrequencyPositionMatrix as in __init__ — preserved as-is.
            res = Motif(alphabet)
            res.counts = {}
            res.counts["A"] = self.counts["T"][::-1]
            res.counts["T"] = self.counts["A"][::-1]
            res.counts["G"] = self.counts["C"][::-1]
            res.counts["C"] = self.counts["G"][::-1]
            res.length = self.length
        res.__mask = self.__mask[::-1]
        return res
    @property
    def consensus(self):
        """Return the consensus sequence."""
        return self.counts.consensus
    @property
    def anticonsensus(self):
        """Return the least probable pattern to be generated from this motif."""
        return self.counts.anticonsensus
    @property
    def degenerate_consensus(self):
        """Return the degenerate (IUPAC-ambiguity) consensus sequence.

        Following the rules adapted from
        D. R. Cavener: "Comparison of the consensus sequence flanking
        translational start sites in Drosophila and vertebrates."
        Nucleic Acids Research 15(4): 1353-1361. (1987).
        The same rules are used by TRANSFAC.
        """
        return self.counts.degenerate_consensus
    def weblogo(self, fname, format="PNG", version="2.8.2", **kwds):
        """Download a sequence logo of this motif from the WebLogo server.

        Requires an internet connection.  The parameters from **kwds are
        passed directly to the WebLogo server (version 3.3); see
        http://weblogo.threeplusone.com for the available options and
        their defaults, which match the ``values`` dict below.
        """
        # Python 3 moved urllib2/urllib functionality; support both.
        try:
            from urllib.request import urlopen, Request
            from urllib.parse import urlencode
        except ImportError:  # Python 2
            from urllib2 import urlopen, Request
            from urllib import urlencode
        frequencies = self.format('transfac')
        url = 'http://weblogo.threeplusone.com/create.cgi'
        values = {'sequences': frequencies,
                  'format': format.lower(),
                  'stack_width': 'medium',
                  'stack_per_line': '40',
                  'alphabet': 'alphabet_dna',
                  'ignore_lower_case': True,
                  'unit_name': "bits",
                  'first_index': '1',
                  'logo_start': '1',
                  'logo_end': str(self.length),
                  'composition': "comp_auto",
                  'percentCG': '',
                  'scale_width': True,
                  'show_errorbars': True,
                  'logo_title': '',
                  'logo_label': '',
                  'show_xaxis': True,
                  'xaxis_label': '',
                  'show_yaxis': True,
                  'yaxis_label': '',
                  'yaxis_scale': 'auto',
                  'yaxis_tic_interval': '1.0',
                  'show_ends': True,
                  'show_fineprint': True,
                  'color_scheme': 'color_auto',
                  'symbols0': '',
                  'symbols1': '',
                  'symbols2': '',
                  'symbols3': '',
                  'symbols4': '',
                  'color0': '',
                  'color1': '',
                  'color2': '',
                  'color3': '',
                  'color4': '',
                  }
        # A False for a boolean option must be sent as the empty string.
        for k, v in kwds.items():  # items() works on Python 2 and 3
            if isinstance(values[k], bool):
                if not v:
                    v = ""
            values[k] = str(v)
        data = urlencode(values).encode("ascii")
        req = Request(url, data)
        response = urlopen(req)
        # Binary mode: the response is image data.  Text mode ("w") corrupts
        # it on Windows and fails outright on Python 3.
        with open(fname, "wb") as handle:
            handle.write(response.read())
    def format(self, format):
        """Return a string representation of the Motif in a given format.

        Currently supported formats:
         - pfm : JASPAR single Position Frequency Matrix
         - jaspar : JASPAR multiple Position Frequency Matrix
         - transfac : TRANSFAC like files
        """
        if format in ('pfm', 'jaspar'):
            from Bio.motifs import jaspar
            motifs = [self]
            return jaspar.write(motifs, format)
        elif format == "transfac":
            from Bio.motifs import transfac
            motifs = [self]
            return transfac.write(motifs)
        else:
            raise ValueError("Unknown format type %s" % format)
def write(motifs, format):
    """Return a string representation of motifs in a given format.

    Currently supported formats (case is ignored):
     - pfm : JASPAR simple single Position Frequency Matrix
     - jaspar : JASPAR multiple PFM format
     - transfac : TRANSFAC like files
    """
    fmt = format.lower()
    if fmt in ("pfm", "jaspar"):
        from Bio.motifs import jaspar
        return jaspar.write(motifs, fmt)
    if fmt == "transfac":
        from Bio.motifs import transfac
        return transfac.write(motifs)
    raise ValueError("Unknown format type %s" % fmt)
| 34.105163 | 159 | 0.574088 |
35d49516960617ed71b4063b404ceb027d5b016e | 507 | py | Python | 0x0A-python-inheritance/100-my_int.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | e830f969d3ca71abf0a2f6d4f7c64a82337eccd7 | [
"MIT"
] | null | null | null | 0x0A-python-inheritance/100-my_int.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | e830f969d3ca71abf0a2f6d4f7c64a82337eccd7 | [
"MIT"
] | null | null | null | 0x0A-python-inheritance/100-my_int.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | e830f969d3ca71abf0a2f6d4f7c64a82337eccd7 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
Contains the class MyInt
"""
class MyInt(int):
    """Contrarian integer: its == and != operators are deliberately swapped."""
    def __new__(cls, *args, **kwargs):
        """Construct the instance exactly like a plain int."""
        return super(MyInt, cls).__new__(cls, *args, **kwargs)
    def __eq__(self, other):
        """Answer equality with the *inequality* test — opposite day."""
        plain = int(self)
        return plain != other
    def __ne__(self, other):
        """Answer inequality with the *equality* test — opposite day."""
        plain = int(self)
        return plain == other
| 25.35 | 65 | 0.546351 |
40c8ce0e49eace382dfcf7a452c82520719ffca7 | 1,953 | py | Python | scripts/bin_to_mem.py | appotry/openofdm | 0ab83ce22385c132f055ef85dcbcea8eeffd77f0 | [
"Apache-2.0"
] | 200 | 2017-05-03T19:17:11.000Z | 2022-03-22T00:19:50.000Z | scripts/bin_to_mem.py | jools76/openofdm | 229da948ae4df55fb3b6d9a055ca3e75079e50b1 | [
"Apache-2.0"
] | 8 | 2018-05-08T12:01:56.000Z | 2021-12-09T13:54:44.000Z | scripts/bin_to_mem.py | jools76/openofdm | 229da948ae4df55fb3b6d9a055ca3e75079e50b1 | [
"Apache-2.0"
] | 78 | 2017-05-12T09:36:17.000Z | 2022-03-28T14:38:07.000Z | #!/usr/bin/env python
"""
Convert a binary data file (generated by rx_samples_to_file) to a memroy text
file that can be read by Verilog's $readmemh.
"""
import argparse
import os
import struct
import sys
import time
CHUNK_SIZE = 2**20
def le_bin_str_to_signed_short(b):
    """Decode a 2-byte little-endian value as a signed 16-bit integer.

    Uses struct.unpack, which fixes two defects of the hand-rolled
    version: the off-by-one ``v > (1 << 15)`` test decoded 0x8000 as
    +32768 instead of -32768, and ``ord(b[i])`` broke on Python 3 bytes.
    """
    return struct.unpack("<h", b)[0]
def signed_short_to_hex_str(n):
    """Render n as four lowercase hex digits of its 16-bit two's-complement value."""
    return "%04x" % (n & 0xFFFF)
def main():
    """Convert the I/Q binary capture named on the command line to hex text.

    Each 4-byte record holds two little-endian signed 16-bit samples
    (I then Q); each output line is their zero-padded hex, I first,
    suitable for Verilog's $readmemh.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('file', help="Binary file.")
    parser.add_argument('--out', help="Output file.")
    parser.add_argument('--scale', type=int, default=1)
    args = parser.parse_args()
    if args.out is None:
        args.out = '%s.txt' % (os.path.splitext(args.file)[0])
    # time.clock() was removed in Python 3.8; time.time() works everywhere
    # and is what we want for wall-clock ETA anyway.
    begin = time.time()
    byte_count = 0
    total_bytes = os.path.getsize(args.file)
    with open(args.file, 'rb') as infile:
        with open(args.out, 'wb', buffering=2**26) as output:
            while True:
                chunk = infile.read(CHUNK_SIZE)  # renamed: 'bytes' shadowed the builtin
                if len(chunk) == 0:
                    break
                for i in range(0, len(chunk), 4):
                    # Floor division (//) keeps the samples integral: plain /
                    # yields floats on Python 3, which breaks hex formatting.
                    # On Python 2 int/int was floor division, so behavior is
                    # unchanged there.
                    I = le_bin_str_to_signed_short(chunk[i:i+2]) // args.scale
                    Q = le_bin_str_to_signed_short(chunk[i+2:i+4]) // args.scale
                    line = '%s%s\n' % (signed_short_to_hex_str(I),
                                       signed_short_to_hex_str(Q))
                    output.write(line.encode('ascii'))
                byte_count += len(chunk)
                # Guard against a zero elapsed time on coarse clocks.
                elapsed = max(time.time() - begin, 1e-9)
                speed = byte_count / elapsed
                eta = (total_bytes - byte_count) / speed
                progress = '%d / %d B\tSpeed: %.1f B/s\t Elapsed: %d s\tETA: %d s' %\
                    (byte_count >> 20, total_bytes, speed, int(elapsed), int(eta))
                sys.stdout.write('\r%s' % (progress))
                sys.stdout.flush()
    sys.stdout.write('\n')


if __name__ == '__main__':
    main()
e6ab7cf1144de7c27f13fc811aea68227ac1866e | 3,378 | py | Python | configgen/generators/reicast/reicastControllers.py | pmoran13800/rhgamestation-configgen | 72cf636adeb6fa83abea66e0a85fe574405a0ede | [
"MIT"
] | null | null | null | configgen/generators/reicast/reicastControllers.py | pmoran13800/rhgamestation-configgen | 72cf636adeb6fa83abea66e0a85fe574405a0ede | [
"MIT"
] | null | null | null | configgen/generators/reicast/reicastControllers.py | pmoran13800/rhgamestation-configgen | 72cf636adeb6fa83abea66e0a85fe574405a0ede | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import ConfigParser
import rhgamestationFiles
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
import settings.unixSettings as unixSettings
import rhgamestationFiles
# Map an EmulationStation input name to the reicast config variable to set,
# keyed by the input's reported type (button / axis / hat). A single ES input
# may therefore map to different reicast variables depending on the hardware.
reicastMapping = { 'a' : {'button': 'btn_b'},
'b' : {'button': 'btn_a'},
'x' : {'button': 'btn_y'},
'y' : {'button': 'btn_x'},
'start' : {'button': 'btn_start'},
'hotkey' : {'button': 'btn_escape'},
'pageup' : {'axis': 'axis_trigger_left', 'button': 'btn_trigger_left'},
'pagedown' : {'axis': 'axis_trigger_right', 'button': 'btn_trigger_right'},
'joystick1left' : {'axis': 'axis_x'},
'joystick1up' : {'axis': 'axis_y'},
# The DPAD can be an axis (for gpio sticks for example) or a hat
'left' : {'hat': 'axis_dpad1_x', 'axis': 'axis_x', 'button': 'btn_dpad1_left'},
'up' : {'hat': 'axis_dpad1_y', 'axis': 'axis_y', 'button': 'btn_dpad1_up'},
'right' : {'button': 'btn_dpad1_right'},
'down' : {'button': 'btn_dpad1_down'},
# We are only interested in L2/R2 if they are axis, to have real dreamcasttriggers
'r2' : {'axis': 'axis_trigger_right'},
'l2' : {'axis': 'axis_trigger_left'}
}
# Which section of the generated .cfg (ini) file each reicast variable lives in.
sections = { 'emulator' : ['mapping_name', 'btn_escape'],
'dreamcast' : ['btn_a', 'btn_b', 'btn_c', 'btn_d', 'btn_z', 'btn_x', 'btn_y', 'btn_start', 'axis_x', 'axis_y', 'axis_trigger_left', 'axis_trigger_right', 'btn_dpad1_left', 'btn_dpad1_right', 'btn_dpad1_up', 'btn_dpad1_down', 'btn_dpad2_left', 'btn_dpad2_right', 'btn_dpad2_up', 'btn_dpad2_down'],
'compat' : ['axis_dpad1_x', 'axis_dpad1_y', 'btn_trigger_left', 'btn_trigger_right', 'axis_dpad2_x', 'axis_dpad2_y', 'axis_x_inverted', 'axis_y_inverted', 'axis_trigger_left_inverted', 'axis_trigger_right_inverted']
}
# Create the controller configuration file
# returns its name
def generateControllerConfig(controller):
    """Write the per-player reicast controller config file and return its path."""
    config_file_name = "{}/controllerP{}.cfg".format(
        rhgamestationFiles.reicastCustom, controller.player)
    parser = ConfigParser.ConfigParser()
    cfg_file = open(config_file_name, 'w+')
    # Create every ini section up front.
    for section_name in sections:
        parser.add_section(section_name)
    # Record the controller's human-readable name.
    parser.set("emulator", "mapping_name", controller.realName)
    # Walk the controller's inputs and translate each into a reicast variable.
    for index in controller.inputs:
        ctrl_input = controller.inputs[index]
        if ctrl_input.name not in reicastMapping:
            continue
        if ctrl_input.type not in reicastMapping[ctrl_input.name]:
            continue
        var = reicastMapping[ctrl_input.name][ctrl_input.type]
        # Locate which ini section this variable belongs to.
        for candidate in sections:
            if var in sections[candidate]:
                section = candidate
                break
        # Sadly, we don't get the right axis code for Y hats, so bump it by one.
        code = ctrl_input.code
        if ctrl_input.type == 'hat' and ctrl_input.name == 'up':
            code = int(ctrl_input.code) + 1
        parser.set(section, var, code)
    parser.write(cfg_file)
    cfg_file.close()
    return config_file_name
e51fdde13475a2fdbbc5e07b8090d6fafa405e6f | 1,074 | py | Python | Tester.py | betisb/WebProgrammin_I | 6e2e4525c8d7766d65785e30bb43234cd7d829ef | [
"MIT"
] | null | null | null | Tester.py | betisb/WebProgrammin_I | 6e2e4525c8d7766d65785e30bb43234cd7d829ef | [
"MIT"
] | null | null | null | Tester.py | betisb/WebProgrammin_I | 6e2e4525c8d7766d65785e30bb43234cd7d829ef | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
def test_assignment_0_hello(student_id, url):
    """Check that GET <url>/index succeeds and contains an <h1> element."""
    page = requests.get(url + "/index")
    assert page.status_code == 200
    assert "h1" in page.text.lower()
    #assert student_id in page.text
    print("Pass")
def test_assignment_0_login(student_id, url):
    """Check that GET <url>/login serves a usable login form."""
    page = requests.get(url + "/login")
    assert page.status_code == 200
    # Raw-text sanity checks before parsing.
    assert "<form" in page.text
    lowered = page.text.lower()
    for needle in ("submit", "username", "password"):
        assert needle in lowered
    # Structural checks on the parsed HTML.
    parsed = BeautifulSoup(page.text, 'html.parser')
    print(parsed.prettify())
    assert len(parsed.find_all("form")) > 0
    assert len(parsed.find_all("input")) > 2
    print(parsed.find_all('form'))
    print("pass")
if __name__ == "__main__":
    name = 'bbaheri'
    # url = "http://bbaheri.pythonanywhere.com"  # remote deployment
    # Fix: requests needs an explicit scheme; the original bare
    # "localhost:5000" raises requests.exceptions.MissingSchema.
    url = "http://localhost:5000"
    for test in [
        test_assignment_0_hello,
        test_assignment_0_login,
    ]:
        test(name, url)
483cc02f32cb024bb83b5eec5cec7432cd8defe0 | 164 | py | Python | case_bootloader/def_bootloader.py | eaglelaw/runcase | 87c88b592d2c4a1a68e3af069ebbd1c91367c91c | [
"Apache-2.0"
] | null | null | null | case_bootloader/def_bootloader.py | eaglelaw/runcase | 87c88b592d2c4a1a68e3af069ebbd1c91367c91c | [
"Apache-2.0"
] | null | null | null | case_bootloader/def_bootloader.py | eaglelaw/runcase | 87c88b592d2c4a1a68e3af069ebbd1c91367c91c | [
"Apache-2.0"
] | null | null | null | #the key word for info: desc, name, abbr
INFO = {
'desc':'MCU bootloader test',
'name':'case_bootloader',
'abbr':'boot'#Abbreviation of case module name
}
| 23.428571 | 48 | 0.664634 |
84bcc95a1dc44f385e792834925146625bdb3c09 | 4,584 | py | Python | os_dhcp_server/server.py | dsneddon/os-dhcp-server | b35bca9b15f1fb8ec164273f2052a5b37669dec2 | [
"Apache-2.0"
] | null | null | null | os_dhcp_server/server.py | dsneddon/os-dhcp-server | b35bca9b15f1fb8ec164273f2052a5b37669dec2 | [
"Apache-2.0"
] | null | null | null | os_dhcp_server/server.py | dsneddon/os-dhcp-server | b35bca9b15f1fb8ec164273f2052a5b37669dec2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import socket
import select
import logging
from os_dhcp_server import dhcp_packet
from os_dhcp_server import utils
logger = logging.getLogger(__name__)
class DhcpServer(object):
"""DHCP Server object for listening and sending DHCP requests and offers"""
def __init__(self, ip_address, listen_port, verbose, debug, reuseaddr=True,
broadcast=True):
# ip_address: address to bind; '' or '0.0.0.0' means all interfaces.
# listen_port: UDP port to serve; ports < 1024 require root (checked below).
# reuseaddr / broadcast: socket options applied later in create_socket().
self.ip_address = ip_address
self.listen_port = listen_port
self.reuseaddr = reuseaddr
self.broadcast = broadcast
self.verbose = verbose
self.debug = debug
# Populated by create_socket(); stays None until then.
self.dhcp_socket = None
# Privileged ports can only be bound by root; bail out early otherwise.
if self.listen_port < 1024:
if not os.geteuid() == 0:
sys.exit("Error, %s must be run as root to use ports <1024." %
os.path.basename(__file__))
def create_socket(self):
"""Open a socket for listening to DHCP requests.

Creates a UDP socket and applies the SO_REUSEADDR / SO_BROADCAST
options requested at construction time. Returns False on any socket
error; note there is no explicit success return (callers see None).
"""
logger.info("Creating os-dhcp-server socket...")
if not self.ip_address or self.ip_address == '0.0.0.0':
logger.debug("  address: all interfaces")
logger.debug("  address: %s" % self.ip_address )
#logger.debug("  tftp: %s" % str(tftp) )
#logger.debug("  gateway: %s" % str(gateway) )
#logger.debug("  dns: %s" % str(dns) )
#logger.debug("  netmask: %s" % str(netmask) )
logger.debug("  port: %s" % str(self.listen_port) )
#logger.debug("  pxe filename: %s" % str(pxefilename) )
logger.debug("  pid: %s" % str(os.getpid()) )
#logger.debug("  serving: %s - %s" % (str(offerfrom),
# str(offerto)))
try:
self.dhcp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error as err:
logger.error("Error creating DHCP server socket: %s" % err)
return False
try:
if self.reuseaddr:
self.dhcp_socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
except socket.error as err:
logger.error("Error setting socket option SO_REUSEADDR: %s" % err)
return False
try:
if self.broadcast:
self.dhcp_socket.setsockopt(socket.SOL_SOCKET,
socket.SO_BROADCAST, 1)
except socket.error as err:
logger.error("Error setting socket option SO_BROADCAST: %s" % err)
return False
def bind_socket(self):
"""Bind the socket to the IP address and port.

Returns False on failure; like create_socket() there is no explicit
success return value.
"""
if self.verbose or self.debug:
logger.info("Attempting to bind DHCP server to %s:%s" % \
(self.ip_address, self.listen_port))
try:
self.dhcp_socket.bind((self.ip_address, self.listen_port))
except socket.error as err:
logger.error("Error binding to socket: %s" % err)
return False
def receive(self):
"""Wait for one datagram and return it wrapped as a DhcpPacket.

select() has no timeout here, so this blocks until the socket is
readable. Returns None when no data was available.
"""
data_in, data_out, data_except = select.select([self.dhcp_socket],
[], [])
if (data_in != []):
(data, source_address) = self.dhcp_socket.recvfrom(8192)
else:
return None
if data:
packet = dhcp_packet.DhcpPacket(data)
packet.source_address = source_address
logger.debug(packet.str())
return packet
def listen(self):
# Main serving loop: receive and decode packets until Ctrl-C, then
# return 0. Empty receives are logged as errors and skipped.
while 1: # main loop
try:
packet = self.receive()
if not packet:
logger.error('Error processing received packet, ' +
'no data received')
else:
packet.decode_packet()
except KeyboardInterrupt:
return 0
4b2b65216df63ff40c34618b12012582b178271d | 2,148 | py | Python | examples/full-screen/simple-demos/margins.py | wichmann/python-prompt-toolkit | e2bb3a9f2062327a6203024c75aec29ea7a110de | [
"BSD-3-Clause"
] | 1 | 2020-08-08T22:00:16.000Z | 2020-08-08T22:00:16.000Z | examples/full-screen/simple-demos/margins.py | wichmann/python-prompt-toolkit | e2bb3a9f2062327a6203024c75aec29ea7a110de | [
"BSD-3-Clause"
] | 1 | 2021-04-08T11:26:23.000Z | 2021-04-08T11:26:23.000Z | examples/full-screen/simple-demos/margins.py | wichmann/python-prompt-toolkit | e2bb3a9f2062327a6203024c75aec29ea7a110de | [
"BSD-3-Clause"
] | 1 | 2021-04-08T11:25:43.000Z | 2021-04-08T11:25:43.000Z | #!/usr/bin/env python
"""
Example of Window margins.
This is mainly used for displaying line numbers and scroll bars, but it could
be used to display any other kind of information as well.
"""
from prompt_toolkit.application import Application
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout.containers import HSplit, Window
from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.layout.margins import NumberedMargin, ScrollbarMargin
# Filler text, repeated 40 times so the buffer is long enough to scroll —
# scrolling is what exercises the numbered and scrollbar margins.
LIPSUM = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas
quis interdum enim. Nam viverra, mauris et blandit malesuada, ante est bibendum
mauris, ac dignissim dui tellus quis ligula. Aenean condimentum leo at
dignissim placerat. In vel dictum ex, vulputate accumsan mi. Donec ut quam
placerat massa tempor elementum. Sed tristique mauris ac suscipit euismod. Ut
tempus vehicula augue non venenatis. Mauris aliquam velit turpis, nec congue
risus aliquam sit amet. Pellentesque blandit scelerisque felis, faucibus
consequat ante. Curabitur tempor tortor a imperdiet tincidunt. Nam sed justo
sit amet odio bibendum congue. Quisque varius ligula nec ligula gravida, sed
convallis augue faucibus. Nunc ornare pharetra bibendum. Praesent blandit ex
quis sodales maximus.""" * 40
# Text buffer shown in the main window; the margins update as it scrolls.
buff = Buffer()
buff.text = LIPSUM

# 1. The layout: a one-line reverse-video status bar above the text window.
#    The text window carries the margins: line numbers plus a scrollbar on
#    the left, and two scrollbars on the right.
body = HSplit([
    Window(FormattedTextControl('Press "q" to quit.'), height=1, style='reverse'),
    Window(
        BufferControl(buffer=buff),
        left_margins=[NumberedMargin(), ScrollbarMargin()],
        right_margins=[ScrollbarMargin(), ScrollbarMargin()],
    ),
])

# 2. Key bindings: either "q" or Control-C exits the application.
kb = KeyBindings()


@kb.add('q')
@kb.add('c-c')
def _(event):
    " Quit application. "
    event.app.exit()


# 3. The `Application` object tying layout and key bindings together.
application = Application(
    layout=Layout(body),
    key_bindings=kb,
    full_screen=True)


def run():
    application.run()


if __name__ == '__main__':
    run()
8bc29419ecfb1ab181836aa15e6c694677337b43 | 1,202 | py | Python | tests/support/factories/distribution_errors.py | leobelen/pydatajson | bcc6bbbb9d6554c82a658542522330d8e6337731 | [
"MIT"
] | null | null | null | tests/support/factories/distribution_errors.py | leobelen/pydatajson | bcc6bbbb9d6554c82a658542522330d8e6337731 | [
"MIT"
] | null | null | null | tests/support/factories/distribution_errors.py | leobelen/pydatajson | bcc6bbbb9d6554c82a658542522330d8e6337731 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from tests.support.utils import jsonschema_str
def distribution_error():
    """Fixture: validation result for a dataset whose first distribution
    is missing its required 'title' property."""
    error_detail = {
        "instance": None,
        "validator": "required",
        "path": ["dataset", 0, "distribution", 0],
        "message": "%s is a required property" % jsonschema_str('title'),
        "error_code": 1,
        "validator_value": ["accessURL", "downloadURL", "title", "issued"],
    }
    failing_dataset = {
        "status": "ERROR",
        "identifier": "99db6631-d1c9-470b-a73e-c62daa32c420",
        "list_index": 0,
        "errors": [error_detail],
        "title": "Sistema de contrataciones electrónicas",
    }
    return {
        "status": "ERROR",
        "error": {
            "catalog": {
                "status": "OK",
                "errors": [],
                "title": "Datos Argentina",
            },
            "dataset": [failing_dataset],
        },
    }
def missing_distribution_title():
    """Readable alias for the missing-'title' distribution fixture."""
    return distribution_error()
| 27.318182 | 74 | 0.383527 |
6d7859a328e5aa822cee03f20f4e38c861fb93c9 | 115,590 | py | Python | simulator/dpsched/DP_simulator.py | DelphianCalamity/PrivateKube | 14f575e77021ab7baca30f4061140ec83bdc96a7 | [
"Apache-2.0"
] | 9 | 2021-06-16T00:22:45.000Z | 2021-11-25T07:19:11.000Z | simulator/dpsched/DP_simulator.py | DelphianCalamity/PrivateKube | 14f575e77021ab7baca30f4061140ec83bdc96a7 | [
"Apache-2.0"
] | 2 | 2021-11-14T10:42:43.000Z | 2022-03-16T03:43:22.000Z | simulator/dpsched/DP_simulator.py | DelphianCalamity/PrivateKube | 14f575e77021ab7baca30f4061140ec83bdc96a7 | [
"Apache-2.0"
] | 3 | 2021-04-08T08:08:48.000Z | 2021-12-24T01:42:20.000Z | # Copyright (c) 2021. Tao Luo <tao.luo@columbia.edu>
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import heapq as hq
from datetime import timedelta
from functools import partial
from itertools import count, tee
import simpy
import timeit
import math
from dpsched.utils.rdp import (
compute_rdp_epsilons_gaussian,
gaussian_dp2sigma,
)
from operator import add, sub
from dpsched.utils.misc import max_min_fair_allocation, defuse
from dpsched.utils.configs import *
from dpsched.utils.store import (
LazyAnyFilterQueue,
DummyPutPool,
DummyPool,
DummyPutQueue,
DummyPutLazyAnyFilterQueue,
DummyFilterQueue,
)
from dpsched.utils.exceptions import *
import shutil
import os
from vcd.gtkw import GTKWSave
from typing import List
from pyDigitalWaveTools.vcd.parser import VcdParser
import yaml
from desmod.component import Component
from desmod.dot import generate_dot
import pprint as pp
import copy
# Convenience alias so isinstance checks can name the None type directly.
NoneType = type(None)
# Module-level debug flag; presumably gates extra diagnostics — TODO confirm usage.
IS_DEBUG = True
class Top(Component):
# Root component of the simulation: instantiates Tasks, ResourceMaster and
# the global Clock, archives the run's configuration, and arms a wall-clock
# runtime timeout so a stuck simulation eventually aborts.
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Optionally restrict to the feature set released with the OSDI'21 artifact.
if ENABLE_OSDI21_ARTIFACT_ONLY:
self._check_odsi_artifact_features_only()
self._save_config()
self.tasks = Tasks(self)
self.resource_master = ResourceMaster(self)
# Adaptive tick derives the clock period from the mean task inter-arrival
# interval; otherwise a fixed period is read from the config.
if self.env.config['sim.clock.adaptive_tick']:
tick_seconds = (
self.env.config['task.arrival_interval'] * 0.5
) # median: avg arrival time x0.68
else:
tick_seconds = self.env.config['resource_master.clock.tick_seconds']
self.global_clock = Clock(tick_seconds, self)
self.add_process(self._timeout_stop)
def _save_config(self):
# Copy the main script and (when workload testing is enabled) the workload
# trace into the working directory, so the run's inputs are archived
# alongside its outputs.
if self.env.config.get('sim.main_file'):
shutil.copy(self.env.config['sim.main_file'], os.path.basename(self.env.config['sim.main_file']) )
if self.env.config['workload_test.enabled']:
shutil.copy(self.env.config['workload_test.workload_trace_file'], os.path.join('./', os.path.basename(self.env.config['workload_test.workload_trace_file']) ))
def _check_odsi_artifact_features_only(self):
# Reject configurations using the DISABLED_FLAG dp policy, which is not
# part of the OSDI'21 artifact release.
if self.env.config['resource_master.dp_policy'].value == DISABLED_FLAG:
raise Exception("this feature is disabled for OSDI'21 artifact release, set ENABLE_OSDI21_ARTIFACT_ONLY in configs.py to enable it")
else:
return True
def _timeout_stop(self):
# Simulation process: poll real (wall-clock) time every 20 simulated
# seconds and raise once the configured budget (minutes) is exhausted.
t0 = timeit.default_timer()
while timeit.default_timer() - t0 < self.env.config['sim.runtime.timeout'] * 60:
yield self.env.timeout(20)
raise Exception(
'Simulation timeout %d min ' % self.env.config['sim.runtime.timeout']
)
def connect_children(self):
# Wire shared dependencies into the child components.
self.connect(self.tasks, 'resource_master')
self.connect(self.tasks, 'global_clock')
self.connect(self.resource_master, 'global_clock')
@classmethod
def pre_init(cls, env):
# Emit a GTKWave save file so the VCD dump opens with the task and
# resource signals pre-organised into groups.
with open(env.config['sim.gtkw.file'], 'w') as gtkw_file:
gtkw = GTKWSave(gtkw_file)
gtkw.dumpfile(env.config['sim.vcd.dump_file'], abspath=False)
gtkw.treeopen('dp_sim')
gtkw.signals_width(300)
analog_kwargs = {
'color': 'cycle',
'extraflags': ['analog_step'],
}
with gtkw.group(f'task'):
scope = 'tasks'
gtkw.trace(f'{scope}.active_count', datafmt='dec', **analog_kwargs)
gtkw.trace(f'{scope}.completion_count', datafmt='dec', **analog_kwargs)
gtkw.trace(f'{scope}.fail_count', datafmt='dec', **analog_kwargs)
with gtkw.group(f'resource'):
scope = 'resource_master'
gtkw.trace(f'{scope}.cpu_pool', datafmt='dec', **analog_kwargs)
gtkw.trace(f'{scope}.gpu_pool', datafmt='dec', **analog_kwargs)
gtkw.trace(f'{scope}.memory_pool', datafmt='dec', **analog_kwargs)
gtkw.trace(f'{scope}.unused_dp', datafmt='real', **analog_kwargs)
gtkw.trace(f'{scope}.committed_dp', datafmt='real', **analog_kwargs)
def elab_hook(self):
# Dump the component hierarchy as a dot graph during elaboration.
generate_dot(self)
def get_result_hook(self,result):
pass
class Clock(Component):
    """Simulation clock that emits one tick event per `seconds_per_tick`."""

    base_name = 'clock'

    def __init__(self, seconds_per_tick, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.seconds_per_tick = seconds_per_tick
        # Background process that keeps scheduling tick events forever.
        self.ticking_proc_h = self.env.process(self.precise_ticking())

    def precise_ticking(self):
        """Generator process yielding a pre-succeeded event at every tick.

        Each tick is scheduled at an exact multiple of the tick length
        (rather than via repeated timeouts) to avoid accumulating
        floating-point drift.
        """
        for tick_index in count():
            tick_event = self.env.event()
            tick_event._value = None
            tick_event._ok = True
            self.env.schedule_at(
                event=tick_event, sim_time=tick_index * self.seconds_per_tick
            )
            yield tick_event

    @property
    def next_tick(self):
        """The tick event the ticking process is currently waiting on."""
        return self.ticking_proc_h.target
class ResourceMaster(Component):
base_name = 'resource_master'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_connections('global_clock')
self._retired_blocks = set()
self._avg_num_blocks = (
self.env.config['task.demand.num_blocks.mice']
+ self.env.config['task.demand.num_blocks.elephant']
) / 2
self._avg_epsilon = (
self.env.config['task.demand.epsilon.mice']
+ self.env.config['task.demand.epsilon.elephant']
) / 2
self.dp_policy = self.env.config["resource_master.dp_policy"]
self.is_dp_policy_fcfs = self.dp_policy == DpPolicyType.DP_POLICY_FCFS
self.is_rdp = self.env.config['resource_master.dp_policy.is_rdp']
self.is_admission_control_enabled = self.env.config[
'resource_master.dp_policy.is_admission_control_enabled'
]
self.is_dp_policy_dpfn = self.dp_policy == DpPolicyType.DP_POLICY_DPF_N
self.is_dp_policy_dpft = self.dp_policy == DpPolicyType.DP_POLICY_DPF_T
self.is_dp_policy_dpfna = self.dp_policy == DpPolicyType.DP_POLICY_DPF_NA
self.is_dp_dpf = (not self.is_rdp) and (
self.is_dp_policy_dpfna or self.is_dp_policy_dpfn or self.is_dp_policy_dpft
)
self.is_rdp_dpf = self.is_rdp and (
self.is_dp_policy_dpfna or self.is_dp_policy_dpfn or self.is_dp_policy_dpft
)
self.is_dp_policy_rr_t = self.dp_policy == DpPolicyType.DP_POLICY_RR_T
self.is_dp_policy_rr_n2 = self.dp_policy == DpPolicyType.DP_POLICY_RR_N2
self.is_dp_policy_rr_n = self.dp_policy == DpPolicyType.DP_POLICY_RR_N
self.is_centralized_quota_sched = (
self.is_dp_policy_dpfn or self.is_dp_policy_dpfna or self.is_dp_policy_dpft
)
self.is_accum_container_sched = (
self.is_dp_policy_rr_t or self.is_dp_policy_rr_n2 or self.is_dp_policy_rr_n
)
self.is_N_based_retire = (
self.is_dp_policy_rr_n or self.is_dp_policy_rr_n2 or self.is_dp_policy_dpfn
)
self.is_T_based_retire = (
self.is_dp_policy_rr_t or self.is_dp_policy_dpft or self.is_dp_policy_dpfna
)
# regardless of rdp or dp
self.does_task_handler_unlock_quota = (
self.is_dp_policy_rr_n2 or self.is_dp_policy_dpfn or self.is_dp_policy_dpfna
)
if not self.is_rdp:
if self.is_dp_policy_dpfn:
pass
elif self.is_dp_policy_dpft:
pass
elif self.is_dp_policy_dpfna:
pass
elif self.is_dp_policy_rr_n2:
NotImplementedError()
elif self.is_dp_policy_rr_n:
pass
elif self.is_dp_policy_rr_t:
pass
elif self.is_dp_policy_fcfs:
pass
else:
raise NotImplementedError()
else:
assert self.is_rdp
if self.is_dp_policy_dpfn:
pass
elif self.is_dp_policy_dpft:
pass # raise NotImplementedError()
elif self.is_dp_policy_dpfna:
raise NotImplementedError()
elif self.is_dp_policy_rr_n2:
raise NotImplementedError()
elif self.is_dp_policy_rr_n:
raise NotImplementedError()
elif self.is_dp_policy_rr_t:
raise NotImplementedError()
elif self.is_dp_policy_fcfs:
pass
else:
raise NotImplementedError()
self.unused_dp = DummyPool(self.env)
self.auto_probe('unused_dp', vcd={'var_type': 'real'})
self.init_blocks_ready = self.env.event()
self.committed_dp = DummyPool(self.env)
self.auto_probe('committed_dp', vcd={'var_type': 'real'})
self.block_dp_storage = DummyPutQueue(self.env, capacity=float("inf"))
self.is_cpu_needed_only = self.env.config['resource_master.is_cpu_needed_only']
self.cpu_pool = DummyPool(
self.env,
capacity=self.env.config["resource_master.cpu_capacity"],
init=self.env.config["resource_master.cpu_capacity"],
hard_cap=True,
)
self.auto_probe('cpu_pool', vcd={})
self.memory_pool = DummyPool(
self.env,
capacity=self.env.config["resource_master.memory_capacity"],
init=self.env.config["resource_master.memory_capacity"],
hard_cap=True,
)
self.auto_probe('memory_pool', vcd={})
self.gpu_pool = DummyPool(
self.env,
capacity=self.env.config["resource_master.gpu_capacity"],
init=self.env.config["resource_master.gpu_capacity"],
hard_cap=True,
)
self.auto_probe('gpu_pool', vcd={})
self.mail_box = DummyPutQueue(self.env)
# make sure get event happens at the last of event queue at current epoch.
self.resource_sched_mail_box = DummyPutLazyAnyFilterQueue(self.env)
# two types of item in mail box:
# 1. a list of block ids whose quota get incremented,
# 2. new arrival task id
self.dp_sched_mail_box = DummyPutLazyAnyFilterQueue(self.env)
self.task_state = dict() # {task_id:{...},...,}
self.add_processes(self.generate_datablocks_loop)
self.add_processes(self.allocator_frontend_loop)
self.debug("dp allocation policy %s" % self.dp_policy)
# waiting for dp permission
self.dp_waiting_tasks = DummyFilterQueue(
self.env, capacity=float("inf")
) # {tid: DRS }, put task id and state to cal order
# waiting for resource permission
self.resource_waiting_tasks = DummyFilterQueue(self.env, capacity=float("inf"))
self.auto_probe('resource_waiting_tasks', vcd={})
self.auto_probe('dp_waiting_tasks', vcd={})
if self.is_N_based_retire:
self.denom = self.env.config['resource_master.dp_policy.denominator']
else:
self.denom = None
# for quota based policy
if self.is_centralized_quota_sched:
self.add_processes(self.scheduling_dp_loop)
self.add_processes(self.scheduling_resources_loop)
def scheduling_resources_loop(self):
def _permit_resource(request_tid, idle_resources):
# non blocking
# warning, resource allocation may fail/abort after permitted.
# e.g. when resource handler is interrupted
self.resource_waiting_tasks.get(filter=lambda x: x == request_tid)
idle_resources['cpu_level'] -= self.task_state[request_tid][
'resource_request'
]['cpu']
if not self.is_cpu_needed_only:
idle_resources['gpu_level'] -= self.task_state[request_tid][
'resource_request'
]['gpu']
idle_resources['memory_level'] -= self.task_state[request_tid][
'resource_request'
]['memory']
self.task_state[request_tid]['resource_permitted_event'].succeed()
# fixme coverage
def _reject_resource(request_tid):
self.resource_waiting_tasks.get(filter=lambda x: x == request_tid)
self.task_state[request_tid]['resource_permitted_event'].fail(
RejectResourcePermissionError('xxxx')
)
while True:
yield self.resource_sched_mail_box.when_any()
# ensure the scheduler is really lazy to process getter
assert (
self.env.peek() != self.env.now
or self.env._queue[0][1] == LazyAnyFilterQueue.LAZY
)
# ignore fake door bell, listen again
if len(self.resource_sched_mail_box.items) == 0:
continue
mail_box = self.resource_sched_mail_box
# HACK avoid calling slow get()
msgs, mail_box.items = mail_box.items, []
resrc_release_msgs = []
new_arrival_msgs = []
fail_alloc_msgs = []
for msg in msgs:
if msg['msg_type'] == ResourceHandlerMessageType.RESRC_TASK_ARRIVAL:
new_arrival_msgs.append(msg)
elif msg['msg_type'] == ResourceHandlerMessageType.RESRC_RELEASE:
resrc_release_msgs.append(msg)
# fixme coverage
elif msg['msg_type'] == ResourceHandlerMessageType.RESRC_PERMITED_FAIL_TO_ALLOC:
fail_alloc_msgs.append(msg)
else:
raise Exception('cannot identify message type')
new_arrival_tid = [m['task_id'] for m in new_arrival_msgs]
# should be a subset
assert set(new_arrival_tid) <= set(self.resource_waiting_tasks.items)
task_sched_order = None
# optimization for case with only new arrival task(s), fcfs
if len(new_arrival_msgs) == len(msgs):
task_sched_order = new_arrival_tid
# otherwise, iterate over all sleeping tasks to sched.
else:
task_sched_order = copy.deepcopy(self.resource_waiting_tasks.items)
this_epoch_idle_resources = {
"cpu_level": self.cpu_pool.level,
"gpu_level": self.gpu_pool.level,
"memory_level": self.memory_pool.level,
}
# save, sched later
fcfs_sleeping_dp_waiting_tasks = []
for sleeping_tid in task_sched_order:
if not self.task_state[sleeping_tid]['dp_committed_event'].triggered:
# will schedule dp_waiting task later
fcfs_sleeping_dp_waiting_tasks.append(sleeping_tid)
# sched dp granted tasks
# first round: sched dp-granted tasks in FCFS order.
elif self.task_state[sleeping_tid]['dp_committed_event'].ok:
if self._is_idle_resource_enough(
sleeping_tid, this_epoch_idle_resources
):
_permit_resource(sleeping_tid, this_epoch_idle_resources)
# fixme coverage
else:
assert not self.task_state[sleeping_tid]['dp_committed_event'].ok
if not self.task_state[sleeping_tid]['is_admission_control_ok']:
_reject_resource(sleeping_tid)
else:
raise Exception(
"impossible to see dp rejected task in resource_waiting_tasks. This should already happen: failed dp commit -> "
"interrupt resoruce handler -> dequeue resource_waiting_tasks"
)
# sched dp waiting tasks in FCFS order
if (
self.is_dp_policy_fcfs
or self.is_dp_policy_rr_t
or self.is_dp_policy_rr_n2
or self.is_dp_policy_rr_n
):
# regardless of rdp or dp
sleeping_dp_waiting_sched_order = fcfs_sleeping_dp_waiting_tasks
else:
assert (
self.is_dp_policy_dpfn
or self.is_dp_policy_dpft
or self.is_dp_policy_dpfna
)
# regardless of rdp or dp
# smallest dominant_resource_share task first
sleeping_dp_waiting_sched_order = sorted(
fcfs_sleeping_dp_waiting_tasks,
reverse=False,
key=lambda t_id: self.task_state[t_id]['dominant_resource_share'],
)
# second round: sched dp ungranted
for sleeping_tid in sleeping_dp_waiting_sched_order:
if self._is_idle_resource_enough(
sleeping_tid, this_epoch_idle_resources
):
_permit_resource(sleeping_tid, this_epoch_idle_resources)
def _is_mice_task_dp_demand(self, epsilon, num_blocks):
return epsilon < self._avg_epsilon, num_blocks < self._avg_num_blocks
def _is_idle_resource_enough(self, tid, idle_resources):
if (
idle_resources['cpu_level']
< self.task_state[tid]['resource_request']['cpu']
):
return False
if not self.is_cpu_needed_only:
if (
idle_resources['gpu_level']
< self.task_state[tid]['resource_request']['gpu']
):
return False
if (
idle_resources['memory_level']
< self.task_state[tid]['resource_request']['memory']
):
return False
return True
def _cal_rdp_dominant_consumption(
self, g_budget_curve, consumpiton_curve, demand_curve, g_budget_max
):
current_rdp_max = max(map(sub, g_budget_curve, consumpiton_curve))
post_alloc_consumption = map(add, consumpiton_curve, demand_curve)
post_alloc_rdp_max = max(map(sub, g_budget_curve, post_alloc_consumption))
return (current_rdp_max - post_alloc_rdp_max) / g_budget_max
def _commit_rdp_allocation(self, block_idx: List[int], e_rdp: List[float]):
assert len(block_idx) > 0
for b in block_idx:
# todo perf use iterator + map?
temp_balance = []
temp_quota_balance = []
for j, e in enumerate(e_rdp):
self.block_dp_storage.items[b]["rdp_consumption"][j] += e
temp_balance.append(
self.block_dp_storage.items[b]["rdp_budget_curve"][j]
- self.block_dp_storage.items[b]["rdp_consumption"][j]
)
temp_quota_balance.append(
self.block_dp_storage.items[b]["rdp_quota_curve"][j]
- self.block_dp_storage.items[b]["rdp_consumption"][j]
)
assert max(temp_balance) >= (0 - self.env.config['sim.numerical_delta'])
self.block_dp_storage.items[b]["rdp_quota_balance"] = temp_quota_balance
def scheduling_dp_loop(self):
# sourcery skip: hoist-statement-from-if, merge-duplicate-blocks, remove-redundant-if, simplify-len-comparison, split-or-ifs, swap-if-else-branches
assert self.is_centralized_quota_sched
# calculate DR share, match, allocate,
# update DRS if new quota has over lap with tasks
while True:
doorbell = self.dp_sched_mail_box.when_any()
yield doorbell
# rejected or permitted tasks
dp_processed_task_idx = []
# ensure the scheduler is really lazy to process getter, wait for all quota incremented
assert (
self.env.peek() != self.env.now
or self.env._queue[0][1] == LazyAnyFilterQueue.LAZY
)
# ignore fake door bell, listen again
if len(self.dp_sched_mail_box.items) == 0:
continue
# HACK, avoid calling slow get()
msgs, self.dp_sched_mail_box.items = self.dp_sched_mail_box.items, []
new_arrival_tid = []
incremented_quota_idx = set()
msgs_amount = len(msgs)
for m in msgs:
if isinstance(m, int):
tid = m
# assert m in self.dp_waiting_tasks.items
idx = self.dp_waiting_tasks.items.index(tid, -msgs_amount - 10)
new_arrival_tid.append((idx, tid))
else:
assert isinstance(m, list)
incremented_quota_idx.update(m)
this_epoch_unused_quota = [
block['dp_quota'].level for block in self.block_dp_storage.items
]
# new task arrived
for _, new_task_id in new_arrival_tid:
assert self.task_state[new_task_id]['dominant_resource_share'] is None
has_quota_increment = len(incremented_quota_idx) > 0
# update DRS of tasks if its demands has any incremented quota, or new comming tasks.
quota_incre_upper_bound = (
max(incremented_quota_idx) if has_quota_increment else -1
)
quota_incre_lower_bound = (
min(incremented_quota_idx) if has_quota_increment else -1
)
# cal DRS
if not self.is_rdp:
self._cal_drs_dp_L_Inf(new_arrival_tid)
else:
assert self.is_rdp
self._cal_drs_rdp_a_all2()
permit_dp_task_order = None
# optimization for no new quota case
if (not has_quota_increment) and len(new_arrival_tid) != 0:
new_arrival_drs = (
self.task_state[t[1]]['dominant_resource_share']
for t in new_arrival_tid
)
permit_dp_task_order = list(zip(new_arrival_drs, new_arrival_tid))
hq.heapify(permit_dp_task_order)
else:
assert has_quota_increment
waiting_task_drs = (
self.task_state[t]['dominant_resource_share']
for t in self.dp_waiting_tasks.items
)
permit_dp_task_order = list(
zip(waiting_task_drs, enumerate(self.dp_waiting_tasks.items))
)
hq.heapify(permit_dp_task_order)
# iterate over tasks ordered by DRS, match quota, allocate.
permitted_task_ids = set()
dp_rejected_task_ids = set()
permitted_blk_ids = set()
should_grant_top_small = self.env.config[
'resource_master.dp_policy.dpf_family.grant_top_small'
]
are_leading_tasks_ok = True
if not self.is_rdp:
self._dpf_best_effort_dp_sched(
are_leading_tasks_ok,
dp_processed_task_idx,
permit_dp_task_order,
permitted_blk_ids,
permitted_task_ids,
should_grant_top_small,
this_epoch_unused_quota,
)
# reject tasks after allocation
# only reject task on retired blocks
if has_quota_increment: # either dpft or dpfn
self._dpf_check_remaining_dp_n_reject(
dp_processed_task_idx,
dp_rejected_task_ids,
permitted_task_ids,
this_epoch_unused_quota,
)
else: # is_rdp
self.best_effort_rdp_sched_n_commit_reject(
dp_processed_task_idx,
dp_rejected_task_ids,
permit_dp_task_order,
permitted_blk_ids,
permitted_task_ids,
)
# dequeue all permitted and rejected waiting tasks
# HACK avoid calling dp_waiting_tasks.get()
dp_processed_task_idx.sort(reverse=True)
for i in dp_processed_task_idx:
self.debug(
self.dp_waiting_tasks.items[i], "task get dequeued from wait queue"
)
del self.dp_waiting_tasks.items[i]
def _dpf_check_remaining_dp_n_reject(
self,
dp_processed_task_idx,
dp_rejected_task_ids,
permitted_task_ids,
this_epoch_unused_quota,
):
# reject tasks after allocation
# only reject task on retired blocks
assert not self.is_rdp
for idx, t_id in enumerate(self.dp_waiting_tasks.items):
should_reject = None
if t_id not in permitted_task_ids:
this_task = self.task_state[t_id]
this_request = this_task["resource_request"]
task_demand_block_idx = this_request['block_idx']
task_demand_epsilon = this_request['epsilon']
# HACK, only check old and new items for rejection performance
old_demand_b_idx = task_demand_block_idx[0]
old_item = self.block_dp_storage.items[old_demand_b_idx]
new_demand_b_idx = task_demand_block_idx[-1]
# check oldest item this will check and reject for dpft
if old_item["retire_event"].triggered:
assert old_item["retire_event"].ok
b = old_demand_b_idx
if this_epoch_unused_quota[b] < task_demand_epsilon:
should_reject = True
# check latest item
elif (
not self.is_dp_policy_dpft and new_demand_b_idx != old_demand_b_idx
):
new_item = self.block_dp_storage.items[new_demand_b_idx]
if new_item["retire_event"] and new_item["retire_event"].triggered:
b = new_demand_b_idx
if this_epoch_unused_quota[b] < task_demand_epsilon:
should_reject = True
if should_reject:
this_task["dp_permitted_event"].fail(DpBlockRetiredError())
dp_rejected_task_ids.add(t_id)
dp_processed_task_idx.append(idx)
def best_effort_rdp_sched_n_commit_reject(
self,
dp_processed_task_idx,
dp_rejected_task_ids,
permit_dp_task_order,
permitted_blk_ids,
permitted_task_ids,
):
for drs, t in permit_dp_task_order:
t_idx, t_id = t
this_task = self.task_state[t_id]
this_request = this_task["resource_request"]
task_demand_block_idx = this_request['block_idx']
task_demand_e_rdp = this_request['e_rdp']
violated_blk, is_quota_insufficient_all = self.is_all_block_quota_sufficient(task_demand_block_idx, task_demand_e_rdp)
# task is permitted
if not is_quota_insufficient_all :#
drs = this_task['dominant_resource_share']
self.debug(t_id, "DP permitted, Dominant resource share: %.3f" % drs)
this_task["dp_permitted_event"].succeed()
permitted_task_ids.add(t_id)
permitted_blk_ids.update(task_demand_block_idx)
# need to update consumption for following rejection
self._commit_rdp_allocation(task_demand_block_idx, task_demand_e_rdp)
this_task["dp_committed_event"].succeed()
this_task['is_dp_granted'] = True
dp_processed_task_idx.append(t_idx)
else: # is_quota_insufficient_all
if self.block_dp_storage.items[violated_blk]["retire_event"].triggered:
assert self.block_dp_storage.items[violated_blk]["retire_event"].ok
dp_rejected_task_ids.add(t_id)
this_task["dp_permitted_event"].fail(
DpBlockRetiredError(
"block %d retired, insufficient unlocked rdp left" % violated_blk
)
)
this_task['is_dp_granted'] = False
dp_processed_task_idx.append(t_idx)
return
def is_all_block_quota_sufficient(self, task_demand_block_idx, task_demand_e_rdp):
for b in task_demand_block_idx:
for j, e_d in enumerate(task_demand_e_rdp):
if (
e_d
<= self.block_dp_storage.items[b]['rdp_quota_balance'][j]
):
break
else:
return b, True
else:
return None, False
def _dpf_best_effort_dp_sched(
self,
are_leading_tasks_ok,
dp_processed_task_idx,
permit_dp_task_order,
permitted_blk_ids,
permitted_task_ids,
should_grant_top_small,
this_epoch_unused_quota,
):
for drs, t in permit_dp_task_order:
t_idx, t_id = t
if should_grant_top_small and (not are_leading_tasks_ok):
break
this_task = self.task_state[t_id]
this_request = this_task["resource_request"]
task_demand_block_idx = this_request['block_idx']
task_demand_epsilon = this_request['epsilon']
for b_idx in task_demand_block_idx:
if (
this_epoch_unused_quota[b_idx]
+ self.env.config['sim.numerical_delta']
< task_demand_epsilon
):
are_leading_tasks_ok = False
break
# task is permitted
else:
drs = this_task['dominant_resource_share']
self.debug(t_id, "DP permitted, Dominant resource share: %.3f" % drs)
for i in task_demand_block_idx:
this_epoch_unused_quota[i] -= task_demand_epsilon
this_task["dp_permitted_event"].succeed()
permitted_task_ids.add(t_id)
permitted_blk_ids.update(task_demand_block_idx)
dp_processed_task_idx.append(t_idx)
return
def _cal_drs_rdp_a_all2(self):
for t_id in reversed(self.dp_waiting_tasks.items):
this_task = self.task_state[t_id]
# ending condition, drs already calculated
if this_task['dominant_resource_share'] is not None:
break
this_request = this_task['resource_request']
# block wise
temp_max = -1
for b in this_request['block_idx']:
for j, e in enumerate(this_request['e_rdp']):
# iterate over all alpha demand
if self.block_dp_storage.items[b]["rdp_budget_curve"][j] > 0:
normalized_e = (e / self.block_dp_storage.items[b]["rdp_budget_curve"][j]
)
temp_max = max(temp_max, normalized_e)
assert temp_max != -1
this_task['dominant_resource_share'] = temp_max
def _cal_drs_dp_L_Inf(self, new_arrival_tid):
for _, new_task_id in new_arrival_tid:
this_task = self.task_state[new_task_id]
this_task['dominant_resource_share'] = this_task["resource_request"][
'epsilon'
]
def allocator_frontend_loop(self):
while True:
# loop only blocks here
yield self.mail_box.when_any()
for i in range(self.mail_box.size):
get_evt = self.mail_box.get()
msg = get_evt.value
if msg["message_type"] == DpHandlerMessageType.NEW_TASK:
assert msg["task_id"] not in self.task_state
self.task_state[msg["task_id"]] = dict()
self.task_state[msg["task_id"]]["task_proc"] = msg["task_process"]
if msg["message_type"] == DpHandlerMessageType.ALLOCATION_REQUEST:
assert msg["task_id"] in self.task_state
self.task_state[msg["task_id"]] = dict()
self.task_state[msg["task_id"]]["resource_request"] = msg
self.task_state[msg["task_id"]][
"resource_allocate_timestamp"
] = None
self.task_state[msg["task_id"]]["dp_commit_timestamp"] = None
self.task_state[msg["task_id"]]["task_completion_timestamp"] = None
self.task_state[msg["task_id"]]["task_publish_timestamp"] = None
self.task_state[msg["task_id"]]["is_dp_granted"] = None
self.task_state[msg["task_id"]]["is_admission_control_ok"] = None
self.task_state[msg["task_id"]][
"resource_allocated_event"
] = msg.pop("resource_allocated_event")
self.task_state[msg["task_id"]]["dp_committed_event"] = msg.pop(
"dp_committed_event"
)
# following two events are controlled by scheduling policy
self.task_state[msg["task_id"]][
"dp_permitted_event"
] = self.env.event()
self.task_state[msg["task_id"]][
"resource_permitted_event"
] = self.env.event()
self.task_state[msg["task_id"]][
"resource_released_event"
] = self.env.event()
self.task_state[msg["task_id"]]["dominant_resource_share"] = None
self.task_state[msg["task_id"]]["execution_proc"] = msg.pop(
"execution_proc"
)
self.task_state[msg["task_id"]]["waiting_for_dp_proc"] = msg.pop(
"waiting_for_dp_proc"
)
## trigger allocation
self.task_state[msg["task_id"]][
"handler_proc_dp"
] = self.env.process(self.task_dp_handler(msg["task_id"]))
self.task_state[msg["task_id"]][
"handler_proc_resource"
] = self.env.process(self.task_resources_handler(msg["task_id"]))
self.task_state[msg["task_id"]][
"blk2accum_getters"
] = dict() # blk_idx: getter
msg['task_init_event'].succeed()
def _handle_accum_block_waiters(self, task_id):
this_task = self.task_state[task_id]
resource_demand = this_task["resource_request"]
dp_committed_event = this_task["dp_committed_event"]
wait_for_all_getter_proc = self.env.all_of(
list(this_task["blk2accum_getters"].values())
)
try:
if self.env.config['task.timeout.enabled']:
timeout_evt = self.env.timeout(
self.env.config['task.timeout.interval'], TIMEOUT_VAL
)
permitted_or_timeout_val = yield wait_for_all_getter_proc | timeout_evt
else:
permitted_or_timeout_val = yield wait_for_all_getter_proc
if wait_for_all_getter_proc.triggered:
self.debug(task_id, "get all dp from blocks")
return 0
else:
assert TIMEOUT_VAL in list(permitted_or_timeout_val.values())
raise DprequestTimeoutError()
except (
StopReleaseDpError,
InsufficientDpException,
DprequestTimeoutError,) as err:
self.debug(
task_id,
"policy=%s, fail to acquire dp due to" % self.dp_policy,
err.__repr__(),
)
# interrupt dp_waiting_proc
if this_task["handler_proc_resource"].is_alive:
this_task["handler_proc_resource"].interrupt(
DpHandlerMessageType.DP_HANDLER_INTERRUPT_MSG
)
dp_committed_event.fail(err)
removed_accum_cn = []
missing_waiting_accum_cn = []
fullfilled_blk = []
unfullfilled_blk = []
for blk_idx, get_event in this_task[
"blk2accum_getters"
].items(): # get_evt_block_mapping.items():
if get_event.triggered and get_event.ok:
fullfilled_blk.append(blk_idx)
elif (not get_event.triggered) or (not get_event.ok):
unfullfilled_blk.append(blk_idx)
get_event.cancel() # if not triggered pop from waiters
get_event.defused = True
this_block = self.block_dp_storage.items[blk_idx]
dp_container = this_block["dp_container"]
if task_id in this_block['waiting_tid2accum_containers']:
this_block['waiting_tid2accum_containers'].pop(task_id)
removed_accum_cn.append(task_id)
else:
missing_waiting_accum_cn.append(blk_idx)
if get_event.triggered and get_event.ok:
assert (
dp_container.level + get_event.amount
< dp_container.capacity
+ self.env.config['sim.numerical_delta']
)
if len(removed_accum_cn) != 0:
self.debug(
task_id,
"accum containers removed by task handler for blocks %s"
% removed_accum_cn,
)
if len(missing_waiting_accum_cn) != 0:
self.debug(
task_id,
"accum containers removed by sched for blocks %s"
% removed_accum_cn,
)
self.debug(task_id, "fullfilled block demand getter: %s" % fullfilled_blk)
self.debug(
task_id, "unfullfilled block demand getter: %s" % unfullfilled_blk
)
return 1
    def _check_task_admission_control(self, task_id):
        """Early admission control: reject the task if its demand is plainly
        unsatisfiable.

        Pure-DP mode: reject when any demanded block's uncommitted container
        capacity is below the task's epsilon, or (accum-container policies)
        when a demanded block's scheduler process is no longer alive.
        RDP mode: a block passes if at least one alpha order still has enough
        remaining budget (budget - consumption >= demand).

        On rejection, interrupts the task's resource handler and fails its
        dp_committed_event with InsufficientDpException, then returns False.
        Returns True when the task is admitted.
        """
        this_task = self.task_state[task_id]
        resource_demand = this_task["resource_request"]
        dp_committed_event = this_task["dp_committed_event"]
        if not self.is_rdp:
            # only check uncommitted dp capacity
            # peek remaining DP, reject if DP is already insufficient
            for i in resource_demand["block_idx"]:
                this_block = self.block_dp_storage.items[i]
                capacity = this_block["dp_container"].capacity
                # numerical_delta absorbs float round-off in the comparison
                if (
                    capacity + self.env.config['sim.numerical_delta']
                    < resource_demand["epsilon"]
                ):
                    self.debug(
                        task_id,
                        "DP is insufficient before asking dp scheduler, Block ID: %d, remain epsilon: %.3f"
                        % (i, capacity),
                    )
                    if this_task["handler_proc_resource"].is_alive:
                        this_task["handler_proc_resource"].interrupt(
                            DpHandlerMessageType.DP_HANDLER_INTERRUPT_MSG
                        )
                    # inform user's dp waiting task
                    dp_committed_event.fail(
                        InsufficientDpException(
                            "DP request is rejected by handler admission control, Block ID: %d, remain epsilon: %.3f"
                            % (i, capacity)
                        )
                    )
                    return False
                elif self.is_accum_container_sched and (
                    not this_block['block_proc'].is_alive
                ):
                    # RR-style policies: a dead block scheduler can never grant more DP
                    dp_committed_event.fail(
                        InsufficientDpException(
                            "DP request is rejected by handler admission control, Block %d sched is inactive"
                            % i
                        )
                    )
                    return False
        else:
            for b in resource_demand["block_idx"]:
                # for-else: the else (reject) runs only when NO alpha order fits
                for j, e in enumerate(resource_demand["e_rdp"]):
                    if (
                        self.block_dp_storage.items[b]["rdp_budget_curve"][j]
                        - self.block_dp_storage.items[b]["rdp_consumption"][j]
                        >= e
                    ):
                        break
                else:
                    self.debug(
                        task_id,
                        "RDP is insufficient before asking rdp scheduler, Block ID: %d"
                        % (b),
                    )
                    if this_task["handler_proc_resource"].is_alive:
                        this_task["handler_proc_resource"].interrupt(
                            DpHandlerMessageType.DP_HANDLER_INTERRUPT_MSG
                        )
                    # inform user's dp waiting task
                    this_task['is_dp_granted'] = False
                    dp_committed_event.fail(
                        InsufficientDpException(
                            "RDP request is rejected by handler admission control, Block ID: %d "
                            % (b)
                        )
                    )
                    return False
        return True
def _do_fcfs(self, task_id):
this_task = self.task_state[task_id]
resource_demand = this_task["resource_request"]
dp_committed_event = this_task["dp_committed_event"]
if not self.is_rdp:
for i in resource_demand["block_idx"]:
# after admission control check, only need to handle numerical accuracy
self.block_dp_storage.items[i]["dp_container"].get(
min(
resource_demand["epsilon"],
self.block_dp_storage.items[i]["dp_container"].level,
)
)
else:
self._commit_rdp_allocation(
resource_demand["block_idx"], resource_demand["e_rdp"]
)
this_task["dp_committed_event"].succeed()
def _rdp_update_quota_balance(self, blk_idx):
this_block = self.block_dp_storage.items[blk_idx]
this_block['rdp_quota_balance'] = list(
map(
lambda d: d[0] - d[1],
zip(this_block['rdp_quota_curve'], this_block['rdp_consumption']),
)
)
    def _check_n_update_block_state(self, task_id):
        """Register the task's arrival on each demanded block and, for
        task-driven unlock policies, move locked DP/RDP into the quota pool.

        Per demanded block: bumps arrived_task_num; unlocks quota according to
        the active policy (DPF-N, DPF-NA, RR-N2 centrally; RR-N via the
        block's mailbox; DPF-T by its own timer subloop); retires blocks that
        reach the N-based threshold. For accum-container (RR) policies it then
        either registers a per-task accumulation container on every demanded
        block or, if any demanded block's scheduler is inactive, fails the
        task with StopReleaseDpError.

        Returns 1 when the task was rejected here (dp_committed_event has been
        failed), otherwise falls through returning None.
        """
        this_task = self.task_state[task_id]
        resource_demand = this_task["resource_request"]
        blocks_retired_by_this_task = []
        dp_committed_event = this_task["dp_committed_event"]
        # quota increment
        quota_increment_idx = []
        retired_blocks_before_arrival = []
        inactive_block_sched_procs = []
        for i in resource_demand["block_idx"]:
            this_block = self.block_dp_storage.items[i]
            this_block["arrived_task_num"] += 1
            if this_block["retire_event"].triggered:
                assert this_block["retire_event"].ok
                retired_blocks_before_arrival.append(i)
                if self.does_task_handler_unlock_quota:
                    continue
            # unlock quota by task
            elif self.does_task_handler_unlock_quota:
                new_quota_unlocked = None
                # update quota
                if not self.is_rdp:
                    if self.is_dp_policy_dpfn:
                        assert not this_block["retire_event"].triggered
                        # DPF-N: each arrival unlocks 1/denom of the initial epsilon
                        quota_increment = self.env.config["resource_master.block.init_epsilon"] / self.denom
                        assert (
                            quota_increment
                            < this_block['dp_container'].level
                            + self.env.config['sim.numerical_delta']
                        )
                        # snap to the container level when the gap is pure round-off
                        if (
                            -self.env.config['sim.numerical_delta']
                            < quota_increment - this_block['dp_container'].level
                            < self.env.config['sim.numerical_delta']
                        ):
                            get_amount = this_block['dp_container'].level
                        else:
                            get_amount = quota_increment
                        this_block['dp_container'].get(get_amount)
                        block_quota = this_block['dp_quota']
                        assert -self.env.config['sim.numerical_delta'] < block_quota.capacity - block_quota.level - get_amount
                        block_quota.put(min(get_amount,block_quota.capacity - block_quota.level))
                        new_quota_unlocked = True
                    elif self.is_dp_policy_dpft:
                        # DPF-T unlocks via its timer subloop, not per arrival
                        pass
                    elif self.is_dp_policy_dpfna:
                        assert not this_block["retire_event"].triggered
                        age = self.env.now - this_block["create_time"]
                        x = age / self.env.config['resource_master.block.lifetime']
                        # NOTE(review): with the -0.0 coefficient this reduces to
                        # target_quota = x * init_epsilon (linear in age) — the
                        # factor looks like a disabled curvature knob; confirm.
                        target_quota = ((x - 1) / (-0.0 * x + 1) + 1) * self.env.config[
                            "resource_master.block.init_epsilon"
                        ]
                        released_quota = (
                            self.env.config["resource_master.block.init_epsilon"]
                            - this_block['dp_container'].level
                        )
                        get_amount = target_quota - released_quota
                        this_block['dp_container'].get(get_amount)
                        this_block['dp_quota'].put(get_amount)
                        new_quota_unlocked = True
                    elif self.is_dp_policy_rr_n2:
                        if not this_block[
                            "retire_event"
                        ].triggered:  # sched finished wont be allocated
                            # centralized update;
                            quota_increment = (
                                self.env.config["resource_master.block.init_epsilon"]
                                / self.denom
                            )
                            assert this_block["arrived_task_num"] <= self.denom
                            assert (
                                quota_increment
                                < this_block['dp_container'].level
                                + self.env.config['sim.numerical_delta']
                            )
                            # snap to the container level on pure round-off
                            if (
                                -self.env.config['sim.numerical_delta']
                                < quota_increment - this_block['dp_container'].level
                                < self.env.config['sim.numerical_delta']
                            ):
                                get_amount = this_block['dp_container'].level
                            else:
                                get_amount = quota_increment
                            this_block['dp_container'].get(get_amount)
                            this_block["residual_dp"] = (
                                this_block["global_epsilon_dp"]
                                * this_block["arrived_task_num"]
                                / self.denom
                            )
                            new_quota_unlocked = True
                        else:
                            assert not this_block['block_proc'].is_alive
                            inactive_block_sched_procs.append(i)
                            continue
                    elif self.is_dp_policy_rr_t:
                        if not this_block['block_proc'].is_alive:
                            inactive_block_sched_procs.append(i)
                            continue
                    elif self.is_dp_policy_rr_n:
                        if this_block['block_proc'].is_alive:
                            # delegate the unlock to the block's own subloop
                            this_block['_mail_box'].put(task_id)
                        else:
                            inactive_block_sched_procs.append(i)
                            continue
                    else:
                        raise NotImplementedError()
                else:
                    assert self.is_rdp
                    if self.is_dp_policy_dpfn:
                        assert not this_block["retire_event"].triggered
                        # unlock the arrived fraction of every alpha's budget
                        fraction = this_block["arrived_task_num"] / self.denom
                        if fraction < 1:
                            this_block['rdp_quota_curve'] = [
                                bjt * fraction for bjt in this_block['rdp_budget_curve']
                            ]
                        else:
                            this_block['rdp_quota_curve'] = this_block[
                                'rdp_budget_curve'
                            ].copy()
                        new_quota_unlocked = True
                    elif self.is_dp_policy_dpft:
                        pass
                    else:
                        raise NotImplementedError()
            if self.does_task_handler_unlock_quota and new_quota_unlocked:
                quota_increment_idx.append(i)
                if self.is_rdp:
                    self._rdp_update_quota_balance(i)
            if self.is_N_based_retire and (
                this_block["arrived_task_num"] == self.denom
            ):
                this_block["retire_event"].succeed()
                self._retired_blocks.add(i)
                blocks_retired_by_this_task.append(i)
        # for RR policy, add accum container and getter per task
        if self.is_accum_container_sched:
            if (len(inactive_block_sched_procs) == 0 ):  # make sure no retired blocks before
                if not self.is_rdp:
                    for i in resource_demand["block_idx"]:
                        # need to wait to get per-task accum containers
                        this_block = self.block_dp_storage.items[i]
                        accum_cn = DummyPutPool(
                            self.env, capacity=resource_demand["epsilon"], init=0.0
                        )
                        # will disable get event when fail to grant.
                        this_task["blk2accum_getters"][i] = accum_cn.get(
                            resource_demand["epsilon"]
                            * (1 - self.env.config['sim.numerical_delta'])
                        )
                        this_block["waiting_tid2accum_containers"][task_id] = accum_cn
                        self.debug(
                            "waiting_tid2accum_containers: %d, %s"
                            % (task_id, resource_demand["block_idx"])
                        )
                else:
                    raise NotImplementedError('no rdp x RR')
            else:
                # some demanded block's scheduler is dead: reject the task
                assert this_task["handler_proc_resource"].is_alive
                this_task["handler_proc_resource"].interrupt(
                    DpHandlerMessageType.DP_HANDLER_INTERRUPT_MSG
                )
                assert not dp_committed_event.triggered
                dp_committed_event.fail(
                    StopReleaseDpError(
                        "bbb task %d rejected dp, due to block %s has retired for RR"
                        % (task_id, retired_blocks_before_arrival)
                    )
                )
                # assert not dp_committed_event.ok
        if IS_DEBUG and len(blocks_retired_by_this_task):
            self.debug(
                task_id,
                'blocks No. %s get retired.' % blocks_retired_by_this_task.__repr__(),
            )
        if len(quota_increment_idx) != 0:
            self.dp_sched_mail_box.put(quota_increment_idx)
        # for RR fail tasks when block is retired.
        if dp_committed_event.triggered:
            assert not dp_committed_event.ok
            return 1
    def _handle_quota_sched_permission(self, task_id):
        """Wait for the centralized quota scheduler to permit this task.

        Yields until dp_permitted_event fires, optionally racing a timeout.
        On permission, pure DP draws the granted epsilon from each demanded
        block's dp_quota pool and returns 0; for RDP the quota was already
        consumed during scheduling so this falls through returning None (the
        scheduler also set is_dp_granted). On timeout or block retirement the
        task is dequeued (timeout case), its resource handler interrupted,
        dp_committed_event failed, and 1 is returned.
        """
        this_task = self.task_state[task_id]
        resource_demand = this_task["resource_request"]
        dp_committed_event = this_task["dp_committed_event"]
        def wait_for_permit():
            yield self.task_state[task_id]["dp_permitted_event"]
        wait_for_permit_proc = self.env.process(wait_for_permit())
        try:
            t0 = self.env.now
            if self.env.config['task.timeout.enabled']:
                permitted_or_timeout_val = (
                    yield wait_for_permit_proc
                    | self.env.timeout(
                        self.env.config['task.timeout.interval'], TIMEOUT_VAL
                    )
                )
            else:
                permitted_or_timeout_val = yield wait_for_permit_proc
            if wait_for_permit_proc.triggered:
                self.debug(
                    task_id,
                    "grant_dp_permitted after ",
                    timedelta(seconds=(self.env.now - t0)),
                )
                if not self.is_rdp:
                    # draw from quota; min() clamps away numerical round-off
                    for i in resource_demand["block_idx"]:
                        this_block = self.block_dp_storage.items[i]
                        this_block["dp_quota"].get(
                            min(
                                resource_demand["epsilon"], this_block["dp_quota"].level
                            )
                        )
                    return 0
                else:
                    pass  # already increase rdp quota while scheduling.
            else:
                assert list(permitted_or_timeout_val.values())[0] == TIMEOUT_VAL
                raise DprequestTimeoutError()
        except (DprequestTimeoutError, DpBlockRetiredError) as err:
            if isinstance(err, DprequestTimeoutError):
                # timed-out task must be removed from the waiting queue here
                del_idx = self.dp_waiting_tasks.items.index(task_id)
                self.debug(
                    task_id,
                    "dp request timeout after %d "
                    % self.env.config['task.timeout.interval'],
                )
                self.debug(task_id, "task get dequeued from wait queue")
                del self.dp_waiting_tasks.items[del_idx]
            self.debug(
                task_id,
                "policy=%s, fail to acquire dp: %s" % (self.dp_policy, err.__repr__()),
            )
            if isinstance(err, DpBlockRetiredError):
                # should not issue get to quota
                assert not self.task_state[task_id]["dp_permitted_event"].ok
            # interrupt dp_waiting_proc
            if this_task["handler_proc_resource"].is_alive:
                this_task["handler_proc_resource"].interrupt(
                    DpHandlerMessageType.DP_HANDLER_INTERRUPT_MSG
                )
            dp_committed_event.fail(err)
            return 1
# should trigger committed dp_committed_event after return
    def task_dp_handler(self, task_id):
        """Per-task DP acquisition process (policy dispatch).

        FCFS: admission control then immediate grant. Centralized-quota
        policies: enqueue the task and wait for the scheduler's permission.
        Accum-container (RR) policies: wait on the per-block accumulation
        getters. On grant, non-RDP commits the allocation and succeeds
        dp_committed_event; for RDP the commit already happened inside the
        scheduling algorithm (which also set is_dp_granted).
        """
        self.debug(task_id, "Task DP handler created")
        this_task = self.task_state[task_id]
        dp_committed_event = this_task["dp_committed_event"]
        resource_demand = this_task["resource_request"]
        # admission control
        # getevent -> blk_idx
        if self.is_dp_policy_fcfs:
            this_task["is_admission_control_ok"] = self._check_task_admission_control(
                task_id
            )
            if not this_task["is_admission_control_ok"]:
                return
            self._do_fcfs(task_id)
            # always grant because admission control is ok
            self.task_state[task_id]["is_dp_granted"] = True
        else:  # policy other than fcfs
            if self.is_admission_control_enabled:
                this_task[
                    "is_admission_control_ok"
                ] = self._check_task_admission_control(task_id)
                if not this_task["is_admission_control_ok"]:
                    return
            update_status = self._check_n_update_block_state(task_id)
            if update_status == 1:
                # block-state update already rejected the task
                self.task_state[task_id]["is_dp_granted"] = False
            else:
                if self.is_centralized_quota_sched:
                    # dp_policy_dpft only needs enqueue
                    self.dp_waiting_tasks.put(task_id)
                    self.dp_sched_mail_box.put(task_id)
                    a = yield from self._handle_quota_sched_permission(task_id)
                    if a == 0:
                        self.task_state[task_id]["is_dp_granted"] = True
                else:  ## RR-t RR-n
                    a = yield from self._handle_accum_block_waiters(task_id)
                    if a == 0:
                        self.task_state[task_id]["is_dp_granted"] = True
        # commit dp after permission from sched or directly for fcfs.
        if self.task_state[task_id]["is_dp_granted"]:
            if not self.is_rdp:
                assert not dp_committed_event.triggered
                self._commit_dp_allocation(
                    resource_demand["block_idx"], epsilon=resource_demand["epsilon"]
                )
                dp_committed_event.succeed()
            else:
                assert dp_committed_event.ok
                # rdp is already committed, and triggered in scheduling algo
                pass
        else:
            assert not dp_committed_event.ok
    def task_resources_handler(self, task_id):
        """Per-task CPU/GPU/memory lifecycle process.

        Enqueues the task with the resource scheduler, waits for
        resource_permitted_event, draws the demanded resources from the pools,
        waits for the task's execution process, then returns the resources and
        notifies the scheduler mailbox. An interrupt from the DP handler at
        any stage aborts the request and undoes the matching bookkeeping.
        """
        self.debug(task_id, "Task resource handler created")
        # add to resources wait queue
        self.resource_waiting_tasks.put(task_id)
        self.resource_sched_mail_box.put(
            {"msg_type": ResourceHandlerMessageType.RESRC_TASK_ARRIVAL, "task_id": task_id}
        )
        this_task = self.task_state[task_id]
        resource_allocated_event = this_task["resource_allocated_event"]
        resource_demand = this_task["resource_request"]
        resource_permitted_event = this_task["resource_permitted_event"]
        success_resrc_get_events = []
        try:
            yield resource_permitted_event
            get_cpu_event = self.cpu_pool.get(resource_demand["cpu"])
            success_resrc_get_events.append(get_cpu_event)
            if not self.is_cpu_needed_only:
                get_memory_event = self.memory_pool.get(resource_demand["memory"])
                success_resrc_get_events.append(get_memory_event)
                get_gpu_event = self.gpu_pool.get(resource_demand["gpu"])
                success_resrc_get_events.append(get_gpu_event)
            resource_allocated_event.succeed()
            self.task_state[task_id]["resource_allocate_timestamp"] = self.env.now
        # todo, maybe add another exception handling chain: interrupt dp handler....
        except RejectResourcePermissionError as err:
            # sched find task's dp is rejected, then fail its resource handler.
            resource_allocated_event.fail(ResourceAllocFail(err))
            return
        except simpy.Interrupt as err:
            assert err.args[0] == DpHandlerMessageType.DP_HANDLER_INTERRUPT_MSG
            assert len(success_resrc_get_events) == 0
            resource_allocated_event.fail(
                ResourceAllocFail("Abort resource request: %s" % err)
            )
            defuse(resource_permitted_event)
            # interrupted while permitted
            # very likely
            if not resource_permitted_event.triggered:
                # not yet permitted: retract from wait queue and scheduler mailbox
                assert task_id in self.resource_waiting_tasks.items
                self.resource_waiting_tasks.get(filter=lambda x: x == task_id)
                # NOTE(review): this removes items from the mailbox via filtered
                # get while iterating its items list — appears to rely on at
                # most one matching arrival message being present; confirm.
                for i in self.resource_sched_mail_box.items:
                    if (i['task_id'] == task_id) and (
                        i['msg_type'] == ResourceHandlerMessageType.RESRC_TASK_ARRIVAL
                    ):
                        self.resource_sched_mail_box.get(
                            filter=lambda x: (x['task_id'] == task_id)
                            and (x['msg_type'] == ResourceHandlerMessageType.RESRC_TASK_ARRIVAL)
                        )
                pass
            # fixme coverage
            else:
                # permitted but not yet processed: tell the scheduler the grant failed
                assert resource_permitted_event.ok and (
                    not resource_permitted_event.processed
                )
                self.debug(
                    task_id,
                    "warning: resource permitted but abort to allocate due to interrupt",
                )
                self.resource_sched_mail_box.put(
                    {"msg_type": ResourceHandlerMessageType.RESRC_PERMITED_FAIL_TO_ALLOC, "task_id": task_id}
                )
            return
        exec_proc = this_task['execution_proc']
        try:
            # yield task_completion_event
            yield exec_proc
        except simpy.Interrupt as err:
            assert err.args[0] == DpHandlerMessageType.DP_HANDLER_INTERRUPT_MSG
            if exec_proc.is_alive:
                defuse(exec_proc)
                exec_proc.interrupt(DpHandlerMessageType.DP_HANDLER_INTERRUPT_MSG)
                v = yield exec_proc | self.env.timeout(
                    self.env.config['sim.instant_timeout'], TIMEOUT_VAL
                )
                # exec_proc should exit immeidately after interrupt
                assert v != TIMEOUT_VAL
        self.cpu_pool.put(resource_demand["cpu"])
        if not self.is_cpu_needed_only:
            self.gpu_pool.put(resource_demand["gpu"])
            self.memory_pool.put(resource_demand["memory"])
        self.debug(task_id, "Resource released")
        self.resource_sched_mail_box.put(
            {"msg_type": ResourceHandlerMessageType.RESRC_RELEASE, "task_id": task_id}
        )
        return
## unlock dp quota for dpft policy
    def _dpft_subloop_unlock_quota(self, block_id):
        """DPF-T: per-block process that unlocks the block's DP (or RDP curve)
        into the quota pool in equal per-tick installments until end_of_life,
        then retires the block.

        Notifies the DP scheduler mailbox after every release so waiting
        tasks are re-examined.
        """
        self.debug(
            'block_id %d release quota subloop start at %.3f' % (block_id, self.env.now)
        )
        this_block = self.block_dp_storage.items[block_id]
        # wait first to sync clock
        yield self.global_clock.next_tick
        if this_block["end_of_life"] <= self.env.now:
            # already past end of life: unlock everything at once and retire
            if not self.is_rdp:
                total_dp = this_block["dp_container"].level
                this_block["dp_container"].get(total_dp)
                this_block["dp_quota"].put(total_dp)
            else:
                this_block['rdp_quota_curve'] = this_block['rdp_budget_curve'].copy()
                self._rdp_update_quota_balance(block_id)
            self.dp_sched_mail_box.put([block_id])
            this_block["retire_event"].succeed()
            self._retired_blocks.add(block_id)
            return
        total_ticks = (
            int(
                (this_block["end_of_life"] - self.env.now)
                / self.global_clock.seconds_per_tick
            )
            + 1
        )
        init_level = this_block["dp_container"].level
        for t in range(total_ticks):
            if not self.is_rdp:
                # cumulative release: target level minus what was already taken
                should_release_amount = init_level / total_ticks * (t + 1)
                get_amount = should_release_amount - (
                    init_level - this_block["dp_container"].level
                )
                if t + 1 == total_ticks:
                    # final tick: drain exactly the remaining level (round-off safe)
                    assert (
                        -self.env.config['sim.numerical_delta']
                        < this_block["dp_container"].level - get_amount
                        < self.env.config['sim.numerical_delta']
                    )
                    get_amount = this_block["dp_container"].level
                this_block["dp_container"].get(get_amount)
                assert (
                    this_block["dp_quota"].level + get_amount
                    < this_block["dp_quota"].capacity
                    + self.env.config['sim.numerical_delta']
                )
                put_amount = min(
                    get_amount,
                    this_block["dp_quota"].capacity - this_block["dp_quota"].level,
                )
                this_block["dp_quota"].put(put_amount)
            else:
                # RDP: publish the unlocked fraction of each alpha's budget
                this_block['rdp_quota_curve'] = [
                    b * ((t + 1) / total_ticks) for b in this_block['rdp_budget_curve']
                ]
                self._rdp_update_quota_balance(block_id)
            self.debug(
                'block_id %d release %.3f fraction at %.3f'
                % (block_id, (t + 1) / total_ticks, self.env.now)
            )
            self.dp_sched_mail_box.put([block_id])
            if t + 1 != total_ticks:
                yield self.global_clock.next_tick
            else:
                break
        if not self.is_rdp:
            assert this_block["dp_container"].level == 0
        this_block["retire_event"].succeed()
        self._retired_blocks.add(block_id)
        # HACK: quota increment msg, to trigger waiting task exceptions
        self.dp_sched_mail_box.put([block_id])
        self.debug('block_id %d retired with 0 DP left' % block_id)
def _dpfn_subloop_eol_retire(self, block_id):
this_block = self.block_dp_storage.items[block_id]
lifetime_ub = (
self.env.config['resource_master.block.arrival_interval']
* self.env.config['task.demand.num_blocks.elephant']
* 1.01
)
yield self.env.timeout(lifetime_ub)
if not this_block['retire_event'].triggered:
this_block['retire_event'].succeed()
self.debug(
'DPF-N,N=%d, block %d get retired by timeout with %d arrived tasks'
% (self.denom, block_id, this_block['arrived_task_num'])
)
def _rr_n_subloop_eol_sched(self, block_id):
this_block = self.block_dp_storage.items[block_id]
# due to discretization, shift release during tick seceonds period to the start of this second.
# therefore, last release should happen before end of life
yield self.env.timeout(self.env.config['resource_master.block.lifetime'])
# allocate in ascending order
waiting_task_cn_mapping = this_block["waiting_tid2accum_containers"]
if len(waiting_task_cn_mapping) > 0:
total_alloc = 0 # this is inaccurate
is_dp_sufficient = True
unlocked_dp_quota_total = this_block["residual_dp"]
waiting_tasks_asc = list(
sorted(waiting_task_cn_mapping, key=lambda x: x[1].capacity)
)
self.debug( 'block %d EOL, remove all waiting accum containers %s'
% (block_id, waiting_tasks_asc)
)
for task_id in waiting_tasks_asc:
cn = waiting_task_cn_mapping[task_id]
if (
total_alloc + cn.capacity
<= unlocked_dp_quota_total + self.env.config['sim.numerical_delta']
):
cn.put(cn.capacity)
total_alloc = total_alloc + cn.capacity
elif is_dp_sufficient:
waiting_evt = cn._get_waiters.pop(0)
waiting_evt.fail(
InsufficientDpException(
"block %d remaining uncommitted DP is insufficient for remaining ungranted dp of task %d"
% (block_id, task_id)
)
)
is_dp_sufficient = False
else:
assert not is_dp_sufficient
waiting_evt = cn._get_waiters.pop(0)
waiting_evt.fail(
InsufficientDpException(
"block %d remaining uncommitted DP is insufficient for remaining ungranted dp of task %d"
% (block_id, task_id)
)
)
# remove all waiting accum cn at the eol
this_block["waiting_tid2accum_containers"].pop(task_id)
# fail others waiting tasks
## for RR N policy
    def _rr_nn_subloop_unlock_quota_n_sched(self, block_id):
        """RR-N: per-block process that unlocks DP per task arrival and shares
        it max-min fairly among waiting tasks.

        Driven by task ids posted to the block's mailbox: each of the first
        denom arrivals unlocks a cumulative 1/denom share of the initial
        container level into residual_dp; the residual is then distributed to
        the waiting accumulation containers by max_min_fair_allocation. The
        loop exits once arrivals exceed denom AND the residual is exhausted;
        any tasks still waiting are then failed with StopReleaseDpError.
        Returns 0.
        """
        this_block = self.block_dp_storage.items[block_id]
        this_block["residual_dp"] = 0.0
        init_level = this_block["dp_container"].level
        while True:
            new_tid = yield this_block['_mail_box'].get()
            n = this_block['arrived_task_num']  # should update in task handler
            if n == self.denom:
                # already retired by task handler
                assert this_block["retire_event"].ok
            elif (
                n > self.denom
                and this_block["residual_dp"] < self.env.config['sim.numerical_delta']
            ):
                break
            get_amount = 0  # default: nothing left to unlock (n > denom)
            if n < self.denom:
                # cumulative target minus what has already been taken
                should_release_amount = init_level / self.denom * n
                get_amount = should_release_amount - (
                    init_level - this_block["dp_container"].level
                )
            elif n == self.denom:
                # last counted arrival: drain whatever remains
                get_amount = this_block["dp_container"].level
            if get_amount != 0:
                this_block["dp_container"].get(get_amount)
                this_block["residual_dp"] += get_amount
            else:
                assert this_block["dp_container"].level == 0
            # only allocate among active getter tasks
            waiting_task_cn_mapping = this_block["waiting_tid2accum_containers"]
            if len(waiting_task_cn_mapping) > 0:
                desired_dp = {
                    tid: cn.capacity - cn.level
                    for tid, cn in waiting_task_cn_mapping.items()
                }
                # self.debug(block_id, "call max_min_fair_allocation")
                fair_allocation = max_min_fair_allocation(
                    demand=desired_dp, capacity=this_block["residual_dp"]
                )
                # all waiting task is granted by this block, return back unused dp
                if sum(fair_allocation.values()) < this_block["residual_dp"]:
                    this_block["residual_dp"] -= sum(
                        fair_allocation.values()
                    )
                else:
                    this_block["residual_dp"] = 0.0
                    yield self.env.timeout(delay=0)  # may update residual ??????
                for tid, dp_alloc_amount in fair_allocation.items():
                    cn = this_block["waiting_tid2accum_containers"][tid]
                    if (
                        -self.env.config['sim.numerical_delta']
                        < (cn.capacity - cn.level) - dp_alloc_amount
                        < self.env.config['sim.numerical_delta']
                    ):
                        # fully satisfied up to round-off: top up and dequeue
                        dp_alloc_amount = cn.capacity - cn.level
                        cn.put(dp_alloc_amount)
                        this_block["waiting_tid2accum_containers"].pop(tid)
                        self.debug(
                            tid,
                            "accum containers granted and removed for block %s"
                            % block_id,
                        )
                    else:
                        cn.put(dp_alloc_amount)
        # now >= end of life
        # wait for dp getter event processed, reject untriggered get
        yield self.env.timeout(delay=0)
        rej_waiting_task_cn_mapping = this_block["waiting_tid2accum_containers"]
        if len(rej_waiting_task_cn_mapping) != 0:
            self.debug(
                "block %d last period of lifetime, with waiting tasks: " % block_id,
                list(rej_waiting_task_cn_mapping.keys()),
            )
            for task_id in list(rej_waiting_task_cn_mapping.keys()):
                cn = rej_waiting_task_cn_mapping[task_id]
                # avoid getter triggered by cn
                waiting_evt = cn._get_waiters.pop(0)
                waiting_evt.fail(
                    StopReleaseDpError(
                        "task %d rejected dp, due to block %d has stopped release"
                        % (task_id, block_id)
                    )
                )
                this_block["waiting_tid2accum_containers"].pop(task_id)
        else:
            self.debug("block %d out of dp, with NO waiting task" % block_id)
        return 0
## for RR T policy
def _rr_t_subloop_unlock_quota_n_sched(self, block_id):
    """Per-block process for the RR-T policy.

    Releases the block's DP budget linearly over its lifetime (one slice per
    global clock tick) into ``residual_dp`` and, each tick, redistributes the
    residual among waiting tasks' accumulation containers via max-min fair
    allocation.  At end of life, all still-waiting getters are failed with
    ``StopReleaseDpError``.  Returns 0 when done.
    """
    this_block = self.block_dp_storage.items[block_id]
    this_block["residual_dp"] = 0.0
    # due to discretization, shift release during tick seceonds period to the start of this second.
    # therefore, last release should happen before end of life
    # wait first to sync clock
    yield self.global_clock.next_tick
    t0 = self.env.now
    # Number of tick boundaries between now and end_of_life (inclusive).
    total_ticks = (
        int((this_block["end_of_life"] - t0) / self.global_clock.seconds_per_tick)
        + 1
    )
    init_level = this_block["dp_container"].level
    ticker_counter = count()
    while True:
        t = next(ticker_counter) + 1  # 1-based tick index
        get_amount = 0  # t + 1 > total_ticks
        if t < total_ticks:
            # Linear schedule: by tick t, t/total_ticks of the initial level
            # should have been released in total; release the shortfall.
            should_release_amount = init_level / total_ticks * t
            get_amount = should_release_amount - (
                init_level - this_block["dp_container"].level
            )
        elif t == total_ticks:
            # Last tick of the lifetime: drain the container and retire.
            get_amount = this_block["dp_container"].level
            this_block["retire_event"].succeed()
        if get_amount != 0:
            this_block["dp_container"].get(get_amount)
            this_block["residual_dp"] = this_block["residual_dp"] + get_amount
        else:
            assert this_block["dp_container"].level == 0
        # only allocate among active getter tasks
        waiting_task_cn_mapping = this_block["waiting_tid2accum_containers"]
        if len(waiting_task_cn_mapping) > 0:
            # Each waiting task demands the headroom left in its container.
            desired_dp = {
                tid: cn.capacity - cn.level
                for tid, cn in waiting_task_cn_mapping.items()
            }
            # self.debug(block_id, "call max_min_fair_allocation")
            fair_allocation = max_min_fair_allocation(
                demand=desired_dp, capacity=this_block["residual_dp"]
            )
            # all waiting task is granted by this block, return back unused dp
            if sum(fair_allocation.values()) < this_block["residual_dp"]:
                this_block["residual_dp"] = this_block["residual_dp"] - sum(
                    fair_allocation.values()
                )
            else:
                this_block["residual_dp"] = 0.0
            for tid, dp_alloc_amount in fair_allocation.items():
                cn = this_block["waiting_tid2accum_containers"][tid]
                # Snap to a full grant when within numerical tolerance so the
                # task's pending get() triggers despite float rounding.
                if (
                    -self.env.config['sim.numerical_delta']
                    < (cn.capacity - cn.level) - dp_alloc_amount
                    < self.env.config['sim.numerical_delta']
                ):
                    dp_alloc_amount = cn.capacity - cn.level
                    cn.put(dp_alloc_amount)
                    this_block["waiting_tid2accum_containers"].pop(tid)
                    self.debug(
                        tid,
                        "accum containers granted and removed for block %s"
                        % block_id,
                    )
                else:
                    cn.put(dp_alloc_amount)
        # Exit once the lifetime elapsed and no residual DP remains;
        # otherwise sleep until the next tick.
        if (
            this_block["residual_dp"] < self.env.config['sim.numerical_delta']
            and t >= total_ticks
        ):
            break
        else:
            yield self.global_clock.next_tick
    # now >= end of life
    # wait for dp getter event processed, reject untriggered get
    yield self.env.timeout(delay=0)
    rej_waiting_task_cn_mapping = this_block[
        "waiting_tid2accum_containers"
    ]
    if len(rej_waiting_task_cn_mapping) != 0:
        self.debug(
            "block %d EOL, removing accum containers for waiting tasks: %s"
            % (block_id, list(rej_waiting_task_cn_mapping.keys()))
        )
        for task_id in list(rej_waiting_task_cn_mapping.keys()):
            cn = rej_waiting_task_cn_mapping[task_id]
            # avoid getter triggered by cn
            waiting_evt = cn._get_waiters.pop(0)
            waiting_evt.fail(
                StopReleaseDpError(
                    "aaa task %d rejected dp, due to block %d has run out of dp"
                    % (task_id, block_id)
                )
            )
            this_block["waiting_tid2accum_containers"].pop(task_id)
    else:
        self.debug("block %d run out of dp, with NO waiting task" % block_id)
    return 0
def _dp_dpfna_eol_callback_gen(self, b_idx):
    """Build the end-of-life callback for block *b_idx* (DPF-NA policy).

    The returned callback drains whatever DP is still locked in the block's
    container, deposits it (clamped to the free headroom) into the block's
    quota pool, then signals retirement via the mail box, the block's
    retire event, and the retired-blocks set.
    """
    block_entry = self.block_dp_storage.items[b_idx]
    container = block_entry['dp_container']
    quota_pool = block_entry['dp_quota']

    def eol_callback(eol_evt):
        # Drain the remaining (unreleased) budget from the container.
        remaining = container.level
        container.get(remaining)
        assert (
            quota_pool.level + remaining
            < quota_pool.capacity + self.env.config['sim.numerical_delta']
        )
        # Clamp to the quota's headroom before depositing (float safety).
        remaining = min(remaining, quota_pool.capacity - quota_pool.level)
        quota_pool.put(remaining)
        assert container.level == 0
        # inform mail box retire
        self.dp_sched_mail_box.put([b_idx])
        self.block_dp_storage.items[b_idx]["retire_event"].succeed()
        self._retired_blocks.add(b_idx)
        self.debug(
            'block %d EOF, move remaining dp from container to quota' % b_idx
        )

    return eol_callback
def generate_datablocks_loop(self):
    """Main data-block arrival process.

    Creates ``init_amount`` blocks back-to-back, signals
    ``init_blocks_ready`` (returning immediately if blocks are static), then
    keeps creating one block per ``arrival_interval``.  Each new block gets
    its DP container, an optional RDP budget curve, an optional centralized
    quota pool, and a per-policy release/scheduling process.
    """
    cur_block_nr = 0
    block_id = count()
    is_static_blocks = self.env.config["resource_master.block.is_static"]
    init_amount = self.env.config["resource_master.block.init_amount"]
    while True:
        if cur_block_nr > init_amount:
            # Steady state: wait out the inter-arrival interval.
            yield self.env.timeout(
                self.env.config["resource_master.block.arrival_interval"]
            )
        elif cur_block_nr < init_amount:
            cur_block_nr += 1
        elif cur_block_nr == init_amount:
            cur_block_nr += 1
            # The last initial block is about to be created; unblock tasks.
            self.init_blocks_ready.succeed()
            if is_static_blocks:
                self.debug(
                    'epsilon initial static data blocks: %s'
                    % pp.pformat(
                        [
                            blk['dp_container'].capacity
                            for blk in self.block_dp_storage.items
                        ]
                    )
                )
                return
        # generate block_id
        cur_block_id = next(block_id)
        total_dp = self.env.config['resource_master.block.init_epsilon']
        new_block = DummyPool(
            self.env,
            capacity=total_dp,
            init=total_dp,
            name=cur_block_id,
            hard_cap=True,
        )
        rdp_budget_curve = []
        if self.is_rdp:
            # Convert the (epsilon, delta) budget into an RDP budget per
            # Renyi order alpha.
            for a in sorted(ALPHAS):
                assert a > 1
                total_delta = self.env.config['resource_master.block.init_delta']
                total_rdp = max(0, total_dp - math.log(1 / total_delta) / (a - 1))
                rdp_budget_curve.append(
                    total_rdp
                )
        if self.is_T_based_retire:
            EOL = self.env.now + self.env.config['resource_master.block.lifetime']
        else:
            EOL = None
        # no sched fcfs
        accum_cn_dict = None
        new_quota = None
        if self.is_accum_container_sched:
            # Rate-limiting policies track per-task accumulation containers.
            accum_cn_dict = dict()
            new_quota = None
        elif self.is_centralized_quota_sched:
            # Centralized policies release DP into a shared quota pool.
            accum_cn_dict = None
            new_quota = DummyPool(
                self.env,
                capacity=total_dp,
                init=0,
                name=cur_block_id,
                hard_cap=True,
            )
        block_item = {
            "global_epsilon_dp": total_dp,
            "dp_container": new_block,
            "rdp_budget_curve": rdp_budget_curve,
            "rdp_quota_curve": [0.0, ] * len(rdp_budget_curve),
            "rdp_consumption": [0.0, ] * len(rdp_budget_curve),
            "rdp_quota_balance": [0.0, ]
            * len(rdp_budget_curve),  # balance = quota - consumption
            "dp_quota": new_quota,  # for dpf policy
            # lifetime is # of periods from born to end
            "end_of_life": EOL,
            "waiting_tid2accum_containers": accum_cn_dict,  # task_id: container, for rate limiting policy
            "retire_event": self.env.event(),
            'arrived_task_num': 0,
            'last_task_arrival_time': None,
            'create_time': self.env.now,
            'residual_dp': 0.0,
            'block_proc': None,
            '_mail_box': DummyPutQueue(self.env, capacity=1, hard_cap=True),
        }
        self.block_dp_storage.put(block_item)
        self.unused_dp.put(total_dp)
        self.debug("new data block %d created" % cur_block_id)
        # Spawn the per-policy DP release/scheduling machinery for this block.
        if self.is_dp_policy_rr_t:
            if not self.is_rdp:
                block_item['block_proc'] = self.env.process(
                    self._rr_t_subloop_unlock_quota_n_sched(cur_block_id)
                )
            else:
                raise NotImplementedError()
        elif self.is_dp_policy_rr_n2:
            if not self.is_rdp:
                block_item['block_proc'] = self.env.process(
                    self._rr_n_subloop_eol_sched(cur_block_id)
                )
            else:
                raise NotImplementedError()
        elif self.is_dp_policy_dpft:
            # one process for rdp and non-rdp
            block_item['block_proc'] = self.env.process(self._dpft_subloop_unlock_quota(cur_block_id))
        elif self.is_dp_policy_dpfna:
            # DPF-NA retires via a timeout callback instead of a process;
            # this replaces the plain event created in block_item above.
            block_item['retire_event'] = self.env.timeout(
                self.env.config['resource_master.block.lifetime']
            )
            block_item['retire_event'].callbacks.append(
                self._dp_dpfna_eol_callback_gen(
                    self.block_dp_storage.items.index(block_item)
                )
            )
        elif self.is_dp_policy_rr_n:
            block_item['block_proc'] = self.env.process(
                self._rr_nn_subloop_unlock_quota_n_sched(cur_block_id)
            )
        elif (
            self.is_dp_policy_dpfn
            and not self.env.config['resource_master.block.is_static']
        ):
            block_item['block_proc'] = self.env.process(
                self._dpfn_subloop_eol_retire(cur_block_id)
            )
def _commit_dp_allocation(self, block_idx: List[int], epsilon: float):
    """Permanently commit ``epsilon`` DP on every block in ``block_idx``.

    A block container's *capacity* tracks its uncommitted DP, so the commit
    shrinks each container's capacity by ``epsilon`` (never below its current
    level).  The global unused/committed accounting pools are updated by the
    total committed amount.

    Args:
        block_idx: non-empty list of block indices to commit on.
        epsilon: DP amount committed per block.
    """
    assert len(block_idx) > 0
    tolerance = self.env.config['sim.numerical_delta']
    storage = self.block_dp_storage.items
    # Pass 1: verify every block can absorb the commit before mutating any.
    for i in block_idx:
        container = storage[i]["dp_container"]
        assert epsilon <= container.capacity or (
            epsilon - container.capacity < tolerance
            and container.level == 0
        )
        assert (
            container.level + epsilon
        ) < container.capacity + tolerance
    # Pass 2: deduct the committed amount from each block's capacity.
    for i in block_idx:
        container = storage[i]["dp_container"]
        container.capacity = max(container.capacity - epsilon, container.level)
    committed_amount = min(epsilon * len(block_idx), self.unused_dp.level)
    self.unused_dp.get(committed_amount)
    self.committed_dp.put(committed_amount)
    if IS_DEBUG:
        unused_dp = []
        if (
            self.is_centralized_quota_sched
        ):  # :self.is_dp_policy_dpfn or self.is_dp_policy_dpft or self.is_dp_policy_dpfa
            for i, block in enumerate(storage):
                amount = round(block['dp_quota'].level, 2)
                unused_dp.append(-amount if i in block_idx else amount)
            if self.env.config['workload_test.enabled']:
                self.debug(
                    "unused dp quota after commit: %s (negative sign denotes committed block)"
                    % pp.pformat(unused_dp)
                )
        elif self.is_accum_container_sched:
            for i, block in enumerate(storage):
                # uncommitted dp
                amount = round(block['dp_container'].capacity, 2)
                unused_dp.append(-amount if i in block_idx else amount)
            if self.env.config['workload_test.enabled']:
                self.debug(
                    "unused dp after commit: %s (negative sign denotes committed block)"
                    % pp.pformat(unused_dp)
                )
def get_result_hook(self, result):
    """Post-run hook: parse the VCD dump of the ``cpu_pool`` signal and add
    ``'CPU_utilization%'`` (time-weighted busy fraction over the whole sim
    duration) to ``result``.  No-op if VCD tracing is disabled or empty.
    """
    if not self.env.tracemgr.vcd_tracer.enabled:
        return
    cpu_capacity = self.env.config['resource_master.cpu_capacity']
    # Bail out on an empty dump file before invoking the parser.
    with open(self.env.config["sim.vcd.dump_file"]) as vcd_file:
        if vcd_file.read(1) == '':
            return
    with open(self.env.config["sim.vcd.dump_file"]) as vcd_file:
        vcd = VcdParser()
        vcd.parse(vcd_file)
        root_data = vcd.scope.toJson()
        # The third child of the first scope is expected to be the cpu_pool
        # signal written by this component's auto-probes.
        assert root_data['children'][0]['children'][2]['name'] == "cpu_pool"
    # at least 11 sample
    if len(root_data['children'][0]['children'][2]['data']) == 0:
        result['CPU_utilization%'] = 0
        return
    elif len(root_data['children'][0]['children'][2]['data']) <= 10:
        self.debug("WARNING: CPU change sample size <= 10")
    # Each sample is (tick, value); values appear to be VCD binary literals
    # (e.g. 'b101') turned into ints by eval('0' + value).
    # NOTE(review): eval on file contents — trusted here because the VCD is
    # produced by this very simulation, but int(value[1:], 2) would be safer.
    idle_cpu_record = map(
        lambda t: (t[0], eval('0' + t[1])),
        root_data['children'][0]['children'][2]['data'],
    )
    idle_cpu_record = list(idle_cpu_record)
    # record should start at time 0
    if idle_cpu_record[0][0] != 0:
        idle_cpu_record = [(0, cpu_capacity)] + idle_cpu_record
    # Tick arithmetic below assumes duration is expressed in the timescale
    # unit, e.g. "1000 s".
    assert (
        self.env.config['sim.timescale']
        == self.env.config['sim.duration'].split(' ')[1]
    )
    end_tick = int(self.env.config['sim.duration'].split(' ')[0]) - 1
    # Extend the last sample to the end of the simulation.
    if idle_cpu_record[-1][0] != end_tick:
        idle_cpu_record += [
            (end_tick, idle_cpu_record[-1][1])
        ]
    # Pairwise walk: busy time per interval = (capacity - idle) * dt.
    t1, t2 = tee(idle_cpu_record)
    next(t2)
    busy_cpu_time = map(
        lambda t: (cpu_capacity - t[0][1]) * (t[1][0] - t[0][0]),
        zip(t1, t2),
    )
    # cal over start and end
    result['CPU_utilization%'] = (
        100 * sum(busy_cpu_time) / (end_tick + 1) / cpu_capacity
    )
class Tasks(Component):
    """Workload generator component.

    Spawns task processes (random arrivals or a replayed trace), keeps
    VCD-probed lifecycle counters, and optionally logs per-task records to
    the sqlite tracer.  Demand distributions (cpu/gpu/memory/epsilon/
    num_blocks/completion time) are built from config; a configured
    ``*.constant`` key takes precedence over a min/max range.
    """

    base_name = 'tasks'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Dedicated RNG handle; the draw order established below is part of
        # the simulation's reproducibility — do not reorder.
        self.load_rand = self.env.rand
        self.add_connections('resource_master')
        self.add_connections('global_clock')
        self.add_process(self.generate_tasks_loop)
        # --- lifecycle counters (all VCD-probed) ---
        self.task_unpublished_count = DummyPool(self.env)
        self.auto_probe('task_unpublished_count', vcd={})
        self.task_published_count = DummyPool(self.env)
        self.auto_probe('task_published_count', vcd={})
        self.task_sleeping_count = DummyPool(self.env)
        self.auto_probe('task_sleeping_count', vcd={})
        self.task_running_count = DummyPool(self.env)
        self.auto_probe('task_running_count', vcd={})
        self.task_completed_count = DummyPool(self.env)
        self.auto_probe('task_completed_count', vcd={})
        self.task_abort_count = DummyPool(self.env)
        self.auto_probe('task_abort_count', vcd={})
        self.task_ungranted_count = DummyPool(self.env)
        self.auto_probe('task_ungranted_count', vcd={})
        self.task_granted_count = DummyPool(self.env)
        self.auto_probe('task_granted_count', vcd={})
        # Granted-task counters broken down by small/large ("s"/"l") DP
        # demand x small/large block-count demand.
        self.tasks_granted_count_s_dp_s_blk = DummyPool(self.env)
        self.auto_probe('tasks_granted_count_s_dp_s_blk', vcd={})
        self.tasks_granted_count_s_dp_l_blk = DummyPool(self.env)
        self.auto_probe('tasks_granted_count_s_dp_l_blk', vcd={})
        self.tasks_granted_count_l_dp_s_blk = DummyPool(self.env)
        self.auto_probe('tasks_granted_count_l_dp_s_blk', vcd={})
        self.tasks_granted_count_l_dp_l_blk = DummyPool(self.env)
        self.auto_probe('tasks_granted_count_l_dp_l_blk', vcd={})
        self.task_dp_rejected_count = DummyPool(self.env)
        self.auto_probe('task_dp_rejected_count', vcd={})
        if self.env.tracemgr.sqlite_tracer.enabled:
            self.db = self.env.tracemgr.sqlite_tracer.db
            self.db.execute(
                'CREATE TABLE tasks '
                '(task_id INTEGER PRIMARY KEY,'
                ' start_block_id INTEGER,'
                ' num_blocks INTEGER,'
                ' epsilon REAL,'
                ' cpu INTEGER,'
                ' gpu INTEGER,'
                ' memory REAL,'
                ' start_timestamp REAL,'
                ' dp_commit_timestamp REAL,'
                ' resource_allocation_timestamp REAL,'
                ' completion_timestamp REAL,'
                ' publish_timestamp REAL'
                ')'
            )
        else:
            self.db = None
        # --- demand distributions ---
        # CPU: fixed constant if configured, otherwise a uniform int range.
        if self.env.config.get('task.demand.num_cpu.constant') is not None:
            assert isinstance(self.env.config['task.demand.num_cpu.constant'], int)
            self.cpu_dist = lambda: self.env.config['task.demand.num_cpu.constant']
        else:
            num_cpu_min = self.env.config.setdefault('task.demand.num_cpu.min', 1)
            num_cpu_max = self.env.config.setdefault('task.demand.num_cpu.max', num_cpu_min)
            self.cpu_dist = partial(
                self.load_rand.randint,
                num_cpu_min,
                num_cpu_max,
            )
        size_memory_min = self.env.config.setdefault('task.demand.size_memory.min', 1)
        size_memory_max = self.env.config.setdefault('task.demand.size_memory.max', size_memory_min)
        self.memory_dist = partial(self.load_rand.randint, size_memory_min, size_memory_max)
        num_gpu_min = self.env.config.setdefault('task.demand.num_gpu.min', 1)
        # NOTE(review): max defaults to 1 (not num_gpu_min) unlike the
        # cpu/memory patterns above — confirm intended.
        num_gpu_max = self.env.config.setdefault('task.demand.num_gpu.max', 1)
        self.gpu_dist = partial(self.load_rand.randint, num_gpu_min, num_gpu_max)
        if self.env.config.get('task.demand.completion_time.constant') is not None:
            self.completion_time_dist = lambda: self.env.config[
                'task.demand.completion_time.constant'
            ]
        else:
            completion_time_min = self.env.config.setdefault('task.demand.completion_time.min', 0)
            completion_time_max = self.env.config.setdefault('task.demand.completion_time.max', completion_time_min)
            self.completion_time_dist = partial(
                self.load_rand.randint, completion_time_min, completion_time_max,)
        choose_one = lambda *kargs, **kwargs: self.load_rand.choices(*kargs, **kwargs)[
            0
        ]
        # Epsilon demand: mice/elephant mixture; the tiny uniform factor
        # breaks exact ties between equal demands.
        e_mice_fraction = self.env.config['task.demand.epsilon.mice_percentage'] / 100
        choose_and_discount = lambda *kargs, **kwargs: choose_one(
            *kargs, **kwargs
        ) * self.load_rand.uniform(0.9999999, 1)
        self.epsilon_dist = partial(
            choose_and_discount,
            (
                self.env.config['task.demand.epsilon.mice'],
                self.env.config['task.demand.epsilon.elephant'],
            ),
            (e_mice_fraction, 1 - e_mice_fraction),
        )
        block_mice_fraction = (
            self.env.config['task.demand.num_blocks.mice_percentage'] / 100
        )
        self.num_blocks_dist = partial(
            choose_one,
            (
                self.env.config['task.demand.num_blocks.mice'],
                self.env.config['task.demand.num_blocks.elephant'],
            ),
            (block_mice_fraction, 1 - block_mice_fraction),
        )
def generate_tasks_loop(self):
    """Task-arrival process.

    Waits for the initial data blocks, then either (a) draws Poisson
    inter-arrival times and randomized demands, or (b) replays tasks from
    the configured YAML workload trace when ``workload_test.enabled``.
    """
    task_id = count()
    arrival_interval_dist = partial(
        self.load_rand.expovariate, 1 / self.env.config['task.arrival_interval']
    )

    ## wait for generating init blocks
    def init_one_task(
        task_id,  # NOTE: shadows the outer `task_id` counter inside this helper
        start_block_idx,
        end_block_idx,
        epsilon,
        delta,
        e_rdp,
        completion_time,
        cpu_demand,
        gpu_demand,
        memory_demand,
    ):
        # Spawn the task's lifecycle process and announce it to the
        # resource master via its mail box.
        task_process = self.env.process(
            self.task(
                task_id,
                start_block_idx,
                end_block_idx,
                epsilon,
                delta,
                e_rdp,
                completion_time,
                cpu_demand,
                gpu_demand,
                memory_demand,
            )
        )
        new_task_msg = {
            "message_type": DpHandlerMessageType.NEW_TASK,
            "task_id": task_id,
            "task_process": task_process,
        }
        self.resource_master.mail_box.put(new_task_msg)

    yield self.resource_master.init_blocks_ready
    if not self.env.config['workload_test.enabled']:
        while True:
            yield self.env.timeout(arrival_interval_dist())
            t_id = next(task_id)
            # query existing data blocks
            num_stored_blocks = len(self.resource_master.block_dp_storage.items)
            assert num_stored_blocks > 0
            # Demand the most recent blocks, clamped to what exists.
            num_blocks_demand = min(
                max(1, round(self.num_blocks_dist())), num_stored_blocks
            )
            epsilon = self.epsilon_dist()
            if self.resource_master.is_rdp:
                # Translate the (epsilon, DELTA) demand into a per-alpha
                # RDP demand for the Gaussian mechanism.
                sigma = gaussian_dp2sigma(epsilon, 1, DELTA)
                rdp_demand = compute_rdp_epsilons_gaussian(sigma, ALPHAS)
            else:
                rdp_demand = None
            init_one_task(
                task_id=t_id,
                start_block_idx=num_stored_blocks - num_blocks_demand,
                end_block_idx=num_stored_blocks - 1,
                epsilon=epsilon,
                delta=DELTA,
                e_rdp=rdp_demand,
                completion_time=self.completion_time_dist(),
                cpu_demand=self.cpu_dist(),
                gpu_demand=self.gpu_dist(),
                memory_demand=self.memory_dist(),
            )
    else:
        assert self.env.config['workload_test.workload_trace_file']
        with open(self.env.config['workload_test.workload_trace_file']) as f:
            tasks = yaml.load(f, Loader=yaml.FullLoader)
            # Replay in arrival order; trace times must be non-decreasing
            # relative to the running clock.
            for t in sorted(tasks, key=lambda x: x['arrival_time']):
                assert t['arrival_time'] - self.env.now >= 0
                yield self.env.timeout(t['arrival_time'] - self.env.now)
                if self.resource_master.is_rdp:
                    sigma = gaussian_dp2sigma(t['epsilon'], 1, DELTA)
                    rdp_demand = compute_rdp_epsilons_gaussian(sigma, ALPHAS)
                else:
                    rdp_demand = None
                t_id = next(task_id)
                init_one_task(
                    task_id=t_id,
                    start_block_idx=t['start_block_index'],
                    end_block_idx=t['end_block_index'],
                    epsilon=t['epsilon'],
                    delta=DELTA,
                    e_rdp=rdp_demand,
                    completion_time=t['completion_time'],
                    cpu_demand=t['cpu_demand'],
                    gpu_demand=t['gpu_demand'],
                    memory_demand=t['memory_demand'],
                )
def task(
    self,
    task_id,
    start_block_idx,
    end_block_idx,
    epsilon,
    delta,
    e_rdp,
    completion_time,
    cpu_demand,
    gpu_demand,
    memory_demand,
):
    """Lifecycle process of a single task.

    Spawns two child processes — one waiting for compute-resource
    allocation and execution, one waiting for the DP commit — sends the
    allocation request to the resource master, and finally publishes the
    task iff its DP was granted.  Timings are recorded in
    ``resource_master.task_state`` and, when enabled, the sqlite DB.
    Failures are reported via counters and negative timestamps, not raised.
    """
    num_blocks_demand = end_block_idx - start_block_idx + 1
    if self.env.config['workload_test.enabled']:
        self.debug(
            task_id,
            'DP demand epsilon=%.2f for blocks No. %s '
            % (epsilon, list(range(start_block_idx, end_block_idx + 1))),
        )
    else:
        self.debug(
            task_id,
            'DP demand epsilon=%.2f for blocks No. %s '
            % (epsilon, range(start_block_idx, end_block_idx).__repr__()),
        )
    self.task_unpublished_count.put(1)
    self.task_ungranted_count.put(1)
    self.task_sleeping_count.put(1)
    t0 = self.env.now
    resource_allocated_event = self.env.event()
    dp_committed_event = self.env.event()
    task_init_event = self.env.event()

    def run_task(task_id, resource_allocated_event):
        # Child process: wait for compute resources, then run to completion
        # (or abort on allocation failure / preemption).
        # Returns 0 on success, 1 on abort.
        assert not resource_allocated_event.triggered
        try:
            yield resource_allocated_event
            resource_alloc_time = self.resource_master.task_state[task_id][
                "resource_allocate_timestamp"
            ]
            assert resource_alloc_time is not None
            resrc_allocation_wait_duration = resource_alloc_time - t0
            self.debug(
                task_id,
                'INFO: Compute Resources allocated after',
                timedelta(seconds=resrc_allocation_wait_duration),
            )
            self.task_sleeping_count.get(1)
            self.task_running_count.put(1)
        except ResourceAllocFail as err:
            task_abort_timestamp = self.resource_master.task_state[task_id][
                "task_completion_timestamp"
            ] = -self.env.now
            # note negative sign here
            task_preempted_duration = -task_abort_timestamp - t0
            self.debug(
                task_id,
                'WARNING: Resource Allocation fail after',
                timedelta(seconds=task_preempted_duration),
            )
            self.task_sleeping_count.get(1)
            self.task_abort_count.put(1)
            return 1
        # resource_request_msg is bound by the enclosing scope before this
        # process first resumes, so the closure reference is safe.
        core_running_task = self.env.timeout(
            resource_request_msg["completion_time"]
        )

        def post_completion_callback(event):
            task_completion_timestamp = self.resource_master.task_state[task_id][
                "task_completion_timestamp"
            ] = self.env.now
            task_completion_duration = task_completion_timestamp - t0
            self.debug(
                task_id,
                'Task completed after',
                timedelta(seconds=task_completion_duration),
            )
            self.task_running_count.get(1)
            self.task_completed_count.put(1)

        try:
            # running task
            yield core_running_task
            post_completion_callback(core_running_task)
            return 0
        except simpy.Interrupt as err:
            assert err.args[0] == ResourceHandlerMessageType.RESRC_HANDLER_INTERRUPT_MSG
            # triggered but not porocessed: completion raced the interrupt,
            # count the task as completed.
            if core_running_task.triggered:
                # same as post completion_event handling
                assert not core_running_task.processed
                post_completion_callback(core_running_task)
                # fixme coverage
            else:
                task_abort_timestamp = self.resource_master.task_state[task_id][
                    "task_completion_timestamp"
                ] = -self.env.now
                # note negative sign here
                task_preempted_duration = -task_abort_timestamp - t0
                self.debug(
                    task_id,
                    'Task preempted while running after',
                    timedelta(seconds=task_preempted_duration),
                )
                self.task_running_count.get(1)
                self.task_abort_count.put(1)
                return 1

    def wait_for_dp(task_id, dp_committed_event, epsilon_demand, num_blocks_demand):
        # Child process: wait for the DP commit decision.
        # Returns 0 when granted (and classifies the task), 1 when rejected.
        assert not dp_committed_event.triggered
        t0 = self.env.now
        try:
            yield dp_committed_event
            dp_committed_time = self.resource_master.task_state[task_id][
                "dp_commit_timestamp"
            ] = self.env.now
            dp_committed_duration = dp_committed_time - t0
            self.debug(
                task_id,
                'INFO: DP committed after',
                timedelta(seconds=dp_committed_duration),
            )
            self.task_ungranted_count.get(1)
            self.task_granted_count.put(1)
            # (is_mice_dp, is_mice_blocks) classification for counters.
            task_class = self.resource_master._is_mice_task_dp_demand(
                epsilon_demand, num_blocks_demand
            )
            if task_class == (True, True):
                self.tasks_granted_count_s_dp_s_blk.put(1)
            elif task_class == (True, False):
                self.tasks_granted_count_s_dp_l_blk.put(1)
            elif task_class == (False, True):
                self.tasks_granted_count_l_dp_s_blk.put(1)
            else:
                assert task_class == (False, False)
                self.tasks_granted_count_l_dp_l_blk.put(1)
            return 0
        except (
            DprequestTimeoutError,
            InsufficientDpException,
            StopReleaseDpError,
            DpBlockRetiredError,
        ) as err:
            assert not dp_committed_event.ok
            dp_rejected_timestamp = self.resource_master.task_state[task_id][
                "dp_commit_timestamp"
            ] = -self.env.now
            allocation_rej_duration = -dp_rejected_timestamp - t0
            self.debug(
                task_id,
                'WARNING: DP commit fails after',
                timedelta(seconds=allocation_rej_duration),
                err.__repr__(),
            )
            self.task_dp_rejected_count.put(1)
            return 1

    # listen, wait for allocation
    running_task = self.env.process(run_task(task_id, resource_allocated_event))
    waiting_for_dp = self.env.process(
        wait_for_dp(task_id, dp_committed_event, epsilon, num_blocks_demand)
    )
    # prep allocation request,
    resource_request_msg = {
        "message_type": DpHandlerMessageType.ALLOCATION_REQUEST,
        "task_id": task_id,
        "cpu": cpu_demand,
        "memory": memory_demand,
        "gpu": gpu_demand,
        "epsilon": epsilon,
        "delta": delta,
        # todo maybe exclude EOL blocks?
        "e_rdp": e_rdp,  # a list of rdp demand
        "block_idx": list(
            range(start_block_idx, end_block_idx + 1)
        ),  # choose latest num_blocks_demand
        "completion_time": completion_time,
        "resource_allocated_event": resource_allocated_event,
        "dp_committed_event": dp_committed_event,
        'task_init_event': task_init_event,
        "user_id": None,
        "model_id": None,
        'execution_proc': running_task,
        'waiting_for_dp_proc': waiting_for_dp,
    }
    # send allocation request, note, do it when child process is already listening
    self.resource_master.mail_box.put(resource_request_msg)
    t0 = self.env.now
    if self.db:
        assert isinstance(task_id, int)
        assert isinstance(resource_request_msg["block_idx"][0], int)
        assert isinstance(num_blocks_demand, int)
        assert isinstance(resource_request_msg["epsilon"], float)
        assert isinstance(resource_request_msg["cpu"], int)
        assert isinstance(resource_request_msg["gpu"], (int, NoneType))
        assert isinstance(resource_request_msg["memory"], (int, NoneType))
        assert isinstance(t0, (float, int))

        def db_init_task():
            self.db.execute(
                'INSERT INTO tasks '
                '(task_id, start_block_id, num_blocks, epsilon, cpu, gpu, memory, '
                'start_timestamp) '
                'VALUES (?,?,?,?,?,?,?,?)',
                (
                    task_id,
                    resource_request_msg["block_idx"][0],
                    num_blocks_demand,
                    resource_request_msg["epsilon"],
                    resource_request_msg["cpu"],
                    resource_request_msg["gpu"],
                    resource_request_msg["memory"],
                    t0,
                ),
            )

        # Retry transient sqlite failures (e.g. "database is locked").
        # BUG FIX: the original `for/else` did `raise (e)`, but Python 3
        # unbinds `e` when its except clause exits, so exhausting the
        # retries raised NameError instead of the DB error.  Keep the last
        # exception and re-raise it explicitly.
        last_exc = None
        for i in range(20):  # try 20 times
            try:
                db_init_task()
                break
            except Exception as e:
                last_exc = e
                time.sleep(0.3)
        else:
            raise last_exc
    dp_grant, task_exec = yield self.env.all_of([waiting_for_dp, running_task])
    if dp_grant.value == 0:
        # verify, if dp granted, then task must be completed.
        assert task_exec.value == 0
        self.resource_master.task_state[task_id][
            "task_publish_timestamp"
        ] = self.env.now
        self.task_unpublished_count.get(1)
        self.task_published_count.put(1)
        publish_duration = self.env.now - t0
        self.debug(
            task_id,
            "INFO: task get published after ",
            timedelta(seconds=publish_duration),
        )
    else:
        assert dp_grant.value == 1
        publish_fail_duration = self.env.now - t0
        self.debug(
            task_id,
            "WARNING: task fail to publish after ",
            timedelta(seconds=publish_fail_duration),
        )
        self.resource_master.task_state[task_id]["task_publish_timestamp"] = None
    if self.db:
        # verify iff dp commit fail <=> no publish
        if (
            self.resource_master.task_state[task_id]["task_publish_timestamp"]
            is None
        ):
            assert (
                self.resource_master.task_state[task_id]["dp_commit_timestamp"] <= 0
            )
        if self.resource_master.task_state[task_id]["dp_commit_timestamp"] < 0:
            assert (
                self.resource_master.task_state[task_id]["task_publish_timestamp"]
                is None
            )
        assert isinstance(
            self.resource_master.task_state[task_id]["dp_commit_timestamp"],
            (float, int),
        )
        assert isinstance(
            self.resource_master.task_state[task_id]["resource_allocate_timestamp"],
            (float, NoneType, int),
        )
        assert isinstance(
            self.resource_master.task_state[task_id]["task_completion_timestamp"],
            (float, int),
        )
        assert isinstance(
            self.resource_master.task_state[task_id]["task_publish_timestamp"],
            (float, NoneType, int),
        )

        def db_update_task():
            self.db.execute(
                'UPDATE tasks '
                'set dp_commit_timestamp = ?, resource_allocation_timestamp = ?, completion_timestamp = ?, publish_timestamp = ?'
                'where task_id= ?',
                (
                    self.resource_master.task_state[task_id]["dp_commit_timestamp"],
                    self.resource_master.task_state[task_id][
                        "resource_allocate_timestamp"
                    ],
                    self.resource_master.task_state[task_id][
                        "task_completion_timestamp"
                    ],
                    self.resource_master.task_state[task_id][
                        "task_publish_timestamp"
                    ],
                    task_id,
                ),
            )

        # Same retry/re-raise fix as for db_init_task above.
        last_exc = None
        for i in range(20):  # try 20 times
            try:
                db_update_task()
                break
            except Exception as e:
                last_exc = e
                time.sleep(0.3)
        else:
            raise last_exc
    return
def get_result_hook(self, result):
    """Post-run hook: aggregate task outcomes from the sqlite trace into
    ``result`` — success counts (total and per mice/elephant demand class),
    throughput per hour, and DP-allocation latency stats (avg/min/max plus
    NTILE-approximated percentiles).  A task counts as succeeded when
    ``dp_commit_timestamp >= 0``.
    """
    if not self.db:
        return
    result['succeed_tasks_total'] = self.db.execute(
        'SELECT COUNT() FROM tasks WHERE dp_commit_timestamp >=0'
    ).fetchone()[0]
    # Classes split at the average epsilon / block count.
    # NOTE(review): strict < and > exclude tasks exactly at the average, so
    # the four class counts may not sum to the total — confirm intended.
    result['succeed_tasks_s_dp_s_blk'] = self.db.execute(
        'SELECT COUNT() FROM tasks WHERE dp_commit_timestamp >=0 and epsilon < ? and num_blocks < ?',
        (self.resource_master._avg_epsilon, self.resource_master._avg_num_blocks),
    ).fetchone()[0]
    result['succeed_tasks_s_dp_l_blk'] = self.db.execute(
        'SELECT COUNT() FROM tasks WHERE dp_commit_timestamp >=0 and epsilon < ? and num_blocks > ?',
        (self.resource_master._avg_epsilon, self.resource_master._avg_num_blocks),
    ).fetchone()[0]
    result['succeed_tasks_l_dp_s_blk'] = self.db.execute(
        'SELECT COUNT() FROM tasks WHERE dp_commit_timestamp >=0 and epsilon > ? and num_blocks < ?',
        (self.resource_master._avg_epsilon, self.resource_master._avg_num_blocks),
    ).fetchone()[0]
    result['succeed_tasks_l_dp_l_blk'] = self.db.execute(
        'SELECT COUNT() FROM tasks WHERE dp_commit_timestamp >=0 and epsilon > ? and num_blocks > ?',
        (self.resource_master._avg_epsilon, self.resource_master._avg_num_blocks),
    ).fetchone()[0]
    # NOTE(review): assumes env.time() returns elapsed simulated seconds.
    result['succeed_tasks_per_hour'] = result['succeed_tasks_total'] / (
        self.env.time() / 3600
    )
    # not exact cal for median
    # Percentile via NTILE buckets: average of the boundary values of the
    # first two buckets; %-templated with internal (trusted) values only.
    sql_duration_percentile = """
    with nt_table as
    (
    select (%s - start_timestamp) AS dp_allocation_duration, ntile(%d) over (order by (dp_commit_timestamp - start_timestamp) desc) ntile
    from tasks
    WHERE dp_commit_timestamp >=0
    )
    select avg(a)from (
    select min(dp_allocation_duration) a
    from nt_table
    where ntile = 1
    union
    select max(dp_allocation_duration) a
    from nt_table
    where ntile = 2
    )"""
    result['dp_allocation_duration_avg'] = self.db.execute(
        'SELECT AVG(dp_commit_timestamp - start_timestamp) as dur FROM tasks WHERE dp_commit_timestamp >=0 '
    ).fetchone()[0]
    result['dp_allocation_duration_min'] = self.db.execute(
        'SELECT MIN(dp_commit_timestamp - start_timestamp) as dur FROM tasks WHERE dp_commit_timestamp >=0'
    ).fetchone()[0]
    result['dp_allocation_duration_max'] = self.db.execute(
        'SELECT MAX(dp_commit_timestamp - start_timestamp) as dur FROM tasks WHERE dp_commit_timestamp >=0'
    ).fetchone()[0]
    # Percentiles are only meaningful with enough rows for the NTILE count.
    result['dp_allocation_duration_Median'] = (
        self.db.execute(
            sql_duration_percentile % ('dp_commit_timestamp', 2)
        ).fetchone()[0]
        if result['succeed_tasks_total'] >= 2
        else None
    )
    result['dp_allocation_duration_P99'] = (
        self.db.execute(
            sql_duration_percentile % ('dp_commit_timestamp', 100)
        ).fetchone()[0]
        if result['succeed_tasks_total'] >= 100
        else None
    )
    result['dp_allocation_duration_P999'] = (
        self.db.execute(
            sql_duration_percentile % ('dp_commit_timestamp', 1000)
        ).fetchone()[0]
        if result['succeed_tasks_total'] >= 1000
        else None
    )
    result['dp_allocation_duration_P9999'] = (
        self.db.execute(
            sql_duration_percentile % ('dp_commit_timestamp', 10000)
        ).fetchone()[0]
        if result['succeed_tasks_total'] >= 10000
        else None
    )
# Entry-point guard: this module is driven by the simulation harness, so
# direct execution does nothing.
if __name__ == '__main__':
    pass
| 41.062167 | 170 | 0.542184 |
4fed117ac54f9b521524c5ef09947e9ded37706b | 7,379 | py | Python | donkeycar/templates/donkey2.py | 991693552/donkeycar-pi | f629f69c4a2c3d167e59525e3eedc4c0a7991cba | [
"MIT"
] | 6 | 2019-06-05T02:25:55.000Z | 2021-06-21T13:48:58.000Z | donkeycar/templates/donkey2.py | 991693552/donkeycar_jetson_nano | 0656898c14099f105f82945dd481cc6ce606b103 | [
"MIT"
] | null | null | null | donkeycar/templates/donkey2.py | 991693552/donkeycar_jetson_nano | 0656898c14099f105f82945dd481cc6ce606b103 | [
"MIT"
] | 4 | 2019-06-19T07:04:10.000Z | 2020-03-12T08:10:57.000Z | #!/usr/bin/env python3
"""
Scripts to drive a donkey 2 car and train a model for it.
Usage:
manage.py (drive) [--model=<model>] [--js] [--chaos]
manage.py (train) [--tub=<tub1,tub2,..tubn>] (--model=<model>) [--base_model=<base_model>] [--no_cache]
Options:
-h --help Show this screen.
--tub TUBPATHS List of paths to tubs. Comma separated. Use quotes to use wildcards. ie "~/tubs/*"
--js Use physical joystick.
--chaos Add periodic random steering when manually driving
"""
import os
from docopt import docopt
import donkeycar as dk
#import parts
from donkeycar.parts.camera import PiCamera
from donkeycar.parts.transform import Lambda
from donkeycar.parts.keras import KerasLinear,KerasCategorical
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
from donkeycar.parts.datastore import TubGroup, TubWriter
from donkeycar.parts.controller import LocalWebController, JoystickController
from donkeycar.parts.clock import Timestamp
from donkeycar.parts.xrcamera import XRCamera
def drive(cfg, model_path=None, use_joystick=False, use_chaos=False):
    """Assemble and run the driving vehicle.

    Parts are registered on the vehicle loop in dependency order: clock and
    camera feed the controller and pilot, whose outputs drive the actuators
    and are optionally recorded to a tub.  Runs at ``cfg.DRIVE_LOOP_HZ``
    until ``cfg.MAX_LOOPS`` iterations (parts are updated one after another
    each loop; threaded parts run their own update thread).
    """
    vehicle = dk.vehicle.Vehicle()

    # Wall-clock timestamp for each loop iteration.
    vehicle.add(Timestamp(), outputs='timestamp')

    # Camera runs threaded so frame capture never blocks the drive loop.
    camera = XRCamera(resolution=cfg.CAMERA_RESOLUTION)
    vehicle.add(camera, outputs=['cam/image_array'], threaded=True)

    # Human input: physical joystick, or the web controller (which also
    # serves the management UI and optional chaos steering).
    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        controller = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    else:
        controller = LocalWebController(use_chaos=use_chaos)
    vehicle.add(controller,
                inputs=['cam/image_array'],
                outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
                threaded=True)

    # Boolean gate for the pilot: it runs in every mode except pure 'user'.
    def pilot_condition(mode):
        return mode != 'user'

    vehicle.add(Lambda(pilot_condition),
                inputs=['user/mode'],
                outputs=['run_pilot'])

    # Autopilot model; executed only when run_pilot is True.
    pilot = KerasCategorical()
    if model_path:
        pilot.load(model_path)
    vehicle.add(pilot,
                inputs=['cam/image_array'],
                outputs=['pilot/angle', 'pilot/throttle'],
                run_condition='run_pilot')

    # Select which (angle, throttle) pair actually drives the car,
    # depending on the current mode.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle, pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle
        if mode == 'local_angle':
            return pilot_angle, user_throttle
        return pilot_angle, pilot_throttle

    vehicle.add(Lambda(drive_mode),
                inputs=['user/mode', 'user/angle', 'user/throttle',
                        'pilot/angle', 'pilot/throttle'],
                outputs=['angle', 'throttle'])

    # Actuators translating the selected angle/throttle into PWM pulses.
    steering = PWMSteering(left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)
    throttle = PWMThrottle(max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)
    vehicle.add(steering, inputs=['angle'])
    vehicle.add(throttle, inputs=['throttle'])

    # Record user driving data into a single tub while 'recording' is set.
    record_inputs = ['cam/image_array', 'user/angle', 'user/throttle',
                     'user/mode', 'timestamp']
    record_types = ['image_array', 'float', 'float', 'str', 'str']
    tub = TubWriter(path=cfg.TUB_PATH, inputs=record_inputs, types=record_types)
    vehicle.add(tub, inputs=record_inputs, run_condition='recording')

    # run the vehicle
    vehicle.start(rate_hz=cfg.DRIVE_LOOP_HZ,
                  max_loop_count=cfg.MAX_LOOPS)
def train(cfg, tub_names, new_model_path, base_model_path=None):
    """
    Train a KerasCategorical autopilot on the records stored in tub_names
    and save the resulting model as new_model_path.

    Args:
        cfg: config object providing DATA_PATH, BATCH_SIZE, TRAIN_TEST_SPLIT.
        tub_names: tub path(s) with recorded data; when empty, every tub
            under cfg.DATA_PATH is used.
        new_model_path: output path of the trained model (may contain '~').
        base_model_path: optional pretrained model to fine-tune from.
    """
    x_keys = ['cam/image_array']
    y_keys = ['user/angle', 'user/throttle']

    def bin_steering(record):
        """ Convert categorical steering to linear; shared by train and val. """
        record['user/angle'] = dk.util.data.linear_bin(record['user/angle'])
        # TODO add augmentation that doesn't use opencv
        return record

    new_model_path = os.path.expanduser(new_model_path)
    kl = KerasCategorical()
    if base_model_path is not None:
        # Warm-start from an existing model instead of random weights.
        kl.load(os.path.expanduser(base_model_path))
    print('tub_names', tub_names)
    if not tub_names:
        tub_names = os.path.join(cfg.DATA_PATH, '*')
    tubgroup = TubGroup(tub_names)
    train_gen, val_gen = tubgroup.get_train_val_gen(x_keys, y_keys,
                                                    train_record_transform=bin_steering,
                                                    val_record_transform=bin_steering,
                                                    batch_size=cfg.BATCH_SIZE,
                                                    train_frac=cfg.TRAIN_TEST_SPLIT)
    total_records = len(tubgroup.df)
    total_train = int(total_records * cfg.TRAIN_TEST_SPLIT)
    total_val = total_records - total_train
    print('train: %d, validation: %d' % (total_train, total_val))
    steps_per_epoch = total_train // cfg.BATCH_SIZE
    print('steps_per_epoch', steps_per_epoch)
    kl.train(train_gen,
             val_gen,
             saved_model_path=new_model_path,
             steps=steps_per_epoch,
             train_split=cfg.TRAIN_TEST_SPLIT)
if __name__ == '__main__':
    # Parse CLI arguments (docopt reads the usage string in __doc__) and
    # dispatch to either the drive loop or the training entry point.
    args = docopt(__doc__)
    cfg = dk.load_config()
    if args['drive']:
        drive(cfg, model_path=args['--model'], use_joystick=args['--js'], use_chaos=args['--chaos'])
    elif args['train']:
        tub = args['--tub']
        new_model_path = args['--model']
        base_model_path = args['--base_model']
        # Fix: the '--no_cache' flag was read into an unused local ('cache')
        # and never forwarded; train() takes no cache argument, so the dead
        # assignment is removed. The flag remains accepted but is a no-op.
        train(cfg, tub, new_model_path, base_model_path)
| 36.349754 | 108 | 0.648733 |
b8cb6f9227f04100d3554b00dee434f0aa99caea | 3,080 | py | Python | app/app/settings.py | mufasasa/recipe-app-api | d602137fc9e198a3fc847828abbc0751b280a8af | [
"MIT"
] | null | null | null | app/app/settings.py | mufasasa/recipe-app-api | d602137fc9e198a3fc847828abbc0751b280a8af | [
"MIT"
] | null | null | null | app/app/settings.py | mufasasa/recipe-app-api | d602137fc9e198a3fc847828abbc0751b280a8af | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded here; move it to an environment
# variable before deploying anywhere public.
SECRET_KEY = '1c6*)-))*&#q)c+(-cj779)krw((^32^57%d$yb0q65rc756bm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; must list served hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Local SQLite database stored next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 25.454545 | 91 | 0.692857 |
ab5d4740ab21032b6601106c1950fd36e0bc526e | 8,727 | py | Python | PyFunceble/abstracts/package.py | NeolithEra/PyFunceble | 58db861d36224f279a460f4959aaa2e140ce749f | [
"MIT"
] | null | null | null | PyFunceble/abstracts/package.py | NeolithEra/PyFunceble | 58db861d36224f279a460f4959aaa2e140ce749f | [
"MIT"
] | null | null | null | PyFunceble/abstracts/package.py | NeolithEra/PyFunceble | 58db861d36224f279a460f4959aaa2e140ce749f | [
"MIT"
] | null | null | null | """
The tool to check the availability or syntax of domains, IPv4, IPv6 or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides everything related to the package and its version.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io///en/master/
Project homepage:
https://pyfunceble.github.io/
License:
::
MIT License
Copyright (c) 2017, 2018, 2019, 2020 Nissar Chababy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import PyFunceble.helpers as helpers
class Package:
    """
    Provides some packaging related abstractions: the canonical package
    name and the currently shipped version string.
    """

    NAME = "PyFunceble"
    """
    Sets the package name.

    :type: str
    """

    VERSION = "3.1.0. (Teal Blauwbok: Ladybug)"
    """
    Sets the package version.

    :type: str
    """
class Version:
    """
    Provides a simple way to compare our own versions.
    """

    @classmethod
    def split_versions(cls, version, return_non_digits=False):
        """
        Splits a dotted version string into its numeric components.

        :param str version: The version to split.

        :param bool return_non_digits:
            Also return the first non-digit part of the splitted version.

        :return:
            The list of digit parts or, when :code:`return_non_digits`
            is set, a tuple of (digit parts, first non-digit part).
        :rtype: list, tuple
        """

        # One pass over the dot-separated chunks: each chunk is either a
        # pure number or free text (codename, pre-release tag, ...).
        numeric, textual = [], []
        for chunk in version.split("."):
            (numeric if chunk.isdigit() else textual).append(chunk)

        if not return_non_digits:
            # Caller only wants the numeric parts.
            return numeric

        # Numeric parts first, then the first textual part.
        return numeric, textual[0]

    @classmethod
    def literally_compare(cls, local, upstream):
        """
        Compares the given versions, literally.

        :param str local:
            The local version converted
            by :py:func:`~PyFunceble.abstracts.package.split_versions`.

        :param str upstream:
            The upstream version converted
            by :py:func:`~PyFunceble.abstracts.package.split_versions`.

        :return:
            - :code:`True`: local == upstream
            - :code:`False`: local != upstream
        :rtype: bool
        """

        # Nothing fancy: a strict equality of both representations.
        return local == upstream

    @classmethod
    def compare(cls, upstream):
        """
        Compares the given upstream version with the local one.

        :param str upstream: The raw upstream version string.

        :return:
            - :code:`True`: local < upstream
            - :code:`None`: local == upstream
            - :code:`False`: local > upstream
        :rtype: bool, None
        """

        local_parts = cls.split_versions(Package.VERSION)
        upstream_parts = cls.split_versions(upstream)

        # One slot per part of an `x.y.z` version. Each slot becomes
        # True (local part is older), False (local part is newer) or
        # stays None (parts are equal).
        status = [None, None, None]

        for index, part in enumerate(local_parts):
            mine, theirs = int(part), int(upstream_parts[index])

            if mine < theirs:
                status[index] = True
            elif mine > theirs:
                status[index] = False
            else:
                status[index] = None

        # The most significant differing part decides the overall result;
        # if every part matched, None falls through (same version).
        return next((slot for slot in status if slot is not None), None)

    @classmethod
    def is_local_dev(cls):
        """
        Checks if the local version is the development version.
        """

        # Development builds carry a "dev" marker in their version string.
        return "dev" in Package.VERSION

    @classmethod
    def is_local_cloned(cls):  # pragma: no cover
        """
        Checks if the local version is was downloaded
        per :code:`git clone`.
        """

        # Files which only exist in a cloned repository.
        repository_files = [
            ".coveragerc",
            ".coveralls.yml",
            ".gitignore",
            ".PyFunceble_production.yaml",
            ".travis.yml",
            "CODE_OF_CONDUCT.rst",
            "CONTRIBUTING.rst",
            "dir_structure_production.json",
            "MANIFEST.in",
            "README.rst",
            "requirements.txt",
            "setup.py",
            "version.yaml",
        ]
        # Directories which only exist in a cloned repository.
        repository_dirs = ["docs", "PyFunceble", "tests"]

        # A clone needs the git metadata plus every repository file and
        # directory; the first missing entry short-circuits to False.
        return (
            helpers.Directory(".git").exists()
            and all(helpers.File(name).exists() for name in repository_files)
            and all(helpers.Directory(name).exists() for name in repository_dirs)
        )
| 31.967033 | 88 | 0.573278 |
6d56e05974286be0cd48530301910dae464674af | 553 | py | Python | hospital/admin.py | favourch/hospitalmanagement | 7dcf4d9f548b7808d54b758a1769a3de07280227 | [
"MIT"
] | 3 | 2021-03-08T17:41:12.000Z | 2022-01-10T13:01:20.000Z | hospital/admin.py | sharon-06/hospitalmanagement | 853135de381be15797a219b33b7d383ae2a5c62b | [
"MIT"
] | null | null | null | hospital/admin.py | sharon-06/hospitalmanagement | 853135de381be15797a219b33b7d383ae2a5c62b | [
"MIT"
] | 2 | 2021-12-25T20:01:05.000Z | 2022-01-09T14:12:20.000Z | from django.contrib import admin
from .models import Doctor,Patient,Appointment,PatientDischargeDetails
# Register your models here.
@admin.register(Doctor)
class DoctorAdmin(admin.ModelAdmin):
    """Expose Doctor records in the Django admin with default options."""


@admin.register(Patient)
class PatientAdmin(admin.ModelAdmin):
    """Expose Patient records in the Django admin with default options."""


@admin.register(Appointment)
class AppointmentAdmin(admin.ModelAdmin):
    """Expose Appointment records in the Django admin with default options."""


@admin.register(PatientDischargeDetails)
class PatientDischargeDetailsAdmin(admin.ModelAdmin):
    """Expose PatientDischargeDetails records in the Django admin with default options."""
| 29.105263 | 74 | 0.82821 |
77bbabdabcf6089f1c91b26d0e6f1291a4653e9a | 2,712 | py | Python | neighbourhood/migrations/0001_initial.py | NIelsen-Mudaki/neighbourhood | 12e7a38188e00c1cbc7810745eda4d9d205ae0e1 | [
"Unlicense"
] | null | null | null | neighbourhood/migrations/0001_initial.py | NIelsen-Mudaki/neighbourhood | 12e7a38188e00c1cbc7810745eda4d9d205ae0e1 | [
"Unlicense"
] | null | null | null | neighbourhood/migrations/0001_initial.py | NIelsen-Mudaki/neighbourhood | 12e7a38188e00c1cbc7810745eda4d9d205ae0e1 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2022-03-21 11:24
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11: initial schema for the neighbourhood app
    # (Neighborhood, Posts and Profile models). Do not hand-edit field
    # definitions; create a follow-up migration instead.

    initial = True

    dependencies = [
        # Depends on whichever user model AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # A neighborhood managed by one Admin user; country defaults to Kenya.
        migrations.CreateModel(
            name='Neighborhood',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('location', models.CharField(max_length=255)),
                ('occupants_count', models.IntegerField()),
                ('pub_date', models.DateTimeField(auto_now_add=True)),
                ('country', django_countries.fields.CountryField(default='KE', max_length=2)),
                ('Admin', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # A text post written by a user; profile/neighborhood FKs added below.
        migrations.CreateModel(
            name='Posts',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.TextField()),
                ('pub_date', models.DateTimeField(auto_now_add=True)),
                ('Author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # One profile per user, linked to a neighborhood (default pk '1').
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bio', models.TextField(blank=True)),
                ('photo', models.ImageField(blank=True, default='profile_pics/default.jpg', upload_to='profile_pics/')),
                ('neighborhood', models.ForeignKey(blank=True, default='1', on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.Neighborhood')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Added after Profile exists because Posts is created first above.
        migrations.AddField(
            model_name='posts',
            name='author_profile',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.Profile'),
        ),
        migrations.AddField(
            model_name='posts',
            name='neighborhood',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.Neighborhood'),
        ),
    ]
| 43.741935 | 155 | 0.618363 |
e5df08784497c33e9ee631f885177aba1f349362 | 42,274 | py | Python | run_generator.py | PDillis/stylegan2-fun | 420a2819c12dbe3c0c4704a722e292d541f6b006 | [
"BSD-Source-Code"
] | 40 | 2020-02-18T04:43:45.000Z | 2022-03-14T13:44:36.000Z | run_generator.py | PDillis/stylegan2-fun | 420a2819c12dbe3c0c4704a722e292d541f6b006 | [
"BSD-Source-Code"
] | null | null | null | run_generator.py | PDillis/stylegan2-fun | 420a2819c12dbe3c0c4704a722e292d541f6b006 | [
"BSD-Source-Code"
] | 4 | 2020-11-07T08:35:00.000Z | 2021-09-12T08:54:13.000Z | # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
import argparse
import scipy
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import re
import sys
import os
import pretrained_networks
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import moviepy.editor
import warnings # mostly numpy warnings for me
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
#----------------------------------------------------------------------------
def create_image_grid(images, grid_size=None):
    '''
    Arrange a batch of images into a single grid image.

    Args:
        images: np.array of shape (num, H, W) or (num, H, W, C)
        grid_size: tuple(Int), optional (grid_w, grid_h); when omitted a
            roughly square grid is derived from the number of images
    Returns:
        grid: np.array, single image of shape (grid_h*H, grid_w*W[, C])
    '''
    # Some sanity check:
    assert images.ndim == 3 or images.ndim == 4
    num, img_h, img_w = images.shape[0], images.shape[1], images.shape[2]
    if grid_size is not None:
        grid_w, grid_h = tuple(grid_size)
    else:
        grid_w = max(int(np.ceil(np.sqrt(num))), 1)
        grid_h = max((num - 1) // grid_w + 1, 1)
    # Get the grid.
    # Bug fix: use shape[3:] (the channel axis only) instead of shape[-1:],
    # which for 3-D (grayscale) stacks wrongly appended the image *width*
    # as a channel dimension and broke the fill below.
    grid = np.zeros(
        [grid_h * img_h, grid_w * img_w] + list(images.shape[3:]), dtype=images.dtype
    )
    for idx in range(num):
        # Row-major placement: idx walks left-to-right, top-to-bottom.
        x = (idx % grid_w) * img_w
        y = (idx // grid_w) * img_h
        grid[y : y + img_h, x : x + img_w, ...] = images[idx]
    return grid
#----------------------------------------------------------------------------
def generate_images(network_pkl,
                    seeds,
                    truncation_psi,
                    grid=False):
    """Sample one image per seed from a pretrained generator.

    Each sample is written to the run dir as seed%04d.png; when `grid` is
    True an additional grid.png mosaic of all samples is saved as well.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    # Per-layer noise inputs of the synthesis network (re-seeded per image).
    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi

    generated = []
    total = len(seeds)
    for idx, seed in enumerate(seeds):
        print('Generating image for seed %d (%d/%d)...' % (seed, idx, total))
        rng = np.random.RandomState(seed)
        z = rng.randn(1, *Gs.input_shape[1:])  # [minibatch, component]
        # Deterministic noise maps derived from the same seed.
        tflib.set_vars({var: rng.randn(*var.shape.as_list()) for var in noise_vars})  # [height, width]
        image = Gs.run(z, None, **Gs_kwargs)[0]  # [height, width, channel]
        generated.append(image)
        PIL.Image.fromarray(image, 'RGB').save(dnnlib.make_run_dir_path('seed%04d.png' % seed))
    if grid:
        print('Generating image grid...')
        mosaic = create_image_grid(np.array(generated))
        PIL.Image.fromarray(mosaic, 'RGB').save(dnnlib.make_run_dir_path('grid.png'))
#----------------------------------------------------------------------------
def style_mixing_example(network_pkl, # Path to pretrained model pkl file
                         row_seeds, # Seeds of the source images
                         col_seeds, # Seeds of the destination images
                         truncation_psi, # Truncation trick
                         col_styles, # Styles to transfer from first row to first column
                         minibatch_size=4):
    """Save a style-mixing grid: rows are source seeds, columns destination
    seeds; each cell mixes the column's styles (col_styles layers) into the
    row's latent. Writes one PNG per cell plus a combined grid.png."""
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    w_avg = Gs.get_var('dlatent_avg') # [component]
    # Sanity check: styles are actually possible for generated image size
    max_style = int(2 * np.log2(Gs.output_shape[-1])) - 3
    assert max(col_styles) <= max_style, f"Maximum col-style allowed: {max_style}"
    Gs_syn_kwargs = dnnlib.EasyDict()
    Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_syn_kwargs.randomize_noise = False
    Gs_syn_kwargs.minibatch_size = minibatch_size
    print('Generating W vectors...')
    # Deduplicate so a seed used as both row and column is mapped only once.
    all_seeds = list(set(row_seeds + col_seeds))
    all_z = np.stack([np.random.RandomState(seed).randn(*Gs.input_shape[1:]) for seed in all_seeds]) # [minibatch, component]
    all_w = Gs.components.mapping.run(all_z, None) # [minibatch, layer, component]
    # Truncation trick: pull W toward the average dlatent.
    all_w = w_avg + (all_w - w_avg) * truncation_psi # [minibatch, layer, component]
    w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))} # [layer, component]
    print('Generating images...')
    all_images = Gs.components.synthesis.run(all_w, **Gs_syn_kwargs) # [minibatch, height, width, channel]
    # (seed, seed) keys hold the unmixed images used on the grid's diagonal/edges.
    image_dict = {(seed, seed): image for seed, image in zip(all_seeds, list(all_images))}
    print('Generating style-mixed images...')
    for row_seed in row_seeds:
        for col_seed in col_seeds:
            # Copy the row latent, then overwrite the chosen layers with the
            # column latent's values before synthesizing the mixed image.
            w = w_dict[row_seed].copy()
            w[col_styles] = w_dict[col_seed][col_styles]
            image = Gs.components.synthesis.run(w[np.newaxis], **Gs_syn_kwargs)[0]
            image_dict[(row_seed, col_seed)] = image
    print('Saving images...')
    for (row_seed, col_seed), image in image_dict.items():
        PIL.Image.fromarray(image, 'RGB').save(dnnlib.make_run_dir_path('%d-%d.png' % (row_seed, col_seed)))
    print('Saving image grid...')
    _N, _C, H, W = Gs.output_shape
    # One extra row/column for the unmixed header images; top-left stays black.
    canvas = PIL.Image.new('RGB', (W * (len(col_seeds) + 1), H * (len(row_seeds) + 1)), 'black')
    for row_idx, row_seed in enumerate([None] + row_seeds):
        for col_idx, col_seed in enumerate([None] + col_seeds):
            if row_seed is None and col_seed is None:
                continue
            key = (row_seed, col_seed)
            if row_seed is None:
                # Header row: show the unmixed destination image.
                key = (col_seed, col_seed)
            if col_seed is None:
                # Header column: show the unmixed source image.
                key = (row_seed, row_seed)
            canvas.paste(PIL.Image.fromarray(image_dict[key], 'RGB'), (W * col_idx, H * row_idx))
    canvas.save(dnnlib.make_run_dir_path('grid.png'))
#----------------------------------------------------------------------------
def lerp_video(network_pkl, # Path to pretrained model pkl file
               seeds, # Random seeds
               grid_w=None, # Number of columns
               grid_h=None, # Number of rows
               truncation_psi=1.0, # Truncation trick
               slowdown=1, # Slowdown of the video (power of 2)
               duration_sec=30.0, # Duration of video in seconds
               smoothing_sec=3.0,
               mp4_fps=30,
               mp4_codec="libx264",
               mp4_bitrate="16M",
               minibatch_size=8):
    """Render a latent-space interpolation video for a pretrained generator.

    Either pass several seeds (the grid shape is derived from their count),
    or exactly one seed together with explicit grid_w and grid_h. The video
    is written to the run dir as '{grid_w}x{grid_h}-lerp-{slowdown}xslowdown.mp4'.

    Raises:
        AssertionError: if slowdown is not a positive power of 2.
    """
    # Sanity check regarding slowdown
    message = 'slowdown must be a power of 2 (1, 2, 4, 8, ...) and greater than 0!'
    assert slowdown & (slowdown - 1) == 0 and slowdown > 0, message
    num_frames = int(np.rint(duration_sec * mp4_fps))
    total_duration = duration_sec * slowdown

    print('Loading network from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)

    print("Generating latent vectors...")
    # If there's more than one seed provided and the shape isn't specified.
    # Fix: use identity tests instead of the '== None' chain (PEP 8 E711);
    # the accepted inputs are unchanged.
    if grid_w is None and grid_h is None and len(seeds) >= 1:
        # number of images according to the seeds provided
        num = len(seeds)
        # Get the grid width and height according to num:
        grid_w = max(int(np.ceil(np.sqrt(num))), 1)
        grid_h = max((num - 1) // grid_w + 1, 1)
        grid_size = [grid_w, grid_h]
        # [frame, image, channel, component]:
        shape = [num_frames] + Gs.input_shape[1:]
        # Get the latents:
        all_latents = np.stack([np.random.RandomState(seed).randn(*shape).astype(np.float32) for seed in seeds], axis=1)
    # If only one seed is provided and the shape is specified
    elif grid_w is not None and grid_h is not None and len(seeds) == 1:
        # Otherwise, the user gives one seed and the grid width and height:
        grid_size = [grid_w, grid_h]
        # [frame, image, channel, component]:
        shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:]
        # Get the latents with the random state:
        random_state = np.random.RandomState(seeds)
        all_latents = random_state.randn(*shape).astype(np.float32)
    else:
        print("Error: wrong combination of arguments! Please provide \
              either one seed and the grid width and height, or a \
              list of seeds to use.")
        sys.exit(1)

    # Smooth the latent walk over time, then renormalize to unit RMS.
    all_latents = scipy.ndimage.gaussian_filter(
        all_latents,
        [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape),
        mode="wrap"
    )
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))
    # Name of the final mp4 video
    mp4 = f"{grid_w}x{grid_h}-lerp-{slowdown}xslowdown.mp4"

    # Aux function to slowdown the video by 2x
    def double_slowdown(latents, duration_sec, num_frames):
        # Make an empty latent vector with double the amount of frames
        z = np.empty(np.multiply(latents.shape, [2, 1, 1]), dtype=np.float32)
        # Populate it
        for i in range(len(latents)):
            z[2*i] = latents[i]
        # Interpolate in the odd frames
        for i in range(1, len(z), 2):
            # For the last frame, we loop to the first one
            if i == len(z) - 1:
                z[i] = (z[0] + z[i-1]) / 2
            else:
                z[i] = (z[i-1] + z[i+1]) / 2
        # We also need to double the duration_sec and num_frames
        duration_sec *= 2
        num_frames *= 2
        # Return the new latents, and the two previous quantities
        return z, duration_sec, num_frames

    while slowdown > 1:
        all_latents, duration_sec, num_frames = double_slowdown(all_latents,
                                                                duration_sec,
                                                                num_frames)
        slowdown //= 2
    # Define the kwargs for the Generator:
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                      nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi
    # NOTE(review): minibatch_size is accepted but never forwarded to Gs.run;
    # generation uses the default batching. Kept as-is to avoid changing
    # runtime behavior - confirm before wiring it through.

    # Aux function: Frame generation func for moviepy.
    def make_frame(t):
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
        latents = all_latents[frame_idx]
        # Get the images (with labels = None)
        images = Gs.run(latents, None, **Gs_kwargs)
        # Generate the grid for this timestamp:
        grid = create_image_grid(images, grid_size)
        # grayscale => RGB
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2)
        return grid

    # Generate video using make_frame:
    print(f'Generating interpolation video of length: {total_duration} seconds...')
    videoclip = moviepy.editor.VideoClip(make_frame, duration=duration_sec)
    videoclip.write_videofile(dnnlib.make_run_dir_path(mp4),
                              fps=mp4_fps,
                              codec=mp4_codec,
                              bitrate=mp4_bitrate)
#----------------------------------------------------------------------------
def style_mixing_video(network_pkl,
                       src_seed, # Seed of the source image style (row)
                       dst_seeds, # Seeds of the destination image styles (columns)
                       col_styles, # Styles to transfer from first row to first column
                       truncation_psi=1.0, # Truncation trick
                       only_stylemix=False, # True if user wishes to show only thre style transferred result
                       duration_sec=30.0,
                       smoothing_sec=3.0,
                       mp4_fps=30,
                       mp4_codec="libx264",
                       mp4_bitrate="16M",
                       minibatch_size=8):
    """Render a video where a moving source latent (seeded by src_seed) mixes
    its col_styles layers into each fixed destination latent (dst_seeds).
    Writes '{cols}x{rows}-style-mixing.mp4' to the run dir; with
    only_stylemix the source/destination header images are omitted."""
    # Calculate the number of frames:
    num_frames = int(np.rint(duration_sec * mp4_fps))
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    w_avg = Gs.get_var('dlatent_avg') # [component]
    # Sanity check: styles are actually possible for generated image size
    max_style = int(2 * np.log2(Gs.output_shape[-1])) - 3
    assert max(col_styles) <= max_style, f"Maximum col-style allowed: {max_style}"
    Gs_syn_kwargs = dnnlib.EasyDict()
    Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_syn_kwargs.randomize_noise = False
    Gs_syn_kwargs.minibatch_size = minibatch_size
    # Left col latents
    print('Generating Source W vectors...')
    src_shape = [num_frames] + Gs.input_shape[1:]
    src_z = np.random.RandomState(*src_seed).randn(*src_shape).astype(np.float32) # [frames, src, component]
    # Smooth the random walk over time, then renormalize to unit RMS.
    src_z = scipy.ndimage.gaussian_filter(
        src_z, [smoothing_sec * mp4_fps] + [0] * (len(Gs.input_shape) - 1), mode="wrap"
    )
    src_z /= np.sqrt(np.mean(np.square(src_z)))
    # Map into the detangled latent space W and do truncation trick
    src_w = Gs.components.mapping.run(src_z, None)
    src_w = w_avg + (src_w - w_avg) * truncation_psi
    # Top row latents
    print('Generating Destination W vectors...')
    dst_z = np.stack([np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in dst_seeds])
    dst_w = Gs.components.mapping.run(dst_z, None)
    dst_w = w_avg + (dst_w - w_avg) * truncation_psi
    # Get the width and height of each image:
    _N, _C, H, W = Gs.output_shape
    # Generate ALL the source images:
    src_images = Gs.components.synthesis.run(src_w, **Gs_syn_kwargs)
    # Generate the column images:
    dst_images = Gs.components.synthesis.run(dst_w, **Gs_syn_kwargs)
    # If the user wishes to show both the source and destination images
    if not only_stylemix:
        print('Generating full video (including source and destination images)')
        # Generate our canvas where we will paste all the generated images:
        canvas = PIL.Image.new("RGB", (W * (len(dst_seeds) + 1), H * (len(src_seed) + 1)), "white")
        # NOTE(review): the paste coordinates below mix H and W, which is only
        # consistent for square generators (H == W) - confirm if non-square
        # models are ever used.
        for col, dst_image in enumerate(list(dst_images)):
            canvas.paste(PIL.Image.fromarray(dst_image, "RGB"), ((col + 1) * H, 0))
        # Paste them:
        # Aux functions: Frame generation func for moviepy.
        def make_frame(t):
            # Get the frame number according to time t:
            frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
            # We wish the image belonging to the frame at time t:
            src_image = src_images[frame_idx]
            # Paste it to the lower left:
            canvas.paste(PIL.Image.fromarray(src_image, "RGB"), (0, H))
            # Now, for each of the column images:
            for col, dst_image in enumerate(list(dst_images)):
                # Select the pertinent latent w column:
                w_col = np.stack([dst_w[col]]) # [18, 512] -> [1, 18, 512]
                # Replace the values defined by col_styles:
                w_col[:, col_styles] = src_w[frame_idx, col_styles]
                # Generate these synthesized images:
                col_images = Gs.components.synthesis.run(w_col, **Gs_syn_kwargs)
                # Paste them in their respective spot:
                for row, image in enumerate(list(col_images)):
                    canvas.paste(
                        PIL.Image.fromarray(image, "RGB"),
                        ((col + 1) * H, (row + 1) * W),
                    )
            return np.array(canvas)
    # Else, show only the style-transferred images (this is nice for the 1x1 case)
    else:
        print('Generating only the style-transferred images')
        # Generate our canvas where we will paste all the generated images:
        canvas = PIL.Image.new("RGB", (W * len(dst_seeds), H * len(src_seed)), "white")
        def make_frame(t):
            # Get the frame number according to time t:
            frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
            # Now, for each of the column images:
            for col, dst_image in enumerate(list(dst_images)):
                # Select the pertinent latent w column:
                w_col = np.stack([dst_w[col]]) # [18, 512] -> [1, 18, 512]
                # Replace the values defined by col_styles:
                w_col[:, col_styles] = src_w[frame_idx, col_styles]
                # Generate these synthesized images:
                col_images = Gs.components.synthesis.run(w_col, **Gs_syn_kwargs)
                # Paste them in their respective spot:
                for row, image in enumerate(list(col_images)):
                    canvas.paste(
                        PIL.Image.fromarray(image, "RGB"),
                        (col * H, row * W),
                    )
            return np.array(canvas)
    # Generate video using make_frame:
    print('Generating style-mixed video...')
    videoclip = moviepy.editor.VideoClip(make_frame, duration=duration_sec)
    grid_size = [len(dst_seeds), len(src_seed)]
    mp4 = "{}x{}-style-mixing.mp4".format(*grid_size)
    videoclip.write_videofile(dnnlib.make_run_dir_path(mp4),
                              fps=mp4_fps,
                              codec=mp4_codec,
                              bitrate=mp4_bitrate)
#----------------------------------------------------------------------------
def lerp(t, v0, v1):
    """Linearly interpolate between two latent vectors.

    Args:
        t (float/np.ndarray): interpolation parameter in [0.0, 1.0].
        v0 (np.ndarray): starting vector (returned when t == 0).
        v1 (np.ndarray): final vector (returned when t == 1).

    Returns:
        np.ndarray: the point (1 - t) * v0 + t * v1.
    """
    # Weight on the starting vector; the remainder (t) goes to the end vector
    weight_v0 = 1.0 - t
    return weight_v0 * v0 + t * v1
# Taken and adapted from wikipedia's slerp article
# https://en.wikipedia.org/wiki/Slerp
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two latent vectors.

    Args:
        t (float/np.ndarray): interpolation parameter in [0.0, 1.0].
        v0 (np.ndarray): starting vector.
        v1 (np.ndarray): final vector.
        DOT_THRESHOLD (float): |cos(angle)| above which the vectors are
            treated as colinear and lerp is used instead. Not recommended
            to alter this.

    Returns:
        np.ndarray: interpolation vector between v0 and v1.
    """
    # Keep the original (unnormalized) endpoints for the final weighted sum
    start = np.copy(v0)
    end = np.copy(v1)
    # Work with unit vectors so the dot product is the cosine of the angle
    v0 = v0 / np.linalg.norm(v0)
    v1 = v1 / np.linalg.norm(v1)
    # Elementwise product + sum == dot product (np.dot is awkward in W space)
    dot = np.sum(v0 * v1)
    # Nearly colinear endpoints: slerp degenerates, so fall back to lerp
    if np.abs(dot) > DOT_THRESHOLD:
        return lerp(t, start, end)
    # Total angle between the endpoints, and the angle swept at timestep t
    theta_0 = np.arccos(dot)
    sin_theta_0 = np.sin(theta_0)
    theta_t = theta_0 * t
    sin_theta_t = np.sin(theta_t)
    # Standard slerp weights on the original endpoints
    weight_start = np.sin(theta_0 - theta_t) / sin_theta_0
    weight_end = sin_theta_t / sin_theta_0
    return weight_start * start + weight_end * end
# Helper function for interpolation
def interpolate(v0, v1, n_steps, interp_type='spherical', smooth=False):
    '''
    Interpolate between two latent vectors, excluding the endpoint v1.
    Input:
        v0, v1 (np.ndarray): latent vectors in the spaces Z or W
        n_steps (int): number of steps to take between both latent vectors
        interp_type (str): Type of interpolation between latent vectors
            ('linear' or 'spherical')
        smooth (bool): whether or not to smoothly transition between dlatents
    Output:
        vectors (np.ndarray): interpolation of latent vectors, without
            including v1
    Raises:
        ValueError: if interp_type is neither 'linear' nor 'spherical'
            (previously an invalid value surfaced as a confusing NameError)
    '''
    # Validate up front instead of failing with a NameError inside the loop
    if interp_type == 'linear':
        interp_func = lerp
    elif interp_type == 'spherical':
        interp_func = slerp
    else:
        raise ValueError(
            'interp_type must be either "linear" or "spherical", '
            'got: {}'.format(interp_type))
    # Timesteps in [0, 1); the endpoint is excluded so consecutive segments
    # chain together without duplicating the shared latent
    t_array = np.linspace(0, 1, num=n_steps, endpoint=False).reshape(-1, 1)
    if smooth:
        # Smoothstep easing (3t^2 - 2t^3), constructed following
        # https://math.stackexchange.com/a/1142755
        t_array = t_array**2 * (3 - 2 * t_array)
    # TODO: no need of a for loop; this can be optimized using the fact
    # that they're numpy arrays!
    vectors = [interp_func(t, v0, v1) for t in t_array]
    return np.asarray(vectors)
def sightseeding(network_pkl,             # Path to pretrained model pkl file
                 seeds,                   # List of random seeds to use
                 truncation_psi=1.0,      # Truncation trick
                 seed_sec=5.0,            # Time duration between seeds
                 interp_type='spherical', # Type of interpolation: linear or spherical
                 interp_in_z=False,       # Interpolate in Z (True) or in W (False)
                 smooth=False,            # Smoothly interpolate between latent vectors
                 mp4_fps=30,
                 mp4_codec="libx264",
                 mp4_bitrate="16M",
                 minibatch_size=8):
    """Generate a 1x1 video that visits the given seeds in order.

    Each seed's latent is interpolated towards the next one (in Z or in W),
    every interpolated latent is synthesized into one frame, and the result
    is written as '<seed1>-<seed2>-...-sightseeding.mp4' in the run dir.

    Args:
        network_pkl (str): path/URL of the pretrained StyleGAN2 pickle.
        seeds (list[int]): seeds to visit, in order (at least two).
        truncation_psi (float): truncation trick strength applied in W.
        seed_sec (float): seconds spent travelling between two seeds.
        interp_type (str): 'linear' or 'spherical'.
        interp_in_z (bool): interpolate in Z space instead of W space.
        smooth (bool): ease in/out between consecutive seeds.
        mp4_fps / mp4_codec / mp4_bitrate: video encoding settings.
        minibatch_size (int): synthesis network minibatch size.
    """
    # Sanity check before doing any calculations
    assert interp_type in ['linear', 'spherical'], 'interp_type must be either "linear" or "spherical"'
    if len(seeds) < 2:
        print('Please enter more than one seed to interpolate between!')
        sys.exit(1)
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    w_avg = Gs.get_var('dlatent_avg')  # [component]
    Gs_syn_kwargs = dnnlib.EasyDict()
    Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_syn_kwargs.randomize_noise = False
    Gs_syn_kwargs.minibatch_size = minibatch_size
    # Number of steps to take between each latent vector
    n_steps = int(np.rint(seed_sec * mp4_fps))
    # Number of frames in total
    num_frames = int(n_steps * (len(seeds) - 1))
    # Duration in seconds
    duration_sec = num_frames / mp4_fps
    # Generate the random vectors from each seed
    print('Generating Z vectors...')
    all_z = np.stack([np.random.RandomState(seed).randn(*Gs.input_shape[1:]) for seed in seeds])
    # If user wants to interpolate in Z
    if interp_in_z:
        print(f'Interpolating in Z...(interpolation type: {interp_type})')
        src_z = np.empty([0] + list(all_z.shape[1:]), dtype=np.float64)
        for i in range(len(all_z) - 1):
            # We interpolate between each pair of latents
            interp = interpolate(all_z[i], all_z[i+1], n_steps, interp_type, smooth)
            # Append it to our source
            src_z = np.append(src_z, interp, axis=0)
        # Convert to W (dlatent vectors)
        print('Generating W vectors...')
        src_w = Gs.components.mapping.run(src_z, None)  # [minibatch, layer, component]
    # Otherwise, we interpolate in W
    else:
        print(f'Interpolating in W...(interp type: {interp_type})')
        print('Generating W vectors...')
        all_w = Gs.components.mapping.run(all_z, None)  # [minibatch, layer, component]
        src_w = np.empty([0] + list(all_w.shape[1:]), dtype=np.float64)
        for i in range(len(all_w) - 1):
            # We interpolate between each pair of latents
            interp = interpolate(all_w[i], all_w[i+1], n_steps, interp_type, smooth)
            # Append it to our source
            src_w = np.append(src_w, interp, axis=0)
    # Do the truncation trick
    src_w = w_avg + (src_w - w_avg) * truncation_psi
    # Our grid will be 1x1
    grid_size = [1, 1]
    # Aux function: Frame generation func for moviepy.
    def make_frame(t):
        # Map the clip time t (seconds) to the corresponding frame index
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
        latent = src_w[frame_idx]
        # Select the pertinent latent w column:
        w = np.stack([latent])  # [18, 512] -> [1, 18, 512]
        image = Gs.components.synthesis.run(w, **Gs_syn_kwargs)
        # Generate the grid for this timestamp:
        grid = create_image_grid(image, grid_size)
        # grayscale => RGB
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2)
        return grid
    # Generate video using make_frame:
    print('Generating sightseeding video...')
    videoclip = moviepy.editor.VideoClip(make_frame, duration=duration_sec)
    name = '-'.join(map(str, seeds))
    # BUGFIX: the output file name was previously misspelled '-sighseeding.mp4'
    mp4 = "{}-sightseeding.mp4".format(name)
    videoclip.write_videofile(dnnlib.make_run_dir_path(mp4),
                              fps=mp4_fps,
                              codec=mp4_codec,
                              bitrate=mp4_bitrate)
#----------------------------------------------------------------------------
def circular_video(network_pkl,
                   seed,
                   grid_w,
                   grid_h,
                   truncation_psi=1.0,
                   duration_sec=30.0,
                   mp4_fps=30,
                   mp4_codec="libx264",
                   mp4_bitrate="16M",
                   minibatch_size=8,
                   radius=10.0):
    """Generate a looping video by moving each grid cell's latent on a circle.

    For every cell in the grid_w x grid_h grid, two distinct random Z
    dimensions are chosen (without replacement across the whole grid) and
    driven along a circle of the given radius while all other Z components
    stay at zero. Each frame is synthesized from those latents and the video
    is written to '<grid_w>x<grid_h>-circular.mp4' in the run directory.

    Args:
        network_pkl (str): path/URL of the pretrained StyleGAN2 pickle.
        seed (int): seed used to pick the animated Z dimensions.
        grid_w (int): number of grid columns.
        grid_h (int): number of grid rows.
        truncation_psi (float or None): truncation trick strength; skipped
            when None.
        duration_sec (float): total video duration in seconds.
        mp4_fps / mp4_codec / mp4_bitrate: video encoding settings.
        minibatch_size (int): NOTE(review) not referenced in this body —
            confirm whether it should be forwarded to Gs.run.
        radius (float): radius of the circle traced in Z space.
    """
    # Total number of frames (round to nearest integer then convert to int)
    num_frames = int(np.rint(duration_sec * mp4_fps))
    print('Loading network from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    # Define the kwargs for the Generator
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                      nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi
    grid_size = [grid_w, grid_h]
    # Get the latents with the random state
    random_state = np.random.RandomState(seed)
    # Choose two random dims on which to get the circles (from 0 to 511),
    # one pair for each image in the grid (2*np.prod(grid_size) in total)
    z1, z2 = np.split(random_state.choice(Gs.input_shape[1],
                                          2 * np.prod(grid_size),
                                          replace=False), 2)
    # We partition the circle in equal strides w.r.t. num_frames
    def get_angles(num_frames):
        # Evenly spaced angles covering one full revolution
        angles = np.linspace(0, 2 * np.pi, num_frames)
        return angles
    angles = get_angles(num_frames=num_frames)
    # Basic Polar to Cartesian transformation
    def get_z_coords(radius, theta):
        return radius * np.cos(theta), radius * np.sin(theta)
    Z1, Z2 = get_z_coords(radius=radius, theta=angles)
    # Create the shape of the latents: [frame, grid cell, z components]
    shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:]
    # Create the latents comprising solely of zeros
    all_latents = np.zeros(shape).astype(np.float32)
    # We will obtain all the frames belonging to the specific scene/box in the
    # grid, so then we replace the values of zeros with our circle values
    for box in range(np.prod(grid_size)):
        box_frames = all_latents[:, box]
        box_frames[:, [z1[box], z2[box]]] = np.vstack((Z1, Z2)).T
    # Aux function: Frame generation function for moviepy
    def make_frame(t):
        # Map clip time t (seconds) to the corresponding frame index
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
        latents = all_latents[frame_idx]
        # Get the images (with labels = None)
        images = Gs.run(latents, None, **Gs_kwargs)
        # Generate the grid for this timestamp
        grid = create_image_grid(images, grid_size)
        # grayscale => RGB
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2)
        return grid
    # Generate video using make_frame
    print('Generating circular interpolation video...')
    videoclip = moviepy.editor.VideoClip(make_frame, duration=duration_sec)
    mp4 = f"{grid_w}x{grid_h}-circular.mp4"
    videoclip.write_videofile(dnnlib.make_run_dir_path(mp4),
                              fps=mp4_fps,
                              codec=mp4_codec,
                              bitrate=mp4_bitrate)
#----------------------------------------------------------------------------
# My extended version of this helper function:
def _parse_num_range(s):
'''
Input:
s (str): Comma separated string of numbers 'a,b,c', a range 'a-c',
or even a combination of both 'a,b-c', 'a-b,c', 'a,b-c,d,e-f,...'
Output:
nums (list): Ordered list of ascending ints in s, with repeating values deleted
'''
# Sanity check 0:
# In case there's a space between the numbers (impossible due to argparse,
# but hey, I am that paranoid):
s = s.replace(' ', '')
# Split w.r.t comma
str_list = s.split(',')
nums = []
for el in str_list:
if '-' in el:
# The range will be 'a-b', so we wish to find both a and b using re:
range_re = re.compile(r'^(\d+)-(\d+)$')
match = range_re.match(el)
# We get the two numbers:
a = int(match.group(1))
b = int(match.group(2))
# Sanity check 1: accept 'a-b' or 'b-a', with a<=b:
if a <= b: r = [n for n in range(a, b + 1)]
else: r = [n for n in range(b, a + 1)]
# Use extend since r will also be an array:
nums.extend(r)
else:
# It's a single number, so just append it:
nums.append(int(el))
# Sanity check 2: delete repeating numbers:
nums = list(set(nums))
# Return the numbers in ascending order:
return sorted(nums)
#----------------------------------------------------------------------------
# Helper function for parsing seeds for sightseeding
def _parse_seeds(s):
'''
Input:
s (str): Comma separated list of numbers 'a,b,c,...'
Output:
nums (list): Unordered list of ints in s with no deletion of repeated values
'''
# Do the same sanity check as above:
s = s.replace(' ', '')
# Split w.r.t. comma
str_list = s.split(',')
nums = []
for el in str_list:
if '-' in el:
# The range will be 'a-b', so we wish to find both a and b using re:
range_re = re.compile(r'^(\d+)-(\d+)$')
match = range_re.match(el)
# We get the two numbers:
a = int(match.group(1))
b = int(match.group(2))
# Sanity check 1: accept 'a-b' or 'b-a', with a<=b:
if a <= b: r = [n for n in range(a, b + 1)]
else: r = [n for n in range(b, a + 1)]
# Use extend since r will also be an array:
nums.extend(r)
else:
# It's a single number, so just append it:
nums.append(int(el))
return nums
#----------------------------------------------------------------------------
def _str_to_bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
#----------------------------------------------------------------------------
_examples = '''examples:
# Generate ffhq uncurated images (matches paper Figure 12)
python %(prog)s generate-images --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seeds=6600-6625 --truncation-psi=0.5
# Generate ffhq curated images (matches paper Figure 11)
python %(prog)s generate-images --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seeds=66,230,389,1518 --truncation-psi=1.0
# Generate style mixing example
python %(prog)s style-mixing-example --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --row-seeds=85,100,75,458,1500 --col-seeds=55,821,1789,293 --truncation-psi=1.0
# Generate 50-second-long uncurated 'cars' 5x3 interpolation video at 60fps, with truncation-psi=0.7
python %(prog)s lerp-video --network=gdrive:networks/stylegan2-car-config-f.pkl --seeds=1000 --grid-w=5 --grid-h=3 --truncation-psi=0.7 --duration_sec=50 --fps=60
# Generate style mixing video with FFHQ
python %(prog)s style-mixing-video --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --row-seed=85 --col-seeds=55,821,1789,293
# Generate style mixing example of fine styles layers (64^2-1024^2, as defined in StyleGAN)
python %(prog)s style-mixing-video --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --row-seed=85 --col-seeds=55,821,1789,293 --col-styles=8-17 --truncation-psi=1.0
# Generate sightseeding video (1x1), with 10-second smooth interpolation between seeds, looping back to the first seed in the end
python %(prog)s sightseeding --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seeds=4,9,7,5,4,6,8,4 --seed-sec=10.0 --interp-type=smooth
# Generate 50-second long 2x1 circular interpolation video, at 60 fps (Z-planes will be generated with the seed=1000):
python %(prog)s circular-video --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seed=1000 --grid-w=2 --grid-h=1 --duration-sec=50 --fps=60
'''
#----------------------------------------------------------------------------
def main():
    """CLI entry point for the StyleGAN2 generator.

    Builds one argparse subcommand per generation mode, then hands the
    parsed options to the matching run_generator.* function through
    dnnlib's run-submission framework (local, single GPU).

    Fixed two user-facing help-string typos: 'style mxied' -> 'style mixed'
    and 'Trncation psi' -> 'Truncation psi'.
    """
    parser = argparse.ArgumentParser(
        description='''StyleGAN2 generator.
Run 'python %(prog)s <subcommand> --help' for subcommand help.''',
        epilog=_examples,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    subparsers = parser.add_subparsers(help='Sub-commands', dest='command')

    # generate-images: still images from a list of seeds
    parser_generate_images = subparsers.add_parser('generate-images', help='Generate images')
    parser_generate_images.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)
    parser_generate_images.add_argument('--seeds', type=_parse_num_range, help='List of random seeds', dest='seeds', required=True)
    parser_generate_images.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', dest='truncation_psi', default=0.5)
    parser_generate_images.add_argument('--create-grid', action='store_true', help='Add flag to save the generated images in a grid', dest='grid')
    parser_generate_images.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')

    # style-mixing-example: still grid of style-mixed images
    parser_style_mixing_example = subparsers.add_parser('style-mixing-example', help='Generate style mixing video')
    parser_style_mixing_example.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)
    parser_style_mixing_example.add_argument('--row-seeds', type=_parse_num_range, help='Random seeds to use for image rows', dest='row_seeds', required=True)
    parser_style_mixing_example.add_argument('--col-seeds', type=_parse_num_range, help='Random seeds to use for image columns', dest='col_seeds', required=True)
    parser_style_mixing_example.add_argument('--col-styles', type=_parse_num_range, help='Style layer range (default: %(default)s)', dest='col_styles', default='0-6')
    parser_style_mixing_example.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', dest='truncation_psi', default=0.5)
    parser_style_mixing_example.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')

    # lerp-video: grid interpolation video between random latents
    parser_lerp_video = subparsers.add_parser('lerp-video', help='Generate interpolation video (lerp) between random vectors')
    parser_lerp_video.add_argument('--network', help='Path to network pickle filename', dest='network_pkl', required=True)
    parser_lerp_video.add_argument('--seeds', type=_parse_num_range, help='List of random seeds', dest='seeds', required=True)
    parser_lerp_video.add_argument('--grid-w', type=int, help='Video grid width/columns (default: %(default)s)', default=None, dest='grid_w')
    parser_lerp_video.add_argument('--grid-h', type=int, help='Video grid height/rows (default: %(default)s)', default=None, dest='grid_h')
    parser_lerp_video.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=1.0, dest='truncation_psi')
    parser_lerp_video.add_argument('--slowdown', type=int, help='Slowdown the video by this amount; must be a power of 2 (default: %(default)s)', default=1, dest='slowdown')
    parser_lerp_video.add_argument('--duration-sec', type=float, help='Duration of video (default: %(default)s)', default=30.0, dest='duration_sec')
    parser_lerp_video.add_argument('--fps', type=int, help='FPS of generated video (default: %(default)s)', default=30, dest='mp4_fps')
    parser_lerp_video.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')

    # style-mixing-video: one source row animated against fixed style columns
    parser_style_mixing_video = subparsers.add_parser('style-mixing-video', help='Generate style mixing video (lerp)')
    parser_style_mixing_video.add_argument('--network', help='Path to network pickle filename', dest='network_pkl', required=True)
    parser_style_mixing_video.add_argument('--row-seed', type=_parse_num_range, help='Random seed to use for image source row', dest='src_seed', required=True)
    parser_style_mixing_video.add_argument('--col-seeds', type=_parse_num_range, help='Random seeds to use for image columns (style)', dest='dst_seeds', required=True)
    parser_style_mixing_video.add_argument('--col-styles', type=_parse_num_range, help='Style layer range (default: %(default)s)', default='0-6', dest='col_styles')
    parser_style_mixing_video.add_argument('--only-stylemix', action='store_true', help='Add flag to only show the style mixed images in the video', dest='only_stylemix')
    parser_style_mixing_video.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=0.7, dest='truncation_psi')
    parser_style_mixing_video.add_argument('--duration-sec', type=float, help='Duration of video (default: %(default)s)', default=30, dest='duration_sec')
    parser_style_mixing_video.add_argument('--fps', type=int, help='FPS of generated video (default: %(default)s)', default=30, dest='mp4_fps')
    parser_style_mixing_video.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')

    # sightseeding: 1x1 tour through a user-supplied sequence of seeds
    parser_sightseeding = subparsers.add_parser('sightseeding', help='Generate latent interpolation video between a set of user-fed random seeds.')
    parser_sightseeding.add_argument('--network', help='Path to network pickle filename', dest='network_pkl', required=True)
    parser_sightseeding.add_argument('--seeds', type=_parse_seeds, help='List of seeds to visit (will be in order)', dest='seeds', required=True)
    parser_sightseeding.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=1.0, dest='truncation_psi')
    parser_sightseeding.add_argument('--seed-sec', type=float, help='Number of seconds between each seed (default: %(default)s)', default=5.0, dest='seed_sec')
    parser_sightseeding.add_argument('--interp-type', type=str, help='Type of interpolation to perform: choose between linear or spherical (default: %(default)s)', default='spherical', dest='interp_type')
    parser_sightseeding.add_argument('--interp-in-z', type=_str_to_bool, help='Whether or not to perform the interpolation in Z instead of in W (default: %(default)s)', default=False, dest='interp_in_z')
    parser_sightseeding.add_argument('--smooth', action='store_true', help='Add flag to smoothly interpolate between the latent vectors', dest='smooth')
    parser_sightseeding.add_argument('--fps', type=int, help='FPS of generated video (default: %(default)s)', default=30, dest='mp4_fps')
    parser_sightseeding.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')

    # circular-video: each grid cell's latent travels a circle in Z space
    parser_circular_video = subparsers.add_parser('circular-video', help='Generate circular interpolation video between random vectors')
    parser_circular_video.add_argument('--network', help='Path to network pickle filename', dest='network_pkl', required=True)
    parser_circular_video.add_argument('--seed', type=int, help='Random seed', dest='seed', required=True)
    parser_circular_video.add_argument('--grid-w', type=int, help='Video grid width/no. of columns (default: %(default)s)', default=3, dest='grid_w')
    parser_circular_video.add_argument('--grid-h', type=int, help='Video grid height/no of rows (default: %(default)s)', default=2, dest='grid_h')
    parser_circular_video.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=1.0, dest='truncation_psi')
    parser_circular_video.add_argument('--duration-sec', type=float, help='Duration of video (default: %(default)s)', default=30.0, dest='duration_sec')
    parser_circular_video.add_argument('--fps', type=int, help='FPS of generated video (default: %(default)s)', default=30, dest='mp4_fps')
    parser_circular_video.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')

    args = parser.parse_args()
    kwargs = vars(args)
    subcmd = kwargs.pop('command')
    if subcmd is None:
        print('Error: missing subcommand. Re-run with --help for usage.')
        sys.exit(1)
    # Submit the chosen function locally through dnnlib (single GPU, no
    # source-file copying); remaining kwargs are forwarded verbatim.
    sc = dnnlib.SubmitConfig()
    sc.num_gpus = 1
    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    sc.run_dir_root = kwargs.pop('result_dir')
    sc.run_desc = subcmd
    func_name_map = {
        'generate-images': 'run_generator.generate_images',
        'style-mixing-example': 'run_generator.style_mixing_example',
        'lerp-video': 'run_generator.lerp_video',
        'style-mixing-video': 'run_generator.style_mixing_video',
        'sightseeding': 'run_generator.sightseeding',
        'circular-video': 'run_generator.circular_video'
    }
    dnnlib.submit_run(sc, func_name_map[subcmd], **kwargs)
#----------------------------------------------------------------------------
# Standard entry-point guard: run the CLI only when this file is executed as
# a script, not when it is imported as a module.
if __name__ == "__main__":
    main()
#----------------------------------------------------------------------------
| 50.147094 | 204 | 0.621422 |
8650f877a14772e01c1ad0bd43caf878fa1f18ce | 1,741 | py | Python | setup.py | zkneupper/codecolor | a0794d5b43da367c700d1c9421f9e607ac50ff5a | [
"Apache-2.0"
] | 1 | 2022-03-23T23:41:19.000Z | 2022-03-23T23:41:19.000Z | setup.py | zkneupper/codecolor | a0794d5b43da367c700d1c9421f9e607ac50ff5a | [
"Apache-2.0"
] | 1 | 2021-05-10T00:42:57.000Z | 2021-05-10T00:42:57.000Z | setup.py | zkneupper/codecolor | a0794d5b43da367c700d1c9421f9e607ac50ff5a | [
"Apache-2.0"
] | 1 | 2021-07-31T23:17:13.000Z | 2021-07-31T23:17:13.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
# Long description is assembled from the README followed by the changelog
# (joined below in setup(long_description=...))
with open("README.rst") as readme_file:
    readme = readme_file.read()
with open("HISTORY.rst") as history_file:
    history = history_file.read()
# Runtime dependencies
requirements = [
    "Click>=7.0",
]
# Used by the `setup.py test` workflow (setup_requires below)
setup_requirements = [
    "pytest-runner",
]
# Test-only dependencies
test_requirements = [
    "pytest>=3",
]
# PEP 440 -- Version Identification and Dependency Specification
# https://www.python.org/dev/peps/pep-0440/
setup(
    author="Zachary Jonathan Kneupper",
    author_email="zachary.kneupper@gmail.com",
    python_requires=">=3.5",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    description="Inspect python object code with colorful syntax highlighting",
    # Installs the `codecolor` console command pointing at codecolor.cli:main
    entry_points={"console_scripts": ["codecolor=codecolor.cli:main"]},
    install_requires=requirements,
    license="Apache Software License 2.0",
    long_description=readme + "\n\n" + history,
    include_package_data=True,
    keywords="codecolor",
    name="codecolor",
    packages=find_packages(include=["codecolor", "codecolor.*"]),
    setup_requires=setup_requirements,
    test_suite="tests",
    tests_require=test_requirements,
    url="https://github.com/zkneupper/codecolor",
    version="0.0.2", # version="0.0.dev2", <- last released test version
    zip_safe=False,
)
| 27.634921 | 79 | 0.657668 |
c5afb50a98cd27f25f723935d438388ff6ad94d1 | 929 | py | Python | axelrod/__init__.py | rjsu26/Axelrod | 677f6da86d7ba56d3b2ec031de558c4f2a0675b0 | [
"MIT"
] | null | null | null | axelrod/__init__.py | rjsu26/Axelrod | 677f6da86d7ba56d3b2ec031de558c4f2a0675b0 | [
"MIT"
] | null | null | null | axelrod/__init__.py | rjsu26/Axelrod | 677f6da86d7ba56d3b2ec031de558c4f2a0675b0 | [
"MIT"
] | null | null | null | DEFAULT_TURNS = 200
# The order of imports matters!
from axelrod.version import __version__
from axelrod.load_data_ import load_pso_tables, load_weights
from axelrod import graph
from axelrod.action import Action
from axelrod.random_ import random_choice, random_flip, seed, Pdf
from axelrod.plot import Plot
from axelrod.game import DefaultGame, Game
from axelrod.history import History, LimitedHistory
from axelrod.player import is_basic, obey_axelrod, Player
from axelrod.mock_player import MockPlayer
from axelrod.match import Match
from axelrod.moran import MoranProcess, ApproximateMoranProcess
from axelrod.strategies import *
from axelrod.deterministic_cache import DeterministicCache
from axelrod.match_generator import *
from axelrod.tournament import Tournament
from axelrod.result_set import ResultSet
from axelrod.ecosystem import Ecosystem
from axelrod.fingerprint import AshlockFingerprint, TransitiveFingerprint
| 40.391304 | 73 | 0.861141 |
44b52493a87601a0e46434efc966d170111c03a7 | 569 | py | Python | dailyReset.py | eugman/eugeneQuest | 39b2a3d0d20336e18b30da372f62765aa1b6282a | [
"MIT"
] | 1 | 2019-10-01T00:04:33.000Z | 2019-10-01T00:04:33.000Z | dailyReset.py | eugman/eugeneQuest | 39b2a3d0d20336e18b30da372f62765aa1b6282a | [
"MIT"
] | null | null | null | dailyReset.py | eugman/eugeneQuest | 39b2a3d0d20336e18b30da372f62765aa1b6282a | [
"MIT"
] | null | null | null | from app import db
from app.models import *
# Daily reset job: archive yesterday's counters for the single player record
# (id 1), zero the daily totals, and reset all Daily tasks for the new day.
player = db.session.query(Player).get(1)
# Log the day's negative-thought count before clearing it
ntl = NegThoughtsLog(thoughts = player.negThoughts)
db.session.add(ntl)
# Keep yesterday's totals around (prev* fields), then reset the live counters
player.prevNegThoughts = player.negThoughts
player.negThoughts = 0
player.prevPointsGained = player.pointsGained
player.pointsGained = 0
# Un-complete and un-snooze every Daily row
Daily.query.update({Daily.completed: False})
Daily.query.update({Daily.snooze: 0})
# Rest counters tick down only when the player is not on vacation
# (vacation is compared against 1 — presumably an int flag; confirm in models)
if player.vacation != 1:
    db.session.query(Daily).update({Daily.rest: Daily.rest - 1})
    db.session.query(Exercise).update({Exercise.rest: Exercise.rest - 1})
# Persist all of the above in one transaction
db.session.commit()
| 23.708333 | 73 | 0.752197 |
af4668411d9df1b93af981ffa05f6cd5ea990371 | 1,871 | py | Python | spatial.py | mbrc27/lasREST | 24246c78affafc6ddeece35af5ae7cec2243e9ea | [
"MIT"
] | null | null | null | spatial.py | mbrc27/lasREST | 24246c78affafc6ddeece35af5ae7cec2243e9ea | [
"MIT"
] | null | null | null | spatial.py | mbrc27/lasREST | 24246c78affafc6ddeece35af5ae7cec2243e9ea | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.path as mpl_path
# Note: different coordinate reference systems are not supported
def validate_geom(geometry):
    """Return True if *geometry* looks like an Esri-JSON polygon dict.

    Only the presence of the two fields used downstream ("spatialReference"
    and "rings") is checked; their contents are not validated.

    Args:
        geometry: mapping parsed from request JSON (may be None or any type).

    Returns:
        bool: True when both required keys are present, False otherwise.
    """
    try:
        geometry["spatialReference"]
        geometry["rings"]
        return True
    except (KeyError, TypeError):
        # KeyError: a required field is missing; TypeError: geometry is not
        # a mapping (e.g. None). Anything else is a real bug and propagates
        # instead of being silently swallowed by a bare `except:`.
        return False
def las_within(points_file, polygon, parameters, point_export = False):
    """Extract the requested attributes of LAS points lying inside a polygon.

    Args:
        points_file: opened LAS file object exposing .x/.y arrays and one
            array attribute per requested parameter (presumably a laspy
            File — TODO confirm).
        polygon: sequence of (x, y) vertices; assumed to share the points'
            coordinate reference system (not checked here).
        parameters (list[str]): point attribute names to extract.
        point_export (bool): if True return only the raw rows; otherwise
            wrap the rows together with the parameter names in a dict.

    Returns:
        list or dict: rows of selected attribute values, one row per point
        inside the polygon.
    """
    # 2D point-in-polygon test via matplotlib's Path
    bb_path = mpl_path.Path(np.array(polygon))
    coords = np.vstack((points_file.x, points_file.y)).transpose()
    point_tester = bb_path.contains_points(coords)  # boolean mask per point
    params_list = []
    for param in parameters:
        # NOTE(review): getattr's None default means a misspelled parameter
        # fails here with a TypeError when indexed — consider validating.
        params_list.append(getattr(points_file, param, None)[np.where(point_tester)])
    # Transpose so each row is one point, each column one parameter
    return_arr = np.vstack(tuple(params_list)).transpose()
    if point_export == True:
        return return_arr.tolist()
    else:
        return_obj = {"params": parameters, "points": return_arr.tolist()}
        return return_obj
def las_statistics(z_array):
    """Compute basic descriptive statistics over an array of values.

    Args:
        z_array: array-like of numeric values (e.g. LAS point elevations).

    Returns:
        dict: {"MIN", "MAX", "MEAN", "STD"} as numpy scalars.
    """
    values = np.array(z_array)
    return {
        "MIN": values.min(),
        "MAX": values.max(),
        "MEAN": np.mean(values),
        "STD": np.std(values),
    }
def las_header(hdr):
    """Serialize a LAS file header into a JSON-friendly dict.

    Args:
        hdr: LAS header object (presumably a laspy 1.x header — TODO
            confirm; the attribute names below follow that API).

    Returns:
        dict: plain-Python summary of the header fields.
    """
    return {
        "version": hdr.version,
        "filesource_id": hdr.filesource_id,
        #"reserved": 0,
        "guid": hdr.guid.urn, # TODO: fully verify serialization of the {UUID} object
        "system_id": hdr.system_id,
        "software_id": hdr.software_id,
        # NOTE(review): .microsecond looks suspicious for a creation date —
        # presumably the full date/timestamp was intended; confirm with callers.
        "date": hdr.date.microsecond,
        "header_size": hdr.header_size,
        "data_offset": hdr.data_offset,
        "vlrs_count": len(hdr.vlrs),
        "dataformat_id": hdr.dataformat_id,
        "data_record_length": hdr.data_record_length,
        "number_points": hdr.count,
        "point_return_count": hdr.point_return_count,
        "scale": hdr.scale,
        "offset": hdr.offset,
        "min": hdr.min,
        "max": hdr.max}
| 30.177419 | 85 | 0.644041 |
c5ba5cb6295784d196ccb2f3123c32531a46dcb1 | 3,747 | py | Python | bin.py | MananSoni42/fantasy-predictions | f511c46b5f5896fa3ba709d5de517116ab8a30ce | [
"MIT"
] | null | null | null | bin.py | MananSoni42/fantasy-predictions | f511c46b5f5896fa3ba709d5de517116ab8a30ce | [
"MIT"
] | null | null | null | bin.py | MananSoni42/fantasy-predictions | f511c46b5f5896fa3ba709d5de517116ab8a30ce | [
"MIT"
] | null | null | null | import csv
from pprint import pprint
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.preprocessing import KBinsDiscretizer
from math import floor,ceil
from tqdm import tqdm
# --- Binning setup -----------------------------------------------------------
# Load per-observation fantasy points and split them into 5 equal-frequency
# (quantile) bins; each pair of bin edges becomes an interval of scores.
df = pd.read_csv('final_data/final-data-v2.csv')
points = np.array(df['points']).reshape(-1,1)
dist = KBinsDiscretizer(n_bins=5, encode='ordinal', strategy='quantile')
dist.fit(points)
classes = dist.transform(points)  # ordinal bin index per observation
intervals = dist.bin_edges_[0].tolist()
# Widen the outermost edges to the full score range (-14..300 — presumably
# the min/max achievable fantasy score; TODO confirm)
intervals[0] = -14
intervals[-1] = 300
interval_map = [ (intervals[i], intervals[i+1]) for i in range(dist.n_bins_[0]) ]
print(interval_map)
num_runs = 30  # number of sampled values drawn per observation
n = classes.shape[0]
# Empirical histogram of raw scores, shifted by +14 so index 0 == score -14
probs = np.zeros(300+14+1)
temp = 0.1  # NOTE(review): unused temperature constant — confirm intent
for i in range(n):
    probs[14+int(points[i][0])] += 1
# Exponential weightings of the histogram at three different scales
probs_001 = np.exp(0.01*probs)
probs_01 = np.exp(0.05*probs)
probs_1 = np.exp(0.001*probs)
# Per-bin normalized sampling distributions, one tuple per interval:
# (lo, hi, dist@0.01, dist@0.05, dist@0.001)
adaptive_probs = []
for i in range(len(interval_map)):
    lo,hi = interval_map[i]
    lo,hi = floor(lo), ceil(hi)
    adaptive_probs.append((
        lo,
        hi,
        probs_001[14+lo:hi+14+1]/np.sum(probs_001[14+lo:hi+14+1]),
        probs_01[14+lo:hi+14+1]/np.sum(probs_01[14+lo:hi+14+1]),
        probs_1[14+lo:hi+14+1]/np.sum(probs_1[14+lo:hi+14+1])
    ))
# --- Sampling strategies -----------------------------------------------------
# For each observation draw num_runs samples from its bin using several
# strategies: bin midpoint, uniform, gaussian (2 widths), adaptive/empirical.
mean_points = np.zeros(n).reshape(-1,1)
uniform_points = np.zeros(num_runs*n).reshape(-1,1)
gaussian_points_3 = np.zeros(num_runs*n).reshape(-1,1)
gaussian_points_2 = np.zeros(num_runs*n).reshape(-1,1)
adaptive_points_001 = np.zeros(num_runs*n).reshape(-1,1)
adaptive_points_01 = np.zeros(num_runs*n).reshape(-1,1)
adaptive_points_1 = np.zeros(num_runs*n).reshape(-1,1)
for i in tqdm(range(num_runs*n)):
    ind = int(classes[i%n][0])  # bin index of the underlying observation
    lo,hi = interval_map[ind]
    lo1,hi1 = floor(lo), ceil(hi)
    # Gaussian centered on the bin midpoint; sd chosen so ~3 (resp. ~2)
    # standard deviations span half the bin width
    mean, sd_3, sd_2 = (lo + hi) / 2, (hi - lo)/6, (hi-lo)/4
    mean_points[i%n] = round(0.5*lo + 0.5*hi)
    uniform_points[i] = np.random.randint(int(lo), int(hi))
    gaussian_points_3[i] = round(np.random.normal(loc=mean, scale=sd_3))
    gaussian_points_2[i] = round(np.random.normal(loc=mean, scale=sd_2))
    # (the comprehension's `i` has its own scope in Python 3, so it does not
    # clobber the outer loop variable)
    adaptive_points_001[i] = np.random.choice([i for i in range(lo1,hi1+1)], p=adaptive_probs[ind][2])
    adaptive_points_01[i] = np.random.choice([i for i in range(lo1,hi1+1)], p=adaptive_probs[ind][3])
    adaptive_points_1[i] = np.random.choice([i for i in range(lo1,hi1+1)], p=adaptive_probs[ind][4])
# --- Visualization -----------------------------------------------------------
# 4x2 grid comparing the true score distribution (top-left) against each
# sampling strategy; red = KDE curve, blue = normalized histogram.
# NOTE(review): sns.distplot is deprecated in recent seaborn releases.
plt.subplot(421)
plt.title('Original')
sns.distplot(points, hist=False, color='r')
sns.distplot(points, kde=False, norm_hist=True, color='b')
plt.subplot(422)
plt.title('Mean')
sns.distplot(mean_points, hist=False, color='r')
sns.distplot(mean_points, kde=False, norm_hist=True, color='b')
plt.subplot(423)
plt.title('Random - uniform')
sns.distplot(uniform_points, hist=False, color='r')
sns.distplot(uniform_points, kde=False, norm_hist=True, color='b')
plt.subplot(424)
plt.title('Random - gaussian - 3sd')
sns.distplot(gaussian_points_3, hist=False, color='r')
sns.distplot(gaussian_points_3, kde=False, norm_hist=True, color='b')
plt.subplot(425)
plt.title('Random - gaussian - 2sd')
sns.distplot(gaussian_points_2, hist=False, color='r')
sns.distplot(gaussian_points_2, kde=False, norm_hist=True, color='b')
plt.subplot(426)
plt.title('Random - adaptive - 0.01')
sns.distplot(adaptive_points_001, hist=False, color='r')
sns.distplot(adaptive_points_001, kde=False, norm_hist=True, color='b')
plt.subplot(427)
plt.title('Random - adaptive - 0.05')
sns.distplot(adaptive_points_01, hist=False, color='r')
sns.distplot(adaptive_points_01, kde=False, norm_hist=True, color='b')
plt.subplot(428)
plt.title('Random - adaptive - 0.001')
sns.distplot(adaptive_points_1, hist=False, color='r')
sns.distplot(adaptive_points_1, kde=False, norm_hist=True, color='b')
plt.tight_layout()
plt.show()
| 33.159292 | 102 | 0.710435 |
0056e236008e8a449e7b3eac7c2f4672f5d10d12 | 134 | py | Python | app/admin/__init__.py | CAUCHY2932/Northern_Hemisphere | 06e5b3e3f0b47940d5b4549899d062373b019579 | [
"BSD-3-Clause"
] | null | null | null | app/admin/__init__.py | CAUCHY2932/Northern_Hemisphere | 06e5b3e3f0b47940d5b4549899d062373b019579 | [
"BSD-3-Clause"
] | 8 | 2021-03-19T03:28:32.000Z | 2022-03-11T23:59:00.000Z | app/admin/__init__.py | CAUCHY2932/Northern_Hemisphere | 06e5b3e3f0b47940d5b4549899d062373b019579 | [
"BSD-3-Clause"
] | null | null | null | # coding:utf-8
from flask import Blueprint

# Blueprint for the admin section; the enclosing application registers it.
admin = Blueprint("admin", __name__)

# These modules are imported *after* the blueprint is created, presumably so
# they can themselves import ``admin`` without a circular import — TODO confirm.
# The imports are needed for their side effects (route registration).
import app.admin.views
from app.models import site
| 14.888889 | 36 | 0.768657 |
3300e32850b7050f8ceffbc6a33f1b88ed6a76cc | 2,413 | py | Python | physics/colliders.py | jonntd/ragdolize | 903d9e95e2f5e3a16057aa15fc0a4e4e9c2d9efa | [
"MIT"
] | 16 | 2020-08-20T21:51:05.000Z | 2022-03-26T18:31:28.000Z | physics/colliders.py | Braveel/ragdolize | 903d9e95e2f5e3a16057aa15fc0a4e4e9c2d9efa | [
"MIT"
] | null | null | null | physics/colliders.py | Braveel/ragdolize | 903d9e95e2f5e3a16057aa15fc0a4e4e9c2d9efa | [
"MIT"
] | 5 | 2020-08-24T03:02:43.000Z | 2022-01-06T09:17:39.000Z | # -*- coding: utf-8 -*-
"""Particle system colliders objects
MIT License
Copyright (c) 2020 Mauro Lopez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFT
"""
class Collider(object):
    """Abstract base for all collider objects.

    Concrete colliders must implement :meth:`solve`; :meth:`reset` is an
    optional hook that subclasses may override.
    """

    def solve(self):
        """Resolve collisions for one simulation step.

        The dynamic system calls this every update; every concrete
        collider must override it.

        Raises
        ------
        NotImplementedError
            Always, on the base class.
        """
        raise NotImplementedError

    def reset(self):
        """Restore the collider's initial state; a no-op by default."""
        return None
class GroundCollider(Collider):
    """Infinite horizontal ground plane that particles bounce off.

    Particles are assumed to expose ``isPinned``, ``getPosition``,
    ``getPrevPosition``, ``setPosition``, ``setPrevPosition`` and
    ``getBounciness``, with positions behaving like numeric vectors
    (supporting subtraction, scalar multiplication and ``[1]`` indexing
    for the vertical component) — TODO confirm against the particle class.

    NOTE: the (misspelled) parameter names ``partciles`` and ``bouncinnes``
    are part of the public interface and are kept for compatibility.
    """

    def __init__(self, partciles, bouncinnes=0.1, friction=.9, height=0.0):
        self.bouncinnes = bouncinnes
        self.friction = friction
        self.height = float(height)
        self.particles = partciles

    def solve(self):
        """Push penetrating particles back onto the plane and reflect them.

        A particle below ``self.height`` is moved up to the plane, and its
        previous position is rewritten so that the implied (Verlet-style)
        velocity points upward, scaled by friction and bounciness.
        """
        for particle in self.particles:
            if particle.isPinned():
                continue
            position = particle.getPosition()
            if position[1] >= self.height:
                continue
            previous = particle.getPrevPosition()
            velocity = (position - previous) * self.friction
            # Clamp the particle onto the plane.
            position[1] = self.height
            particle.setPosition(position)
            # Reflect the vertical velocity component, attenuated by both the
            # particle's and the collider's bounciness factors.
            rebound = velocity[1] * particle.getBounciness() * self.bouncinnes
            previous[1] = position[1] + rebound
            particle.setPrevPosition(previous)
| 39.557377 | 94 | 0.688355 |
97903bfa7b40b8eaa37db469f17ceb6b6c82fffe | 12,095 | py | Python | tests/test_04_PGP_objects.py | sharuzzaman/PGPy | 4ae5cab8306d3911a377fc9e975bbb31e703be23 | [
"BSD-3-Clause"
] | 248 | 2015-01-12T22:52:02.000Z | 2022-03-20T22:01:36.000Z | tests/test_04_PGP_objects.py | sharuzzaman/PGPy | 4ae5cab8306d3911a377fc9e975bbb31e703be23 | [
"BSD-3-Clause"
] | 224 | 2015-01-27T13:41:56.000Z | 2022-03-31T13:21:47.000Z | tests/test_04_PGP_objects.py | sharuzzaman/PGPy | 4ae5cab8306d3911a377fc9e975bbb31e703be23 | [
"BSD-3-Clause"
] | 83 | 2015-03-18T00:11:21.000Z | 2022-03-09T20:24:23.000Z | """ test the functionality of PGPKeyring
"""
import pytest
import glob
import os
import six
from pgpy import PGPKey
from pgpy import PGPKeyring
from pgpy import PGPMessage
from pgpy import PGPSignature
from pgpy import PGPUID
from pgpy.types import Fingerprint
@pytest.fixture
def abe_image():
with open('tests/testdata/abe.jpg', 'rb') as abef:
abebytes = bytearray(os.path.getsize('tests/testdata/abe.jpg'))
abef.readinto(abebytes)
return PGPUID.new(abebytes)
_msgfiles = sorted(glob.glob('tests/testdata/messages/*.asc'))
class TestPGPMessage(object):
@pytest.mark.parametrize('msgfile', _msgfiles, ids=[os.path.basename(f) for f in _msgfiles])
def test_load_from_file(self, msgfile):
# TODO: figure out a good way to verify that all went well here, because
# PGPy reorders signatures sometimes, and also unwraps compressed messages
# so comparing str(msg) to the contents of msgfile doesn't actually work
msg = PGPMessage.from_file(msgfile)
with open(msgfile, 'r') as mf:
mt = mf.read()
assert len(str(msg)) == len(mt)
@pytest.fixture
def un():
return PGPUID.new(six.u('Temperair\xe9e Youx\'seur'))
@pytest.fixture
def unc():
return PGPUID.new(six.u('Temperair\xe9e Youx\'seur'), comment=six.u('\u2603'))
@pytest.fixture
def une():
return PGPUID.new(six.u('Temperair\xe9e Youx\'seur'), email='snowman@not.an.email.addre.ss')
@pytest.fixture
def unce():
return PGPUID.new(six.u('Temperair\xe9e Youx\'seur'), comment=six.u('\u2603'), email='snowman@not.an.email.addre.ss')
@pytest.fixture
def abe():
return PGPUID.new('Abraham Lincoln', comment='Honest Abe', email='abraham.lincoln@whitehouse.gov')
class TestPGPUID(object):
def test_userid(self, abe):
assert abe.name == 'Abraham Lincoln'
assert abe.comment == 'Honest Abe'
assert abe.email == 'abraham.lincoln@whitehouse.gov'
assert abe.image is None
def test_userphoto(self, abe_image):
assert abe_image.name == ""
assert abe_image.comment == ""
assert abe_image.email == ""
with open('tests/testdata/abe.jpg', 'rb') as abef:
abebytes = bytearray(os.path.getsize('tests/testdata/abe.jpg'))
abef.readinto(abebytes)
assert abe_image.image == abebytes
def test_format(self, un, unc, une, unce):
assert six.u("{:s}").format(un) == six.u('Temperair\xe9e Youx\'seur')
assert six.u("{:s}").format(unc) == six.u('Temperair\xe9e Youx\'seur (\u2603)')
assert six.u("{:s}").format(une) == six.u('Temperair\xe9e Youx\'seur <snowman@not.an.email.addre.ss>')
assert six.u("{:s}").format(unce) == six.u('Temperair\xe9e Youx\'seur (\u2603) <snowman@not.an.email.addre.ss>')
_keyfiles = sorted(glob.glob('tests/testdata/blocks/*key*.asc'))
_fingerprints = {'dsapubkey.asc': '2B5BBB143BA0B290DCEE6668B798AE8990877201',
'dsaseckey.asc': '2B5BBB143BA0B290DCEE6668B798AE8990877201',
'eccpubkey.asc': '502D1A5365D1C0CAA69945390BA52DF0BAA59D9C',
'eccseckey.asc': '502D1A5365D1C0CAA69945390BA52DF0BAA59D9C',
'openpgp.js.pubkey.asc': 'C7C38ECEE94A4AD32DDB064E14AB44C74D1BDAB8',
'openpgp.js.seckey.asc': 'C7C38ECEE94A4AD32DDB064E14AB44C74D1BDAB8',
'rsapubkey.asc': 'F4294BC8094A7E0585C85E8637473B3758C44F36',
'rsaseckey.asc': 'F4294BC8094A7E0585C85E8637473B3758C44F36',}
class TestPGPKey(object):
@pytest.mark.parametrize('kf', _keyfiles, ids=[os.path.basename(f) for f in _keyfiles])
def test_load_from_file(self, kf):
key, _ = PGPKey.from_file(kf)
assert key.fingerprint == _fingerprints[os.path.basename(kf)]
@pytest.mark.parametrize('kf', _keyfiles, ids=[os.path.basename(f) for f in _keyfiles])
def test_load_from_str(self, kf):
with open(kf, 'r') as tkf:
key, _ = PGPKey.from_blob(tkf.read())
assert key.fingerprint == _fingerprints[os.path.basename(kf)]
@pytest.mark.regression(issue=140)
@pytest.mark.parametrize('kf', _keyfiles, ids=[os.path.basename(f) for f in _keyfiles])
def test_load_from_bytes(self, kf):
with open(kf, 'rb') as tkf:
key, _ = PGPKey.from_blob(tkf.read())
assert key.fingerprint == _fingerprints[os.path.basename(kf)]
@pytest.mark.regression(issue=140)
@pytest.mark.parametrize('kf', _keyfiles, ids=[os.path.basename(f) for f in _keyfiles])
def test_load_from_bytearray(self, kf):
tkb = bytearray(os.stat(kf).st_size)
with open(kf, 'rb') as tkf:
tkf.readinto(tkb)
key, _ = PGPKey.from_blob(tkb)
assert key.fingerprint == _fingerprints[os.path.basename(kf)]
@pytest.mark.parametrize('kf', sorted(filter(lambda f: not f.endswith('enc.asc'), glob.glob('tests/testdata/keys/*.asc'))))
def test_save(self, kf):
# load the key and export it back to binary
key, _ = PGPKey.from_file(kf)
pgpyblob = key.__bytes__()
# try loading the exported key
reloaded, _ = PGPKey.from_file(kf)
assert pgpyblob == reloaded.__bytes__()
@pytest.fixture(scope='module')
def keyring():
return PGPKeyring()
class TestPGPKeyring(object):
def test_load(self, keyring):
# load from filenames
keys = keyring.load(glob.glob('tests/testdata/*test.asc'), glob.glob('tests/testdata/signatures/*.key.asc'))
# keys
assert all(isinstance(k, Fingerprint) for k in keys)
# __len__
assert len(keys) == 10
assert len(keyring) == 16
# __contains__
# RSA von TestKey
selectors = ["F429 4BC8 094A 7E05 85C8 5E86 3747 3B37 58C4 4F36", "37473B3758C44F36", "58C44F36",
"RSA von TestKey", "rsa@test.key"]
for selector in selectors:
assert selector in keyring
# DSA von TestKey
selectors = ["EBC8 8A94 ACB1 10F1 BE3F E3C1 2B47 4BB0 2084 C712", "2B474BB02084C712", "2084C712",
"DSA von TestKey", "dsa@test.key"]
for selector in selectors:
assert selector in keyring
# fingerprints filtering
# we have 10 keys
assert len(keyring.fingerprints()) == 10
# 10 public halves, 6 private halves
assert len(keyring.fingerprints(keyhalf='public')) == 10
assert len(keyring.fingerprints(keyhalf='private')) == 6
# we have 5 primary keys; 5 public and 2 private
assert len(keyring.fingerprints(keytype='primary')) == 5
assert len(keyring.fingerprints(keytype='primary', keyhalf='public')) == 5
assert len(keyring.fingerprints(keytype='primary', keyhalf='private')) == 2
# and the other 5; 5 public and 4 private
assert len(keyring.fingerprints(keytype='sub')) == 5
assert len(keyring.fingerprints(keytype='sub', keyhalf='public')) == 5
assert len(keyring.fingerprints(keytype='sub', keyhalf='private')) == 4
# now test sorting:
rvt = keyring._get_keys("RSA von TestKey")
assert len(rvt) == 2
assert not rvt[0].is_public
assert rvt[1].is_public
@pytest.mark.parametrize('kf', _keyfiles, ids=[os.path.basename(f) for f in _keyfiles])
def test_load_key_instance(self, keyring, kf):
key, _ = PGPKey.from_file(kf)
keys = keyring.load(key)
assert key.fingerprint in keyring
for uid in key.userids:
if uid.name != "":
assert uid.name in keyring
if uid.email != "":
assert uid.email in keyring
with keyring.key(key.fingerprint) as loaded_key:
assert loaded_key.fingerprint == key.fingerprint
def test_select_fingerprint(self, keyring):
for fp, name in [("F429 4BC8 094A 7E05 85C8 5E86 3747 3B37 58C4 4F36", "RSA von TestKey"),
(six.u("F429 4BC8 094A 7E05 85C8 5E86 3747 3B37 58C4 4F36"), six.u("RSA von TestKey")),
(Fingerprint("F429 4BC8 094A 7E05 85C8 5E86 3747 3B37 58C4 4F36"), "RSA von TestKey"),
("EBC8 8A94 ACB1 10F1 BE3F E3C1 2B47 4BB0 2084 C712", "DSA von TestKey"),
(six.u("EBC8 8A94 ACB1 10F1 BE3F E3C1 2B47 4BB0 2084 C712"), six.u("DSA von TestKey")),
(Fingerprint("EBC8 8A94 ACB1 10F1 BE3F E3C1 2B47 4BB0 2084 C712"), "DSA von TestKey"),]:
with keyring.key(fp) as key:
assert key.fingerprint == fp
assert key.userids[0].name == name
def test_select_keyid(self, keyring):
with keyring.key("37473B3758C44F36") as rsa:
assert rsa.userids[0].name == "RSA von TestKey"
with keyring.key("2B474BB02084C712") as dsa:
assert dsa.userids[0].name == "DSA von TestKey"
def test_select_shortid(self, keyring):
with keyring.key("58C44F36") as rsa:
assert rsa.userids[0].name == "RSA von TestKey"
with keyring.key("2084C712") as dsa:
assert dsa.userids[0].name == "DSA von TestKey"
def test_select_name(self, keyring):
with keyring.key("RSA von TestKey") as rsa:
assert rsa.userids[0].name == "RSA von TestKey"
with keyring.key("DSA von TestKey") as dsa:
assert dsa.userids[0].name == "DSA von TestKey"
def test_select_comment(self, keyring):
with keyring.key("2048-bit RSA") as rsa:
assert rsa.userids[0].name == "RSA von TestKey"
with keyring.key("2048-bit DSA") as dsa:
assert dsa.userids[0].name == "DSA von TestKey"
def test_select_email(self, keyring):
with keyring.key("rsa@test.key") as rsa:
assert rsa.userids[0].name == "RSA von TestKey"
with keyring.key("dsa@test.key") as dsa:
assert dsa.userids[0].name == "DSA von TestKey"
def test_select_pgpsignature(self, keyring):
sig = PGPSignature()
with open('tests/testdata/signatures/debian-sid.sig.asc', 'r') as sigf:
sig.parse(sigf.read())
with keyring.key(sig) as sigkey:
assert sigkey.fingerprint.keyid == sig.signer
def test_select_pgpmessage(self, keyring):
m1 = PGPMessage()
with open('tests/testdata/messages/message.rsa.cast5.asc', 'r') as m1f:
m1.parse(m1f.read())
with keyring.key(m1) as rsakey:
assert rsakey.fingerprint == "00EC FAF5 48AE B655 F861 8193 EEE0 97A0 17B9 79CA"
assert rsakey.parent.fingerprint == "F429 4BC8 094A 7E05 85C8 5E86 3747 3B37 58C4 4F36"
def test_unload_key(self, keyring):
with keyring.key("Test Repository Signing Key") as key:
keyring.unload(key)
# is the key and its subkeys actually gone?
assert id(key) not in keyring._keys
for pkid in iter(id(sk) for sk in key.subkeys.values()):
assert pkid not in keyring._keys
# aliases
# userid components
assert "Test Repository Signing Key" not in keyring
assert "KUS" not in keyring
assert "usc-kus@securityinnovation.com" not in keyring
# fingerprints
assert "513B 160A A994 8C1F 3D77 952D CE57 0774 D0FD CA20" not in keyring
# keyid(s)
assert "CE570774D0FDCA20" not in keyring
# shortids
assert "D0FDCA20" not in keyring
def test_unload_key_half(self, keyring):
with keyring.key('RSA von TestKey') as key:
keyring.unload(key)
# key was unloaded for real
assert id(key) not in keyring._keys
# but it was not a unique alias, because we only unloaded half of the key
# userid components
assert 'RSA von TestKey' in keyring
assert '2048-bit RSA' in keyring
assert 'rsa@test.key' in keyring
# fingerprint, keyid, shortid
assert 'F429 4BC8 094A 7E05 85C8 5E86 3747 3B37 58C4 4F36' in keyring
assert '37473B3758C44F36' in keyring
assert '58C44F36' in keyring
| 38.275316 | 127 | 0.633154 |
8b2fce3d14d4d9903b54bcd22d206eed4873dd46 | 164 | py | Python | tests/view_tree/with_template/view_tree_node.py | quadrant-newmedia/django_tree_view | ef2bd36c8af870c8612c601510e35f2dad0b7538 | [
"MIT"
] | 1 | 2020-09-30T08:51:55.000Z | 2020-09-30T08:51:55.000Z | tests/view_tree/with_template/view_tree_node.py | quadrant-newmedia/django_tree_view | ef2bd36c8af870c8612c601510e35f2dad0b7538 | [
"MIT"
] | 8 | 2020-03-24T21:59:53.000Z | 2021-09-22T18:48:09.000Z | tests/view_tree/with_template/view_tree_node.py | quadrant-newmedia/django_tree_view | ef2bd36c8af870c8612c601510e35f2dad0b7538 | [
"MIT"
] | null | null | null | from django.template.response import TemplateResponse
def get(request, **kwargs):
return TemplateResponse(request, request.view_tree_path+'/template.html', {}) | 41 | 81 | 0.786585 |
06fe8404227d64dea4fea8fbbe6e2191fb5f3c40 | 671 | py | Python | Python/StonePaperScissors.py | Resolution-1/Python-tryout | 44cdd124e1b8865d907f57e0ca75bf620f7167b8 | [
"MIT"
] | 21 | 2020-10-01T16:19:16.000Z | 2021-11-08T13:01:47.000Z | Python/StonePaperScissors.py | Resolution-1/Python-tryout | 44cdd124e1b8865d907f57e0ca75bf620f7167b8 | [
"MIT"
] | 13 | 2020-10-01T13:10:25.000Z | 2021-10-01T06:27:44.000Z | Python/StonePaperScissors.py | Resolution-1/Python-tryout | 44cdd124e1b8865d907f57e0ca75bf620f7167b8 | [
"MIT"
] | 77 | 2020-10-01T11:28:37.000Z | 2021-10-16T09:27:37.000Z | from random import*
# Interactive 5-round stone/paper/scissors game against the computer.
# Encoding: 1 = stone, 2 = paper, 3 = scissors.
print("wanna play?(y/n)")
answer = str(input())
if answer == "y":
    computer_score = 0
    user_score = 0
    print("Let's play stone paper scissor\nhere\n1 for stone\n2 for paper\n3 for scissor\nNOW LET'S PLAY")
    for _ in range(5):
        print("your turn:")
        user_pick = int(input())
        computer_pick = randint(1, 3)
        print("I chose %d" % (computer_pick,))
        # With choices 1..3, the user wins exactly when
        # (user - computer) % 3 == 1 (stone>scissors, paper>stone,
        # scissors>paper); draws and out-of-range input score nothing,
        # matching the original if-chain.
        if user_pick in (1, 2, 3) and user_pick != computer_pick:
            if (user_pick - computer_pick) % 3 == 1:
                user_score += 1
            else:
                computer_score += 1
    print("your score=%d\ncomputer score=%d" % (user_score, computer_score))
    if user_score > computer_score:
        print("you won")
    elif user_score == computer_score:
        print("it's a draw match")
    else:
        print("you lose")
if answer == 'n':
    print("then bye")
1867a903fef0303b1ca211cc5c3e8140178bb442 | 738 | py | Python | core/migrations/0004_auto_20200727_1738.py | amcquistan/project-time-tracker-api-django | da8a4129964fa4e330939178f12f24097527e77d | [
"MIT"
] | null | null | null | core/migrations/0004_auto_20200727_1738.py | amcquistan/project-time-tracker-api-django | da8a4129964fa4e330939178f12f24097527e77d | [
"MIT"
] | null | null | null | core/migrations/0004_auto_20200727_1738.py | amcquistan/project-time-tracker-api-django | da8a4129964fa4e330939178f12f24097527e77d | [
"MIT"
] | 1 | 2021-01-01T14:58:11.000Z | 2021-01-01T14:58:11.000Z | # Generated by Django 3.0.8 on 2020-07-27 17:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable ``creator`` foreign key to Organization and Project.

    Auto-generated by Django 3.0.8 (see header comment); links each row to
    the user that created it via ``settings.AUTH_USER_MODEL``.
    """

    # Must run after 0003_project so both target tables already exist.
    dependencies = [
        ('core', '0003_project'),
    ]

    operations = [
        migrations.AddField(
            model_name='organization',
            name='creator',
            # null=True lets existing rows stay valid without a default;
            # CASCADE deletes the row when the owning user is deleted.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='project',
            name='creator',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 28.384615 | 121 | 0.644986 |
3ca621bcbdb1471b5eb70ce18cb04ed008c7aa41 | 306 | py | Python | live.py | ritza-co/nix.dev | 85e23ca44bd4b049beff54fda9013bdaedd61c1c | [
"CC0-1.0"
] | 623 | 2020-07-07T09:10:57.000Z | 2022-03-31T15:01:42.000Z | live.py | ritza-co/nix.dev | 85e23ca44bd4b049beff54fda9013bdaedd61c1c | [
"CC0-1.0"
] | 160 | 2020-07-07T09:06:26.000Z | 2022-03-31T10:24:03.000Z | live.py | ritza-co/nix.dev | 85e23ca44bd4b049beff54fda9013bdaedd61c1c | [
"CC0-1.0"
] | 52 | 2020-07-09T18:07:15.000Z | 2022-03-21T22:33:42.000Z | from livereload import Server, shell
server = Server()

# Shell command that rebuilds the HTML docs (presumably a Sphinx Makefile —
# TODO confirm).
build_docs = shell("make html")

print("Doing an initial build of the docs...")
build_docs()

# Rebuild whenever any source file or template changes, then serve the
# generated HTML with live reload.
server.watch("source/*", build_docs)
server.watch("source/**/*", build_docs)
server.watch("_templates/*.html", build_docs)
server.serve(root="build/html")
| 21.857143 | 46 | 0.72549 |
5a8626a7843430db0fc7512e57eeeb45e861d9b9 | 2,253 | py | Python | code/GUI/picker.py | Lagostra/tictactoe | 17cdbc4e3e31f06dd7bd9fb6fd2a296ef279762f | [
"MIT"
] | null | null | null | code/GUI/picker.py | Lagostra/tictactoe | 17cdbc4e3e31f06dd7bd9fb6fd2a296ef279762f | [
"MIT"
] | null | null | null | code/GUI/picker.py | Lagostra/tictactoe | 17cdbc4e3e31f06dd7bd9fb6fd2a296ef279762f | [
"MIT"
] | null | null | null | import pygame
class Picker(pygame.Surface):
    """A horizontal option picker rendered onto its own pygame surface.

    ``options`` is a sequence of ``(label, value)`` pairs; two arrow
    buttons on the right-hand edge cycle through them.  ``x``/``y`` give
    the surface's position on screen, used for click hit-testing.
    """

    # Width in pixels of each arrow button on the right edge.
    BUTTON_WIDTH = 20

    def __init__(self, x, y, width, height, options, selection=0):
        super().__init__((width, height))
        self.x = x
        self.y = y
        self.options = options
        self.selected_option = selection

    def update(self, events):
        """Advance or rewind the selection when an arrow button is clicked."""
        for event in events:
            if event.type != pygame.MOUSEBUTTONDOWN:
                continue
            click_x, click_y = event.pos
            # Ignore clicks outside the picker's vertical extent.
            if not (self.y < click_y < self.y + self.get_height()):
                continue
            right_edge = self.x + self.get_width()
            if right_edge - 2 * self.BUTTON_WIDTH < click_x < right_edge - self.BUTTON_WIDTH:
                # Left arrow: previous option (wraps around).
                self.selected_option = (self.selected_option - 1) % len(self.options)
            elif right_edge - self.BUTTON_WIDTH < click_x < right_edge:
                # Right arrow: next option (wraps around).
                self.selected_option = (self.selected_option + 1) % len(self.options)

    def render(self):
        """Draw the current label, the button separators and the arrows."""
        self.fill((255, 255, 255))

        font = pygame.font.SysFont(None, self.get_height())
        label = font.render(self.options[self.selected_option][0], True, (0, 0, 0))
        self.blit(label, (5, 5))

        width = self.get_width()
        height = self.get_height()
        # Vertical separators delimiting the two arrow buttons.
        for offset in (2 * self.BUTTON_WIDTH, self.BUTTON_WIDTH):
            pygame.draw.line(self, (0, 0, 0), (width - offset, 0), (width - offset, height))

        x0 = width - 2 * self.BUTTON_WIDTH + 3
        x1 = width - self.BUTTON_WIDTH - 3
        y0 = 2
        y1 = height / 2
        y2 = height - 2
        # Left-pointing triangle ("previous"), then right-pointing ("next").
        pygame.draw.polygon(self, (0, 0, 0), ((x1, y0), (x0, y1), (x1, y2)))
        pygame.draw.polygon(self, (0, 0, 0), ((x0 + self.BUTTON_WIDTH, y0), (x1 + self.BUTTON_WIDTH, y1),
                                              (x0 + self.BUTTON_WIDTH, y2)))

    def get_value(self):
        """Return the value of the currently selected option."""
        return self.options[self.selected_option][1]
a616a64521c471189db35a18ad4e6db7839ad338 | 2,574 | py | Python | ecomstore/models.py | hmamirchishti/Technotions | e3becd5fcd626eb8fcc514d00e53626f4d7aea1f | [
"MIT"
] | null | null | null | ecomstore/models.py | hmamirchishti/Technotions | e3becd5fcd626eb8fcc514d00e53626f4d7aea1f | [
"MIT"
] | null | null | null | ecomstore/models.py | hmamirchishti/Technotions | e3becd5fcd626eb8fcc514d00e53626f4d7aea1f | [
"MIT"
] | null | null | null | from django.db import models
from django.urls import reverse
class Category(models.Model):
    """Product category with SEO metadata and an active/inactive toggle."""
    name = models.CharField(max_length=50)
    slug = models.SlugField(max_length=50, unique=True, help_text='unique value for product page URL, create from name.')
    description = models.TextField()
    is_active = models.BooleanField(default = True)
    meta_keywords = models.CharField("Meta Keywords", max_length=255, help_text='coma delimited set of SEO keywords for meta tags.')
    meta_description = models.CharField("Meta Description", max_length=255, help_text= 'description for meta tag')
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save

    class Meta:
        db_table = 'categories'
        ordering = ['-created_at']  # newest categories first
        verbose_name_plural = 'Categories'

    def __unicode__(self):
        # NOTE(review): __unicode__ is only honoured on Python 2 / old
        # Django; on Python 3 Django uses __str__ — confirm target version.
        return self.name

    def get_absolutte_url(self):
        # URL for this category's listing page, keyed by slug.
        # NOTE(review): method name is misspelled ("absolutte"); Django's
        # conventional hook is get_absolute_url — renaming would affect callers.
        return reverse('catelog_category', args=[self.slug])
class Product(models.Model):
    """Store product with pricing, stock, SEO metadata and category links."""
    name = models.CharField(max_length=255, unique=True)
    slug = models.SlugField(max_length=255, unique=True,help_text='Unique value for product page URL, created from name.')
    brand = models.CharField(max_length=50)
    sku = models.CharField(max_length=50)  # stock-keeping unit code
    price = models.DecimalField(max_digits=9,decimal_places=2)
    # Pre-discount price; 0.00 means "no previous price".
    old_price = models.DecimalField(max_digits=9,decimal_places=2,blank=True,default=0.00)
    image = models.CharField(max_length=50)
    is_active = models.BooleanField(default=True)
    is_bestseller = models.BooleanField(default=False)
    is_featured = models.BooleanField(default=False)
    quantity = models.IntegerField()  # units in stock
    description = models.TextField()
    meta_keywords = models.CharField(max_length=255,help_text='Comma-delimited set of SEO keywords for meta tag')
    meta_description = models.CharField(max_length=255,help_text='Content for description meta tag')
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save
    categories = models.ManyToManyField(Category)

    class Meta:
        db_table = 'products'
        ordering = ['-created_at']  # newest products first

    def __unicode__(self):
        # NOTE(review): only used on Python 2 / old Django; __str__ is the
        # Python 3 hook — confirm target version.
        return self.name

    def get_absolutte_url(self):
        # NOTE(review): uses self.name while Category's version uses the
        # slug — possibly inconsistent with the URLconf; verify. Method name
        # misspelling ("absolutte") kept for compatibility with callers.
        return reverse('catelog_product', args=[self.name])

    def sale_price(self):
        # Current (discounted) price when old_price is higher; None when
        # the product is not on sale.
        if self.old_price > self.price:
            return self.price
        else:
            return None
c30087c7430c7c398cd0cf092c153e6cf9031fd9 | 1,182 | py | Python | app/app_config.py | AustinVirts/origin-website | 257a8c3fdfbdc0bdd03904a3620019501de3f170 | [
"MIT"
] | 1 | 2020-05-21T04:44:45.000Z | 2020-05-21T04:44:45.000Z | app/app_config.py | AustinVirts/origin-website | 257a8c3fdfbdc0bdd03904a3620019501de3f170 | [
"MIT"
] | null | null | null | app/app_config.py | AustinVirts/origin-website | 257a8c3fdfbdc0bdd03904a3620019501de3f170 | [
"MIT"
] | null | null | null | import logging
from config import constants
from database import db
from raven.contrib.flask import Sentry
sentry = Sentry()
class AppConfig(object):
SECRET_KEY = constants.FLASK_SECRET_KEY
CSRF_ENABLED = True
SQLALCHEMY_DATABASE_URI = constants.SQLALCHEMY_DATABASE_URI
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
RECAPTCHA_SITE_KEY = constants.RECAPTCHA_SITE_KEY
RECAPTCHA_SECRET_KEY = constants.RECAPTCHA_SECRET_KEY
RECAPTCHA_SIZE = constants.RECAPTCHA_SIZE
def init_app(app):
db.init_app(app)
def init_sentry(app):
if constants.SENTRY_DSN:
sentry.init_app(app,
dsn=constants.SENTRY_DSN)
def init_prod_app(app):
app.config.from_object(__name__ + '.AppConfig')
init_app(app)
init_sentry(app)
log_formatter = logging.Formatter(
'%(asctime)s %(levelname)s [in %(pathname)s:%(lineno)d]: %(message)s')
if not constants.DEBUG:
file_handler = logging.FileHandler(constants.APP_LOG_FILENAME)
file_handler.setLevel(logging.WARNING)
file_handler.setFormatter(log_formatter)
app.logger.addHandler(file_handler)
return app
| 25.148936 | 78 | 0.725888 |
67226f9db1490c4bcd91ea525b44b69b42935d90 | 1,424 | py | Python | MLlib/utils/misc_utils.py | quadri-haider-ali/ML-DL-implementation | 1abbd0104a261ed8e21d6bed3d52f8ce62cdafde | [
"BSD-3-Clause"
] | null | null | null | MLlib/utils/misc_utils.py | quadri-haider-ali/ML-DL-implementation | 1abbd0104a261ed8e21d6bed3d52f8ce62cdafde | [
"BSD-3-Clause"
] | null | null | null | MLlib/utils/misc_utils.py | quadri-haider-ali/ML-DL-implementation | 1abbd0104a261ed8e21d6bed3d52f8ce62cdafde | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pickle
def read_data(file):
    '''
    Read training data from a whitespace-delimited text file.

    Parameters
    ==========
    file:
        data type : str
        Name of the file to be read with extension.

    Returns
    =======
    X : numpy array of shape (M, 2)
        Column index 1 of the file paired with a constant column of 1s
        acting as the bias term.
    Y : numpy array of shape (1, M)
        The last column of the file.

    Example
    =======
    If the training data is stored in "dataset.txt" use

    >>> read_data('dataset.txt')
    '''
    A = np.genfromtxt(file)
    # Keep only column index 1 of A and append a column of 1s as the
    # constant (bias) feature.
    # NOTE(review): an earlier comment claimed "all columns except the
    # last", but the slice A[:, 1:2] keeps a single column — confirm
    # which behaviour is intended.
    X = np.hstack((A[:, 1:2], np.ones((A.shape[0], 1))))
    M, N = X.shape
    # The last column is the target, reshaped into a row vector (1, M).
    Y = A[:, -1]
    Y.shape = (1, M)
    return X, Y
def printmat(name, matrix):
    '''
    Print a labelled matrix together with its dimensions, in an
    easy-to-read form.

    Parameters
    ==========
    name:
        data type : str
        The name displayed in the output.
    matrix:
        data type : numpy array
        The matrix to be displayed.
    '''
    header = 'matrix {}:'.format(name)
    print(header, matrix.shape)
    print(matrix, '\n')
def generate_weights(rows, cols, zeros=False):
    '''
    Build a ``rows`` x ``cols`` weight matrix.

    When ``zeros`` is True the matrix is all zeros; otherwise it is
    filled with uniform random values drawn from [0, 1).
    '''
    return np.zeros((rows, cols)) if zeros else np.random.rand(rows, cols)
def load_model(name):
    '''
    Deserialize and return a model previously pickled to file ``name``.

    WARNING: unpickling can execute arbitrary code — only load files
    from trusted sources.
    '''
    with open(name, 'rb') as robfile:
        return pickle.load(robfile)
4a3dc22cead938f4dcefbc7031390cb27397f82a | 7,179 | py | Python | nonebot/message.py | zxz0415/nonebot2 | 0c35449d08dfd603ee0edd46befffe0a5fd22632 | [
"MIT"
] | null | null | null | nonebot/message.py | zxz0415/nonebot2 | 0c35449d08dfd603ee0edd46befffe0a5fd22632 | [
"MIT"
] | null | null | null | nonebot/message.py | zxz0415/nonebot2 | 0c35449d08dfd603ee0edd46befffe0a5fd22632 | [
"MIT"
] | null | null | null | """
事件处理
========
NoneBot 内部处理并按优先级分发事件给所有事件响应器,提供了多个插槽以进行事件的预处理等。
"""
import asyncio
from datetime import datetime
from typing import Set, Type, Optional, TYPE_CHECKING
from nonebot.log import logger
from nonebot.rule import TrieRule
from nonebot.matcher import matchers, Matcher
from nonebot.exception import IgnoredException, StopPropagation, NoLogException
from nonebot.typing import T_State, T_EventPreProcessor, T_RunPreProcessor, T_EventPostProcessor, T_RunPostProcessor
if TYPE_CHECKING:
from nonebot.adapters import Bot, Event
_event_preprocessors: Set[T_EventPreProcessor] = set()
_event_postprocessors: Set[T_EventPostProcessor] = set()
_run_preprocessors: Set[T_RunPreProcessor] = set()
_run_postprocessors: Set[T_RunPostProcessor] = set()
def event_preprocessor(func: T_EventPreProcessor) -> T_EventPreProcessor:
"""
:说明:
事件预处理。装饰一个函数,使它在每次接收到事件并分发给各响应器之前执行。
:参数:
事件预处理函数接收三个参数。
* ``bot: Bot``: Bot 对象
* ``event: Event``: Event 对象
* ``state: T_State``: 当前 State
"""
_event_preprocessors.add(func)
return func
def event_postprocessor(func: T_EventPostProcessor) -> T_EventPostProcessor:
"""
:说明:
事件后处理。装饰一个函数,使它在每次接收到事件并分发给各响应器之后执行。
:参数:
事件后处理函数接收三个参数。
* ``bot: Bot``: Bot 对象
* ``event: Event``: Event 对象
* ``state: T_State``: 当前事件运行前 State
"""
_event_postprocessors.add(func)
return func
def run_preprocessor(func: T_RunPreProcessor) -> T_RunPreProcessor:
"""
:说明:
运行预处理。装饰一个函数,使它在每次事件响应器运行前执行。
:参数:
运行预处理函数接收四个参数。
* ``matcher: Matcher``: 当前要运行的事件响应器
* ``bot: Bot``: Bot 对象
* ``event: Event``: Event 对象
* ``state: T_State``: 当前 State
"""
_run_preprocessors.add(func)
return func
def run_postprocessor(func: T_RunPostProcessor) -> T_RunPostProcessor:
"""
:说明:
运行后处理。装饰一个函数,使它在每次事件响应器运行后执行。
:参数:
运行后处理函数接收五个参数。
* ``matcher: Matcher``: 运行完毕的事件响应器
* ``exception: Optional[Exception]``: 事件响应器运行错误(如果存在)
* ``bot: Bot``: Bot 对象
* ``event: Event``: Event 对象
* ``state: T_State``: 当前 State
"""
_run_postprocessors.add(func)
return func
async def _check_matcher(priority: int, Matcher: Type[Matcher], bot: "Bot",
event: "Event", state: T_State) -> None:
if Matcher.expire_time and datetime.now() > Matcher.expire_time:
try:
matchers[priority].remove(Matcher)
except Exception:
pass
return
try:
if not await Matcher.check_perm(
bot, event) or not await Matcher.check_rule(bot, event, state):
return
except Exception as e:
logger.opt(colors=True, exception=e).error(
f"<r><bg #f8bbd0>Rule check failed for {Matcher}.</bg #f8bbd0></r>")
return
if Matcher.temp:
try:
matchers[priority].remove(Matcher)
except Exception:
pass
await _run_matcher(Matcher, bot, event, state)
async def _run_matcher(Matcher: Type[Matcher], bot: "Bot", event: "Event",
state: T_State) -> None:
logger.info(f"Event will be handled by {Matcher}")
matcher = Matcher()
coros = list(
map(lambda x: x(matcher, bot, event, state), _run_preprocessors))
if coros:
try:
await asyncio.gather(*coros)
except IgnoredException:
logger.opt(colors=True).info(
f"Matcher {matcher} running is <b>cancelled</b>")
return
except Exception as e:
logger.opt(colors=True, exception=e).error(
"<r><bg #f8bbd0>Error when running RunPreProcessors. "
"Running cancelled!</bg #f8bbd0></r>")
return
exception = None
try:
logger.debug(f"Running matcher {matcher}")
await matcher.run(bot, event, state)
except Exception as e:
logger.opt(colors=True, exception=e).error(
f"<r><bg #f8bbd0>Running matcher {matcher} failed.</bg #f8bbd0></r>"
)
exception = e
coros = list(
map(lambda x: x(matcher, exception, bot, event, state),
_run_postprocessors))
if coros:
try:
await asyncio.gather(*coros)
except Exception as e:
logger.opt(colors=True, exception=e).error(
"<r><bg #f8bbd0>Error when running RunPostProcessors</bg #f8bbd0></r>"
)
if matcher.block:
raise StopPropagation
return
async def handle_event(bot: "Bot", event: "Event") -> Optional[Exception]:
"""
:说明:
处理一个事件。调用该函数以实现分发事件。
:参数:
* ``bot: Bot``: Bot 对象
* ``event: Event``: Event 对象
:示例:
.. code-block:: python
import asyncio
asyncio.create_task(handle_event(bot, event))
"""
show_log = True
log_msg = f"<m>{bot.type.upper()} {bot.self_id}</m> | "
try:
log_msg += event.get_log_string()
except NoLogException:
show_log = False
if show_log:
logger.opt(colors=True).success(log_msg)
state = {}
coros = list(map(lambda x: x(bot, event, state), _event_preprocessors))
if coros:
try:
if show_log:
logger.debug("Running PreProcessors...")
await asyncio.gather(*coros)
except IgnoredException as e:
logger.opt(colors=True).info(
f"Event {event.get_event_name()} is <b>ignored</b>")
return e
except Exception as e:
logger.opt(colors=True, exception=e).error(
"<r><bg #f8bbd0>Error when running EventPreProcessors. "
"Event ignored!</bg #f8bbd0></r>")
return e
# Trie Match
_, _ = TrieRule.get_value(bot, event, state)
break_flag = False
for priority in sorted(matchers.keys()):
if break_flag:
break
if show_log:
logger.debug(f"Checking for matchers in priority {priority}...")
pending_tasks = [
_check_matcher(priority, matcher, bot, event, state.copy())
for matcher in matchers[priority]
]
results = await asyncio.gather(*pending_tasks, return_exceptions=True)
for result in results:
if not isinstance(result, Exception):
continue
if isinstance(result, StopPropagation):
break_flag = True
logger.debug("Stop event propagation")
else:
logger.opt(colors=True, exception=result).error(
"<r><bg #f8bbd0>Error when checking Matcher.</bg #f8bbd0></r>"
)
return result
coros = list(map(lambda x: x(bot, event, state), _event_postprocessors))
if coros:
try:
if show_log:
logger.debug("Running PostProcessors...")
await asyncio.gather(*coros)
except Exception as e:
logger.opt(colors=True, exception=e).error(
"<r><bg #f8bbd0>Error when running EventPostProcessors</bg #f8bbd0></r>"
)
return e
| 27.400763 | 116 | 0.593676 |
588682a472abcb5793b362514da3279f77e44ed3 | 418 | py | Python | animeMusic_server/anime_music/main.py | waahah/animeMusic | 1a4c9e0859ec7ee42f5aa70cea293ae46787d0c8 | [
"MIT"
] | 287 | 2018-08-30T07:43:44.000Z | 2022-03-26T05:43:56.000Z | animeMusic_server/anime_music/main.py | waahah/animeMusic | 1a4c9e0859ec7ee42f5aa70cea293ae46787d0c8 | [
"MIT"
] | 4 | 2018-10-21T04:32:46.000Z | 2020-09-05T19:13:18.000Z | animeMusic_server/anime_music/main.py | waahah/animeMusic | 1a4c9e0859ec7ee42f5aa70cea293ae46787d0c8 | [
"MIT"
] | 43 | 2018-08-30T07:43:44.000Z | 2021-08-19T11:01:26.000Z | #-*- coding:utf-8 -*-
from tornado.options import define, options
import tornado.options
import setting
import turbo.register
import turbo.app
turbo.register.register_app(setting.SERVER_NAME, setting.TURBO_APP_SETTING, setting.WEB_APPLICATION_SETTING, __file__, globals())
define("port", default=8885, type=int)
if __name__ == '__main__':
tornado.options.parse_command_line()
turbo.app.start(options.port) | 26.125 | 129 | 0.782297 |
035a8470a56794d1b3d19f8e0a67c84d2456d969 | 4,483 | py | Python | controller/structure/concrete.py | QuantumNovice/civil-engineering-toolbox | b759df2ed32614fa237ed7e4fccaf79f78c3eee4 | [
"BSD-3-Clause"
] | 32 | 2015-11-12T08:36:26.000Z | 2021-12-28T19:48:04.000Z | controller/structure/concrete.py | QuantumNovice/civil-engineering-toolbox | b759df2ed32614fa237ed7e4fccaf79f78c3eee4 | [
"BSD-3-Clause"
] | 2 | 2020-09-17T05:47:07.000Z | 2021-09-05T10:50:24.000Z | controller/structure/concrete.py | QuantumNovice/civil-engineering-toolbox | b759df2ed32614fa237ed7e4fccaf79f78c3eee4 | [
"BSD-3-Clause"
] | 12 | 2016-04-27T06:51:48.000Z | 2021-09-05T10:30:04.000Z | from src import view
from model.material import concrete
from model.structure import concrete_slab
import cherrypy
class Concrete:
    """CherryPy page controller for reinforced-concrete design calculations."""

    def index(self):
        # Landing page: intentionally empty for now.
        pass

    def flexural_analysis(self, **var):
        """Render the flexural-analysis page for a rectangular RC section.

        URL query parameters override the defaults; material properties
        (fyr, fc) fall back to values stored in cookies.
        """
        tpl = view.lookup.get_template('structure/concrete_flexural_analysis.mako')
        section = concrete.Concrete()

        # Request inputs; cookies supply the saved material-property defaults.
        query = cherrypy.request.params
        jar = cherrypy.request.cookie

        fyr = float(query.get('fyr') or jar['fyr'].value)
        fc = float(query.get('fc') or jar['fc'].value)
        height = float(query.get('height') or 565)
        width = float(query.get('width') or 250)
        n = int(query.get('n') or 4)
        diameter = float(query.get('diameter') or 13)
        cover = float(query.get('cover') or 65)

        def rounded(value, digits):
            # Same result as float('{0:.Nf}'.format(value)) in the old code.
            return float('{0:.{1}f}'.format(value, digits))

        # Section capacity and reinforcement-ratio checks.
        mn = section.Mn(fyr, fc, height, width, n, diameter, cover)
        rho = section.rho(n, diameter, width, height)
        rho_max = section.rho_max(fc, fyr)
        As = section.As(n, diameter)
        As_max = section.As_max(fc, fyr, height, width)
        As_min = section.As_min(fc, fyr, height, width, cover)
        eps_s = section.eps_s(height - cover, As, fc, fyr, width)
        phi = section.phi(eps_s)

        context = {
            'fyr': fyr,  # MPa
            'fc': fc,  # MPa
            'width': width,  # mm
            'height': height,  # mm
            'cover': cover,  # mm
            'n': n,  # number
            'diameter': diameter,  # mm
            'mn': rounded(mn / 1E6, 2),
            'rho': rounded(rho, 5),
            'rho_max': rounded(rho_max, 5),
            'As': rounded(As, 2),
            'As_max': rounded(As_max, 2),
            'As_min': rounded(As_min, 2),
            'eps_s': rounded(eps_s, 4),
            'phi': rounded(phi, 2),
        }
        return tpl.render(**context)

    def slab_two_ways_design(self, **var):
        """Render the two-way slab design page (Marcus method)."""
        tpl = view.lookup.get_template('structure/concrete_slab.mako')
        slab = concrete_slab.Slab()

        query = cherrypy.request.params
        jar = cherrypy.request.cookie

        ly = float(query.get('ly') or 4)
        lx = float(query.get('lx') or 3)
        t = float(query.get('t') or 0.12)
        dl = float(query.get('dl') or 100)
        ll = float(query.get('ll') or 250)
        include_self_weight = query.get('include_self_weight') or 'Yes'
        kdl = float(query.get('kdl') or 1.2)
        kll = float(query.get('kll') or 1.6)
        conc_unit_weight = float(query.get('conc_unit_weight') or jar['conc_unit_weight'].value)
        fc = float(query.get('fc') or jar['fc'].value)
        fus = float(query.get('fus') or jar['fus'].value)
        slab_type = query.get('slab_type') or '1'
        diameter = float(query.get('diameter') or 10)
        dy = float(query.get('dy') or 40)
        dx = float(query.get('dx') or 50)

        # Span moments (Ml*), support moments (Mt*), bar spacings (s*) and a
        # validation message come back from the Marcus-method solver.
        Mlx, Mly, Mtx, Mty, slx, sly, stx, sty, error = slab.marcus_method(
            ly, lx, t, dl, ll, include_self_weight, kdl, kll,
            conc_unit_weight, fc, fus, slab_type, diameter, dy, dx)

        def rounded(value):
            return float('{0:.2f}'.format(value))

        context = {
            'ly': ly,
            'lx': lx,  # m
            't': t,
            'dl': dl,
            'll': ll,
            'include_self_weight': include_self_weight,
            'kdl': kdl,
            'kll': kll,
            'conc_unit_weight': conc_unit_weight,
            'fc': fc,
            'fus': fus,
            'slab_type': slab_type,
            'diameter': diameter,
            'dy': dy,
            'dx': dx,
            'Mlx': rounded(Mlx),
            'Mly': rounded(Mly),
            'Mtx': rounded(Mtx),
            'Mty': rounded(Mty),
            'slx': int(slx),
            'sly': int(sly),
            'stx': int(stx),
            'sty': int(sty),
            'error': error,
        }
        return tpl.render(**context)
| 37.672269 | 100 | 0.516172 |
f4abd2a81cb679e8ece31d75602cca14d6d60f24 | 9,676 | py | Python | Lib/test/audiotests.py | rbuzatu90/hyperv-python | 82bf5a72b4d956ea05affe1644b47e378dec0f4e | [
"bzip2-1.0.6"
] | 195 | 2016-01-14T16:03:02.000Z | 2021-12-29T09:15:02.000Z | Lib/test/audiotests.py | odsod/cpython-internals-course | 55fffca28e83ac0f30029c60113a3110451dfa08 | [
"PSF-2.0"
] | 75 | 2016-01-14T16:03:02.000Z | 2020-04-29T22:51:53.000Z | Lib/test/audiotests.py | odsod/cpython-internals-course | 55fffca28e83ac0f30029c60113a3110451dfa08 | [
"PSF-2.0"
] | 24 | 2016-02-29T11:45:47.000Z | 2021-12-24T08:41:37.000Z | from test.test_support import findfile, TESTFN, unlink
import unittest
import array
import io
import pickle
import sys
import base64
class UnseekableIO(file):
    # A Python 2 ``file`` that refuses to report or change its position,
    # used to exercise the audio modules' handling of non-seekable streams
    # (pipes, sockets). NOTE(review): relies on the py2-only ``file`` builtin.
    def tell(self):
        # Pretend the stream position is unknowable.
        raise io.UnsupportedOperation

    def seek(self, *args, **kwargs):
        # Reject any attempt to reposition the stream.
        raise io.UnsupportedOperation
def fromhex(s):
    """Decode a hex dump string, ignoring any space separators."""
    compact = "".join(s.split(" "))
    return base64.b16decode(compact)
def byteswap2(data):
    """Swap the two bytes of every 16-bit sample in *data*.

    Rewritten with byte slicing (matching ``byteswap3``) instead of
    ``array('h').fromstring``/``tostring``, which were removed in
    Python 3.9 and whose behavior this preserves.
    """
    ba = bytearray(data)
    ba[::2] = data[1::2]
    ba[1::2] = data[::2]
    return bytes(ba)
def byteswap3(data):
    """Reverse each 3-byte frame of *data* by exchanging its outer bytes."""
    swapped = bytearray(data)
    # Tuple assignment: both right-hand slices are read before either write.
    swapped[::3], swapped[2::3] = data[2::3], data[::3]
    return bytes(swapped)
def byteswap4(data):
    """Reverse the byte order of every 32-bit sample in *data*.

    Rewritten with explicit 4-byte slicing (matching ``byteswap3``) instead
    of ``array('i').fromstring``/``tostring``: those methods were removed in
    Python 3.9, and the 'i' type code's item size is platform-dependent,
    whereas this version always swaps exactly 4-byte groups.
    """
    ba = bytearray(data)
    ba[::4] = data[3::4]
    ba[1::4] = data[2::4]
    ba[2::4] = data[1::4]
    ba[3::4] = data[::4]
    return bytes(ba)
class AudioTests:
    # Shared fixture and assertion helpers for audio-module test cases.
    # Subclasses are expected to provide ``self.module`` (the audio module
    # under test) and the expected stream parameters (nchannels, sampwidth,
    # framerate, nframes, comptype, compname).

    # Whether closing the audio reader/writer is expected to also close the
    # underlying file object it wraps.
    close_fd = False

    def setUp(self):
        # Handles for the reader (f) and writer (fout) opened by a test;
        # tearDown closes whichever ones were actually created.
        self.f = self.fout = None

    def tearDown(self):
        # Close anything the test opened and remove the temporary file.
        if self.f is not None:
            self.f.close()
        if self.fout is not None:
            self.fout.close()
        unlink(TESTFN)

    def check_params(self, f, nchannels, sampwidth, framerate, nframes,
                     comptype, compname):
        # Assert that reader *f* reports exactly the given stream parameters,
        # both via the individual getters and via getparams(), and that the
        # params tuple survives a pickle round-trip.
        self.assertEqual(f.getnchannels(), nchannels)
        self.assertEqual(f.getsampwidth(), sampwidth)
        self.assertEqual(f.getframerate(), framerate)
        self.assertEqual(f.getnframes(), nframes)
        self.assertEqual(f.getcomptype(), comptype)
        self.assertEqual(f.getcompname(), compname)

        params = f.getparams()
        self.assertEqual(params,
                (nchannels, sampwidth, framerate, nframes, comptype, compname))

        dump = pickle.dumps(params)
        self.assertEqual(pickle.loads(dump), params)
class AudioWriteTests(AudioTests):
    # Write-path tests: create audio files (including on unseekable streams)
    # and verify the written frames and headers read back correctly.

    def create_file(self, testfile):
        # Open *testfile* for writing through the module under test and
        # configure it with the expected stream parameters.
        f = self.fout = self.module.open(testfile, 'wb')
        f.setnchannels(self.nchannels)
        f.setsampwidth(self.sampwidth)
        f.setframerate(self.framerate)
        f.setcomptype(self.comptype, self.compname)
        return f

    def check_file(self, testfile, nframes, frames):
        # Re-open *testfile* and assert its header and frame data match.
        f = self.module.open(testfile, 'rb')
        try:
            self.assertEqual(f.getnchannels(), self.nchannels)
            self.assertEqual(f.getsampwidth(), self.sampwidth)
            self.assertEqual(f.getframerate(), self.framerate)
            self.assertEqual(f.getnframes(), nframes)
            self.assertEqual(f.readframes(nframes), frames)
        finally:
            f.close()

    def test_write_params(self):
        # Parameters queried from a writer reflect what was configured.
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(self.frames)
        self.check_params(f, self.nchannels, self.sampwidth, self.framerate,
                          self.nframes, self.comptype, self.compname)
        f.close()

    def test_write(self):
        # Basic write/read round-trip.
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(self.frames)
        f.close()

        self.check_file(TESTFN, self.nframes, self.frames)

    def test_incompleted_write(self):
        # Announcing more frames than written: close() must patch the header.
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes + 1)
            f.writeframes(self.frames)
            f.close()

        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes, self.frames)

    def test_multiple_writes(self):
        # Frames written in several writeframes() calls behave like one write.
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes)
            framesize = self.nchannels * self.sampwidth
            f.writeframes(self.frames[:-framesize])
            f.writeframes(self.frames[-framesize:])
            f.close()

        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes, self.frames)

    def test_overflowed_write(self):
        # Announcing fewer frames than written: header updated on close().
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes - 1)
            f.writeframes(self.frames)
            f.close()

        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes, self.frames)

    def test_unseekable_read(self):
        # A correctly written file can be read through an unseekable stream.
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(self.frames)
        f.close()

        with UnseekableIO(TESTFN, 'rb') as testfile:
            self.check_file(testfile, self.nframes, self.frames)

    def test_unseekable_write(self):
        # Writing to an unseekable stream works when nframes is exact.
        with UnseekableIO(TESTFN, 'wb') as testfile:
            f = self.create_file(testfile)
            f.setnframes(self.nframes)
            f.writeframes(self.frames)
            f.close()
            self.fout = None

        self.check_file(TESTFN, self.nframes, self.frames)

    def test_unseekable_incompleted_write(self):
        # With an unseekable stream the header cannot be patched, so the
        # originally announced (too large) frame count remains on disk.
        with UnseekableIO(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes + 1)
            try:
                f.writeframes(self.frames)
            except IOError:
                pass
            try:
                f.close()
            except IOError:
                pass

        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes + 1, self.frames)

    def test_unseekable_overflowed_write(self):
        # Same as above with an under-announced count: the extra frames are
        # lost because the header keeps the smaller value.
        with UnseekableIO(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes - 1)
            try:
                f.writeframes(self.frames)
            except IOError:
                pass
            try:
                f.close()
            except IOError:
                pass

        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            framesize = self.nchannels * self.sampwidth
            self.check_file(testfile, self.nframes - 1, self.frames[:-framesize])
class AudioTestsWithSourceFile(AudioTests):
    # Read-path tests that run against a pre-recorded sample file
    # (``cls.sndfilename`` in the audiodata test-data directory).

    @classmethod
    def setUpClass(cls):
        # Resolve the shared sample file once for the whole test class.
        cls.sndfilepath = findfile(cls.sndfilename, subdir='audiodata')

    def test_read_params(self):
        # The sample file's header matches the expected parameters.
        f = self.f = self.module.open(self.sndfilepath)
        #self.assertEqual(f.getfp().name, self.sndfilepath)
        self.check_params(f, self.nchannels, self.sampwidth, self.framerate,
                          self.sndfilenframes, self.comptype, self.compname)

    def test_close(self):
        # Closing the reader/writer closes the wrapped file only when the
        # module owns it (see AudioTests.close_fd).
        with open(self.sndfilepath, 'rb') as testfile:
            f = self.f = self.module.open(testfile)
            self.assertFalse(testfile.closed)
            f.close()
            self.assertEqual(testfile.closed, self.close_fd)

        with open(TESTFN, 'wb') as testfile:
            fout = self.fout = self.module.open(testfile, 'wb')
            self.assertFalse(testfile.closed)
            with self.assertRaises(self.module.Error):
                fout.close()
            self.assertEqual(testfile.closed, self.close_fd)
            fout.close()  # do nothing

    def test_read(self):
        # Sequential reads, rewind(), and tell()/setpos() positioning.
        framesize = self.nchannels * self.sampwidth
        chunk1 = self.frames[:2 * framesize]
        chunk2 = self.frames[2 * framesize: 4 * framesize]
        f = self.f = self.module.open(self.sndfilepath)
        self.assertEqual(f.readframes(0), b'')
        self.assertEqual(f.tell(), 0)
        self.assertEqual(f.readframes(2), chunk1)
        f.rewind()
        pos0 = f.tell()
        self.assertEqual(pos0, 0)
        self.assertEqual(f.readframes(2), chunk1)
        pos2 = f.tell()
        self.assertEqual(pos2, 2)
        self.assertEqual(f.readframes(2), chunk2)
        f.setpos(pos2)
        self.assertEqual(f.readframes(2), chunk2)
        f.setpos(pos0)
        self.assertEqual(f.readframes(2), chunk1)
        # Out-of-range positions are rejected.
        with self.assertRaises(self.module.Error):
            f.setpos(-1)
        with self.assertRaises(self.module.Error):
            f.setpos(f.getnframes() + 1)

    def test_copy(self):
        # Copy the sample file in growing chunks; the copy must compare
        # equal to the source in both params and frame data.
        f = self.f = self.module.open(self.sndfilepath)
        fout = self.fout = self.module.open(TESTFN, 'wb')
        fout.setparams(f.getparams())
        i = 0
        n = f.getnframes()
        while n > 0:
            i += 1
            fout.writeframes(f.readframes(i))
            n -= i
        fout.close()
        fout = self.fout = self.module.open(TESTFN, 'rb')
        f.rewind()
        self.assertEqual(f.getparams(), fout.getparams())
        self.assertEqual(f.readframes(f.getnframes()),
                         fout.readframes(fout.getnframes()))

    def test_read_not_from_start(self):
        # The module can open audio data embedded after a non-audio prefix.
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            with open(self.sndfilepath, 'rb') as f:
                testfile.write(f.read())

        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            f = self.module.open(testfile, 'rb')
            try:
                self.assertEqual(f.getnchannels(), self.nchannels)
                self.assertEqual(f.getsampwidth(), self.sampwidth)
                self.assertEqual(f.getframerate(), self.framerate)
                self.assertEqual(f.getnframes(), self.sndfilenframes)
                self.assertEqual(f.readframes(self.nframes), self.frames)
            finally:
                f.close()
| 34.070423 | 81 | 0.59384 |
0b0c81981471c0f0330025b49952785f336dc29b | 1,953 | py | Python | mzdeepnet/base.py | MobtgZhang/mzdeepnet | 7856461444d9874de2fce9883e23bcd571834364 | [
"MIT"
] | null | null | null | mzdeepnet/base.py | MobtgZhang/mzdeepnet | 7856461444d9874de2fce9883e23bcd571834364 | [
"MIT"
] | null | null | null | mzdeepnet/base.py | MobtgZhang/mzdeepnet | 7856461444d9874de2fce9883e23bcd571834364 | [
"MIT"
] | null | null | null | import logging
import pickle
logging.basicConfig(
level=logging.INFO,
format='%(levelname)-8s %(message)s',
)
class ParamMixin(object):
    # Abstract interface: objects exposing a flat list of trainable parameters.
    @property
    def params(self):
        """ List of Parameter objects. """
        raise NotImplementedError()

    @params.setter
    def params(self, params):
        # Subclasses must accept a list matching what the getter returns.
        raise NotImplementedError()
class PhaseMixin(object):
    # Objects carrying a train/test "phase" flag.
    # Current phase; None until explicitly set.
    _phase = None

    @property
    def phase(self):
        """Return the current phase value."""
        return self._phase

    @phase.setter
    def phase(self, phase):
        self._phase = phase
class CollectionMixin(ParamMixin, PhaseMixin):
    """Aggregates parameters and phase over the objects in ``collection``."""

    # Members to aggregate over; subclasses populate this.
    collection = []

    @property
    def params(self):
        # Flatten the parameter lists of every parameterized member,
        # preserving collection order.
        return [p for obj in self.collection
                if isinstance(obj, ParamMixin)
                for p in obj.params]

    @params.setter
    def params(self, params):
        # Distribute a flat parameter list back to the members, slicing off
        # as many entries as each member currently holds.
        cursor = 0
        for obj in self.collection:
            if not isinstance(obj, ParamMixin):
                continue
            count = len(obj.params)
            obj.params = params[cursor:cursor + count]
            cursor += count

    @property
    def phase(self):
        return self._phase

    @phase.setter
    def phase(self, phase):
        # Skip redundant propagation when the phase is unchanged.
        if self._phase == phase:
            return
        self._phase = phase
        for obj in self.collection:
            if isinstance(obj, PhaseMixin):
                obj.phase = phase
class Model(ParamMixin):
    # Abstract base for trainable models.
    def setup(self, *array_shapes):
        """Prepare internal state for inputs of the given shapes (no-op by default)."""
        pass

    def update(self, *arrays):
        """Perform one update step on the given input arrays; must be overridden."""
        raise NotImplementedError()

    def load_state(self,state_dict):
        """Restore model state from *state_dict* (no-op by default)."""
        pass
class PickleMixin(object):
    """Drops transient attributes when the object is pickled.

    Any instance attribute whose name starts with one of the prefixes in
    ``_pickle_ignore`` is replaced by ``None`` in the pickled state.
    """

    # Name prefixes marking attributes as transient (not worth pickling).
    _pickle_ignore = ['_tmp_']

    def _pickle_discard(self, attr_name):
        # True when *attr_name* carries one of the transient prefixes.
        return any(attr_name.startswith(prefix) for prefix in self._pickle_ignore)

    def __getstate__(self):
        state = {}
        for name, value in self.__dict__.items():
            state[name] = None if self._pickle_discard(name) else value
        return state
9022c4f31c87af15982a675ff14634791bf74083 | 10,121 | py | Python | deeprankcore/Metrics.py | DeepRank/deeprank-gnn-2 | 9d1b5f254ae25364bec88ba6e82a6aa1022fc699 | [
"Apache-2.0"
] | null | null | null | deeprankcore/Metrics.py | DeepRank/deeprank-gnn-2 | 9d1b5f254ae25364bec88ba6e82a6aa1022fc699 | [
"Apache-2.0"
] | 17 | 2022-03-22T16:07:45.000Z | 2022-03-31T08:09:52.000Z | deeprankcore/Metrics.py | DeepRank/deeprank-gnn-2 | 9d1b5f254ae25364bec88ba6e82a6aa1022fc699 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
def get_binary(values, threshold, target):
    """
    Transforms continuous or multiclass values into binary values (0/1)

    Args:
        values (list): vector of the target values
        threshold (int or float): threshold used to assign a binary value
            0 is assigned to 'bad' values;
            1 is assigned to 'good' values
        target (string): target (y)
            if target is 'fnat' or 'bin_class': target value > threshold = 1
            else: target value > threshold = 0

    Returns:
        list: list of binary values
    """
    # For fnat/bin_class larger is better; for rmsd-like targets smaller is.
    higher_is_better = target in ("fnat", "bin_class")
    if higher_is_better:
        return [1 if value > threshold else 0 for value in values]
    return [1 if value < threshold else 0 for value in values]
def get_comparison(prediction, ground_truth, binary=True, classes=None):
    """
    Computes the confusion matrix to get, per class, the number of:
    - false positive (FP)
    - false negative (FN)
    - true positive (TP)
    - true negative (TN)

    Args:
        prediction (list): list of predicted values
        ground_truth (list): list of target values (y)
        binary (bool, optional): If True, return one scalar per FP/FN/TP/TN
            (the counts for the positive class). If False, return one vector
            of n_classes values for each. Defaults to True.
        classes (list, optional): Array-like of shape (n_classes).
            Defaults to [0, 1].

    Returns:
        int: false_positive, false_negative, true_positive, true_negative
    """
    if classes is None:
        classes = [0, 1]

    cm = confusion_matrix(ground_truth, prediction, labels=classes)
    diagonal = np.diag(cm)
    false_positive = cm.sum(axis=0) - diagonal
    false_negative = cm.sum(axis=1) - diagonal
    true_positive = diagonal
    true_negative = cm.sum() - (false_positive + false_negative + true_positive)

    if binary:
        # Scalar counts for the positive (label 1) class only.
        return false_positive[1], false_negative[1], true_positive[1], true_negative[1]
    return false_positive, false_negative, true_positive, true_negative
class Metrics():
    def __init__(self, prediction, y, target, threshold=4, binary=True): # noqa
        """
        Master class from which all metrics are computed

        Computed metrics:

        Classification metrics:
        - self.sensitivity: Sensitivity, hit rate, recall, or true positive rate
        - self.specificity: Specificity or true negative rate
        - self.precision: Precision or positive predictive value
        - self.NPV: Negative predictive value
        - self.FPR: Fall out or false positive rate
        - self.FNR: False negative rate
        - self.FDR: False discovery rate
        - self.accuracy: Accuracy
        - self.auc(): AUC
        - self.hitrate(): Hit rate

        Regression metrics:
        - self.explained_variance: Explained variance regression score function
        - self.max_error: Max_error metric calculates the maximum residual error
        - self.mean_absolute_error: Mean absolute error regression loss
          (the misspelled 'mean_abolute_error' is kept as a legacy alias)
        - self.mean_squared_error: Mean squared error regression loss
        - self.root_mean_squared_error: Root mean squared error regression loss
        - self.mean_squared_log_error: Mean squared logarithmic error regression loss
        - self.median_squared_log_error: Median absolute error regression loss
        - self.r2_score: R^2 (coefficient of determination) regression score function

        Args:
            prediction (list): predicted values
            y (list): list of target values
            target (string): irmsd, fnat, capri_class, bin_class
            binary (bool, optional): transform the data in binary vectors. Defaults to True.
            threshold (int, optional): threshold used to split the data into a binary vector. Defaults to 4.
        """
        self.prediction = prediction
        self.y = y
        self.binary = binary
        self.target = target
        self.threshold = threshold

        print(f"Threshold set to {self.threshold}")

        # Classification counts: binarize first when requested, otherwise
        # pick the class labels matching the multiclass target.
        if self.binary:
            prediction_binary = get_binary(self.prediction, self.threshold, self.target)
            y_binary = get_binary(self.y, self.threshold, self.target)
            classes = [0, 1]
            (
                false_positive,
                false_negative,
                true_positive,
                true_negative,
            ) = get_comparison(
                prediction_binary, y_binary, self.binary, classes=classes
            )
        else:
            if target == "capri_class":
                classes = [1, 2, 3, 4, 5]
            elif target == "bin_class":
                classes = [0, 1]
            else:
                raise ValueError("target must be capri_class on bin_class")
            (
                false_positive,
                false_negative,
                true_positive,
                true_negative,
            ) = get_comparison(self.prediction, self.y, self.binary, classes=classes)

        # Each ratio may divide by zero (or operate on vectors with zeros),
        # in which case the metric is left undefined (None).
        # NOTE(review): the broad BaseException catches are kept from the
        # original; they also swallow e.g. KeyboardInterrupt.
        try:
            # Sensitivity, hit rate, recall, or true positive rate
            self.sensitivity = true_positive / (true_positive + false_negative)
        except BaseException:
            self.sensitivity = None

        try:
            # Specificity or true negative rate
            self.specificity = true_negative / (true_negative + false_positive)
        except BaseException:
            self.specificity = None

        try:
            # Precision or positive predictive value
            self.precision = true_positive / (true_positive + false_positive)
        except BaseException:
            self.precision = None

        try:
            # Negative predictive value
            self.NPV = true_negative / (true_negative + false_negative)
        except BaseException:
            self.NPV = None

        try:
            # Fall out or false positive rate
            self.FPR = false_positive / (false_positive + true_negative)
        except BaseException:
            self.FPR = None

        try:
            # False negative rate
            self.FNR = false_negative / (true_positive + false_negative)
        except BaseException:
            self.FNR = None

        try:
            # False discovery rate
            self.FDR = false_positive / (true_positive + false_positive)
        except BaseException:
            self.FDR = None

        self.accuracy = (true_positive + true_negative) / (
            true_positive + false_positive + false_negative + true_negative
        )

        # regression metrics (None unless the target is a regression target)
        self.explained_variance = None
        self.max_error = None
        # Fix: the original only initialized the misspelled attribute
        # 'mean_abolute_error' but assigned 'mean_absolute_error' below,
        # so the correct name was missing for classification targets.
        # Both are now always defined; the misspelling is a legacy alias.
        self.mean_absolute_error = None
        self.mean_abolute_error = None
        self.mean_squared_error = None
        self.root_mean_squared_error = None
        self.mean_squared_log_error = None
        # NOTE(review): despite its name, this stores the *median absolute*
        # error (see assignment below); renaming would break callers.
        self.median_squared_log_error = None
        self.r2_score = None

        if target in ["fnat", "irmsd", "lrmsd"]:
            # Explained variance regression score function
            self.explained_variance = metrics.explained_variance_score(
                self.y, self.prediction
            )

            # Max_error metric calculates the maximum residual error
            self.max_error = metrics.max_error(self.y, self.prediction)

            # Mean absolute error regression loss
            self.mean_absolute_error = metrics.mean_absolute_error(
                self.y, self.prediction
            )
            # Keep the historical misspelling in sync (legacy alias).
            self.mean_abolute_error = self.mean_absolute_error

            # Mean squared error regression loss
            self.mean_squared_error = metrics.mean_squared_error(
                self.y, self.prediction, squared=True
            )

            # Root mean squared error regression loss
            self.root_mean_squared_error = metrics.mean_squared_error(
                self.y, self.prediction, squared=False
            )

            try:
                # Mean squared logarithmic error regression loss
                self.mean_squared_log_error = metrics.mean_squared_log_error(
                    self.y, self.prediction
                )
            except ValueError:
                print(
                    "WARNING: Mean Squared Logarithmic Error cannot be used when "
                    "targets contain negative values."
                )

            # Median absolute error regression loss
            self.median_squared_log_error = metrics.median_absolute_error(
                self.y, self.prediction
            )

            # R^2 (coefficient of determination) regression score function
            self.r2_score = metrics.r2_score(self.y, self.prediction)

    def format_score(self):
        """
        Sorts the predicted values depending on the target:
        - if target is fnat or bin_class: the highest value the better ranked
        - else: the lowest value the better ranked

        Returns:
            lists: ranks of the predicted values and
            the corresponding binary (0/1) target values
        """
        idx = np.argsort(self.prediction)

        inverse = ["fnat", "bin_class"]
        if self.target in inverse:
            # argsort is ascending; reverse so the best predictions come first.
            idx = idx[::-1]

        ground_truth_bool = get_binary(self.y, self.threshold, self.target)
        ground_truth_bool = np.array(ground_truth_bool)

        return idx, ground_truth_bool

    def hitrate(self):
        """
        Sorts the target boolean values (0/1) according to the ranks of predicted values

        Returns:
            list: the cumulative sum of hits (1)
        """
        idx, ground_truth_bool = self.format_score()
        return np.cumsum(ground_truth_bool[idx])

    def auc(self):
        """
        Computes the Receiver Operating Characteristic (ROC) area under the curve (AUC)

        Returns:
            float: AUC of the ROC curve
        """
        idx, ground_truth_bool = self.format_score()
        return roc_auc_score(ground_truth_bool, idx)
| 36.017794 | 122 | 0.605375 |
9db30c7d59babb0b73b18883ea0d9fcb0b390fe1 | 19,592 | py | Python | stable_baselines3/common/callbacks.py | Practical-Formal-Methods/mod_stable_baselines3 | 08bdb0a529c8ab446ac7973f2a02f832c0c3f454 | [
"MIT"
] | null | null | null | stable_baselines3/common/callbacks.py | Practical-Formal-Methods/mod_stable_baselines3 | 08bdb0a529c8ab446ac7973f2a02f832c0c3f454 | [
"MIT"
] | null | null | null | stable_baselines3/common/callbacks.py | Practical-Formal-Methods/mod_stable_baselines3 | 08bdb0a529c8ab446ac7973f2a02f832c0c3f454 | [
"MIT"
] | null | null | null | import os
import warnings
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional, Union
import mod_gym
import numpy as np
from mod_stable_baselines3.stable_baselines3.common import base_class # pytype: disable=pyi-error
from mod_stable_baselines3.stable_baselines3.common.evaluation import evaluate_policy
from mod_stable_baselines3.stable_baselines3.common.vec_env import DummyVecEnv, VecEnv, sync_envs_normalization
class BaseCallback(ABC):
    """
    Base class for callback.

    Defines the training lifecycle hooks (training start/end, rollout
    start/end, per-step). Public ``on_*`` methods are called by the
    algorithm; subclasses override the private ``_on_*`` hooks.

    :param verbose:
    """

    def __init__(self, verbose: int = 0):
        super(BaseCallback, self).__init__()
        # The RL model
        self.model = None  # type: Optional[base_class.BaseAlgorithm]
        # An alias for self.model.get_env(), the environment used for training
        self.training_env = None  # type: Union[gym.Env, VecEnv, None]
        # Number of time the callback was called
        self.n_calls = 0  # type: int
        # n_envs * n times env.step() was called
        self.num_timesteps = 0  # type: int
        self.verbose = verbose
        self.locals: Dict[str, Any] = {}
        self.globals: Dict[str, Any] = {}
        self.logger = None
        # Sometimes, for event callback, it is useful
        # to have access to the parent object
        self.parent = None  # type: Optional[BaseCallback]

    # Type hint as string to avoid circular import
    def init_callback(self, model: "base_class.BaseAlgorithm") -> None:
        """
        Initialize the callback by saving references to the
        RL model and the training environment for convenience.
        """
        self.model = model
        self.training_env = model.get_env()
        self.logger = model.logger
        self._init_callback()

    def _init_callback(self) -> None:
        """Hook for subclass initialization; default does nothing."""
        pass

    def on_training_start(self, locals_: Dict[str, Any], globals_: Dict[str, Any]) -> None:
        # Those are reference and will be updated automatically
        self.locals = locals_
        self.globals = globals_
        self._on_training_start()

    def _on_training_start(self) -> None:
        """Hook called once before training; default does nothing."""
        pass

    def on_rollout_start(self) -> None:
        self._on_rollout_start()

    def _on_rollout_start(self) -> None:
        """Hook called before each rollout collection; default does nothing."""
        pass

    @abstractmethod
    def _on_step(self) -> bool:
        """
        :return: If the callback returns False, training is aborted early.
        """
        return True

    def on_step(self) -> bool:
        """
        This method will be called by the model after each call to ``env.step()``.

        For child callback (of an ``EventCallback``), this will be called
        when the event is triggered.

        :return: If the callback returns False, training is aborted early.
        """
        self.n_calls += 1
        # timesteps start at zero
        self.num_timesteps = self.model.num_timesteps

        return self._on_step()

    def on_training_end(self) -> None:
        self._on_training_end()

    def _on_training_end(self) -> None:
        """Hook called once after training; default does nothing."""
        pass

    def on_rollout_end(self) -> None:
        self._on_rollout_end()

    def _on_rollout_end(self) -> None:
        """Hook called after each rollout collection; default does nothing."""
        pass

    def update_locals(self, locals_: Dict[str, Any]) -> None:
        """
        Update the references to the local variables.

        :param locals_: the local variables during rollout collection
        """
        self.locals.update(locals_)
        self.update_child_locals(locals_)

    def update_child_locals(self, locals_: Dict[str, Any]) -> None:
        """
        Update the references to the local variables on sub callbacks.

        :param locals_: the local variables during rollout collection
        """
        pass
class EventCallback(BaseCallback):
    """
    Base class for triggering callback on event.

    :param callback: Callback that will be called
        when an event is triggered.
    :param verbose:
    """

    def __init__(self, callback: Optional[BaseCallback] = None, verbose: int = 0):
        super(EventCallback, self).__init__(verbose=verbose)
        self.callback = callback
        # Give access to the parent
        if callback is not None:
            self.callback.parent = self

    def init_callback(self, model: "base_class.BaseAlgorithm") -> None:
        """Initialize this callback and propagate the model to the child."""
        super(EventCallback, self).init_callback(model)
        if self.callback is not None:
            self.callback.init_callback(self.model)

    def _on_training_start(self) -> None:
        # Forward the training-start notification to the child callback.
        if self.callback is not None:
            self.callback.on_training_start(self.locals, self.globals)

    def _on_event(self) -> bool:
        """Trigger the child callback; subclasses call this when their event fires."""
        if self.callback is not None:
            return self.callback.on_step()
        return True

    def _on_step(self) -> bool:
        # The event callback itself never aborts training on a plain step.
        return True

    def update_child_locals(self, locals_: Dict[str, Any]) -> None:
        """
        Update the references to the local variables.

        :param locals_: the local variables during rollout collection
        """
        if self.callback is not None:
            self.callback.update_locals(locals_)
class CallbackList(BaseCallback):
    """
    Class for chaining callbacks.

    :param callbacks: A list of callbacks that will be called
        sequentially.
    """

    def __init__(self, callbacks: List[BaseCallback]):
        super(CallbackList, self).__init__()
        assert isinstance(callbacks, list)
        self.callbacks = callbacks

    def _init_callback(self) -> None:
        for cb in self.callbacks:
            cb.init_callback(self.model)

    def _on_training_start(self) -> None:
        for cb in self.callbacks:
            cb.on_training_start(self.locals, self.globals)

    def _on_rollout_start(self) -> None:
        for cb in self.callbacks:
            cb.on_rollout_start()

    def _on_step(self) -> bool:
        # Every callback must be stepped (they may have side effects), so
        # collect all results first, then stop training if any returned False.
        step_results = [cb.on_step() for cb in self.callbacks]
        return all(step_results)

    def _on_rollout_end(self) -> None:
        for cb in self.callbacks:
            cb.on_rollout_end()

    def _on_training_end(self) -> None:
        for cb in self.callbacks:
            cb.on_training_end()

    def update_child_locals(self, locals_: Dict[str, Any]) -> None:
        """
        Update the references to the local variables.

        :param locals_: the local variables during rollout collection
        """
        for cb in self.callbacks:
            cb.update_locals(locals_)
class CheckpointCallback(BaseCallback):
    """
    Callback for saving a model every ``save_freq`` calls
    to ``env.step()``.

    .. warning::

      When using multiple environments, each call to  ``env.step()``
      will effectively correspond to ``n_envs`` steps.
      To account for that, you can use ``save_freq = max(save_freq // n_envs, 1)``

    :param save_freq:
    :param save_path: Path to the folder where the model will be saved.
    :param name_prefix: Common prefix to the saved models
    :param verbose:
    """

    def __init__(self, save_freq: int, save_path: str, name_prefix: str = "rl_model", verbose: int = 0):
        super(CheckpointCallback, self).__init__(verbose)
        self.save_freq = save_freq
        self.save_path = save_path
        self.name_prefix = name_prefix

    def _init_callback(self) -> None:
        # Create the target folder up front if one was requested.
        if self.save_path is not None:
            os.makedirs(self.save_path, exist_ok=True)

    def _on_step(self) -> bool:
        # Only act on every save_freq-th call; never abort training.
        if self.n_calls % self.save_freq != 0:
            return True
        path = os.path.join(self.save_path, f"{self.name_prefix}_{self.num_timesteps}_steps")
        self.model.save(path)
        if self.verbose > 1:
            print(f"Saving model checkpoint to {path}")
        return True
class ConvertCallback(BaseCallback):
    """
    Convert functional callback (old-style) to object.

    :param callback:
    :param verbose:
    """

    def __init__(self, callback: Callable[[Dict[str, Any], Dict[str, Any]], bool], verbose: int = 0):
        super(ConvertCallback, self).__init__(verbose)
        self.callback = callback

    def _on_step(self) -> bool:
        # Without a wrapped function there is nothing that can veto training.
        if self.callback is None:
            return True
        return self.callback(self.locals, self.globals)
class EvalCallback(EventCallback):
    """
    Callback for evaluating an agent.
    .. warning::
        When using multiple environments, each call to ``env.step()``
        will effectively correspond to ``n_envs`` steps.
        To account for that, you can use ``eval_freq = max(eval_freq // n_envs, 1)``
    :param eval_env: The environment used for initialization
    :param callback_on_new_best: Callback to trigger
        when there is a new best model according to the ``mean_reward``
    :param n_eval_episodes: The number of episodes to test the agent
    :param eval_freq: Evaluate the agent every ``eval_freq`` call of the callback.
    :param log_path: Path to a folder where the evaluations (``evaluations.npz``)
        will be saved. It will be updated at each evaluation.
    :param best_model_save_path: Path to a folder where the best model
        according to performance on the eval env will be saved.
    :param deterministic: Whether the evaluation should
        use a stochastic or deterministic actions.
    :param render: Whether to render or not the environment during evaluation
    :param verbose:
    :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been
        wrapped with a Monitor wrapper)
    """
    def __init__(
        self,
        eval_env: Union[mod_gym.gym.Env, VecEnv],
        callback_on_new_best: Optional[BaseCallback] = None,
        n_eval_episodes: int = 5,
        eval_freq: int = 10000,
        log_path: str = None,
        best_model_save_path: str = None,
        deterministic: bool = True,
        render: bool = False,
        verbose: int = 1,
        warn: bool = True,
    ):
        super(EvalCallback, self).__init__(callback_on_new_best, verbose=verbose)
        self.n_eval_episodes = n_eval_episodes
        self.eval_freq = eval_freq
        # Start at -inf so the first evaluation always becomes the new best.
        self.best_mean_reward = -np.inf
        self.last_mean_reward = -np.inf
        self.deterministic = deterministic
        self.render = render
        self.warn = warn
        # Convert to VecEnv for consistency
        if not isinstance(eval_env, VecEnv):
            eval_env = DummyVecEnv([lambda: eval_env])
        self.eval_env = eval_env
        self.best_model_save_path = best_model_save_path
        # Logs will be written in ``evaluations.npz``
        if log_path is not None:
            log_path = os.path.join(log_path, "evaluations")
        self.log_path = log_path
        self.evaluations_results = []
        self.evaluations_timesteps = []
        self.evaluations_length = []
        # For computing success rate
        self._is_success_buffer = []
        self.evaluations_successes = []
    def _init_callback(self) -> None:
        """Warn on env-type mismatch and create output folders."""
        # Does not work in some corner cases, where the wrapper is not the same
        if not isinstance(self.training_env, type(self.eval_env)):
            # NOTE(review): the two f-string/literal parts concatenate without a
            # separator, so the message reads "...same type<env repr>" — confirm
            # whether a ": " separator was intended.
            warnings.warn("Training and eval env are not of the same type" f"{self.training_env} != {self.eval_env}")
        # Create folders if needed
        if self.best_model_save_path is not None:
            os.makedirs(self.best_model_save_path, exist_ok=True)
        if self.log_path is not None:
            os.makedirs(os.path.dirname(self.log_path), exist_ok=True)
    def _log_success_callback(self, locals_: Dict[str, Any], globals_: Dict[str, Any]) -> None:
        """
        Callback passed to the ``evaluate_policy`` function
        in order to log the success rate (when applicable),
        for instance when using HER.
        :param locals_:
        :param globals_:
        """
        info = locals_["info"]
        if locals_["done"]:
            # ``is_success`` is only present for envs that report it (e.g. goal envs).
            maybe_is_success = info.get("is_success")
            if maybe_is_success is not None:
                self._is_success_buffer.append(maybe_is_success)
    def _on_step(self) -> bool:
        """Run a full evaluation every ``eval_freq`` calls.

        Returns False only when a child callback (triggered on a new best
        model) requests that training stop.
        """
        if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:
            # Sync training and eval env if there is VecNormalize
            sync_envs_normalization(self.training_env, self.eval_env)
            # Reset success rate buffer
            self._is_success_buffer = []
            episode_rewards, episode_lengths = evaluate_policy(
                self.model,
                self.eval_env,
                n_eval_episodes=self.n_eval_episodes,
                render=self.render,
                deterministic=self.deterministic,
                return_episode_rewards=True,
                warn=self.warn,
                callback=self._log_success_callback,
            )
            if self.log_path is not None:
                self.evaluations_timesteps.append(self.num_timesteps)
                self.evaluations_results.append(episode_rewards)
                self.evaluations_length.append(episode_lengths)
                kwargs = {}
                # Save success log if present
                if len(self._is_success_buffer) > 0:
                    self.evaluations_successes.append(self._is_success_buffer)
                    kwargs = dict(successes=self.evaluations_successes)
                np.savez(
                    self.log_path,
                    timesteps=self.evaluations_timesteps,
                    results=self.evaluations_results,
                    ep_lengths=self.evaluations_length,
                    **kwargs,
                )
            mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)
            mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)
            self.last_mean_reward = mean_reward
            if self.verbose > 0:
                print(f"Eval num_timesteps={self.num_timesteps}, " f"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}")
                print(f"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}")
            # Add to current Logger
            self.logger.record("eval/mean_reward", float(mean_reward))
            self.logger.record("eval/mean_ep_length", mean_ep_length)
            if len(self._is_success_buffer) > 0:
                success_rate = np.mean(self._is_success_buffer)
                if self.verbose > 0:
                    print(f"Success rate: {100 * success_rate:.2f}%")
                self.logger.record("eval/success_rate", success_rate)
            # Dump log so the evaluation results are printed with the correct timestep
            # NOTE(review): the key contains a space ("time/total timesteps");
            # consumers expecting the conventional "time/total_timesteps" key
            # should confirm which spelling is intended.
            self.logger.record("time/total timesteps", self.num_timesteps, exclude="tensorboard")
            self.logger.dump(self.num_timesteps)
            if mean_reward > self.best_mean_reward:
                if self.verbose > 0:
                    print("New best mean reward!")
                if self.best_model_save_path is not None:
                    self.model.save(os.path.join(self.best_model_save_path, "best_model"))
                self.best_mean_reward = mean_reward
                # Trigger callback if needed
                if self.callback is not None:
                    return self._on_event()
        return True
    def update_child_locals(self, locals_: Dict[str, Any]) -> None:
        """
        Update the references to the local variables.
        :param locals_: the local variables during rollout collection
        """
        if self.callback:
            self.callback.update_locals(locals_)
class StopTrainingOnRewardThreshold(BaseCallback):
    """
    Stop the training once a threshold in episodic reward
    has been reached (i.e. when the model is good enough).
    It must be used with the ``EvalCallback``.

    :param reward_threshold: Minimum expected reward per episode
        to stop training.
    :param verbose: verbosity level; when > 0 the stop reason is printed.
    """

    def __init__(self, reward_threshold: float, verbose: int = 0):
        super(StopTrainingOnRewardThreshold, self).__init__(verbose=verbose)
        self.reward_threshold = reward_threshold

    def _on_step(self) -> bool:
        """Return False (stop training) once the parent's best mean reward
        reaches ``reward_threshold``."""
        # Bug fix: the message previously referred to a non-existent
        # ``StopTrainingOnMinimumReward`` class.
        assert self.parent is not None, (
            "``StopTrainingOnRewardThreshold`` callback must be used with an ``EvalCallback``"
        )
        # Convert np.bool_ to bool, otherwise callback() is False won't work
        continue_training = bool(self.parent.best_mean_reward < self.reward_threshold)
        if self.verbose > 0 and not continue_training:
            # Bug fix: removed the stray double space in the message.
            print(
                f"Stopping training because the mean reward {self.parent.best_mean_reward:.2f} "
                f"is above the threshold {self.reward_threshold}"
            )
        return continue_training
class EveryNTimesteps(EventCallback):
    """
    Trigger a child callback every ``n_steps`` timesteps.

    :param n_steps: Number of timesteps between two triggers.
    :param callback: Callback that will be called when the event is triggered.
    """

    def __init__(self, n_steps: int, callback: BaseCallback):
        super().__init__(callback)
        self.n_steps = n_steps
        self.last_time_trigger = 0

    def _on_step(self) -> bool:
        elapsed = self.num_timesteps - self.last_time_trigger
        if elapsed < self.n_steps:
            # Not enough timesteps since the last trigger: keep training.
            return True
        self.last_time_trigger = self.num_timesteps
        return self._on_event()
class StopTrainingOnMaxEpisodes(BaseCallback):
    """
    Stop the training once a maximum number of episodes are played.
    For multiple environments presumes that, the desired behavior is that the agent trains on each env for ``max_episodes``
    and in total for ``max_episodes * n_envs`` episodes.
    :param max_episodes: Maximum number of episodes to stop training.
    :param verbose: Select whether to print information about when training ended by reaching ``max_episodes``
    """
    def __init__(self, max_episodes: int, verbose: int = 0):
        super(StopTrainingOnMaxEpisodes, self).__init__(verbose=verbose)
        self.max_episodes = max_episodes
        # Provisional value; scaled by the number of envs in ``_init_callback``.
        self._total_max_episodes = max_episodes
        self.n_episodes = 0
    def _init_callback(self) -> None:
        """Scale the episode budget by the number of parallel environments."""
        # At start set total max according to number of environments
        self._total_max_episodes = self.max_episodes * self.training_env.num_envs
    def _on_step(self) -> bool:
        """Count finished episodes; return False once the budget is exhausted."""
        # Checking for both 'done' and 'dones' keywords because:
        # Some models use keyword 'done' (e.g.,: SAC, TD3, DQN, DDPG)
        # While some models use keyword 'dones' (e.g.,: A2C, PPO)
        done_array = np.array(self.locals.get("done") if self.locals.get("done") is not None else self.locals.get("dones"))
        self.n_episodes += np.sum(done_array).item()
        continue_training = self.n_episodes < self._total_max_episodes
        if self.verbose > 0 and not continue_training:
            mean_episodes_per_env = self.n_episodes / self.training_env.num_envs
            mean_ep_str = (
                f"with an average of {mean_episodes_per_env:.2f} episodes per env" if self.training_env.num_envs > 1 else ""
            )
            print(
                f"Stopping training with a total of {self.num_timesteps} steps because the "
                f"{self.locals.get('tb_log_name')} model reached max_episodes={self.max_episodes}, "
                f"by playing for {self.n_episodes} episodes "
                f"{mean_ep_str}"
            )
        return continue_training
| 36.620561 | 124 | 0.638934 |
11f36f60faafc820eca1bc27b4fe4c5f54aaa139 | 917 | py | Python | services/recommendsystem/recomv1/v1/service.py | X5GON/lamapi | 0558c3b7af520ab83bdbd29e1b1b9b87bdc147b0 | [
"BSD-2-Clause"
] | null | null | null | services/recommendsystem/recomv1/v1/service.py | X5GON/lamapi | 0558c3b7af520ab83bdbd29e1b1b9b87bdc147b0 | [
"BSD-2-Clause"
] | null | null | null | services/recommendsystem/recomv1/v1/service.py | X5GON/lamapi | 0558c3b7af520ab83bdbd29e1b1b9b87bdc147b0 | [
"BSD-2-Clause"
] | null | null | null | from flask import request
from flask_restx import Resource, Namespace
from .core import get_resource_recommend_v1
from ....iomodels import input_def_recommend_v1, output_def
# API namespace grouping the KNN recommendation endpoints.
ns_recomv1 = Namespace('recommendsystem',
                       description='First version of the recommendation system based on KNN models')
# Register the request/response schemas on the namespace.
input_def = ns_recomv1.model(*input_def_recommend_v1)
# NOTE: this rebinds the imported ``output_def`` with the registered model.
output_def = ns_recomv1.model(*output_def)
@ns_recomv1.route("/v1")
class GetRecommendv1(Resource):
    '''Recommendation endpoint backed by the KNN models (v1).'''
    @ns_recomv1.expect(input_def, validate=True)
    @ns_recomv1.marshal_with(output_def)
    def post(self):
        '''Compute the recommendation list based on Knn models'''
        # The validated JSON body is forwarded as keyword arguments.
        return {'output': get_resource_recommend_v1(**request.json)}
@ns_recomv1.errorhandler
def default_error_handler(error):
    '''Fallback handler: report the error message with its HTTP status (500 default).'''
    status = getattr(error, 'code', 500)
    return {'message': str(error)}, status
| 35.269231 | 100 | 0.725191 |
4aacf3a95f0971856b7956ab982174f5ee2da027 | 16,716 | py | Python | hoomd/data/local_access.py | yurivict/hoomd-blue | 52bbbb5202ad73c576a83b47633d15f3b052af77 | [
"BSD-3-Clause"
] | null | null | null | hoomd/data/local_access.py | yurivict/hoomd-blue | 52bbbb5202ad73c576a83b47633d15f3b052af77 | [
"BSD-3-Clause"
] | null | null | null | hoomd/data/local_access.py | yurivict/hoomd-blue | 52bbbb5202ad73c576a83b47633d15f3b052af77 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2009-2022 The Regents of the University of Michigan.
# Part of HOOMD-blue, released under the BSD 3-Clause License.
"""Access simulation state data directly."""
from abc import ABC, abstractmethod
from hoomd import Box
from hoomd import _hoomd
class _LocalAccess(ABC):
    """Base for zero-copy access to C++ per-rank data buffers.

    Subclasses declare ``_fields`` (attribute name -> C++ getter name) and
    ``_array_cls`` (the wrapper exposing raw buffers). Returned arrays are
    only valid while the owning context manager is entered.
    """
    __slots__ = ('_entered', '_accessed_fields', '_cpp_obj')
    # Fields available on every accessor, independent of ``_fields``.
    _global_fields = {'rtag': 'getRTags'}
    @property
    @abstractmethod
    def _fields(self):
        """dict: maps Python attribute names to C++ getter method names."""
        pass
    @property
    @abstractmethod
    def _array_cls(self):
        """Array wrapper class used to expose the raw buffers."""
        pass
    def __init__(self):
        self._entered = False
        self._accessed_fields = dict()
    def __getattr__(self, attr):
        # Serve cached arrays first so repeated access returns the same object.
        if attr in self._accessed_fields:
            return self._accessed_fields[attr]
        elif attr in self._global_fields:
            buff = getattr(self._cpp_obj, self._global_fields[attr])()
        else:
            # Strip any ghost_/_with_ghost decoration to find the base field.
            raw_attr, flag = self._get_raw_attr_and_flag(attr)
            if raw_attr in self._fields:
                buff = getattr(self._cpp_obj, self._fields[raw_attr])(flag)
            else:
                raise AttributeError("{} object has no attribute {}".format(
                    type(self), attr))
        # Wrap the raw buffer; the lambda invalidates the array once the
        # context is exited (``_entered`` becomes False).
        self._accessed_fields[attr] = arr = self._array_cls(
            buff, lambda: self._entered)
        return arr
    def _get_raw_attr_and_flag(self, attr):
        """Split ``attr`` into its base field name and ghost-data flag.

        ``ghost_<f>`` selects ghost particles only, ``<f>_with_ghost`` selects
        owned plus ghost particles, plain ``<f>`` selects owned only.
        """
        ghosts_only = attr.startswith("ghost_")
        with_ghosts = attr.endswith("_with_ghost")
        raw_attr = attr.replace("_with_ghost", "").replace("ghost_", "")
        if ghosts_only and with_ghosts:
            raise ValueError("Attribute cannot be both prefixed with ghost_ "
                             "and suffixed with _with_ghost")
        elif ghosts_only:
            return raw_attr, _hoomd.GhostDataFlag.ghost
        elif with_ghosts:
            return raw_attr, _hoomd.GhostDataFlag.both
        else:
            return raw_attr, _hoomd.GhostDataFlag.standard
    def __setattr__(self, attr, value):
        """Assign into the underlying buffer (``arr[:] = value``) for data
        fields; slot attributes are set normally."""
        if attr in self.__slots__:
            super().__setattr__(attr, value)
            return
        try:
            arr = getattr(self, attr)
        except AttributeError:
            raise AttributeError("{} object has no attribute {}.".format(
                self.__class__, attr))
        else:
            if arr.read_only:
                raise RuntimeError("Attribute {} is not settable.".format(attr))
            arr[:] = value
    def _enter(self):
        # Acquire the C++ side and mark cached arrays as valid.
        self._cpp_obj.enter()
        self._entered = True
    def _exit(self):
        # Release the C++ side and drop the cache so stale arrays are invalid.
        self._cpp_obj.exit()
        self._entered = False
        self._accessed_fields = dict()
class ParticleLocalAccessBase(_LocalAccess):
    """Class for directly accessing HOOMD-blue particle data.
    Attributes:
        typeid ((N_particles) `hoomd.data.array` object of ``float``):
            The integer type of a particle
        tag ((N_particles) `hoomd.data.array` object of ``int``):
            The tag of a particle. HOOMD-blue uses spatial sorting to improve
            cache efficiency in particle look-ups. This means the ordering of
            the array changes. However, particle tags remain constant. This
            means that if ``particles.tag[0]`` is 1, then later whatever
            particle has a tag of 1 later in the simulation is the same
            particle.
        rtag ((N_particles_global) `hoomd.data.array` object of ``int``):
            The reverse tag of a particle. This means that the value
            ``particles.rtag[0]`` represents the current index accessing data
            for the particle with tag 0.
        position ((N_particles, 3) `hoomd.data.array` object of ``float``):
            particle positions :math:`[\\mathrm{length}]`
        image ((N_particles, 3) `hoomd.data.array` object of ``int``):
            The periodic image a particle occupies
        velocity ((N_particles, 3) `hoomd.data.array` object of ``float``):
            particle velocities :math:`[\\mathrm{velocity}]`
        acceleration ((N_particles, 3) `hoomd.data.array` object of ``float``):
            particle accelerations
            :math:`[\\mathrm{velocity} \\cdot \\mathrm{time}^{-1}]`
        mass ((N_particles) `hoomd.data.array` object of ``float``):
            particles' masses :math:`[\\mathrm{mass}]`
        orientation ((N_particles, 4) `hoomd.data.array` object of ``float``):
            particle orientations expressed as quaternions
        angmom ((N_particles, 4) `hoomd.data.array` object of \
            ``float``):
            particle angular momenta expressed as quaternions
            :math:`[\\mathrm{mass} \\cdot \\mathrm{velocity} \\cdot
            \\mathrm{length}]`
        moment_inertia ((N_particles, 3) `hoomd.data.array` object of \
            ``float``):
            particle principal moments of inertia
            :math:`[\\mathrm{mass} \\cdot \\mathrm{length}^2]`
        charge ((N_particles) `hoomd.data.array` object of ``float``):
            particle electrical charges :math:`[\\mathrm{charge}]`
        diameter ((N_particles) `hoomd.data.array` object of ``float``):
            particle diameters :math:`[\\mathrm{length}]`
        body ((N_particles) `hoomd.data.array` object of ``int``):
            The id of the rigid body the particle is in.
        net_force ((N_particles, 3) `hoomd.data.array` object of ``float``):
            net force on particle :math:`[\\mathrm{force}]`
        net_torque ((N_particles, 3) `hoomd.data.array` object of ``float``):
            net torque on particle
            :math:`[\\mathrm{force} \\cdot \\mathrm{length}]`
        net_virial ((N_particles, 3) `hoomd.data.array` object of ``float``):
            net virial on particle :math:`[\\mathrm{energy}]`
        net_energy ((N_particles,) `hoomd.data.array` object of ``float``):
            net energy of a particle (accounts for duplicate counting of an
            interaction). :math:`[\\mathrm{energy}]`
    Note:
        That changing some attributes like (``velocity`` or ``acceleration``)
        may not change the trajectory of the system. Integration of the
        equations of motion do not necessarily use velocity or acceleration
        directly. This is also true in HOOMD-blue's MD integration methods (see
        `hoomd.md.methods`)
    """
    @property
    @abstractmethod
    def _cpp_cls(self):
        """C++ local-access class wrapping the particle data."""
        pass
    # Maps Python attribute names to C++ getter names; resolved lazily in
    # ``_LocalAccess.__getattr__``.
    _fields = {
        'position': 'getPosition',
        'typeid': 'getTypes',
        'velocity': 'getVelocities',
        'mass': 'getMasses',
        'acceleration': 'getAcceleration',
        'orientation': 'getOrientation',
        'angmom': 'getAngularMomentum',
        'moment_inertia': 'getMomentsOfInertia',
        'charge': 'getCharge',
        'diameter': 'getDiameter',
        'image': 'getImages',
        'tag': 'getTags',
        'rtag': 'getRTags',
        'body': 'getBodies',
        'net_force': 'getNetForce',
        'net_torque': 'getNetTorque',
        'net_virial': 'getNetVirial',
        'net_energy': 'getNetEnergy'
    }
    def __init__(self, state):
        """Wrap the C++ particle data of ``state`` for local access."""
        super().__init__()
        self._cpp_obj = self._cpp_cls(state._cpp_sys_def.getParticleData())
class _GroupLocalAccess(_LocalAccess):
    """Shared base for bond/angle/dihedral/improper/constraint/pair accessors.

    Subclasses provide ``_cpp_cls`` and ``_cpp_get_data_method_name``.
    """
    @property
    @abstractmethod
    def _cpp_cls(self):
        """C++ local-access class wrapping this group's data."""
        pass
    @property
    @abstractmethod
    def _cpp_get_data_method_name(self):
        """Name of the ``_cpp_sys_def`` method that returns the group data."""
        pass
    # Maps Python attribute names to C++ getter names; note the particle
    # membership array is exposed as ``group``.
    _fields = {
        'typeid': 'getTypeVal',
        'group': 'getMembers',
        'tag': 'getTags',
        'rtag': 'getRTags'
    }
    def __init__(self, state):
        super().__init__()
        self._cpp_obj = self._cpp_cls(
            getattr(state._cpp_sys_def, self._cpp_get_data_method_name)())
class BondLocalAccessBase(_GroupLocalAccess):
    """Class for directly accessing HOOMD-blue bond data.
    Attributes:
        typeid ((N_bonds) `hoomd.data.array` object of ``int``):
            The integer type of a bond.
        group ((N_bonds, 2) `hoomd.data.array` object of ``int``):
            The tags of particles in a bond.
        tag ((N_bonds) `hoomd.data.array` object of ``int``):
            The tag of the bond. HOOMD-blue uses spatial sorting to improve
            cache efficiency in bond look-ups. This means the ordering of the
            array changes. However, bond tags remain constant. This means that
            if ``bond.tag[0]`` is 1, then later whatever bond has a tag of 1
            later in the simulation is the same bond.
        rtag ((N_bonds_global) `hoomd.data.array` object of ``int``): the
            reverse tag of a bond. This means that the value ``bond.rtag[0]``
            represents the current index to access data for the bond with tag 0.
    """
    # ``getBondData`` on the system definition supplies the C++ bond data.
    _cpp_get_data_method_name = "getBondData"
class AngleLocalAccessBase(_GroupLocalAccess):
    """Class for directly accessing HOOMD-blue angle data.
    Attributes:
        typeid ((N_angles) `hoomd.data.array` object of ``int``):
            The integer type of a angle.
        group ((N_angles, 3) `hoomd.data.array` object of ``int``):
            The tags of particles in a angle.
        tag ((N_angles) `hoomd.data.array` object of ``int``):
            The tag of the angle. HOOMD-blue uses spatial sorting to improve
            cache efficiency in angle look-ups. This means the ordering of the
            array changes. However, angle tags remain constant. This means
            that if ``angle.tag[0]`` is 1, then later whatever angle has a
            tag of 1 later in the simulation is the same angle.
        rtag ((N_angles_global) `hoomd.data.array` object of ``int``):
            The reverse tag of a angle. This means that the value
            ``angle.rtag[0]`` represents the current index for accessing data
            for the angle with tag 0.
    """
    # ``getAngleData`` on the system definition supplies the C++ angle data.
    _cpp_get_data_method_name = "getAngleData"
class DihedralLocalAccessBase(_GroupLocalAccess):
    """Class for directly accessing HOOMD-blue dihedral data.
    Attributes:
        typeid ((N_dihedrals) `hoomd.data.array` object of ``int``): The integer
            type of a dihedral.
        group ((N_dihedrals, 4) `hoomd.data.array` object of ``int``): the
            tags of the four particles in a dihedral.
        tag ((N_dihedrals) `hoomd.data.array` object of ``int``):
            The tag of the dihedral. HOOMD-blue uses spatial sorting to improve
            cache efficiency in dihedral look-ups. This means the ordering of
            the array changes. However, dihedral tags remain constant. This
            means that if ``dihedral.tag[0]`` is 1, then later whatever dihedral
            has a tag of 1 later in the simulation is the same dihedral.
        rtag ((N_dihedrals_global) `hoomd.data.array` object of ``int``):
            The reverse tag of a dihedral. This means that the value
            ``dihedral.rtag[0]`` represents the current index for accessing data
            for the dihedral with tag 0.
    """
    # ``getDihedralData`` on the system definition supplies the C++ data.
    _cpp_get_data_method_name = "getDihedralData"
class ImproperLocalAccessBase(_GroupLocalAccess):
    """Class for directly accessing HOOMD-blue improper data.
    Attributes:
        typeid ((N_impropers) `hoomd.data.array` object of ``int``):
            The integer type of a improper.
        group ((N_impropers, 4) `hoomd.data.array` object of ``int``):
            The tags of the four particles in a improper.
        tag ((N_impropers) `hoomd.data.array` object of ``int``):
            The tag of the improper. HOOMD-blue uses spatial sorting to improve
            cache efficiency in improper look-ups. This means the ordering of
            the array changes. However, improper tags remain constant. This
            means that if ``improper.tag[0]`` is 1, then later whatever improper
            has a tag of 1 later in the simulation is the same improper.
        rtag ((N_impropers_global) `hoomd.data.array` object of ``int``):
            The reverse tag of a improper. This means that the value
            ``improper.rtag[0]`` represents the current index for accessing data
            for the improper with tag 0.
    """
    # ``getImproperData`` on the system definition supplies the C++ data.
    _cpp_get_data_method_name = "getImproperData"
class ConstraintLocalAccessBase(_GroupLocalAccess):
    """Class for directly accessing HOOMD-blue constraint data.
    Attributes:
        value ((N_constraints) `hoomd.data.array` object of ``float``): The
            constraint value (the fixed distance).
        group ((N_constraints, 2) `hoomd.data.array` object of ``int``): the
            tags of the two particles in a constraint.
        tag ((N_constraints) `hoomd.data.array` object of ``int``):
            The tag of the constraint. HOOMD-blue uses spatial sorting to
            improve cache efficiency in constraint look-ups. This means the
            ordering of the array changes. However, constraint tags remain
            constant. This means that if ``constraint.tag[0]`` is 1, then later
            whatever constraint has a tag of 1 later in the simulation is the
            same constraint.
        rtag ((N_constraints_global) `hoomd.data.array` object of ``int``):
            The reverse tag of a constraint. This means that the value
            ``constraint.rtag[0]`` represents the current index for accessing
            data for the constraint with tag 0.
    """
    # Same layout as ``_GroupLocalAccess._fields`` except the type slot holds
    # the constraint ``value`` rather than a ``typeid``.
    _fields = {
        'value': 'getTypeVal',
        'group': 'getMembers',
        'tag': 'getTags',
        'rtag': 'getRTags'
    }
    _cpp_get_data_method_name = "getConstraintData"
class PairLocalAccessBase(_GroupLocalAccess):
    """Class for directly accessing HOOMD-blue special pair data.
    Attributes:
        typeid ((N_pairs) `hoomd.data.array` object of ``int``): The type of
            special pair.
        group ((N_pairs, 2) `hoomd.data.array` object of ``int``): the tags of
            the two particles in a special pair.
        tag ((N_special_pairs) `hoomd.data.array` object of ``int``):
            The tag of the special pair. HOOMD-blue uses spatial sorting to
            improve cache efficiency in special pair look-ups. This means the
            ordering of the array changes. However, special pair tags remain
            constant. This means that if ``pair.tag[0]`` is 1, then
            later whatever special pair has a tag of 1 later in the simulation
            is the same special pair.
        rtag ((N_special_pairs_global) `hoomd.data.array` object of ``int``):
            The reverse tag of a special pair. This means that the value
            ``pair.rtag[0]`` represents the current index for accessing
            data for the special pair with tag 0.
    """
    # ``getPairData`` on the system definition supplies the C++ data.
    _cpp_get_data_method_name = "getPairData"
class _LocalSnapshot:
    """Context manager exposing per-rank (local) snapshot data accessors."""

    # Accessor attributes (set by subclasses); entered and exited in this
    # fixed order.
    _accessor_names = (
        '_particles', '_bonds', '_angles', '_dihedrals',
        '_impropers', '_constraints', '_pairs',
    )

    def __init__(self, state):
        self._state = state
        self._box = state.box
        self._local_box = state._cpp_sys_def.getParticleData().getBox()

    @property
    def global_box(self):
        """hoomd.Box: The global simulation box."""
        return Box.from_box(self._box)

    @property
    def local_box(self):
        """hoomd.Box: This rank's box under the domain decomposition."""
        return Box.from_box(Box._from_cpp(self._local_box))

    @property
    def particles(self):
        """hoomd.data.ParticleLocalAccessBase: Local particle data."""
        return self._particles

    @property
    def bonds(self):
        """hoomd.data.BondLocalAccessBase: Local bond data."""
        return self._bonds

    @property
    def angles(self):
        """hoomd.data.AngleLocalAccessBase: Local angle data."""
        return self._angles

    @property
    def dihedrals(self):
        """hoomd.data.DihedralLocalAccessBase: Local dihedral data."""
        return self._dihedrals

    @property
    def impropers(self):
        """hoomd.data.ImproperLocalAccessBase: Local improper data."""
        return self._impropers

    @property
    def constraints(self):
        """hoomd.data.ConstraintLocalAccessBase: Local constraint data."""
        return self._constraints

    @property
    def pairs(self):
        """hoomd.data.PairLocalAccessBase: Local special pair data."""
        return self._pairs

    def __enter__(self):
        self._state._in_context_manager = True
        for name in self._accessor_names:
            getattr(self, name)._enter()
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self._state._in_context_manager = False
        for name in self._accessor_names:
            getattr(self, name)._exit()
eca7adcece891360f3f3467587d14e1c9fece2ab | 1,046 | py | Python | profile.py | Sergio0896/csc496 | f9d2a50daed9155daa93588230bd6554c7a27145 | [
"MIT"
] | null | null | null | profile.py | Sergio0896/csc496 | f9d2a50daed9155daa93588230bd6554c7a27145 | [
"MIT"
] | null | null | null | profile.py | Sergio0896/csc496 | f9d2a50daed9155daa93588230bd6554c7a27145 | [
"MIT"
] | 3 | 2019-10-29T16:59:08.000Z | 2019-12-05T03:37:20.000Z | import geni.portal as portal
import geni.rspec.pg as pg
import geni.rspec.igext as IG
# Create a portal context.
# Create a portal context.
pc = portal.Context()
# Create a Request object to start building the RSpec.
request = pc.makeRequestRSpec()
tourDescription = \
"""
This profile provides the template for a full research cluster with head node, scheduler, compute nodes, and shared file systems.
At the moment, we start with a single node running MPI.
"""
# Setup the Tour info with the above description and instructions.
tour = IG.Tour()
tour.Description(IG.Tour.TEXT,tourDescription)
request.addTour(tour)
# Request one Xen VM with 4 cores / 4 GB RAM running the standard CentOS 7
# image, reachable via a public control IP.
node = request.XenVM("compute-node")
node.cores = 4
node.ram = 4096
node.disk_image = "urn:publicid:IDN+emulab.net+image+emulab-ops:CENTOS7-64-STD"
node.routable_control_ip = "true"
# Install MPI at boot: make the setup script executable, then run it.
node.addService(pg.Execute(shell="sh", command="sudo chmod 755 /local/repository/install_mpi.sh"))
node.addService(pg.Execute(shell="sh", command="sudo /local/repository/install_mpi.sh"))
# Print the RSpec to the enclosing page.
pc.printRequestRSpec(request)
| 30.764706 | 129 | 0.763862 |
66353880b325202dce9dcb4361c0f7b85af65dd5 | 126 | py | Python | chapter04/4-10.py | alberthao/Python-Crash-Course-Homework | 105ffb3075db075425d6cf0d08d9837ef0548866 | [
"MIT"
] | 1 | 2021-07-13T09:05:18.000Z | 2021-07-13T09:05:18.000Z | chapter04/4-10.py | alberthao/Python-Crash-Course-Homework | 105ffb3075db075425d6cf0d08d9837ef0548866 | [
"MIT"
] | null | null | null | chapter04/4-10.py | alberthao/Python-Crash-Course-Homework | 105ffb3075db075425d6cf0d08d9837ef0548866 | [
"MIT"
] | 2 | 2021-02-16T09:43:20.000Z | 2021-11-27T07:02:48.000Z | msg1 = ['The','first','three','items','in','the','list','are:']
print(msg1)
print(msg1[0:3])
print(msg1[2:6])
print(msg1[-3:]) | 25.2 | 63 | 0.595238 |
a3f6bedfc3fd34f0b98647d1d7ebf9f9321a3509 | 2,166 | py | Python | py/dcp/problems/graph/word_ladder.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 1 | 2020-06-26T13:28:43.000Z | 2020-06-26T13:28:43.000Z | py/dcp/problems/graph/word_ladder.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 7 | 2021-11-18T19:46:08.000Z | 2022-03-12T01:03:01.000Z | py/dcp/problems/graph/word_ladder.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | null | null | null | """
Create stepword chain.
Given a start word, an end word, and a dictionary of valid words, find the shortest transformation sequence from start to end
such that only one letter is changed at each step of the sequence, and each transformed word exists in the dictionary. If
there is no possible transformation, return null. Each word in the dictionary has the same length as start and end and is lowercase.
For example,
given start = "dog", end = "cat", and dictionary = {"dot", "dop", "dat", "cat"},
return ["dog", "dot", "dat", "cat"]
given start = "dog", end = "cat", and dictionary = {"dot", "tod", "dat", "dar"},
return null as there is no possible transformation from "dog" to "cat".
"""
from collections import defaultdict
from dcp.problems.graph.adj_mat_graph import AMGraph
from dcp.problems.graph.common import find_path
def word_ladder1(start : str, end : str, words : set[str]):
    """Return the shortest stepword chain from ``start`` to ``end``.

    Each step changes exactly one letter and every intermediate word must
    belong to ``words`` (``start`` and ``end`` are implicitly allowed).

    Args:
        start: the starting word.
        end: the target word.
        words: collection of valid words, all the same length as ``start``.
            (Fixed: previously annotated as ``dict[str, str]`` although it is
            only ever iterated as a set of words.)

    Returns:
        The transformation path as a list of words, or ``None``/empty when no
        path exists (as reported by ``find_path``).
    """
    all_words = set(words) | {start, end}

    def are_similar(word1, word2) -> bool:
        """Return True when the two words differ in at most one position."""
        if len(word1) != len(word2):
            return False
        differences = 0
        for ch1, ch2 in zip(word1, word2):
            if ch1 != ch2:
                differences += 1
                if differences > 1:
                    # Early exit: more than one edit already found.
                    return False
        return True

    def gen_links(candidates):
        """Build the adjacency map linking every pair of similar words."""
        links = defaultdict(list)
        for word1 in candidates:
            for word2 in candidates:
                if word1 != word2 and are_similar(word1, word2):
                    links[word1].append(word2)
        return links

    graph = AMGraph(gen_links(all_words))
    return find_path(graph, start, end)
ec988c5ebbcc7e6e1b4f62a5708e3a4a526d9754 | 1,399 | py | Python | populate/ropensci_libraries/throughputpy/goodHit.py | throughput-ec/throughputdb | cd59844e6bac8d3e9992274c659343d305986f93 | [
"MIT"
] | 4 | 2017-11-09T19:57:09.000Z | 2021-11-09T18:15:23.000Z | populate/ropensci_libraries/throughputpy/goodHit.py | throughput-ec/throughputdb | cd59844e6bac8d3e9992274c659343d305986f93 | [
"MIT"
] | 2 | 2018-09-03T19:11:25.000Z | 2019-06-10T17:39:12.000Z | populate/ropensci_libraries/throughputpy/goodHit.py | throughput-ec/throughputdb | cd59844e6bac8d3e9992274c659343d305986f93 | [
"MIT"
] | 6 | 2017-11-21T21:14:19.000Z | 2020-12-04T21:02:30.000Z | from re import search
import json
def goodHit(query, text):
    """Check for expected query call in file content.

    The two whitespace-separated tokens of ``query`` (a calling function and
    a package name) are matched against every fragment as a call
    (``func(...pkg``), a namespaced call (``pkg::``), or a vignette
    dependency (``VignetteDepends{...pkg``). The query and fragments are
    appended to ``pass_log.txt`` or ``fail_log.txt`` accordingly.

    Parameters
    ----------
    query : str
        Text string passed to the original GitHub code search query; must
        contain two whitespace-separated tokens.
    text : list
        The file contents: dicts with a ``'fragment'`` key holding the
        highlighted fragments.

    Returns
    -------
    bool
        True when at least one fragment matches one of the patterns.
    """
    strings = query.split(" ")
    matchlib = ".*" + strings[0] + r'\(([^\)]*' + strings[1] + ')'
    matchcal = strings[1] + r'\:\:'
    matchvig = r".*VignetteDepends\{([^\}])*" + strings[1]
    fragments = [entry.get('fragment') for entry in text]
    # Short-circuit on the first fragment matching any pattern (previously all
    # three pattern lists were materialized eagerly).
    output = any(
        search(pattern, fragment) is not None
        for fragment in fragments
        for pattern in (matchcal, matchlib, matchvig)
    )
    # Log both outcomes so match failures can be audited later; a context
    # manager guarantees the log file is closed (the original left files to
    # the GC and duplicated the dump code in both branches).
    logfile = "pass_log.txt" if output else "fail_log.txt"
    textdump = {'query': query, 'text': fragments}
    with open(logfile, "a") as f:
        f.write(json.dumps(textdump) + "\n")
    return output
| 31.795455 | 77 | 0.566119 |
b0e18aa9213a6d10a41229e1af33c28ac1597fe9 | 268 | py | Python | tests/artificial/transf_Difference/trend_LinearTrend/cycle_12/ar_/test_artificial_1024_Difference_LinearTrend_12__0.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_Difference/trend_LinearTrend/cycle_12/ar_/test_artificial_1024_Difference_LinearTrend_12__0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_Difference/trend_LinearTrend/cycle_12/ar_/test_artificial_1024_Difference_LinearTrend_12__0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Run the artificial-dataset benchmark: 1024 daily points, linear trend,
# 12-step cycle, "Difference" transform, zero noise, no exogenous data, AR order 0.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 12, transform = "Difference", sigma = 0.0, exog_count = 0, ar_order = 0);
a8c713e844c8a7567ade5cf8ff44e2474d228370 | 52,115 | py | Python | pyNastran/bdf/cards/nodes.py | 214929177/pyNastran | 73032d6ffd445ef085c124dde6b5e90a516a5b6a | [
"BSD-3-Clause"
] | null | null | null | pyNastran/bdf/cards/nodes.py | 214929177/pyNastran | 73032d6ffd445ef085c124dde6b5e90a516a5b6a | [
"BSD-3-Clause"
] | null | null | null | pyNastran/bdf/cards/nodes.py | 214929177/pyNastran | 73032d6ffd445ef085c124dde6b5e90a516a5b6a | [
"BSD-3-Clause"
] | 1 | 2021-10-14T03:52:44.000Z | 2021-10-14T03:52:44.000Z | """
All nodes are defined in this file. This includes:
* Node
* XPoint
* EPOINT
* SPOINT
* XPoints
* EPOINTs
* SPOINTs
* GRID
* GRDSET
* GRIDB
* POINT
* Ring
* RINGAX
* SEQGP
All ungrouped elements are Node objects.
The EPOINT/SPOINT classes refer to a single EPOINT/SPOINT. The
EPOINTs/SPOINTs classes are for multiple degrees of freedom
(e.g. an SPOINT card).
"""
from __future__ import annotations
from itertools import count
from typing import List, Union, Optional, Any, TYPE_CHECKING
import numpy as np
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.field_writer_8 import set_string8_blank_if_default
from pyNastran.bdf.field_writer_16 import set_string16_blank_if_default
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.cards.base_card import BaseCard, expand_thru
from pyNastran.bdf.cards.collpase_card import collapse_thru_packs
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double, double_or_blank, blank, integer_or_string,
integer_or_double, components_or_blank)
from pyNastran.bdf.field_writer_8 import print_card_8, print_float_8, print_int_card
from pyNastran.bdf.field_writer_16 import print_float_16, print_card_16
from pyNastran.bdf.field_writer_double import print_scientific_double, print_card_double
#u = str
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf_interface.typing import BDF, BDFCard, Coord, Element
class SEQGP(BaseCard):
    """defines the SEQGP class"""
    type = 'SEQGP'
    @classmethod
    def _init_from_empty(cls):
        # dummy instance used by automated card testing
        nids = 1
        seqids = [2, 3]
        return SEQGP(nids, seqids, comment='')
    def __init__(self, nids, seqids, comment=''):
        """
        Creates the SEQGP card
        Parameters
        ----------
        nid : int
            the node id
        seqid : int/float
            the superelement id
        comment : str; default=''
            a comment for the card
        """
        if comment:
            self.comment = comment
        # promote scalars so nids/seqids are always parallel lists
        if isinstance(nids, integer_types):
            nids = [nids]
        if isinstance(seqids, integer_types):
            seqids = [seqids]
        self.nids = nids
        self.seqids = seqids
    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a SEQGP card from ``BDF.add_card(...)``
        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        ncard = len(card) - 1
        assert len(card) > 1, 'len(SEQGP) = 1; card=%s' % card
        assert ncard % 2 == 0, card
        nids = []
        seqids = []
        # the fields after the card name alternate (nid, seqid) pairs
        for ifield in range(1, ncard, 2):
            nid = integer(card, ifield, 'nid')
            seqid = integer_or_double(card, ifield+1, 'seqid')
            nids.append(nid)
            seqids.append(seqid)
        return SEQGP(nids, seqids, comment=comment)
    def cross_reference(self, model: BDF) -> None:
        """SEQGP references no other cards; nothing to link"""
        pass
    def append(self, seqgp):
        # merges another SEQGP card's (nid, seqid) pairs into this one
        self.nids += seqgp.nids
        self.seqids += seqgp.seqids
    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a SEQGP card from the OP2
        Parameters
        ----------
        data : List[varies]
            a list of fields defined in OP2 format
        comment : str; default=''
            a comment for the card
        """
        nids, seqids = data
        return SEQGP(nids, seqids, comment=comment)
    def raw_fields(self):
        """
        Gets the fields in their unmodified form
        Returns
        -------
        fields : List[varies]
            the fields that define the card
        """
        list_fields = ['SEQGP']
        for nid, seqid in zip(self.nids, self.seqids):
            list_fields.append(nid)
            list_fields.append(seqid)
        return list_fields
    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """
        The writer method used by BDF.write_card
        Parameters
        ----------
        size : int; default=8
            unused
        is_double : bool; default=False
            unused
        """
        msg = self.comment
        list_fields = ['SEQGP']
        # flush a SEQGP line every 4 (nid, seqid) pairs
        for i, nid, seqid in zip(count(), self.nids, self.seqids):
            if i % 4 == 0 and i > 0:
                msg += print_card_8(list_fields)
                list_fields = ['SEQGP']
            list_fields.append(nid)
            list_fields.append(seqid)
        if len(list_fields) > 1:
            msg += print_card_8(list_fields)
        return msg
class XPoint(BaseCard):
    """common class for EPOINT/SPOINT"""

    @classmethod
    def _init_from_empty(cls):
        # dummy instance used by automated card testing
        nid = 1
        return cls(nid, comment='')

    def __init__(self, nid, comment):
        """
        Parameters
        ----------
        nid : int
            the EPOINT/SPOINT id
        comment : str
            a comment for the card
        """
        #Node.__init__(self)
        if comment:
            self.comment = comment
        self.nid = nid
        assert isinstance(nid, integer_types), nid

    @classmethod
    def _export_to_hdf5(cls, h5_file, model: BDF, nids: List[int]) -> None:
        """exports the nodes in a vectorized way"""
        #comments = []
        #for nid in nids:
            #node = model.nodes[nid]
            #comments.append(element.comment)
        #h5_file.create_dataset('_comment', data=comments)
        h5_file.create_dataset('nid', data=nids)

    @property
    def type(self):
        """dummy method for EPOINT/SPOINT classes"""
        raise NotImplementedError('This method should be overwritten by the parent class')

    def raw_fields(self):
        """
        Gets the fields in their unmodified form

        Returns
        -------
        fields : List[varies]
            the fields that define the card
        """
        lists_fields = []
        if isinstance(self.nid, integer_types):
            list_fields = [self.type, self.nid]
            lists_fields.append(list_fields)
        else:
            # bug fix: the singles pack was built but never appended, and the
            # append for the THRU packs sat outside the loop so only the last
            # pack survived; mirror compress_xpoints and append every pack
            singles, doubles = collapse_thru_packs(self.nid)
            if singles:
                lists_fields.append([self.type] + singles)
            for spoint_double in doubles:
                lists_fields.append([self.type] + spoint_double)
        return lists_fields

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """
        The writer method used by BDF.write_card

        Parameters
        ----------
        size : int; default=8
            unused
        is_double : bool; default=False
            unused
        """
        msg = self.comment
        lists_fields = self.repr_fields()
        for list_fields in lists_fields:
            if 'THRU' not in list_fields:
                msg += print_int_card(list_fields)
            else:
                msg += print_card_8(list_fields)
        return msg

    def cross_reference(self, model: BDF) -> None:
        """
        Cross links the card so referenced cards can be extracted directly

        Parameters
        ----------
        model : BDF()
            the BDF object
        """
        pass
class SPOINT(XPoint):
    """a single scalar point (SPOINT)"""
    type = 'SPOINT'

    def __init__(self, nid, comment=''):
        """
        Creates the SPOINT card

        Parameters
        ----------
        nid : int
            the SPOINT id
        comment : str; default=''
            a comment for the card
        """
        super().__init__(nid, comment)

    def get_position(self):
        # a scalar point has no physical location; report the origin
        return np.zeros(3)
class EPOINT(XPoint):
    """a single extra point (EPOINT)"""
    type = 'EPOINT'

    def __init__(self, nid, comment=''):
        """
        Creates the EPOINT card

        Parameters
        ----------
        nid : int
            the EPOINT id
        comment : str; default=''
            a comment for the card
        """
        super().__init__(nid, comment)
def write_xpoints(cardtype, points, comment=''):
    """writes SPOINTs/EPOINTs"""
    msg = comment
    if isinstance(points, dict):
        # dict of {id : SPOINT/EPOINT}; keep every point's own comment
        point_ids = []
        for point_id in sorted(points):
            point_ids.append(point_id)
            point = points[point_id]
            if point.comment:
                msg += point.comment
    else:
        point_ids = points
    for list_fields in compress_xpoints(cardtype, point_ids):
        # THRU packs may exceed pure-integer formatting, so fall back
        # to the generic 8-field writer
        writer = print_card_8 if 'THRU' in list_fields else print_int_card
        msg += writer(list_fields)
    return msg
def compress_xpoints(point_type, xpoints):
    """
    Gets the SPOINTs/EPOINTs in sorted, short form.

    uncompresed: SPOINT,1,3,5
    compressed:  SPOINT,1,3,5

    uncompresed: SPOINT,1,2,3,4,5
    compressed:  SPOINT,1,THRU,5

    uncompresed: SPOINT,1,2,3,4,5,7
    compressed:  SPOINT,7
                 SPOINT,1,THRU,5

    point_type = 'SPOINT'
    spoints = [1, 2, 3, 4, 5]
    fields = compressed_xpoints(point_type, spoints)
    >>> fields
    ['SPOINT', 1, 'THRU', 5]
    """
    # collapse_thru_packs needs a sorted list; it splits the ids into
    # loose singles and [start, 'THRU', end] packs
    singles, doubles = collapse_thru_packs(sorted(xpoints))
    lists_fields = []
    if singles:
        lists_fields.append([point_type] + singles)
    for double_pack in doubles:
        lists_fields.append([point_type] + double_pack)
    return lists_fields
class XPoints(BaseCard):
    """common class for EPOINTs and SPOINTs"""
    @property
    def type(self):
        """dummy method for EPOINTs/SPOINTs classes"""
        raise NotImplementedError('This method should be overwritten by the parent class')
    @classmethod
    def _init_from_empty(cls):
        # dummy instance used by automated card testing
        ids = [1]
        return cls(ids, comment='')
    def __init__(self, ids, comment=''):
        """
        Parameters
        ----------
        ids : int / List[int]
            the EPOINT/SPOINT ids; THRU ranges are expanded
        comment : str; default=''
            a comment for the card
        """
        #Node.__init__(self)
        if comment:
            self.comment = comment
        if isinstance(ids, integer_types):
            ids = [ids]
        # stored as a set, so duplicate ids collapse
        self.points = set(expand_thru(ids))
    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a SPOINT/EPOINT card from ``BDF.add_card(...)``
        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        points = []
        # fields may be ints or the string 'THRU'; expansion happens in __init__
        for i in range(1, len(card)):
            field = integer_or_string(card, i, 'ID%i' % i)
            points.append(field)
        return cls(points, comment=comment)
    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a SPOINT/EPOINT card from the OP2
        Parameters
        ----------
        data : List[varies]
            a list of fields defined in OP2 format
        comment : str; default=''
            a comment for the card
        """
        points = data
        assert isinstance(points, list), points
        assert isinstance(points[0], integer_types), points
        assert min(points) > 0, points
        return cls(points, comment=comment)
    def __len__(self):
        """
        Returns the number of degrees of freedom for the EPOINTs/SPOINTs class
        Returns
        -------
        ndofs : int
            the number of degrees of freedom
        """
        return len(self.points)
    def add_points(self, sList):
        """Adds more EPOINTs/SPOINTs to this object"""
        self.points = self.points.union(set(sList))
    def cross_reference(self, model: BDF) -> None:
        """
        Cross links the card so referenced cards can be extracted directly
        Parameters
        ----------
        model : BDF()
            the BDF object
        """
        pass
    def raw_fields(self):
        """
        Gets the fields in their unmodified form
        Returns
        -------
        fields : List[varies]
            the fields that define the card
        """
        points = list(self.points)
        points.sort()
        return [self.type] + points
    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """
        The writer method used by BDF.write_card
        Parameters
        ----------
        size : int; default=8
            unused
        is_double : bool; default=False
            unused
        """
        lists_fields = compress_xpoints(self.type, self.points)
        msg = self.comment
        for list_fields in lists_fields:
            if 'THRU' not in list_fields:
                msg += print_int_card(list_fields)
            else:
                msg += print_card_8(list_fields)
        return msg
class SPOINTs(XPoints):
    """
    Defines one or more scalar points with a single card.

    +--------+-----+------+-----+-----+-----+-----+-----+-----+
    |    1   |  2  |   3  |  4  |  5  |  6  |  7  |  8  |  9  |
    +========+=====+======+=====+=====+=====+=====+=====+=====+
    | SPOINT | ID1 | THRU | ID2 |     |     |     |     |     |
    +--------+-----+------+-----+-----+-----+-----+-----+-----+
    | SPOINT | ID1 | ID1  | ID3 | ID4 | ID5 | ID6 | ID7 | ID8 |
    +--------+-----+------+-----+-----+-----+-----+-----+-----+
    |        | ID8 | etc. |     |     |     |     |     |     |
    +--------+-----+------+-----+-----+-----+-----+-----+-----+
    """
    type = 'SPOINT'

    def __init__(self, ids, comment=''):
        """
        Creates the SPOINTs card that contains many SPOINTs

        Parameters
        ----------
        ids : List[int]
            SPOINT ids
        comment : str; default=''
            a comment for the card
        """
        super().__init__(ids, comment=comment)

    def create_spointi(self):
        """Creates individal SPOINT objects"""
        spoints = [SPOINT(nid) for nid in self.points]
        # propagate the source-file index when the parent card carries one
        if hasattr(self, 'ifile'):
            for spoint in spoints:
                spoint.ifile = self.ifile  # type: int
        return spoints
class EPOINTs(XPoints):
    """
    Defines one or more extra points with a single card.

    +--------+-----+------+-----+-----+-----+-----+-----+-----+
    |    1   |  2  |   3  |  4  |  5  |  6  |  7  |  8  |  9  |
    +========+=====+======+=====+=====+=====+=====+=====+=====+
    | EPOINT | ID1 | THRU | ID2 |     |     |     |     |     |
    +--------+-----+------+-----+-----+-----+-----+-----+-----+
    | EPOINT | ID1 | ID1  | ID3 | ID4 | ID5 | ID6 | ID7 | ID8 |
    +--------+-----+------+-----+-----+-----+-----+-----+-----+
    |        | ID8 | etc. |     |     |     |     |     |     |
    +--------+-----+------+-----+-----+-----+-----+-----+-----+
    """
    type = 'EPOINT'

    def __init__(self, ids, comment=''):
        """
        Creates the EPOINTs card that contains many EPOINTs

        Parameters
        ----------
        ids : List[int]
            EPOINT ids
        comment : str; default=''
            a comment for the card
        """
        super().__init__(ids, comment=comment)

    def create_epointi(self):
        """Creates individal EPOINT objects"""
        return [EPOINT(nid) for nid in self.points]
class GRDSET(BaseCard):
    """
    Defines default options for fields 3, 7, 8, and 9 of all GRID entries.

    +--------+-----+----+----+----+----+----+----+------+
    |    1   |  2  | 3  | 4  | 5  | 6  |  7 |  8 |   9  |
    +========+=====+====+====+====+====+====+====+======+
    | GRDSET |     | CP |    |    |    | CD | PS | SEID |
    +--------+-----+----+----+----+----+----+----+------+
    """
    type = 'GRDSET'

    #: allows the get_field method and update_field methods to be used
    _field_map = {2:'cp', 6:'cd', 7:'ps', 8:'seid'}

    @classmethod
    def _init_from_empty(cls):
        # dummy instance used by automated card testing
        cp = 0
        cd = 1
        ps = '34'
        seid = 0
        return GRDSET(cp, cd, ps, seid, comment='')

    def __init__(self, cp, cd, ps, seid, comment=''):
        """
        Creates the GRDSET card

        Parameters
        ----------
        cp : int; default=0
            the xyz coordinate frame
        cd : int; default=0
            the analysis coordinate frame
        ps : str; default=''
            Additional SPCs in the analysis coordinate frame (e.g. '123').
            This corresponds to DOF set ``SG``.
        seid : int; default=0
            superelement id
            TODO: how is this used by Nastran???
        comment : str; default=''
            a comment for the card
        """
        if comment:
            self.comment = comment
        #: Output Coordinate System
        self.cp = cp
        #: Analysis coordinate system
        self.cd = cd
        #: Default SPC constraint on undefined nodes
        self.ps = ps
        #: Superelement ID
        self.seid = seid
        self.cp_ref = None
        self.cd_ref = None
        self.seid_ref = None

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a GRDSET card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        #: Grid point coordinate system
        blank(card, 1, 'blank')
        cp = integer_or_blank(card, 2, 'cp', 0)
        blank(card, 3, 'blank')
        blank(card, 4, 'blank')
        blank(card, 5, 'blank')
        cd = integer_or_blank(card, 6, 'cd', 0)
        ps = str(integer_or_blank(card, 7, 'ps', ''))
        seid = integer_or_blank(card, 8, 'seid', 0)
        assert len(card) <= 9, 'len(GRDSET card) = %i\ncard=%s' % (len(card), card)
        return GRDSET(cp, cd, ps, seid, comment=comment)

    def cross_reference(self, model: BDF) -> None:
        """
        Cross links the card so referenced cards can be extracted directly

        Parameters
        ----------
        model : BDF()
            the BDF object
        """
        msg = ', which is required by the GRDSET'
        self.cp_ref = model.Coord(self.cp, msg=msg)
        self.cd_ref = model.Coord(self.cd, msg=msg)
        #self.seid = model.SuperElement(self.seid, msg)
        #self.seid_ref = self.seid

    def uncross_reference(self) -> None:
        """Removes cross-reference links"""
        self.cp = self.Cp()
        self.cd = self.Cd()
        self.cp_ref = None
        self.cd_ref = None
        self.seid_ref = None

    def Cd(self):
        """
        Gets the output coordinate system

        Returns
        -------
        cd : int
            the output coordinate system
        """
        if self.cd_ref is None:
            return self.cd
        # bug fix: this returned ``self.cd.cid``, which raises AttributeError
        # once the card is cross-referenced (self.cd is an int); use the
        # cross-referenced coordinate like Cp() does
        return self.cd_ref.cid

    def Cp(self):
        """
        Gets the analysis coordinate system

        Returns
        -------
        cp : int
            the analysis coordinate system
        """
        if self.cp_ref is None:
            return self.cp
        return self.cp_ref.cid

    def Ps(self):
        """
        Gets the GRID-based SPC

        Returns
        -------
        ps : str
            the GRID-based SPC
        """
        return self.ps

    def SEid(self):
        """
        Gets the Superelement ID

        Returns
        -------
        seid : int
            the Superelement ID
        """
        if self.seid_ref is None:
            return self.seid
        return self.seid_ref.seid

    def _verify(self, xref):
        """
        Verifies all methods for this object work

        Parameters
        ----------
        xref: bool
            has this model been cross referenced
        """
        cp = self.Cp()
        seid = self.SEid()
        cd = self.Cd()
        ps = self.Ps()
        assert isinstance(cp, integer_types), 'cp=%r' % cp
        assert isinstance(cd, integer_types), 'cd=%r' % cd
        assert isinstance(ps, str), 'ps=%r' % ps
        assert isinstance(seid, integer_types), 'seid=%r' % seid

    def raw_fields(self):
        """
        Gets the fields in their unmodified form

        Returns
        -------
        fields : List[varies]
            the fields that define the card
        """
        list_fields = ['GRDSET', None, self.Cp(), None, None, None,
                       self.Cd(), self.ps, self.SEid()]
        return list_fields

    def repr_fields(self):
        """
        Gets the fields in their simplified form

        Returns
        -------
        fields : List[varies]
            the fields that define the card
        """
        cp = set_blank_if_default(self.Cp(), 0)
        cd = set_blank_if_default(self.Cd(), 0)
        ps = set_blank_if_default(self.ps, 0)
        seid = set_blank_if_default(self.SEid(), 0)
        list_fields = ['GRDSET', None, cp, None, None, None, cd, ps, seid]
        return list_fields

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """
        The writer method used by BDF.write_card

        Parameters
        ----------
        size : int
            the size of the card (8/16)
        """
        card = self.repr_fields()
        return print_card_8(card)
class GRIDB(BaseCard):
    """defines the GRIDB class"""
    type = 'GRIDB'
    #: allows the get_field method and update_field methods to be used
    _field_map = {1: 'nid', 4:'phi', 6:'cd', 7:'ps', 8:'idf'}
    def __init__(self, nid, phi, cd, ps, ringfl, comment=''):
        """
        Creates the GRIDB card
        """
        if comment:
            self.comment = comment
        #Node.__init__(self)
        #: node ID
        self.nid = nid
        #: phi angle; NOTE(review): presumably degrees on the fluid ring -- confirm
        self.phi = phi
        # analysis coordinate system
        self.cd = cd
        #: local SPC constraint
        self.ps = ps
        #: ringfl
        self.ringfl = ringfl
        assert self.nid > 0, 'nid=%s' % self.nid
        assert self.phi >= 0, 'phi=%s' % self.phi
        assert self.cd >= 0, 'cd=%s' % self.cd
        assert self.ringfl >= 0, 'ringfl=%s' % self.ringfl
        self.cd_ref = None
    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a GRIDB card from ``BDF.add_card(...)``
        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        nid = integer(card, 1, 'nid')
        phi = double(card, 4, 'phi')
        cd = integer(card, 6, 'cd')
        ps = components_or_blank(card, 7, 'ps', '')
        idf = integer(card, 8, 'ringfl/idf')
        return GRIDB(nid, phi, cd, ps, idf, comment=comment)
    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a GRIDB card from the OP2
        Parameters
        ----------
        data : List[varies]
            a list of fields defined in OP2 format
        comment : str; default=''
            a comment for the card
        """
        nid = data[0]
        phi = data[1]
        cd = data[2]
        ps = data[3]
        idf = data[4]
        return GRIDB(nid, phi, cd, ps, idf, comment=comment)
    def _verify(self, xref):
        """
        Verifies all methods for this object work
        Returns
        -------
        xref : bool
            has this model been cross referenced
        """
        pass
    def Cd(self):
        """
        Gets the output coordinate system
        Returns
        -------
        cd : int
            the output coordinate system
        """
        if self.cd_ref is None:
            return self.cd
        return self.cd_ref.cid
    def raw_fields(self):
        """
        Gets the fields in their unmodified form
        Returns
        -------
        fields : List[varies]
            the fields that define the card
        """
        list_fields = ['GRIDB', self.nid, None, None, self.phi, None,
                       self.Cd(), self.ps, self.ringfl]
        return list_fields
    def get_position(self):
        ## TODO: fixme
        return np.array([0., 0., 0.])
    def repr_fields(self):
        """
        Gets the fields in their simplified form
        Returns
        -------
        fields : List[varies]
            the fields that define the card
        """
        #phi = set_blank_if_default(self.phi, 0.0)
        cd = set_blank_if_default(self.Cd(), 0)
        ps = set_blank_if_default(self.ps, 0)
        idf = set_blank_if_default(self.ringfl, 0)
        list_fields = ['GRIDB', self.nid, None, None, self.phi, None, cd, ps,
                       idf]
        return list_fields
    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """
        The writer method used by BDF.write_card
        Parameters
        ----------
        size : int; default=8
            the size of the card (8/16)
        is_double : bool; default=False
            should this card be written with double precision
        Returns
        -------
        msg : str
            the card as a string
        """
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card)
        return self.comment + print_card_16(card)
class GRID(BaseCard):
    """
    +------+-----+----+----+----+----+----+----+------+
    |   1  |  2  | 3  | 4  | 5  | 6  |  7 | 8  |   9  |
    +======+=====+====+====+====+====+====+====+======+
    | GRID | NID | CP | X1 | X2 | X3 | CD | PS | SEID |
    +------+-----+----+----+----+----+----+----+------+

    Attributes
    ----------
    nid : int
        node id
    xyz : float ndarray
        Raw location <:math:`x_1, x_2, x_3`>
    cp : int
        reference coordinate system
    cd : int
        analysis coordinate system; -1 means "no cd" and skips cross-referencing
    ps : str
        nodal-based constraints
    seid : int
        superelement id
    cp_ref : Coord() or None
        cross-referenced cp
    cd_ref : Coord() or None
        cross-referenced cd

    Using the GRID object::

        model = read_bdf(bdf_filename)
        node = model.Node(nid)

        # gets the position of the node in the global frame
        node.get_position()
        node.get_position_wrt(model, cid=0)

        # gets the position of the node in a local frame
        node.get_position_wrt(model, cid=1)

        # change the location of the node
        node.set_position(model, array([1.,2.,3.]), cid=3)
    """
    type = 'GRID'

    #: allows the get_field method and update_field methods to be used
    _field_map = {1: 'nid', 2:'cp', 6:'cd', 7:'ps', 8:'seid'}

    def _get_field_helper(self, n: int):
        """
        Gets complicated parameters on the GRID card

        Parameters
        ----------
        n : int
            the field number to update

        Returns
        -------
        value : float
            the value for the appropriate field
        """
        if n == 3:
            value = self.xyz[0]
        elif n == 4:
            value = self.xyz[1]
        elif n == 5:
            value = self.xyz[2]
        else:
            raise KeyError('Field %r is an invalid %s entry.' % (n, self.type))
        return value

    def _update_field_helper(self, n: int, value: Any):
        """
        Updates complicated parameters on the GRID card

        Parameters
        ----------
        n : int
            the field number to update
        value : float
            the value for the appropriate field
        """
        if n == 3:
            self.xyz[0] = value
        elif n == 4:
            self.xyz[1] = value
        elif n == 5:
            self.xyz[2] = value
        else:
            raise KeyError('Field %r=%r is an invalid %s entry.' % (n, value, self.type))

    @classmethod
    def export_to_hdf5(cls, h5_file, model, nids):
        """exports the nodes in a vectorized way"""
        cp = []
        xyz = []
        cd = []
        ps = []
        seid = []
        for nid in nids:
            node = model.nodes[nid]
            xyz.append(node.xyz)
            cp.append(node.cp)
            cd.append(node.cd)
            # ps is stored as a component string; '' maps to 0 for HDF5
            psi = 0 if node.ps == '' else (int(node.ps))
            ps.append(psi)
            seid.append(node.seid)
        h5_file.create_dataset('nid', data=nids)
        h5_file.create_dataset('xyz', data=xyz)
        h5_file.create_dataset('cp', data=cp)
        h5_file.create_dataset('cd', data=cd)
        h5_file.create_dataset('ps', data=ps)
        h5_file.create_dataset('seid', data=seid)

    def __init__(self, nid: int, xyz: Union[None, List[float], np.ndarray],
                 cp: int=0, cd: int=0, ps: str='', seid: int=0,
                 comment: str='') -> None:
        """
        Creates the GRID card

        Parameters
        ----------
        nid : int
            node id
        cp : int; default=0
            the xyz coordinate frame
        xyz : (3, ) float ndarray; default=None -> [0., 0., 0.]
            the xyz/r-theta-z/rho-theta-phi values
        cd : int; default=0
            the analysis coordinate frame
        ps : str; default=''
            Additional SPCs in the analysis coordinate frame (e.g. '123').
            This corresponds to DOF set ``SG``.
        seid : int; default=0
            superelement id
            TODO: how is this used by Nastran???
        comment : str; default=''
            a comment for the card
        """
        BaseCard.__init__(self)
        if comment:
            self.comment = comment
        self.nid = nid
        self.cp = cp
        if xyz is None:
            xyz = [0., 0., 0.]
        self.xyz = np.asarray(xyz, dtype='float64')
        assert self.xyz.size == 3, self.xyz.shape
        self.cd = cd
        self.ps = ps
        self.seid = seid
        self.cp_ref = None  # type: Coord
        self.cd_ref = None  # type: Coord
        self.elements_ref = None  # type: List[Element]

    @classmethod
    def add_op2_data(cls, data, comment: str='') -> Any:
        """
        Adds a GRID card from the OP2.

        Parameters
        ----------
        data : List[varies]
            a list of fields defined in OP2 format
        comment : str; default=''
            a comment for the card

        .. todo:: Currently unused, but is tested by test_nodes.py
        """
        nid = data[0]
        cp = data[1]
        xyz = data[2:5]
        cd = data[5]
        ps = data[6]
        seid = data[7]
        if ps == 0:
            ps = ''
        return GRID(nid, xyz, cp, cd, ps, seid, comment=comment)

    @classmethod
    def add_card(cls, card: BDFCard, comment: str='') -> Any:
        """
        Adds a GRID card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        nfields = len(card)
        #: Node ID
        nid = integer(card, 1, 'nid')
        #: Grid point coordinate system
        cp = integer_or_blank(card, 2, 'cp', 0)
        #: node location in local frame
        xyz = [
            double_or_blank(card, 3, 'x1', 0.),
            double_or_blank(card, 4, 'x2', 0.),
            double_or_blank(card, 5, 'x3', 0.)]
        if nfields > 6:
            #: Analysis coordinate system
            cd = integer_or_blank(card, 6, 'cd', 0)
            #: SPC constraint
            ps = components_or_blank(card, 7, 'ps', '')
            #: Superelement ID
            seid = integer_or_blank(card, 8, 'seid', 0)
            assert len(card) <= 9, 'len(GRID card) = %i\ncard=%s' % (len(card), card)
        else:
            cd = 0
            ps = ''
            seid = 0
        return GRID(nid, xyz, cp, cd, ps, seid, comment=comment)

    def validate(self) -> None:
        assert isinstance(self.cp, integer_types), 'cp=%s' % (self.cp)
        assert self.nid > 0, 'nid=%s' % (self.nid)
        assert self.cp >= 0, 'cp=%s' % (self.cp)
        assert self.cd >= -1, 'cd=%s' % (self.cd)
        assert self.seid >= 0, 'seid=%s' % (self.seid)
        assert len(self.xyz) == 3

    def Nid(self) -> int:
        """
        Gets the GRID ID

        Returns
        -------
        nid : int
            node ID
        """
        return self.nid

    def Ps(self) -> str:
        """
        Gets the GRID-based SPC

        Returns
        -------
        ps : str
            the GRID-based SPC
        """
        return self.ps

    def Cd(self) -> int:
        """
        Gets the output coordinate system

        Returns
        -------
        cd : int
            the output coordinate system
        """
        if self.cd_ref is None:
            return self.cd
        return self.cd_ref.cid

    def Cp(self) -> int:
        """
        Gets the analysis coordinate system

        Returns
        -------
        cp : int
            the analysis coordinate system
        """
        if self.cp_ref is None:
            return self.cp
        return self.cp_ref.cid

    def SEid(self) -> int:
        """
        Gets the Superelement ID

        Returns
        -------
        seid : int
            the Superelement ID
        """
        return self.seid

    def _verify(self, xref: bool) -> None:
        """
        Verifies all methods for this object work

        Parameters
        ----------
        xref : bool
            has this model been cross referenced
        """
        nid = self.Nid()
        cp = self.Cp()
        cd = self.Cd()
        xyz = self.xyz
        ps = self.Ps()
        seid = self.SEid()
        assert isinstance(xyz, np.ndarray), 'xyz=%r' % xyz
        assert isinstance(nid, integer_types), 'nid=%r' % nid
        assert isinstance(cp, integer_types), 'cp=%r' % cp
        assert isinstance(cd, integer_types), 'cd=%r' % cd
        assert isinstance(ps, str), 'ps=%r' % ps
        assert isinstance(seid, integer_types), 'seid=%r' % seid
        if xref:
            pos_xyz = self.get_position()
            assert isinstance(pos_xyz, np.ndarray), 'pos_xyz=%r' % pos_xyz

    def set_position(self, model: BDF, xyz: np.ndarray,
                     cid: int=0, xref: bool=True) -> None:
        """
        Updates the GRID location

        Parameters
        ----------
        xyz : (3, ) float ndarray
            the location of the node.
        cp : int; default=0 (global)
            the analysis coordinate system
        xref : bool; default=True
            cross-references the coordinate system
        """
        self.xyz = xyz
        msg = ', which is required by GRID nid=%s' % self.nid
        self.cp = cid
        if xref:
            self.cp_ref = model.Coord(cid, msg=msg)

    def get_position_no_xref(self, model: Any) -> np.ndarray:
        """gets the position in the global frame without using cross-referencing"""
        if self.cp == 0:
            return self.xyz
        assert isinstance(self.cp, integer_types), self.cp
        coord = model.Coord(self.cp)
        xyz = coord.transform_node_to_global_no_xref(self.xyz, model)
        return xyz

    def get_position(self) -> np.ndarray:
        """
        Gets the point in the global XYZ coordinate system.

        Returns
        -------
        xyz : (3, ) float ndarray
            the position of the GRID in the global coordinate system
        """
        try:
            xyz = self.cp_ref.transform_node_to_global(self.xyz)
        except AttributeError:
            # not cross-referenced; cp=0 needs no transform
            if self.cp == 0:
                return self.xyz
            raise
        return xyz

    def get_position_assuming_rectangular(self):
        # type: () -> np.ndarray
        """
        Gets the point in a coordinate system that has unit vectors
        in the referenced coordinate system, but is not transformed
        from a cylindrical/spherical system.  This is used by cards
        like CBAR/CBEAM for element offset vectors.

        Returns
        -------
        xyz : (3, ) float ndarray
            the position of the GRID in the global coordinate system
        """
        try:
            xyz = self.cp_ref.transform_node_to_global(self.xyz)
        except AttributeError:
            if self.cp == 0:
                return self.xyz
            raise
        return xyz

    def get_position_wrt_no_xref(self, model, cid):
        # type: (Any, int) -> np.ndarray
        """see get_position_wrt"""
        if cid == self.cp:  # same coordinate system
            return self.xyz
        msg = ', which is required by GRID nid=%s' % (self.nid)

        # converting the xyz point arbitrary->global
        cp_ref = model.Coord(self.cp, msg=msg)
        p = cp_ref.transform_node_to_global_no_xref(self.xyz, model)

        # a matrix global->local matrix is found
        coord_b = model.Coord(cid, msg=msg)
        xyz = coord_b.transform_node_to_local(p)
        return xyz

    def get_position_wrt(self, model: BDF, cid: int) -> np.ndarray:
        """
        Gets the location of the GRID which started in some arbitrary
        system and returns it in the desired coordinate system

        Parameters
        ----------
        model : BDF()
            the BDF object
        cid : int
            the desired coordinate ID

        Returns
        -------
        xyz : (3, ) float ndarray
            the position of the GRID in an arbitrary coordinate system
        """
        if cid == self.Cp():  # same coordinate system
            return self.xyz

        # converting the xyz point arbitrary->global
        p = self.cp_ref.transform_node_to_global(self.xyz)

        # a matrix global->local matrix is found
        msg = ', which is required by GRID nid=%s' % (self.nid)
        coord_b = model.Coord(cid, msg=msg)
        xyz = coord_b.transform_node_to_local(p)
        return xyz

    def cross_reference(self, model: BDF, grdset=None):
        # type: (Any, Optional[Any]) -> None
        """
        Cross links the card so referenced cards can be extracted directly

        Parameters
        ----------
        model : BDF()
            the BDF object
        grdset : GRDSET / None; default=None
            a GRDSET if available (default=None)

        .. note::  The gridset object will only update the fields that
                   have not been set
        """
        if grdset:
            # update unset fields using the grdset object
            if not self.cp:
                self.cp_ref = grdset.cp_ref
            if not self.cd:
                self.cd = grdset.cd
                # bug fix: this was the no-op ``self.cd_ref = self.cd_ref``;
                # the GRDSET default's cross-referenced coord was intended
                self.cd_ref = grdset.cd_ref
            if not self.ps:
                self.ps_ref = grdset.ps
            if not self.seid:
                self.seid_ref = grdset.seid
        msg = ', which is required by GRID nid=%s' % (self.nid)
        self.cp_ref = model.Coord(self.cp, msg=msg)
        # cd=-1 is a valid "no analysis coord" flag (see validate)
        if self.cd != -1:
            self.cd_ref = model.Coord(self.cd, msg=msg)

    def uncross_reference(self) -> None:
        """Removes cross-reference links"""
        self.cd_ref = None
        self.cp_ref = None
        self.elements_ref = None

    def raw_fields(self):
        # type: () -> List[Any]
        """
        Gets the fields in their unmodified form

        Returns
        -------
        fields : List[int/float/str]
            the fields that define the card
        """
        list_fields = ['GRID', self.nid, self.Cp()] + list(self.xyz) + \
                      [self.Cd(), self.ps, self.SEid()]
        return list_fields

    def repr_fields(self):
        # type: () -> List[Any]
        """
        Gets the fields in their simplified form

        Returns
        -------
        fields : List[int/float/str]
            the fields that define the card
        """
        cp = set_blank_if_default(self.Cp(), 0)
        cd = set_blank_if_default(self.Cd(), 0)
        seid = set_blank_if_default(self.SEid(), 0)
        list_fields = ['GRID', self.nid, cp] + list(self.xyz) + [cd, self.ps,
                                                                 seid]
        return list_fields

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """
        The writer method used by BDF.write_card

        Parameters
        ----------
        size : int; default=8
            the size of the card (8/16)
        is_double : bool; default=False
            should this card be written with double precision

        Returns
        -------
        msg : str
            the card as a string
        """
        if size == 8:
            return self.write_card_8()
        return self.write_card_16(is_double)

    def write_card_8(self):
        # type: () -> str
        """Writes a GRID card in 8-field format"""
        xyz = self.xyz
        cp = self.Cp()
        cd = self.Cd()

        cps = set_string8_blank_if_default(cp, 0)
        if [cd, self.ps, self.seid] == [0, '', 0]:
            # default trailing fields are dropped
            msg = 'GRID    %8i%8s%s%s%s\n' % (
                self.nid, cps,
                print_float_8(xyz[0]),
                print_float_8(xyz[1]),
                print_float_8(xyz[2]))
        else:
            cds = set_string8_blank_if_default(cd, 0)
            seid = set_string8_blank_if_default(self.SEid(), 0)
            msg = 'GRID    %8i%8s%s%s%s%s%8s%s\n' % (
                self.nid, cps,
                print_float_8(xyz[0]),
                print_float_8(xyz[1]),
                print_float_8(xyz[2]),
                cds, self.ps, seid)
        return self.comment + msg

    def write_card_16(self, is_double=False):
        # type: (bool) -> str
        """Writes a GRID card in 16-field format"""
        xyz = self.xyz
        cp = set_string16_blank_if_default(self.Cp(), 0)
        cd = set_string16_blank_if_default(self.Cd(), 0)
        seid = set_string16_blank_if_default(self.SEid(), 0)

        # bug fix: the old code compared the *formatted* 16-character cd/seid
        # strings against the integer 0, so the short form below was dead
        # code; test the raw values like write_card_8 does
        is_default = [self.Cd(), self.ps, self.SEid()] == [0, '', 0]
        print_float = print_scientific_double if is_double else print_float_16
        if is_default:
            msg = ('GRID*   %16i%16s%16s%16s\n'
                   '*       %16s\n' % (
                       self.nid,
                       cp,
                       print_float(xyz[0]),
                       print_float(xyz[1]),
                       print_float(xyz[2])))
        else:
            msg = ('GRID*   %16i%16s%16s%16s\n'
                   '*       %16s%16s%16s%16s\n' % (
                       self.nid,
                       cp,
                       print_float(xyz[0]),
                       print_float(xyz[1]),
                       print_float(xyz[2]),
                       cd, self.ps, seid))
        return self.comment + msg
class POINT(BaseCard):
    """
    +-------+-----+----+----+----+----+
    |   1   |  2  | 3  | 4  | 5  | 6  |
    +=======+=====+====+====+====+====+
    | POINT | NID | CP | X1 | X2 | X3 |
    +-------+-----+----+----+----+----+
    """
    type = 'POINT'
    _field_map = {1: 'nid', 2:'cp'}

    def _get_field_helper(self, n: int) -> float:
        """
        Gets complicated parameters on the POINT card

        Parameters
        ----------
        n : int
            the field number to read; 3/4/5 -> x1/x2/x3

        Returns
        -------
        value : float
            the value for the appropriate field

        Raises
        ------
        KeyError
            if ``n`` is not a coordinate field
        """
        if n == 3:
            value = self.xyz[0]
        elif n == 4:
            value = self.xyz[1]
        elif n == 5:
            value = self.xyz[2]
        else:
            raise KeyError('Field %r is an invalid %s entry.' % (n, self.type))
        return value

    def _update_field_helper(self, n, value):
        # type: (int, float) -> None
        """
        Updates complicated parameters on the POINT card

        Parameters
        ----------
        n : int
            the field number to update; 3/4/5 -> x1/x2/x3
        value : float
            the value for the appropriate field

        Raises
        ------
        KeyError
            if ``n`` is not a coordinate field
        """
        if n == 3:
            self.xyz[0] = value
        elif n == 4:
            self.xyz[1] = value
        elif n == 5:
            self.xyz[2] = value
        else:
            raise KeyError('Field %r=%r is an invalid %s entry.' % (n, value, self.type))

    @classmethod
    def _init_from_empty(cls):
        """Creates a minimal POINT card (empty-model/test support)"""
        nid = 1
        xyz = [1., 2., 3.]
        return POINT(nid, xyz, cp=0, comment='')

    def __init__(self, nid, xyz, cp=0, comment=''):
        # type: (int, Union[List[float], np.ndarray], int, str) -> None
        """
        Creates the POINT card

        Parameters
        ----------
        nid : int
            node id
        xyz : (3, ) float ndarray; default=None -> [0., 0., 0.]
            the xyz/r-theta-z/rho-theta-phi values
        cp : int; default=0
            coordinate system for the xyz location
        comment : str; default=''
            a comment for the card
        """
        if comment:
            self.comment = comment
        if xyz is None:
            xyz = [0., 0., 0.]
        #Node.__init__(self)

        #: Node ID
        self.nid = nid
        #: Grid point coordinate system (integer id)
        self.cp = cp
        #: node location in local frame
        self.xyz = np.asarray(xyz, dtype='float64')
        assert self.xyz.size == 3, self.xyz.shape
        #: cross-referenced Coord object; set by cross_reference()
        self.cp_ref = None

    def validate(self):
        # type: () -> None
        """Sanity-checks the card's fields"""
        assert self.nid > 0, 'nid=%s' % (self.nid)
        assert self.cp >= 0, 'cp=%s' % (self.cp)
        assert len(self.xyz) == 3

    @classmethod
    def add_card(cls, card, comment=''):
        # type: (Any, str) -> POINT
        """
        Adds a POINT card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        nid = integer(card, 1, 'nid')
        cp = integer_or_blank(card, 2, 'cp', 0)
        xyz = np.array([
            double_or_blank(card, 3, 'x1', 0.),
            double_or_blank(card, 4, 'x2', 0.),
            double_or_blank(card, 5, 'x3', 0.)], dtype='float64')
        assert len(card) <= 9, 'len(POINT card) = %i\ncard=%s' % (len(card), card)
        return POINT(nid, xyz, cp=cp, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        # type: (List[Union[int, float]], str) -> POINT
        """
        Adds a POINT card from the OP2

        Parameters
        ----------
        data : List[varies]
            a list of fields defined in OP2 format
        comment : str; default=''
            a comment for the card
        """
        nid = data[0]  # type: int
        cp = data[1]  # type: int
        xyz = np.array(data[2:5])  # type: np.ndarray
        return POINT(nid, xyz, cp=cp, comment=comment)

    def set_position(self, model, xyz, cid=0):
        # type: (Any, np.ndarray, int) -> None
        """
        Updates the POINT location

        Parameters
        ----------
        model : BDF()
            the BDF model object (used to look up the Coord)
        xyz : (3,) float ndarray
            the location of the node
        cid : int; default=0 (global)
            the analysis coordinate system
        """
        self.xyz = xyz
        msg = ', which is required by POINT nid=%s' % self.nid
        # bug fix: keep ``cp`` an integer id and store the Coord object in
        # ``cp_ref``; previously the Coord object was written into ``cp``,
        # which broke Cp()/raw_fields() after calling this method
        self.cp = cid
        self.cp_ref = model.Coord(cid, msg=msg)

    def get_position(self):
        # type: () -> np.ndarray
        """
        Gets the point in the global XYZ coordinate system.

        The card must be cross-referenced first (``cp_ref`` must be set).

        Returns
        -------
        position : (3,) float ndarray
            the position of the POINT in the global coordinate system
        """
        p = self.cp_ref.transform_node_to_global(self.xyz)
        return p

    def get_position_wrt(self, model, cid):
        # type: (Any, int) -> np.ndarray
        """
        Gets the location of the POINT which started in some arbitrary
        system and returns it in the desired coordinate system

        Parameters
        ----------
        model : BDF()
            the BDF model object
        cid : int
            the desired coordinate ID

        Returns
        -------
        xyz : (3,) ndarray
            the position of the POINT in an arbitrary coordinate system
        """
        if cid == self.Cp():  # same coordinate system
            return self.xyz

        # converting the xyz point arbitrary->global
        p = self.cp_ref.transform_node_to_global(self.xyz)

        # a matrix global->local matrix is found
        msg = ', which is required by POINT nid=%s' % (self.nid)
        coord_b = model.Coord(cid, msg=msg)
        xyz = coord_b.transform_node_to_local(p)
        return xyz

    def Cp(self) -> int:
        """
        Gets the analysis coordinate system

        Returns
        -------
        cp : int
            the analysis coordinate system id
        """
        if self.cp_ref is None:
            return self.cp
        return self.cp_ref.cid

    def cross_reference(self, model: BDF) -> None:
        """
        Cross links the card so referenced cards can be extracted directly

        Parameters
        ----------
        model : BDF()
            the BDF object
        """
        self.cp_ref = model.Coord(self.cp)

    def uncross_reference(self) -> None:
        """Removes cross-reference links"""
        # bug fix: collapse the Coord reference back into the integer id;
        # previously the id was stored in ``cp_ref`` (an int where a Coord
        # object is expected), which broke get_position() and any later
        # cross_reference() round trip
        self.cp = self.Cp()
        self.cp_ref = None

    def raw_fields(self) -> List[Union[str, int, float, None]]:
        """
        Gets the fields in their unmodified form

        Returns
        -------
        fields : list[varies]
            the fields that define the card
        """
        list_fields = ['POINT', self.nid, self.Cp()] + list(self.xyz)
        return list_fields

    def repr_fields(self):
        # type: () -> List[Union[str, int, float]]
        """
        Gets the fields in their simplified form

        Returns
        -------
        fields : list[varies]
            the fields that define the card
        """
        cp = set_blank_if_default(self.Cp(), 0)
        list_fields = ['POINT', self.nid, cp] + list(self.xyz)
        return list_fields

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """
        The writer method used by BDF.write_card

        Parameters
        ----------
        size : int; default=8
            the size of the card (8/16)
        is_double : bool; default=False
            write 16-character fields in double precision
        """
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card)
        if is_double:
            return self.comment + print_card_double(card)
        return self.comment + print_card_16(card)
| 27.824346 | 90 | 0.5045 |
bb7c2f1c12ee413207a6618a690e45605f753f3c | 870 | py | Python | lib/systems/benzoyl_chloride.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | lib/systems/benzoyl_chloride.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | lib/systems/benzoyl_chloride.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | import pulsar as psr
def load_ref_system():
    """ Returns benzoyl_chloride as found in the IQMol fragment library.
        All credit to https://github.com/nutjunkie/IQmol
    """
    # one atom per line: element symbol followed by x/y/z coordinates
    xyz_block = """
        C          1.3835       1.0239      -0.0000
        C          2.6581       0.4699      -0.0000
        C          2.8197      -0.9117      -0.0000
        C          1.7027      -1.7411      -0.0000
        C          0.4252      -1.1942      -0.0000
        C          0.2579       0.1929       0.0000
        H          1.2618       2.1144      -0.0000
        H          3.5362       1.1247      -0.0000
        H          3.8250      -1.3464      -0.0000
        H          1.8281      -2.8292      -0.0000
        H         -0.4505      -1.8565      -0.0000
        C         -1.0872       0.8033       0.0000
        Cl        -2.4420      -0.3546      -0.0000
        O         -1.3442       1.9805      -0.0000
    """
    return psr.make_system(xyz_block)
| 39.545455 | 72 | 0.429885 |
53c5b4034ea7518ef8968748a6b03865186ad073 | 4,457 | py | Python | app/run.py | sidheswar12/Disaster-Response-Pipeline | 1812bd5452d445c4b04bbdb77a61efe4307d50e6 | [
"MIT"
] | 2 | 2019-04-15T22:25:13.000Z | 2020-04-15T21:39:40.000Z | app/run.py | sidheswar12/Disaster-Response-Pipeline | 1812bd5452d445c4b04bbdb77a61efe4307d50e6 | [
"MIT"
] | null | null | null | app/run.py | sidheswar12/Disaster-Response-Pipeline | 1812bd5452d445c4b04bbdb77a61efe4307d50e6 | [
"MIT"
] | null | null | null | import json
import plotly
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize, WhitespaceTokenizer
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar, Heatmap, Histogram
from sklearn.externals import joblib
from sqlalchemy import create_engine
import numpy as np
import re
import nltk
app = Flask(__name__)
def tokenize(text):
    """Split *text* on whitespace, lemmatize each token, lowercase it and
    strip surrounding punctuation; empty tokens are discarded."""
    lemmatizer = WordNetLemmatizer()
    cleaned_tokens = []
    for raw_token in WhitespaceTokenizer().tokenize(text):
        token = lemmatizer.lemmatize(raw_token).lower().strip('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
        token = re.sub(r'\[[^.,;:]]*\]', '', token)
        # keep only non-empty tokens
        if token:
            cleaned_tokens.append(token)
    return cleaned_tokens
def find_text_length(data):
    """Return an (n, 1) numpy column vector with the character count of each text."""
    lengths = [len(text) for text in data]
    return np.array(lengths).reshape(-1, 1)
# load the cleaned messages table produced by the ETL pipeline
#engine = create_engine('sqlite:///../data/disaster_message_categories.db')
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('labeled_messages', engine)
# load the trained classifier pipeline from disk
model = joblib.load("../models/classifier.pkl")
# precompute per-message character lengths for the overview plots
df['text_length'] = find_text_length(df['message'])
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
    """Home page: build the three Plotly overview figures.

    Aggregates the message DataFrame into (1) genre counts, (2) a
    category correlation heatmap and (3) per-genre text-length
    histograms, encodes them as Plotly JSON and renders master.html.
    """
    # extract genre counts
    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)
    # extract categories (columns from index 4 onward hold the label columns)
    category_map = df.iloc[:,4:].corr().values
    category_names = list(df.iloc[:,4:].columns)
    # extract length of texts, split by genre
    length_direct = df.loc[df.genre=='direct','text_length']
    length_social = df.loc[df.genre=='social','text_length']
    length_news = df.loc[df.genre=='news','text_length']
    # create visuals
    # NOTE(review): in the heatmap below the y labels are reversed but the
    # z matrix rows are not -- confirm the axis labels line up as intended
    graphs = [
        {
            'data': [
                Bar(
                    x=genre_names,
                    y=genre_counts
                )
            ],
            'layout': {
                'title': 'Distribution of Message Genres',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Genre"
                }
            }
        },
        {
            'data': [
                Heatmap(
                    x=category_names,
                    y=category_names[::-1],
                    z=category_map
                )
            ],
            'layout': {
                'title': 'Heatmap of Categories'
            }
        },
        {
            'data': [
                Histogram(
                    y=length_direct,
                    name='Direct',
                    opacity=0.5
                ),
                Histogram(
                    y=length_social,
                    name='Social',
                    opacity=0.5
                ),
                Histogram(
                    y=length_news,
                    name='News',
                    opacity=0.5
                )
            ],
            'layout': {
                'title': 'Distribution of Text Length',
                'yaxis':{
                    'title':'Count'
                },
                'xaxis': {
                    'title':'Text Length'
                }
            }
        }
    ]
    # encode plotly graphs in JSON (ids pair each figure with a div in the template)
    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
    # render web page with plotly graphs
    return render_template('master.html', ids=ids, graphJSON=graphJSON)
# Web page that handles user query and displays model results
@app.route('/go')
def go():
    """Classification endpoint: predict one label per category.

    Reads the ``query`` GET parameter, runs the trained multi-output
    classifier on it and renders go.html with a {category: 0/1} mapping.
    """
    # save user input in query (empty string when the parameter is missing)
    query = request.args.get('query', '')
    # Use model to predict classification for query
    classification_labels = model.predict([query])[0]
    classification_results = dict(zip(df.columns[4:], classification_labels))
    # This will render the go.html Please see that file.
    return render_template(
        'go.html',
        query=query,
        classification_result=classification_results
    )
def main():
    # serve the Flask app on all interfaces; debug=True enables the reloader
    app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
| 27.012121 | 95 | 0.533767 |
c522348ab6be19ed3923f03fea5f689c83d31211 | 446 | py | Python | data/scripts/templates/object/tangible/loot/quest/shared_wind_crystal.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/loot/quest/shared_wind_crystal.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/loot/quest/shared_wind_crystal.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Builds the shared wind-crystal quest loot Tangible template (autogenerated file)."""
	result = Tangible()
	result.template = "object/tangible/loot/quest/shared_wind_crystal.iff"
	result.attribute_template_id = -1
	result.stfName("item_n","wind_crystal")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
return result | 26.235294 | 71 | 0.724215 |
b65b92acb4c1bfde884552a272de14af0ab933d7 | 490 | py | Python | features/steps/roman.py | TestowanieAutomatyczneUG/laboratorium_14-bozimkiewicz | a77b186acbf20357dde87b517c4bb9bcf0b56fea | [
"MIT"
] | null | null | null | features/steps/roman.py | TestowanieAutomatyczneUG/laboratorium_14-bozimkiewicz | a77b186acbf20357dde87b517c4bb9bcf0b56fea | [
"MIT"
] | null | null | null | features/steps/roman.py | TestowanieAutomatyczneUG/laboratorium_14-bozimkiewicz | a77b186acbf20357dde87b517c4bb9bcf0b56fea | [
"MIT"
] | null | null | null | from behave import *
from src.Roman import Roman
use_step_matcher("re")
@given('we have (?P<arabic>.+) number and number convertion program')
def step_impl(context, arabic):
    # store the parsed arabic number and the converter under test on the
    # shared behave context for the later steps
    context.arabic = int(arabic)
    context.roman_convert = Roman()
@when('we have (?P<roman>.+) number')
def step_impl(context, roman):
    # remember the expected roman-numeral string for the assertion step
    context.roman = roman
@then('the program\'s result should be correct')
def step_impl(context):
    # converting the stored arabic number must reproduce the expected numeral
    assert context.roman == context.roman_convert.roman(context.arabic)
| 23.333333 | 71 | 0.720408 |
fc1378f5a6d51e79b69caca0cf05003c1e16e5db | 64 | py | Python | pj-examples/mylib/js/mylib/misc.py | andrewschaaf/pyxc-pj | aa00298c9fcc62b4e3b7c5b8a8114c7545108cbc | [
"MIT"
] | 17 | 2015-10-26T22:51:30.000Z | 2021-07-08T02:45:51.000Z | pj-examples/mylib/js/mylib/misc.py | andrewschaaf/pyxc-pj | aa00298c9fcc62b4e3b7c5b8a8114c7545108cbc | [
"MIT"
] | 1 | 2016-08-18T18:17:19.000Z | 2018-05-09T04:04:05.000Z | pj-examples/mylib/js/mylib/misc.py | andrewschaaf/pyxc-pj | aa00298c9fcc62b4e3b7c5b8a8114c7545108cbc | [
"MIT"
] | 2 | 2015-05-15T23:45:49.000Z | 2016-02-20T21:00:06.000Z |
def bind(f, obj):
    """Return a zero-argument closure that calls *f* with *obj* bound as ``this``.

    NOTE: this is pyxc-pj source that is translated to JavaScript --
    ``f.apply`` and ``arguments`` have JS semantics here, not Python.
    """
    return lambda: f.apply(obj, arguments)
| 10.666667 | 42 | 0.640625 |
7fd1024dcb00be25a1cc7b882090298d7fce9628 | 2,211 | py | Python | GM2AUTOSAR_MM/Properties/unit_contracts/HUnitR02_ConnectedLHS.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 3 | 2017-06-02T19:26:27.000Z | 2021-06-14T04:25:45.000Z | GM2AUTOSAR_MM/Properties/unit_contracts/HUnitR02_ConnectedLHS.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 8 | 2016-08-24T07:04:07.000Z | 2017-05-26T16:22:47.000Z | GM2AUTOSAR_MM/Properties/unit_contracts/HUnitR02_ConnectedLHS.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 1 | 2019-10-31T06:00:23.000Z | 2019-10-31T06:00:23.000Z | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitR02_ConnectedLHS(HimesisPreConditionPatternLHS):
    """Autogenerated himesis pre-condition pattern (LHS) for rule HUnitR02.

    Encodes the match graph ``PhysicalNode --partition--> Partition``
    used by the model-transformation matching engine.
    """
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HUnitR02_ConnectedLHS
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HUnitR02_ConnectedLHS, self).__init__(name='HUnitR02_ConnectedLHS', num_nodes=0, edges=[])
        # Add the edges
        self.add_edges([])
        # Set the graph attributes
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        self["MT_constraint__"] = """return True"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitR02_ConnectedLHS')
        self["equations"] = []
        # Set the node attributes
        # match class PhysicalNode(2.0.m.0PhysicalNode) node
        self.add_node()
        self.vs[0]["MT_pre__attr1"] = """return True"""
        self.vs[0]["MT_label__"] = """1"""
        self.vs[0]["mm__"] = """MT_pre__PhysicalNode"""
        self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'2.0.m.0PhysicalNode')
        # match class Partition(2.0.m.1Partition) node
        self.add_node()
        self.vs[1]["MT_pre__attr1"] = """return True"""
        self.vs[1]["MT_label__"] = """2"""
        self.vs[1]["mm__"] = """MT_pre__Partition"""
        self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'2.0.m.1Partition')
        # match association PhysicalNode--partition-->Partitionnode
        self.add_node()
        self.vs[2]["MT_pre__attr1"] = """return attr_value == "partition" """
        self.vs[2]["MT_label__"] = """3"""
        self.vs[2]["mm__"] = """MT_pre__directLink_S"""
        self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'2.0.m.0PhysicalNodeassoc22.0.m.1Partition')
        # Add the edges
        self.add_edges([
            (0,2), # match class PhysicalNode(2.0.m.0PhysicalNode) -> association partition
            (2,1), # association Partition -> match class Partition(2.0.m.1Partition)
        ])
    # define evaluation methods for each match class.
    def eval_attr11(self, attr_value, this):
        # PhysicalNode.attr1: matched unconditionally
        return True
    def eval_attr12(self, attr_value, this):
        # Partition.attr1: matched unconditionally
        return True
    # define evaluation methods for each match association.
    def eval_attr13(self, attr_value, this):
        # the association is only matched when it is named "partition"
        return attr_value == "partition"
    def constraint(self, PreNode, graph):
        # no additional global constraint on the match
        return True
| 31.585714 | 99 | 0.698779 |
bbe10260bf05a2f46f3cb8540d76e6a5abcf4857 | 936 | py | Python | generators/sidewinder.py | kwyckmans/python-mazes | be992077b3d802c6b8618b15c14c20bb30ed0ad9 | [
"Unlicense"
] | null | null | null | generators/sidewinder.py | kwyckmans/python-mazes | be992077b3d802c6b8618b15c14c20bb30ed0ad9 | [
"Unlicense"
] | null | null | null | generators/sidewinder.py | kwyckmans/python-mazes | be992077b3d802c6b8618b15c14c20bb30ed0ad9 | [
"Unlicense"
] | null | null | null | from random import randrange, choice
from typing import List
from core.cell import Cell
from core.grid import Grid
class Sidewinder:
    """Carves a maze into a grid with the sidewinder algorithm."""

    @staticmethod
    def on(grid: Grid):
        """Link cells of *grid* in place, row by row, and return the grid."""
        for row_key in grid.cells:
            current_run: List[Cell] = []
            for _, cell in grid.cells[row_key].items():
                current_run.append(cell)
                # close the run at the eastern wall, or with 50% chance
                # anywhere except on the northern boundary row
                # (short-circuit keeps the randrange() call order identical)
                if cell.east is None or (cell.north is not None and randrange(2) == 0):
                    chosen = choice(current_run)
                    if chosen.north:
                        chosen.link_biderectional(chosen.north)
                    current_run.clear()
                elif cell.east:
                    cell.link_biderectional(cell.east)
        return grid
| 28.363636 | 66 | 0.521368 |
6cda078e6f582eb6ed7ec4d2db81549f47295beb | 5,666 | py | Python | merlinsar/test/model.py | ykemiche/merlin | f0980d7ee5dbe04ac083ba1f2bb34a9c2d3aa343 | [
"MIT"
] | null | null | null | merlinsar/test/model.py | ykemiche/merlin | f0980d7ee5dbe04ac083ba1f2bb34a9c2d3aa343 | [
"MIT"
] | null | null | null | merlinsar/test/model.py | ykemiche/merlin | f0980d7ee5dbe04ac083ba1f2bb34a9c2d3aa343 | [
"MIT"
] | null | null | null | import time
import numpy as np
import os
from merlinsar.test.utils import *
from scipy import special
import argparse
# DEFINE PARAMETERS OF SPECKLE AND NORMALIZATION FACTOR
M = 10.089038980848645  # upper bound of the (M - m) normalization range -- TODO confirm source
m = -1.429329123112601  # lower bound of the (M - m) normalization range -- TODO confirm source
L = 1  # presumably the number of looks of the SAR data -- confirm
c = (1 / 2) * (special.psi(L) - np.log(L))  # mean of the log-speckle term for L looks
cn = c / (M - m) # normalized (0,1) mean of log speckle
import torch
import numpy as np
class Model(torch.nn.Module):
    """U-Net style denoiser used by MERLIN.

    The network predicts the noise component ``n`` of the input image and
    returns the residual ``x - n`` (residual learning).
    """

    def __init__(self, height, width, device):
        """Builds the encoder/decoder layers.

        Parameters
        ----------
        height, width : int
            spatial size the flat input is reshaped to in forward();
            both must be divisible by 32 (the encoder pools five times)
        device : str or torch.device
            stored on the instance for callers
        """
        super().__init__()
        self.device = device
        self.height = height
        self.width = width

        def make_conv(channels_in, channels_out):
            # every convolution in this network is 3x3 / stride 1 / 'same'
            return torch.nn.Conv2d(in_channels=channels_in, out_channels=channels_out,
                                   kernel_size=(3, 3), stride=(1, 1), padding='same')

        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        self.leaky = torch.nn.LeakyReLU(0.1)

        # encoder (layers are created in the same order as before so seeded
        # weight initialization stays reproducible)
        self.enc0 = make_conv(1, 48)
        self.enc1 = make_conv(48, 48)
        self.enc2 = make_conv(48, 48)
        self.enc3 = make_conv(48, 48)
        self.enc4 = make_conv(48, 48)
        self.enc5 = make_conv(48, 48)
        self.enc6 = make_conv(48, 48)

        # decoder; the 144/97 input widths come from concatenating the skip
        # connections (96 + 48 and 96 + 1 channels)
        self.dec5 = make_conv(96, 96)
        self.dec5b = make_conv(96, 96)
        self.dec4 = make_conv(144, 96)
        self.dec4b = make_conv(96, 96)
        self.dec3 = make_conv(144, 96)
        self.dec3b = make_conv(96, 96)
        self.dec2 = make_conv(144, 96)
        self.dec2b = make_conv(96, 96)
        self.dec1a = make_conv(97, 64)
        self.dec1b = make_conv(64, 32)
        self.dec1 = make_conv(32, 1)

        self.upscale2d = torch.nn.UpsamplingNearest2d(scale_factor=2)

    def forward(self, x):
        """Denoises a flat image tensor.

        Parameters
        ----------
        x : torch.Tensor
            tensor with ``height * width`` elements; reshaped internally
            to a (1, 1, height, width) batch

        Returns
        -------
        torch.Tensor
            ``x - n`` where ``n`` is the predicted noise,
            shaped (1, 1, height, width)
        """
        x = torch.reshape(x, [1, 1, self.height, self.width])

        # ----- encoder: collect a skip tensor before each resolution drop
        skip_stack = [x]
        out = self.leaky(self.enc0(x))
        out = self.pool(self.leaky(self.enc1(out)))
        skip_stack.append(out)
        for encoder in (self.enc2, self.enc3, self.enc4):
            out = self.pool(self.leaky(encoder(out)))
            skip_stack.append(out)
        out = self.pool(self.leaky(self.enc5(out)))
        out = self.leaky(self.enc6(out))

        # ----- decoder: upsample, concatenate the matching skip, two convs
        for conv_a, conv_b in ((self.dec5, self.dec5b),
                               (self.dec4, self.dec4b),
                               (self.dec3, self.dec3b),
                               (self.dec2, self.dec2b)):
            out = self.upscale2d(out)
            out = torch.cat((out, skip_stack.pop()), dim=1)
            out = self.leaky(conv_a(out))
            out = self.leaky(conv_b(out))

        out = self.upscale2d(out)
        out = torch.cat((out, skip_stack.pop()), dim=1)
        out = self.leaky(self.dec1a(out))
        out = self.leaky(self.dec1b(out))
        predicted_noise = self.dec1(out)

        # residual learning: subtract the predicted noise from the input
        return x - predicted_noise
| 36.554839 | 104 | 0.537593 |
71cc46a97f680b3a03074717df10752a7f040143 | 1,457 | py | Python | tests/test_banco_banrisul.py | marciorpbradoo/python-boleto | 8cb96acb6cf8f69b5734a42077de2b3468cc682d | [
"BSD-3-Clause"
] | 106 | 2015-02-25T13:38:56.000Z | 2022-02-15T23:19:14.000Z | tests/test_banco_banrisul.py | marciorpbradoo/python-boleto | 8cb96acb6cf8f69b5734a42077de2b3468cc682d | [
"BSD-3-Clause"
] | 31 | 2019-08-21T16:38:01.000Z | 2022-03-17T20:09:37.000Z | tests/test_banco_banrisul.py | marciorpbradoo/python-boleto | 8cb96acb6cf8f69b5734a42077de2b3468cc682d | [
"BSD-3-Clause"
] | 83 | 2015-01-28T15:06:18.000Z | 2021-12-15T18:16:55.000Z | # -*- coding: utf-8 -*-
import datetime
import unittest
from pyboleto.bank.banrisul import BoletoBanrisul
from .testutils import BoletoTestCase
class TestBancoBanrisul(BoletoTestCase):
    """Regression tests for Banrisul boleto barcode/linha-digitavel output."""

    def setUp(self):
        base_numero = 22832563
        self.dados = []
        for offset in range(3):
            boleto = BoletoBanrisul()
            boleto.data_documento = datetime.date(2000, 7, 4)
            boleto.data_vencimento = datetime.date(2000, 7, 4)
            boleto.data_processamento = datetime.date(2012, 7, 11)
            boleto.valor_documento = 550
            boleto.agencia_cedente = '1102'
            boleto.conta_cedente = '9000150'
            boleto.convenio = 7777777
            numero = str(base_numero + offset)
            boleto.nosso_numero = numero
            boleto.numero_documento = numero
            self.dados.append(boleto)

    def test_linha_digitavel(self):
        expected = '04192.11107 29000.150226 83256.340593 8 10010000055000'
        self.assertEqual(expected, self.dados[0].linha_digitavel)

    def test_tamanho_codigo_de_barras(self):
        self.assertEqual(44, len(self.dados[0].barcode))

    def test_codigo_de_barras(self):
        expected = '04198100100000550002111029000150228325634059'
        self.assertEqual(expected, self.dados[0].barcode)

    def test_campo_livre(self):
        self.assertEqual('2111029000150228325634059', self.dados[0].campo_livre)
# pre-built suite so external runners can load these tests without discovery
suite = unittest.TestLoader().loadTestsFromTestCase(TestBancoBanrisul)
if __name__ == '__main__':
    unittest.main()
| 30.354167 | 72 | 0.628689 |
fb32d5f002fba846beab62078e936e60ae8102f8 | 12,924 | py | Python | olive/wallet/wallet_block_store.py | Jsewill/Olive-blockchain | ba0169a56d7e67cefd95dc1f1f60e9a19d5cd2c5 | [
"Apache-2.0"
] | 10 | 2021-08-01T17:15:15.000Z | 2021-09-16T08:04:46.000Z | olive/wallet/wallet_block_store.py | Jsewill/Olive-blockchain | ba0169a56d7e67cefd95dc1f1f60e9a19d5cd2c5 | [
"Apache-2.0"
] | 8 | 2021-08-06T08:11:13.000Z | 2021-11-03T20:49:37.000Z | olive/wallet/wallet_block_store.py | Jsewill/Olive-blockchain | ba0169a56d7e67cefd95dc1f1f60e9a19d5cd2c5 | [
"Apache-2.0"
] | 7 | 2021-08-07T06:45:36.000Z | 2022-03-15T08:43:24.000Z | from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import aiosqlite
from olive.consensus.block_record import BlockRecord
from olive.types.blockchain_format.sized_bytes import bytes32
from olive.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from olive.types.coin_spend import CoinSpend
from olive.types.header_block import HeaderBlock
from olive.util.db_wrapper import DBWrapper
from olive.util.ints import uint32, uint64
from olive.util.lru_cache import LRUCache
from olive.util.streamable import Streamable, streamable
from olive.wallet.block_record import HeaderBlockRecord
@dataclass(frozen=True)
@streamable
class AdditionalCoinSpends(Streamable):
    """Streamable wrapper so a per-block list of CoinSpends can be stored as a single blob."""
    coin_spends_list: List[CoinSpend]
class WalletBlockStore:
"""
This object handles HeaderBlocks and Blocks stored in DB used by wallet.
"""
db: aiosqlite.Connection
db_wrapper: DBWrapper
block_cache: LRUCache
    @classmethod
    async def create(cls, db_wrapper: DBWrapper):
        """Creates the store on the shared DBWrapper connection.

        Schema creation is idempotent (CREATE ... IF NOT EXISTS) and is
        committed once at the end; a small read-side LRU cache is attached.
        """
        self = cls()
        self.db_wrapper = db_wrapper
        self.db = db_wrapper.db
        # WAL journaling with synchronous=FULL (2)
        await self.db.execute("pragma journal_mode=wal")
        await self.db.execute("pragma synchronous=2")
        # full serialized header block per header hash
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, height int,"
            " timestamp int, block blob)"
        )
        await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)")
        # Block records
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS block_records(header_hash "
            "text PRIMARY KEY, prev_hash text, height bigint, weight bigint, total_iters text,"
            "block blob, sub_epoch_summary blob, is_peak tinyint)"
        )
        # extra coin spends that accompany a block, one blob per header hash
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS additional_coin_spends(header_hash text PRIMARY KEY, spends_list_blob blob)"
        )
        # Height index so we can look up in order of height for sync purposes
        await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS hh on block_records(header_hash)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)")
        await self.db.commit()
        # read cache for header blocks; populated only from reads
        self.block_cache = LRUCache(1000)
        return self
async def _clear_database(self):
cursor_2 = await self.db.execute("DELETE FROM header_blocks")
await cursor_2.close()
await self.db.commit()
    async def add_block_record(
        self,
        header_block_record: HeaderBlockRecord,
        block_record: BlockRecord,
        additional_coin_spends: List[CoinSpend],
    ):
        """
        Adds a block record to the database. This block record is assumed to be connected
        to the chain, but it may or may not be in the LCA path.

        ``header_block_record`` goes to ``header_blocks``; ``block_record``
        goes to ``block_records`` with is_peak=False (the peak is flipped
        separately by set_peak); ``additional_coin_spends`` is only written
        when non-empty.
        NOTE(review): no commit here -- presumably the db_wrapper/caller
        commits the transaction; confirm.
        """
        cached = self.block_cache.get(header_block_record.header_hash)
        if cached is not None:
            # Since write to db can fail, we remove from cache here to avoid potential inconsistency
            # Adding to cache only from reading
            self.block_cache.put(header_block_record.header_hash, None)
        # blocks without a foliage transaction block have no timestamp; store 0
        if header_block_record.header.foliage_transaction_block is not None:
            timestamp = header_block_record.header.foliage_transaction_block.timestamp
        else:
            timestamp = uint64(0)
        cursor = await self.db.execute(
            "INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?)",
            (
                header_block_record.header_hash.hex(),
                header_block_record.height,
                timestamp,
                bytes(header_block_record),
            ),
        )
        await cursor.close()
        # weight/total_iters are stored as fixed-width (128-bit) big-endian hex
        cursor_2 = await self.db.execute(
            "INSERT OR REPLACE INTO block_records VALUES(?, ?, ?, ?, ?, ?, ?,?)",
            (
                header_block_record.header.header_hash.hex(),
                header_block_record.header.prev_header_hash.hex(),
                header_block_record.header.height,
                header_block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(),
                header_block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(),
                bytes(block_record),
                None
                if block_record.sub_epoch_summary_included is None
                else bytes(block_record.sub_epoch_summary_included),
                False,
            ),
        )
        await cursor_2.close()
        if len(additional_coin_spends) > 0:
            blob: bytes = bytes(AdditionalCoinSpends(additional_coin_spends))
            cursor_3 = await self.db.execute(
                "INSERT OR REPLACE INTO additional_coin_spends VALUES(?, ?)",
                (header_block_record.header_hash.hex(), blob),
            )
            await cursor_3.close()
async def get_header_block_at(self, heights: List[uint32]) -> List[HeaderBlock]:
if len(heights) == 0:
return []
heights_db = tuple(heights)
formatted_str = f'SELECT block from header_blocks WHERE height in ({"?," * (len(heights_db) - 1)}?)'
cursor = await self.db.execute(formatted_str, heights_db)
rows = await cursor.fetchall()
await cursor.close()
return [HeaderBlock.from_bytes(row[0]) for row in rows]
    async def get_header_block_record(self, header_hash: bytes32) -> Optional[HeaderBlockRecord]:
        """Gets a block record from the database, if present

        Checks the in-memory LRU cache first; on a database hit the record
        is inserted into the cache (the cache is populated only on reads).
        """
        cached = self.block_cache.get(header_hash)
        if cached is not None:
            return cached
        cursor = await self.db.execute("SELECT block from header_blocks WHERE header_hash=?", (header_hash.hex(),))
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            hbr: HeaderBlockRecord = HeaderBlockRecord.from_bytes(row[0])
            self.block_cache.put(hbr.header_hash, hbr)
            return hbr
        else:
            return None
async def get_additional_coin_spends(self, header_hash: bytes32) -> Optional[List[CoinSpend]]:
cursor = await self.db.execute(
"SELECT spends_list_blob from additional_coin_spends WHERE header_hash=?", (header_hash.hex(),)
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
coin_spends: AdditionalCoinSpends = AdditionalCoinSpends.from_bytes(row[0])
return coin_spends.coin_spends_list
else:
return None
async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
cursor = await self.db.execute(
"SELECT block from block_records WHERE header_hash=?",
(header_hash.hex(),),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return BlockRecord.from_bytes(row[0])
return None
    async def get_block_records(
        self,
    ) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
        """
        Returns a dictionary with all blocks, as well as the header hash of the peak,
        if present.
        """
        cursor = await self.db.execute("SELECT header_hash, block, is_peak from block_records")
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        peak: Optional[bytes32] = None
        for row in rows:
            header_hash_bytes, block_record_bytes, is_peak = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
            if is_peak:
                # set_peak() guarantees at most one row has is_peak=1
                assert peak is None  # Sanity check, only one peak
                peak = header_hash
        return ret, peak
    def rollback_cache_block(self, header_hash: bytes32):
        """Evicts a header block from the in-memory LRU cache (e.g. after a failed write)."""
        self.block_cache.remove(header_hash)
async def set_peak(self, header_hash: bytes32) -> None:
cursor_1 = await self.db.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1")
await cursor_1.close()
cursor_2 = await self.db.execute(
"UPDATE block_records SET is_peak=1 WHERE header_hash=?",
(header_hash.hex(),),
)
await cursor_2.close()
    async def get_block_records_close_to_peak(
        self, blocks_n: int
    ) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
        """
        Returns the block records whose height is within ``blocks_n`` of the
        current peak (inclusive), plus the peak's header hash.
        Returns ({}, None) when no peak is recorded (fresh database).
        """
        res = await self.db.execute("SELECT header_hash, height from block_records WHERE is_peak = 1")
        row = await res.fetchone()
        await res.close()
        if row is None:
            return {}, None
        header_hash_bytes, peak_height = row
        peak: bytes32 = bytes32(bytes.fromhex(header_hash_bytes))
        formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {peak_height - blocks_n}"
        cursor = await self.db.execute(formatted_str)
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        for row in rows:
            header_hash_bytes, block_record_bytes = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
        return ret, peak
async def get_header_blocks_in_range(
    self,
    start: int,
    stop: int,
) -> Dict[bytes32, HeaderBlock]:
    """Return the header blocks with start <= height <= stop, keyed by header hash."""
    # Parameterized query (instead of f-string interpolation) so the bounds
    # are bound safely by the SQL driver.
    cursor = await self.db.execute(
        "SELECT header_hash, block from header_blocks WHERE height >= ? and height <= ?",
        (start, stop),
    )
    rows = await cursor.fetchall()
    await cursor.close()
    ret: Dict[bytes32, HeaderBlock] = {}
    for header_hash_bytes, block_record_bytes in rows:
        header_hash = bytes.fromhex(header_hash_bytes)
        ret[header_hash] = HeaderBlock.from_bytes(block_record_bytes)
    return ret
async def get_block_records_in_range(
    self,
    start: int,
    stop: int,
) -> Dict[bytes32, BlockRecord]:
    """
    Return the block records with start <= height <= stop, keyed by
    header hash.
    """
    # Parameterized query (instead of f-string interpolation) so the bounds
    # are bound safely by the SQL driver.
    cursor = await self.db.execute(
        "SELECT header_hash, block from block_records WHERE height >= ? and height <= ?",
        (start, stop),
    )
    rows = await cursor.fetchall()
    await cursor.close()
    ret: Dict[bytes32, BlockRecord] = {}
    for header_hash_bytes, block_record_bytes in rows:
        header_hash = bytes.fromhex(header_hash_bytes)
        ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
    return ret
async def get_peak_heights_dicts(self) -> Tuple[Dict[uint32, bytes32], Dict[uint32, SubEpochSummary]]:
    """
    Return two dicts describing the main chain: {height: header_hash} and
    {height: SubEpochSummary}, built by walking back from the peak to
    genesis. Both dicts are empty when no peak is marked.
    """
    res = await self.db.execute("SELECT header_hash from block_records WHERE is_peak = 1")
    row = await res.fetchone()
    await res.close()
    if row is None:
        # No peak yet: nothing is on the main chain.
        return {}, {}
    peak: bytes32 = bytes.fromhex(row[0])
    cursor = await self.db.execute("SELECT header_hash,prev_hash,height,sub_epoch_summary from block_records")
    rows = await cursor.fetchall()
    await cursor.close()
    # Index every stored record by header hash so the chain walk below is O(1)
    # per step. This includes orphan blocks; they are filtered out by the walk.
    hash_to_prev_hash: Dict[bytes32, bytes32] = {}
    hash_to_height: Dict[bytes32, uint32] = {}
    hash_to_summary: Dict[bytes32, SubEpochSummary] = {}
    for row in rows:
        hash_to_prev_hash[bytes.fromhex(row[0])] = bytes.fromhex(row[1])
        hash_to_height[bytes.fromhex(row[0])] = row[2]
        if row[3] is not None:
            hash_to_summary[bytes.fromhex(row[0])] = SubEpochSummary.from_bytes(row[3])
    height_to_hash: Dict[uint32, bytes32] = {}
    sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
    # Walk backwards from the peak to height 0 so only main-chain blocks
    # (not orphans) end up in the returned dicts.
    curr_header_hash = peak
    curr_height = hash_to_height[curr_header_hash]
    while True:
        height_to_hash[curr_height] = curr_header_hash
        if curr_header_hash in hash_to_summary:
            sub_epoch_summaries[curr_height] = hash_to_summary[curr_header_hash]
        if curr_height == 0:
            break
        curr_header_hash = hash_to_prev_hash[curr_header_hash]
        curr_height = hash_to_height[curr_header_hash]
    return height_to_hash, sub_epoch_summaries
| 39.766154 | 116 | 0.640669 |
f527196844715fa6a8a6522aeb82c419ed53b5ff | 232 | py | Python | tests/test_cmatmul.py | Marcel-Rodekamp/MLP | 349ac8e10679e2ec53980908c580902996a493e7 | [
"MIT"
] | 1 | 2021-06-15T09:01:09.000Z | 2021-06-15T09:01:09.000Z | tests/test_cmatmul.py | Marcel-Rodekamp/MLP | 349ac8e10679e2ec53980908c580902996a493e7 | [
"MIT"
] | null | null | null | tests/test_cmatmul.py | Marcel-Rodekamp/MLP | 349ac8e10679e2ec53980908c580902996a493e7 | [
"MIT"
] | null | null | null | import torch
from MLP.complexifyTorch import cmatmul
def test_forward():
    """Smoke test: multiply two complex scaled identity matrices with cmatmul."""
    lhs = (2 + 1j) * torch.eye(2, 2, dtype=torch.cdouble)
    rhs = (2 + 2j) * torch.eye(2, 2, dtype=torch.cdouble)
    print(lhs)
    print(cmatmul(lhs, rhs))


test_forward()
| 19.333333 | 51 | 0.668103 |
4fd9001fd614691e174098e899b761206568c22a | 2,652 | py | Python | tests/operators/gpu/gk_fused/test_concat_prim.py | xqdan/akg | e28501611d73d3957a1f3c58eeb6b028f2f2765d | [
"Apache-2.0"
] | null | null | null | tests/operators/gpu/gk_fused/test_concat_prim.py | xqdan/akg | e28501611d73d3957a1f3c58eeb6b028f2f2765d | [
"Apache-2.0"
] | null | null | null | tests/operators/gpu/gk_fused/test_concat_prim.py | xqdan/akg | e28501611d73d3957a1f3c58eeb6b028f2f2765d | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from __future__ import absolute_import
import numpy as np
from gen_random import random_gaussian
from akg.utils import kernel_exec as utils
from akg.ops.gk_fused_regis import concat_prim_manual, concat_prim_auto
from test_elem import elem, elem_input_hrz, elem_output_hrz, elem_diamond
from test_functions import test_single_out, test_multi_out
def gen_data(shape_lhs, shape_rhs, dtype, concat_axis, multi_out, func_name):
    """Generate random inputs, NaN-filled output buffers and the expected result."""
    np_dtype = {"float16": np.float16, "float32": np.float32}[dtype]
    lhs = random_gaussian(shape_lhs, miu=1, sigma=0.1).astype(np_dtype)
    rhs = random_gaussian(shape_rhs, miu=1, sigma=0.1).astype(np_dtype)
    expect = compute_expect(lhs, rhs, concat_axis, multi_out, func_name)
    if isinstance(expect, (list, tuple)):
        # Multi-output fusion: one NaN buffer per expected array.
        output = [np.full(np.shape(e), np.nan, dtype) for e in expect]
    else:
        output = np.full(np.shape(expect), np.nan, dtype)
    return lhs, rhs, output, expect
def compute_expect(lhs, rhs, concat_axis, multi_out, func_name):
    """Reference result: self-concatenate each input, then apply the named
    elementwise fusion function (looked up in this module's globals)."""
    doubled_lhs = np.concatenate((lhs, lhs), axis=concat_axis)
    doubled_rhs = np.concatenate((rhs, rhs), axis=concat_axis)
    return globals()[func_name](doubled_lhs, doubled_rhs, multi_out)
def test_concat_prim(shape_lhs, shape_rhs, dtype, concat_axis=0, multi_out=False, poly_sch=False, fusion_mode=''):
    """Build the fused concat kernel (manual or poly scheduling) and check it
    against a numpy reference."""
    shape_list = [shape_lhs, shape_rhs]
    dtype_list = [dtype, dtype]
    op_attrs = [concat_axis, multi_out, fusion_mode]
    func_name = "elem_" + fusion_mode if fusion_mode != '' else "elem"
    if poly_sch:
        mod = utils.op_build(concat_prim_auto, shape_list, dtype_list, op_attrs=op_attrs, attrs={"target": "cuda"})
    else:
        mod = utils.op_build(concat_prim_manual, shape_list, dtype_list, op_attrs=op_attrs)
    lhs, rhs, output, expect = gen_data(shape_lhs, shape_rhs, dtype, concat_axis, multi_out, func_name)
    inputs = [lhs, rhs]
    # Horizontal-output fusion always produces multiple outputs.
    if multi_out or fusion_mode == "output_hrz":
        test_multi_out(mod, inputs, output, expect)
    else:
        test_single_out(mod, inputs, output, expect)
| 47.357143 | 114 | 0.74095 |
d9b677565d8a3bbf1367af953fc4a149823b6928 | 783 | py | Python | smartbot/plugins/autojoin.py | Muzer/smartbot | c18daac6b066a7d368ef3dd0848a21c16a076604 | [
"MIT"
] | null | null | null | smartbot/plugins/autojoin.py | Muzer/smartbot | c18daac6b066a7d368ef3dd0848a21c16a076604 | [
"MIT"
] | 11 | 2015-01-01T21:34:05.000Z | 2015-06-03T11:13:44.000Z | smartbot/plugins/autojoin.py | Muzer/smartbot | c18daac6b066a7d368ef3dd0848a21c16a076604 | [
"MIT"
] | null | null | null | import smartbot.plugin
from smartbot.formatting import Style
class Plugin(smartbot.plugin.Plugin):
    """
    A plugin which automatically joins channels/rooms when the bot connects to
    the server.
    You should provide a list of channels to join in the plugin configuration,
    under the name 'channels'.
    """
    # Command names under which this plugin is registered.
    names = ["autojoin"]

    def __init__(self, channels):
        # channels: iterable of channel/room names to join on connect.
        self.channels = channels

    def on_ready(self):
        """Join all the channels."""
        for channel in self.channels:
            self.bot.join(channel)

    def on_command(self, msg, stdin, stdout):
        # Respond to the "autojoin" command by listing the configured channels.
        print(" ".join(self.channels), file=stdout)

    def on_help(self):
        """Get help about the plugin."""
        return "{}".format(self.bot.format("autojoin", Style.bold))
| 27 | 78 | 0.64751 |
99e2ccff828adc948c31653ea9ea94def5b7a4fb | 759 | py | Python | docs/_themes/faculty_sphinx_theme/__init__.py | alshapton/Pyntel4004-CLI | d3b0a6ef14ee8b3b90fc01ed3ee489e8804784e6 | [
"MIT"
] | 6 | 2021-02-12T21:37:53.000Z | 2022-02-24T23:09:37.000Z | docs/_themes/faculty_sphinx_theme/__init__.py | alshapton/Pyntel4004-CLI | d3b0a6ef14ee8b3b90fc01ed3ee489e8804784e6 | [
"MIT"
] | 43 | 2021-04-23T09:32:24.000Z | 2022-02-01T15:17:09.000Z | docs/_themes/faculty_sphinx_theme/__init__.py | alshapton/Pyntel4004-cli | d3b0a6ef14ee8b3b90fc01ed3ee489e8804784e6 | [
"MIT"
] | 2 | 2021-06-11T01:12:44.000Z | 2021-09-14T22:44:11.000Z | # Copyright 2020 Faculty Science Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def setup(app):
"""Setup for Sphinx doc theme."""
app.add_html_theme(
"faculty-sphinx-theme", os.path.abspath(os.path.dirname(__file__))
)
| 31.625 | 74 | 0.737813 |
374853c846f971d0ee76c06f63789599ee085374 | 2,660 | py | Python | data/p3BR/R2/benchmark/startCirq164.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startCirq164.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startCirq164.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=3
# total number=31
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the fixed benchmark circuit (auto-generated gate sequence; the
    `# number=` comments index the original gate list) and append a terminal
    measurement of all qubits under key 'result'. `n` is unused here."""
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0]))  # number=1
    c.append(cirq.H.on(input_qubit[2]))  # number=28
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[2]))  # number=29
    c.append(cirq.H.on(input_qubit[2]))  # number=30
    c.append(cirq.X.on(input_qubit[2]))  # number=12
    c.append(cirq.H.on(input_qubit[2]))  # number=25
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[2]))  # number=26
    c.append(cirq.H.on(input_qubit[2]))  # number=27
    c.append(cirq.H.on(input_qubit[1]))  # number=7
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[1]))  # number=8
    c.append(cirq.H.on(input_qubit[1]))  # number=9
    c.append(cirq.H.on(input_qubit[1]))  # number=18
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[1]))  # number=19
    c.append(cirq.H.on(input_qubit[1]))  # number=20
    c.append(cirq.Y.on(input_qubit[1]))  # number=14
    c.append(cirq.H.on(input_qubit[1]))  # number=22
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[1]))  # number=23
    c.append(cirq.H.on(input_qubit[1]))  # number=24
    c.append(cirq.Z.on(input_qubit[2]))  # number=3
    c.append(cirq.X.on(input_qubit[1]))  # number=17
    c.append(cirq.Y.on(input_qubit[2]))  # number=5
    c.append(cirq.X.on(input_qubit[2]))  # number=21
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))  # number=15
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))  # number=16
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))

    return c
def bitstring(bits):
    """Join measurement outcomes into a single string of decimal digits."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile the abstract circuit for Google's Sycamore gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    # Histogram of measurement outcomes, keyed by their bitstring form.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    writefile = open("../data/startCirq164.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)

    writefile.close()
bdd8962a8b6d942921a7221b59cf9e72c2939681 | 5,676 | py | Python | models/losses/loss.py | shinya7y/gangealing | 6e6897640145544496c3115bf3f5b6209c89c7a0 | [
"BSD-2-Clause"
] | 605 | 2021-12-10T00:56:58.000Z | 2022-03-31T03:34:08.000Z | models/losses/loss.py | guoyang-xie/gangealing | 353e059d322105dbc0389409b3579772c0673839 | [
"BSD-2-Clause"
] | 7 | 2021-12-10T11:40:19.000Z | 2022-03-31T07:05:29.000Z | models/losses/loss.py | guoyang-xie/gangealing | 353e059d322105dbc0389409b3579772c0673839 | [
"BSD-2-Clause"
] | 71 | 2021-12-10T01:00:55.000Z | 2022-03-30T04:57:16.000Z | import torch
def total_variation_loss(delta_flow, reduce_batch=True):
    """Smooth-L1 total variation of a residual flow field.

    delta_flow has shape (N, H, W, 2). When reduce_batch is False, the mean
    is taken per-sample (over H, W and the two flow channels) instead of over
    the whole batch.
    """
    assert delta_flow.size(-1) == 2
    mean_dims = (0, 1, 2, 3) if reduce_batch else (1, 2, 3)

    def huber(magnitudes):
        # Quadratic for magnitudes <= 1, linear beyond (smooth L1).
        penalized = torch.where(magnitudes <= 1.0, 0.5 * magnitudes.pow(2), magnitudes - 0.5)
        return penalized.mean(dim=mean_dims)

    vertical_tv = huber((delta_flow[:, :-1, :, :] - delta_flow[:, 1:, :, :]).abs())
    horizontal_tv = huber((delta_flow[:, :, :-1, :] - delta_flow[:, :, 1:, :]).abs())
    return horizontal_tv + vertical_tv
def flow_identity_loss(delta_flow):
    """L2 penalty pushing the residual flow toward zero (identity warp)."""
    return (delta_flow ** 2).mean()
def sample_gan_supervised_pairs(generator, ll, resize_fake2stn, psi, batch, dim_latent, freeze_ll, device, z=None):
    """Sample (unaligned input, aligned target) image pairs from the GAN.

    Gradients flow through this sampling only when the latent transformer
    `ll` is not frozen. If `z` is None a fresh batch of latents is drawn.
    """
    with torch.set_grad_enabled(not freeze_ll):
        latents = torch.randn(batch, dim_latent, device=device) if z is None else z
        unaligned_in, w_noise = generator([latents], noise=None, return_latents=True)
        # Map the first W-space vector through the latent transformer.
        w_aligned = ll([w_noise[:, 0, :]], psi=psi)
        aligned_target, _ = generator(w_aligned, input_is_latent=True, noise=None)
        aligned_target = resize_fake2stn(aligned_target)
    return unaligned_in, aligned_target
def assign_fake_images_to_clusters(generator, stn, ll, loss_fn, resize_fake2stn, psi, batch, dim_latent, freeze_ll,
                                   num_heads, flips, device, sample_from_full_res=True, z=None, **stn_kwargs):
    """
    This function generates fake images, congeals them with the STN and then assigns the congealed images
    to their clusters.
    :return assignments_over_clusters_and_flips: a torch.min object with size (N,). Has indices and values fields to
                access the results of the min operation.
            aligned_pred: (N*num_heads*(1+flips), C, flow_size, flow_size) The congealed output images output by the STN
            delta_flow: (N*num_heads*(1+flips), flow_size, flow_size, 2) The residual flow regressed by the STN
    """
    unaligned_in, aligned_target = sample_gan_supervised_pairs(generator, ll, resize_fake2stn, psi, batch, dim_latent,
                                                               freeze_ll, device, z)
    if flips:  # Try both the flipped and unflipped versions of unaligned_in, and eventually take the min of the two losses
        unaligned_in = torch.cat([unaligned_in, unaligned_in.flip(3, )], 0)  # (2 * N, C, H, W)
        aligned_target = aligned_target.repeat(2, 1, 1, 1)  # (2 * N, C, H, W)
        loss_size = (2, batch, num_heads)
    else:
        loss_size = (batch, num_heads)
    # When sampling from full resolution, the STN warps pixels from the
    # original-size image rather than the resized copy it sees as input.
    input_img_for_sampling = unaligned_in if sample_from_full_res else None
    resized_unaligned_in = resize_fake2stn(unaligned_in)
    aligned_pred, delta_flow = stn(resized_unaligned_in, return_flow=True,
                                   input_img_for_sampling=input_img_for_sampling, **stn_kwargs)
    perceptual_loss = loss_fn(aligned_pred, aligned_target).view(*loss_size)
    if flips:  # Merge cluster and flip dimensions:
        distance_collapsed = perceptual_loss.permute(1, 0, 2).reshape(batch, 2 * num_heads)  # (2, N, H) --> (N, 2 * H)
    else:
        distance_collapsed = perceptual_loss
    # Each sample is assigned to the (cluster, flip) combination with the
    # smallest perceptual distance.
    assignments_over_clusters_and_flips = distance_collapsed.min(dim=1)  # (N,)
    return assignments_over_clusters_and_flips, aligned_pred, delta_flow, unaligned_in, resized_unaligned_in, distance_collapsed
def gangealing_loss(generator, stn, ll, loss_fn, resize_fake2stn, psi, batch, dim_latent, freeze_ll, device, sample_from_full_res=False, **stn_kwargs):
    """The basic reconstruction loss used for GANgealing.

    Important: Using a consistent set of noise images for both unaligned_in and
    aligned_target surprisingly makes results much worse in some cases!
    It turns out that it is actually better to have noise randomized between
    forward passes.
    """
    unaligned_in, aligned_target = sample_gan_supervised_pairs(
        generator, ll, resize_fake2stn, psi, batch, dim_latent, freeze_ll, device, z=None
    )
    sampling_source = unaligned_in if sample_from_full_res else None
    aligned_pred, delta_flow = stn(
        resize_fake2stn(unaligned_in),
        return_flow=True,
        input_img_for_sampling=sampling_source,
        **stn_kwargs,
    )
    return loss_fn(aligned_pred, aligned_target).mean(), delta_flow
def gangealing_cluster_loss(generator, stn, ll, loss_fn, resize_fake2stn, psi, batch, dim_latent, freeze_ll, num_heads,
                            flips, device, sample_from_full_res=True, **stn_kwargs):
    """The reconstruction loss used in clustering variants of GANgealing:
    each sample contributes only its best (cluster, flip) perceptual loss,
    and only the flow of the assigned cluster is returned for regularization."""
    assignments, _, delta_flow, _, _, _ = \
        assign_fake_images_to_clusters(generator, stn, ll, loss_fn, resize_fake2stn, psi, batch, dim_latent, freeze_ll,
                                       num_heads, flips, device, sample_from_full_res, z=None, **stn_kwargs)
    assigned_perceptual_loss = assignments.values.mean()
    HW2 = delta_flow.size()[1:]  # delta_flow is of size (2 * N * args.num_heads, H, W, 2)
    if flips:  # Only the delta_flows corresponding to the assigned clusters get regularized:
        delta_flow = delta_flow.view(2, batch, num_heads, *HW2)  # (2, N, num_heads, H, W, 2)
        # Reorder so the flip axis merges with the cluster axis, matching the
        # (N, 2 * num_heads) index space used by assignments.indices.
        delta_flow = delta_flow.permute(1, 0, 2, 3, 4, 5).reshape(batch, 2 * num_heads, *HW2)
    else:
        delta_flow = delta_flow.view(batch, num_heads, *HW2)
    delta_flow = delta_flow[torch.arange(batch), assignments.indices]  # (N, H, W, 2)
    return assigned_perceptual_loss, delta_flow
| 61.032258 | 151 | 0.679175 |
bf2c07f85b73f9702f647031f5a8197affe68335 | 1,390 | py | Python | launchpad/pip_package/launchpad_version.py | leloykun/launchpad | f591b5931cbba89bc94050a126d86cd1bda312f5 | [
"Apache-2.0"
] | 1 | 2021-05-02T22:03:23.000Z | 2021-05-02T22:03:23.000Z | launchpad/pip_package/launchpad_version.py | leloykun/launchpad | f591b5931cbba89bc94050a126d86cd1bda312f5 | [
"Apache-2.0"
] | null | null | null | launchpad/pip_package/launchpad_version.py | leloykun/launchpad | f591b5931cbba89bc94050a126d86cd1bda312f5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define Launchpad version information."""
# We follow Semantic Versioning (https://semver.org/)
_MAJOR_VERSION = '0'
_MINOR_VERSION = '1'
_PATCH_VERSION = '0'

# When building releases, we can update this value on the release branch to
# reflect the current release candidate ('rc0', 'rc1') or, finally, the official
# stable release (indicated by `_REL_SUFFIX = ''`). Outside the context of a
# release branch, the current version is by default assumed to be a
# 'development' version, labeled 'dev'.
_DEV_SUFFIX = 'dev'
_REL_SUFFIX = 'rc0'

# Example, '0.4.0rc0'
__version__ = '.'.join([
    _MAJOR_VERSION,
    _MINOR_VERSION,
    _PATCH_VERSION,
])
# e.g. '0.1.0.dev' (development) and '0.1.0rc0' (release candidate).
__dev_version__ = '{}.{}'.format(__version__, _DEV_SUFFIX)
__rel_version__ = '{}{}'.format(__version__, _REL_SUFFIX)
| 35.641026 | 80 | 0.741007 |
53e9eddf52b295d38d8dfbc4bb07fb6db4f6d0a0 | 3,238 | py | Python | semester-6/Python Practice/matrix.py | saranshbht/bsc-codes | 7386c09cc986de9c84947f7dea7db3dc42219a35 | [
"MIT"
] | 3 | 2021-03-22T12:07:14.000Z | 2021-08-30T17:28:23.000Z | semester-6/Python Practice/matrix.py | saranshbht/bsc-codes | 7386c09cc986de9c84947f7dea7db3dc42219a35 | [
"MIT"
] | null | null | null | semester-6/Python Practice/matrix.py | saranshbht/bsc-codes | 7386c09cc986de9c84947f7dea7db3dc42219a35 | [
"MIT"
] | null | null | null | def addition(a, b):
if len(a) != len(b) or len(a[0]) != len(b[0]):
return "Addition not possible"
c = []
for i in range(0, len(a)):
temp = []
for j in range(len(a[0])):
temp.append(a[i][j] + b[i][j])
c.append(temp)
return c
def subtraction(a, b):
    """Element-wise difference a - b of two matrices.

    Returns the string "Subtraction not possible" when the shapes differ.
    """
    if len(a) != len(b) or len(a[0]) != len(b[0]):
        return "Subtraction not possible"
    return [[x - y for x, y in zip(row_a, row_b)] for row_a, row_b in zip(a, b)]
def transpose(a):
    """Return the transpose of matrix a (rows become columns)."""
    num_cols = len(a[0])
    return [[row[c] for row in a] for c in range(num_cols)]
def multiplication(a, b):
    """Matrix product of a and b.

    Returns the string "Multiplication not possible" when the inner
    dimensions do not match.
    """
    if len(a[0]) != len(b):
        return "Multiplication not possible"
    result = []
    for row in a:
        # Each output entry is the dot product of a row of `a` with a column of `b`.
        result.append([sum(x * y for x, y in zip(row, col)) for col in zip(*b)])
    return result
def inputMatrix():
    """Read a matrix from stdin: dimensions first, then entries one per line."""
    rows = int(input("Enter number of rows of matrix: "))
    cols = int(input("Enter number of columns of matrix: "))
    print("Enter matrix")
    return [[int(input()) for _ in range(cols)] for _ in range(rows)]
def printMatrix(res):
    """Print the matrix row by row, entries separated by single spaces."""
    for row in res:
        for value in row:
            print(value, end=' ')
        print()
if __name__ == "__main__":
while True:
print("Menu")
print("1.Addition\n2.Subtraction\n3.Multiplication\n4.Transpose\n0.Exit")
choice = int(input("Enter your choice: "))
if choice == 0:
break
elif choice == 1:
a = inputMatrix()
print("Matrix A")
printMatrix(a)
b = inputMatrix()
print("Matrix B")
printMatrix(b)
res = addition(a, b)
if type(res) == str:
print(res)
else:
print("Matrix A + B")
printMatrix(res)
elif choice == 2:
a = inputMatrix()
print("Matrix A")
printMatrix(a)
b = inputMatrix()
print("Matrix B")
printMatrix(b)
res = subtraction(a, b)
if type(res) == str:
print(res)
else:
print("Matrix A - B")
printMatrix(res)
elif choice == 3:
a = inputMatrix()
print("Matrix A")
printMatrix(a)
b = inputMatrix()
print("Matrix B")
printMatrix(b)
res = multiplication(a, b)
if type(res) == str:
print(res)
else:
print("Matrix A * B")
printMatrix(res)
elif choice == 4:
a = inputMatrix()
print("Matrix A")
printMatrix(b)
res = transpose(a)
if type(res) == str:
print(res)
else:
print("Matrix A`")
printMatrix(res)
| 26.112903 | 81 | 0.440395 |
618e373b307aec4ad37a1bab18debf26f7b04ca9 | 3,978 | py | Python | src/project/settings/base.py | fahadalmutairi/edgetest | cc37a9cd53da74bd32f69f7a099679bccf0268a7 | [
"MIT"
] | null | null | null | src/project/settings/base.py | fahadalmutairi/edgetest | cc37a9cd53da74bd32f69f7a099679bccf0268a7 | [
"MIT"
] | null | null | null | src/project/settings/base.py | fahadalmutairi/edgetest | cc37a9cd53da74bd32f69f7a099679bccf0268a7 | [
"MIT"
] | null | null | null | """
Django settings for project project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from django.core.urlresolvers import reverse_lazy
from os.path import dirname, join, exists
# Build paths inside the project like this: join(BASE_DIR, "directory")
BASE_DIR = dirname(dirname(dirname(__file__)))
STATICFILES_DIRS = [join(BASE_DIR, 'static')]
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = "/media/"

# Use Django templates using the new Django 1.8 TEMPLATES settings
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            join(BASE_DIR, 'templates'),
            # insert more TEMPLATE_DIRS here
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Use 12factor inspired environment variables or from a file
import environ
env = environ.Env()

# Ideally move env file should be outside the git repo
# i.e. BASE_DIR.parent.parent
env_file = join(dirname(__file__), 'local.env')
if exists(env_file):
    environ.Env.read_env(str(env_file))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Raises ImproperlyConfigured exception if SECRET_KEY not in os.environ
SECRET_KEY = env('SECRET_KEY')

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = (
    'django.contrib.auth',
    'django_admin_bootstrapped',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'authtools',
    'crispy_forms',
    'easy_thumbnails',
    'profiles',
    'accounts',
    'main',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'project.urls'

WSGI_APPLICATION = 'project.wsgi.application'

# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    # Raises ImproperlyConfigured exception if DATABASE_URL not in
    # os.environ
    'default': env.db(),
}

# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'

# NOTE(review): ALLOWED_HOSTS is assigned twice in this module (also above);
# this second assignment is redundant — confirm before removing.
ALLOWED_HOSTS = []

# Crispy Form Theme - Bootstrap 3
CRISPY_TEMPLATE_PACK = 'bootstrap3'

# For Bootstrap 3, change error alert to 'danger'
from django.contrib import messages
MESSAGE_TAGS = {
    messages.ERROR: 'danger'
}

# Authentication Settings
AUTH_USER_MODEL = 'authtools.User'
LOGIN_REDIRECT_URL = reverse_lazy("profiles:show_self")
LOGIN_URL = reverse_lazy("accounts:login")

THUMBNAIL_EXTENSION = 'png'     # Or any extn for your thumbnails
e8a4d387e5f36fade29840902f5eb0269290694e | 47 | py | Python | tensortrade/base/__init__.py | andrewczgithub/tensortrade | b5f5d14c220bcab3394b02286ffd0f52853f519e | [
"Apache-2.0"
] | null | null | null | tensortrade/base/__init__.py | andrewczgithub/tensortrade | b5f5d14c220bcab3394b02286ffd0f52853f519e | [
"Apache-2.0"
] | 1 | 2019-12-14T23:25:00.000Z | 2019-12-14T23:25:00.000Z | tensortrade/base/__init__.py | andrewczgithub/tensortrade | b5f5d14c220bcab3394b02286ffd0f52853f519e | [
"Apache-2.0"
] | null | null | null | from .component import *
from .context import * | 23.5 | 24 | 0.765957 |
4d49958f5d35385dcc9a96dee660be7f3fbd6426 | 1,119 | py | Python | test/integration/test_e2e.py | fredhallgren/nystrompca | 7f8923af08551ad477a446c383822b555326f4bf | [
"Apache-2.0"
] | 4 | 2021-09-14T08:46:10.000Z | 2021-10-31T09:44:06.000Z | test/integration/test_e2e.py | fredhallgren/nystrompca | 7f8923af08551ad477a446c383822b555326f4bf | [
"Apache-2.0"
] | null | null | null | test/integration/test_e2e.py | fredhallgren/nystrompca | 7f8923af08551ad477a446c383822b555326f4bf | [
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Fredrik Hallgren
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
End-to-end test that runs the bound experiments to ensure
successful completion
"""
from numpy.testing import assert_equal
from nystrompca.experiments.bound_experiments import main as bound_main
from nystrompca.experiments.methods_experiments import main as methods_main
def test_methods_experiments():
return_code = methods_main(n=20, m=10, d=5, noplot=True)
assert_equal(return_code, 0)
def test_bound_experiments():
return_code = bound_main(n=100, m=10, d=5, noplot=True)
assert_equal(return_code, 0)
| 27.292683 | 75 | 0.76765 |
fd788604d501569a4b155e20da962186a56d7569 | 27,216 | py | Python | shutterstock_api/models/audio.py | Lumen5/shutterstock-api | d26db2c9cd6688cf828ad15478bf1b4701150a3f | [
"Adobe-Glyph"
] | 1 | 2021-02-23T16:15:16.000Z | 2021-02-23T16:15:16.000Z | shutterstock_api/models/audio.py | Lumen5/shutterstock-api | d26db2c9cd6688cf828ad15478bf1b4701150a3f | [
"Adobe-Glyph"
] | 17 | 2019-07-13T01:23:08.000Z | 2022-03-21T07:17:35.000Z | shutterstock_api/models/audio.py | Lumen5/shutterstock-api | d26db2c9cd6688cf828ad15478bf1b4701150a3f | [
"Adobe-Glyph"
] | 1 | 2021-03-07T19:16:27.000Z | 2021-03-07T19:16:27.000Z | # coding: utf-8
"""
Shutterstock API Reference
The Shutterstock API provides access to Shutterstock's library of media, as well as information about customers' accounts and the contributors that provide the media. # noqa: E501
OpenAPI spec version: 1.0.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from shutterstock_api.models.album import Album # noqa: F401,E501
from shutterstock_api.models.artist import Artist # noqa: F401,E501
from shutterstock_api.models.audio_assets import AudioAssets # noqa: F401,E501
from shutterstock_api.models.contributor import Contributor # noqa: F401,E501
from shutterstock_api.models.model_release import ModelRelease # noqa: F401,E501
def _swagger_property(name, required):
    """Build the property object for swagger attribute *name*.

    The getter returns ``self._<name>``.  The setter stores the value on
    ``self._<name>`` after rejecting ``None`` for required attributes,
    raising the exact ValueError message the generated per-attribute
    setters used.
    """
    def _get(self):
        return getattr(self, '_' + name)

    def _set(self, value):
        if required and value is None:
            raise ValueError("Invalid value for `%s`, must not be `None`" % name)  # noqa: E501
        setattr(self, '_' + name, value)

    _get.__doc__ = "Gets the %s of this Audio." % name
    _set.__doc__ = "Sets the %s of this Audio." % name
    return property(_get, _set)


class Audio(object):
    """Swagger model for an audio track.

    NOTE: This class was originally auto generated by the swagger code
    generator program.  The 31 identical hand-written getter/setter pairs
    have been replaced by properties generated from ``swagger_types`` (see
    the loop directly below the class body); the public interface --
    constructor signature, attribute access, required-field validation and
    helper methods -- is unchanged.

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    swagger_types = {
        'added_date': 'date',
        'affiliate_url': 'str',
        'album': 'Album',
        'artists': 'list[Artist]',
        'assets': 'AudioAssets',
        'bpm': 'int',
        'contributor': 'Contributor',
        'deleted_time': 'datetime',
        'description': 'str',
        'duration': 'int',
        'genres': 'list[str]',
        'id': 'str',
        'instruments': 'list[str]',
        'is_adult': 'bool',
        'is_instrumental': 'bool',
        'isrc': 'str',
        'keywords': 'list[str]',
        'language': 'str',
        'lyrics': 'str',
        'media_type': 'str',
        'model_releases': 'list[ModelRelease]',
        'moods': 'list[str]',
        'published_time': 'datetime',
        'recording_version': 'str',
        'releases': 'list[str]',
        'similar_artists': 'list[Artist]',
        'submitted_time': 'datetime',
        'title': 'str',
        'updated_time': 'datetime',
        'vocal_description': 'str',
        'url': 'str'
    }

    # Every JSON key equals the attribute name, so derive the map instead of
    # repeating all 31 pairs by hand.
    attribute_map = {key: key for key in swagger_types}

    # Attributes whose setters must reject None (required in the spec).
    _required = frozenset(('contributor', 'id', 'media_type'))

    def __init__(self, added_date=None, affiliate_url=None, album=None, artists=None, assets=None, bpm=None, contributor=None, deleted_time=None, description=None, duration=None, genres=None, id=None, instruments=None, is_adult=None, is_instrumental=None, isrc=None, keywords=None, language=None, lyrics=None, media_type=None, model_releases=None, moods=None, published_time=None, recording_version=None, releases=None, similar_artists=None, submitted_time=None, title=None, updated_time=None, vocal_description=None, url=None):  # noqa: E501
        """Audio - a model defined in Swagger"""  # noqa: E501
        params = dict(locals())  # snapshot of the keyword arguments by name
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        # Required attributes are always routed through their validating
        # setter (so a missing one raises ValueError, as before); optional
        # ones only when a value was supplied -- exactly the behaviour of
        # the original generated if-chains, in the same attribute order.
        for attr in self.swagger_types:
            value = params[attr]
            if attr in self._required or value is not None:
                setattr(self, attr, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        # dict.items() replaces six.iteritems -- works on Py2 and Py3 and
        # drops the dependency on six.
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        if issubclass(Audio, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Audio):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


# Attach one property per swagger attribute (replaces ~600 lines of
# generated boilerplate); behaviour matches the original getter/setter
# pairs, including the ValueError raised by required-field setters.
for _attr in Audio.swagger_types:
    setattr(Audio, _attr, _swagger_property(_attr, _attr in Audio._required))
del _attr
| 28.379562 | 542 | 0.588882 |
b7b45546b8dbe8c366b294795e071034004d54e3 | 7,625 | py | Python | Bell_EBM/Map.py | ZYVE255/ebm-optimizer | 9b1cf6014f987ef4b8d65d4a5659c704b6ea15c4 | [
"MIT"
] | 1 | 2021-03-22T06:48:54.000Z | 2021-03-22T06:48:54.000Z | Bell_EBM/Map.py | taylorbell57/Bell_EBM | 2468251d91c18f6dc2a9e8f2bebdc60988ec9303 | [
"MIT"
] | 10 | 2018-11-02T18:57:03.000Z | 2019-11-28T18:02:33.000Z | Bell_EBM/Map.py | ZYVE255/ebm-optimizer | 9b1cf6014f987ef4b8d65d4a5659c704b6ea15c4 | [
"MIT"
] | 3 | 2019-01-28T21:21:51.000Z | 2021-12-14T08:50:35.000Z | # Author: Taylor Bell
# Last Update: 2019-02-15
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from . import H2_Dissociation_Routines as h2
class Map(object):
    """A rectangular latitude/longitude map of a planet.

    Attributes:
        dissValues (ndarray): The H2 dissociation fraction values for the map.
        dlat (float): Latitudinal size of each pixel in degrees.
        dlon (float): Longitudinal size of each pixel in degrees.
        lat (ndarray): The unique latitude values in degrees.
        lat_radians (ndarray): The unique latitude values in radians.
        latGrid (ndarray): The latitude grid in degrees.
        latGrid_radians (ndarray): The latitude grid in radians.
        lon (ndarray): The unique longitude values in degrees.
        lon_radians (ndarray): The unique longitude values in radians.
        lonGrid (ndarray): The longitude grid in degrees.
        lonGrid_radians (ndarray): The longitude grid in radians.
        nlat (int): The number of latitudinal cells.
        nlon (int): The number of longitudinal cells.
        npix (int): The total number of pixels (nlat*nlon).
        pixArea (ndarray): The area of each pixel (solid angle on the unit
            sphere; the full map sums to 4*pi).
        time (float): Time of map in days.
        values (ndarray): The temperature map values.
    """

    def __init__(self, values=None, dissValues=None, time=0., nlat=16, nlon=None):
        """Initialization function.

        Args:
            values (ndarray, optional): The temperature map values
                (must contain exactly nlat*nlon elements).
            dissValues (ndarray, optional): The H2 dissociation fraction
                values for the map (must contain exactly nlat*nlon elements).
            time (float, optional): Time of map in days.
            nlat (int, optional): The number of latitudinal cells.
            nlon (int, optional): The number of longitudinal cells.
                If nlon is None, uses 2*nlat.
        """
        self.time = time
        self.nlat = int(nlat)
        if nlon is None:  # identity comparison, not ==
            self.nlon = int(2*self.nlat)
        else:
            self.nlon = int(nlon)
        self.npix = int(self.nlat*self.nlon)

        # Pixel centres in latitude/longitude (degrees).
        self.dlat = 180./self.nlat
        self.lat = np.linspace(-90.+self.dlat/2., 90.-self.dlat/2., self.nlat, endpoint=True)
        latTop = self.lat+self.dlat/2.
        latBot = self.lat-self.dlat/2.

        self.dlon = 360./self.nlon
        self.lon = np.linspace(-180.+self.dlon/2., 180.-self.dlon/2., self.nlon, endpoint=True)
        lonRight = self.lon+self.dlon/2.
        lonLeft = self.lon-self.dlon/2.

        # Solid angle of each latitude band, split proportionally in longitude.
        latArea = np.abs(2.*np.pi*(np.sin(latTop*np.pi/180.)-np.sin(latBot*np.pi/180.)))
        areas = latArea.reshape(-1,1)*(np.abs(lonRight-lonLeft)/360.).reshape(1,-1)

        lonGrid, latGrid = np.meshgrid(self.lon, self.lat)
        self.pixArea = areas
        self.latGrid = latGrid
        self.lonGrid = lonGrid

        self.lat_radians = self.lat*np.pi/180
        self.lon_radians = self.lon*np.pi/180
        self.latGrid_radians = self.latGrid*np.pi/180.
        self.lonGrid_radians = self.lonGrid*np.pi/180.

        if values is None:
            self.values = np.zeros_like(self.lonGrid)
        else:
            values = self._checked_size(values)
            if values is None:
                # Legacy behaviour: abort initialization after printing.
                return
            self.values = values

        if dissValues is None:
            self.dissValues = np.zeros_like(self.lonGrid)
        else:
            dissValues = self._checked_size(dissValues)
            if dissValues is None:
                return
            self.dissValues = dissValues

    def _checked_size(self, values):
        """Return *values* unchanged if it has exactly self.npix elements.

        Otherwise print an error message and return None (legacy print-based
        error handling kept for backward compatibility; messages unified to
        the '<'/'>' form previously used only by set_values).
        """
        if values.size < self.npix:
            print('Error: Too few map values ('+str(values.size)+' < '+str(self.npix)+')')
            return None
        elif values.size > self.npix:
            print('Error: Too many map values ('+str(values.size)+' > '+str(self.npix)+')')
            return None
        return values

    def set_values(self, values, time=None, dissValues=None):
        """Set the temperature map.

        Args:
            values (ndarray): The map temperatures (in K) with a size of self.npix.
            time (float, optional): Time of map in days.
            dissValues (ndarray, optional): The H2 dissociation fraction values for the map.
        """
        values = self._checked_size(values)
        if values is None:
            return
        if time is not None:
            self.time = time
        self.values = values
        if dissValues is not None:
            dissValues = self._checked_size(dissValues)
            if dissValues is None:
                return
            self.dissValues = dissValues

    def _rolled(self, map2d, refLon):
        """Roll *map2d* in longitude so that refLon sits at the centre column.

        Returns map2d unchanged when refLon is None.
        """
        if refLon is None:
            return map2d
        rollCount = -(np.where(np.abs(self.lon-refLon) < self.dlon/2.+1e-6)[0][-1]
                      - int(self.lon.size/2.))
        return np.roll(map2d, rollCount, axis=1)

    def plot_map(self, refLon=None):
        """A convenience routine to plot the temperature map.

        Args:
            refLon (float, optional): The centre longitude used to rotate the map.

        Returns:
            figure: The figure containing the plot.
        """
        tempMap = self._rolled(self.values, refLon)
        plt.imshow(tempMap, cmap='inferno', extent=(-180,180,-90,90), origin='lower')
        plt.xlabel(r'$\rm Longitude$')
        plt.ylabel(r'$\rm Latitude$')
        plt.xticks([-180,-90,0,90,180])
        plt.yticks([-90,-45,0,45,90])
        cbar = plt.colorbar(orientation='vertical', fraction=0.05, pad=0.05, aspect=9)
        cbar.set_label(r'$\rm Temperature~(K)$')
        return plt.gcf()

    def plot_H2_dissociation(self, refLon=None):
        """A convenience routine to plot the H2 dissociation map (percent).

        Args:
            refLon (float, optional): The centre longitude used to rotate the map.

        Returns:
            figure: The figure containing the plot.
        """
        dissMap = self._rolled(self.dissValues*100., refLon)
        plt.imshow(dissMap, cmap='inferno', extent=(-180,180,-90,90), vmin=0, origin='lower')
        plt.xlabel(r'$\rm Longitude$')
        plt.ylabel(r'$\rm Latitude$')
        plt.xticks([-180,-90,0,90,180])
        plt.yticks([-90,-45,0,45,90])
        cbar = plt.colorbar(orientation='vertical', fraction=0.05, pad=0.05, aspect=9)
        cbar.set_label(r'$\rm Dissociation~Fraction~(\%)$')
        return plt.gcf()
| 39.304124 | 109 | 0.56918 |
25f9ccc110f52f1529e2b063d4116c228c8a85e1 | 39 | py | Python | tests/__init__.py | TaylorMonacelli/clinepunk | d9a354ad5b03305f9283fb39895f69c01b67d6f6 | [
"MIT"
] | null | null | null | tests/__init__.py | TaylorMonacelli/clinepunk | d9a354ad5b03305f9283fb39895f69c01b67d6f6 | [
"MIT"
] | null | null | null | tests/__init__.py | TaylorMonacelli/clinepunk | d9a354ad5b03305f9283fb39895f69c01b67d6f6 | [
"MIT"
] | null | null | null | """Unit test package for clinepunk."""
| 19.5 | 38 | 0.692308 |
a0295b02105d06e12d2db9d176d19d29d0acb874 | 4,141 | py | Python | code/utils/util_eval.py | Maclory/Deep-Iterative-Collaboration | 3ff19f1e4232e11b33fcd4c035aadaadf9d445f0 | [
"MIT"
] | 276 | 2020-03-16T05:23:24.000Z | 2022-03-31T02:44:10.000Z | code/utils/util_eval.py | Maclory/Deep-Iterative-Collaboration | 3ff19f1e4232e11b33fcd4c035aadaadf9d445f0 | [
"MIT"
] | 47 | 2020-04-20T15:59:12.000Z | 2022-03-30T03:33:52.000Z | code/utils/util_eval.py | Maclory/Deep-Iterative-Collaboration | 3ff19f1e4232e11b33fcd4c035aadaadf9d445f0 | [
"MIT"
] | 64 | 2020-04-03T12:34:33.000Z | 2021-12-20T15:45:58.000Z | import torch
import numpy as np
# Landmarks
def get_peak_2(heatmap_one):
    '''Return the (row, col) positions of the two largest heatmap values.

    Args:
        heatmap_one: 2D tensor of shape (H, W) (typically 32 * 32).

    Returns:
        ((row1, col1), (row2, col2)) for the largest and second-largest entries.
    '''
    h, w = heatmap_one.shape
    idx = torch.argsort(heatmap_one.view(-1), descending=True)
    # FIX: converting a row-major flat index to 2D uses the row *width* w,
    # not the height h; the original `// h` only worked for square maps.
    top1 = (idx[0].item() // w, idx[0].item() % w)
    top2 = (idx[1].item() // w, idx[1].item() % w)
    return top1, top2
def get_peak(heatmap_one):
    """Refine the strongest heatmap peak to full resolution.

    Upscales the top peak location by 4x, nudges it one pixel toward the
    second-strongest peak, and returns it in (x, y) order.
    """
    primary, secondary = (np.array(p) for p in get_peak_2(heatmap_one))
    # +1 where the runner-up lies in the positive direction, -1 otherwise
    nudge = np.where(secondary > primary, 1, -1)
    refined = primary * 4 + nudge
    return refined[[1, 0]]  # swap (row, col) -> (x, y)
def get_landmark(heatmap):
    '''Extract refined (x, y) coordinates for every landmark heatmap.

    Args:
        heatmap: stack of 2D heatmaps, shape (num_landmarks, H, W)
            (typically 68 * 32 * 32).

    Returns:
        ndarray of shape (num_landmarks, 2) holding (x, y) per landmark.
    '''
    # Comprehension replaces the manual append loop (same order, same result).
    return np.array([get_peak(heatmap[i]) for i in range(heatmap.shape[0])])
# PSNR & SSIM
def rgb2ycbcr(img, only_y=True):
    '''same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    Output has the same dtype as the input; the caller's array is not modified.
    '''
    in_img_type = img.dtype
    # FIX: astype() returns a copy and the original discarded it; without
    # this rebinding, the in-place `*= 255.` below mutated the *caller's*
    # array for float input.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.  # safe now: operates on the local copy only
    # convert
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
def calc_metrics(img1, img2, crop_border, test_Y=True):
    """Compute (PSNR, SSIM) between two images with values in [0, 1].

    Args:
        img1, img2: HxW or HxWxC arrays in [0, 1].
        crop_border (int): number of border pixels to exclude on each side.
        test_Y (bool): if True and the inputs are 3-channel, evaluate on the
            Y channel of YCbCr only.

    Returns:
        (psnr, ssim) tuple.
    """
    # FIX: guard on ndim so 2D input with test_Y=True no longer raises
    # IndexError on shape[2].
    if test_Y and img1.ndim == 3 and img1.shape[2] == 3:
        # evaluate on Y channel in YCbCr color space
        im1_in = rgb2ycbcr(img1)
        im2_in = rgb2ycbcr(img2)
    else:
        im1_in = img1
        im2_in = img2

    if im1_in.ndim not in (2, 3):
        raise ValueError('Wrong image dimension: {}. Should be 2 or 3.'.format(im1_in.ndim))
    # FIX: with crop_border == 0 the original slice [0:-0] was empty; only
    # crop when there is a border to remove.  Slicing the first two axes
    # covers both the 2D and 3D cases.
    if crop_border > 0:
        cropped_im1 = im1_in[crop_border:-crop_border, crop_border:-crop_border]
        cropped_im2 = im2_in[crop_border:-crop_border, crop_border:-crop_border]
    else:
        cropped_im1, cropped_im2 = im1_in, im2_in

    psnr = calc_psnr(cropped_im1 * 255, cropped_im2 * 255)
    ssim_val = calc_ssim(cropped_im1 * 255, cropped_im2 * 255)  # avoid shadowing ssim()
    return psnr, ssim_val
def calc_psnr(img1, img2):
    """Peak signal-to-noise ratio in dB; img1 and img2 have range [0, 255].

    Returns float('inf') for identical images.
    """
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    mse = np.mean((img1 - img2)**2)
    if mse == 0:
        return float('inf')
    # FIX: the original called math.log10/math.sqrt but `math` is never
    # imported in this module (NameError at runtime); use the numpy
    # equivalents that are already imported.
    return 20 * np.log10(255.0 / np.sqrt(mse))
def ssim(img1, img2):
    """Single-scale SSIM map mean for two images with range [0, 255].

    Uses an 11x11 Gaussian window (sigma=1.5) and 'valid'-style cropping of
    the 5-pixel filter border, matching the standard MATLAB implementation.

    NOTE(review): relies on cv2 (OpenCV), which is not imported anywhere in
    the visible module imports -- calling this as-is raises NameError;
    confirm `import cv2` exists elsewhere or add it.
    """
    # Stability constants from the SSIM paper (k1=0.01, k2=0.03, L=255).
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    # Separable Gaussian -> full 2D window via outer product.
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    # Local (co)variances: E[x^2] - E[x]^2 under the Gaussian window.
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()
def calc_ssim(img1, img2):
    '''calculate SSIM
    the same outputs as MATLAB's
    img1, img2: [0, 255]

    For 3-channel input, computes SSIM per channel and averages the three
    results.
    '''
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    if img1.ndim == 2:
        return ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            # FIX: the original passed the full 3-channel images on every
            # loop iteration, computing the identical value three times;
            # compare channel by channel as intended and average.
            ssims = [ssim(img1[..., i], img2[..., i]) for i in range(3)]
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
        else:
            raise ValueError('Wrong input image dimensions.')
6bc74e56d2fc72a2f1c2303470bcefeab4ebd665 | 7,540 | py | Python | HW3/crossover2.py | Luceven/ANLY_DataStruct_Algorithms | 7cedd93124ebb25c9991bfa8e2c025354bf2ae87 | [
"MIT"
] | null | null | null | HW3/crossover2.py | Luceven/ANLY_DataStruct_Algorithms | 7cedd93124ebb25c9991bfa8e2c025354bf2ae87 | [
"MIT"
] | null | null | null | HW3/crossover2.py | Luceven/ANLY_DataStruct_Algorithms | 7cedd93124ebb25c9991bfa8e2c025354bf2ae87 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 29 14:29:23 2019
@author: katezeng
"""
import random, sys, datetime, argparse
######### TEST ##########
test1 = [[1, 7], [2, 4]]  # sample 2x2 operand for manual testing of the matrix routines
test2 = [[3, 3], [5, 2]]  # second sample operand (test1 @ test2 exercises the multiply)
######### TEST ##########
# addition of matrices (a + b = c)
def matrix_add(A, B):
    """Element-wise sum C = A + B of two equally-shaped nested-list matrices.

    Generalized from square-only inputs to any matching rectangular shape.

    Args:
        A, B: matrices as lists of row lists of numbers (same shape).

    Returns:
        A new matrix of the same shape with C[i][j] = A[i][j] + B[i][j].
    """
    return [[a + b for a, b in zip(row_a, row_b)] for row_a, row_b in zip(A, B)]
# subtraction of square matrix (a - b = c)
def matrix_sub(A, B):
    """Element-wise difference C = A - B of two equally-shaped matrices.

    Generalized from square-only inputs to any matching rectangular shape.

    Args:
        A, B: matrices as lists of row lists of numbers (same shape).

    Returns:
        A new matrix of the same shape with C[i][j] = A[i][j] - B[i][j].
    """
    return [[a - b for a, b in zip(row_a, row_b)] for row_a, row_b in zip(A, B)]
# regular matrix multiplication
def matrix_mult(A, B):
    """Naive O(n^3) matrix product C = A @ B for nested-list matrices.

    Generalized from square-only inputs to any compatible shapes:
    A is (rows x inner), B is (inner x cols).

    Args:
        A, B: matrices as lists of row lists of numbers.

    Returns:
        The (rows x cols) product as a new list of lists.
    """
    rows = len(A)
    inner = len(B)
    cols = len(B[0]) if B else 0
    C = [[0] * cols for _ in range(rows)]
    # i-k-j loop order keeps the B row access sequential (cache friendly);
    # A[i][k] is loop-invariant in j, so hoist it.
    for i in range(rows):
        for k in range(inner):
            a_ik = A[i][k]
            for j in range(cols):
                C[i][j] += a_ik * B[k][j]
    return C
# the strassen algorithm
def strassen(A, B, cut):
    """Multiply two square matrices A and B with Strassen's algorithm.

    Matrices of size <= ``cut`` are multiplied with the schoolbook
    ``matrix_mult``; larger ones are padded to an even size, split into
    four quadrants and combined from the seven Strassen products P1..P7.

    Args:
        A, B: square matrices (lists of lists) of identical size.
        cut: crossover size at or below which ordinary multiplication
            is used.

    Returns:
        The product A * B, with the same size as the caller's inputs.
    """
    n = len(A)
    if n <= cut:
        # Below the crossover point the schoolbook product is faster.
        return matrix_mult(A, B)

    # Remember the caller-visible size: padding below must not leak out.
    orig_n = n

    # Pad with a zero row and column until the size is even so the matrix
    # splits cleanly into quadrants (this loop runs at most once).
    while n % 2 != 0:
        A = [x + [0] for x in A]
        B = [x + [0] for x in B]
        A.append([0] * (n + 1))
        B.append([0] * (n + 1))
        n += 1
    half = n // 2

    # Quadrants: 11 = top-left, 12 = top-right,
    #            21 = bottom-left, 22 = bottom-right.
    A11 = [[0] * half for _ in range(half)]
    B11 = [[0] * half for _ in range(half)]
    A12 = [[0] * half for _ in range(half)]
    B12 = [[0] * half for _ in range(half)]
    A21 = [[0] * half for _ in range(half)]
    B21 = [[0] * half for _ in range(half)]
    A22 = [[0] * half for _ in range(half)]
    B22 = [[0] * half for _ in range(half)]
    for i in range(half):
        for j in range(half):
            A11[i][j] = A[i][j]
            B11[i][j] = B[i][j]
            A12[i][j] = A[i][j + half]
            B12[i][j] = B[i][j + half]
            A21[i][j] = A[i + half][j]
            B21[i][j] = B[i + half][j]
            A22[i][j] = A[i + half][j + half]
            B22[i][j] = B[i + half][j + half]

    # The seven Strassen products.
    P1 = strassen(matrix_add(A11, A22), matrix_add(B11, B22), cut)
    P2 = strassen(matrix_add(A21, A22), B11, cut)
    P3 = strassen(A11, matrix_sub(B12, B22), cut)
    P4 = strassen(A22, matrix_sub(B21, B11), cut)
    P5 = strassen(matrix_add(A11, A12), B22, cut)
    P6 = strassen(matrix_sub(A21, A11), matrix_add(B11, B12), cut)
    P7 = strassen(matrix_sub(A12, A22), matrix_add(B21, B22), cut)

    # Combine the products into the four result quadrants.
    C11 = matrix_sub(matrix_add(matrix_add(P1, P4), P7), P5)  # P1+P4-P5+P7
    C12 = matrix_add(P3, P5)                                  # P3+P5
    C21 = matrix_add(P2, P4)                                  # P2+P4
    C22 = matrix_sub(matrix_add(matrix_add(P1, P3), P6), P2)  # P1+P3-P2+P6

    C = [[0] * n for _ in range(n)]
    for i in range(half):
        for j in range(half):
            C[i][j] = C11[i][j]
            C[i][j + half] = C12[i][j]
            C[i + half][j] = C21[i][j]
            C[i + half][j + half] = C22[i][j]

    # Bug fix: strip the zero padding so callers with odd-sized inputs get
    # back a matrix of their original size (previously the padded matrix
    # was returned unchanged).
    if n != orig_n:
        C = [row[:orig_n] for row in C[:orig_n]]
    return C
# binary matrix generator
def matrix_generator(dim):
    """Generate two dim x dim matrices with random 0/1 entries."""
    left = [[0] * dim for _ in range(dim)]
    right = [[0] * dim for _ in range(dim)]
    for row in range(dim):
        for col in range(dim):
            # Draw the first matrix's entry before the second's so the
            # RNG consumption order stays identical for a fixed seed.
            left[row][col] = random.randint(0, 1)
            right[row][col] = random.randint(0, 1)
    return left, right
# print diag entries of matrix
def print_diag(matrix):
    """Return the main-diagonal entries of a square matrix as a list."""
    return [row[idx] for idx, row in enumerate(matrix)]
# script entry point (benchmark sweep plus optional file-driven verification)
if __name__ == "__main__":
    # Benchmark mode (always runs): time ordinary multiplication against
    # Strassen with several crossover ("cutting") points on random binary
    # matrices of growing size.
    # for testing cutting point
    for x in [4, 8, 16, 32, 64, 128, 256, 512]:
        # generate binary matrix for test
        A, B = matrix_generator(x)
        ord_start = datetime.datetime.now()
        matrix_mult(A, B)
        ord_stop = datetime.datetime.now()
        print("Dimension", x, " Ordinary matrix mult: ", ord_stop - ord_start)
        for cut in [4, 8, 16, 32]:
            stras_start = datetime.datetime.now()
            strassen(A, B, cut)
            stras_stop = datetime.datetime.now()
            print("Dimension", x, "cutting point", cut, "Strassen algorithm: ", stras_stop - stras_start, "\n")
    # for cmd arguments parsing
    parser = argparse.ArgumentParser(description="Python Script for Strassen's Algorithm and Ordinary Matrix Multiplication")
    parser.add_argument('flag', help='flag', type=int, choices=[0])
    parser.add_argument('dimension', help='dimension of inpute squre matrices', type=int)
    parser.add_argument('inputfile', help='input file as ASCII file', type=argparse.FileType('r'))
    args = parser.parse_args()
    if args.flag == 0:
        count = 0
        rawinput = []
        A = []
        B = []
        # Read every non-blank line of the input file as one integer entry.
        with open(args.inputfile.name) as file:
            lines = file.readlines()
            for i in lines:
                if i.strip('\n').strip() != '':
                    count += 1
                    rawinput.append(int(i))
        # The file must hold exactly two dimension x dimension matrices,
        # one value per line.
        if count != 2*args.dimension**2:
            print('Specified dimension mismatch with number of lines in ' + args.inputfile.name + ', please try again.')
            sys.exit()
        else:
            #print(rawinput)
            # First half of the values is matrix A, second half is matrix B.
            temp1 = rawinput[:args.dimension**2]
            temp2 = rawinput[args.dimension**2:]
            old = 0
            # Reshape the flat value lists into dimension-sized rows.
            for i in range(args.dimension, args.dimension**2+1, args.dimension):
                A.append(temp1[old:i])
                B.append(temp2[old:i])
                old = i
            #print(A, "\n\n", B)
            #print(len(A))
            print("The result from ordinary matrix multiplication:")
            ord_res = matrix_mult(A, B)
            print(ord_res)
            print("\nThe result from modified Strassen's Algorithm with 8 as cutting point:")
            stras_res = strassen(A, B, 8)
            print(stras_res)
            # Cross-check Strassen against the schoolbook product.
            print("\nCheck correctness: they are:", ord_res == stras_res)
            print("\nThe diagonal entries are:")
            print(print_diag(stras_res))
| 31.949153 | 125 | 0.476127 |
7eae8db0ba67a87a1337bd4a289d505aee365aaa | 113,193 | py | Python | ppdet/data/transform/operators.py | hch-NLP/PaddleDetection | 08ace36a0f995f935ad334b5d7fba80a5ae67cc7 | [
"Apache-2.0"
] | 1 | 2022-03-30T02:39:57.000Z | 2022-03-30T02:39:57.000Z | ppdet/data/transform/operators.py | hch-NLP/PaddleDetection | 08ace36a0f995f935ad334b5d7fba80a5ae67cc7 | [
"Apache-2.0"
] | null | null | null | ppdet/data/transform/operators.py | hch-NLP/PaddleDetection | 08ace36a0f995f935ad334b5d7fba80a5ae67cc7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# function:
# operators to process sample,
# eg: decode/resize/crop image
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
from numbers import Number, Integral
import uuid
import random
import math
import numpy as np
import os
import copy
import logging
import cv2
from PIL import Image, ImageDraw
import pickle
import threading
MUTEX = threading.Lock()
from ppdet.core.workspace import serializable
from ppdet.modeling import bbox_utils
from ..reader import Compose
from .op_helper import (satisfy_sample_constraint, filter_and_process,
generate_sample_bbox, clip_bbox, data_anchor_sampling,
satisfy_sample_constraint_coverage, crop_image_sampling,
generate_sample_bbox_square, bbox_area_sampling,
is_poly, get_border)
from ppdet.utils.logger import setup_logger
from ppdet.modeling.keypoint_utils import get_affine_transform, affine_transform
logger = setup_logger(__name__)
registered_ops = []
def register_op(cls):
    """Record an operator class and attach it to BaseOperator by name.

    Raises KeyError when an operator with the same class name was already
    attached. Returns the class wrapped by ``serializable``.
    """
    registered_ops.append(cls.__name__)
    if hasattr(BaseOperator, cls.__name__):
        raise KeyError("The {} class has been registered.".format(cls.__name__))
    setattr(BaseOperator, cls.__name__, cls)
    return serializable(cls)
class BboxError(ValueError):
    """Raised by operators when bounding-box data in a sample is invalid."""
    pass
class ImageError(ValueError):
    """Raised by operators when the image in a sample is invalid (e.g. not 3-D)."""
    pass
class BaseOperator(object):
    """Base class of data-transform operators.

    Each operator carries an id built from its name plus a short random
    suffix, and processes either one sample dict or a sequence of sample
    dicts through ``__call__``.
    """

    def __init__(self, name=None):
        op_name = self.__class__.__name__ if name is None else name
        # Short uuid suffix keeps ids unique across operator instances.
        self._id = op_name + '_' + str(uuid.uuid4())[-6:]

    def apply(self, sample, context=None):
        """ Process a sample.
        Args:
            sample (dict): a dict of sample, eg: {'image':xx, 'label': xxx}
            context (dict): info about this sample processing
        Returns:
            result (dict): a processed sample
        """
        return sample

    def __call__(self, sample, context=None):
        """Apply the operator to one sample, or element-wise to a sequence."""
        if isinstance(sample, Sequence):
            for idx, one in enumerate(sample):
                sample[idx] = self.apply(one, context)
            return sample
        return self.apply(sample, context)

    def __str__(self):
        return str(self._id)
@register_op
class Decode(BaseOperator):
    def __init__(self):
        """ Transform the image data to numpy format following the rgb format
        """
        super(Decode, self).__init__()

    def apply(self, sample, context=None):
        """ load image if 'im_file' field is not empty but 'image' is"""
        # Read the raw bytes lazily: only when the sample carries a path
        # instead of already-loaded image bytes.
        if 'image' not in sample:
            with open(sample['im_file'], 'rb') as f:
                sample['image'] = f.read()
            sample.pop('im_file')

        im = sample['image']
        data = np.frombuffer(im, dtype='uint8')
        im = cv2.imdecode(data, 1)  # BGR mode, but need RGB mode
        # Keep an (un-converted, BGR) copy of the decoded image on request.
        if 'keep_ori_im' in sample and sample['keep_ori_im']:
            sample['ori_image'] = im
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

        sample['image'] = im
        # Reconcile annotated h/w with the decoded image; the decoded
        # shape always wins, with a warning on mismatch.
        if 'h' not in sample:
            sample['h'] = im.shape[0]
        elif sample['h'] != im.shape[0]:
            logger.warning(
                "The actual image height: {} is not equal to the "
                "height: {} in annotation, and update sample['h'] by actual "
                "image height.".format(im.shape[0], sample['h']))
            sample['h'] = im.shape[0]
        if 'w' not in sample:
            sample['w'] = im.shape[1]
        elif sample['w'] != im.shape[1]:
            logger.warning(
                "The actual image width: {} is not equal to the "
                "width: {} in annotation, and update sample['w'] by actual "
                "image width.".format(im.shape[1], sample['w']))
            sample['w'] = im.shape[1]

        # Initial shape bookkeeping: no resizing has happened yet, so the
        # scale factor starts at [1, 1].
        sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
        sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
        return sample
def _make_dirs(dirname):
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
Path(dirname).mkdir(exist_ok=True)
@register_op
class DecodeCache(BaseOperator):
    def __init__(self, cache_root=None):
        '''decode image and caching
        '''
        super(DecodeCache, self).__init__()

        # Caching is enabled only when a cache directory is configured.
        self.use_cache = False if cache_root is None else True
        self.cache_root = cache_root

        if cache_root is not None:
            _make_dirs(cache_root)

    def apply(self, sample, context=None):

        # Fast path: a previously decoded copy exists in the cache.
        if self.use_cache and os.path.exists(
                self.cache_path(self.cache_root, sample['im_file'])):
            path = self.cache_path(self.cache_root, sample['im_file'])
            im = self.load(path)

        else:
            if 'image' not in sample:
                with open(sample['im_file'], 'rb') as f:
                    sample['image'] = f.read()

            im = sample['image']
            data = np.frombuffer(im, dtype='uint8')
            im = cv2.imdecode(data, 1)  # BGR mode, but need RGB mode
            if 'keep_ori_im' in sample and sample['keep_ori_im']:
                sample['ori_image'] = im
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

            # Populate the cache on first decode.
            if self.use_cache and not os.path.exists(
                    self.cache_path(self.cache_root, sample['im_file'])):
                path = self.cache_path(self.cache_root, sample['im_file'])
                self.dump(im, path)

        sample['image'] = im
        sample['h'] = im.shape[0]
        sample['w'] = im.shape[1]

        sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
        sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)

        sample.pop('im_file')

        return sample

    @staticmethod
    def cache_path(dir_oot, im_file):
        # NOTE(review): the cache key is the file *basename* only — images
        # with the same name in different directories would collide; confirm
        # inputs are unique by basename.
        return os.path.join(dir_oot, os.path.basename(im_file) + '.pkl')

    @staticmethod
    def load(path):
        # NOTE(review): pickle.load on cache files — the cache directory
        # must be trusted (pickle can execute arbitrary code).
        with open(path, 'rb') as f:
            im = pickle.load(f)
        return im

    @staticmethod
    def dump(obj, path):
        # MUTEX (module-level threading.Lock) serializes concurrent cache
        # writes; a failed dump is logged and swallowed so decoding proceeds.
        MUTEX.acquire()
        try:
            with open(path, 'wb') as f:
                pickle.dump(obj, f)
        except Exception as e:
            logger.warning('dump {} occurs exception {}'.format(path, str(e)))
        finally:
            MUTEX.release()
@register_op
class SniperDecodeCrop(BaseOperator):
    def __init__(self):
        super(SniperDecodeCrop, self).__init__()

    # NOTE: overrides __call__ directly, bypassing BaseOperator's
    # sequence handling — this operator always receives a single sample.
    def __call__(self, sample, context=None):
        if 'image' not in sample:
            with open(sample['im_file'], 'rb') as f:
                sample['image'] = f.read()
            sample.pop('im_file')

        im = sample['image']
        data = np.frombuffer(im, dtype='uint8')
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)  # BGR mode, but need RGB mode
        if 'keep_ori_im' in sample and sample['keep_ori_im']:
            sample['ori_image'] = im
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

        # Crop the SNIPER chip [x1, y1, x2, y2], clamped to image bounds.
        chip = sample['chip']
        x1, y1, x2, y2 = [int(xi) for xi in chip]
        im = im[max(y1, 0):min(y2, im.shape[0]), max(x1, 0):min(x2, im.shape[
            1]), :]

        sample['image'] = im
        h = im.shape[0]
        w = im.shape[1]
        # sample['im_info'] = [h, w, 1.0]
        sample['h'] = h
        sample['w'] = w

        sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
        sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
        return sample
@register_op
class Permute(BaseOperator):
    """Rearrange the image array from HWC to CHW layout."""

    def __init__(self):
        super(Permute, self).__init__()

    def apply(self, sample, context=None):
        # Move the channel axis to the front: (H, W, C) -> (C, H, W).
        sample['image'] = sample['image'].transpose((2, 0, 1))
        return sample
@register_op
class Lighting(BaseOperator):
    """
    Lighting the image by eigenvalues and eigenvectors
    Args:
        eigval (list): eigenvalues
        eigvec (list): eigenvectors
        alphastd (float): random weight of lighting, 0.1 by default
    """

    def __init__(self, eigval, eigvec, alphastd=0.1):
        super(Lighting, self).__init__()
        self.alphastd = alphastd
        # Stored as float32 arrays so the dot product below stays float32.
        self.eigval = np.array(eigval).astype('float32')
        self.eigvec = np.array(eigvec).astype('float32')

    def apply(self, sample, context=None):
        # Per-channel weights drawn from N(0, alphastd); the resulting
        # shift is added in place to the image.
        weights = np.random.normal(scale=self.alphastd, size=(3, ))
        shift = np.dot(self.eigvec, self.eigval * weights)
        sample['image'] += shift
        return sample
@register_op
class RandomErasingImage(BaseOperator):
    def __init__(self, prob=0.5, lower=0.02, higher=0.4, aspect_ratio=0.3):
        """
        Random Erasing Data Augmentation, see https://arxiv.org/abs/1708.04896
        Args:
            prob (float): probability to carry out random erasing
            lower (float): lower limit of the erasing area ratio
            higher (float): upper limit of the erasing area ratio
            aspect_ratio (float): aspect ratio of the erasing region
        """
        super(RandomErasingImage, self).__init__()
        self.prob = prob
        self.lower = lower
        self.higher = higher
        self.aspect_ratio = aspect_ratio

    def apply(self, sample, context=None):
        # Bug fix: `context=None` added to the signature. BaseOperator's
        # __call__ invokes self.apply(sample, context) positionally, which
        # previously raised TypeError for this operator.
        gt_bbox = sample['gt_bbox']
        im = sample['image']
        if not isinstance(im, np.ndarray):
            raise TypeError("{}: image is not a numpy array.".format(self))
        if len(im.shape) != 3:
            raise ImageError("{}: image is not 3-dimensional.".format(self))

        # Independently decide per ground-truth box whether to erase a
        # random patch inside it.
        for idx in range(gt_bbox.shape[0]):
            if self.prob <= np.random.rand():
                continue

            x1, y1, x2, y2 = gt_bbox[idx, :]
            w_bbox = x2 - x1
            h_bbox = y2 - y1
            area = w_bbox * h_bbox

            # Sample the patch size from the configured area/aspect ranges.
            target_area = random.uniform(self.lower, self.higher) * area
            aspect_ratio = random.uniform(self.aspect_ratio,
                                          1 / self.aspect_ratio)

            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))

            # Only erase when the patch fits entirely inside the box.
            if w < w_bbox and h < h_bbox:
                off_y1 = random.randint(0, int(h_bbox - h))
                off_x1 = random.randint(0, int(w_bbox - w))
                im[int(y1 + off_y1):int(y1 + off_y1 + h), int(x1 + off_x1):int(
                    x1 + off_x1 + w), :] = 0
        sample['image'] = im
        return sample
@register_op
class NormalizeImage(BaseOperator):
    def __init__(self, mean=[0.485, 0.456, 0.406], std=[1, 1, 1],
                 is_scale=True):
        """
        Args:
            mean (list): the pixel mean
            std (list): the pixel variance
        """
        super(NormalizeImage, self).__init__()
        self.mean = mean
        self.std = std
        self.is_scale = is_scale
        # Validate configuration eagerly so bad configs fail at build time.
        types_ok = (isinstance(self.mean, list) and
                    isinstance(self.std, list) and
                    isinstance(self.is_scale, bool))
        if not types_ok:
            raise TypeError("{}: input type is invalid.".format(self))
        from functools import reduce
        # A zero anywhere in std would divide by zero during apply().
        if reduce(lambda x, y: x * y, self.std) == 0:
            raise ValueError('{}: std is invalid!'.format(self))

    def apply(self, sample, context=None):
        """Normalize the image.
        Operators:
            1.(optional) Scale the image to [0,1]
            2. Each pixel minus mean and is divided by std
        """
        img = sample['image'].astype(np.float32, copy=False)
        # Broadcast mean/std over H and W via leading singleton axes.
        mean_arr = np.array(self.mean)[np.newaxis, np.newaxis, :]
        std_arr = np.array(self.std)[np.newaxis, np.newaxis, :]
        if self.is_scale:
            img = img / 255.0
        img -= mean_arr
        img /= std_arr
        sample['image'] = img
        return sample
@register_op
class GridMask(BaseOperator):
    def __init__(self,
                 use_h=True,
                 use_w=True,
                 rotate=1,
                 offset=False,
                 ratio=0.5,
                 mode=1,
                 prob=0.7,
                 upper_iter=360000):
        """
        GridMask Data Augmentation, see https://arxiv.org/abs/2001.04086
        Args:
            use_h (bool): whether to mask vertically
            use_w (bool): whether to mask horizontally
            rotate (float): angle for the mask to rotate
            offset (float): mask offset
            ratio (float): mask ratio
            mode (int): gridmask mode
            prob (float): max probability to carry out gridmask
            upper_iter (int): suggested to be equal to global max_iter
        """
        super(GridMask, self).__init__()
        self.use_h = use_h
        self.use_w = use_w
        self.rotate = rotate
        self.offset = offset
        self.ratio = ratio
        self.mode = mode
        self.prob = prob
        self.upper_iter = upper_iter

        # Delegate all masking work to the Gridmask helper; it is built
        # once here with the same configuration.
        from .gridmask_utils import Gridmask
        self.gridmask_op = Gridmask(
            use_h,
            use_w,
            rotate=rotate,
            offset=offset,
            ratio=ratio,
            mode=mode,
            prob=prob,
            upper_iter=upper_iter)

    def apply(self, sample, context=None):
        # The helper needs the current iteration to schedule the masking
        # probability, hence the 'curr_iter' field on the sample.
        sample['image'] = self.gridmask_op(sample['image'], sample['curr_iter'])
        return sample
@register_op
class RandomDistort(BaseOperator):
    """Random color distortion.
    Args:
        hue (list): hue settings. in [lower, upper, probability] format.
        saturation (list): saturation settings. in [lower, upper, probability] format.
        contrast (list): contrast settings. in [lower, upper, probability] format.
        brightness (list): brightness settings. in [lower, upper, probability] format.
        random_apply (bool): whether to apply in random (yolo) or fixed (SSD)
            order.
        count (int): the number of doing distrot
        random_channel (bool): whether to swap channels randomly
    """

    def __init__(self,
                 hue=[-18, 18, 0.5],
                 saturation=[0.5, 1.5, 0.5],
                 contrast=[0.5, 1.5, 0.5],
                 brightness=[0.5, 1.5, 0.5],
                 random_apply=True,
                 count=4,
                 random_channel=False):
        super(RandomDistort, self).__init__()
        self.hue = hue
        self.saturation = saturation
        self.contrast = contrast
        self.brightness = brightness
        self.random_apply = random_apply
        self.count = count
        self.random_channel = random_channel

    def apply_hue(self, img):
        low, high, prob = self.hue
        # NOTE(review): this returns the image *unchanged* when the draw is
        # below `prob`, i.e. the distortion fires with probability 1 - prob.
        # The same convention appears in all apply_* methods below; confirm
        # this inversion is intended before changing it.
        if np.random.uniform(0., 1.) < prob:
            return img

        img = img.astype(np.float32)
        # it works, but result differ from HSV version
        delta = np.random.uniform(low, high)
        u = np.cos(delta * np.pi)
        w = np.sin(delta * np.pi)
        # Rotate the hue in YIQ space: RGB -> YIQ, rotate the I/Q plane by
        # `delta`, then convert back to RGB; `t` fuses the three transforms.
        bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])
        tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],
                         [0.211, -0.523, 0.311]])
        ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647],
                          [1.0, -1.107, 1.705]])
        t = np.dot(np.dot(ityiq, bt), tyiq).T
        img = np.dot(img, t)
        return img

    def apply_saturation(self, img):
        low, high, prob = self.saturation
        if np.random.uniform(0., 1.) < prob:
            return img
        delta = np.random.uniform(low, high)
        img = img.astype(np.float32)
        # it works, but result differ from HSV version
        # Blend between the luma (grayscale) image and the original:
        # delta > 1 boosts saturation, delta < 1 washes it out.
        gray = img * np.array([[[0.299, 0.587, 0.114]]], dtype=np.float32)
        gray = gray.sum(axis=2, keepdims=True)
        gray *= (1.0 - delta)
        img *= delta
        img += gray
        return img

    def apply_contrast(self, img):
        low, high, prob = self.contrast
        if np.random.uniform(0., 1.) < prob:
            return img
        delta = np.random.uniform(low, high)
        img = img.astype(np.float32)
        img *= delta
        return img

    def apply_brightness(self, img):
        low, high, prob = self.brightness
        if np.random.uniform(0., 1.) < prob:
            return img
        delta = np.random.uniform(low, high)
        img = img.astype(np.float32)
        img += delta
        return img

    def apply(self, sample, context=None):
        img = sample['image']
        if self.random_apply:
            # YOLO-style: apply `count` of the four distortions in a random
            # order and return immediately.
            functions = [
                self.apply_brightness, self.apply_contrast,
                self.apply_saturation, self.apply_hue
            ]
            distortions = np.random.permutation(functions)[:self.count]
            for func in distortions:
                img = func(img)
            sample['image'] = img
            return sample

        # SSD-style fixed order: brightness first, contrast either before
        # or after saturation/hue depending on a coin flip.
        img = self.apply_brightness(img)
        mode = np.random.randint(0, 2)

        if mode:
            img = self.apply_contrast(img)

        img = self.apply_saturation(img)
        img = self.apply_hue(img)

        if not mode:
            img = self.apply_contrast(img)

        if self.random_channel:
            # Optionally shuffle the RGB channels with probability 0.5.
            if np.random.randint(0, 2):
                img = img[..., np.random.permutation(3)]
        sample['image'] = img
        return sample
@register_op
class AutoAugment(BaseOperator):
    def __init__(self, autoaug_type="v1"):
        """
        Args:
            autoaug_type (str): autoaug type, support v0, v1, v2, v3, test
        """
        super(AutoAugment, self).__init__()
        self.autoaug_type = autoaug_type

    def apply(self, sample, context=None):
        """
        Learning Data Augmentation Strategies for Object Detection, see https://arxiv.org/abs/1906.11172
        """
        im = sample['image']
        gt_bbox = sample['gt_bbox']
        if not isinstance(im, np.ndarray):
            raise TypeError("{}: image is not a numpy array.".format(self))
        if len(im.shape) != 3:
            raise ImageError("{}: image is not 3-dimensional.".format(self))
        if len(gt_bbox) == 0:
            return sample

        height, width, _ = im.shape
        # Convert pixel [x1, y1, x2, y2] boxes into the normalized
        # [y1, x1, y2, x2] layout the autoaugment utils expect.
        norm_gt_bbox = np.ones_like(gt_bbox, dtype=np.float32)
        norm_gt_bbox[:, 0] = gt_bbox[:, 1] / float(height)
        norm_gt_bbox[:, 1] = gt_bbox[:, 0] / float(width)
        norm_gt_bbox[:, 2] = gt_bbox[:, 3] / float(height)
        norm_gt_bbox[:, 3] = gt_bbox[:, 2] / float(width)

        from .autoaugment_utils import distort_image_with_autoaugment
        im, norm_gt_bbox = distort_image_with_autoaugment(im, norm_gt_bbox,
                                                          self.autoaug_type)

        # Map the augmented boxes back to pixel [x1, y1, x2, y2] order.
        gt_bbox[:, 0] = norm_gt_bbox[:, 1] * float(width)
        gt_bbox[:, 1] = norm_gt_bbox[:, 0] * float(height)
        gt_bbox[:, 2] = norm_gt_bbox[:, 3] * float(width)
        gt_bbox[:, 3] = norm_gt_bbox[:, 2] * float(height)

        sample['image'] = im
        sample['gt_bbox'] = gt_bbox
        return sample
@register_op
class RandomFlip(BaseOperator):
    def __init__(self, prob=0.5):
        """
        Args:
            prob (float): the probability of flipping image
        """
        super(RandomFlip, self).__init__()
        self.prob = prob
        if not (isinstance(self.prob, float)):
            raise TypeError("{}: input type is invalid.".format(self))

    def apply_segm(self, segms, height, width):
        # Mirror polygon x-coordinates; even indices of a flat polygon
        # list are x values.
        def _flip_poly(poly, width):
            flipped_poly = np.array(poly)
            flipped_poly[0::2] = width - np.array(poly[0::2])
            return flipped_poly.tolist()

        # Decode an RLE mask, flip it horizontally, re-encode it.
        # `mask_util` is bound in the loop below (closure over the
        # function-local import) before this helper ever runs.
        def _flip_rle(rle, height, width):
            if 'counts' in rle and type(rle['counts']) == list:
                rle = mask_util.frPyObjects(rle, height, width)
            mask = mask_util.decode(rle)
            mask = mask[:, ::-1]
            rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
            return rle

        flipped_segms = []
        for segm in segms:
            if is_poly(segm):
                # Polygon format
                flipped_segms.append([_flip_poly(poly, width) for poly in segm])
            else:
                # RLE format
                import pycocotools.mask as mask_util
                flipped_segms.append(_flip_rle(segm, height, width))
        return flipped_segms

    def apply_keypoint(self, gt_keypoint, width):
        # Keypoints are stored as interleaved (x, y); mirror only the
        # x columns (even indices).
        for i in range(gt_keypoint.shape[1]):
            if i % 2 == 0:
                old_x = gt_keypoint[:, i].copy()
                gt_keypoint[:, i] = width - old_x
        return gt_keypoint

    def apply_image(self, image):
        # Horizontal flip of an HWC image.
        return image[:, ::-1, :]

    def apply_bbox(self, bbox, width):
        # Mirror and swap x1/x2 so x1 <= x2 still holds after the flip.
        oldx1 = bbox[:, 0].copy()
        oldx2 = bbox[:, 2].copy()
        bbox[:, 0] = width - oldx2
        bbox[:, 2] = width - oldx1
        return bbox

    def apply_rbox(self, bbox, width):
        # Quadrilateral boxes (x1,y1,...,x4,y4): mirror every x coordinate
        # in place, then re-canonicalize the vertex ordering.
        oldx1 = bbox[:, 0].copy()
        oldx2 = bbox[:, 2].copy()
        oldx3 = bbox[:, 4].copy()
        oldx4 = bbox[:, 6].copy()
        bbox[:, 0] = width - oldx1
        bbox[:, 2] = width - oldx2
        bbox[:, 4] = width - oldx3
        bbox[:, 6] = width - oldx4
        bbox = [bbox_utils.get_best_begin_point_single(e) for e in bbox]
        return bbox

    def apply(self, sample, context=None):
        """Filp the image and bounding box.
        Operators:
            1. Flip the image numpy.
            2. Transform the bboxes' x coordinates.
              (Must judge whether the coordinates are normalized!)
            3. Transform the segmentations' x coordinates.
              (Must judge whether the coordinates are normalized!)
        Output:
            sample: the image, bounding box and segmentation part
                    in sample are flipped.
        """
        if np.random.uniform(0, 1) < self.prob:
            im = sample['image']
            height, width = im.shape[:2]
            im = self.apply_image(im)
            if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
                sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], width)
            if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
                sample['gt_poly'] = self.apply_segm(sample['gt_poly'], height,
                                                    width)
            if 'gt_keypoint' in sample and len(sample['gt_keypoint']) > 0:
                sample['gt_keypoint'] = self.apply_keypoint(
                    sample['gt_keypoint'], width)

            # NOTE(review): the truth test on sample['semantic'] raises for
            # non-empty numpy arrays; callers appear to pass None here —
            # confirm before relying on this branch.
            if 'semantic' in sample and sample['semantic']:
                sample['semantic'] = sample['semantic'][:, ::-1]

            if 'gt_segm' in sample and sample['gt_segm'].any():
                sample['gt_segm'] = sample['gt_segm'][:, :, ::-1]

            if 'gt_rbox2poly' in sample and sample['gt_rbox2poly'].any():
                sample['gt_rbox2poly'] = self.apply_rbox(sample['gt_rbox2poly'],
                                                         width)

            sample['flipped'] = True
            sample['image'] = im
        return sample
@register_op
class Resize(BaseOperator):
    def __init__(self, target_size, keep_ratio, interp=cv2.INTER_LINEAR):
        """
        Resize image to target size. if keep_ratio is True,
        resize the image's long side to the maximum of target_size
        if keep_ratio is False, resize the image to target size(h, w)
        Args:
            target_size (int|list): image target size
            keep_ratio (bool): whether keep_ratio or not, default true
            interp (int): the interpolation method
        """
        super(Resize, self).__init__()
        self.keep_ratio = keep_ratio
        self.interp = interp
        if not isinstance(target_size, (Integral, Sequence)):
            raise TypeError(
                "Type of target_size is invalid. Must be Integer or List or Tuple, now is {}".
                format(type(target_size)))
        # A single integer means a square (size, size) target.
        if isinstance(target_size, Integral):
            target_size = [target_size, target_size]
        self.target_size = target_size

    def apply_image(self, image, scale):
        im_scale_x, im_scale_y = scale

        return cv2.resize(
            image,
            None,
            None,
            fx=im_scale_x,
            fy=im_scale_y,
            interpolation=self.interp)

    def apply_bbox(self, bbox, scale, size):
        # Scale box coordinates and clip them to the resized image.
        im_scale_x, im_scale_y = scale
        resize_w, resize_h = size
        bbox[:, 0::2] *= im_scale_x
        bbox[:, 1::2] *= im_scale_y
        bbox[:, 0::2] = np.clip(bbox[:, 0::2], 0, resize_w)
        bbox[:, 1::2] = np.clip(bbox[:, 1::2], 0, resize_h)
        return bbox

    def apply_segm(self, segms, im_size, scale):
        # Scale flat polygon coordinates (x at even, y at odd indices).
        def _resize_poly(poly, im_scale_x, im_scale_y):
            resized_poly = np.array(poly).astype('float32')
            resized_poly[0::2] *= im_scale_x
            resized_poly[1::2] *= im_scale_y
            return resized_poly.tolist()

        # Decode an RLE mask, resize the binary mask, re-encode it.
        def _resize_rle(rle, im_h, im_w, im_scale_x, im_scale_y):
            if 'counts' in rle and type(rle['counts']) == list:
                rle = mask_util.frPyObjects(rle, im_h, im_w)

            mask = mask_util.decode(rle)
            mask = cv2.resize(
                mask,
                None,
                None,
                fx=im_scale_x,
                fy=im_scale_y,
                interpolation=self.interp)
            rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
            return rle

        im_h, im_w = im_size
        im_scale_x, im_scale_y = scale
        resized_segms = []
        for segm in segms:
            if is_poly(segm):
                # Polygon format
                resized_segms.append([
                    _resize_poly(poly, im_scale_x, im_scale_y) for poly in segm
                ])
            else:
                # RLE format
                import pycocotools.mask as mask_util
                resized_segms.append(
                    _resize_rle(segm, im_h, im_w, im_scale_x, im_scale_y))

        return resized_segms

    def apply(self, sample, context=None):
        """ Resize the image numpy.
        """
        im = sample['image']
        if not isinstance(im, np.ndarray):
            raise TypeError("{}: image type is not numpy.".format(self))
        if len(im.shape) != 3:
            raise ImageError('{}: image is not 3-dimensional.'.format(self))

        # apply image
        im_shape = im.shape
        if self.keep_ratio:
            # Aspect-preserving: one scale chosen so the short side fits
            # the small target and the long side fits the large target.
            im_size_min = np.min(im_shape[0:2])
            im_size_max = np.max(im_shape[0:2])

            target_size_min = np.min(self.target_size)
            target_size_max = np.max(self.target_size)

            im_scale = min(target_size_min / im_size_min,
                           target_size_max / im_size_max)

            resize_h = im_scale * float(im_shape[0])
            resize_w = im_scale * float(im_shape[1])

            im_scale_x = im_scale
            im_scale_y = im_scale
        else:
            # Exact (h, w) target; x and y scales may differ.
            resize_h, resize_w = self.target_size
            im_scale_y = resize_h / im_shape[0]
            im_scale_x = resize_w / im_shape[1]

        im = self.apply_image(sample['image'], [im_scale_x, im_scale_y])
        sample['image'] = im
        sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)
        # Accumulate into any scale factor set by an earlier operator.
        if 'scale_factor' in sample:
            scale_factor = sample['scale_factor']
            sample['scale_factor'] = np.asarray(
                [scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
                dtype=np.float32)
        else:
            sample['scale_factor'] = np.asarray(
                [im_scale_y, im_scale_x], dtype=np.float32)

        # apply bbox
        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'],
                                                [im_scale_x, im_scale_y],
                                                [resize_w, resize_h])

        # apply rbox
        if 'gt_rbox2poly' in sample:
            if np.array(sample['gt_rbox2poly']).shape[1] != 8:
                logger.warning(
                    "gt_rbox2poly's length shoule be 8, but actually is {}".
                    format(len(sample['gt_rbox2poly'])))
            sample['gt_rbox2poly'] = self.apply_bbox(sample['gt_rbox2poly'],
                                                     [im_scale_x, im_scale_y],
                                                     [resize_w, resize_h])

        # apply polygon
        if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
            sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_shape[:2],
                                                [im_scale_x, im_scale_y])

        # apply semantic
        # NOTE(review): the truth test on sample['semantic'] raises for
        # non-empty numpy arrays; callers appear to pass None here — confirm.
        if 'semantic' in sample and sample['semantic']:
            semantic = sample['semantic']
            semantic = cv2.resize(
                semantic.astype('float32'),
                None,
                None,
                fx=im_scale_x,
                fy=im_scale_y,
                interpolation=self.interp)
            semantic = np.asarray(semantic).astype('int32')
            semantic = np.expand_dims(semantic, 0)
            sample['semantic'] = semantic

        # apply gt_segm
        if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
            # Nearest-neighbor keeps the masks binary after resizing.
            masks = [
                cv2.resize(
                    gt_segm,
                    None,
                    None,
                    fx=im_scale_x,
                    fy=im_scale_y,
                    interpolation=cv2.INTER_NEAREST)
                for gt_segm in sample['gt_segm']
            ]
            sample['gt_segm'] = np.asarray(masks).astype(np.uint8)

        return sample
@register_op
class MultiscaleTestResize(BaseOperator):
    def __init__(self,
                 origin_target_size=[800, 1333],
                 target_size=[],
                 interp=cv2.INTER_LINEAR,
                 use_flip=True):
        """
        Rescale image to the each size in target size, and capped at max_size.
        Args:
            origin_target_size (list): origin target size of image
            target_size (list): A list of target sizes of image.
            interp (int): the interpolation method.
            use_flip (bool): whether use flip augmentation.
        """
        super(MultiscaleTestResize, self).__init__()
        self.interp = interp
        self.use_flip = use_flip

        if not isinstance(target_size, Sequence):
            raise TypeError(
                "Type of target_size is invalid. Must be List or Tuple, now is {}".
                format(type(target_size)))
        self.target_size = target_size

        if not isinstance(origin_target_size, Sequence):
            raise TypeError(
                "Type of origin_target_size is invalid. Must be List or Tuple, now is {}".
                format(type(origin_target_size)))

        self.origin_target_size = origin_target_size

    def apply(self, sample, context=None):
        """ Resize the image numpy for multi-scale test.
        """
        # Returns a list of samples: the base scale, an optional flipped
        # copy, then one resized copy per extra target size.
        samples = []
        resizer = Resize(
            self.origin_target_size, keep_ratio=True, interp=self.interp)
        samples.append(resizer(sample.copy(), context))
        if self.use_flip:
            # prob=1.1 guarantees the flip branch in RandomFlip.apply fires
            # (np.random.uniform(0, 1) < 1.1 is always true).
            flipper = RandomFlip(1.1)
            samples.append(flipper(sample.copy(), context=context))

        for size in self.target_size:
            resizer = Resize(size, keep_ratio=True, interp=self.interp)
            samples.append(resizer(sample.copy(), context))

        return samples
@register_op
class RandomResize(BaseOperator):
    def __init__(self,
                 target_size,
                 keep_ratio=True,
                 interp=cv2.INTER_LINEAR,
                 random_size=True,
                 random_interp=False):
        """Resize the image to a randomly chosen target size and/or
        interpolation method, delegating the actual work to Resize.

        Args:
            target_size (int, list, tuple): image target size; must be a
                list or tuple when random_size is True.
            keep_ratio (bool): whether to keep the aspect ratio.
            interp (int): the interpolation method used when
                random_interp is False.
            random_size (bool): whether to pick the target size at random.
            random_interp (bool): whether to pick the interpolation method
                at random.
        """
        super(RandomResize, self).__init__()
        self.keep_ratio = keep_ratio
        self.interp = interp
        # Pool of interpolation methods used when random_interp is on.
        self.interps = [
            cv2.INTER_NEAREST,
            cv2.INTER_LINEAR,
            cv2.INTER_AREA,
            cv2.INTER_CUBIC,
            cv2.INTER_LANCZOS4,
        ]
        assert isinstance(target_size, (
            Integral, Sequence)), "target_size must be Integer, List or Tuple"
        if random_size and not isinstance(target_size, Sequence):
            raise TypeError(
                "Type of target_size is invalid when random_size is True. Must be List or Tuple, now is {}".
                format(type(target_size)))
        self.target_size = target_size
        self.random_size = random_size
        self.random_interp = random_interp

    def apply(self, sample, context=None):
        """ Resize the image numpy.
        """
        chosen_size = (random.choice(self.target_size)
                       if self.random_size else self.target_size)
        chosen_interp = (random.choice(self.interps)
                         if self.random_interp else self.interp)
        resizer = Resize(chosen_size, self.keep_ratio, chosen_interp)
        return resizer(sample, context=context)
@register_op
class RandomExpand(BaseOperator):
    """Random expand the canvas.
    Args:
        ratio (float): maximum expansion ratio.
        prob (float): probability to expand.
        fill_value (list): color value used to fill the canvas. in RGB order.
    """

    def __init__(self, ratio=4., prob=0.5, fill_value=(127.5, 127.5, 127.5)):
        super(RandomExpand, self).__init__()
        assert ratio > 1.01, "expand ratio must be larger than 1.01"
        self.ratio = ratio
        self.prob = prob
        assert isinstance(fill_value, (Number, Sequence)), \
            "fill value must be either float or sequence"
        # Normalize fill_value to a 3-tuple of channel values.
        if isinstance(fill_value, Number):
            fill_value = (fill_value, ) * 3
        if not isinstance(fill_value, tuple):
            fill_value = tuple(fill_value)
        self.fill_value = fill_value

    def apply(self, sample, context=None):
        # Skip expansion when the draw falls below `prob` (comparison kept
        # identical to the original operator).
        if np.random.uniform(0., 1.) < self.prob:
            return sample

        img = sample['image']
        src_h, src_w = img.shape[:2]
        expand_ratio = np.random.uniform(1., self.ratio)
        canvas_h = int(src_h * expand_ratio)
        canvas_w = int(src_w * expand_ratio)
        # Int truncation can leave the canvas no bigger than the source.
        if canvas_h <= src_h or canvas_w <= src_w:
            return sample
        # Random placement of the source image on the larger canvas.
        off_y = np.random.randint(0, canvas_h - src_h)
        off_x = np.random.randint(0, canvas_w - src_w)

        padder = Pad([canvas_h, canvas_w],
                     pad_mode=-1,
                     offsets=[off_x, off_y],
                     fill_value=self.fill_value)

        return padder(sample, context=context)
@register_op
class CropWithSampling(BaseOperator):
    """SSD-style random crop: generate candidate crop windows from a list of
    samplers and apply the first one that survives the constraints."""

    def __init__(self, batch_sampler, satisfy_all=False, avoid_no_bbox=True):
        """
        Args:
            batch_sampler (list): Multiple sets of different
                                  parameters for cropping.
            satisfy_all (bool): whether all boxes must satisfy.
            e.g.[[1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0],
                 [1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 1.0],
                 [1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 1.0],
                 [1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 1.0],
                 [1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 1.0],
                 [1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 1.0],
                 [1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0]]
            [max sample, max trial, min scale, max scale,
             min aspect ratio, max aspect ratio,
             min overlap, max overlap]
            avoid_no_bbox (bool): whether to to avoid the
                                  situation where the box does not appear.
        """
        super(CropWithSampling, self).__init__()
        self.batch_sampler = batch_sampler
        self.satisfy_all = satisfy_all
        self.avoid_no_bbox = avoid_no_bbox

    def apply(self, sample, context):
        """
        Crop the image and modify bounding box.
        Operators:
            1. Scale the image width and height.
            2. Crop the image according to a radom sample.
            3. Rescale the bounding box.
            4. Determine if the new bbox is satisfied in the new image.
        Returns:
            sample: the image, bounding box are replaced.
        """
        assert 'image' in sample, "image data not found"
        im = sample['image']
        gt_bbox = sample['gt_bbox']
        gt_class = sample['gt_class']
        im_height, im_width = im.shape[:2]
        gt_score = None
        if 'gt_score' in sample:
            gt_score = sample['gt_score']
        sampled_bbox = []
        gt_bbox = gt_bbox.tolist()
        # For each sampler, collect up to sampler[0] candidate crop windows,
        # making at most sampler[1] attempts.
        for sampler in self.batch_sampler:
            found = 0
            for i in range(sampler[1]):
                if found >= sampler[0]:
                    break
                sample_bbox = generate_sample_bbox(sampler)
                if satisfy_sample_constraint(sampler, sample_bbox, gt_bbox,
                                             self.satisfy_all):
                    sampled_bbox.append(sample_bbox)
                    found = found + 1
        im = np.array(im)
        # Try the collected candidates in random order; return on the first
        # one that keeps at least one box (when avoid_no_bbox is set).
        while sampled_bbox:
            idx = int(np.random.uniform(0, len(sampled_bbox)))
            sample_bbox = sampled_bbox.pop(idx)
            sample_bbox = clip_bbox(sample_bbox)
            crop_bbox, crop_class, crop_score = \
                filter_and_process(sample_bbox, gt_bbox, gt_class, scores=gt_score)
            if self.avoid_no_bbox:
                if len(crop_bbox) < 1:
                    # this crop would drop every box; try another candidate
                    continue
            # candidate window appears to be in relative coordinates —
            # converted to pixels here (confirm against generate_sample_bbox)
            xmin = int(sample_bbox[0] * im_width)
            xmax = int(sample_bbox[2] * im_width)
            ymin = int(sample_bbox[1] * im_height)
            ymax = int(sample_bbox[3] * im_height)
            im = im[ymin:ymax, xmin:xmax]
            sample['image'] = im
            sample['gt_bbox'] = crop_bbox
            sample['gt_class'] = crop_class
            sample['gt_score'] = crop_score
            return sample
        # no candidate was usable: return the sample unchanged
        return sample
@register_op
class CropWithDataAchorSampling(BaseOperator):
    """Random crop that, with probability (1 - sampling_prob), uses
    data-anchor sampling around gt boxes instead of plain batch sampling."""

    def __init__(self,
                 batch_sampler,
                 anchor_sampler=None,
                 target_size=None,
                 das_anchor_scales=[16, 32, 64, 128],
                 sampling_prob=0.5,
                 min_size=8.,
                 avoid_no_bbox=True):
        """
        Args:
            anchor_sampler (list): anchor_sampling sets of different
                parameters for cropping.
            batch_sampler (list): Multiple sets of different
                parameters for cropping.
            e.g.[[1, 10, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.2, 0.0]]
            [[1, 50, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
             [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
             [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
             [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
             [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0]]
            [max sample, max trial, min scale, max scale,
             min aspect ratio, max aspect ratio,
             min overlap, max overlap, min coverage, max coverage]
            target_size (int): target image size.
            das_anchor_scales (list[float]): a list of anchor scales in data
                anchor smapling.
            min_size (float): minimum size of sampled bbox.
            avoid_no_bbox (bool): whether to to avoid the
                situation where the box does not appear.
        """
        super(CropWithDataAchorSampling, self).__init__()
        self.anchor_sampler = anchor_sampler
        self.batch_sampler = batch_sampler
        self.target_size = target_size
        self.sampling_prob = sampling_prob
        self.min_size = min_size
        self.avoid_no_bbox = avoid_no_bbox
        self.das_anchor_scales = np.array(das_anchor_scales)

    def apply(self, sample, context):
        """
        Crop the image and modify bounding box.
        Operators:
            1. Scale the image width and height.
            2. Crop the image according to a radom sample.
            3. Rescale the bounding box.
            4. Determine if the new bbox is satisfied in the new image.
        Returns:
            sample: the image, bounding box are replaced.
        """
        assert 'image' in sample, "image data not found"
        im = sample['image']
        gt_bbox = sample['gt_bbox']
        gt_class = sample['gt_class']
        image_height, image_width = im.shape[:2]
        # normalize gt boxes to [0, 1] in place before sampling
        gt_bbox[:, 0] /= image_width
        gt_bbox[:, 1] /= image_height
        gt_bbox[:, 2] /= image_width
        gt_bbox[:, 3] /= image_height
        gt_score = None
        if 'gt_score' in sample:
            gt_score = sample['gt_score']
        sampled_bbox = []
        gt_bbox = gt_bbox.tolist()
        prob = np.random.uniform(0., 1.)
        if prob > self.sampling_prob:  # anchor sampling
            assert self.anchor_sampler
            # collect candidate crops via data-anchor sampling around gt boxes
            for sampler in self.anchor_sampler:
                found = 0
                for i in range(sampler[1]):
                    if found >= sampler[0]:
                        break
                    sample_bbox = data_anchor_sampling(
                        gt_bbox, image_width, image_height,
                        self.das_anchor_scales, self.target_size)
                    # data_anchor_sampling signals failure by returning 0
                    if sample_bbox == 0:
                        break
                    if satisfy_sample_constraint_coverage(sampler, sample_bbox,
                                                          gt_bbox):
                        sampled_bbox.append(sample_bbox)
                        found = found + 1
            im = np.array(im)
            # try candidates in random order, keep the first acceptable one
            while sampled_bbox:
                idx = int(np.random.uniform(0, len(sampled_bbox)))
                sample_bbox = sampled_bbox.pop(idx)
                if 'gt_keypoint' in sample.keys():
                    keypoints = (sample['gt_keypoint'],
                                 sample['keypoint_ignore'])
                    crop_bbox, crop_class, crop_score, gt_keypoints = \
                        filter_and_process(sample_bbox, gt_bbox, gt_class,
                                           scores=gt_score,
                                           keypoints=keypoints)
                else:
                    crop_bbox, crop_class, crop_score = filter_and_process(
                        sample_bbox, gt_bbox, gt_class, scores=gt_score)
                # drop boxes that would become too small in the crop
                crop_bbox, crop_class, crop_score = bbox_area_sampling(
                    crop_bbox, crop_class, crop_score, self.target_size,
                    self.min_size)
                if self.avoid_no_bbox:
                    if len(crop_bbox) < 1:
                        continue
                im = crop_image_sampling(im, sample_bbox, image_width,
                                         image_height, self.target_size)
                # rescale boxes back to pixel coordinates of the cropped image
                height, width = im.shape[:2]
                crop_bbox[:, 0] *= width
                crop_bbox[:, 1] *= height
                crop_bbox[:, 2] *= width
                crop_bbox[:, 3] *= height
                sample['image'] = im
                sample['gt_bbox'] = crop_bbox
                sample['gt_class'] = crop_class
                if 'gt_score' in sample:
                    sample['gt_score'] = crop_score
                if 'gt_keypoint' in sample.keys():
                    sample['gt_keypoint'] = gt_keypoints[0]
                    sample['keypoint_ignore'] = gt_keypoints[1]
                return sample
            # no usable candidate: return the (box-normalized) sample as is
            return sample
        else:
            # plain batch sampling with square candidate windows
            for sampler in self.batch_sampler:
                found = 0
                for i in range(sampler[1]):
                    if found >= sampler[0]:
                        break
                    sample_bbox = generate_sample_bbox_square(
                        sampler, image_width, image_height)
                    if satisfy_sample_constraint_coverage(sampler, sample_bbox,
                                                          gt_bbox):
                        sampled_bbox.append(sample_bbox)
                        found = found + 1
            im = np.array(im)
            while sampled_bbox:
                idx = int(np.random.uniform(0, len(sampled_bbox)))
                sample_bbox = sampled_bbox.pop(idx)
                sample_bbox = clip_bbox(sample_bbox)
                if 'gt_keypoint' in sample.keys():
                    keypoints = (sample['gt_keypoint'],
                                 sample['keypoint_ignore'])
                    crop_bbox, crop_class, crop_score, gt_keypoints = \
                        filter_and_process(sample_bbox, gt_bbox, gt_class,
                                           scores=gt_score,
                                           keypoints=keypoints)
                else:
                    crop_bbox, crop_class, crop_score = filter_and_process(
                        sample_bbox, gt_bbox, gt_class, scores=gt_score)
                # sampling bbox according the bbox area
                crop_bbox, crop_class, crop_score = bbox_area_sampling(
                    crop_bbox, crop_class, crop_score, self.target_size,
                    self.min_size)
                if self.avoid_no_bbox:
                    if len(crop_bbox) < 1:
                        continue
                # candidate window is in relative coords; crop in pixel space
                xmin = int(sample_bbox[0] * image_width)
                xmax = int(sample_bbox[2] * image_width)
                ymin = int(sample_bbox[1] * image_height)
                ymax = int(sample_bbox[3] * image_height)
                im = im[ymin:ymax, xmin:xmax]
                height, width = im.shape[:2]
                crop_bbox[:, 0] *= width
                crop_bbox[:, 1] *= height
                crop_bbox[:, 2] *= width
                crop_bbox[:, 3] *= height
                sample['image'] = im
                sample['gt_bbox'] = crop_bbox
                sample['gt_class'] = crop_class
                if 'gt_score' in sample:
                    sample['gt_score'] = crop_score
                if 'gt_keypoint' in sample.keys():
                    sample['gt_keypoint'] = gt_keypoints[0]
                    sample['keypoint_ignore'] = gt_keypoints[1]
                return sample
            return sample
@register_op
class RandomCrop(BaseOperator):
    """Random crop image and bboxes.
    Args:
        aspect_ratio (list): aspect ratio of cropped region.
            in [min, max] format.
        thresholds (list): iou thresholds for decide a valid bbox crop.
        scaling (list): ratio between a cropped region and the original image.
             in [min, max] format.
        num_attempts (int): number of tries before giving up.
        allow_no_crop (bool): allow return without actually cropping them.
        cover_all_box (bool): ensure all bboxes are covered in the final crop.
        is_mask_crop(bool): whether crop the segmentation.
    """

    def __init__(self,
                 aspect_ratio=[.5, 2.],
                 thresholds=[.0, .1, .3, .5, .7, .9],
                 scaling=[.3, 1.],
                 num_attempts=50,
                 allow_no_crop=True,
                 cover_all_box=False,
                 is_mask_crop=False):
        super(RandomCrop, self).__init__()
        self.aspect_ratio = aspect_ratio
        self.thresholds = thresholds
        self.scaling = scaling
        self.num_attempts = num_attempts
        self.allow_no_crop = allow_no_crop
        self.cover_all_box = cover_all_box
        self.is_mask_crop = is_mask_crop

    def crop_segms(self, segms, valid_ids, crop, height, width):
        """Crop the segmentations (polygon or RLE) selected by valid_ids
        to the pixel-space crop window [xmin, ymin, xmax, ymax]."""

        def _crop_poly(segm, crop):
            # Intersect each polygon part with the crop rectangle and
            # translate surviving pieces into crop-local coordinates.
            xmin, ymin, xmax, ymax = crop
            crop_coord = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin]
            crop_p = np.array(crop_coord).reshape(4, 2)
            crop_p = Polygon(crop_p)
            crop_segm = list()
            for poly in segm:
                poly = np.array(poly).reshape(len(poly) // 2, 2)
                polygon = Polygon(poly)
                if not polygon.is_valid:
                    # repair self-intersecting polygons by re-polygonizing
                    # their exterior ring
                    exterior = polygon.exterior
                    multi_lines = exterior.intersection(exterior)
                    polygons = shapely.ops.polygonize(multi_lines)
                    polygon = MultiPolygon(polygons)
                multi_polygon = list()
                if isinstance(polygon, MultiPolygon):
                    multi_polygon = copy.deepcopy(polygon)
                else:
                    multi_polygon.append(copy.deepcopy(polygon))
                for per_polygon in multi_polygon:
                    inter = per_polygon.intersection(crop_p)
                    if not inter:
                        continue
                    if isinstance(inter, (MultiPolygon, GeometryCollection)):
                        for part in inter:
                            if not isinstance(part, Polygon):
                                continue
                            part = np.squeeze(
                                np.array(part.exterior.coords[:-1]).reshape(1,
                                                                            -1))
                            # shift to crop-local coordinates
                            part[0::2] -= xmin
                            part[1::2] -= ymin
                            crop_segm.append(part.tolist())
                    elif isinstance(inter, Polygon):
                        crop_poly = np.squeeze(
                            np.array(inter.exterior.coords[:-1]).reshape(1, -1))
                        crop_poly[0::2] -= xmin
                        crop_poly[1::2] -= ymin
                        crop_segm.append(crop_poly.tolist())
                    else:
                        continue
            return crop_segm

        def _crop_rle(rle, crop, height, width):
            # Decode the RLE mask, slice the crop window, re-encode.
            if 'counts' in rle and type(rle['counts']) == list:
                rle = mask_util.frPyObjects(rle, height, width)
            mask = mask_util.decode(rle)
            mask = mask[crop[1]:crop[3], crop[0]:crop[2]]
            rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
            return rle

        crop_segms = []
        for id in valid_ids:
            segm = segms[id]
            if is_poly(segm):
                import copy
                import shapely.ops
                from shapely.geometry import Polygon, MultiPolygon, GeometryCollection
                # silence shapely's noisy geometry warnings
                logging.getLogger("shapely").setLevel(logging.WARNING)
                # Polygon format
                crop_segms.append(_crop_poly(segm, crop))
            else:
                # RLE format
                import pycocotools.mask as mask_util
                crop_segms.append(_crop_rle(segm, crop, height, width))
        return crop_segms

    def apply(self, sample, context=None):
        if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
            return sample

        h, w = sample['image'].shape[:2]
        gt_bbox = sample['gt_bbox']

        # NOTE Original method attempts to generate one candidate for each
        # threshold then randomly sample one from the resulting list.
        # Here a short circuit approach is taken, i.e., randomly choose a
        # threshold and attempt to find a valid crop, and simply return the
        # first one found.
        # The probability is not exactly the same, kinda resembling the
        # "Monty Hall" problem. Actually carrying out the attempts will affect
        # observability (just like opening doors in the "Monty Hall" game).
        thresholds = list(self.thresholds)
        if self.allow_no_crop:
            thresholds.append('no_crop')
        np.random.shuffle(thresholds)

        for thresh in thresholds:
            if thresh == 'no_crop':
                return sample

            found = False
            for i in range(self.num_attempts):
                scale = np.random.uniform(*self.scaling)
                if self.aspect_ratio is not None:
                    min_ar, max_ar = self.aspect_ratio
                    # clamp so crop_h/crop_w stay within the scaled image
                    aspect_ratio = np.random.uniform(
                        max(min_ar, scale**2), min(max_ar, scale**-2))
                    h_scale = scale / np.sqrt(aspect_ratio)
                    w_scale = scale * np.sqrt(aspect_ratio)
                else:
                    h_scale = np.random.uniform(*self.scaling)
                    w_scale = np.random.uniform(*self.scaling)
                crop_h = h * h_scale
                crop_w = w * w_scale
                if self.aspect_ratio is None:
                    # without an explicit range, still reject extreme shapes
                    if crop_h / crop_w < 0.5 or crop_h / crop_w > 2.0:
                        continue

                crop_h = int(crop_h)
                crop_w = int(crop_w)
                crop_y = np.random.randint(0, h - crop_h)
                crop_x = np.random.randint(0, w - crop_w)
                crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
                iou = self._iou_matrix(
                    gt_bbox, np.array(
                        [crop_box], dtype=np.float32))
                if iou.max() < thresh:
                    continue

                if self.cover_all_box and iou.min() < thresh:
                    continue

                cropped_box, valid_ids = self._crop_box_with_center_constraint(
                    gt_bbox, np.array(
                        crop_box, dtype=np.float32))
                if valid_ids.size > 0:
                    found = True
                    break

            if found:
                if self.is_mask_crop and 'gt_poly' in sample and len(sample[
                        'gt_poly']) > 0:
                    crop_polys = self.crop_segms(
                        sample['gt_poly'],
                        valid_ids,
                        np.array(
                            crop_box, dtype=np.int64),
                        h,
                        w)
                    if [] in crop_polys:
                        # drop instances whose polygon vanished in the crop
                        delete_id = list()
                        valid_polys = list()
                        for id, crop_poly in enumerate(crop_polys):
                            if crop_poly == []:
                                delete_id.append(id)
                            else:
                                valid_polys.append(crop_poly)
                        valid_ids = np.delete(valid_ids, delete_id)
                        if len(valid_polys) == 0:
                            # every polygon vanished: abandon the crop
                            return sample
                        sample['gt_poly'] = valid_polys
                    else:
                        sample['gt_poly'] = crop_polys

                if 'gt_segm' in sample:
                    sample['gt_segm'] = self._crop_segm(sample['gt_segm'],
                                                        crop_box)
                    sample['gt_segm'] = np.take(
                        sample['gt_segm'], valid_ids, axis=0)

                sample['image'] = self._crop_image(sample['image'], crop_box)
                sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
                sample['gt_class'] = np.take(
                    sample['gt_class'], valid_ids, axis=0)
                if 'gt_score' in sample:
                    sample['gt_score'] = np.take(
                        sample['gt_score'], valid_ids, axis=0)

                if 'is_crowd' in sample:
                    sample['is_crowd'] = np.take(
                        sample['is_crowd'], valid_ids, axis=0)

                if 'difficult' in sample:
                    sample['difficult'] = np.take(
                        sample['difficult'], valid_ids, axis=0)
                return sample

        return sample

    def _iou_matrix(self, a, b):
        """Pairwise IoU between box sets a (N,4) and b (M,4) in xyxy."""
        tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2])
        br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])

        area_i = np.prod(br_i - tl_i, axis=2) * (tl_i < br_i).all(axis=2)
        area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
        area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
        area_o = (area_a[:, np.newaxis] + area_b - area_i)
        return area_i / (area_o + 1e-10)

    def _crop_box_with_center_constraint(self, box, crop):
        """Clip boxes to the crop; keep only boxes whose center lies inside."""
        cropped_box = box.copy()

        cropped_box[:, :2] = np.maximum(box[:, :2], crop[:2])
        cropped_box[:, 2:] = np.minimum(box[:, 2:], crop[2:])
        cropped_box[:, :2] -= crop[:2]
        cropped_box[:, 2:] -= crop[:2]

        centers = (box[:, :2] + box[:, 2:]) / 2
        valid = np.logical_and(crop[:2] <= centers,
                               centers < crop[2:]).all(axis=1)
        valid = np.logical_and(
            valid, (cropped_box[:, :2] < cropped_box[:, 2:]).all(axis=1))

        return cropped_box, np.where(valid)[0]

    def _crop_image(self, img, crop):
        x1, y1, x2, y2 = crop
        return img[y1:y2, x1:x2, :]

    def _crop_segm(self, segm, crop):
        x1, y1, x2, y2 = crop
        return segm[:, y1:y2, x1:x2]
@register_op
class RandomScaledCrop(BaseOperator):
    """Resize image and bbox based on long side (with optional random scaling),
    then crop or pad image to target size.
    Args:
        target_dim (int): target size.
        scale_range (list): random scale range.
        interp (int): interpolation method, default to `cv2.INTER_LINEAR`.
    """

    def __init__(self,
                 target_dim=512,
                 scale_range=[.1, 2.],
                 interp=cv2.INTER_LINEAR):
        super(RandomScaledCrop, self).__init__()
        self.target_dim = target_dim
        self.scale_range = scale_range
        self.interp = interp

    def apply(self, sample, context=None):
        img = sample['image']
        h, w = img.shape[:2]
        random_scale = np.random.uniform(*self.scale_range)
        dim = self.target_dim
        random_dim = int(dim * random_scale)
        dim_max = max(h, w)
        scale = random_dim / dim_max
        # BUG fix: cast resized dims to int — cv2.resize requires an integer
        # dsize, and the canvas slice bounds below must be integers too.
        resize_w = int(w * scale)
        resize_h = int(h * scale)
        offset_x = int(max(0, np.random.uniform(0., resize_w - dim)))
        offset_y = int(max(0, np.random.uniform(0., resize_h - dim)))
        img = cv2.resize(img, (resize_w, resize_h), interpolation=self.interp)
        img = np.array(img)
        # paste the (offset) resized image onto a dim x dim canvas
        canvas = np.zeros((dim, dim, 3), dtype=img.dtype)
        canvas[:min(dim, resize_h), :min(dim, resize_w), :] = img[
            offset_y:offset_y + dim, offset_x:offset_x + dim, :]
        sample['image'] = canvas
        sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)
        # BUG fix: key was misspelled 'sacle_factor', which raised a KeyError
        # at runtime; 'scale_factor' is the key used everywhere else.
        scale_factor = sample['scale_factor']
        sample['scale_factor'] = np.asarray(
            [scale_factor[0] * scale, scale_factor[1] * scale],
            dtype=np.float32)
        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            # scale boxes, shift into canvas coordinates, clip to the canvas
            scale_array = np.array([scale, scale] * 2, dtype=np.float32)
            shift_array = np.array([offset_x, offset_y] * 2, dtype=np.float32)
            boxes = sample['gt_bbox'] * scale_array - shift_array
            boxes = np.clip(boxes, 0, dim - 1)
            # filter boxes with no area
            area = np.prod(boxes[..., 2:] - boxes[..., :2], axis=1)
            valid = (area > 1.).nonzero()[0]
            sample['gt_bbox'] = boxes[valid]
            sample['gt_class'] = sample['gt_class'][valid]
        return sample
@register_op
class Cutmix(BaseOperator):
    def __init__(self, alpha=1.5, beta=1.5):
        """
        CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features, see https://arxiv.org/abs/1905.04899
        Cutmix image and gt_bbbox/gt_score
        Args:
             alpha (float): alpha parameter of beta distribute
             beta (float): beta parameter of beta distribute
        """
        super(Cutmix, self).__init__()
        self.alpha = alpha
        self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha shold be positive in {}".format(self))
        if self.beta <= 0.0:
            raise ValueError("beta shold be positive in {}".format(self))

    def apply_image(self, img1, img2, factor):
        """ _rand_bbox """
        # Canvas is the per-axis max of both image sizes; a random rectangle
        # of relative area (1 - factor) is cut from img1 and filled with img2.
        h = max(img1.shape[0], img2.shape[0])
        w = max(img1.shape[1], img2.shape[1])
        cut_rat = np.sqrt(1. - factor)

        cut_w = np.int32(w * cut_rat)
        cut_h = np.int32(h * cut_rat)

        # uniform random center of the cut rectangle
        cx = np.random.randint(w)
        cy = np.random.randint(h)

        bbx1 = np.clip(cx - cut_w // 2, 0, w - 1)
        bby1 = np.clip(cy - cut_h // 2, 0, h - 1)
        bbx2 = np.clip(cx + cut_w // 2, 0, w - 1)
        bby2 = np.clip(cy + cut_h // 2, 0, h - 1)

        # pad both images to the common canvas before mixing
        img_1_pad = np.zeros((h, w, img1.shape[2]), 'float32')
        img_1_pad[:img1.shape[0], :img1.shape[1], :] = \
            img1.astype('float32')
        img_2_pad = np.zeros((h, w, img2.shape[2]), 'float32')
        img_2_pad[:img2.shape[0], :img2.shape[1], :] = \
            img2.astype('float32')
        img_1_pad[bby1:bby2, bbx1:bbx2, :] = img_2_pad[bby1:bby2, bbx1:bbx2, :]
        # NOTE: returns a float32 image (not cast back to uint8)
        return img_1_pad

    def __call__(self, sample, context=None):
        # expects a pair of samples; anything else passes through unchanged
        if not isinstance(sample, Sequence):
            return sample

        assert len(sample) == 2, 'cutmix need two samples'

        factor = np.random.beta(self.alpha, self.beta)
        factor = max(0.0, min(1.0, factor))
        # degenerate factors reduce to one of the two inputs
        if factor >= 1.0:
            return sample[0]
        if factor <= 0.0:
            return sample[1]
        img1 = sample[0]['image']
        img2 = sample[1]['image']
        img = self.apply_image(img1, img2, factor)
        # concatenate annotations from both samples; scores are weighted
        # by the mixing factor
        gt_bbox1 = sample[0]['gt_bbox']
        gt_bbox2 = sample[1]['gt_bbox']
        gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
        gt_class1 = sample[0]['gt_class']
        gt_class2 = sample[1]['gt_class']
        gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
        gt_score1 = np.ones_like(sample[0]['gt_class'])
        gt_score2 = np.ones_like(sample[1]['gt_class'])
        gt_score = np.concatenate(
            (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
        result = copy.deepcopy(sample[0])
        result['image'] = img
        result['gt_bbox'] = gt_bbox
        result['gt_score'] = gt_score
        result['gt_class'] = gt_class
        if 'is_crowd' in sample[0]:
            is_crowd1 = sample[0]['is_crowd']
            is_crowd2 = sample[1]['is_crowd']
            is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
            result['is_crowd'] = is_crowd
        if 'difficult' in sample[0]:
            is_difficult1 = sample[0]['difficult']
            is_difficult2 = sample[1]['difficult']
            is_difficult = np.concatenate(
                (is_difficult1, is_difficult2), axis=0)
            result['difficult'] = is_difficult
        return result
@register_op
class Mixup(BaseOperator):
    def __init__(self, alpha=1.5, beta=1.5):
        """ Mixup image and gt_bbbox/gt_score
        Args:
            alpha (float): alpha parameter of beta distribute
            beta (float): beta parameter of beta distribute
        """
        super(Mixup, self).__init__()
        self.alpha = alpha
        self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha shold be positive in {}".format(self))
        if self.beta <= 0.0:
            raise ValueError("beta shold be positive in {}".format(self))

    def apply_image(self, img1, img2, factor):
        # Blend both images (padded to a common canvas) with weight `factor`
        # on img1 and (1 - factor) on img2.
        h = max(img1.shape[0], img2.shape[0])
        w = max(img1.shape[1], img2.shape[1])
        img = np.zeros((h, w, img1.shape[2]), 'float32')
        img[:img1.shape[0], :img1.shape[1], :] = \
            img1.astype('float32') * factor
        img[:img2.shape[0], :img2.shape[1], :] += \
            img2.astype('float32') * (1.0 - factor)
        return img.astype('uint8')

    def __call__(self, sample, context=None):
        # expects a pair of samples; anything else passes through unchanged
        if not isinstance(sample, Sequence):
            return sample

        assert len(sample) == 2, 'mixup need two samples'

        factor = np.random.beta(self.alpha, self.beta)
        factor = max(0.0, min(1.0, factor))
        # degenerate factors reduce to one of the two inputs
        if factor >= 1.0:
            return sample[0]
        if factor <= 0.0:
            return sample[1]
        im = self.apply_image(sample[0]['image'], sample[1]['image'], factor)
        result = copy.deepcopy(sample[0])
        result['image'] = im
        # apply bbox and score
        if 'gt_bbox' in sample[0]:
            gt_bbox1 = sample[0]['gt_bbox']
            gt_bbox2 = sample[1]['gt_bbox']
            gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
            result['gt_bbox'] = gt_bbox
        if 'gt_class' in sample[0]:
            gt_class1 = sample[0]['gt_class']
            gt_class2 = sample[1]['gt_class']
            gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
            result['gt_class'] = gt_class

            # scores of the merged boxes are weighted by the mixing factor
            gt_score1 = np.ones_like(sample[0]['gt_class'])
            gt_score2 = np.ones_like(sample[1]['gt_class'])
            gt_score = np.concatenate(
                (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
            result['gt_score'] = gt_score.astype('float32')
        if 'is_crowd' in sample[0]:
            is_crowd1 = sample[0]['is_crowd']
            is_crowd2 = sample[1]['is_crowd']
            is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
            result['is_crowd'] = is_crowd
        if 'difficult' in sample[0]:
            is_difficult1 = sample[0]['difficult']
            is_difficult2 = sample[1]['difficult']
            is_difficult = np.concatenate(
                (is_difficult1, is_difficult2), axis=0)
            result['difficult'] = is_difficult
        if 'gt_ide' in sample[0]:
            # re-identification ids (used by MOT datasets)
            gt_ide1 = sample[0]['gt_ide']
            gt_ide2 = sample[1]['gt_ide']
            gt_ide = np.concatenate((gt_ide1, gt_ide2), axis=0)
            result['gt_ide'] = gt_ide
        return result
@register_op
class NormalizeBox(BaseOperator):
    """Transform the bounding box's coordinates to [0, 1]."""

    def __init__(self):
        super(NormalizeBox, self).__init__()

    def apply(self, sample, context):
        """Scale gt_bbox (and gt_keypoint, if present) by the image size."""
        height, width, _ = sample['image'].shape
        boxes = sample['gt_bbox']
        # x coordinates (cols 0, 2) divided by width, y (cols 1, 3) by height
        boxes[:, 0] = boxes[:, 0] / width
        boxes[:, 1] = boxes[:, 1] / height
        boxes[:, 2] = boxes[:, 2] / width
        boxes[:, 3] = boxes[:, 3] / height
        sample['gt_bbox'] = boxes

        if 'gt_keypoint' in sample.keys():
            keypoints = sample['gt_keypoint']
            # even columns hold x values, odd columns hold y values
            keypoints[:, 0::2] = keypoints[:, 0::2] / width
            keypoints[:, 1::2] = keypoints[:, 1::2] / height
            sample['gt_keypoint'] = keypoints
        return sample
@register_op
class BboxXYXY2XYWH(BaseOperator):
    """Convert gt boxes from (x1, y1, x2, y2) to (cx, cy, w, h) in place."""

    def __init__(self):
        super(BboxXYXY2XYWH, self).__init__()

    def apply(self, sample, context=None):
        assert 'gt_bbox' in sample
        boxes = sample['gt_bbox']
        # first turn (x2, y2) into (w, h), then move (x1, y1) to the center
        boxes[:, 2:4] = boxes[:, 2:4] - boxes[:, :2]
        boxes[:, :2] = boxes[:, :2] + boxes[:, 2:4] / 2.
        sample['gt_bbox'] = boxes
        return sample
@register_op
class PadBox(BaseOperator):
    """Pad per-box annotations with zeros up to a fixed box count."""

    def __init__(self, num_max_boxes=50):
        """
        Args:
            num_max_boxes (int): the max number of bboxes
        """
        self.num_max_boxes = num_max_boxes
        super(PadBox, self).__init__()

    def apply(self, sample, context=None):
        assert 'gt_bbox' in sample
        bbox = sample['gt_bbox']
        num_max = self.num_max_boxes
        gt_num = min(num_max, len(bbox))

        padded_bbox = np.zeros((num_max, 4), dtype=np.float32)
        if gt_num > 0:
            padded_bbox[:gt_num, :] = bbox[:gt_num, :]
        sample['gt_bbox'] = padded_bbox

        def _pad_field(key, dtype):
            # pad a per-box scalar field (stored as column 0 of a 2-D array)
            # into a flat (num_max,) array
            padded = np.zeros((num_max, ), dtype=dtype)
            if gt_num > 0:
                padded[:gt_num] = sample[key][:gt_num, 0]
            sample[key] = padded

        if 'gt_class' in sample:
            _pad_field('gt_class', np.int32)
        if 'gt_score' in sample:
            _pad_field('gt_score', np.float32)
        # in training, for example in op ExpandImage,
        # the bbox and gt_class is expandded, but the difficult is not,
        # so, judging by it's length
        if 'difficult' in sample:
            _pad_field('difficult', np.int32)
        if 'is_crowd' in sample:
            _pad_field('is_crowd', np.int32)
        if 'gt_ide' in sample:
            _pad_field('gt_ide', np.int32)
        return sample
@register_op
class DebugVisibleImage(BaseOperator):
    """
    In debug mode, visualize images according to `gt_box`.
    (Currently only supported when not cropping and flipping image.)
    """

    def __init__(self, output_dir='output/debug', is_normalized=False):
        super(DebugVisibleImage, self).__init__()
        self.is_normalized = is_normalized
        self.output_dir = output_dir
        # make sure the dump directory exists up front
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        if not isinstance(self.is_normalized, bool):
            raise TypeError("{}: input type is invalid.".format(self))

    def apply(self, sample, context=None):
        """Draw gt boxes (and keypoints, if present) on the image and save
        it as <im_id>.jpg under output_dir."""
        image = Image.fromarray(sample['image'].astype(np.uint8))
        out_file_name = '{:012d}.jpg'.format(sample['im_id'][0])
        width = sample['w']
        height = sample['h']
        gt_bbox = sample['gt_bbox']
        gt_class = sample['gt_class']
        draw = ImageDraw.Draw(image)
        for i in range(gt_bbox.shape[0]):
            if self.is_normalized:
                # boxes are in [0, 1]; scale back to pixel coordinates
                gt_bbox[i][0] = gt_bbox[i][0] * width
                gt_bbox[i][1] = gt_bbox[i][1] * height
                gt_bbox[i][2] = gt_bbox[i][2] * width
                gt_bbox[i][3] = gt_bbox[i][3] * height

            xmin, ymin, xmax, ymax = gt_bbox[i]
            # draw the box outline as a closed green polyline
            draw.line(
                [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
                 (xmin, ymin)],
                width=2,
                fill='green')
            # draw label
            text = str(gt_class[i][0])
            tw, th = draw.textsize(text)
            draw.rectangle(
                [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill='green')
            draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))

        if 'gt_keypoint' in sample.keys():
            gt_keypoint = sample['gt_keypoint']
            if self.is_normalized:
                # even columns are x (scaled by width), odd are y (by height)
                for i in range(gt_keypoint.shape[1]):
                    if i % 2:
                        gt_keypoint[:, i] = gt_keypoint[:, i] * height
                    else:
                        gt_keypoint[:, i] = gt_keypoint[:, i] * width
            for i in range(gt_keypoint.shape[0]):
                keypoint = gt_keypoint[i]
                for j in range(int(keypoint.shape[0] / 2)):
                    # small green dot at each (x, y) keypoint
                    x1 = round(keypoint[2 * j]).astype(np.int32)
                    y1 = round(keypoint[2 * j + 1]).astype(np.int32)
                    draw.ellipse(
                        (x1, y1, x1 + 5, y1 + 5), fill='green', outline='green')
        save_path = os.path.join(self.output_dir, out_file_name)
        image.save(save_path, quality=95)
        return sample
@register_op
class Pad(BaseOperator):
    def __init__(self,
                 size=None,
                 size_divisor=32,
                 pad_mode=0,
                 offsets=None,
                 fill_value=(127.5, 127.5, 127.5)):
        """
        Pad image to a specified size or multiple of size_divisor.
        Args:
            size (int, Sequence): image target size, if None, pad to multiple of size_divisor, default None
            size_divisor (int): size divisor, default 32
            pad_mode (int): pad mode, currently only supports four modes [-1, 0, 1, 2]. if -1, use specified offsets
                if 0, only pad to right and bottom. if 1, pad according to center. if 2, only pad left and top
            offsets (list): [offset_x, offset_y], specify offset while padding, only supported pad_mode=-1
            fill_value (bool): rgb value of pad area, default (127.5, 127.5, 127.5)
        """
        super(Pad, self).__init__()

        # BUG fix: size=None is documented (pad to a multiple of size_divisor,
        # handled in apply) but the original check rejected None with a
        # TypeError; only validate the type when a size is actually given.
        if size is not None and not isinstance(size, (int, Sequence)):
            raise TypeError(
                "Type of target_size is invalid when random_size is True. \
                            Must be List, now is {}".format(type(size)))

        if isinstance(size, int):
            size = [size, size]

        assert pad_mode in [
            -1, 0, 1, 2
        ], 'currently only supports four modes [-1, 0, 1, 2]'
        if pad_mode == -1:
            assert offsets, 'if pad_mode is -1, offsets should not be None'

        self.size = size
        self.size_divisor = size_divisor
        self.pad_mode = pad_mode
        self.fill_value = fill_value
        self.offsets = offsets

    def apply_segm(self, segms, offsets, im_size, size):
        """Shift polygon/RLE segmentations by the pad offsets onto the
        padded canvas of shape `size`."""

        def _expand_poly(poly, x, y):
            # translate flat [x0, y0, x1, y1, ...] polygon coordinates
            expanded_poly = np.array(poly)
            expanded_poly[0::2] += x
            expanded_poly[1::2] += y
            return expanded_poly.tolist()

        def _expand_rle(rle, x, y, height, width, h, w):
            # decode, paste into the padded canvas, re-encode
            if 'counts' in rle and type(rle['counts']) == list:
                rle = mask_util.frPyObjects(rle, height, width)
            mask = mask_util.decode(rle)
            expanded_mask = np.full((h, w), 0).astype(mask.dtype)
            expanded_mask[y:y + height, x:x + width] = mask
            rle = mask_util.encode(
                np.array(
                    expanded_mask, order='F', dtype=np.uint8))
            return rle

        x, y = offsets
        height, width = im_size
        h, w = size
        expanded_segms = []
        for segm in segms:
            if is_poly(segm):
                # Polygon format
                expanded_segms.append(
                    [_expand_poly(poly, x, y) for poly in segm])
            else:
                # RLE format
                import pycocotools.mask as mask_util
                expanded_segms.append(
                    _expand_rle(segm, x, y, height, width, h, w))
        return expanded_segms

    def apply_bbox(self, bbox, offsets):
        # shift (x1, y1, x2, y2) by (offset_x, offset_y, offset_x, offset_y)
        return bbox + np.array(offsets * 2, dtype=np.float32)

    def apply_keypoint(self, keypoints, offsets):
        # shift every (x, y) pair by the pad offsets
        n = len(keypoints[0]) // 2
        return keypoints + np.array(offsets * n, dtype=np.float32)

    def apply_image(self, image, offsets, im_size, size):
        """Paste the image at `offsets` onto a fill-colored canvas."""
        x, y = offsets
        im_h, im_w = im_size
        h, w = size
        canvas = np.ones((h, w, 3), dtype=np.float32)
        canvas *= np.array(self.fill_value, dtype=np.float32)
        canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)
        return canvas

    def apply(self, sample, context=None):
        im = sample['image']
        im_h, im_w = im.shape[:2]
        if self.size:
            h, w = self.size
            # BUG fix: allow equality (<=) — an image already at the target
            # size is valid and is handled by the early return below.
            assert (
                im_h <= h and im_w <= w
            ), '(h, w) of target size should be greater than (im_h, im_w)'
        else:
            # pad up to the next multiple of size_divisor
            h = int(np.ceil(im_h / self.size_divisor) * self.size_divisor)
            w = int(np.ceil(im_w / self.size_divisor) * self.size_divisor)

        if h == im_h and w == im_w:
            return sample

        if self.pad_mode == -1:
            offset_x, offset_y = self.offsets
        elif self.pad_mode == 0:
            offset_y, offset_x = 0, 0
        elif self.pad_mode == 1:
            offset_y, offset_x = (h - im_h) // 2, (w - im_w) // 2
        else:
            offset_y, offset_x = h - im_h, w - im_w

        offsets, im_size, size = [offset_x, offset_y], [im_h, im_w], [h, w]

        sample['image'] = self.apply_image(im, offsets, im_size, size)

        if self.pad_mode == 0:
            # right/bottom padding leaves annotation coordinates unchanged
            return sample
        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], offsets)

        if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
            sample['gt_poly'] = self.apply_segm(sample['gt_poly'], offsets,
                                                im_size, size)

        if 'gt_keypoint' in sample and len(sample['gt_keypoint']) > 0:
            sample['gt_keypoint'] = self.apply_keypoint(sample['gt_keypoint'],
                                                        offsets)

        return sample
@register_op
class Poly2Mask(BaseOperator):
    """
    gt poly to mask annotations
    """

    def __init__(self):
        super(Poly2Mask, self).__init__()
        # bind pycocotools at construction time so the dependency is only
        # required when this op is actually used
        import pycocotools.mask as maskUtils
        self.maskutils = maskUtils

    def _poly2mask(self, mask_ann, img_h, img_w):
        """Decode one polygon/RLE annotation into a binary H x W mask."""
        if isinstance(mask_ann, list):
            # polygon -- a single object might consist of multiple parts;
            # merge all part RLEs into one
            rle = self.maskutils.merge(
                self.maskutils.frPyObjects(mask_ann, img_h, img_w))
        elif isinstance(mask_ann['counts'], list):
            # uncompressed RLE
            rle = self.maskutils.frPyObjects(mask_ann, img_h, img_w)
        else:
            # already a compressed RLE
            rle = mask_ann
        return self.maskutils.decode(rle)

    def apply(self, sample, context=None):
        assert 'gt_poly' in sample
        im_h, im_w = sample['h'], sample['w']
        decoded = [
            self._poly2mask(gt_poly, im_h, im_w)
            for gt_poly in sample['gt_poly']
        ]
        sample['gt_segm'] = np.asarray(decoded).astype(np.uint8)
        return sample
@register_op
class Rbox2Poly(BaseOperator):
    """Convert rotated boxes [cx, cy, w, h, angle] to polygon + aabb formats.

    Writes:
        sample['gt_bbox']: axis-aligned [x1, y1, x2, y2] corners computed
            from center/size only (the rotation angle is ignored here).
        sample['gt_rbox2poly']: 8-value polygons from bbox_utils.rbox2poly_np.
    """

    def __init__(self):
        super(Rbox2Poly, self).__init__()

    def apply(self, sample, context=None):
        assert 'gt_rbox' in sample
        assert sample['gt_rbox'].shape[1] == 5
        rrects = sample['gt_rbox']
        centers = rrects[:, 0:2]
        half_wh = rrects[:, 2:4] / 2.0
        # (x1, y1) = center - half size, (x2, y2) = center + half size.
        sample['gt_bbox'] = np.concatenate(
            [centers - half_wh, centers + half_wh], axis=1)
        sample['gt_rbox2poly'] = bbox_utils.rbox2poly_np(rrects)
        return sample
@register_op
class AugmentHSV(BaseOperator):
    """Randomly scale the S and V channels of an image in HSV space.

    Args:
        fraction (float): the fraction for augment. Default: 0.5.
        is_bgr (bool): whether the image is BGR mode. Default: True.
    """

    def __init__(self, fraction=0.50, is_bgr=True):
        super(AugmentHSV, self).__init__()
        self.fraction = fraction
        self.is_bgr = is_bgr

    def _scale_channel(self, channel):
        """Scale one float32 channel by a random factor in [1-f, 1+f]."""
        factor = (random.random() * 2 - 1) * self.fraction + 1
        channel *= factor
        if factor > 1:
            # Only an upscale can push uint8 values past 255, so clip then.
            np.clip(channel, a_min=0, a_max=255, out=channel)
        return channel

    def apply(self, sample, context=None):
        img = sample['image']
        to_hsv = cv2.COLOR_BGR2HSV if self.is_bgr else cv2.COLOR_RGB2HSV
        from_hsv = cv2.COLOR_HSV2BGR if self.is_bgr else cv2.COLOR_HSV2RGB
        img_hsv = cv2.cvtColor(img, to_hsv)
        # Saturation first, then value, preserving the RNG call order.
        sat = self._scale_channel(img_hsv[:, :, 1].astype(np.float32))
        val = self._scale_channel(img_hsv[:, :, 2].astype(np.float32))
        img_hsv[:, :, 1] = sat.astype(np.uint8)
        img_hsv[:, :, 2] = val.astype(np.uint8)
        # Convert back in place so `img` is updated directly.
        cv2.cvtColor(img_hsv, from_hsv, dst=img)
        sample['image'] = img
        return sample
@register_op
class Norm2PixelBbox(BaseOperator):
    """Denormalize box coordinates from [0, 1] to pixel units.

    x coordinates are scaled by the image width, y coordinates by the height.
    """

    def __init__(self):
        super(Norm2PixelBbox, self).__init__()

    def apply(self, sample, context=None):
        assert 'gt_bbox' in sample
        boxes = sample['gt_bbox']
        im_h, im_w = sample['image'].shape[:2]
        # Even columns hold (x1, x2), odd columns hold (y1, y2).
        boxes[:, 0::2] = boxes[:, 0::2] * im_w
        boxes[:, 1::2] = boxes[:, 1::2] * im_h
        sample['gt_bbox'] = boxes
        return sample
@register_op
class BboxCXCYWH2XYXY(BaseOperator):
    """Convert bbox CXCYWH format to XYXY format.

    [center_x, center_y, width, height] -> [x0, y0, x1, y1]
    """

    def __init__(self):
        super(BboxCXCYWH2XYXY, self).__init__()

    def apply(self, sample, context=None):
        assert 'gt_bbox' in sample
        cxcywh = sample['gt_bbox']
        centers = cxcywh[:, :2]
        half_sizes = cxcywh[:, 2:4] / 2.
        # Work on a copy so the source array is not mutated in place.
        converted = cxcywh.copy()
        converted[:, :2] = centers - half_sizes
        converted[:, 2:4] = centers + half_sizes
        sample['gt_bbox'] = converted
        return sample
@register_op
class RandomResizeCrop(BaseOperator):
    """Random resize and crop image and bboxes.
    Args:
        resizes (list): resize image to one of resizes. if keep_ratio is True and mode is
        'long', resize the image's long side to the maximum of target_size, if keep_ratio is
        True and mode is 'short', resize the image's short side to the minimum of target_size.
        cropsizes (list): crop sizes after resize, [(min_crop_1, max_crop_1), ...]
        mode (str): resize mode, `long` or `short`. Details see resizes.
        prob (float): probability of this op.
        keep_ratio (bool): whether keep_ratio or not, default true
        interp (int): the interpolation method
        thresholds (list): iou thresholds for decide a valid bbox crop.
        num_attempts (int): number of tries before giving up.
        allow_no_crop (bool): allow return without actually cropping them.
        cover_all_box (bool): ensure all bboxes are covered in the final crop.
        is_mask_crop(bool): whether crop the segmentation.
    """
    def __init__(
            self,
            resizes,
            cropsizes,
            prob=0.5,
            mode='short',
            keep_ratio=True,
            interp=cv2.INTER_LINEAR,
            num_attempts=3,
            cover_all_box=False,
            allow_no_crop=False,
            thresholds=[0.3, 0.5, 0.7],
            is_mask_crop=False, ):
        super(RandomResizeCrop, self).__init__()
        self.resizes = resizes
        self.cropsizes = cropsizes
        self.prob = prob
        self.mode = mode
        # Delegate the actual work to the existing Resize / RandomCrop ops;
        # the static helpers below borrow their state via `self = resizer` /
        # `self = croper`.
        self.resizer = Resize(0, keep_ratio=keep_ratio, interp=interp)
        self.croper = RandomCrop(
            num_attempts=num_attempts,
            cover_all_box=cover_all_box,
            thresholds=thresholds,
            allow_no_crop=allow_no_crop,
            is_mask_crop=is_mask_crop)
    def _format_size(self, size):
        """Coerce a single int into an (h, w) pair; pass tuples through."""
        if isinstance(size, Integral):
            size = (size, size)
        return size
    def apply(self, sample, context=None):
        # With probability `prob`, resize to a random target then random-crop.
        if random.random() < self.prob:
            _resize = self._format_size(random.choice(self.resizes))
            _cropsize = self._format_size(random.choice(self.cropsizes))
            sample = self._resize(
                self.resizer,
                sample,
                size=_resize,
                mode=self.mode,
                context=context)
            sample = self._random_crop(
                self.croper, sample, size=_cropsize, context=context)
        return sample
    @staticmethod
    def _random_crop(croper, sample, size, context=None):
        """Crop `sample` using the delegated RandomCrop's helpers, but with a
        crop size drawn from [min(size), max(size)] instead of RandomCrop's
        own aspect/area sampling."""
        if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
            return sample
        # Rebind `self` to the delegate so its methods/attributes are used.
        self = croper
        h, w = sample['image'].shape[:2]
        gt_bbox = sample['gt_bbox']
        cropsize = size
        min_crop = min(cropsize)
        max_crop = max(cropsize)
        thresholds = list(self.thresholds)
        np.random.shuffle(thresholds)
        for thresh in thresholds:
            found = False
            for _ in range(self.num_attempts):
                # Sample a crop window, clamped to the image size.
                crop_h = random.randint(min_crop, min(h, max_crop))
                crop_w = random.randint(min_crop, min(w, max_crop))
                crop_y = random.randint(0, h - crop_h)
                crop_x = random.randint(0, w - crop_w)
                crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
                iou = self._iou_matrix(
                    gt_bbox, np.array(
                        [crop_box], dtype=np.float32))
                # Reject crops that don't overlap boxes enough at this level.
                if iou.max() < thresh:
                    continue
                if self.cover_all_box and iou.min() < thresh:
                    continue
                cropped_box, valid_ids = self._crop_box_with_center_constraint(
                    gt_bbox, np.array(
                        crop_box, dtype=np.float32))
                if valid_ids.size > 0:
                    found = True
                    break
            if found:
                if self.is_mask_crop and 'gt_poly' in sample and len(sample[
                        'gt_poly']) > 0:
                    crop_polys = self.crop_segms(
                        sample['gt_poly'],
                        valid_ids,
                        np.array(
                            crop_box, dtype=np.int64),
                        h,
                        w)
                    if [] in crop_polys:
                        # Drop instances whose polygon vanished in the crop.
                        delete_id = list()
                        valid_polys = list()
                        for id, crop_poly in enumerate(crop_polys):
                            if crop_poly == []:
                                delete_id.append(id)
                            else:
                                valid_polys.append(crop_poly)
                        valid_ids = np.delete(valid_ids, delete_id)
                        if len(valid_polys) == 0:
                            return sample
                        sample['gt_poly'] = valid_polys
                    else:
                        sample['gt_poly'] = crop_polys
                if 'gt_segm' in sample:
                    sample['gt_segm'] = self._crop_segm(sample['gt_segm'],
                                                        crop_box)
                    sample['gt_segm'] = np.take(
                        sample['gt_segm'], valid_ids, axis=0)
                sample['image'] = self._crop_image(sample['image'], crop_box)
                sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
                sample['gt_class'] = np.take(
                    sample['gt_class'], valid_ids, axis=0)
                if 'gt_score' in sample:
                    sample['gt_score'] = np.take(
                        sample['gt_score'], valid_ids, axis=0)
                if 'is_crowd' in sample:
                    sample['is_crowd'] = np.take(
                        sample['is_crowd'], valid_ids, axis=0)
                return sample
        # No acceptable crop found at any threshold: return unchanged.
        return sample
    @staticmethod
    def _resize(resizer, sample, size, mode='short', context=None):
        """Resize `sample` via the delegated Resize op, honoring `mode`:
        'long' fits the long side, anything else fits the short side."""
        # Rebind `self` to the delegate so its methods/attributes are used.
        self = resizer
        im = sample['image']
        target_size = size
        if not isinstance(im, np.ndarray):
            raise TypeError("{}: image type is not numpy.".format(self))
        if len(im.shape) != 3:
            raise ImageError('{}: image is not 3-dimensional.'.format(self))
        # apply image
        im_shape = im.shape
        if self.keep_ratio:
            im_size_min = np.min(im_shape[0:2])
            im_size_max = np.max(im_shape[0:2])
            target_size_min = np.min(target_size)
            target_size_max = np.max(target_size)
            if mode == 'long':
                im_scale = min(target_size_min / im_size_min,
                               target_size_max / im_size_max)
            else:
                im_scale = max(target_size_min / im_size_min,
                               target_size_max / im_size_max)
            resize_h = im_scale * float(im_shape[0])
            resize_w = im_scale * float(im_shape[1])
            im_scale_x = im_scale
            im_scale_y = im_scale
        else:
            resize_h, resize_w = target_size
            im_scale_y = resize_h / im_shape[0]
            im_scale_x = resize_w / im_shape[1]
        im = self.apply_image(sample['image'], [im_scale_x, im_scale_y])
        sample['image'] = im
        sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)
        if 'scale_factor' in sample:
            # Accumulate with any scaling applied by earlier ops.
            scale_factor = sample['scale_factor']
            sample['scale_factor'] = np.asarray(
                [scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
                dtype=np.float32)
        else:
            sample['scale_factor'] = np.asarray(
                [im_scale_y, im_scale_x], dtype=np.float32)
        # apply bbox
        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'],
                                                [im_scale_x, im_scale_y],
                                                [resize_w, resize_h])
        # apply rbox
        if 'gt_rbox2poly' in sample:
            if np.array(sample['gt_rbox2poly']).shape[1] != 8:
                logger.warn(
                    "gt_rbox2poly's length shoule be 8, but actually is {}".
                    format(len(sample['gt_rbox2poly'])))
            sample['gt_rbox2poly'] = self.apply_bbox(sample['gt_rbox2poly'],
                                                     [im_scale_x, im_scale_y],
                                                     [resize_w, resize_h])
        # apply polygon
        if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
            sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_shape[:2],
                                                [im_scale_x, im_scale_y])
        # apply semantic
        if 'semantic' in sample and sample['semantic']:
            semantic = sample['semantic']
            semantic = cv2.resize(
                semantic.astype('float32'),
                None,
                None,
                fx=im_scale_x,
                fy=im_scale_y,
                interpolation=self.interp)
            semantic = np.asarray(semantic).astype('int32')
            semantic = np.expand_dims(semantic, 0)
            sample['semantic'] = semantic
        # apply gt_segm (nearest-neighbor keeps masks binary)
        if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
            masks = [
                cv2.resize(
                    gt_segm,
                    None,
                    None,
                    fx=im_scale_x,
                    fy=im_scale_y,
                    interpolation=cv2.INTER_NEAREST)
                for gt_segm in sample['gt_segm']
            ]
            sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
        return sample
@register_op
class RandomSelect(BaseOperator):
    """Apply one of two transform pipelines, chosen at random.

    ``transforms1`` is chosen with probability ``p``, otherwise
    ``transforms2`` is applied.
    The code is based on https://github.com/facebookresearch/detr/blob/main/datasets/transforms.py
    """

    def __init__(self, transforms1, transforms2, p=0.5):
        super(RandomSelect, self).__init__()
        self.transforms1 = Compose(transforms1)
        self.transforms2 = Compose(transforms2)
        self.p = p

    def apply(self, sample, context=None):
        chosen = self.transforms1 if random.random() < self.p else self.transforms2
        return chosen(sample)
@register_op
class RandomShortSideResize(BaseOperator):
    def __init__(self,
                 short_side_sizes,
                 max_size=None,
                 interp=cv2.INTER_LINEAR,
                 random_interp=False):
        """
        Resize the image randomly according to the short side. If max_size is not None,
        the long side is scaled according to max_size. The whole process will be keep ratio.
        Args:
            short_side_sizes (list|tuple): Image target short side size.
            max_size (int): The size of the longest side of image after resize.
            interp (int): The interpolation method.
            random_interp (bool): Whether random select interpolation method.
        """
        super(RandomShortSideResize, self).__init__()
        assert isinstance(short_side_sizes,
                          Sequence), "short_side_sizes must be List or Tuple"
        self.short_side_sizes = short_side_sizes
        self.max_size = max_size
        self.interp = interp
        self.random_interp = random_interp
        # Candidate interpolation methods used when random_interp is True.
        self.interps = [
            cv2.INTER_NEAREST,
            cv2.INTER_LINEAR,
            cv2.INTER_AREA,
            cv2.INTER_CUBIC,
            cv2.INTER_LANCZOS4,
        ]
    def get_size_with_aspect_ratio(self, image_shape, size, max_size=None):
        """Return the (w, h) that makes the short side equal `size` while
        keeping aspect ratio; shrink `size` first if the implied long side
        would exceed `max_size`."""
        h, w = image_shape
        if max_size is not None:
            min_original_size = float(min((w, h)))
            max_original_size = float(max((w, h)))
            if max_original_size / min_original_size * size > max_size:
                size = int(
                    round(max_size * min_original_size / max_original_size))
        if (w <= h and w == size) or (h <= w and h == size):
            # Short side already matches: no resize needed.
            return (w, h)
        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)
        return (ow, oh)
    def resize(self,
               sample,
               target_size,
               max_size=None,
               interp=cv2.INTER_LINEAR):
        """Resize image and all aligned annotations to the computed (w, h)."""
        im = sample['image']
        if not isinstance(im, np.ndarray):
            raise TypeError("{}: image type is not numpy.".format(self))
        if len(im.shape) != 3:
            raise ImageError('{}: image is not 3-dimensional.'.format(self))
        target_size = self.get_size_with_aspect_ratio(im.shape[:2], target_size,
                                                      max_size)
        im_scale_y, im_scale_x = target_size[1] / im.shape[0], target_size[
            0] / im.shape[1]
        sample['image'] = cv2.resize(im, target_size, interpolation=interp)
        # im_shape is stored as (h, w); target_size is (w, h), hence [::-1].
        sample['im_shape'] = np.asarray(target_size[::-1], dtype=np.float32)
        if 'scale_factor' in sample:
            # Accumulate with any scaling applied by earlier ops.
            scale_factor = sample['scale_factor']
            sample['scale_factor'] = np.asarray(
                [scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
                dtype=np.float32)
        else:
            sample['scale_factor'] = np.asarray(
                [im_scale_y, im_scale_x], dtype=np.float32)
        # apply bbox
        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            sample['gt_bbox'] = self.apply_bbox(
                sample['gt_bbox'], [im_scale_x, im_scale_y], target_size)
        # apply polygon
        if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
            sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im.shape[:2],
                                                [im_scale_x, im_scale_y])
        # apply semantic
        # NOTE(review): this uses self.interp rather than the per-call
        # `interp` argument — confirm that is intentional.
        if 'semantic' in sample and sample['semantic']:
            semantic = sample['semantic']
            semantic = cv2.resize(
                semantic.astype('float32'),
                target_size,
                interpolation=self.interp)
            semantic = np.asarray(semantic).astype('int32')
            semantic = np.expand_dims(semantic, 0)
            sample['semantic'] = semantic
        # apply gt_segm (nearest-neighbor keeps masks binary)
        if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
            masks = [
                cv2.resize(
                    gt_segm, target_size, interpolation=cv2.INTER_NEAREST)
                for gt_segm in sample['gt_segm']
            ]
            sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
        return sample
    def apply_bbox(self, bbox, scale, size):
        """Scale boxes and clip them to the resized image extent."""
        im_scale_x, im_scale_y = scale
        resize_w, resize_h = size
        bbox[:, 0::2] *= im_scale_x
        bbox[:, 1::2] *= im_scale_y
        bbox[:, 0::2] = np.clip(bbox[:, 0::2], 0, resize_w)
        bbox[:, 1::2] = np.clip(bbox[:, 1::2], 0, resize_h)
        return bbox.astype('float32')
    def apply_segm(self, segms, im_size, scale):
        """Scale polygon or RLE segmentations by (im_scale_x, im_scale_y)."""
        def _resize_poly(poly, im_scale_x, im_scale_y):
            resized_poly = np.array(poly).astype('float32')
            resized_poly[0::2] *= im_scale_x
            resized_poly[1::2] *= im_scale_y
            return resized_poly.tolist()
        def _resize_rle(rle, im_h, im_w, im_scale_x, im_scale_y):
            if 'counts' in rle and type(rle['counts']) == list:
                rle = mask_util.frPyObjects(rle, im_h, im_w)
            mask = mask_util.decode(rle)
            mask = cv2.resize(
                mask,
                None,
                None,
                fx=im_scale_x,
                fy=im_scale_y,
                interpolation=self.interp)
            rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
            return rle
        im_h, im_w = im_size
        im_scale_x, im_scale_y = scale
        resized_segms = []
        for segm in segms:
            if is_poly(segm):
                # Polygon format
                resized_segms.append([
                    _resize_poly(poly, im_scale_x, im_scale_y) for poly in segm
                ])
            else:
                # RLE format
                import pycocotools.mask as mask_util
                resized_segms.append(
                    _resize_rle(segm, im_h, im_w, im_scale_x, im_scale_y))
        return resized_segms
    def apply(self, sample, context=None):
        target_size = random.choice(self.short_side_sizes)
        interp = random.choice(
            self.interps) if self.random_interp else self.interp
        return self.resize(sample, target_size, self.max_size, interp)
@register_op
class RandomSizeCrop(BaseOperator):
    """Randomly crop the image and its annotations.

    The crop height and width are each drawn uniformly from
    [min_size, max_size], clamped to the image size.  Boxes, classes, scores,
    crowd flags, polygons and segmentation masks are cropped consistently;
    instances whose cropped box has zero area are dropped.
    """

    def __init__(self, min_size, max_size):
        super(RandomSizeCrop, self).__init__()
        self.min_size = min_size
        self.max_size = max_size
        # Imported lazily so the module can load without paddle installed.
        from paddle.vision.transforms.functional import crop as paddle_crop
        self.paddle_crop = paddle_crop

    @staticmethod
    def get_crop_params(img_shape, output_size):
        """Get parameters for ``crop`` for a random crop.
        Args:
            img_shape (list|tuple): Image's height and width.
            output_size (list|tuple): Expected output size of the crop.
        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
        """
        h, w = img_shape
        th, tw = output_size
        if h < th or w < tw:
            raise ValueError(
                "Required crop size {} is larger then input image size {}".
                format((th, tw), (h, w)))
        if w == tw and h == th:
            return 0, 0, h, w
        # BUGFIX: random.randint is inclusive on both endpoints (unlike
        # torch.randint in the DETR code this was ported from, whose high
        # bound is exclusive).  The previous "+ 1" upper bound could place
        # the crop origin one row/column past the valid range, producing a
        # crop one pixel short of the requested size.
        i = random.randint(0, h - th)
        j = random.randint(0, w - tw)
        return i, j, th, tw

    def crop(self, sample, region):
        """Crop the image and every aligned annotation to region (i, j, h, w)."""
        image_shape = sample['image'].shape[:2]
        sample['image'] = self.paddle_crop(sample['image'], *region)
        keep_index = None
        # apply bbox: shift/clip boxes, then drop zero-area instances.
        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], region)
            bbox = sample['gt_bbox'].reshape([-1, 2, 2])
            area = (bbox[:, 1, :] - bbox[:, 0, :]).prod(axis=1)
            keep_index = np.where(area > 0)[0]
            sample['gt_bbox'] = sample['gt_bbox'][keep_index] if len(
                keep_index) > 0 else np.zeros(
                    [0, 4], dtype=np.float32)
            sample['gt_class'] = sample['gt_class'][keep_index] if len(
                keep_index) > 0 else np.zeros(
                    [0, 1], dtype=np.float32)
            if 'gt_score' in sample:
                sample['gt_score'] = sample['gt_score'][keep_index] if len(
                    keep_index) > 0 else np.zeros(
                        [0, 1], dtype=np.float32)
            if 'is_crowd' in sample:
                sample['is_crowd'] = sample['is_crowd'][keep_index] if len(
                    keep_index) > 0 else np.zeros(
                        [0, 1], dtype=np.float32)
        # apply polygon
        if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
            sample['gt_poly'] = self.apply_segm(sample['gt_poly'], region,
                                                image_shape)
            if keep_index is not None:
                sample['gt_poly'] = sample['gt_poly'][keep_index]
        # apply gt_segm
        if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
            i, j, h, w = region
            sample['gt_segm'] = sample['gt_segm'][:, i:i + h, j:j + w]
            if keep_index is not None:
                sample['gt_segm'] = sample['gt_segm'][keep_index]
        return sample

    def apply_bbox(self, bbox, region):
        """Shift boxes into crop coordinates and clip them to the crop."""
        i, j, h, w = region
        region_size = np.asarray([w, h])
        crop_bbox = bbox - np.asarray([j, i, j, i])
        crop_bbox = np.minimum(crop_bbox.reshape([-1, 2, 2]), region_size)
        crop_bbox = crop_bbox.clip(min=0)
        return crop_bbox.reshape([-1, 4]).astype('float32')

    def apply_segm(self, segms, region, image_shape):
        """Crop polygon or RLE segmentations to the given region."""
        def _crop_poly(segm, crop):
            xmin, ymin, xmax, ymax = crop
            crop_coord = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin]
            crop_p = np.array(crop_coord).reshape(4, 2)
            crop_p = Polygon(crop_p)
            crop_segm = list()
            for poly in segm:
                poly = np.array(poly).reshape(len(poly) // 2, 2)
                polygon = Polygon(poly)
                if not polygon.is_valid:
                    # Repair self-intersecting polygons before clipping.
                    exterior = polygon.exterior
                    multi_lines = exterior.intersection(exterior)
                    polygons = shapely.ops.polygonize(multi_lines)
                    polygon = MultiPolygon(polygons)
                multi_polygon = list()
                if isinstance(polygon, MultiPolygon):
                    multi_polygon = copy.deepcopy(polygon)
                else:
                    multi_polygon.append(copy.deepcopy(polygon))
                for per_polygon in multi_polygon:
                    inter = per_polygon.intersection(crop_p)
                    if not inter:
                        continue
                    if isinstance(inter, (MultiPolygon, GeometryCollection)):
                        for part in inter:
                            if not isinstance(part, Polygon):
                                continue
                            part = np.squeeze(
                                np.array(part.exterior.coords[:-1]).reshape(1,
                                                                            -1))
                            # Translate into crop-local coordinates.
                            part[0::2] -= xmin
                            part[1::2] -= ymin
                            crop_segm.append(part.tolist())
                    elif isinstance(inter, Polygon):
                        crop_poly = np.squeeze(
                            np.array(inter.exterior.coords[:-1]).reshape(1, -1))
                        crop_poly[0::2] -= xmin
                        crop_poly[1::2] -= ymin
                        crop_segm.append(crop_poly.tolist())
                    else:
                        continue
            return crop_segm

        def _crop_rle(rle, crop, height, width):
            if 'counts' in rle and type(rle['counts']) == list:
                rle = mask_util.frPyObjects(rle, height, width)
            mask = mask_util.decode(rle)
            mask = mask[crop[1]:crop[3], crop[0]:crop[2]]
            rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
            return rle

        i, j, h, w = region
        crop = [j, i, j + w, i + h]
        height, width = image_shape
        crop_segms = []
        for segm in segms:
            if is_poly(segm):
                # Imported lazily: only needed for polygon clipping.
                import copy
                import shapely.ops
                from shapely.geometry import Polygon, MultiPolygon, GeometryCollection
                # Polygon format
                crop_segms.append(_crop_poly(segm, crop))
            else:
                # RLE format
                import pycocotools.mask as mask_util
                crop_segms.append(_crop_rle(segm, crop, height, width))
        return crop_segms

    def apply(self, sample, context=None):
        # Draw a crop size no larger than the image, then crop.
        h = random.randint(self.min_size,
                           min(sample['image'].shape[0], self.max_size))
        w = random.randint(self.min_size,
                           min(sample['image'].shape[1], self.max_size))
        region = self.get_crop_params(sample['image'].shape[:2], [h, w])
        return self.crop(sample, region)
@register_op
class WarpAffine(BaseOperator):
    """Warp-affine the image to a fixed network input size.

    The code is based on
    https://github.com/xingyizhou/CenterNet/blob/master/src/lib/datasets/sample/ctdet.py
    """

    def __init__(self,
                 keep_res=False,
                 pad=31,
                 input_h=512,
                 input_w=512,
                 scale=0.4,
                 shift=0.1):
        super(WarpAffine, self).__init__()
        self.keep_res = keep_res
        self.pad = pad
        self.input_h = input_h
        self.input_w = input_w
        self.scale = scale
        self.shift = shift

    def _warp_geometry(self, h, w):
        """Return (input_h, input_w, scale s, center c) for the affine warp."""
        if self.keep_res:
            # With pad = 2**k - 1 this rounds each side up to a multiple of
            # 2**k (e.g. pad=31 -> multiples of 32).
            input_h = (h | self.pad) + 1
            input_w = (w | self.pad) + 1
            s = np.array([input_w, input_h], dtype=np.float32)
            c = np.array([w // 2, h // 2], dtype=np.float32)
        else:
            s = max(h, w) * 1.0
            input_h, input_w = self.input_h, self.input_w
            c = np.array([w / 2., h / 2.], dtype=np.float32)
        return input_h, input_w, s, c

    def apply(self, sample, context=None):
        img = cv2.cvtColor(sample['image'], cv2.COLOR_RGB2BGR)
        if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
            # Skip samples with an empty (but present) box list.
            return sample
        h, w = img.shape[:2]
        input_h, input_w, s, c = self._warp_geometry(h, w)
        trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
        img = cv2.resize(img, (w, h))
        warped = cv2.warpAffine(
            img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
        sample['image'] = warped
        return sample
@register_op
class FlipWarpAffine(BaseOperator):
    def __init__(self,
                 keep_res=False,
                 pad=31,
                 input_h=512,
                 input_w=512,
                 not_rand_crop=False,
                 scale=0.4,
                 shift=0.1,
                 flip=0.5,
                 is_scale=True,
                 use_random=True):
        """FlipWarpAffine
        1. Random Crop
        2. Flip the image horizontal
        3. Warp affine the image

        Args:
            keep_res (bool): pad to a multiple-of-(pad+1) size instead of the
                fixed (input_h, input_w).
            pad (int): bitmask used for the keep_res rounding (31 -> 32).
            input_h, input_w (int): fixed network input size (keep_res=False).
            not_rand_crop (bool): use scale/shift jitter instead of random crop.
            scale, shift (float): jitter magnitudes for the not_rand_crop path.
            flip (float): probability of a horizontal flip.
            is_scale (bool): divide the output image by 255.
            use_random (bool): enable all random augmentation.
        """
        super(FlipWarpAffine, self).__init__()
        self.keep_res = keep_res
        self.pad = pad
        self.input_h = input_h
        self.input_w = input_w
        self.not_rand_crop = not_rand_crop
        self.scale = scale
        self.shift = shift
        self.flip = flip
        self.is_scale = is_scale
        self.use_random = use_random
    def apply(self, sample, context=None):
        img = sample['image']
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
            # Skip samples with an empty (but present) box list.
            return sample
        h, w = img.shape[:2]
        if self.keep_res:
            # With pad = 2**k - 1 this rounds each side up to a multiple of
            # 2**k (e.g. pad=31 -> multiples of 32).
            input_h = (h | self.pad) + 1
            input_w = (w | self.pad) + 1
            s = np.array([input_w, input_h], dtype=np.float32)
            c = np.array([w // 2, h // 2], dtype=np.float32)
        else:
            s = max(h, w) * 1.0
            input_h, input_w = self.input_h, self.input_w
            c = np.array([w / 2., h / 2.], dtype=np.float32)
        if self.use_random:
            # NOTE(review): this raises KeyError when use_random is True and
            # the sample has no 'gt_bbox' key — confirm upstream always
            # provides it during training.
            gt_bbox = sample['gt_bbox']
            if not self.not_rand_crop:
                # Random scale + random center inside a 128-pixel border.
                s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
                w_border = get_border(128, w)
                h_border = get_border(128, h)
                c[0] = np.random.randint(low=w_border, high=w - w_border)
                c[1] = np.random.randint(low=h_border, high=h - h_border)
            else:
                # Gaussian jitter of center and scale, clipped to +/-2 sigma.
                sf = self.scale
                cf = self.shift
                c[0] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
                c[1] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
                s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
            if np.random.random() < self.flip:
                # Horizontal flip: mirror image, center x, and box x-coords.
                img = img[:, ::-1, :]
                c[0] = w - c[0] - 1
                oldx1 = gt_bbox[:, 0].copy()
                oldx2 = gt_bbox[:, 2].copy()
                gt_bbox[:, 0] = w - oldx2 - 1
                gt_bbox[:, 2] = w - oldx1 - 1
            sample['gt_bbox'] = gt_bbox
        trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
        if not self.use_random:
            img = cv2.resize(img, (w, h))
        inp = cv2.warpAffine(
            img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
        if self.is_scale:
            # Normalize pixel values to [0, 1].
            inp = (inp.astype(np.float32) / 255.)
        sample['image'] = inp
        sample['center'] = c
        sample['scale'] = s
        return sample
@register_op
class CenterRandColor(BaseOperator):
    """Random color for CenterNet series models.
    Args:
        saturation (float): saturation settings.
        contrast (float): contrast settings.
        brightness (float): brightness settings.
    """
    def __init__(self, saturation=0.4, contrast=0.4, brightness=0.4):
        super(CenterRandColor, self).__init__()
        self.saturation = saturation
        self.contrast = contrast
        self.brightness = brightness
    def apply_saturation(self, img, img_gray):
        """Blend the image toward its grayscale version (in place)."""
        alpha = 1. + np.random.uniform(
            low=-self.saturation, high=self.saturation)
        self._blend(alpha, img, img_gray[:, :, None])
        return img
    def apply_contrast(self, img, img_gray):
        """Blend the image toward its mean gray level (in place)."""
        alpha = 1. + np.random.uniform(low=-self.contrast, high=self.contrast)
        img_mean = img_gray.mean()
        self._blend(alpha, img, img_mean)
        return img
    def apply_brightness(self, img, img_gray):
        """Scale all pixel values by a random factor (in place)."""
        alpha = 1 + np.random.uniform(
            low=-self.brightness, high=self.brightness)
        img *= alpha
        return img
    def _blend(self, alpha, img, img_mean):
        # In-place convex blend: img = alpha * img + (1 - alpha) * img_mean.
        # NOTE(review): `img *= alpha` with a float alpha requires `img` to
        # already be a float array (in-place float multiply of an integer
        # array raises in NumPy) — assumes the image was converted to float
        # earlier in the pipeline; TODO confirm.
        img *= alpha
        img_mean *= (1 - alpha)
        img += img_mean
    def __call__(self, sample, context=None):
        img = sample['image']
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        functions = [
            self.apply_brightness,
            self.apply_contrast,
            self.apply_saturation,
        ]
        # Apply the three distortions in a random order.
        distortions = np.random.permutation(functions)
        for func in distortions:
            img = func(img, img_gray)
        sample['image'] = img
        return sample
| 37.468719 | 131 | 0.533655 |
a40c6c5d7efdb0e4eb16d93567de08dab37eab12 | 1,867 | py | Python | modules/python/test/test.py | xipingyan/opencv | 39c3334147ec02761b117f180c9c4518be18d1fa | [
"Apache-2.0"
] | 56,632 | 2016-07-04T16:36:08.000Z | 2022-03-31T18:38:14.000Z | modules/python/test/test.py | yusufm423/opencv | 6a2077cbd8a8a0d8cbd3e0e8c3ca239f17e6c067 | [
"Apache-2.0"
] | 13,593 | 2016-07-04T13:59:03.000Z | 2022-03-31T21:04:51.000Z | modules/python/test/test.py | yusufm423/opencv | 6a2077cbd8a8a0d8cbd3e0e8c3ca239f17e6c067 | [
"Apache-2.0"
] | 54,986 | 2016-07-04T14:24:38.000Z | 2022-03-31T22:51:18.000Z | #!/usr/bin/env python
'''
Location of tests:
- <opencv_src>/modules/python/test
- <opencv_src>/modules/<module>/misc/python/test/
'''
from __future__ import print_function
import sys
sys.dont_write_bytecode = True  # Don't generate .pyc files / __pycache__ directories
import os
import unittest
# Python 3 moved urlopen to urllib.requests
try:
    from urllib.request import urlopen
except ImportError:
    from urllib import urlopen
from tests_common import NewOpenCVTests
# Directory containing this script; used as a default test-discovery root.
basedir = os.path.abspath(os.path.dirname(__file__))
def load_tests(loader, tests, pattern):
    """unittest ``load_tests`` protocol hook: discover OpenCV Python tests.

    Discovery roots are the current directory, this script's directory, and
    any extra paths listed (one per line) in ``opencv_python_tests.cfg``.
    The test file pattern defaults to ``test_*.py`` and can be overridden via
    the ``OPENCV_PYTEST_FILTER`` environment variable.

    Args:
        loader: the ``unittest.TestLoader`` performing discovery.
        tests: the ``TestSuite`` of tests loaded so far; extended in place.
        pattern: standard protocol argument (unused; a custom pattern is
            built from the environment instead).

    Returns:
        The extended ``TestSuite``.
    """
    cwd = os.getcwd()
    config_file = 'opencv_python_tests.cfg'
    locations = [cwd, basedir]
    if os.path.exists(config_file):
        with open(config_file, 'r') as f:
            locations += [str(s).strip() for s in f.readlines()]
    else:
        print('WARNING: OpenCV tests config file ({}) is missing, running subset of tests'.format(config_file))
    tests_pattern = os.environ.get('OPENCV_PYTEST_FILTER', 'test_*') + '.py'
    if tests_pattern != 'test_*.py':
        print('Tests filter: {}'.format(tests_pattern))
    processed = set()
    for l in locations:
        if not os.path.isabs(l):
            l = os.path.normpath(os.path.join(cwd, l))
        if l in processed:
            continue
        processed.add(l)
        print('Discovering python tests from: {}'.format(l))
        sys_path_modify = l not in sys.path
        if sys_path_modify:
            sys.path.append(l)  # Hack python loader
        try:
            # Discover once and reuse the suite: the previous code scanned
            # the directory twice (once only to count tests) and dropped
            # top_level_dir on the second scan.
            discovered_tests = loader.discover(l, pattern=tests_pattern, top_level_dir=l)
            print('    found {} tests'.format(discovered_tests.countTestCases()))
            tests.addTests(discovered_tests)
        finally:
            # Restore sys.path even if discovery raises.
            if sys_path_modify:
                sys.path.remove(l)
    return tests
# Script entry point: hand control to the OpenCV test framework, which
# parses command-line options and runs the discovered tests.
if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
| 30.606557 | 111 | 0.668452 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.