max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
revolt/embed.py
|
MutedByte/revolt.py
| 0
|
6628151
|
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Union
from .enums import EmbedType
from .asset import Asset
if TYPE_CHECKING:
from .state import State
from .types import Embed as EmbedPayload
from .types import SendableEmbed as SendableEmbedPayload
from .types import WebsiteEmbed as WebsiteEmbedPayload
from .types import ImageEmbed as ImageEmbedPayload
from .types import TextEmbed as TextEmbedPayload
from .types import NoneEmbed as NoneEmbedPayload
__all__ = ("Embed", "WebsiteEmbed", "ImageEmbed", "TextEmbed", "NoneEmbed", "to_embed", "SendableEmbed")
class WebsiteEmbed:
    """Embed built from a website link preview payload.

    Every field is optional in the payload; absent keys become ``None``.
    """
    type = EmbedType.website

    def __init__(self, embed: WebsiteEmbedPayload):
        # Bind the lookup once; every field is a straight optional fetch.
        field = embed.get
        self.url = field("url")
        self.special = field("special")
        self.title = field("title")
        self.description = field("description")
        self.image = field("image")
        self.video = field("video")
        self.site_name = field("site_name")
        self.icon_url = field("icon_url")
        self.colour = field("colour")
class ImageEmbed:
    """Embed wrapping a single image and its (optional) dimensions."""
    type = EmbedType.image

    def __init__(self, image: ImageEmbedPayload):
        # All four fields are optional copies of the payload keys.
        for attr in ("url", "width", "height", "size"):
            setattr(self, attr, image.get(attr))
class TextEmbed:
    """Rich text embed; ``media``, when present, is wrapped in an :class:`Asset`."""
    type = EmbedType.text

    def __init__(self, embed: TextEmbedPayload, state: State):
        self.icon_url = embed.get("icon_url")
        self.url = embed.get("url")
        self.title = embed.get("title")
        self.description = embed.get("description")
        # Only a truthy media payload becomes an Asset; otherwise None.
        raw_media = embed.get("media")
        self.media = Asset(raw_media, state) if raw_media else None
        self.colour = embed.get("colour")
class NoneEmbed:
    # Placeholder embed used for payloads carrying no embed content.
    type = EmbedType.none
Embed = Union[WebsiteEmbed, ImageEmbed, TextEmbed, NoneEmbed]
def to_embed(payload: EmbedPayload, state: State) -> Embed:
    """Build the concrete embed object matching *payload*'s ``type`` tag.

    Unknown tags fall through to :class:`NoneEmbed`.
    """
    kind = payload["type"]
    if kind == "Website":
        return WebsiteEmbed(payload)
    if kind == "Image":
        return ImageEmbed(payload)
    if kind == "Text":
        return TextEmbed(payload, state)
    return NoneEmbed()
class SendableEmbed:
    """User-constructed embed that can be attached to an outgoing message.

    All fields default to ``None``; any keyword arguments are set as
    attributes, and only truthy fields are serialised by :meth:`to_dict`.
    """

    def __init__(self, **attrs):
        self.title: Optional[str] = None
        self.description: Optional[str] = None
        self.media: Optional[str] = None
        self.icon_url: Optional[str] = None
        self.colour: Optional[str] = None
        self.url: Optional[str] = None
        # Apply caller-supplied overrides verbatim.
        for name, value in attrs.items():
            setattr(self, name, value)

    def to_dict(self) -> SendableEmbedPayload:
        """Serialise to the wire format, omitting unset (falsy) fields."""
        payload: SendableEmbedPayload = {"type": "Text"}
        for field in ("title", "description", "media", "icon_url", "colour", "url"):
            value = getattr(self, field)
            if value:
                payload[field] = value
        return payload
|
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Union
from .enums import EmbedType
from .asset import Asset
if TYPE_CHECKING:
from .state import State
from .types import Embed as EmbedPayload
from .types import SendableEmbed as SendableEmbedPayload
from .types import WebsiteEmbed as WebsiteEmbedPayload
from .types import ImageEmbed as ImageEmbedPayload
from .types import TextEmbed as TextEmbedPayload
from .types import NoneEmbed as NoneEmbedPayload
__all__ = ("Embed", "WebsiteEmbed", "ImageEmbed", "TextEmbed", "NoneEmbed", "to_embed", "SendableEmbed")
class WebsiteEmbed:
    """Embed built from a website link preview payload.

    Every field is optional in the payload; absent keys become ``None``.
    """
    type = EmbedType.website

    def __init__(self, embed: WebsiteEmbedPayload):
        # Bind the lookup once; every field is a straight optional fetch.
        field = embed.get
        self.url = field("url")
        self.special = field("special")
        self.title = field("title")
        self.description = field("description")
        self.image = field("image")
        self.video = field("video")
        self.site_name = field("site_name")
        self.icon_url = field("icon_url")
        self.colour = field("colour")
class ImageEmbed:
    """Embed wrapping a single image and its (optional) dimensions."""
    type = EmbedType.image

    def __init__(self, image: ImageEmbedPayload):
        # All four fields are optional copies of the payload keys.
        for attr in ("url", "width", "height", "size"):
            setattr(self, attr, image.get(attr))
class TextEmbed:
    """Rich text embed; ``media``, when present, is wrapped in an :class:`Asset`."""
    type = EmbedType.text

    def __init__(self, embed: TextEmbedPayload, state: State):
        self.icon_url = embed.get("icon_url")
        self.url = embed.get("url")
        self.title = embed.get("title")
        self.description = embed.get("description")
        # Only a truthy media payload becomes an Asset; otherwise None.
        raw_media = embed.get("media")
        self.media = Asset(raw_media, state) if raw_media else None
        self.colour = embed.get("colour")
class NoneEmbed:
    # Placeholder embed used for payloads carrying no embed content.
    type = EmbedType.none
Embed = Union[WebsiteEmbed, ImageEmbed, TextEmbed, NoneEmbed]
def to_embed(payload: EmbedPayload, state: State) -> Embed:
    """Build the concrete embed object matching *payload*'s ``type`` tag.

    Unknown tags fall through to :class:`NoneEmbed`.
    """
    kind = payload["type"]
    if kind == "Website":
        return WebsiteEmbed(payload)
    if kind == "Image":
        return ImageEmbed(payload)
    if kind == "Text":
        return TextEmbed(payload, state)
    return NoneEmbed()
class SendableEmbed:
    """User-constructed embed that can be attached to an outgoing message.

    All fields default to ``None``; any keyword arguments are set as
    attributes, and only truthy fields are serialised by :meth:`to_dict`.
    """

    def __init__(self, **attrs):
        self.title: Optional[str] = None
        self.description: Optional[str] = None
        self.media: Optional[str] = None
        self.icon_url: Optional[str] = None
        self.colour: Optional[str] = None
        self.url: Optional[str] = None
        # Apply caller-supplied overrides verbatim.
        for name, value in attrs.items():
            setattr(self, name, value)

    def to_dict(self) -> SendableEmbedPayload:
        """Serialise to the wire format, omitting unset (falsy) fields."""
        payload: SendableEmbedPayload = {"type": "Text"}
        for field in ("title", "description", "media", "icon_url", "colour", "url"):
            value = getattr(self, field)
            if value:
                payload[field] = value
        return payload
|
none
| 1
| 2.397474
| 2
|
|
marvin/frontpage/packageinfo.py
|
programa-stic/marvin-django
| 81
|
6628152
|
<reponame>programa-stic/marvin-django
# Copyright (c) 2015, Fundacion Dr. <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import settings
sys.path.insert(0, settings.vuln_analysis_dir)
sys.path.insert(0, settings.vuln_analysis_dir+'/androguard')
from androguard.core.bytecodes import apk
from androguard.misc import AnalyzeAPK
from models import *
from django.utils.encoding import smart_text
import simplejson
#import md5
#import sha
from hashlib import sha1, md5
import classifier_interface_file
import os
from apk_storage import *
from git_interface import gitlab_upload_app
import MarvinStaticAnalyzer
import threading
import logging
import constants
import traceback
from functools import wraps
from multiprocessing import Process, Queue
def processify(func):
    '''Decorator to run a function as a process.
    Be sure that every argument and the return value
    is *pickable*.
    The created process is joined, so the code does not
    run in parallel.
    '''
    def process_func(q, *args, **kwargs):
        # Child-process side: run the wrapped function and ship either the
        # result or a picklable (type, value, traceback-text) triple back.
        try:
            ret = func(*args, **kwargs)
        except Exception:
            ex_type, ex_value, tb = sys.exc_info()
            error = ex_type, ex_value, ''.join(traceback.format_tb(tb))
            ret = None
        else:
            error = None
        q.put((ret, error))
    # register original function with different name
    # in sys.modules so it is pickable
    process_func.__name__ = func.__name__ + 'processify_func'
    setattr(sys.modules[__name__], process_func.__name__, process_func)
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Parent side: spawn the child, block until it finishes, then
        # re-raise any child exception with its traceback text appended.
        q = Queue()
        p = Process(target=process_func, args=[q] + list(args), kwargs=kwargs)
        p.start()
        p.join()
        ret, error = q.get()
        if error:
            ex_type, ex_value, tb_str = error
            # NOTE(review): ex_value.message is Python 2 only — would need
            # str(ex_value) under Python 3.
            message = '%s (in subprocess)\n%s' % (ex_value.message, tb_str)
            raise ex_type(message)
        return ret
    return wrapper
@processify
def test_function():
    # Smoke test: returns the child's PID (differs from the parent's).
    return os.getpid()
@processify
def test_exception():
    # Smoke test for the error path: the RuntimeError raised in the child
    # should be re-raised in the parent by the processify wrapper.
    raise RuntimeError('xyz')
def test():
    # Manual smoke test for processify (Python 2 print statements).
    print os.getpid()
    print test_function()
    test_exception()
if __name__ == '__main__':
test()
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'filename': '/tmp/packageinfo.debug.log',
# },
# },
# 'loggers': {
# 'packageinfo': {
# 'handlers': ['file'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
# },
# }
#logger = logging.getLogger("packageinfo")
logging.basicConfig(filename="/tmp/packageinfo.info.log", level=logging.INFO)
perms_list_file = settings.perms_list_file
model_file = settings.model_file
def data_for_storage(rawfile):
    # Return (package_name, md5_hex) for raw APK bytes.
    # On any parse failure returns (repr(exception), None) instead of raising.
    md5hash = md5(rawfile).hexdigest()
    try:
        myApk = apk.APK(rawfile, raw=True)
        package_name = myApk.package_name
        return (package_name, md5hash)
    except Exception as poof:
        return (repr(poof), None)
def process_package(myfile, app_md):
    # Fire-and-forget entry point: the heavy APK processing runs in a
    # background thread; callers get only a placeholder string back.
    t = threading.Thread (target=process_package_worker, args=(myfile, app_md))
    #threads = list()
    #threads.append(t)
    t.start()
    return "Nothing to see yet, move along"
@processify
def process_package_worker(myfile, app_md):
    # Full ingestion pipeline for an uploaded APK: parse, dedupe against the
    # DB, create the App row plus components/permissions, decompile sources,
    # then run the Bayes classifier and the vulnerability analysis.
    # app_md, when given, carries Play-Store metadata (docV2 protobuf-like
    # structure) — assumption based on field names; confirm with callers.
    logging.info ("Entrando a process_package")
    rawfile = myfile.read()
    try:
        logging.info ("Extrayendo APK")
        (myPackage, d, dx) = AnalyzeAPK(rawfile, raw=True, decompiler="dad")
        logging.info ("APK extraido")
    except Exception as poof:
        logging.error ("Exception reading APK: " + repr (poof))
        return "Excepcion leyendo APK: " + repr (poof)
    sources = {}
    # try:
    # map (lambda cd: sources.update({cd.get_name():cd.get_source()}), d.get_classes())
    # print "APK decompilado"
    # except Exception as poof:
    # print "Exception decompiling APK: " + repr (poof)
    if myPackage.is_valid_APK():
        #misc_info = compile_misc_info(myPackage)
        package_name = myPackage.get_package()
        version = myPackage.get_androidversion_name()
        # Dedupe: (package_name, version) already present means a re-upload.
        qs = App.objects.filter(package_name = package_name, version = version)
        logging.info ("Busco el objeto en la base: encuentro :"+ package_name +" "+ version +" "+ str(len(qs)))
        if len(qs)>0:
            logging.error ("El objeto ya existe en la base")
            return "El objeto ya existe en la base"
        else:
            # Prefer the store title; fall back to the manifest label.
            if app_md != None:
                app_name= app_md.docV2.title
            else:
                app_name = get_app_name(myPackage, d)
            app = App(package_name = myPackage.get_package(),
                version = myPackage.get_androidversion_name(),
                app_name = app_name,
                md5 = md5(rawfile).hexdigest(),
                sha1 = sha1(rawfile).hexdigest(),
                bayesConfidence = 0.000)
            app.save()
            store_apk(rawfile, app.package_name, app.md5)
            # Free the raw bytes — the APK can be large.
            del rawfile
            if app_md != None:
                metadata = App_metadata(app_name= app_md.docV2.title,
                    version_string = app_md.docV2.details.appDetails.versionString,
                    author = app_md.docV2.creator,
                    date_upload = app_md.docV2.details.appDetails.uploadDate,
                    description = app_md.docV2.descriptionHtml,
                    app = app)
                metadata.save()
            #store_apk(rawfile, app.package_name, app.md5)
            #print "Decompilando clases"
            android_manifest = myPackage.get_android_manifest_xml().toxml()
            overrides = {"AndroidManifest.xml": android_manifest}
            #t = threading.Thread (target=save_sources_worker, args=(d, app, overrides))
            save_sources_worker(d, app, overrides)
            #threads = list()
            #threads.append(t)
            #t.start()
            permissions = myPackage.get_details_permissions()
            add_permissions(permissions, app)
            # Persist the four Android component kinds declared in the manifest.
            activities = myPackage.get_activities()
            for act_name in activities:
                django_act = Activity (name = act_name,
                    app = app)
                django_act.save()
            services = myPackage.get_services()
            for serv_name in services:
                django_srv = Service (name = serv_name,
                    app = app)
                django_srv.save()
            providers = myPackage.get_providers()
            for prov_name in providers:
                django_prov = Provider (name = prov_name,
                    app = app)
                django_prov.save()
            receivers = myPackage.get_receivers()
            for recv_name in receivers:
                django_recv = Receiver (name = recv_name,
                    app = app)
                django_recv.save()
            # I was uploading the sources to the repo before finishing loading
            # them into the DB; moved that into the worker that loads them.
            #gitlab_upload_app(app.package_name, app.version)
            logging.info ("Entrando a analisis bayesiano")
            bayes_analysis(app)
            logging.info ("Fin analisis bayesiano")
            logging.info( "Entrando a chequeo de vulnerabilidades")
            #t = threading.Thread (target=vuln_analysis, args=(app, myPackage, d, dx))
            vuln_analysis(app, myPackage, d, dx)
            #threads = list()
            #threads.append(t)
            #t.start()
            return app
    else:
        logging.error ("Error: APK invalido")
        return "Error: APK invalido"
def save_sources_worker(d, app, overrides):
    # Decompile every class in the dex, store each as a Sourcefile row, then
    # push the whole tree (plus *overrides*, e.g. AndroidManifest.xml) to GitLab.
    logging.info ("Decompilando clases")
    for javaclass in d.get_classes():
        try:
            # print "Decompilando clase " + javaclass.get_name()
            source = repr(javaclass.get_source())
        except Exception as poof:
            logging.info ("Java class "+ javaclass.get_name() + "could not be decompiled: \n" + repr(poof))
            source = "Class could not be decompiled"
        #sources.update({javaclass.get_name():source})
        # Strip the leading 'L' and trailing ';' of the JVM-style class name,
        # and the surrounding quotes that repr() added to the source text.
        name = javaclass.get_name()[1:len(javaclass.get_name())-1]
        sourcefile = Sourcefile (file_name = name,
            file_contents= source[1:len(source)-1],
            app = app)
        try:
            sourcefile.save()
        except Exception as poof:
            logging.error ("Error grabando archivo fuente: "+repr(poof))
    #gitlab_upload_app(app.package_name, app.version)
    gitlab_upload_app(app, overrides)
    app.sourcesUploaded = True
    app.save()
    logging.info ("Clases decompiladas")
def bayes_analysis(app):
    # Run the permission-based Bayes classifier and persist verdict/confidence.
    perms = map (lambda permission:permission.name, app.permission_set.all())
    classifier_report = classifier_interface_file.evaluate_apk(perms, perms_list_file, model_file)
    app.bayesResult = classifier_report[0]
    app.bayesConfidence = classifier_report[1]
    app.status = "BAYES_CHECKED"
    app.save()
def vuln_analysis_retry(app):
    # Re-run the vulnerability analysis for an already-stored app in a
    # background thread; returns immediately.
    t = threading.Thread (target=vuln_analysis_retry_worker, args=(app,))
    #threads = list()
    #threads.append(t)
    print "Empezando el thread"
    t.start()
    #t.join()
    return "Gracias vuelva prontos"
@processify
def vuln_analysis_retry_worker(app):
    # Reload the stored APK from disk and re-run the vulnerability analysis.
    # Errors are logged, never raised to the caller.
    print "entrando a retry_worker"
    try:
        #print "Consiguiendo filename, package_name:" + app.package_name
        filename = get_filepath(app.package_name, app.md5)
        #print "filename:"+filename
        (myPackage, d, dx) = AnalyzeAPK(filename)
        #print "Datos recuperados"
        vuln_analysis(app, myPackage, d, dx)
    except Exception as poof:
        #print "Error en retry: " + repr(poof)
        logging.error ("Exception en analisis de vulnerabilidades: " + repr (poof))
@processify
def decompile(app):
    # Re-decompile a stored APK and (re)upload its sources and manifest.
    filename = get_filepath(app.package_name, app.md5)
    (myPackage, d, dx) = AnalyzeAPK(filename)
    android_manifest = myPackage.get_android_manifest_xml().toxml()
    overrides = {"AndroidManifest.xml": android_manifest}
    save_sources_worker(d, app, overrides)
def vuln_analysis(app, apk, d, dx):
    # Run the static vulnerability analyzer on the stored APK and persist the
    # results. Note *apk* here is the parsed package object and shadows the
    # module-level androguard `apk` import.
    print "Entrando a vuln_analysis"
    # APKs are sharded on disk under <root>/<md5[0:2]>/<md5[2:4]>/.
    prefix1 = app.md5[0:2]
    prefix2 = app.md5[2:4]
    dir_path = settings.root_apk_dir + '/' + prefix1 + '/' + prefix2 + '/'
    file_path = dir_path + app.package_name + '.apk'
    # os.chdir is process-global state; presumably safe because callers run
    # this via @processify / worker threads — TODO confirm.
    my_path = os.getcwd()
    os.chdir(settings.vuln_analysis_dir)
    vuln_report = {}
    app.status = "Checking Vulns"
    app.save()
    try:
        vuln_report = MarvinStaticAnalyzer.analyze_vulnerabilities(file_path, apk, d, dx)
    except Exception as poof:
        # Analysis failures are stored as a synthetic single-entry report.
        logging.error ("Error analyzing vulns: " + repr(poof))
        vuln_report = {"Error in analysis": [{'description':repr(poof)}]}
    os.chdir(my_path)
    #print vuln_report
    update_fields_vr(app, vuln_report)
    app.status = "Vulns checked"
    app.save()
    logging.info("Fin chequeo de vulnerabilidades")
    #return vuln_report
def update_fields_vr(app, vuln_report):
    # Persist one VulnerabilityResult row per finding in *vuln_report*
    # (a dict of vuln-name -> list of finding dicts), normalising missing
    # confidence/dynamicTest values and creating a placeholder dynamic-test
    # record for findings that request one.
    for field in vuln_report.keys():
        for instance in vuln_report[field]:
            report = VulnerabilityResult(name = field,
                description = instance['description'],
                confidence = instance['confidence'],
                dynamicTest = instance['dynamic_test'],
                dynamic_test_params = instance['dynamic_test_params'],
                app = app)
            #if report.name in constants.STATIC_VULN_TYPES:
            # report.severity = constants.SEVERITY_PRIORITIES[constants.STATIC_VULN_TYPES[report.name]]
            #if report.name in constants.DYNAMIC_VULN_TYPES:
            # report.severity = constants.SEVERITY_PRIORITIES[constants.DYNAMIC_VULN_TYPES[report.name]]
            report.severity = instance['severity']
            if 'reference_class' in instance:
                report.vuln_class = instance['reference_class']
            if 'reference_method' in instance:
                report.vuln_method = instance['reference_method']
            # Defaults when the analyzer left these unset.
            if report.confidence is None:
                report.confidence = 1
            if report.dynamicTest is None:
                report.dynamicTest = False
            report.save()
            if instance['dynamic_test'] :
                dynamicTestResult = DynamicTestResults(name = '' ,status = 'UNKNOWN' ,count = 0 ,description = '' ,vuln = report)
                dynamicTestResult.save()
def add_permissions(permissions, app):
    # For each manifest permission (name -> [danger, description, ...]),
    # reuse an existing Permission row when a name match exists, otherwise
    # create one; then link it to *app* (many-to-many).
    for perm_name in permissions.keys():
        #print perm_name
        res = Permission.objects.search.query('match', name = perm_name)
        if len(res)==0:
            django_perm = Permission (name = perm_name,
                perm_description = permissions[perm_name][1],
                perm_danger = permissions[perm_name][0])
            django_perm.save()
        else:
            django_perm = res[0]
        django_perm.app.add(app)
def get_app_name(a, d):
    # Resolve the app's display name from the manifest's android:label.
    # When the label is a resource reference ('@...'), look it up in the
    # package's R$string class and the compiled resources; fall back to the
    # package name when the R class is missing.
    try:
        app_name = a.xml['AndroidManifest.xml'].getElementsByTagName('application').pop().attributes['android:label'].nodeValue
    except Exception as poof:
        app_name = 'Error:' + repr(poof)
    if app_name[0] == '@':
        package_name = a.package
        # JVM internal name of the generated string-resource class.
        class_name = "L"+package_name.replace('.','/')+"/R$string;"
        my_R_strings = d.get_class(class_name)
        if my_R_strings == None:
            return package_name
        else:
            res = a.get_android_resources()
            for element in my_R_strings.get_fields():
                # Match the field's resource id (hex) against the '@...' ref.
                elem_offset = format (element.init_value.get_value(),"03X")
                if elem_offset == app_name[1:]:
                    resource_name = element.get_name()
                    app_name = res.get_string(package_name, resource_name)[1]
    return app_name
# classifier_report = classifier_interface_file.evaluate_apk(permissions, perms_list_file, model_file)
# marvin_es.store_cr(package_name, classifier_report)
|
# Copyright (c) 2015, Fundacion Dr. <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import settings
sys.path.insert(0, settings.vuln_analysis_dir)
sys.path.insert(0, settings.vuln_analysis_dir+'/androguard')
from androguard.core.bytecodes import apk
from androguard.misc import AnalyzeAPK
from models import *
from django.utils.encoding import smart_text
import simplejson
#import md5
#import sha
from hashlib import sha1, md5
import classifier_interface_file
import os
from apk_storage import *
from git_interface import gitlab_upload_app
import MarvinStaticAnalyzer
import threading
import logging
import constants
import traceback
from functools import wraps
from multiprocessing import Process, Queue
def processify(func):
    '''Decorator to run a function as a process.
    Be sure that every argument and the return value
    is *pickable*.
    The created process is joined, so the code does not
    run in parallel.
    '''
    def process_func(q, *args, **kwargs):
        # Child-process side: run the wrapped function and ship either the
        # result or a picklable (type, value, traceback-text) triple back.
        try:
            ret = func(*args, **kwargs)
        except Exception:
            ex_type, ex_value, tb = sys.exc_info()
            error = ex_type, ex_value, ''.join(traceback.format_tb(tb))
            ret = None
        else:
            error = None
        q.put((ret, error))
    # register original function with different name
    # in sys.modules so it is pickable
    process_func.__name__ = func.__name__ + 'processify_func'
    setattr(sys.modules[__name__], process_func.__name__, process_func)
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Parent side: spawn the child, block until it finishes, then
        # re-raise any child exception with its traceback text appended.
        q = Queue()
        p = Process(target=process_func, args=[q] + list(args), kwargs=kwargs)
        p.start()
        p.join()
        ret, error = q.get()
        if error:
            ex_type, ex_value, tb_str = error
            # NOTE(review): ex_value.message is Python 2 only — would need
            # str(ex_value) under Python 3.
            message = '%s (in subprocess)\n%s' % (ex_value.message, tb_str)
            raise ex_type(message)
        return ret
    return wrapper
@processify
def test_function():
    # Smoke test: returns the child's PID (differs from the parent's).
    return os.getpid()
@processify
def test_exception():
    # Smoke test for the error path: the RuntimeError raised in the child
    # should be re-raised in the parent by the processify wrapper.
    raise RuntimeError('xyz')
def test():
    # Manual smoke test for processify (Python 2 print statements).
    print os.getpid()
    print test_function()
    test_exception()
if __name__ == '__main__':
test()
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'filename': '/tmp/packageinfo.debug.log',
# },
# },
# 'loggers': {
# 'packageinfo': {
# 'handlers': ['file'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
# },
# }
#logger = logging.getLogger("packageinfo")
logging.basicConfig(filename="/tmp/packageinfo.info.log", level=logging.INFO)
perms_list_file = settings.perms_list_file
model_file = settings.model_file
def data_for_storage(rawfile):
    # Return (package_name, md5_hex) for raw APK bytes.
    # On any parse failure returns (repr(exception), None) instead of raising.
    md5hash = md5(rawfile).hexdigest()
    try:
        myApk = apk.APK(rawfile, raw=True)
        package_name = myApk.package_name
        return (package_name, md5hash)
    except Exception as poof:
        return (repr(poof), None)
def process_package(myfile, app_md):
    # Fire-and-forget entry point: the heavy APK processing runs in a
    # background thread; callers get only a placeholder string back.
    t = threading.Thread (target=process_package_worker, args=(myfile, app_md))
    #threads = list()
    #threads.append(t)
    t.start()
    return "Nothing to see yet, move along"
@processify
def process_package_worker(myfile, app_md):
    # Full ingestion pipeline for an uploaded APK: parse, dedupe against the
    # DB, create the App row plus components/permissions, decompile sources,
    # then run the Bayes classifier and the vulnerability analysis.
    # app_md, when given, carries Play-Store metadata (docV2 protobuf-like
    # structure) — assumption based on field names; confirm with callers.
    logging.info ("Entrando a process_package")
    rawfile = myfile.read()
    try:
        logging.info ("Extrayendo APK")
        (myPackage, d, dx) = AnalyzeAPK(rawfile, raw=True, decompiler="dad")
        logging.info ("APK extraido")
    except Exception as poof:
        logging.error ("Exception reading APK: " + repr (poof))
        return "Excepcion leyendo APK: " + repr (poof)
    sources = {}
    # try:
    # map (lambda cd: sources.update({cd.get_name():cd.get_source()}), d.get_classes())
    # print "APK decompilado"
    # except Exception as poof:
    # print "Exception decompiling APK: " + repr (poof)
    if myPackage.is_valid_APK():
        #misc_info = compile_misc_info(myPackage)
        package_name = myPackage.get_package()
        version = myPackage.get_androidversion_name()
        # Dedupe: (package_name, version) already present means a re-upload.
        qs = App.objects.filter(package_name = package_name, version = version)
        logging.info ("Busco el objeto en la base: encuentro :"+ package_name +" "+ version +" "+ str(len(qs)))
        if len(qs)>0:
            logging.error ("El objeto ya existe en la base")
            return "El objeto ya existe en la base"
        else:
            # Prefer the store title; fall back to the manifest label.
            if app_md != None:
                app_name= app_md.docV2.title
            else:
                app_name = get_app_name(myPackage, d)
            app = App(package_name = myPackage.get_package(),
                version = myPackage.get_androidversion_name(),
                app_name = app_name,
                md5 = md5(rawfile).hexdigest(),
                sha1 = sha1(rawfile).hexdigest(),
                bayesConfidence = 0.000)
            app.save()
            store_apk(rawfile, app.package_name, app.md5)
            # Free the raw bytes — the APK can be large.
            del rawfile
            if app_md != None:
                metadata = App_metadata(app_name= app_md.docV2.title,
                    version_string = app_md.docV2.details.appDetails.versionString,
                    author = app_md.docV2.creator,
                    date_upload = app_md.docV2.details.appDetails.uploadDate,
                    description = app_md.docV2.descriptionHtml,
                    app = app)
                metadata.save()
            #store_apk(rawfile, app.package_name, app.md5)
            #print "Decompilando clases"
            android_manifest = myPackage.get_android_manifest_xml().toxml()
            overrides = {"AndroidManifest.xml": android_manifest}
            #t = threading.Thread (target=save_sources_worker, args=(d, app, overrides))
            save_sources_worker(d, app, overrides)
            #threads = list()
            #threads.append(t)
            #t.start()
            permissions = myPackage.get_details_permissions()
            add_permissions(permissions, app)
            # Persist the four Android component kinds declared in the manifest.
            activities = myPackage.get_activities()
            for act_name in activities:
                django_act = Activity (name = act_name,
                    app = app)
                django_act.save()
            services = myPackage.get_services()
            for serv_name in services:
                django_srv = Service (name = serv_name,
                    app = app)
                django_srv.save()
            providers = myPackage.get_providers()
            for prov_name in providers:
                django_prov = Provider (name = prov_name,
                    app = app)
                django_prov.save()
            receivers = myPackage.get_receivers()
            for recv_name in receivers:
                django_recv = Receiver (name = recv_name,
                    app = app)
                django_recv.save()
            # I was uploading the sources to the repo before finishing loading
            # them into the DB; moved that into the worker that loads them.
            #gitlab_upload_app(app.package_name, app.version)
            logging.info ("Entrando a analisis bayesiano")
            bayes_analysis(app)
            logging.info ("Fin analisis bayesiano")
            logging.info( "Entrando a chequeo de vulnerabilidades")
            #t = threading.Thread (target=vuln_analysis, args=(app, myPackage, d, dx))
            vuln_analysis(app, myPackage, d, dx)
            #threads = list()
            #threads.append(t)
            #t.start()
            return app
    else:
        logging.error ("Error: APK invalido")
        return "Error: APK invalido"
def save_sources_worker(d, app, overrides):
    # Decompile every class in the dex, store each as a Sourcefile row, then
    # push the whole tree (plus *overrides*, e.g. AndroidManifest.xml) to GitLab.
    logging.info ("Decompilando clases")
    for javaclass in d.get_classes():
        try:
            # print "Decompilando clase " + javaclass.get_name()
            source = repr(javaclass.get_source())
        except Exception as poof:
            logging.info ("Java class "+ javaclass.get_name() + "could not be decompiled: \n" + repr(poof))
            source = "Class could not be decompiled"
        #sources.update({javaclass.get_name():source})
        # Strip the leading 'L' and trailing ';' of the JVM-style class name,
        # and the surrounding quotes that repr() added to the source text.
        name = javaclass.get_name()[1:len(javaclass.get_name())-1]
        sourcefile = Sourcefile (file_name = name,
            file_contents= source[1:len(source)-1],
            app = app)
        try:
            sourcefile.save()
        except Exception as poof:
            logging.error ("Error grabando archivo fuente: "+repr(poof))
    #gitlab_upload_app(app.package_name, app.version)
    gitlab_upload_app(app, overrides)
    app.sourcesUploaded = True
    app.save()
    logging.info ("Clases decompiladas")
def bayes_analysis(app):
    # Run the permission-based Bayes classifier and persist verdict/confidence.
    perms = map (lambda permission:permission.name, app.permission_set.all())
    classifier_report = classifier_interface_file.evaluate_apk(perms, perms_list_file, model_file)
    app.bayesResult = classifier_report[0]
    app.bayesConfidence = classifier_report[1]
    app.status = "BAYES_CHECKED"
    app.save()
def vuln_analysis_retry(app):
    # Re-run the vulnerability analysis for an already-stored app in a
    # background thread; returns immediately.
    t = threading.Thread (target=vuln_analysis_retry_worker, args=(app,))
    #threads = list()
    #threads.append(t)
    print "Empezando el thread"
    t.start()
    #t.join()
    return "Gracias vuelva prontos"
@processify
def vuln_analysis_retry_worker(app):
    # Reload the stored APK from disk and re-run the vulnerability analysis.
    # Errors are logged, never raised to the caller.
    print "entrando a retry_worker"
    try:
        #print "Consiguiendo filename, package_name:" + app.package_name
        filename = get_filepath(app.package_name, app.md5)
        #print "filename:"+filename
        (myPackage, d, dx) = AnalyzeAPK(filename)
        #print "Datos recuperados"
        vuln_analysis(app, myPackage, d, dx)
    except Exception as poof:
        #print "Error en retry: " + repr(poof)
        logging.error ("Exception en analisis de vulnerabilidades: " + repr (poof))
@processify
def decompile(app):
    # Re-decompile a stored APK and (re)upload its sources and manifest.
    filename = get_filepath(app.package_name, app.md5)
    (myPackage, d, dx) = AnalyzeAPK(filename)
    android_manifest = myPackage.get_android_manifest_xml().toxml()
    overrides = {"AndroidManifest.xml": android_manifest}
    save_sources_worker(d, app, overrides)
def vuln_analysis(app, apk, d, dx):
    # Run the static vulnerability analyzer on the stored APK and persist the
    # results. Note *apk* here is the parsed package object and shadows the
    # module-level androguard `apk` import.
    print "Entrando a vuln_analysis"
    # APKs are sharded on disk under <root>/<md5[0:2]>/<md5[2:4]>/.
    prefix1 = app.md5[0:2]
    prefix2 = app.md5[2:4]
    dir_path = settings.root_apk_dir + '/' + prefix1 + '/' + prefix2 + '/'
    file_path = dir_path + app.package_name + '.apk'
    # os.chdir is process-global state; presumably safe because callers run
    # this via @processify / worker threads — TODO confirm.
    my_path = os.getcwd()
    os.chdir(settings.vuln_analysis_dir)
    vuln_report = {}
    app.status = "Checking Vulns"
    app.save()
    try:
        vuln_report = MarvinStaticAnalyzer.analyze_vulnerabilities(file_path, apk, d, dx)
    except Exception as poof:
        # Analysis failures are stored as a synthetic single-entry report.
        logging.error ("Error analyzing vulns: " + repr(poof))
        vuln_report = {"Error in analysis": [{'description':repr(poof)}]}
    os.chdir(my_path)
    #print vuln_report
    update_fields_vr(app, vuln_report)
    app.status = "Vulns checked"
    app.save()
    logging.info("Fin chequeo de vulnerabilidades")
    #return vuln_report
def update_fields_vr(app, vuln_report):
    # Persist one VulnerabilityResult row per finding in *vuln_report*
    # (a dict of vuln-name -> list of finding dicts), normalising missing
    # confidence/dynamicTest values and creating a placeholder dynamic-test
    # record for findings that request one.
    for field in vuln_report.keys():
        for instance in vuln_report[field]:
            report = VulnerabilityResult(name = field,
                description = instance['description'],
                confidence = instance['confidence'],
                dynamicTest = instance['dynamic_test'],
                dynamic_test_params = instance['dynamic_test_params'],
                app = app)
            #if report.name in constants.STATIC_VULN_TYPES:
            # report.severity = constants.SEVERITY_PRIORITIES[constants.STATIC_VULN_TYPES[report.name]]
            #if report.name in constants.DYNAMIC_VULN_TYPES:
            # report.severity = constants.SEVERITY_PRIORITIES[constants.DYNAMIC_VULN_TYPES[report.name]]
            report.severity = instance['severity']
            if 'reference_class' in instance:
                report.vuln_class = instance['reference_class']
            if 'reference_method' in instance:
                report.vuln_method = instance['reference_method']
            # Defaults when the analyzer left these unset.
            if report.confidence is None:
                report.confidence = 1
            if report.dynamicTest is None:
                report.dynamicTest = False
            report.save()
            if instance['dynamic_test'] :
                dynamicTestResult = DynamicTestResults(name = '' ,status = 'UNKNOWN' ,count = 0 ,description = '' ,vuln = report)
                dynamicTestResult.save()
def add_permissions(permissions, app):
    # For each manifest permission (name -> [danger, description, ...]),
    # reuse an existing Permission row when a name match exists, otherwise
    # create one; then link it to *app* (many-to-many).
    for perm_name in permissions.keys():
        #print perm_name
        res = Permission.objects.search.query('match', name = perm_name)
        if len(res)==0:
            django_perm = Permission (name = perm_name,
                perm_description = permissions[perm_name][1],
                perm_danger = permissions[perm_name][0])
            django_perm.save()
        else:
            django_perm = res[0]
        django_perm.app.add(app)
def get_app_name(a, d):
    # Resolve the app's display name from the manifest's android:label.
    # When the label is a resource reference ('@...'), look it up in the
    # package's R$string class and the compiled resources; fall back to the
    # package name when the R class is missing.
    try:
        app_name = a.xml['AndroidManifest.xml'].getElementsByTagName('application').pop().attributes['android:label'].nodeValue
    except Exception as poof:
        app_name = 'Error:' + repr(poof)
    if app_name[0] == '@':
        package_name = a.package
        # JVM internal name of the generated string-resource class.
        class_name = "L"+package_name.replace('.','/')+"/R$string;"
        my_R_strings = d.get_class(class_name)
        if my_R_strings == None:
            return package_name
        else:
            res = a.get_android_resources()
            for element in my_R_strings.get_fields():
                # Match the field's resource id (hex) against the '@...' ref.
                elem_offset = format (element.init_value.get_value(),"03X")
                if elem_offset == app_name[1:]:
                    resource_name = element.get_name()
                    app_name = res.get_string(package_name, resource_name)[1]
    return app_name
# classifier_report = classifier_interface_file.evaluate_apk(permissions, perms_list_file, model_file)
# marvin_es.store_cr(package_name, classifier_report)
|
en
| 0.476154
|
# Copyright (c) 2015, Fundacion Dr. <NAME> # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #import md5 #import sha Decorator to run a function as a process. Be sure that every argument and the return value is *pickable*. The created process is joined, so the code does not run in parallel. 
# register original function with different name # in sys.modules so it is pickable # LOGGING = { # 'version': 1, # 'disable_existing_loggers': False, # 'handlers': { # 'file': { # 'level': 'DEBUG', # 'class': 'logging.FileHandler', # 'filename': '/tmp/packageinfo.debug.log', # }, # }, # 'loggers': { # 'packageinfo': { # 'handlers': ['file'], # 'level': 'DEBUG', # 'propagate': True, # }, # }, # } #logger = logging.getLogger("packageinfo") #threads = list() #threads.append(t) # try: # map (lambda cd: sources.update({cd.get_name():cd.get_source()}), d.get_classes()) # print "APK decompilado" # except Exception as poof: # print "Exception decompiling APK: " + repr (poof) #misc_info = compile_misc_info(myPackage) #store_apk(rawfile, app.package_name, app.md5) #print "Decompilando clases" #t = threading.Thread (target=save_sources_worker, args=(d, app, overrides)) #threads = list() #threads.append(t) #t.start() # Me estaba subiendo los fuentes al repo antes de terminar de cargarlos # en la DB. Lo pase al thread que los carga en la DB. 
#gitlab_upload_app(app.package_name, app.version) #t = threading.Thread (target=vuln_analysis, args=(app, myPackage, d, dx)) #threads = list() #threads.append(t) #t.start() # print "Decompilando clase " + javaclass.get_name() #sources.update({javaclass.get_name():source}) #gitlab_upload_app(app.package_name, app.version) #threads = list() #threads.append(t) #t.join() #print "Consiguiendo filename, package_name:" + app.package_name #print "filename:"+filename #print "Datos recuperados" #print "Error en retry: " + repr(poof) #print vuln_report #return vuln_report #if report.name in constants.STATIC_VULN_TYPES: # report.severity = constants.SEVERITY_PRIORITIES[constants.STATIC_VULN_TYPES[report.name]] #if report.name in constants.DYNAMIC_VULN_TYPES: # report.severity = constants.SEVERITY_PRIORITIES[constants.DYNAMIC_VULN_TYPES[report.name]] #print perm_name # classifier_report = classifier_interface_file.evaluate_apk(permissions, perms_list_file, model_file) # marvin_es.store_cr(package_name, classifier_report)
| 1.301162
| 1
|
modules/dbnd/src/dbnd/_core/utils/basics/load_python_module.py
|
busunkim96/dbnd
| 224
|
6628153
|
<filename>modules/dbnd/src/dbnd/_core/utils/basics/load_python_module.py
import importlib
import logging
import os
import re
import sys
from dbnd._core.errors import DatabandError, friendly_error
from dbnd._core.utils.basics.memoized import cached
logger = logging.getLogger(__name__)
# Exception types that signal a failed import. ModuleNotFoundError only
# exists on Python 3.6+; merely referencing the name raises NameError on
# Python 2, which the broad `except Exception` below absorbs.
try:
    import_errors = (ImportError, ModuleNotFoundError)
except Exception:
    # we are python2
    import_errors = (ImportError,)
@cached()
def _load_module(module, description):
    """Import ``module`` by dotted name, retrying with the CWD on sys.path.

    The result is memoized via ``@cached()``, so each module name is
    resolved (and the fallback attempted) at most once per process.

    :param module: dotted module path to import.
    :param description: human-readable origin of the module name; only used
        in the error raised when the import ultimately fails.
    :return: the imported module object.
    :raises: a friendly DatabandError (via ``failed_to_import_user_module``)
        when the module cannot be imported even with cwd on sys.path.
    """
    try:
        try:
            return importlib.import_module(module)
        except import_errors:
            # If cwd is already on sys.path the retry below cannot help
            # (e.g. a top-level "tests" package was already resolved from
            # site-packages) -- fail immediately in that case.
            if os.getcwd() in sys.path:
                raise
            # we'll try to load current folder to PYTHONPATH, just in case
            # (lazy %-args: the message is only rendered if INFO is enabled)
            logger.info(
                "Databand has failed to load module '%s', "
                "it will retry with cwd at PYTHONPATH.",
                module,
            )
            sys.path.insert(0, os.getcwd())
            m = importlib.import_module(module)
            logger.info(
                "We have managed to load module after adding %s to PYTHONPATH, "
                "please consider using 'pip install -e . ' with your project",
                os.getcwd(),
            )
            return m
    except import_errors as ex:
        logger.warning(
            "Failed to load module '%s' %s: cwd='%s', sys.path=\n\t%s",
            module,
            friendly_error.dbnd_module_not_found_tip(module),
            os.getcwd(),
            "\n\t".join(sys.path),
        )
        raise friendly_error.failed_to_import_user_module(
            ex, module=module, description=description
        )
def load_python_module(module, module_source):
    """Import every entry of the comma-separated ``module`` string.

    ``module_source`` describes where the module names came from and is
    used only for logging and error reporting.
    """
    logger.info("Loading modules '%s' from %s.", module, module_source)
    names = module.split(",")
    for name in names:
        _load_module(name, module_source)
def load_python_attr_from_module(attr_path):
    """Resolve ``"package.module.attr"`` into the attribute object.

    The regex splits at the last dot, so everything before it is treated
    as the module path and the final component as the attribute name.
    """
    match = re.match(r"^(\S+)\.(\S+)", attr_path)
    if match is None:
        raise friendly_error.config.wrong_func_attr_format(attr_path)
    module_path = match.group(1)
    attr_name = match.group(2)
    module = _load_module(module_path, description="")
    if not hasattr(module, attr_name):
        raise DatabandError("Failed to import symbol %s" % attr_path)
    return getattr(module, attr_name)
def load_python_callable(callable_path):
    """Resolve ``callable_path`` ("module.attr") and verify it is callable.

    :param callable_path: dotted path to a function or other callable.
    :return: the resolved callable object.
    :raises DatabandError: if the resolved attribute is not callable.
    """
    callable_attr = load_python_attr_from_module(callable_path)
    if not callable(callable_attr):
        # Report the user-supplied path rather than the object's repr, so
        # the message points at the misconfigured configuration value.
        raise DatabandError("The `%s` is not `callable`" % callable_path)
    return callable_attr
def run_user_func(callable_path):
    """Load the callable at ``callable_path`` and invoke it with no args.

    Returns ``None`` when no path is given; otherwise returns whatever the
    user function returns. Failures are logged and re-raised.
    """
    if not callable_path:
        return None
    user_func = load_python_callable(callable_path=callable_path)
    try:
        return user_func()
    except Exception:
        logger.warning("Failed to run user function %s", callable_path)
        raise
|
<filename>modules/dbnd/src/dbnd/_core/utils/basics/load_python_module.py
import importlib
import logging
import os
import re
import sys
from dbnd._core.errors import DatabandError, friendly_error
from dbnd._core.utils.basics.memoized import cached
logger = logging.getLogger(__name__)
try:
import_errors = (ImportError, ModuleNotFoundError)
except Exception:
# we are python2
import_errors = (ImportError,)
@cached()
def _load_module(module, description):
try:
try:
return importlib.import_module(module)
except import_errors as ex:
# in some cases it will not help
# like "tests" package.
# it too late to fix it as tests already loaded from site-packages..
if os.getcwd() in sys.path:
raise
# we'll try to load current folder to PYTHONPATH, just in case
logger.info(
"Databand has failed to load module '%s', "
"it will retry with cwd at PYTHONPATH." % module
)
sys.path.insert(0, os.getcwd())
m = importlib.import_module(module)
logger.info(
"We have managed to load module after adding %s to PYTHONPATH, "
"please consider using 'pip install -e . ' with your project"
% os.getcwd()
)
return m
except import_errors as ex:
logger.warning(
"Failed to load module '%s' %s: cwd='%s', sys.path=\n\t%s",
module,
friendly_error.dbnd_module_not_found_tip(module),
os.getcwd(),
"\n\t".join(sys.path),
)
raise friendly_error.failed_to_import_user_module(
ex, module=module, description=description
)
def load_python_module(module, module_source):
logger.info("Loading modules '%s' from %s.", module, module_source)
for m in module.split(","):
_load_module(m, module_source)
def load_python_attr_from_module(attr_path):
m = re.match(r"^(\S+)\.(\S+)", attr_path)
if not m:
raise friendly_error.config.wrong_func_attr_format(attr_path)
module_path, attr_name = m.group(1), m.group(2)
module = _load_module(module_path, description="")
if not hasattr(module, attr_name):
raise DatabandError("Failed to import symbol %s" % attr_path)
attr = getattr(module, attr_name)
return attr
def load_python_callable(callable_path):
callable_attr = load_python_attr_from_module(callable_path)
if not callable(callable_attr):
raise DatabandError("The `%s` is not `callable`" % callable_attr)
return callable_attr
def run_user_func(callable_path):
if not callable_path:
return None
f = load_python_callable(callable_path=callable_path)
try:
return f()
except Exception:
logger.warning("Failed to run user function %s", callable_path)
raise
|
en
| 0.951823
|
# we are python2 # in some cases it will not help # like "tests" package. # it too late to fix it as tests already loaded from site-packages.. # we'll try to load current folder to PYTHONPATH, just in case
| 2.389972
| 2
|
jerex/models/modules/coreference_resolution.py
|
Brant-Skywalker/jerex
| 39
|
6628154
|
import torch
from torch import nn as nn
from jerex import util
class CoreferenceResolution(nn.Module):
    """Scores candidate mention pairs for coreference.

    Every pair of entity-mention representations is concatenated with a
    meta embedding (indexed by ``coref_eds``) and classified with a single
    logit. Pairs are processed in chunks of at most ``max_pairs`` to bound
    peak memory.
    """

    def __init__(self, hidden_size, meta_embedding_size, ed_embeddings_count, prop_drop):
        super().__init__()
        # pair input: two mention representations + one meta embedding
        self.coref_linear = nn.Linear(hidden_size * 2 + meta_embedding_size, hidden_size)
        self.coref_classifier = nn.Linear(hidden_size, 1)
        self.coref_ed_embeddings = nn.Embedding(ed_embeddings_count, meta_embedding_size)
        self.dropout = nn.Dropout(prop_drop)

    def forward(self, mention_reprs, coref_mention_pairs, coref_eds, max_pairs=None):
        """Return one coreference logit per candidate mention pair.

        :param mention_reprs: mention representations, gathered by index.
        :param coref_mention_pairs: integer index pairs into
            ``mention_reprs``, shape (batch, n_pairs, 2).
        :param coref_eds: per-pair indices into the meta embedding table
            (presumably a bucketed mention distance -- confirm with caller).
        :param max_pairs: chunk size; defaults to all pairs at once.
        :return: logits of shape (batch, n_pairs).
        """
        batch_size = coref_mention_pairs.shape[0]
        # allocate the result directly on the module's device instead of
        # creating it on CPU and transferring it afterwards with .to()
        coref_clf = torch.zeros(
            [batch_size, coref_mention_pairs.shape[1]], device=self._device
        )
        # chunk processing to reduce memory usage
        max_pairs = max_pairs if max_pairs is not None else coref_mention_pairs.shape[1]
        coref_eds = self.coref_ed_embeddings(coref_eds)
        for i in range(0, coref_mention_pairs.shape[1], max_pairs):
            chunk_corefs = coref_mention_pairs[:, i:i + max_pairs]
            chunk_coref_eds = coref_eds[:, i:i + max_pairs]
            chunk_coref_clf = self._classify_corefs(mention_reprs, chunk_corefs, chunk_coref_eds)
            coref_clf[:, i:i + max_pairs] = chunk_coref_clf
        return coref_clf

    def _classify_corefs(self, mention_reprs, coref_mention_pairs, coref_eds):
        """Compute logits for one chunk of mention pairs."""
        batch_size = coref_mention_pairs.shape[0]
        # gather the two mention representations of each pair and flatten
        mention_pairs1 = util.batch_index(mention_reprs, coref_mention_pairs)
        mention_pairs = mention_pairs1.view(batch_size, mention_pairs1.shape[1], -1)
        coref_repr = torch.cat([mention_pairs, coref_eds], dim=2)
        coref_repr = torch.relu(self.coref_linear(coref_repr))
        coref_repr = self.dropout(coref_repr)
        # classify coref candidates
        chunk_coref_logits = self.coref_classifier(coref_repr)
        chunk_coref_logits = chunk_coref_logits.squeeze(dim=-1)
        return chunk_coref_logits

    @property
    def _device(self):
        # all parameters of this module live on one device; use the
        # classifier's weight as the representative
        return self.coref_classifier.weight.device
|
import torch
from torch import nn as nn
from jerex import util
class CoreferenceResolution(nn.Module):
def __init__(self, hidden_size, meta_embedding_size, ed_embeddings_count, prop_drop):
super().__init__()
self.coref_linear = nn.Linear(hidden_size * 2 + meta_embedding_size, hidden_size)
self.coref_classifier = nn.Linear(hidden_size, 1)
self.coref_ed_embeddings = nn.Embedding(ed_embeddings_count, meta_embedding_size)
self.dropout = nn.Dropout(prop_drop)
def forward(self, mention_reprs, coref_mention_pairs, coref_eds, max_pairs=None):
batch_size = coref_mention_pairs.shape[0]
# classify corefs
coref_clf = torch.zeros([batch_size, coref_mention_pairs.shape[1]]).to(self._device)
# coref
# obtain coref logits
# chunk processing to reduce memory usage
max_pairs = max_pairs if max_pairs is not None else coref_mention_pairs.shape[1]
coref_eds = self.coref_ed_embeddings(coref_eds)
for i in range(0, coref_mention_pairs.shape[1], max_pairs):
chunk_corefs = coref_mention_pairs[:, i:i + max_pairs]
chunk_coref_eds = coref_eds[:, i:i + max_pairs]
chunk_coref_clf = self._classify_corefs(mention_reprs, chunk_corefs, chunk_coref_eds)
coref_clf[:, i:i + max_pairs] = chunk_coref_clf
return coref_clf
def _classify_corefs(self, mention_reprs, coref_mention_pairs, coref_eds):
batch_size = coref_mention_pairs.shape[0]
# get pairs of entity mention representations
mention_pairs1 = util.batch_index(mention_reprs, coref_mention_pairs)
mention_pairs = mention_pairs1.view(batch_size, mention_pairs1.shape[1], -1)
coref_repr = torch.cat([mention_pairs, coref_eds], dim=2)
coref_repr = torch.relu(self.coref_linear(coref_repr))
coref_repr = self.dropout(coref_repr)
# classify coref candidates
chunk_coref_logits = self.coref_classifier(coref_repr)
chunk_coref_logits = chunk_coref_logits.squeeze(dim=-1)
return chunk_coref_logits
@property
def _device(self):
return self.coref_classifier.weight.device
|
en
| 0.686937
|
# classify corefs # coref # obtain coref logits # chunk processing to reduce memory usage # get pairs of entity mention representations # classify coref candidates
| 2.1549
| 2
|
demo/scripts/hot_tails.py
|
o-linder/runawayelectrongeneration
| 7
|
6628155
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------|
# Header
# -----------------------------------------------------------------------------|
from matplotlib import rc
import matplotlib.pyplot as plt
#from modules.dataTools import max_along_axis
import numpy as np
import scipy.constants as physConst
import scipy.integrate as integrate
from scipy.optimize import curve_fit
plt.ion()
rc('text', usetex=True)
rc('font', size=10, family='serif')
# -----------------------------------------------------------------------------|
# Class to calculate hot-tail population
# -----------------------------------------------------------------------------|
class hot_tail_generation:
    """Evolution of the hot-tail runaway-electron seed population during an
    exponential electron-temperature drop (Phys. Plasmas 15, 072502 (2008)).
    """

    # minimum value for exponential during integration
    __exp_min_val = 1e-100
    # factor for converting number density to current density (j = e*c*n)
    __j_conv = physConst.e*physConst.c
    # maximum number of iterations during quadrature
    __quad_max_iter = 1000
# -------------------------------------------------------------------------|
    def __init__(self, t, E, t_dec=None, t_del=0, ne=None, ne_i=None,
            ne_f=None, Te=None, Te_i=None, Te_f=None, calc_evolution=True):
        """Set up the temperature/density evolution and optionally run the
        hot-tail calculation.

        Parameters
        ----------
        t : array_like
            Time base (s).
        E : array_like or float
            Electric field (V/m); a scalar is broadcast over ``t``.
        t_dec : float, optional
            Exponential decay time (s); fitted from ``Te`` if omitted.
        t_del : float, optional
            Delay before the hot-tail evolution starts (s).
        ne, ne_i, ne_f : optional
            Electron density trace / initial / final value (m**-3).
        Te, Te_i, Te_f : optional
            Electron temperature trace / initial / final value (eV).
        calc_evolution : bool
            If True, immediately compute ``n_hot`` and ``j_hot``.
        """
        # ----- Can hot-tail calculations be performed --------------------|
        self.calc_possible = True
        # ----- Time array, delay and decay -------------------------------|
        self.t = np.atleast_1d(t)
        self.t_dec = t_dec
        self.t_del = t_del
        # ----- Electric field --------------------------------------------|
        self.E = np.abs(np.atleast_1d(E))
        if self.E.size == 1:
            self.E *= np.ones(self.t.shape)
        # ----- Electron temperature --------------------------------------|
        # Either a full trace `Te` is given (endpoints and decay time are
        # derived from it), or endpoints + decay time define an exponential.
        if Te is not None:
            self.Te = np.atleast_1d(Te)
            if self.Te.size == 1:
                self.Te_i = Te_i
                self.Te_f = Te_f
            elif self.Te.size > 1:
                self.Te_i = self.Te[0]
                self.Te_f = self.Te[-1]
                if self.t_dec is None:
                    print('Decay time not provided. Trying to perform a fit.')
                    self.t_dec =self.fit_exponential(self.t, self.Te)[0]
        elif np.all(np.array([Te_i, Te_f, self.t_dec]) != None):
            self.Te_i = Te_i
            self.Te_f = Te_f
            self.Te = self.Te_f + (self.Te_i - self.Te_f)\
                *np.exp(-self.t/self.t_dec)
        else:
            self.calc_possible = False
            print('Cannot set electron temperature.')
        # ----- Electron density ------------------------------------------|
        self.set_electron_density(ne=ne, ne_i=ne_i, ne_f=ne_f)
        # ----- Additional quantities -------------------------------------|
        # placeholders; overwritten by calc_additional_quantities()
        self.nu_0 = np.zeros(self.t.shape)
        self.v_T0 = np.zeros(self.t.shape)
        self.v_c = np.zeros(self.t.shape)
        self.tau = np.zeros(self.t.shape)
        self.calc_additional_quantities()
        # ----- Calculate evolution of the hot-tail population ------------|
        self.n_hot = np.zeros(self.t.shape)
        self.j_hot = np.zeros(self.t.shape)
        if calc_evolution:
            self.calc_evolution()
    # ----- end method __init__ -------------------------------------------|
# -------------------------------------------------------------------------|
    def calc_evolution(self, assume_single_max=False, increasing_only=True):
        """Calculates the evolution of the hot-tail population.

        Fills ``self.n_hot`` (density, m**-3) and ``self.j_hot`` (current
        density via j = e*c*n) over ``self.t``. If `assume_single_max` is
        set, the calculation stops as soon as the first maximum is
        encountered. NOTE(review): `increasing_only` currently has no
        effect -- its implementation below is commented out.
        """
        self.n_hot = np.zeros(self.t.shape)
        # Check if hot-tail calculation possible
        if not self.calc_possible:
            print('Calculation of hot-tail population not possible. Abort.')
            return
        # ----- Evolve hot-tail population --------------------------------|
        for i in range(self.t.size):
            if self.t[i] < self.t_del: continue
            # ----- Determine integration limits --------------------------|
            # Between v_c and where exponential drops below a value of
            # `__exp_min_val`
            int_lim = ( self.v_c[i],
                ((-np.log(self.__exp_min_val))**(3/2)-3*self.tau[i])**(1/3)\
                *self.v_T0)
            # skip if the interval is empty or ill-defined
            if int_lim[1]/self.v_c[i] < 1 or np.isnan(int_lim[1]): continue
            # ----- Hot-tail population at `t[i]` -------------------------|
            self.n_hot[i] = 4*self.ne_i/(np.sqrt(np.pi)*self.v_T0**3) \
                *integrate.quadrature(
                lambda v: np.exp(-((v/self.v_T0)**3 + 3*self.tau[i])**(2/3)) \
                *(v**2 - self.v_c[i]**2),
                *int_lim, maxiter=self.__quad_max_iter)[0]
            # stop calculation if maximum has been reached
            if assume_single_max and i > 0 and self.n_hot[i] < self.n_hot[i-1]:
                break
        # ----- Final hot-tail density does not decay ---------------------|
        # This assumes, that electrons with velocities exceeding the
        # critical velocity do not equilibriate through collisions since
        # they experience net acceleration by the applied electric field.
        # if increasing_only:
        #     __ = max_along_axis(self.n_hot)
        # ----- Calculate hot-tail carried current ------------------------|
        # This assumes j_hot = e c n_hot
        self.j_hot = self.__j_conv * self.n_hot
    # ----- end method calc_evolution -------------------------------------|
# -------------------------------------------------------------------------|
# Setup electron temperature and density profiles
# -------------------------------------------------------------------------|
def set_electron_density(self, ne=None, ne_i=None, ne_f=None):
"""Function to set the electron density evolution.
"""
if ne is not None:
self.ne = np.atleast_1d(ne)
if self.ne.size == 1:
self.ne_i = ne_i
self.ne_f = ne_f
elif self.ne.size > 1:
self.ne_i = self.ne[0]
self.ne_f = self.ne[-1]
elif np.all(np.array([ne_i, ne_f, self.t_dec]) != None):
self.ne_i = ne_i
self.ne_f = ne_f
self.ne = self.ne_f + (self.ne_i - self.ne_f)\
*np.exp(-self.t/self.t_dec)
elif ne_i is not None:
self.ne_i = ne_i
self.ne_f = ne_i
self.ne = ne_i*np.ones(self.t.shape)
else:
self.calc_possible = False
print('Cannot set electron density. Abort.')
# ----- end method set_electron_density -------------------------------|
def fit_exponential(self, x, y):
"""Fit an exponential to the data (`x`, `y`) by taking the logarim of
`y` and fitting a linear function to it, thus retrieve the decay time.
"""
popt, pcov = curve_fit(self.lin_func, x, np.log(y), p0=(1e-4, 1e0))
return popt[0], np.sqrt(pcov[0,0])
# ----- end method fit_exponential ------------------------------------|
# -------------------------------------------------------------------------|
def lin_func(self, x, a, b):
"""Linear function for interpolation, yielding the negative, inverse
slope `a` and the offset `b`. This can be used to determine a decay
time for an exponentially decreasing function.
"""
return -x/a+b
# ----- end method lin_func -------------------------------------------|
# -------------------------------------------------------------------------|
# Additional quantities necessary to determine hot-tail population
# -------------------------------------------------------------------------|
    def calc_additional_quantities(self):
        """Calculates additional quantities needed to evaluate the evolution
        of the hot-tail population. Overwrites the array placeholders set in
        ``__init__`` (``nu_0`` and ``v_T0`` become scalars, ``v_c`` and
        ``tau`` time arrays).
        """
        if not self.calc_possible: return
        # initial collision frequency (from initial density/temperature)
        self.nu_0 = self.__nu__(self.ne_i, self.Te_i)
        # initial thermal velocity
        self.v_T0 = self.__v_T__(self.Te_i)
        # critical velocity along the time base
        self.v_c = self.__v_c__(self.ne, self.Te, self.E)
        # normalized collision-time integral tau
        self.tau = self.__tau__(self.t, self.t_dec, self.nu_0,
            ne_i=self.ne_i, ne_f=self.ne_f, method='ppg')
    # ----- end method calc_additional_quantities -------------------------|
# ---------------------------------------------------------------------|
def __EVDF__(self, v, n, v_T, tau=0):
"""Calculates the value of the Maxwellian electron velocity
distribution function at velocity `v` in units of m/s for electron
density `n` in units of m**-3, thermal velocity `v_T` in units of m/s
and `tau`.
From <NAME> and <NAME>. Phys. Plasmas 15, 072502 (2008),
eq. (9).
"""
return n/(np.sqrt(np.pi)*v_T)**3*np.exp(-((v/v_T)**3 + 3*tau)**(2/3))
# ----- end method __EVDF__ -------------------------------------------|
# ---------------------------------------------------------------------|
def __lnLambda__(self, n, T):
"""
Calculates Coulomb logarithm for electron-electron collisions of thermal particles of
density `n` in units of m**-3 and temperature `T` in units of eV.
From <NAME>. Tokamaks. Oxford University Press 2004, p. 727.
"""
return 14.9 - .5*np.log(n*1e-20) + np.log(1e-3*T)
# ----- end method __lnLambda__ ---------------------------------------|
# ---------------------------------------------------------------------|
def __nu__(self, n, T):
"""
Calculates the electron-electron collision frequency for thermal particles of density
`n` in units of m**-3 and temperature `T` in units of eV.
From <NAME> al., Plasma Phys. Control. Fusion 44, B247 (2002).
"""
return n*self.__lnLambda__(n, T)/self.__v_T__(T)**3 \
*physConst.e**4/(4*np.pi*physConst.epsilon_0**2*physConst.m_e**2)
# ---- end method __nu__ ----------------------------------------------|
# ---------------------------------------------------------------------|
def __tau__(self, t, t_char, nu_0, ne_i=1, ne_f=0, method='ppg'):
"""
Calcualtes the parameter tau for hot-tail generation using either the `method` 'ppg' from
Geri's implementation or 'Smith' from <NAME> and <NAME>. Phys. Plasmas 15, 072502
(2008), eq. (17). In case of 'ppg', the characteristic time `t_char` is the exponential
decay time, in case of 'Smith', `t_char` is the time delay.
"""
# ----- Check input -----------------------------------------------|
# Eliminates the need of providing initial and final electron
# density if this quantity does not change throughout the
# temperature decay.
if ne_f == 0:
ne_f = ne_i
# ----- Calculate quantity tau ------------------------------------|
tau = np.empty(t.shape)
if method=='ppg':
tau[t < 2*t_char] = t[t < 2*t_char]**2/4/t_char
tau[t >= 2*t_char] = t[t >= 2*t_char] - t_char
elif method=='Smith':
tau[t <= t_char] = 0.
tau[t > t_char] = t[t > t_char] - t_char
return tau*nu_0*ne_f/ne_i
# ----- end method __tau__ --------------------------------------------|
# ---------------------------------------------------------------------|
def __v_c__(self, n, T, E):
"""
Calculates critical velocity for electron runaway with electron density `n` in units of
m**-3, electron temperature `T` in units of eV and external electric field `E` in units
of V/m.
From <NAME> and <NAME>. Phys. Plasmas 15, 072502 (2008).
"""
return np.sqrt(n*physConst.e**3*self.__lnLambda__(n, T)) \
/np.sqrt((4*np.pi*physConst.epsilon_0**2*physConst.m_e*E))
# ---------------------------------------------------------------------|
def __v_T__(self, T):
"""
Calculates electron thermal velocity at temperature `T`, with `T` in units of eV.
"""
return np.sqrt(2*T*physConst.e/physConst.m_e)
# ----- end method __v_T__ --------------------------------------------|
# -------------------------------------------------------------------------|
# Plot the evolution of key quantities, being the
# -------------------------------------------------------------------------|
    def plot_evolution(self):
        """Plot the evolution of the hot-tail population and associated
        quantities in a 3x2 grid: hot-tail density/current, electron
        temperature, critical velocity (normalized to v_T0), electron
        density, tau, and electric field. Returns the figure.
        """
        fig, ax = plt.subplots(3, 2, figsize=(7,6))
        ax = fig.axes
        # hot-tail density (left axis) and carried current (right axis)
        ax[0].plot(self.t, 1e-16*self.n_hot, c='k')
        ax[0].set_title(r'Hot-tail population')
        ax[0].set_ylabel(r'$n_{\rm hot}$~(10$^{16}$~ m$^{-3}$)')
        ax_t = ax[0].twinx()
        ax_t.plot(self.t, 1e-6*self.j_hot, c='k')
        ax_t.set_ylabel(r'$j_{\rm hot}$~(MA/m$^2$)')
        ax_t.set_ylim(bottom=0)
        ax[1].plot(self.t, self.Te, c='k')
        ax[1].semilogy()
        ax[1].set_title('Electron temperature')
        ax[1].set_ylabel(r'$T_{\rm e}$~(eV)')
        ax[1].set_ylim(bottom=1)
        ax[2].plot(self.t, self.v_c/self.v_T0, c='k')
        ax[2].set_title('Critical velocity')
        ax[2].set_ylabel(r'$v_{\rm c}/v_{T_0}$')
        ax[3].plot(self.t, 1e-19*self.ne, c='k')
        ax[3].set_title('Electron density')
        ax[3].set_ylabel(r'$n_{\rm e}$~(10$^{19}$~m$^{-3}$)')
        ax[4].plot(self.t, self.tau, c='k')
        ax[4].set_title(r'$\tau$')
        ax[4].set_ylabel(r'$\tau$')
        ax[5].plot(self.t, self.E, c='k')
        ax[5].set_title('Electric field')
        ax[5].set_ylabel(r'$E$~(V/m)')
        # common x axis; all panels except the log-scale Te start at zero
        for i, a in enumerate(ax):
            a.set_xlabel(r'$t~({\rm s})$')
            a.set_xlim((self.t[0], self.t[-1]))
            if i != 1:
                a.set_ylim(bottom=0)
        plt.tight_layout()
        return fig
    # ----- end method plot_evolution -------------------------------------|
# -----------------------------------------------------------------------------|
# Function to demonstrate hot-tail population evolution
# -----------------------------------------------------------------------------|
def demo():
    """Run an example hot-tail calculation and return the result object."""
    time_base = np.arange(0, 2.e-3 + 5.e-6, 5.e-6)
    e_field = 1. + (0.01 - 1.)*np.exp(-time_base/5.e-4)
    scenario = hot_tail_generation(
        time_base, e_field, t_del=0, t_dec=1.5e-4,
        ne_i=3.e19, ne_f=15.e19, Te_i=7.e3, Te_f=10, calc_evolution=False)
    scenario.calc_evolution(assume_single_max=False, increasing_only=False)
    # scenario.plot_evolution()
    return scenario
# ----- end function demo -------------------------------------------------|
# -----------------------------------------------------------------------------|
# Run demo
# -----------------------------------------------------------------------------|
ht = demo()
np.savetxt('dat/hot_tails_python.dat',
np.array([ht.t, ht.n_hot, ht.ne, ht.Te, ht.E, ht.v_c/ht.v_T0, ht.tau]).T,
fmt='%19.12e',
header= 'Time (s) ' + \
' n_hot (m**-3) ' + \
' n_e (m**-3) ' + \
' T_e (ev) ' + \
' E_par (V/m) ' + \
' v_c (v_th0) ' + \
' tau',
)
# ----- end script hot_tails.py -----------------------------------------------|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------|
# Header
# -----------------------------------------------------------------------------|
from matplotlib import rc
import matplotlib.pyplot as plt
#from modules.dataTools import max_along_axis
import numpy as np
import scipy.constants as physConst
import scipy.integrate as integrate
from scipy.optimize import curve_fit
plt.ion()
rc('text', usetex=True)
rc('font', size=10, family='serif')
# -----------------------------------------------------------------------------|
# Class to calculate hot-tail population
# -----------------------------------------------------------------------------|
class hot_tail_generation:
# minimum value for exponential during integration
__exp_min_val = 1e-100
# factor for converting number to current density
__j_conv = physConst.e*physConst.c
# maximum number of iteration during quadrature
__quad_max_iter = 1000
# -------------------------------------------------------------------------|
def __init__(self, t, E, t_dec=None, t_del=0, ne=None, ne_i=None,
ne_f=None, Te=None, Te_i=None, Te_f=None, calc_evolution=True):
# ----- Can hot-tail calculations be performed --------------------|
self.calc_possible = True
# ----- Time array, delay and decay -------------------------------|
self.t = np.atleast_1d(t)
self.t_dec = t_dec
self.t_del = t_del
# ----- Electric field --------------------------------------------|
self.E = np.abs(np.atleast_1d(E))
if self.E.size == 1:
self.E *= np.ones(self.t.shape)
# ----- Electron temperature --------------------------------------|
if Te is not None:
self.Te = np.atleast_1d(Te)
if self.Te.size == 1:
self.Te_i = Te_i
self.Te_f = Te_f
elif self.Te.size > 1:
self.Te_i = self.Te[0]
self.Te_f = self.Te[-1]
if self.t_dec is None:
print('Decay time not provided. Trying to perform a fit.')
self.t_dec =self.fit_exponential(self.t, self.Te)[0]
elif np.all(np.array([Te_i, Te_f, self.t_dec]) != None):
self.Te_i = Te_i
self.Te_f = Te_f
self.Te = self.Te_f + (self.Te_i - self.Te_f)\
*np.exp(-self.t/self.t_dec)
else:
self.calc_possible = False
print('Cannot set electron temperature.')
# ----- Electron density ------------------------------------------|
self.set_electron_density(ne=ne, ne_i=ne_i, ne_f=ne_f)
# ----- Additional quantities -------------------------------------|
self.nu_0 = np.zeros(self.t.shape)
self.v_T0 = np.zeros(self.t.shape)
self.v_c = np.zeros(self.t.shape)
self.tau = np.zeros(self.t.shape)
self.calc_additional_quantities()
# ----- Calculate evolution of the hot-tail population ------------|
self.n_hot = np.zeros(self.t.shape)
self.j_hot = np.zeros(self.t.shape)
if calc_evolution:
self.calc_evolution()
# ----- end method __init__ -------------------------------------------|
# -------------------------------------------------------------------------|
def calc_evolution(self, assume_single_max=False, increasing_only=True):
"""Calculates the evolution of the hot-tail population. If the switch
`assume_single_max` is set, the calculation is stopped as soon as the
first maximum is encountered.
"""
self.n_hot = np.zeros(self.t.shape)
# Check if hot-tail calculation possible
if not self.calc_possible:
print('Calculation of hot-tail population not possible. Abort.')
return
# ----- Evolve hot-tail population --------------------------------|
for i in range(self.t.size):
if self.t[i] < self.t_del: continue
# ----- Determine integration limits --------------------------|
# Between v_c and where exponential drops below a value of
# `__exp_min_val`
int_lim = ( self.v_c[i],
((-np.log(self.__exp_min_val))**(3/2)-3*self.tau[i])**(1/3)\
*self.v_T0)
if int_lim[1]/self.v_c[i] < 1 or np.isnan(int_lim[1]): continue
# ----- Hot-tail population at `t[i]` -------------------------|
self.n_hot[i] = 4*self.ne_i/(np.sqrt(np.pi)*self.v_T0**3) \
*integrate.quadrature(
lambda v: np.exp(-((v/self.v_T0)**3 + 3*self.tau[i])**(2/3)) \
*(v**2 - self.v_c[i]**2),
*int_lim, maxiter=self.__quad_max_iter)[0]
# stop calculation if maximum has been reached
if assume_single_max and i > 0 and self.n_hot[i] < self.n_hot[i-1]:
break
# ----- Final hot-tail density does not decay ---------------------|
# This assumes, that electrons with velocities exceeding the
# critical velocity do not equilibriate through collisions since
# they experience net acceleration by the applied electric field.
# if increasing_only:
# __ = max_along_axis(self.n_hot)
# ----- Calculate hot-tail carried current ------------------------|
# This assumes j_hot = e c n_hot
self.j_hot = self.__j_conv * self.n_hot
# ----- end method calc_evolution -------------------------------------|
# -------------------------------------------------------------------------|
# Setup electron temperature and density profiles
# -------------------------------------------------------------------------|
def set_electron_density(self, ne=None, ne_i=None, ne_f=None):
"""Function to set the electron density evolution.
"""
if ne is not None:
self.ne = np.atleast_1d(ne)
if self.ne.size == 1:
self.ne_i = ne_i
self.ne_f = ne_f
elif self.ne.size > 1:
self.ne_i = self.ne[0]
self.ne_f = self.ne[-1]
elif np.all(np.array([ne_i, ne_f, self.t_dec]) != None):
self.ne_i = ne_i
self.ne_f = ne_f
self.ne = self.ne_f + (self.ne_i - self.ne_f)\
*np.exp(-self.t/self.t_dec)
elif ne_i is not None:
self.ne_i = ne_i
self.ne_f = ne_i
self.ne = ne_i*np.ones(self.t.shape)
else:
self.calc_possible = False
print('Cannot set electron density. Abort.')
# ----- end method set_electron_density -------------------------------|
def fit_exponential(self, x, y):
"""Fit an exponential to the data (`x`, `y`) by taking the logarim of
`y` and fitting a linear function to it, thus retrieve the decay time.
"""
popt, pcov = curve_fit(self.lin_func, x, np.log(y), p0=(1e-4, 1e0))
return popt[0], np.sqrt(pcov[0,0])
# ----- end method fit_exponential ------------------------------------|
# -------------------------------------------------------------------------|
def lin_func(self, x, a, b):
"""Linear function for interpolation, yielding the negative, inverse
slope `a` and the offset `b`. This can be used to determine a decay
time for an exponentially decreasing function.
"""
return -x/a+b
# ----- end method lin_func -------------------------------------------|
# -------------------------------------------------------------------------|
# Additional quantities necessary to determine hot-tail population
# -------------------------------------------------------------------------|
def calc_additional_quantities(self):
"""Calculates additional quantities needed to evaluate the evolution
of the hot-tail population.
"""
if not self.calc_possible: return
# initial collision frequency
self.nu_0 = self.__nu__(self.ne_i, self.Te_i)
# initial thermal velocity
self.v_T0 = self.__v_T__(self.Te_i)
# critical velocity
self.v_c = self.__v_c__(self.ne, self.Te, self.E)
# tau
self.tau = self.__tau__(self.t, self.t_dec, self.nu_0,
ne_i=self.ne_i, ne_f=self.ne_f, method='ppg')
# ----- end method calc_additional_quantities -------------------------|
# ---------------------------------------------------------------------|
def __EVDF__(self, v, n, v_T, tau=0):
"""Calculates the value of the Maxwellian electron velocity
distribution function at velocity `v` in units of m/s for electron
density `n` in units of m**-3, thermal velocity `v_T` in units of m/s
and `tau`.
From <NAME> and <NAME>. Phys. Plasmas 15, 072502 (2008),
eq. (9).
"""
return n/(np.sqrt(np.pi)*v_T)**3*np.exp(-((v/v_T)**3 + 3*tau)**(2/3))
# ----- end method __EVDF__ -------------------------------------------|
# ---------------------------------------------------------------------|
def __lnLambda__(self, n, T):
"""
Calculates Coulomb logarithm for electron-electron collisions of thermal particles of
density `n` in units of m**-3 and temperature `T` in units of eV.
From <NAME>. Tokamaks. Oxford University Press 2004, p. 727.
"""
return 14.9 - .5*np.log(n*1e-20) + np.log(1e-3*T)
# ----- end method __lnLambda__ ---------------------------------------|
# ---------------------------------------------------------------------|
def __nu__(self, n, T):
"""
Calculates the electron-electron collision frequency for thermal particles of density
`n` in units of m**-3 and temperature `T` in units of eV.
From <NAME> al., Plasma Phys. Control. Fusion 44, B247 (2002).
"""
return n*self.__lnLambda__(n, T)/self.__v_T__(T)**3 \
*physConst.e**4/(4*np.pi*physConst.epsilon_0**2*physConst.m_e**2)
# ---- end method __nu__ ----------------------------------------------|
# ---------------------------------------------------------------------|
def __tau__(self, t, t_char, nu_0, ne_i=1, ne_f=0, method='ppg'):
"""
Calcualtes the parameter tau for hot-tail generation using either the `method` 'ppg' from
Geri's implementation or 'Smith' from <NAME> and <NAME>. Phys. Plasmas 15, 072502
(2008), eq. (17). In case of 'ppg', the characteristic time `t_char` is the exponential
decay time, in case of 'Smith', `t_char` is the time delay.
"""
# ----- Check input -----------------------------------------------|
# Eliminates the need of providing initial and final electron
# density if this quantity does not change throughout the
# temperature decay.
if ne_f == 0:
ne_f = ne_i
# ----- Calculate quantity tau ------------------------------------|
tau = np.empty(t.shape)
if method=='ppg':
tau[t < 2*t_char] = t[t < 2*t_char]**2/4/t_char
tau[t >= 2*t_char] = t[t >= 2*t_char] - t_char
elif method=='Smith':
tau[t <= t_char] = 0.
tau[t > t_char] = t[t > t_char] - t_char
return tau*nu_0*ne_f/ne_i
# ----- end method __tau__ --------------------------------------------|
# ---------------------------------------------------------------------|
def __v_c__(self, n, T, E):
"""
Calculates critical velocity for electron runaway with electron density `n` in units of
m**-3, electron temperature `T` in units of eV and external electric field `E` in units
of V/m.
From <NAME> and <NAME>. Phys. Plasmas 15, 072502 (2008).
"""
return np.sqrt(n*physConst.e**3*self.__lnLambda__(n, T)) \
/np.sqrt((4*np.pi*physConst.epsilon_0**2*physConst.m_e*E))
# ---------------------------------------------------------------------|
def __v_T__(self, T):
"""
Calculates electron thermal velocity at temperature `T`, with `T` in units of eV.
"""
return np.sqrt(2*T*physConst.e/physConst.m_e)
# ----- end method __v_T__ --------------------------------------------|
# -------------------------------------------------------------------------|
# Plot the evolution of key quantities, being the
# -------------------------------------------------------------------------|
    def plot_evolution(self):
        """
        Plot the evolution of the hot-tail population and associated quantities.

        Creates a 3x2 grid of panels versus time: hot-tail density (with
        the carried current on a twin axis), electron temperature (log
        scale), normalised critical velocity, electron density, tau and
        the electric field. Returns the matplotlib figure.
        """
        fig, ax = plt.subplots(3, 2, figsize=(7,6))
        # flatten the 3x2 axes array for linear indexing
        ax = fig.axes
        # hot-tail density, with the carried current on a twin y-axis
        ax[0].plot(self.t, 1e-16*self.n_hot, c='k')
        ax[0].set_title(r'Hot-tail population')
        ax[0].set_ylabel(r'$n_{\rm hot}$~(10$^{16}$~ m$^{-3}$)')
        ax_t = ax[0].twinx()
        ax_t.plot(self.t, 1e-6*self.j_hot, c='k')
        ax_t.set_ylabel(r'$j_{\rm hot}$~(MA/m$^2$)')
        ax_t.set_ylim(bottom=0)
        # electron temperature on a logarithmic scale
        ax[1].plot(self.t, self.Te, c='k')
        ax[1].semilogy()
        ax[1].set_title('Electron temperature')
        ax[1].set_ylabel(r'$T_{\rm e}$~(eV)')
        ax[1].set_ylim(bottom=1)
        # critical velocity normalised to the initial thermal velocity
        ax[2].plot(self.t, self.v_c/self.v_T0, c='k')
        ax[2].set_title('Critical velocity')
        ax[2].set_ylabel(r'$v_{\rm c}/v_{T_0}$')
        ax[3].plot(self.t, 1e-19*self.ne, c='k')
        ax[3].set_title('Electron density')
        ax[3].set_ylabel(r'$n_{\rm e}$~(10$^{19}$~m$^{-3}$)')
        ax[4].plot(self.t, self.tau, c='k')
        ax[4].set_title(r'$\tau$')
        ax[4].set_ylabel(r'$\tau$')
        ax[5].plot(self.t, self.E, c='k')
        ax[5].set_title('Electric field')
        ax[5].set_ylabel(r'$E$~(V/m)')
        # common x-axis formatting; only the log-scale panel (i == 1)
        # keeps its lower y-limit free
        for i, a in enumerate(ax):
            a.set_xlabel(r'$t~({\rm s})$')
            a.set_xlim((self.t[0], self.t[-1]))
            if i != 1:
                a.set_ylim(bottom=0)
        plt.tight_layout()
        return fig
# ----- end method plot_evolution -------------------------------------|
# -----------------------------------------------------------------------------|
# Function to demonstrate hot-tail population evolution
# -----------------------------------------------------------------------------|
def demo():
    """Build and evolve a demonstration hot-tail population.

    Uses an exponentially relaxing electric field over a 2 ms window and
    returns the resulting hot_tail_generation instance.
    """
    step = 5.e-6
    time = np.arange(0, 2.e-3 + step, step)
    # field relaxes from 0.01 V/m towards 1 V/m with a 0.5 ms time constant
    field = 1. + (0.01 - 1.)*np.exp(-time/5.e-4)
    population = hot_tail_generation(
        time, field, t_del=0, t_dec=1.5e-4,
        ne_i=3.e19, ne_f=15.e19, Te_i=7.e3, Te_f=10, calc_evolution=False)
    population.calc_evolution(assume_single_max=False, increasing_only=False)
    # ht.plot_evolution()
    return population
# ----- end function demo -------------------------------------------------|
# -----------------------------------------------------------------------------|
# Run demo
# -----------------------------------------------------------------------------|
ht = demo()
np.savetxt('dat/hot_tails_python.dat',
np.array([ht.t, ht.n_hot, ht.ne, ht.Te, ht.E, ht.v_c/ht.v_T0, ht.tau]).T,
fmt='%19.12e',
header= 'Time (s) ' + \
' n_hot (m**-3) ' + \
' n_e (m**-3) ' + \
' T_e (ev) ' + \
' E_par (V/m) ' + \
' v_c (v_th0) ' + \
' tau',
)
# ----- end script hot_tails.py -----------------------------------------------|
|
en
| 0.429805
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # # -----------------------------------------------------------------------------| # Header # -----------------------------------------------------------------------------| #from modules.dataTools import max_along_axis # -----------------------------------------------------------------------------| # Class to calculate hot-tail population # -----------------------------------------------------------------------------| # minimum value for exponential during integration # factor for converting number to current density # maximum number of iteration during quadrature # -------------------------------------------------------------------------| # ----- Can hot-tail calculations be performed --------------------| # ----- Time array, delay and decay -------------------------------| # ----- Electric field --------------------------------------------| # ----- Electron temperature --------------------------------------| # ----- Electron density ------------------------------------------| # ----- Additional quantities -------------------------------------| # ----- Calculate evolution of the hot-tail population ------------| # ----- end method __init__ -------------------------------------------| # -------------------------------------------------------------------------| Calculates the evolution of the hot-tail population. If the switch
`assume_single_max` is set, the calculation is stopped as soon as the
first maximum is encountered. # Check if hot-tail calculation possible # ----- Evolve hot-tail population --------------------------------| # ----- Determine integration limits --------------------------| # Between v_c and where exponential drops below a value of # `__exp_min_val` # ----- Hot-tail population at `t[i]` -------------------------| # stop calculation if maximum has been reached # ----- Final hot-tail density does not decay ---------------------| # This assumes, that electrons with velocities exceeding the # critical velocity do not equilibriate through collisions since # they experience net acceleration by the applied electric field. # if increasing_only: # __ = max_along_axis(self.n_hot) # ----- Calculate hot-tail carried current ------------------------| # This assumes j_hot = e c n_hot # ----- end method calc_evolution -------------------------------------| # -------------------------------------------------------------------------| # Setup electron temperature and density profiles # -------------------------------------------------------------------------| Function to set the electron density evolution. # ----- end method set_electron_density -------------------------------| Fit an exponential to the data (`x`, `y`) by taking the logarim of
`y` and fitting a linear function to it, thus retrieve the decay time. # ----- end method fit_exponential ------------------------------------| # -------------------------------------------------------------------------| Linear function for interpolation, yielding the negative, inverse
slope `a` and the offset `b`. This can be used to determine a decay
time for an exponentially decreasing function. # ----- end method lin_func -------------------------------------------| # -------------------------------------------------------------------------| # Additional quantities necessary to determine hot-tail population # -------------------------------------------------------------------------| Calculates additional quantities needed to evaluate the evolution
of the hot-tail population. # initial collision frequency # initial thermal velocity # critical velocity # tau # ----- end method calc_additional_quantities -------------------------| # ---------------------------------------------------------------------| Calculates the value of the Maxwellian electron velocity
distribution function at velocity `v` in units of m/s for electron
density `n` in units of m**-3, thermal velocity `v_T` in units of m/s
and `tau`.
From <NAME> and <NAME>. Phys. Plasmas 15, 072502 (2008),
eq. (9). # ----- end method __EVDF__ -------------------------------------------| # ---------------------------------------------------------------------| Calculates Coulomb logarithm for electron-electron collisions of thermal particles of
density `n` in units of m**-3 and temperature `T` in units of eV.
From <NAME>. Tokamaks. Oxford University Press 2004, p. 727. # ----- end method __lnLambda__ ---------------------------------------| # ---------------------------------------------------------------------| Calculates the electron-electron collision frequency for thermal particles of density
`n` in units of m**-3 and temperature `T` in units of eV.
From <NAME> al., Plasma Phys. Control. Fusion 44, B247 (2002). # ---- end method __nu__ ----------------------------------------------| # ---------------------------------------------------------------------| Calcualtes the parameter tau for hot-tail generation using either the `method` 'ppg' from
Geri's implementation or 'Smith' from <NAME> and <NAME>. Phys. Plasmas 15, 072502
(2008), eq. (17). In case of 'ppg', the characteristic time `t_char` is the exponential
decay time, in case of 'Smith', `t_char` is the time delay. # ----- Check input -----------------------------------------------| # Eliminates the need of providing initial and final electron # density if this quantity does not change throughout the # temperature decay. # ----- Calculate quantity tau ------------------------------------| # ----- end method __tau__ --------------------------------------------| # ---------------------------------------------------------------------| Calculates critical velocity for electron runaway with electron density `n` in units of
m**-3, electron temperature `T` in units of eV and external electric field `E` in units
of V/m.
From <NAME> and <NAME>. Phys. Plasmas 15, 072502 (2008). # ---------------------------------------------------------------------| Calculates electron thermal velocity at temperature `T`, with `T` in units of eV. # ----- end method __v_T__ --------------------------------------------| # -------------------------------------------------------------------------| # Plot the evolution of key quantities, being the # -------------------------------------------------------------------------| Plot the evolution of the hot-tail population and associated quantities. # ----- end method plot_evolution -------------------------------------| # -----------------------------------------------------------------------------| # Function to demonstrate hot-tail population evolution # -----------------------------------------------------------------------------| # ht.plot_evolution() # ----- end function demo -------------------------------------------------| # -----------------------------------------------------------------------------| # Run demo # -----------------------------------------------------------------------------| # ----- end script hot_tails.py -----------------------------------------------|
| 2.705318
| 3
|
espresso/tasks/speech_recognition.py
|
rakhi-alina/espresso
| 0
|
6628156
|
# Copyright (c) <NAME>
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import itertools
import json
import logging
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
from fairseq import utils
from fairseq.data import BaseWrapperDataset, ConcatDataset
from fairseq.dataclass import FairseqDataclass
from fairseq.logging import metrics
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II
from espresso.data import (
AsrDataset,
AsrDictionary,
AsrTextDataset,
FeatScpCachedDataset,
)
# Module-level logger, named after this module per fairseq convention.
logger = logging.getLogger(__name__)
@dataclass
class SpeechRecognitionEspressoConfig(FairseqDataclass):
    """Configuration for the Espresso speech recognition task.

    Each field is exposed as a command-line argument through fairseq's
    dataclass-based config system; fields assigned ``II(...)`` are
    interpolations that mirror values from the common/dataset configs.
    """

    data: Optional[str] = field(
        default=None, metadata={"help": "path to data directory"}
    )
    dict: Optional[str] = field(default=None, metadata={"help": "path to the dictionary"})
    non_lang_syms: Optional[str] = field(
        default=None,
        metadata={
            "help": "path to a file listing non-linguistic symbols, e.g., <NOISE> "
            "etc. One entry per line. To be filtered out when calculating WER/CER"
        },
    )
    word_dict: Optional[str] = field(
        default=None,
        metadata={"help": "path to the word dictionary. Only relevant for decoding"},
    )
    wer_output_filter: Optional[str] = field(
        default=None,
        metadata={"help": "path to wer_output_filter file for WER evaluation"},
    )
    max_source_positions: Optional[int] = field(
        default=1024, metadata={"help": "max number of tokens in the source sequence"}
    )
    max_target_positions: Optional[int] = field(
        default=1024, metadata={"help": "max number of tokens in the target sequence"}
    )
    upsample_primary: int = field(
        default=1, metadata={"help": "amount to upsample primary dataset"},
    )
    num_batch_buckets: Optional[int] = field(
        default=0,
        metadata={
            "help": "if >0, then bucket source and target lengths into N "
            "buckets and pad accordingly; this is useful on TPUs "
            "to minimize the number of compilations"
        },
    )
    feat_in_channels: int = field(default=1, metadata={"help": "feature input channels"})
    specaugment_config: Optional[str] = field(
        default=None,
        metadata={
            "help": "SpecAugment config string. If not None and not empty, "
            "then apply SpecAugment. Should be an evaluatable expression of "
            "a python dict. See speech_tools.specaug_interpolate.specaug() for "
            "all allowed arguments. Arguments not appearing in this string "
            "will take on their default values"
        },
    )
    # TODO common vars below add to parent
    seed: int = II("common.seed")
    data_buffer_size: int = II("dataset.data_buffer_size")
    tpu: bool = II("common.tpu")
    train_subset: str = II("dataset.train_subset")
    valid_subset: str = II("dataset.valid_subset")
    gen_subset: str = II("dataset.gen_subset")
    required_seq_len_multiple: int = II("dataset.required_seq_len_multiple")
def get_asr_dataset_from_json(
    data_path,
    split,
    tgt_dict,
    combine,
    upsample_primary=1,
    num_buckets=0,
    shuffle=True,
    pad_to_multiple=1,
    seed=1,
    specaugment_config=None,
):
    """
    Parse data json and create dataset.
    See espresso/tools/asr_prep_json.py which pack json from raw files
    Json example:
    {
        "011c0202": {
            "feat": "fbank/raw_fbank_pitch_train_si284.1.ark:54819",
            "text": "THE HOTEL",
            "utt2num_frames": "693",
        },
        "011c0203": {
            ...
        }
    }

    Returns an AsrDataset pairing the feature dataset with the (optional)
    target-text dataset. Raises FileNotFoundError if even the first shard
    "<split>.json" is missing.
    """
    src_datasets = []
    tgt_datasets = []
    # iterate over shards "<split>.json", "<split>1.json", "<split>2.json", ...
    # until one is missing (only the first shard is mandatory)
    for k in itertools.count():
        split_k = split + (str(k) if k > 0 else "")
        data_json_path = os.path.join(data_path, "{}.json".format(split_k))
        if not os.path.isfile(data_json_path):
            if k > 0:
                # ran past the last available shard
                break
            else:
                raise FileNotFoundError(
                    "Dataset not found: {}".format(data_json_path)
                )
        with open(data_json_path, "rb") as f:
            # OrderedDict preserves utterance order from the json file
            loaded_json = json.load(f, object_pairs_hook=OrderedDict)
        utt_ids, feats, texts, utt2num_frames = [], [], [], []
        for utt_id, val in loaded_json.items():
            utt_ids.append(utt_id)
            feats.append(val["feat"])
            if "text" in val:
                texts.append(val["text"])
            if "utt2num_frames" in val:
                utt2num_frames.append(int(val["utt2num_frames"]))
        # either every utterance has a frame count, or none does
        assert len(utt2num_frames) == 0 or len(utt_ids) == len(utt2num_frames)
        # SpecAugment is applied on the training split only
        src_datasets.append(FeatScpCachedDataset(
            utt_ids, feats, utt2num_frames=utt2num_frames, seed=seed,
            specaugment_config=specaugment_config if split == "train" else None,
            ordered_prefetch=True,
        ))
        if len(texts) > 0:
            assert len(utt_ids) == len(texts)
            assert tgt_dict is not None
            tgt_datasets.append(AsrTextDataset(utt_ids, texts, tgt_dict))
        logger.info("{} {} examples".format(data_json_path, len(src_datasets[-1])))
        if not combine:
            break
    # either every shard has targets or none does
    assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
    feat_dim = src_datasets[0].feat_dim
    if len(src_datasets) == 1:
        src_dataset = src_datasets[0]
        tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
    else:
        # multiple shards: feature dimensions must agree before concatenation
        for i in range(1, len(src_datasets)):
            assert (
                feat_dim == src_datasets[i].feat_dim
            ), "feature dimension does not match across multiple json files"
        # only the primary (first) shard is upsampled
        sample_ratios = [1] * len(src_datasets)
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        if len(tgt_datasets) > 0:
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        else:
            tgt_dataset = None
    tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
    return AsrDataset(
        src_dataset,
        src_dataset.sizes,
        tgt_dataset,
        tgt_dataset_sizes,
        tgt_dict,
        left_pad_source=False,
        left_pad_target=False,
        num_buckets=num_buckets,
        shuffle=shuffle,
        pad_to_multiple=pad_to_multiple,
    )
@register_task("speech_recognition_espresso", dataclass=SpeechRecognitionEspressoConfig)
class SpeechRecognitionEspressoTask(FairseqTask):
    """
    Transcribe from speech (source) to text (target).
    Args:
        tgt_dict (~fairseq.data.AsrDictionary): dictionary for the output tokens
        word_dict (~fairseq.data.AsrDictionary): dictionary for the words
            (for decoding with word-based LMs)
        feat_in_channels (int): input feature channels
    .. note::
        The speech recognition task is compatible with :mod:`speech-train`,
        :mod:`speech-recognize` and :mod:`fairseq-interactive`.
        The speech recognition task provides the following additional command-line
        arguments:
    .. argparse::
        :ref: fairseq.tasks.speech_recognition_parser
        :prog:
    """
    @classmethod
    def load_dictionary(cls, filename, non_lang_syms=None):
        """Load the dictionary from the filename
        Args:
            filename (str): the filename
            non_lang_syms (str): non_lang_syms filename
        """
        return AsrDictionary.load(filename, f_non_lang_syms=non_lang_syms)
    @classmethod
    def build_dictionary(
        cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
    ):
        """Disabled: ASR dictionaries are prepared offline, so building one
        from raw text is not supported; always raises NotImplementedError.
        """
        raise NotImplementedError
    def __init__(self, cfg: SpeechRecognitionEspressoConfig, tgt_dict, feat_dim, word_dict=None):
        """Construct the task from a config and pre-loaded dictionaries.
        Args:
            cfg (SpeechRecognitionEspressoConfig): task configuration
            tgt_dict: target (token) dictionary
            feat_dim (int): input feature dimension
            word_dict: optional word dictionary for word-based LM decoding
        """
        super().__init__(cfg)
        self.tgt_dict = tgt_dict
        self.word_dict = word_dict
        self.feat_dim = feat_dim
        self.feat_in_channels = cfg.feat_in_channels
        self.specaugment_config = cfg.specaugment_config
        torch.backends.cudnn.deterministic = True
        # Compensate for the removal of :func:`torch.rand()` from
        # :func:`fairseq.distributed_utils.distributed_init()` by fairseq,
        # to make previous experiments reproducible.
        torch.rand(1)
    @classmethod
    def setup_task(cls, cfg: SpeechRecognitionEspressoConfig, **kwargs):
        """Setup the task (e.g., load dictionaries).
        Args:
            cfg (SpeechRecognitionEspressoConfig): configuration of this task
        """
        # load dictionaries
        dict_path = os.path.join(cfg.data, "dict.txt") if cfg.dict is None else cfg.dict
        tgt_dict = cls.load_dictionary(dict_path, non_lang_syms=cfg.non_lang_syms)
        logger.info("dictionary: {} types".format(len(tgt_dict)))
        # minimum code for loading data in order to obtain feat_dim
        paths = utils.split_paths(cfg.data)
        assert len(paths) > 0
        data_path = paths[0]
        split = cfg.valid_subset.split(",")[0]  # valid set is usually much smaller than train set, so it's faster
        try:
            src_dataset = get_asr_dataset_from_json(data_path, split, tgt_dict, combine=False).src
        except FileNotFoundError:
            # no valid set (e.g., decoding-only runs): fall back to gen set
            logger.warning(f"'{split}' set not found. Try to obtain feat_dim from '{cfg.gen_subset}'")
            src_dataset = get_asr_dataset_from_json(data_path, cfg.gen_subset, tgt_dict, combine=False).src
        # feat_dim lives on the underlying dataset, which may be wrapped
        if isinstance(src_dataset, ConcatDataset):
            feat_dim = src_dataset.datasets[0].feat_dim
        elif isinstance(src_dataset, BaseWrapperDataset):
            feat_dim = src_dataset.dataset.feat_dim
        else:
            feat_dim = src_dataset.feat_dim
        if cfg.word_dict is not None:
            word_dict = cls.load_dictionary(cfg.word_dict)
            logger.info("word dictionary: {} types".format(len(word_dict)))
            return cls(cfg, tgt_dict, feat_dim, word_dict=word_dict)
        else:
            return cls(cfg, tgt_dict, feat_dim)
    def load_dataset(
        self,
        split: str,
        epoch: int = 1,
        combine: bool = False,
        task_cfg: FairseqDataclass = None,
        **kwargs,
    ):
        """Load a given dataset split.
        Args:
            split (str): name of the split (e.g., train, valid, test)
            epoch (int): epoch number determining which shard of training data to load
            combine (bool): combines a split segmented into pieces into one dataset
            task_cfg (FairseqDataclass): optional task configuration stored in the checkpoint that can be used
                to load datasets
        """
        paths = utils.split_paths(self.cfg.data)
        assert len(paths) > 0
        if split != self.cfg.train_subset:
            # if not training data set, use the first shard for valid and test
            paths = paths[:1]
        # rotate through data shards by epoch for the training set
        data_path = paths[(epoch - 1) % len(paths)]
        task_cfg = task_cfg or self.cfg
        self.datasets[split] = get_asr_dataset_from_json(
            data_path,
            split,
            self.tgt_dict,
            combine=combine,
            upsample_primary=self.cfg.upsample_primary,
            num_buckets=self.cfg.num_batch_buckets,
            shuffle=(split != self.cfg.gen_subset),
            pad_to_multiple=self.cfg.required_seq_len_multiple,
            seed=self.cfg.seed,
            specaugment_config=self.specaugment_config,
        )
        # update the counts of <eos> and <unk> in tgt_dict with training data
        if split == "train":
            tgt_dataset = self.datasets[split].tgt
            # every target sequence contributes exactly one <eos>
            self.tgt_dict.count[self.tgt_dict.eos()] = len(tgt_dataset)
            unk_count = 0
            for i in range(len(tgt_dataset)):
                unk_count += (tgt_dataset[i][0] == self.tgt_dict.unk()).int().sum().item()
            self.tgt_dict.count[self.tgt_dict.unk()] = unk_count
    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        """Build an :class:`AsrDataset` for inference-time inputs."""
        return AsrDataset(
            src_tokens,
            src_lengths,
            dictionary=self.target_dictionary,
            constraints=constraints,
        )
    def build_model(self, model_cfg: FairseqDataclass):
        """Build the model and a greedy decoder used for WER validation."""
        model = super().build_model(model_cfg)
        # build the greedy decoder for validation with WER
        from espresso.tools.simple_greedy_decoder import SimpleGreedyDecoder
        self.decoder_for_validation = SimpleGreedyDecoder(
            [model], self.target_dictionary, for_validation=True,
        )
        return model
    def valid_step(self, sample, model, criterion):
        """Run the criterion's validation step, then add WER/CER counts
        obtained by greedy decoding to the logging output."""
        loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
        (
            logging_output["word_error"], logging_output["word_count"],
            logging_output["char_error"], logging_output["char_count"],
        ) = self._inference_with_wer(self.decoder_for_validation, sample, model)
        return loss, sample_size, logging_output
    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate error/token counts across workers and log WER/CER (%)."""
        super().reduce_metrics(logging_outputs, criterion)
        word_error = sum(log.get("word_error", 0) for log in logging_outputs)
        word_count = sum(log.get("word_count", 0) for log in logging_outputs)
        char_error = sum(log.get("char_error", 0) for log in logging_outputs)
        char_count = sum(log.get("char_count", 0) for log in logging_outputs)
        if word_count > 0:
            metrics.log_scalar("wer", float(word_error) / word_count * 100, word_count, round=4)
        if char_count > 0:
            metrics.log_scalar("cer", float(char_error) / char_count * 100, char_count, round=4)
    def max_positions(self):
        """Return the max sentence length allowed by the task."""
        return (self.cfg.max_source_positions, self.cfg.max_target_positions)
    @property
    def target_dictionary(self):
        """Return the target :class:`~fairseq.data.AsrDictionary`."""
        return self.tgt_dict
    def build_tokenizer(self, cfg: FairseqDataclass):
        """Build the pre-tokenizer for this task."""
        self.tgt_dict.build_tokenizer(cfg)
        # the instance is built within self.tgt_dict
        return self.tgt_dict.tokenizer
    def build_bpe(self, cfg: FairseqDataclass):
        """Build the tokenizer for this task."""
        self.tgt_dict.build_bpe(cfg)
        # the instance is built within self.tgt_dict
        return self.tgt_dict.bpe
    @property
    def word_dictionary(self):
        """Return the word :class:`~fairseq.data.AsrDictionary`
        (None unless a word dictionary was provided at construction)."""
        return self.word_dict
    def _inference_with_wer(self, decoder, sample, model):
        """Greedy-decode `sample` with `decoder` and score it against the
        reference transcripts, returning (word_error, word_count,
        char_error, char_count)."""
        from espresso.tools import wer
        scorer = wer.Scorer(self.target_dictionary, wer_output_filter=self.cfg.wer_output_filter)
        tokens, lprobs, _ = decoder.decode([model], sample)
        pred = tokens[:, 1:].data.cpu()  # bsz x len; drop the leading <eos>/bos column
        target = sample["target"]
        assert pred.size(0) == target.size(0)
        # compute word error stats
        scorer.reset()
        for i in range(target.size(0)):
            utt_id = sample["utt_id"][i]
            ref_tokens = sample["token_text"][i]
            pred_tokens = self.target_dictionary.string(pred.data[i])
            scorer.add_evaluation(utt_id, ref_tokens, pred_tokens)
        return (
            scorer.tot_word_error(), scorer.tot_word_count(),
            scorer.tot_char_error(), scorer.tot_char_count(),
        )
|
# Copyright (c) <NAME>
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import itertools
import json
import logging
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
from fairseq import utils
from fairseq.data import BaseWrapperDataset, ConcatDataset
from fairseq.dataclass import FairseqDataclass
from fairseq.logging import metrics
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II
from espresso.data import (
AsrDataset,
AsrDictionary,
AsrTextDataset,
FeatScpCachedDataset,
)
# Module-level logger, named after this module per fairseq convention.
logger = logging.getLogger(__name__)
@dataclass
class SpeechRecognitionEspressoConfig(FairseqDataclass):
    """Configuration for the Espresso speech recognition task.

    Each field is exposed as a command-line argument through fairseq's
    dataclass-based config system; fields assigned ``II(...)`` are
    interpolations that mirror values from the common/dataset configs.
    """

    data: Optional[str] = field(
        default=None, metadata={"help": "path to data directory"}
    )
    dict: Optional[str] = field(default=None, metadata={"help": "path to the dictionary"})
    non_lang_syms: Optional[str] = field(
        default=None,
        metadata={
            "help": "path to a file listing non-linguistic symbols, e.g., <NOISE> "
            "etc. One entry per line. To be filtered out when calculating WER/CER"
        },
    )
    word_dict: Optional[str] = field(
        default=None,
        metadata={"help": "path to the word dictionary. Only relevant for decoding"},
    )
    wer_output_filter: Optional[str] = field(
        default=None,
        metadata={"help": "path to wer_output_filter file for WER evaluation"},
    )
    max_source_positions: Optional[int] = field(
        default=1024, metadata={"help": "max number of tokens in the source sequence"}
    )
    max_target_positions: Optional[int] = field(
        default=1024, metadata={"help": "max number of tokens in the target sequence"}
    )
    upsample_primary: int = field(
        default=1, metadata={"help": "amount to upsample primary dataset"},
    )
    num_batch_buckets: Optional[int] = field(
        default=0,
        metadata={
            "help": "if >0, then bucket source and target lengths into N "
            "buckets and pad accordingly; this is useful on TPUs "
            "to minimize the number of compilations"
        },
    )
    feat_in_channels: int = field(default=1, metadata={"help": "feature input channels"})
    specaugment_config: Optional[str] = field(
        default=None,
        metadata={
            "help": "SpecAugment config string. If not None and not empty, "
            "then apply SpecAugment. Should be an evaluatable expression of "
            "a python dict. See speech_tools.specaug_interpolate.specaug() for "
            "all allowed arguments. Arguments not appearing in this string "
            "will take on their default values"
        },
    )
    # TODO common vars below add to parent
    seed: int = II("common.seed")
    data_buffer_size: int = II("dataset.data_buffer_size")
    tpu: bool = II("common.tpu")
    train_subset: str = II("dataset.train_subset")
    valid_subset: str = II("dataset.valid_subset")
    gen_subset: str = II("dataset.gen_subset")
    required_seq_len_multiple: int = II("dataset.required_seq_len_multiple")
def get_asr_dataset_from_json(
    data_path,
    split,
    tgt_dict,
    combine,
    upsample_primary=1,
    num_buckets=0,
    shuffle=True,
    pad_to_multiple=1,
    seed=1,
    specaugment_config=None,
):
    """
    Parse data json and create dataset.
    See espresso/tools/asr_prep_json.py which pack json from raw files
    Json example:
    {
        "011c0202": {
            "feat": "fbank/raw_fbank_pitch_train_si284.1.ark:54819",
            "text": "THE HOTEL",
            "utt2num_frames": "693",
        },
        "011c0203": {
            ...
        }
    }

    Returns an AsrDataset pairing the feature dataset with the (optional)
    target-text dataset. Raises FileNotFoundError if even the first shard
    "<split>.json" is missing.
    """
    src_datasets = []
    tgt_datasets = []
    # iterate over shards "<split>.json", "<split>1.json", "<split>2.json", ...
    # until one is missing (only the first shard is mandatory)
    for k in itertools.count():
        split_k = split + (str(k) if k > 0 else "")
        data_json_path = os.path.join(data_path, "{}.json".format(split_k))
        if not os.path.isfile(data_json_path):
            if k > 0:
                # ran past the last available shard
                break
            else:
                raise FileNotFoundError(
                    "Dataset not found: {}".format(data_json_path)
                )
        with open(data_json_path, "rb") as f:
            # OrderedDict preserves utterance order from the json file
            loaded_json = json.load(f, object_pairs_hook=OrderedDict)
        utt_ids, feats, texts, utt2num_frames = [], [], [], []
        for utt_id, val in loaded_json.items():
            utt_ids.append(utt_id)
            feats.append(val["feat"])
            if "text" in val:
                texts.append(val["text"])
            if "utt2num_frames" in val:
                utt2num_frames.append(int(val["utt2num_frames"]))
        # either every utterance has a frame count, or none does
        assert len(utt2num_frames) == 0 or len(utt_ids) == len(utt2num_frames)
        # SpecAugment is applied on the training split only
        src_datasets.append(FeatScpCachedDataset(
            utt_ids, feats, utt2num_frames=utt2num_frames, seed=seed,
            specaugment_config=specaugment_config if split == "train" else None,
            ordered_prefetch=True,
        ))
        if len(texts) > 0:
            assert len(utt_ids) == len(texts)
            assert tgt_dict is not None
            tgt_datasets.append(AsrTextDataset(utt_ids, texts, tgt_dict))
        logger.info("{} {} examples".format(data_json_path, len(src_datasets[-1])))
        if not combine:
            break
    # either every shard has targets or none does
    assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
    feat_dim = src_datasets[0].feat_dim
    if len(src_datasets) == 1:
        src_dataset = src_datasets[0]
        tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
    else:
        # multiple shards: feature dimensions must agree before concatenation
        for i in range(1, len(src_datasets)):
            assert (
                feat_dim == src_datasets[i].feat_dim
            ), "feature dimension does not match across multiple json files"
        # only the primary (first) shard is upsampled
        sample_ratios = [1] * len(src_datasets)
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        if len(tgt_datasets) > 0:
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        else:
            tgt_dataset = None
    tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
    return AsrDataset(
        src_dataset,
        src_dataset.sizes,
        tgt_dataset,
        tgt_dataset_sizes,
        tgt_dict,
        left_pad_source=False,
        left_pad_target=False,
        num_buckets=num_buckets,
        shuffle=shuffle,
        pad_to_multiple=pad_to_multiple,
    )
@register_task("speech_recognition_espresso", dataclass=SpeechRecognitionEspressoConfig)
class SpeechRecognitionEspressoTask(FairseqTask):
    """
    Transcribe from speech (source) to text (target).
    Args:
        tgt_dict (~fairseq.data.AsrDictionary): dictionary for the output tokens
        word_dict (~fairseq.data.AsrDictionary): dictionary for the words
        (for decoding with word-based LMs)
        feat_in_channels (int): input feature channels
    .. note::
        The speech recognition task is compatible with :mod:`speech-train`,
        :mod:`speech-recognize` and :mod:`fairseq-interactive`.
    The speech recognition task provides the following additional command-line
    arguments:
    .. argparse::
        :ref: fairseq.tasks.speech_recognition_parser
        :prog:
    """
    @classmethod
    def load_dictionary(cls, filename, non_lang_syms=None):
        """Load the dictionary from the filename
        Args:
            filename (str): the filename
            non_lang_syms (str): non_lang_syms filename
        """
        return AsrDictionary.load(filename, f_non_lang_syms=non_lang_syms)
    @classmethod
    def build_dictionary(
        cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
    ):
        """Disable this method
        """
        raise NotImplementedError
    def __init__(self, cfg: SpeechRecognitionEspressoConfig, tgt_dict, feat_dim, word_dict=None):
        """Initialize the task with pre-loaded dictionaries and the input feature dimension."""
        super().__init__(cfg)
        self.tgt_dict = tgt_dict
        self.word_dict = word_dict
        self.feat_dim = feat_dim
        self.feat_in_channels = cfg.feat_in_channels
        self.specaugment_config = cfg.specaugment_config
        # Determinism is forced so that validation WER is reproducible across runs.
        torch.backends.cudnn.deterministic = True
        # Compensate for the removal of :func:`torch.rand()` from
        # :func:`fairseq.distributed_utils.distributed_init()` by fairseq,
        # to make previous experiments reproducible.
        torch.rand(1)
    @classmethod
    def setup_task(cls, cfg: SpeechRecognitionEspressoConfig, **kwargs):
        """Setup the task (e.g., load dictionaries).
        Args:
            cfg (SpeechRecognitionEspressoConfig): configuration of this task
        """
        # load dictionaries
        dict_path = os.path.join(cfg.data, "dict.txt") if cfg.dict is None else cfg.dict
        tgt_dict = cls.load_dictionary(dict_path, non_lang_syms=cfg.non_lang_syms)
        logger.info("dictionary: {} types".format(len(tgt_dict)))
        # minimum code for loading data in order to obtain feat_dim
        paths = utils.split_paths(cfg.data)
        assert len(paths) > 0
        data_path = paths[0]
        split = cfg.valid_subset.split(",")[0]  # valid set is usually much smaller than train set, so it's faster
        try:
            src_dataset = get_asr_dataset_from_json(data_path, split, tgt_dict, combine=False).src
        except FileNotFoundError:
            logger.warning(f"'{split}' set not found. Try to obtain feat_dim from '{cfg.gen_subset}'")
            src_dataset = get_asr_dataset_from_json(data_path, cfg.gen_subset, tgt_dict, combine=False).src
        # The dataset may be wrapped/concatenated; unwrap to reach feat_dim.
        if isinstance(src_dataset, ConcatDataset):
            feat_dim = src_dataset.datasets[0].feat_dim
        elif isinstance(src_dataset, BaseWrapperDataset):
            feat_dim = src_dataset.dataset.feat_dim
        else:
            feat_dim = src_dataset.feat_dim
        if cfg.word_dict is not None:
            word_dict = cls.load_dictionary(cfg.word_dict)
            logger.info("word dictionary: {} types".format(len(word_dict)))
            return cls(cfg, tgt_dict, feat_dim, word_dict=word_dict)
        else:
            return cls(cfg, tgt_dict, feat_dim)
    def load_dataset(
        self,
        split: str,
        epoch: int = 1,
        combine: bool = False,
        task_cfg: FairseqDataclass = None,
        **kwargs,
    ):
        """Load a given dataset split.
        Args:
            split (str): name of the split (e.g., train, valid, test)
            epoch (int): epoch number determining which shard of training data to load
            combine (bool): combines a split segmented into pieces into one dataset
            task_cfg (FairseqDataclass): optional task configuration stored in the checkpoint that can be used
                to load datasets
        """
        paths = utils.split_paths(self.cfg.data)
        assert len(paths) > 0
        if split != self.cfg.train_subset:
            # if not training data set, use the first shard for valid and test
            paths = paths[:1]
        data_path = paths[(epoch - 1) % len(paths)]
        task_cfg = task_cfg or self.cfg
        self.datasets[split] = get_asr_dataset_from_json(
            data_path,
            split,
            self.tgt_dict,
            combine=combine,
            upsample_primary=self.cfg.upsample_primary,
            num_buckets=self.cfg.num_batch_buckets,
            shuffle=(split != self.cfg.gen_subset),
            pad_to_multiple=self.cfg.required_seq_len_multiple,
            seed=self.cfg.seed,
            specaugment_config=self.specaugment_config,
        )
        # update the counts of <eos> and <unk> in tgt_dict with training data
        if split == "train":
            tgt_dataset = self.datasets[split].tgt
            # Every utterance ends with exactly one <eos>.
            self.tgt_dict.count[self.tgt_dict.eos()] = len(tgt_dataset)
            unk_count = 0
            for i in range(len(tgt_dataset)):
                unk_count += (tgt_dataset[i][0] == self.tgt_dict.unk()).int().sum().item()
            self.tgt_dict.count[self.tgt_dict.unk()] = unk_count
    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        """Wrap raw source tokens/lengths in an :class:`AsrDataset` for generation."""
        return AsrDataset(
            src_tokens,
            src_lengths,
            dictionary=self.target_dictionary,
            constraints=constraints,
        )
    def build_model(self, model_cfg: FairseqDataclass):
        """Build the model and attach a greedy decoder used for WER during validation."""
        model = super().build_model(model_cfg)
        # build the greedy decoder for validation with WER
        from espresso.tools.simple_greedy_decoder import SimpleGreedyDecoder
        self.decoder_for_validation = SimpleGreedyDecoder(
            [model], self.target_dictionary, for_validation=True,
        )
        return model
    def valid_step(self, sample, model, criterion):
        """Run a validation step and augment logging outputs with WER/CER counts."""
        loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
        (
            logging_output["word_error"], logging_output["word_count"],
            logging_output["char_error"], logging_output["char_count"],
        ) = self._inference_with_wer(self.decoder_for_validation, sample, model)
        return loss, sample_size, logging_output
    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate WER/CER counts across workers and log the percentages."""
        super().reduce_metrics(logging_outputs, criterion)
        word_error = sum(log.get("word_error", 0) for log in logging_outputs)
        word_count = sum(log.get("word_count", 0) for log in logging_outputs)
        char_error = sum(log.get("char_error", 0) for log in logging_outputs)
        char_count = sum(log.get("char_count", 0) for log in logging_outputs)
        if word_count > 0:
            metrics.log_scalar("wer", float(word_error) / word_count * 100, word_count, round=4)
        if char_count > 0:
            metrics.log_scalar("cer", float(char_error) / char_count * 100, char_count, round=4)
    def max_positions(self):
        """Return the max sentence length allowed by the task."""
        return (self.cfg.max_source_positions, self.cfg.max_target_positions)
    @property
    def target_dictionary(self):
        """Return the target :class:`~fairseq.data.AsrDictionary`."""
        return self.tgt_dict
    def build_tokenizer(self, cfg: FairseqDataclass):
        """Build the pre-tokenizer for this task."""
        self.tgt_dict.build_tokenizer(cfg)
        # the instance is built within self.tgt_dict
        return self.tgt_dict.tokenizer
    def build_bpe(self, cfg: FairseqDataclass):
        """Build the tokenizer for this task."""
        self.tgt_dict.build_bpe(cfg)
        # the instance is built within self.tgt_dict
        return self.tgt_dict.bpe
    @property
    def word_dictionary(self):
        """Return the word :class:`~fairseq.data.AsrDictionary` (may be None)."""
        return self.word_dict
    def _inference_with_wer(self, decoder, sample, model):
        """Greedy-decode ``sample`` and return (word_error, word_count, char_error, char_count)."""
        from espresso.tools import wer
        scorer = wer.Scorer(self.target_dictionary, wer_output_filter=self.cfg.wer_output_filter)
        tokens, lprobs, _ = decoder.decode([model], sample)
        pred = tokens[:, 1:].data.cpu()  # bsz x len; drop the leading bos/eos column
        target = sample["target"]
        assert pred.size(0) == target.size(0)
        # compute word error stats
        scorer.reset()
        for i in range(target.size(0)):
            utt_id = sample["utt_id"][i]
            ref_tokens = sample["token_text"][i]
            pred_tokens = self.target_dictionary.string(pred.data[i])
            scorer.add_evaluation(utt_id, ref_tokens, pred_tokens)
        return (
            scorer.tot_word_error(), scorer.tot_word_count(),
            scorer.tot_char_error(), scorer.tot_char_count(),
        )
|
en
| 0.709116
|
# Copyright (c) <NAME> # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # TODO common vars below add to parent Parse data json and create dataset. See espresso/tools/asr_prep_json.py which pack json from raw files Json example: { "011c0202": { "feat": "fbank/raw_fbank_pitch_train_si284.1.ark:54819", "text": "THE HOTEL", "utt2num_frames": "693", }, "011c0203": { ... } } Transcribe from speech (source) to text (target). Args: tgt_dict (~fairseq.data.AsrDictionary): dictionary for the output tokens word_dict (~fairseq.data.AsrDictionary): dictionary for the words (for decoding with word-based LMs) feat_in_channels (int): input feature channels .. note:: The speech recognition task is compatible with :mod:`speech-train`, :mod:`speech-recognize` and :mod:`fairseq-interactive`. The speech recognition task provides the following additional command-line arguments: .. argparse:: :ref: fairseq.tasks.speech_recognition_parser :prog: Load the dictionary from the filename Args: filename (str): the filename non_lang_syms (str): non_lang_syms filename Disable this method # Compansate for the removel of :func:`torch.rand()` from # :func:`fairseq.distributed_utils.distributed_init()` by fairseq, # to make previous experiments reproducible. Setup the task (e.g., load dictionaries). Args: cfg (SpeechRecognitionEspressoConfig): configuration of this task # load dictionaries # minimum code for loading data in order to obtain feat_dim # valid set is usually much smaller than train set, so it's faster Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) epoch (int): epoch number determining which shard of training data to load combine (bool): combines a split segmented into pieces into one dataset task_cfg (FairseqDataclass): optional task configuration stored in the checkpoint that can be used to load datasets # if not training data set, use the first shard for valid and test # update the counts of <eos> and <unk> in tgt_dict with training data # build the greedy decoder for validation with WER Return the max sentence length allowed by the task. Return the target :class:`~fairseq.data.AsrDictionary`. Build the pre-tokenizer for this task. # the instance is built within self.tgt_dict Build the tokenizer for this task. # the instance is built within self.tgt_dict Return the target :class:`~fairseq.data.AsrDictionary`. # bsz x len # compute word error stats
| 1.750382
| 2
|
tests/unit/bokeh/application/handlers/test_document_lifecycle.py
|
jeisch/bokeh
| 1
|
6628157
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh.document import Document
# Module under test
import bokeh.application.handlers.document_lifecycle as bahd
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
class MockSessionContext(object):
    """Minimal stand-in for a Bokeh session context used by these tests."""

    def __init__(self, doc):
        # Only the attributes the lifecycle callbacks touch are mirrored here.
        self.counter = 0
        self.status = None
        self._document = doc
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
class Test_DocumentLifecycleHandler(object):
    # Public methods ----------------------------------------------------------

    def test_document_bad_on_session_destroyed_signature(self):
        """A callback that does not take exactly one argument is rejected."""
        doc = Document()

        def bad_callback(first, second):
            pass

        with pytest.raises(ValueError):
            doc.on_session_destroyed(bad_callback)

    def test_document_on_session_destroyed(self):
        """The handler runs the callback, then clears it from the document."""
        doc = Document()

        def mark_destroyed(session_context):
            assert doc is session_context._document
            session_context.status = 'Destroyed'

        doc.on_session_destroyed(mark_destroyed)
        ctx = MockSessionContext(doc)
        bahd.DocumentLifecycleHandler().on_session_destroyed(ctx)
        assert ctx.status == 'Destroyed'
        assert ctx._document.session_destroyed_callbacks == set()

    def test_document_on_session_destroyed_calls_multiple(self):
        """Every registered callback is invoked exactly once."""
        doc = Document()

        def add_one(session_context):
            session_context.counter += 1

        def add_two(session_context):
            session_context.counter += 2

        doc.on_session_destroyed(add_one)
        doc.on_session_destroyed(add_two)
        ctx = MockSessionContext(doc)
        bahd.DocumentLifecycleHandler().on_session_destroyed(ctx)
        assert ctx.counter == 3, 'DocumentLifecycleHandler did not call all callbacks'
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh.document import Document
# Module under test
import bokeh.application.handlers.document_lifecycle as bahd
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
class MockSessionContext(object):
    """Minimal stand-in for a Bokeh session context used by these tests."""

    def __init__(self, doc):
        # Only the attributes the lifecycle callbacks touch are mirrored here.
        self.counter = 0
        self.status = None
        self._document = doc
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
class Test_DocumentLifecycleHandler(object):
    # Public methods ----------------------------------------------------------

    def test_document_bad_on_session_destroyed_signature(self):
        """A callback that does not take exactly one argument is rejected."""
        doc = Document()

        def bad_callback(first, second):
            pass

        with pytest.raises(ValueError):
            doc.on_session_destroyed(bad_callback)

    def test_document_on_session_destroyed(self):
        """The handler runs the callback, then clears it from the document."""
        doc = Document()

        def mark_destroyed(session_context):
            assert doc is session_context._document
            session_context.status = 'Destroyed'

        doc.on_session_destroyed(mark_destroyed)
        ctx = MockSessionContext(doc)
        bahd.DocumentLifecycleHandler().on_session_destroyed(ctx)
        assert ctx.status == 'Destroyed'
        assert ctx._document.session_destroyed_callbacks == set()

    def test_document_on_session_destroyed_calls_multiple(self):
        """Every registered callback is invoked exactly once."""
        doc = Document()

        def add_one(session_context):
            session_context.counter += 1

        def add_two(session_context):
            session_context.counter += 2

        doc.on_session_destroyed(add_one)
        doc.on_session_destroyed(add_two)
        ctx = MockSessionContext(doc)
        bahd.DocumentLifecycleHandler().on_session_destroyed(ctx)
        assert ctx.counter == 3, 'DocumentLifecycleHandler did not call all callbacks'
|
en
| 0.143991
|
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors. # All rights reserved. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports # External imports # Bokeh imports # Module under test #----------------------------------------------------------------------------- # Setup #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- # Public methods ----------------------------------------------------------
| 1.303138
| 1
|
MagicCube/projection.py
|
MarioBerrios/RubikCube_2021
| 0
|
6628158
|
import numpy as np
class Quaternion:
    """Quaternion Rotation:
    Class to aid in representing 3D rotations via quaternions.
    """
    @classmethod
    def from_v_theta(cls, v, theta):
        """
        Construct quaternions from unit vectors v and rotation angles theta
        Parameters
        ----------
        v : array_like
            array of vectors, last dimension 3. Vectors will be normalized.
        theta : array_like
            array of rotation angles in radians, shape = v.shape[:-1].
        Returns
        -------
        q : quaternion object
            quaternion representing the rotations
        """
        theta = np.asarray(theta)
        v = np.asarray(v)
        # q = (cos(theta/2), sin(theta/2) * v_hat)
        s = np.sin(0.5 * theta)
        c = np.cos(0.5 * theta)
        v = v * s / np.sqrt(np.sum(v * v, -1))
        x_shape = v.shape[:-1] + (4,)
        x = np.ones(x_shape).reshape(-1, 4)
        x[:, 0] = c.ravel()
        x[:, 1:] = v.reshape(-1, 3)
        x = x.reshape(x_shape)
        return cls(x)
    def __init__(self, x):
        """Store the raw quaternion components (last dimension 4: w, x, y, z)."""
        self.x = np.asarray(x, dtype=float)
    def __repr__(self):
        return "Quaternion:\n" + self.x.__repr__()
    def __mul__(self, other):
        # multiplication of two quaternions (Hamilton product).
        # we don't implement multiplication by a scalar
        sxr = self.x.reshape(self.x.shape[:-1] + (4, 1))
        oxr = other.x.reshape(other.x.shape[:-1] + (1, 4))
        prod = sxr * oxr
        return_shape = prod.shape[:-1]
        prod = prod.reshape((-1, 4, 4)).transpose((1, 2, 0))
        ret = np.array([(prod[0, 0] - prod[1, 1]
                         - prod[2, 2] - prod[3, 3]),
                        (prod[0, 1] + prod[1, 0]
                         + prod[2, 3] - prod[3, 2]),
                        (prod[0, 2] - prod[1, 3]
                         + prod[2, 0] + prod[3, 1]),
                        (prod[0, 3] + prod[1, 2]
                         - prod[2, 1] + prod[3, 0])],
                       # BUGFIX: was ``dtype=np.float``; that alias was removed
                       # in NumPy 1.24, raising AttributeError. The builtin
                       # ``float`` is the documented equivalent (float64).
                       dtype=float,
                       order='F').T
        return self.__class__(ret.reshape(return_shape))
    def as_v_theta(self):
        """Return the v, theta equivalent of the (normalized) quaternion"""
        x = self.x.reshape((-1, 4)).T
        # compute theta
        norm = np.sqrt((x ** 2).sum(0))
        theta = 2 * np.arccos(x[0] / norm)
        # compute the unit vector
        v = np.array(x[1:], order='F', copy=True)
        v /= np.sqrt(np.sum(v ** 2, 0))
        # reshape the results
        v = v.T.reshape(self.x.shape[:-1] + (3,))
        theta = theta.reshape(self.x.shape[:-1])
        return v, theta
    def as_rotation_matrix(self):
        """Return the rotation matrix of the (normalized) quaternion"""
        v, theta = self.as_v_theta()
        shape = theta.shape
        theta = theta.reshape(-1)
        v = v.reshape(-1, 3).T
        c = np.cos(theta)
        s = np.sin(theta)
        # Rodrigues rotation formula, assembled per-component.
        mat = np.array([[v[0] * v[0] * (1. - c) + c,
                         v[0] * v[1] * (1. - c) - v[2] * s,
                         v[0] * v[2] * (1. - c) + v[1] * s],
                        [v[1] * v[0] * (1. - c) + v[2] * s,
                         v[1] * v[1] * (1. - c) + c,
                         v[1] * v[2] * (1. - c) - v[0] * s],
                        [v[2] * v[0] * (1. - c) - v[1] * s,
                         v[2] * v[1] * (1. - c) + v[0] * s,
                         v[2] * v[2] * (1. - c) + c]],
                       order='F').T
        return mat.reshape(shape + (3, 3))
    def rotate(self, points):
        """Apply this rotation to an array of 3D points."""
        M = self.as_rotation_matrix()
        return np.dot(points, M.T)
def project_points(points, q, view, vertical=(0, 1, 0)):
    """Project points using a quaternion q and a view v
    Parameters
    ----------
    points : array_like
        array of last-dimension 3
    q : Quaternion
        quaternion representation of the rotation
    view : array_like
        length-3 vector giving the point of view
    vertical : array_like
        direction of y-axis for view. An error will be raised if it
        is parallel to the view.
    Returns
    -------
    proj: array_like
        array of projected points: same shape as points.
    """
    points = np.asarray(points)
    view = np.asarray(view)
    # BUGFIX: the default was the mutable list ``[0, 1, 0]``; default values
    # are shared across calls in Python, so an immutable tuple is used instead.
    xdir = np.cross(vertical, view).astype(float)
    if np.all(xdir == 0):
        raise ValueError("vertical is parallel to v")
    xdir /= np.sqrt(np.dot(xdir, xdir))
    # get the unit vector corresponding to vertical
    ydir = np.cross(view, xdir)
    ydir /= np.sqrt(np.dot(ydir, ydir))
    # normalize the viewer location: this is the z-axis
    v2 = np.dot(view, view)
    zdir = view / np.sqrt(v2)
    # rotate the points
    R = q.as_rotation_matrix()
    Rpts = np.dot(points.astype(float), R.T)
    # project the points onto the view
    dpoint = Rpts - view
    dpoint_view = np.dot(dpoint, view).reshape(dpoint.shape[:-1] + (1,))
    dproj = -dpoint * v2 / dpoint_view
    trans = list(range(1, dproj.ndim)) + [0]
    return np.array([np.dot(dproj, xdir),
                     np.dot(dproj, ydir),
                     -np.dot(dpoint, zdir)]).transpose(trans)
|
import numpy as np
class Quaternion:
    """Quaternion Rotation:
    Class to aid in representing 3D rotations via quaternions.
    """
    @classmethod
    def from_v_theta(cls, v, theta):
        """
        Construct quaternions from unit vectors v and rotation angles theta
        Parameters
        ----------
        v : array_like
            array of vectors, last dimension 3. Vectors will be normalized.
        theta : array_like
            array of rotation angles in radians, shape = v.shape[:-1].
        Returns
        -------
        q : quaternion object
            quaternion representing the rotations
        """
        theta = np.asarray(theta)
        v = np.asarray(v)
        # q = (cos(theta/2), sin(theta/2) * v_hat)
        s = np.sin(0.5 * theta)
        c = np.cos(0.5 * theta)
        v = v * s / np.sqrt(np.sum(v * v, -1))
        x_shape = v.shape[:-1] + (4,)
        x = np.ones(x_shape).reshape(-1, 4)
        x[:, 0] = c.ravel()
        x[:, 1:] = v.reshape(-1, 3)
        x = x.reshape(x_shape)
        return cls(x)
    def __init__(self, x):
        """Store the raw quaternion components (last dimension 4: w, x, y, z)."""
        self.x = np.asarray(x, dtype=float)
    def __repr__(self):
        return "Quaternion:\n" + self.x.__repr__()
    def __mul__(self, other):
        # multiplication of two quaternions (Hamilton product).
        # we don't implement multiplication by a scalar
        sxr = self.x.reshape(self.x.shape[:-1] + (4, 1))
        oxr = other.x.reshape(other.x.shape[:-1] + (1, 4))
        prod = sxr * oxr
        return_shape = prod.shape[:-1]
        prod = prod.reshape((-1, 4, 4)).transpose((1, 2, 0))
        ret = np.array([(prod[0, 0] - prod[1, 1]
                         - prod[2, 2] - prod[3, 3]),
                        (prod[0, 1] + prod[1, 0]
                         + prod[2, 3] - prod[3, 2]),
                        (prod[0, 2] - prod[1, 3]
                         + prod[2, 0] + prod[3, 1]),
                        (prod[0, 3] + prod[1, 2]
                         - prod[2, 1] + prod[3, 0])],
                       # BUGFIX: was ``dtype=np.float``; that alias was removed
                       # in NumPy 1.24, raising AttributeError. The builtin
                       # ``float`` is the documented equivalent (float64).
                       dtype=float,
                       order='F').T
        return self.__class__(ret.reshape(return_shape))
    def as_v_theta(self):
        """Return the v, theta equivalent of the (normalized) quaternion"""
        x = self.x.reshape((-1, 4)).T
        # compute theta
        norm = np.sqrt((x ** 2).sum(0))
        theta = 2 * np.arccos(x[0] / norm)
        # compute the unit vector
        v = np.array(x[1:], order='F', copy=True)
        v /= np.sqrt(np.sum(v ** 2, 0))
        # reshape the results
        v = v.T.reshape(self.x.shape[:-1] + (3,))
        theta = theta.reshape(self.x.shape[:-1])
        return v, theta
    def as_rotation_matrix(self):
        """Return the rotation matrix of the (normalized) quaternion"""
        v, theta = self.as_v_theta()
        shape = theta.shape
        theta = theta.reshape(-1)
        v = v.reshape(-1, 3).T
        c = np.cos(theta)
        s = np.sin(theta)
        # Rodrigues rotation formula, assembled per-component.
        mat = np.array([[v[0] * v[0] * (1. - c) + c,
                         v[0] * v[1] * (1. - c) - v[2] * s,
                         v[0] * v[2] * (1. - c) + v[1] * s],
                        [v[1] * v[0] * (1. - c) + v[2] * s,
                         v[1] * v[1] * (1. - c) + c,
                         v[1] * v[2] * (1. - c) - v[0] * s],
                        [v[2] * v[0] * (1. - c) - v[1] * s,
                         v[2] * v[1] * (1. - c) + v[0] * s,
                         v[2] * v[2] * (1. - c) + c]],
                       order='F').T
        return mat.reshape(shape + (3, 3))
    def rotate(self, points):
        """Apply this rotation to an array of 3D points."""
        M = self.as_rotation_matrix()
        return np.dot(points, M.T)
def project_points(points, q, view, vertical=(0, 1, 0)):
    """Project points using a quaternion q and a view v
    Parameters
    ----------
    points : array_like
        array of last-dimension 3
    q : Quaternion
        quaternion representation of the rotation
    view : array_like
        length-3 vector giving the point of view
    vertical : array_like
        direction of y-axis for view. An error will be raised if it
        is parallel to the view.
    Returns
    -------
    proj: array_like
        array of projected points: same shape as points.
    """
    points = np.asarray(points)
    view = np.asarray(view)
    # BUGFIX: the default was the mutable list ``[0, 1, 0]``; default values
    # are shared across calls in Python, so an immutable tuple is used instead.
    xdir = np.cross(vertical, view).astype(float)
    if np.all(xdir == 0):
        raise ValueError("vertical is parallel to v")
    xdir /= np.sqrt(np.dot(xdir, xdir))
    # get the unit vector corresponding to vertical
    ydir = np.cross(view, xdir)
    ydir /= np.sqrt(np.dot(ydir, ydir))
    # normalize the viewer location: this is the z-axis
    v2 = np.dot(view, view)
    zdir = view / np.sqrt(v2)
    # rotate the points
    R = q.as_rotation_matrix()
    Rpts = np.dot(points.astype(float), R.T)
    # project the points onto the view
    dpoint = Rpts - view
    dpoint_view = np.dot(dpoint, view).reshape(dpoint.shape[:-1] + (1,))
    dproj = -dpoint * v2 / dpoint_view
    trans = list(range(1, dproj.ndim)) + [0]
    return np.array([np.dot(dproj, xdir),
                     np.dot(dproj, ydir),
                     -np.dot(dpoint, zdir)]).transpose(trans)
|
en
| 0.725653
|
Quaternion Rotation: Class to aid in representing 3D rotations via quaternions. Construct quaternions from unit vectors v and rotation angles theta Parameters ---------- v : array_like array of vectors, last dimension 3. Vectors will be normalized. theta : array_like array of rotation angles in radians, shape = v.shape[:-1]. Returns ------- q : quaternion object quaternion representing the rotations # multiplication of two quaternions. # we don't implement multiplication by a scalar Return the v, theta equivalent of the (normalized) quaternion # compute theta # compute the unit vector # reshape the results Return the rotation matrix of the (normalized) quaternion Project points using a quaternion q and a view v Parameters ---------- points : array_like array of last-dimension 3 q : Quaternion quaternion representation of the rotation view : array_like length-3 vector giving the point of view vertical : array_like direction of y-axis for view. An error will be raised if it is parallel to the view. Returns ------- proj: array_like array of projected points: same shape as points. # get the unit vector corresponing to vertical # normalize the viewer location: this is the z-axis # rotate the points # project the points onto the view
| 3.86424
| 4
|
homebrew.py
|
DiogoRibeiro7/homebrew_scraping
| 0
|
6628159
|
<gh_stars>0
import requests
import json
import time

# Fetch the index of all Homebrew formulae, then hit each formula's detail
# endpoint to collect its install-on-request analytics, and dump the result
# to package_info.json.
r = requests.get('https://formulae.brew.sh/api/formula.json')
all_packages = r.json()
results = []
t1 = time.perf_counter()
for package in all_packages:
    packages_name = package['name']
    packages_desc = package['desc']
    packages_url = f'https://formulae.brew.sh/api/formula/{packages_name}.json'
    r = requests.get(packages_url)
    # BUGFIX: the original rebound ``packages_json`` -- the very list being
    # iterated -- on every pass; it only worked because the for-loop holds its
    # own iterator. Use a distinct name for the per-package response.
    package_detail = r.json()
    install_30 = package_detail['analytics']['install_on_request']['30d'][packages_name]
    install_90 = package_detail['analytics']['install_on_request']['90d'][packages_name]
    install_365 = package_detail['analytics']['install_on_request']['365d'][packages_name]
    data = {
        'name': packages_name,
        'desc': packages_desc,
        'analytics': {
            '30d': install_30,
            '90d': install_90,
            '365d': install_365
        }
    }
    results.append(data)
    # Be polite to the API: wait roughly as long as the last request took.
    time.sleep(r.elapsed.total_seconds())
    print(f'Got {packages_name} in {r.elapsed.total_seconds()}')
t2 = time.perf_counter()
print(f'Finished in {t2-t1} seconds')
with open('package_info.json', 'w') as f:
    json.dump(results, f, indent=2)
|
import requests
import json
import time

# Fetch the index of all Homebrew formulae, then hit each formula's detail
# endpoint to collect its install-on-request analytics, and dump the result
# to package_info.json.
r = requests.get('https://formulae.brew.sh/api/formula.json')
all_packages = r.json()
results = []
t1 = time.perf_counter()
for package in all_packages:
    packages_name = package['name']
    packages_desc = package['desc']
    packages_url = f'https://formulae.brew.sh/api/formula/{packages_name}.json'
    r = requests.get(packages_url)
    # BUGFIX: the original rebound ``packages_json`` -- the very list being
    # iterated -- on every pass; it only worked because the for-loop holds its
    # own iterator. Use a distinct name for the per-package response.
    package_detail = r.json()
    install_30 = package_detail['analytics']['install_on_request']['30d'][packages_name]
    install_90 = package_detail['analytics']['install_on_request']['90d'][packages_name]
    install_365 = package_detail['analytics']['install_on_request']['365d'][packages_name]
    data = {
        'name': packages_name,
        'desc': packages_desc,
        'analytics': {
            '30d': install_30,
            '90d': install_90,
            '365d': install_365
        }
    }
    results.append(data)
    # Be polite to the API: wait roughly as long as the last request took.
    time.sleep(r.elapsed.total_seconds())
    print(f'Got {packages_name} in {r.elapsed.total_seconds()}')
t2 = time.perf_counter()
print(f'Finished in {t2-t1} seconds')
with open('package_info.json', 'w') as f:
    json.dump(results, f, indent=2)
|
none
| 1
| 2.766248
| 3
|
|
client/redis_sploit.py
|
n57uctf/DestructiveFarm
| 0
|
6628160
|
<gh_stars>0
#!/usr/bin/env python3
"""Dump every value stored in a remote Redis instance (host given as argv[1])."""
import sys
import redis

with redis.Redis(host=sys.argv[1], port=6379, db=0) as connection:
    # Enumerate all keys on db 0, then print each key's value.
    all_keys = connection.keys()
    for key in all_keys:
        print(connection.get(key))
|
#!/usr/bin/env python3
"""Dump every value stored in a remote Redis instance (host given as argv[1])."""
import sys
import redis

with redis.Redis(host=sys.argv[1], port=6379, db=0) as connection:
    # Enumerate all keys on db 0, then print each key's value.
    all_keys = connection.keys()
    for key in all_keys:
        print(connection.get(key))
|
en
| 0.461653
|
#!/usr/bin/env python3 # list all keys # value by key
| 3.112936
| 3
|
azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/models/create_data_lake_analytics_account_parameters.py
|
Christina-Kang/azure-sdk-for-python
| 1
|
6628161
|
<reponame>Christina-Kang/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CreateDataLakeAnalyticsAccountParameters(Model):
    """The parameters to use for creating a Data Lake Analytics account.
    :param location: The resource location.
    :type location: str
    :param tags: The resource tags.
    :type tags: dict[str, str]
    :param default_data_lake_store_account: The default Data Lake Store
     account associated with this account.
    :type default_data_lake_store_account: str
    :param data_lake_store_accounts: The list of Data Lake Store accounts
     associated with this account.
    :type data_lake_store_accounts:
     list[~azure.mgmt.datalake.analytics.account.models.AddDataLakeStoreWithAccountParameters]
    :param storage_accounts: The list of Azure Blob Storage accounts
     associated with this account.
    :type storage_accounts:
     list[~azure.mgmt.datalake.analytics.account.models.AddStorageAccountWithAccountParameters]
    :param compute_policies: The list of compute policies associated with this
     account.
    :type compute_policies:
     list[~azure.mgmt.datalake.analytics.account.models.CreateComputePolicyWithAccountParameters]
    :param firewall_rules: The list of firewall rules associated with this
     account.
    :type firewall_rules:
     list[~azure.mgmt.datalake.analytics.account.models.CreateFirewallRuleWithAccountParameters]
    :param firewall_state: The current state of the IP address firewall for
     this account. Possible values include: 'Enabled', 'Disabled'
    :type firewall_state: str or
     ~azure.mgmt.datalake.analytics.account.models.FirewallState
    :param firewall_allow_azure_ips: The current state of allowing or
     disallowing IPs originating within Azure through the firewall. If the
     firewall is disabled, this is not enforced. Possible values include:
     'Enabled', 'Disabled'
    :type firewall_allow_azure_ips: str or
     ~azure.mgmt.datalake.analytics.account.models.FirewallAllowAzureIpsState
    :param new_tier: The commitment tier for the next month. Possible values
     include: 'Consumption', 'Commitment_100AUHours', 'Commitment_500AUHours',
     'Commitment_1000AUHours', 'Commitment_5000AUHours',
     'Commitment_10000AUHours', 'Commitment_50000AUHours',
     'Commitment_100000AUHours', 'Commitment_500000AUHours'
    :type new_tier: str or
     ~azure.mgmt.datalake.analytics.account.models.TierType
    :param max_job_count: The maximum supported jobs running under the account
     at the same time. Default value: 3 .
    :type max_job_count: int
    :param max_degree_of_parallelism: The maximum supported degree of
     parallelism for this account. Default value: 30 .
    :type max_degree_of_parallelism: int
    :param max_degree_of_parallelism_per_job: The maximum supported degree of
     parallelism per job for this account.
    :type max_degree_of_parallelism_per_job: int
    :param min_priority_per_job: The minimum supported priority per job for
     this account.
    :type min_priority_per_job: int
    :param query_store_retention: The number of days that job metadata is
     retained. Default value: 30 .
    :type query_store_retention: int
    """
    # Required attributes and numeric bounds, consumed by the msrest ``Model``
    # base class when validating/serializing this object.
    _validation = {
        'location': {'required': True},
        'default_data_lake_store_account': {'required': True},
        'data_lake_store_accounts': {'required': True},
        'max_job_count': {'minimum': 1},
        'max_degree_of_parallelism': {'minimum': 1},
        'max_degree_of_parallelism_per_job': {'minimum': 1},
        'min_priority_per_job': {'minimum': 1},
        'query_store_retention': {'maximum': 180, 'minimum': 1},
    }
    # Maps each Python attribute to its JSON wire key and serialized type name.
    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'default_data_lake_store_account': {'key': 'properties.defaultDataLakeStoreAccount', 'type': 'str'},
        'data_lake_store_accounts': {'key': 'properties.dataLakeStoreAccounts', 'type': '[AddDataLakeStoreWithAccountParameters]'},
        'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[AddStorageAccountWithAccountParameters]'},
        'compute_policies': {'key': 'properties.computePolicies', 'type': '[CreateComputePolicyWithAccountParameters]'},
        'firewall_rules': {'key': 'properties.firewallRules', 'type': '[CreateFirewallRuleWithAccountParameters]'},
        'firewall_state': {'key': 'properties.firewallState', 'type': 'FirewallState'},
        'firewall_allow_azure_ips': {'key': 'properties.firewallAllowAzureIps', 'type': 'FirewallAllowAzureIpsState'},
        'new_tier': {'key': 'properties.newTier', 'type': 'TierType'},
        'max_job_count': {'key': 'properties.maxJobCount', 'type': 'int'},
        'max_degree_of_parallelism': {'key': 'properties.maxDegreeOfParallelism', 'type': 'int'},
        'max_degree_of_parallelism_per_job': {'key': 'properties.maxDegreeOfParallelismPerJob', 'type': 'int'},
        'min_priority_per_job': {'key': 'properties.minPriorityPerJob', 'type': 'int'},
        'query_store_retention': {'key': 'properties.queryStoreRetention', 'type': 'int'},
    }
    # NOTE: this class is generated by AutoRest (see the file header); any
    # change other than comments will be lost on regeneration.
    def __init__(self, location, default_data_lake_store_account, data_lake_store_accounts, tags=None, storage_accounts=None, compute_policies=None, firewall_rules=None, firewall_state=None, firewall_allow_azure_ips=None, new_tier=None, max_job_count=3, max_degree_of_parallelism=30, max_degree_of_parallelism_per_job=None, min_priority_per_job=None, query_store_retention=30):
        super(CreateDataLakeAnalyticsAccountParameters, self).__init__()
        self.location = location
        self.tags = tags
        self.default_data_lake_store_account = default_data_lake_store_account
        self.data_lake_store_accounts = data_lake_store_accounts
        self.storage_accounts = storage_accounts
        self.compute_policies = compute_policies
        self.firewall_rules = firewall_rules
        self.firewall_state = firewall_state
        self.firewall_allow_azure_ips = firewall_allow_azure_ips
        self.new_tier = new_tier
        self.max_job_count = max_job_count
        self.max_degree_of_parallelism = max_degree_of_parallelism
        self.max_degree_of_parallelism_per_job = max_degree_of_parallelism_per_job
        self.min_priority_per_job = min_priority_per_job
        self.query_store_retention = query_store_retention
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CreateDataLakeAnalyticsAccountParameters(Model):
    """The parameters to use for creating a Data Lake Analytics account.

    :param location: The resource location.
    :type location: str
    :param tags: The resource tags.
    :type tags: dict[str, str]
    :param default_data_lake_store_account: The default Data Lake Store
     account associated with this account.
    :type default_data_lake_store_account: str
    :param data_lake_store_accounts: The list of Data Lake Store accounts
     associated with this account.
    :type data_lake_store_accounts:
     list[~azure.mgmt.datalake.analytics.account.models.AddDataLakeStoreWithAccountParameters]
    :param storage_accounts: The list of Azure Blob Storage accounts
     associated with this account.
    :type storage_accounts:
     list[~azure.mgmt.datalake.analytics.account.models.AddStorageAccountWithAccountParameters]
    :param compute_policies: The list of compute policies associated with this
     account.
    :type compute_policies:
     list[~azure.mgmt.datalake.analytics.account.models.CreateComputePolicyWithAccountParameters]
    :param firewall_rules: The list of firewall rules associated with this
     account.
    :type firewall_rules:
     list[~azure.mgmt.datalake.analytics.account.models.CreateFirewallRuleWithAccountParameters]
    :param firewall_state: The current state of the IP address firewall for
     this account. Possible values include: 'Enabled', 'Disabled'
    :type firewall_state: str or
     ~azure.mgmt.datalake.analytics.account.models.FirewallState
    :param firewall_allow_azure_ips: The current state of allowing or
     disallowing IPs originating within Azure through the firewall. If the
     firewall is disabled, this is not enforced. Possible values include:
     'Enabled', 'Disabled'
    :type firewall_allow_azure_ips: str or
     ~azure.mgmt.datalake.analytics.account.models.FirewallAllowAzureIpsState
    :param new_tier: The commitment tier for the next month. Possible values
     include: 'Consumption', 'Commitment_100AUHours', 'Commitment_500AUHours',
     'Commitment_1000AUHours', 'Commitment_5000AUHours',
     'Commitment_10000AUHours', 'Commitment_50000AUHours',
     'Commitment_100000AUHours', 'Commitment_500000AUHours'
    :type new_tier: str or
     ~azure.mgmt.datalake.analytics.account.models.TierType
    :param max_job_count: The maximum supported jobs running under the account
     at the same time. Default value: 3 .
    :type max_job_count: int
    :param max_degree_of_parallelism: The maximum supported degree of
     parallelism for this account. Default value: 30 .
    :type max_degree_of_parallelism: int
    :param max_degree_of_parallelism_per_job: The maximum supported degree of
     parallelism per job for this account.
    :type max_degree_of_parallelism_per_job: int
    :param min_priority_per_job: The minimum supported priority per job for
     this account.
    :type min_priority_per_job: int
    :param query_store_retention: The number of days that job metadata is
     retained. Default value: 30 .
    :type query_store_retention: int
    """

    # Client-side constraints enforced by the msrest serializer; a violation
    # raises ValidationError before any request is sent.
    _validation = {
        'location': {'required': True},
        'default_data_lake_store_account': {'required': True},
        'data_lake_store_accounts': {'required': True},
        'max_job_count': {'minimum': 1},
        'max_degree_of_parallelism': {'minimum': 1},
        'max_degree_of_parallelism_per_job': {'minimum': 1},
        'min_priority_per_job': {'minimum': 1},
        'query_store_retention': {'maximum': 180, 'minimum': 1},
    }

    # Maps each Python attribute to its JSON wire path (the 'properties.*'
    # keys are flattened into a nested sub-object) and its msrest type.
    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'default_data_lake_store_account': {'key': 'properties.defaultDataLakeStoreAccount', 'type': 'str'},
        'data_lake_store_accounts': {'key': 'properties.dataLakeStoreAccounts', 'type': '[AddDataLakeStoreWithAccountParameters]'},
        'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[AddStorageAccountWithAccountParameters]'},
        'compute_policies': {'key': 'properties.computePolicies', 'type': '[CreateComputePolicyWithAccountParameters]'},
        'firewall_rules': {'key': 'properties.firewallRules', 'type': '[CreateFirewallRuleWithAccountParameters]'},
        'firewall_state': {'key': 'properties.firewallState', 'type': 'FirewallState'},
        'firewall_allow_azure_ips': {'key': 'properties.firewallAllowAzureIps', 'type': 'FirewallAllowAzureIpsState'},
        'new_tier': {'key': 'properties.newTier', 'type': 'TierType'},
        'max_job_count': {'key': 'properties.maxJobCount', 'type': 'int'},
        'max_degree_of_parallelism': {'key': 'properties.maxDegreeOfParallelism', 'type': 'int'},
        'max_degree_of_parallelism_per_job': {'key': 'properties.maxDegreeOfParallelismPerJob', 'type': 'int'},
        'min_priority_per_job': {'key': 'properties.minPriorityPerJob', 'type': 'int'},
        'query_store_retention': {'key': 'properties.queryStoreRetention', 'type': 'int'},
    }

    def __init__(self, location, default_data_lake_store_account, data_lake_store_accounts, tags=None, storage_accounts=None, compute_policies=None, firewall_rules=None, firewall_state=None, firewall_allow_azure_ips=None, new_tier=None, max_job_count=3, max_degree_of_parallelism=30, max_degree_of_parallelism_per_job=None, min_priority_per_job=None, query_store_retention=30):
        # NOTE: generated by AutoRest -- edits may be lost on regeneration.
        # Plain attribute assignment only; the constraints in _validation
        # are applied at serialization time, not here.
        super(CreateDataLakeAnalyticsAccountParameters, self).__init__()
        self.location = location
        self.tags = tags
        self.default_data_lake_store_account = default_data_lake_store_account
        self.data_lake_store_accounts = data_lake_store_accounts
        self.storage_accounts = storage_accounts
        self.compute_policies = compute_policies
        self.firewall_rules = firewall_rules
        self.firewall_state = firewall_state
        self.firewall_allow_azure_ips = firewall_allow_azure_ips
        self.new_tier = new_tier
        self.max_job_count = max_job_count
        self.max_degree_of_parallelism = max_degree_of_parallelism
        self.max_degree_of_parallelism_per_job = max_degree_of_parallelism_per_job
        self.min_priority_per_job = min_priority_per_job
        self.query_store_retention = query_store_retention
|
en
| 0.555896
|
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- The parameters to use for creating a Data Lake Analytics account. :param location: The resource location. :type location: str :param tags: The resource tags. :type tags: dict[str, str] :param default_data_lake_store_account: The default Data Lake Store account associated with this account. :type default_data_lake_store_account: str :param data_lake_store_accounts: The list of Data Lake Store accounts associated with this account. :type data_lake_store_accounts: list[~azure.mgmt.datalake.analytics.account.models.AddDataLakeStoreWithAccountParameters] :param storage_accounts: The list of Azure Blob Storage accounts associated with this account. :type storage_accounts: list[~azure.mgmt.datalake.analytics.account.models.AddStorageAccountWithAccountParameters] :param compute_policies: The list of compute policies associated with this account. :type compute_policies: list[~azure.mgmt.datalake.analytics.account.models.CreateComputePolicyWithAccountParameters] :param firewall_rules: The list of firewall rules associated with this account. :type firewall_rules: list[~azure.mgmt.datalake.analytics.account.models.CreateFirewallRuleWithAccountParameters] :param firewall_state: The current state of the IP address firewall for this account. Possible values include: 'Enabled', 'Disabled' :type firewall_state: str or ~azure.mgmt.datalake.analytics.account.models.FirewallState :param firewall_allow_azure_ips: The current state of allowing or disallowing IPs originating within Azure through the firewall. 
If the firewall is disabled, this is not enforced. Possible values include: 'Enabled', 'Disabled' :type firewall_allow_azure_ips: str or ~azure.mgmt.datalake.analytics.account.models.FirewallAllowAzureIpsState :param new_tier: The commitment tier for the next month. Possible values include: 'Consumption', 'Commitment_100AUHours', 'Commitment_500AUHours', 'Commitment_1000AUHours', 'Commitment_5000AUHours', 'Commitment_10000AUHours', 'Commitment_50000AUHours', 'Commitment_100000AUHours', 'Commitment_500000AUHours' :type new_tier: str or ~azure.mgmt.datalake.analytics.account.models.TierType :param max_job_count: The maximum supported jobs running under the account at the same time. Default value: 3 . :type max_job_count: int :param max_degree_of_parallelism: The maximum supported degree of parallelism for this account. Default value: 30 . :type max_degree_of_parallelism: int :param max_degree_of_parallelism_per_job: The maximum supported degree of parallelism per job for this account. :type max_degree_of_parallelism_per_job: int :param min_priority_per_job: The minimum supported priority per job for this account. :type min_priority_per_job: int :param query_store_retention: The number of days that job metadata is retained. Default value: 30 . :type query_store_retention: int
| 1.827979
| 2
|
python/GafferSceneTest/TextTest.py
|
Tuftux/gaffer
| 1
|
6628162
|
<filename>python/GafferSceneTest/TextTest.py
##########################################################################
#
# Copyright (c) 2013, <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import imath
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class TextTest( GafferSceneTest.SceneTestCase ) :
    """Unit tests for the GafferScene.Text node."""

    def testConstruct( self ) :
        # Default node name and default output-location name.
        t = GafferScene.Text()
        self.assertEqual( t.getName(), "Text" )
        self.assertEqual( t["name"].getValue(), "text" )

    def testCompute( self ) :
        # The node generates a single child "/text" holding a mesh; the root
        # itself carries no geometry and an identity transform.
        t = GafferScene.Text()
        self.assertEqual( t["out"].object( "/" ), IECore.NullObject() )
        self.assertEqual( t["out"].transform( "/" ), imath.M44f() )
        self.assertEqual( t["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "text" ] ) )
        m1 = t["out"].object( "/text" )
        self.assertTrue( isinstance( m1, IECoreScene.MeshPrimitive ) )
        # A longer string should produce a mesh with a wider bound.
        t["text"].setValue( "Hello World 2" )
        m2 = t["out"].object( "/text" )
        self.assertTrue( isinstance( m2, IECoreScene.MeshPrimitive ) )
        self.assertGreater( m2.bound().size().x, m1.bound().size().x )

    def testAffects( self ) :
        # Dirty-propagation checks: renaming dirties childNames/set, while
        # changing the text or font dirties object/bound but must not dirty
        # childNames or transform.
        t = GafferScene.Text()
        s = GafferTest.CapturingSlot( t.plugDirtiedSignal() )
        t["name"].setValue( "ground" )
        self.assertEqual(
            { x[0] for x in s if not x[0].getName().startswith( "__" ) },
            { t["name"], t["out"]["childNames"], t["out"]["set"], t["out"] }
        )
        del s[:]
        t["text"].setValue( "cat" )
        self.assertTrue( "out.object" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        self.assertTrue( "out.bound" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        self.assertFalse( "out.childNames" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        self.assertFalse( "out.transform" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        del s[:]
        t["font"].setValue( os.path.expandvars( "$GAFFER_ROOT/fonts/VeraBI.ttf" ) )
        self.assertTrue( "out.object" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        self.assertTrue( "out.bound" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        self.assertFalse( "out.childNames" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        self.assertFalse( "out.transform" in [ x[0].relativeName( x[0].node() ) for x in s ] )

if __name__ == "__main__":
    unittest.main()
|
<filename>python/GafferSceneTest/TextTest.py
##########################################################################
#
# Copyright (c) 2013, <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import imath
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class TextTest( GafferSceneTest.SceneTestCase ) :
    """Unit tests for the GafferScene.Text node."""

    def testConstruct( self ) :
        # Default node name and default output-location name.
        t = GafferScene.Text()
        self.assertEqual( t.getName(), "Text" )
        self.assertEqual( t["name"].getValue(), "text" )

    def testCompute( self ) :
        # The node generates a single child "/text" holding a mesh; the root
        # itself carries no geometry and an identity transform.
        t = GafferScene.Text()
        self.assertEqual( t["out"].object( "/" ), IECore.NullObject() )
        self.assertEqual( t["out"].transform( "/" ), imath.M44f() )
        self.assertEqual( t["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "text" ] ) )
        m1 = t["out"].object( "/text" )
        self.assertTrue( isinstance( m1, IECoreScene.MeshPrimitive ) )
        # A longer string should produce a mesh with a wider bound.
        t["text"].setValue( "Hello World 2" )
        m2 = t["out"].object( "/text" )
        self.assertTrue( isinstance( m2, IECoreScene.MeshPrimitive ) )
        self.assertGreater( m2.bound().size().x, m1.bound().size().x )

    def testAffects( self ) :
        # Dirty-propagation checks: renaming dirties childNames/set, while
        # changing the text or font dirties object/bound but must not dirty
        # childNames or transform.
        t = GafferScene.Text()
        s = GafferTest.CapturingSlot( t.plugDirtiedSignal() )
        t["name"].setValue( "ground" )
        self.assertEqual(
            { x[0] for x in s if not x[0].getName().startswith( "__" ) },
            { t["name"], t["out"]["childNames"], t["out"]["set"], t["out"] }
        )
        del s[:]
        t["text"].setValue( "cat" )
        self.assertTrue( "out.object" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        self.assertTrue( "out.bound" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        self.assertFalse( "out.childNames" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        self.assertFalse( "out.transform" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        del s[:]
        t["font"].setValue( os.path.expandvars( "$GAFFER_ROOT/fonts/VeraBI.ttf" ) )
        self.assertTrue( "out.object" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        self.assertTrue( "out.bound" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        self.assertFalse( "out.childNames" in [ x[0].relativeName( x[0].node() ) for x in s ] )
        self.assertFalse( "out.transform" in [ x[0].relativeName( x[0].node() ) for x in s ] )

if __name__ == "__main__":
    unittest.main()
|
en
| 0.61563
|
########################################################################## # # Copyright (c) 2013, <NAME>. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of <NAME> nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ##########################################################################
| 1.438328
| 1
|
InvertedPendulum-v1/01_tensor.py
|
hyunjun529/Learn-OpenAI-GYM
| 0
|
6628163
|
import logging
import numpy as np
import sys
import tensorflow as tf
import gym
from gym import wrappers
# logging: route everything through a stderr handler with timestamps.
gym.undo_logger_setup()
logger = logging.getLogger()
formatter = logging.Formatter('[%(asctime)s] %(message)s')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# Gym: monitored InvertedPendulum environment; recordings go to ./log/01.
env = gym.make('InvertedPendulum-v1')
outdir = './log/01'
env = wrappers.Monitor(env, directory=outdir, force=True)
env.seed(0)
max_episodes = 50000
num_observation = env.observation_space.shape[0]
num_action = env.action_space.shape[0]
batch_size = 50  # episodes between policy-gradient updates
# TensorFlow (TF1 graph mode) policy network: one hidden ReLU layer with
# Xavier initialisation and a sigmoid output per action.
# https://www.tensorflow.org/get_started/mnist/pros
# https://github.com/hunkim/ReinforcementZeroToAll/blob/master/08_2_softmax_pg_cartpole.py
hidden_layer = 10
learning_rate = 1e-5
gamma = .99  # NOTE(review): unused here -- discount_rewards uses its own default
X = tf.placeholder(tf.float32, [None, num_observation], name="input_x")
W1 = tf.get_variable("W1", shape=[num_observation, hidden_layer],
                     initializer=tf.contrib.layers.xavier_initializer())
layer1 = tf.nn.relu(tf.matmul(X, W1))
W2 = tf.get_variable("W2", shape=[hidden_layer, num_action],
                     initializer=tf.contrib.layers.xavier_initializer())
action_pred = tf.nn.sigmoid(tf.matmul(layer1, W2))
Y = tf.placeholder(tf.float32, [None, num_action], name="input_y")
advantages = tf.placeholder(tf.float32, name="reward_signal")
log_lik = -Y*tf.log(action_pred) - (1 - Y)*tf.log(1 - action_pred)  # using logistic regression cost function
loss = tf.reduce_sum(log_lik * advantages)
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# discount reward function
def discount_rewards(rewards, gamma=0.99):
    """Weight a 1d float array of rewards by decaying powers of gamma, then
    standardize the result to zero mean and (where possible) unit variance.

    e.g. f([1, 1, 1], 0.99) -> [1, 0.99, 0.9801] -> [1.22 -0.004 -1.22]

    Note: each reward is weighted by gamma**i at its own index; this is NOT
    the cumulative discounted return over future rewards.
    """
    d_rewards = np.array([val * (gamma ** i) for i, val in enumerate(rewards)])
    # Normalize/standardize rewards
    d_rewards -= d_rewards.mean()
    # Guard against zero variance (e.g. a single-step episode), which
    # previously divided by zero and fed NaNs into the loss.
    std = d_rewards.std()
    if std > 0:
        d_rewards /= std
    return d_rewards
# run TensorFlow and TensorBoard
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# run Gym: collect (state, action, discounted-reward) rows per episode and
# run one REINFORCE-style update every `batch_size` episodes.
ary_state = np.empty(0).reshape(0, num_observation)
ary_action = np.empty(0).reshape(0, num_action)
ary_reward = np.empty(0).reshape(0, 1)
batch_reward = np.empty(0).reshape(0, 1)
for episode in range(max_episodes):
    done = False
    cnt_step = 0
    ob = env.reset()
    ary_reward = np.empty(0).reshape(0, 1)
    while not done:
        # env.render()
        x = np.reshape(ob, [1, num_observation])
        ary_state = np.vstack([ary_state, x])
        action_prob = sess.run(action_pred, feed_dict={X: x})
        action_prob = np.squeeze(action_prob)
        # Exploration: add uniform noise with a probability that decays
        # linearly over training.
        random_noise = np.random.uniform(0, 1, num_action)
        if np.random.rand(1) < (1 - episode / max_episodes):
            action_prob = action_prob + random_noise
        # NOTE(review): with num_action == 1 this argmax is always 0 --
        # confirm this discretization of the continuous action is intended.
        action = np.argmax(action_prob)
        y = np.eye(num_action)[action:action + 1]
        ary_action = np.vstack([ary_action, y])
        ob, reward, done, _ = env.step(action)
        cnt_step += reward
        ary_reward = np.vstack([ary_reward, reward])
    '''
    if cnt_step >= 1000:
        done = True
    '''
    discounted_rewards = discount_rewards(ary_reward)
    batch_reward = np.vstack([batch_reward, discounted_rewards])
    if episode % batch_size == 0:
        l, _ = sess.run(
            [loss, train],
            feed_dict={X: ary_state, Y: ary_action, advantages: batch_reward})
        logger.info("========LEARN=========")
        ary_state = np.empty(0).reshape(0, num_observation)
        ary_action = np.empty(0).reshape(0, num_action)
        ary_reward = np.empty(0).reshape(0, 1)
        batch_reward = np.empty(0).reshape(0, 1)
    logger.info(str(episode) + "\t: " + str(int(cnt_step)) + "\t: " + str(l))
    # NOTE(review): blocks every episode waiting for keyboard input --
    # looks like leftover debugging; remove for unattended training.
    input("Y?")
# result
'''
ob = env.reset()
reward_sum = 0
while True:
    env.render()
    x = np.reshape(ob, [1, num_observation])
    action_prob = sess.run(action_pred, feed_dict={X: x})
    action = np.argmax(action_prob)
    ob, reward, done, _ = env.step(action)
    reward_sum += reward
    if done:
        print("Total score: {}".format(reward_sum))
        break
'''
env.close()
# gym.upload(outdir)
|
import logging
import numpy as np
import sys
import tensorflow as tf
import gym
from gym import wrappers
# logging: route everything through a stderr handler with timestamps.
gym.undo_logger_setup()
logger = logging.getLogger()
formatter = logging.Formatter('[%(asctime)s] %(message)s')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# Gym: monitored InvertedPendulum environment; recordings go to ./log/01.
env = gym.make('InvertedPendulum-v1')
outdir = './log/01'
env = wrappers.Monitor(env, directory=outdir, force=True)
env.seed(0)
max_episodes = 50000
num_observation = env.observation_space.shape[0]
num_action = env.action_space.shape[0]
batch_size = 50  # episodes between policy-gradient updates
# TensorFlow (TF1 graph mode) policy network: one hidden ReLU layer with
# Xavier initialisation and a sigmoid output per action.
# https://www.tensorflow.org/get_started/mnist/pros
# https://github.com/hunkim/ReinforcementZeroToAll/blob/master/08_2_softmax_pg_cartpole.py
hidden_layer = 10
learning_rate = 1e-5
gamma = .99  # NOTE(review): unused here -- discount_rewards uses its own default
X = tf.placeholder(tf.float32, [None, num_observation], name="input_x")
W1 = tf.get_variable("W1", shape=[num_observation, hidden_layer],
                     initializer=tf.contrib.layers.xavier_initializer())
layer1 = tf.nn.relu(tf.matmul(X, W1))
W2 = tf.get_variable("W2", shape=[hidden_layer, num_action],
                     initializer=tf.contrib.layers.xavier_initializer())
action_pred = tf.nn.sigmoid(tf.matmul(layer1, W2))
Y = tf.placeholder(tf.float32, [None, num_action], name="input_y")
advantages = tf.placeholder(tf.float32, name="reward_signal")
log_lik = -Y*tf.log(action_pred) - (1 - Y)*tf.log(1 - action_pred)  # using logistic regression cost function
loss = tf.reduce_sum(log_lik * advantages)
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# discount reward function
def discount_rewards(rewards, gamma=0.99):
    """Weight a 1d float array of rewards by decaying powers of gamma, then
    standardize the result to zero mean and (where possible) unit variance.

    e.g. f([1, 1, 1], 0.99) -> [1, 0.99, 0.9801] -> [1.22 -0.004 -1.22]

    Note: each reward is weighted by gamma**i at its own index; this is NOT
    the cumulative discounted return over future rewards.
    """
    d_rewards = np.array([val * (gamma ** i) for i, val in enumerate(rewards)])
    # Normalize/standardize rewards
    d_rewards -= d_rewards.mean()
    # Guard against zero variance (e.g. a single-step episode), which
    # previously divided by zero and fed NaNs into the loss.
    std = d_rewards.std()
    if std > 0:
        d_rewards /= std
    return d_rewards
# run TensorFlow and TensorBoard
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# run Gym: collect (state, action, discounted-reward) rows per episode and
# run one REINFORCE-style update every `batch_size` episodes.
ary_state = np.empty(0).reshape(0, num_observation)
ary_action = np.empty(0).reshape(0, num_action)
ary_reward = np.empty(0).reshape(0, 1)
batch_reward = np.empty(0).reshape(0, 1)
for episode in range(max_episodes):
    done = False
    cnt_step = 0
    ob = env.reset()
    ary_reward = np.empty(0).reshape(0, 1)
    while not done:
        # env.render()
        x = np.reshape(ob, [1, num_observation])
        ary_state = np.vstack([ary_state, x])
        action_prob = sess.run(action_pred, feed_dict={X: x})
        action_prob = np.squeeze(action_prob)
        # Exploration: add uniform noise with a probability that decays
        # linearly over training.
        random_noise = np.random.uniform(0, 1, num_action)
        if np.random.rand(1) < (1 - episode / max_episodes):
            action_prob = action_prob + random_noise
        # NOTE(review): with num_action == 1 this argmax is always 0 --
        # confirm this discretization of the continuous action is intended.
        action = np.argmax(action_prob)
        y = np.eye(num_action)[action:action + 1]
        ary_action = np.vstack([ary_action, y])
        ob, reward, done, _ = env.step(action)
        cnt_step += reward
        ary_reward = np.vstack([ary_reward, reward])
    '''
    if cnt_step >= 1000:
        done = True
    '''
    discounted_rewards = discount_rewards(ary_reward)
    batch_reward = np.vstack([batch_reward, discounted_rewards])
    if episode % batch_size == 0:
        l, _ = sess.run(
            [loss, train],
            feed_dict={X: ary_state, Y: ary_action, advantages: batch_reward})
        logger.info("========LEARN=========")
        ary_state = np.empty(0).reshape(0, num_observation)
        ary_action = np.empty(0).reshape(0, num_action)
        ary_reward = np.empty(0).reshape(0, 1)
        batch_reward = np.empty(0).reshape(0, 1)
    logger.info(str(episode) + "\t: " + str(int(cnt_step)) + "\t: " + str(l))
    # NOTE(review): blocks every episode waiting for keyboard input --
    # looks like leftover debugging; remove for unattended training.
    input("Y?")
# result
'''
ob = env.reset()
reward_sum = 0
while True:
    env.render()
    x = np.reshape(ob, [1, num_observation])
    action_prob = sess.run(action_pred, feed_dict={X: x})
    action = np.argmax(action_prob)
    ob, reward, done, _ = env.step(action)
    reward_sum += reward
    if done:
        print("Total score: {}".format(reward_sum))
        break
'''
env.close()
# gym.upload(outdir)
|
en
| 0.52546
|
# logging # Gym # TensorFlow # https://www.tensorflow.org/get_started/mnist/pros #https://github.com/hunkim/ReinforcementZeroToAll/blob/master/08_2_softmax_pg_cartpole.py # using logistic regression cost function # dicount reward function Takes 1d float array of rewards and computes discounted reward e.g. f([1, 1, 1], 0.99) -> [1, 0.99, 0.9801] -> [1.22 -0.004 -1.22] # Normalize/standardize rewards # run TensorFlow and TensorBoard # run Gym # env.render() if cnt_step >= 1000: done = True # result ob = env.reset() reward_sum = 0 while True: env.render() x = np.reshape(ob, [1, num_observation]) action_prob = sess.run(action_pred, feed_dict={X: x}) action = np.argmax(action_prob) ob, reward, done, _ = env.step(action) reward_sum += reward if done: print("Total score: {}".format(reward_sum)) break # gym.upload(outdir)
| 2.416929
| 2
|
vsts/vsts/file_container/v4_0/file_container_client.py
|
dhilmathy/azure-devops-python-api
| 0
|
6628164
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...vss_client import VssClient
from . import models
class FileContainerClient(VssClient):
"""FileContainer
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
    def __init__(self, base_url=None, creds=None):
        super(FileContainerClient, self).__init__(base_url, creds)
        # Register every model class from the generated models module with
        # the msrest serializer/deserializer pair used by this client.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

    # This client is not bound to a specific resource area.
    resource_area_identifier = None
    def create_items(self, items, container_id, scope=None):
        """CreateItems.
        [Preview API] Creates the specified items in the referenced container.
        :param :class:`<VssJsonCollectionWrapper> <file-container.v4_0.models.VssJsonCollectionWrapper>` items:
        :param int container_id:
        :param str scope: A guid representing the scope of the container. This is often the project id.
        :rtype: [FileContainerItem]
        """
        route_values = {}
        if container_id is not None:
            route_values['containerId'] = self._serialize.url('container_id', container_id, 'int')
        query_parameters = {}
        if scope is not None:
            query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
        content = self._serialize.body(items, 'VssJsonCollectionWrapper')
        # POST the wrapped item collection, then unwrap the enveloped
        # response into a list of FileContainerItem models.
        response = self._send(http_method='POST',
                              location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
                              version='4.0-preview.4',
                              route_values=route_values,
                              query_parameters=query_parameters,
                              content=content)
        return self._deserialize('[FileContainerItem]', self._unwrap_collection(response))
    def delete_item(self, container_id, item_path, scope=None):
        """DeleteItem.
        [Preview API] Deletes the specified items in a container.
        :param long container_id: Container Id.
        :param str item_path: Path to delete.
        :param str scope: A guid representing the scope of the container. This is often the project id.
        """
        route_values = {}
        if container_id is not None:
            route_values['containerId'] = self._serialize.url('container_id', container_id, 'long')
        query_parameters = {}
        if item_path is not None:
            query_parameters['itemPath'] = self._serialize.query('item_path', item_path, 'str')
        if scope is not None:
            query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
        # Fire-and-forget DELETE; the service returns no body to deserialize.
        self._send(http_method='DELETE',
                   location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
                   version='4.0-preview.4',
                   route_values=route_values,
                   query_parameters=query_parameters)
def get_containers(self, scope=None, artifact_uris=None):
    """GetContainers.
    [Preview API] Gets containers filtered by a comma separated list of artifact uris within the same scope, if not specified returns all containers
    :param str scope: A guid representing the scope of the container. This is often the project id.
    :param str artifact_uris: Comma separated list of artifact uris to filter by.
    :rtype: [FileContainer]
    """
    # Collection-level endpoint: no containerId route value is used here.
    query_parameters = {}
    if scope is not None:
        query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
    if artifact_uris is not None:
        query_parameters['artifactUris'] = self._serialize.query('artifact_uris', artifact_uris, 'str')
    response = self._send(http_method='GET',
                          location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
                          version='4.0-preview.4',
                          query_parameters=query_parameters)
    return self._deserialize('[FileContainer]', self._unwrap_collection(response))
def get_items(self, container_id, scope=None, item_path=None, metadata=None, format=None, download_file_name=None, include_download_tickets=None, is_shallow=None):
    """GetItems.
    [Preview API] Gets items from the referenced container, optionally filtered to a path.
    :param long container_id: Id of the container to read from.
    :param str scope: A guid representing the scope of the container. This is often the project id.
    :param str item_path: Path of the item(s) to return, if restricting the query.
    :param bool metadata:
    :param str format: Sent as the '$format' query parameter -- presumably a response format selector; confirm against the REST docs.
    :param str download_file_name:
    :param bool include_download_tickets:
    :param bool is_shallow:
    :rtype: [FileContainerItem]
    """
    route_values = {}
    if container_id is not None:
        route_values['containerId'] = self._serialize.url('container_id', container_id, 'long')
    # Every optional argument maps 1:1 onto a query parameter; None means
    # "omit the parameter" and lets the service use its default.
    query_parameters = {}
    if scope is not None:
        query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
    if item_path is not None:
        query_parameters['itemPath'] = self._serialize.query('item_path', item_path, 'str')
    if metadata is not None:
        query_parameters['metadata'] = self._serialize.query('metadata', metadata, 'bool')
    if format is not None:
        query_parameters['$format'] = self._serialize.query('format', format, 'str')
    if download_file_name is not None:
        query_parameters['downloadFileName'] = self._serialize.query('download_file_name', download_file_name, 'str')
    if include_download_tickets is not None:
        query_parameters['includeDownloadTickets'] = self._serialize.query('include_download_tickets', include_download_tickets, 'bool')
    if is_shallow is not None:
        query_parameters['isShallow'] = self._serialize.query('is_shallow', is_shallow, 'bool')
    response = self._send(http_method='GET',
                          location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
                          version='4.0-preview.4',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[FileContainerItem]', self._unwrap_collection(response))
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...vss_client import VssClient
from . import models
class FileContainerClient(VssClient):
    """FileContainer

    Generated REST client for the FileContainer resource area (see the
    "Generated file, DO NOT EDIT" header at the top of this file).
    All operations share the same location_id GUID.

    :param str base_url: Service URL
    :param Authentication creds: Authenticated credentials.
    """

    def __init__(self, base_url=None, creds=None):
        super(FileContainerClient, self).__init__(base_url, creds)
        # Register every model class from the generated models module with
        # msrest so requests/responses can be (de)serialized by type name.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

    # No resource-area identifier is assigned for this client.
    resource_area_identifier = None

    def create_items(self, items, container_id, scope=None):
        """CreateItems.
        [Preview API] Creates the specified items in the referenced container.
        :param :class:`<VssJsonCollectionWrapper> <file-container.v4_0.models.VssJsonCollectionWrapper>` items:
        :param int container_id:
        :param str scope: A guid representing the scope of the container. This is often the project id.
        :rtype: [FileContainerItem]
        """
        route_values = {}
        if container_id is not None:
            # NOTE(review): 'int' here vs 'long' in the other methods -- see
            # the service contract before regenerating.
            route_values['containerId'] = self._serialize.url('container_id', container_id, 'int')
        query_parameters = {}
        if scope is not None:
            query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
        content = self._serialize.body(items, 'VssJsonCollectionWrapper')
        response = self._send(http_method='POST',
                              location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
                              version='4.0-preview.4',
                              route_values=route_values,
                              query_parameters=query_parameters,
                              content=content)
        return self._deserialize('[FileContainerItem]', self._unwrap_collection(response))

    def delete_item(self, container_id, item_path, scope=None):
        """DeleteItem.
        [Preview API] Deletes the specified items in a container.
        :param long container_id: Container Id.
        :param str item_path: Path to delete.
        :param str scope: A guid representing the scope of the container. This is often the project id.
        """
        route_values = {}
        if container_id is not None:
            route_values['containerId'] = self._serialize.url('container_id', container_id, 'long')
        query_parameters = {}
        if item_path is not None:
            query_parameters['itemPath'] = self._serialize.query('item_path', item_path, 'str')
        if scope is not None:
            query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
        # DELETE returns no body, so there is nothing to deserialize.
        self._send(http_method='DELETE',
                   location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
                   version='4.0-preview.4',
                   route_values=route_values,
                   query_parameters=query_parameters)

    def get_containers(self, scope=None, artifact_uris=None):
        """GetContainers.
        [Preview API] Gets containers filtered by a comma separated list of artifact uris within the same scope, if not specified returns all containers
        :param str scope: A guid representing the scope of the container. This is often the project id.
        :param str artifact_uris: Comma separated list of artifact uris to filter by.
        :rtype: [FileContainer]
        """
        query_parameters = {}
        if scope is not None:
            query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
        if artifact_uris is not None:
            query_parameters['artifactUris'] = self._serialize.query('artifact_uris', artifact_uris, 'str')
        response = self._send(http_method='GET',
                              location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
                              version='4.0-preview.4',
                              query_parameters=query_parameters)
        return self._deserialize('[FileContainer]', self._unwrap_collection(response))

    def get_items(self, container_id, scope=None, item_path=None, metadata=None, format=None, download_file_name=None, include_download_tickets=None, is_shallow=None):
        """GetItems.
        [Preview API] Gets items from the referenced container.
        :param long container_id:
        :param str scope:
        :param str item_path:
        :param bool metadata:
        :param str format:
        :param str download_file_name:
        :param bool include_download_tickets:
        :param bool is_shallow:
        :rtype: [FileContainerItem]
        """
        route_values = {}
        if container_id is not None:
            route_values['containerId'] = self._serialize.url('container_id', container_id, 'long')
        # None means "omit the query parameter"; the service applies defaults.
        query_parameters = {}
        if scope is not None:
            query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
        if item_path is not None:
            query_parameters['itemPath'] = self._serialize.query('item_path', item_path, 'str')
        if metadata is not None:
            query_parameters['metadata'] = self._serialize.query('metadata', metadata, 'bool')
        if format is not None:
            query_parameters['$format'] = self._serialize.query('format', format, 'str')
        if download_file_name is not None:
            query_parameters['downloadFileName'] = self._serialize.query('download_file_name', download_file_name, 'str')
        if include_download_tickets is not None:
            query_parameters['includeDownloadTickets'] = self._serialize.query('include_download_tickets', include_download_tickets, 'bool')
        if is_shallow is not None:
            query_parameters['isShallow'] = self._serialize.query('is_shallow', is_shallow, 'bool')
        response = self._send(http_method='GET',
                              location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
                              version='4.0-preview.4',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[FileContainerItem]', self._unwrap_collection(response))
|
en
| 0.610497
|
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- FileContainer :param str base_url: Service URL :param Authentication creds: Authenticated credentials. CreateItems. [Preview API] Creates the specified items in in the referenced container. :param :class:`<VssJsonCollectionWrapper> <file-container.v4_0.models.VssJsonCollectionWrapper>` items: :param int container_id: :param str scope: A guid representing the scope of the container. This is often the project id. :rtype: [FileContainerItem] DeleteItem. [Preview API] Deletes the specified items in a container. :param long container_id: Container Id. :param str item_path: Path to delete. :param str scope: A guid representing the scope of the container. This is often the project id. GetContainers. [Preview API] Gets containers filtered by a comma separated list of artifact uris within the same scope, if not specified returns all containers :param str scope: A guid representing the scope of the container. This is often the project id. :param str artifact_uris: :rtype: [FileContainer] GetItems. [Preview API] :param long container_id: :param str scope: :param str item_path: :param bool metadata: :param str format: :param str download_file_name: :param bool include_download_tickets: :param bool is_shallow: :rtype: [FileContainerItem]
| 1.985438
| 2
|
administracion/serializers.py
|
ederivero/MinimarketDjango
| 0
|
6628165
|
<reponame>ederivero/MinimarketDjango<gh_stars>0
from rest_framework import serializers
from .models import ProductoModel, AlmacenModel, ProductoAlmacenModel, CabeceraVentaModel, DetalleVentaModel
class ProductoSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of ProductoModel."""

    class Meta:
        model = ProductoModel
        fields = "__all__"
        # To expose all fields except some, use instead:
        #   exclude = ["campo1", "campo2", ...]
        # Use either `fields` or `exclude`, never both at the same time.

    def update(self):
        """Apply the validated payload onto the bound instance and save it.

        NOTE(review): this overrides DRF's ``update(self, instance,
        validated_data)`` with a different signature, so it is presumably
        called directly as ``serializer.update()`` rather than through
        ``serializer.save()`` -- confirm against the views.
        ``self.instance`` is the instance the serializer was constructed
        with; ``self.validated_data`` only exists after ``is_valid()`` has
        been called in the view.
        """
        # Fall back to the current value for any field absent from the payload.
        self.instance.productoNombre = self.validated_data.get("productoNombre", self.instance.productoNombre)
        self.instance.productoPrecio = self.validated_data.get("productoPrecio", self.instance.productoPrecio)
        self.instance.productoMinimo = self.validated_data.get("productoMinimo", self.instance.productoMinimo)
        self.instance.save()
        return self.instance

    def delete(self):
        """Soft-delete: flip the ``estado`` flag instead of removing the row."""
        self.instance.estado = False
        self.instance.save()
        return self.instance
class AlmacenSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of AlmacenModel."""

    class Meta:
        model = AlmacenModel
        fields = '__all__'
class ProductoAlmacenSerializer(serializers.ModelSerializer):
    """Product/warehouse link: nested objects on read, raw FK ids on write."""

    almacen = AlmacenSerializer(source="almacenId", read_only=True)
    # Option 1: declare the nested serializer under a new name, pointing at
    # the model field through `source=`.
    producto = ProductoSerializer(source="productoId", read_only=True)
    # Option 2: reuse the model field's own name; then `source` is redundant
    # and must be omitted:
    # productoId = ProductoSerializer(read_only=True)

    class Meta:
        model = ProductoAlmacenModel
        fields = '__all__'
        # https://www.django-rest-framework.org/api-guide/serializers/#additional-keyword-arguments
        # extra_kwargs tweaks the auto-generated fields (max_length,
        # min_length, ...) without redeclaring them. Here the raw FK ids are
        # write-only so responses show only the nested objects above.
        extra_kwargs = {
            "productoId":{
                "write_only":True
            },
            "almacenId": {
                "write_only": True
            }
        }
        # Alternative to write_only (option 1): drop the raw ids entirely:
        # exclude = ['productoId', 'almacenId']
# Serializer used to return, for a given product, its warehouses.
class ProductoAlmacenAlmacenVistaSerializer(serializers.ModelSerializer):
    """Link row rendered as just its nested warehouse (product side hidden)."""

    almacen = AlmacenSerializer(source="almacenId", read_only=True)

    class Meta:
        model = ProductoAlmacenModel
        fields = ['almacen']
# Serializer used to return, for a given warehouse, its products.
class ProductoAlmacenProductoVistaSerializer(serializers.ModelSerializer):
    """Link row rendered as just its nested product (warehouse side hidden)."""

    producto = ProductoSerializer(source="productoId", read_only=True)

    class Meta:
        model = ProductoAlmacenModel
        fields = ['producto']
class AlmacenSerializerMany(serializers.ModelSerializer):
    """Warehouse together with all of its products."""

    # Reverse relation: from the parent (warehouse) we render all child link
    # rows, reached through the `related_name` declared on the foreign key
    # ("almacenesProductos"); many=True because a parent has many children.
    productosAlmacen = ProductoAlmacenProductoVistaSerializer(source="almacenesProductos", many=True, read_only=True)

    class Meta:
        model = AlmacenModel
        fields = '__all__'
class CabeceraVentaSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of the sale header model."""

    class Meta:
        model = CabeceraVentaModel
        fields = '__all__'
# https://www.django-rest-framework.org/api-guide/fields/
class ItemDiccionario(serializers.Serializer):
    """One sale line item: product id plus quantity.

    A plain Serializer subclass validates a dict-shaped payload; it is not
    tied to any model.
    """
    id = serializers.IntegerField()
    cantidad = serializers.IntegerField()
# Serializers are not only for models: they can validate free-standing data
# as well. Use serializers.ListField for a list of arbitrary content; to get
# a list of a *known* shape, nest another serializer with many=True (every
# serializer is, at bottom, a dictionary).
class VentaSerializer(serializers.Serializer):
    """Payload validator for creating a sale (not backed by a model)."""

    # many=True renders/validates the nested serializer as a list of items.
    articulos = ItemDiccionario(many=True)
    fecha = serializers.DateTimeField()
    nombre = serializers.CharField(max_length=45)
class VentaDetalleSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of the sale detail model."""

    class Meta:
        model = DetalleVentaModel
        fields = '__all__'
class VentaCompletaSerializer(serializers.ModelSerializer):
    """Sale header together with all of its detail rows."""

    # Reverse relation (one header -> many details) reached via the FK's
    # related_name 'cabeceraVentas'; many=True so the children come back as
    # a list the serializer can iterate.
    cuerpo = VentaDetalleSerializer(source='cabeceraVentas', many=True, read_only=True)

    class Meta:
        model = CabeceraVentaModel
        fields= '__all__'
|
from rest_framework import serializers
from .models import ProductoModel, AlmacenModel, ProductoAlmacenModel, CabeceraVentaModel, DetalleVentaModel
class ProductoSerializer(serializers.ModelSerializer):
class Meta:
model = ProductoModel
fields = "__all__"
# si quisiese todos los campos menos uno u otro
# exclude = ["campo1","campo2"...]
# o uso el fields o uso el exclude, mas no se pueden usar los dos al mismo tiempo
def update(self):
# print(self.validated_data["productoNombre"])
self.instance.productoNombre = self.validated_data.get("productoNombre", self.instance.productoNombre)
self.instance.productoPrecio = self.validated_data.get("productoPrecio", self.instance.productoPrecio)
self.instance.productoMinimo = self.validated_data.get("productoMinimo", self.instance.productoMinimo)
self.instance.save()
return self.instance
# self.instance retorna la instancia actual que hay en mi clase, esta se logra gracias a la instancia dada al llamar al serializador
# self.validated_data => esta es la data ya validada luego de llamar al metodo is_valid() en el controlador, si no se llama a este metodo este atributo va a ser None
def delete(self):
self.instance.estado = False
self.instance.save()
return self.instance
class AlmacenSerializer(serializers.ModelSerializer):
class Meta:
model = AlmacenModel
fields = '__all__'
class ProductoAlmacenSerializer(serializers.ModelSerializer):
almacen = AlmacenSerializer(source="almacenId", read_only=True)
# FORMA 1
producto = ProductoSerializer(source="productoId", read_only=True)
# FORMA 2
# cuando yo uso el mismo campo con su nombre que le voy a pasar como recurso al serializador ya no es necesario ponerlo como parametro del serializador
# productoId = ProductoSerializer(read_only=True)
class Meta:
model = ProductoAlmacenModel
fields = '__all__'
# https://www.django-rest-framework.org/api-guide/serializers/#additional-keyword-arguments
# la configuracion adicional que yo le pueda poner a los campos de mi modelo se la pongo en el atributo llamado extra_kwargs, le puedo modificar parametros del mismo modelo como su longitud maxima (max_length) o logitud minima (min_length)
extra_kwargs = {
"productoId":{
"write_only":True
},
"almacenId": {
"write_only": True
}
}
# FORMA 1
# para evitar que me muestre de nuevo ese productoId lo quito de la lista
# exclude = ['productoId', 'almacenId']
# este serializador lo voy a usar para cuando quiera devolver de mis productos sus almacenes
class ProductoAlmacenAlmacenVistaSerializer(serializers.ModelSerializer):
almacen = AlmacenSerializer(source="almacenId", read_only=True)
class Meta:
model = ProductoAlmacenModel
fields = ['almacen']
# este serializador lo voy a usar para cuando quiera devolver de mis almacenes sus productos
class ProductoAlmacenProductoVistaSerializer(serializers.ModelSerializer):
producto = ProductoSerializer(source="productoId", read_only=True)
class Meta:
model = ProductoAlmacenModel
fields = ['producto']
class AlmacenSerializerMany(serializers.ModelSerializer):
# esto es una relacion inversa porque yo a partir del padre estoy devolviendo a todos sus hijos que le pertenecen y necesito para ello el campo related_name definido en la foreign key
productosAlmacen = ProductoAlmacenProductoVistaSerializer(source="almacenesProductos", many=True, read_only=True)
class Meta:
model = AlmacenModel
fields = '__all__'
class CabeceraVentaSerializer(serializers.ModelSerializer):
class Meta:
model = CabeceraVentaModel
fields = '__all__'
# https://www.django-rest-framework.org/api-guide/fields/
class ItemDiccionario(serializers.Serializer):
# un Serializer si se hereda es automaticamente un diccionario
id = serializers.IntegerField()
cantidad = serializers.IntegerField()
# no solamente se usa serializadores para modelos, tambien se pueden usar para validar campos independientes de algun modelo
# solamente cuando nosotros queremos usar una lista sin importar que contenga usamos el serializer.ListField, si muy por el contrario queremos usar otro serializador (herencia) tenemos que simplemente llamarlo y con poner como parametro "many=True" ya se convertirá en una Lista y recordar que todo serializador es al final un diccionario
class VentaSerializer(serializers.Serializer):
articulos = ItemDiccionario(many=True)
fecha = serializers.DateTimeField()
nombre = serializers.CharField(max_length=45)
class VentaDetalleSerializer(serializers.ModelSerializer):
class Meta:
model = DetalleVentaModel
fields = '__all__'
class VentaCompletaSerializer(serializers.ModelSerializer):
# siempre que yo quiera usar una relacion en un serializer debo de indicar que many=True puesto que al tener el padre uno o muchos hijos va a devolver una lista de todos los hijos y para que lo itere el serializador
cuerpo = VentaDetalleSerializer(source='cabeceraVentas', many=True, read_only=True)
class Meta:
model = CabeceraVentaModel
fields= '__all__'
|
es
| 0.925549
|
# si quisiese todos los campos menos uno u otro # exclude = ["campo1","campo2"...] # o uso el fields o uso el exclude, mas no se pueden usar los dos al mismo tiempo # print(self.validated_data["productoNombre"]) # self.instance retorna la instancia actual que hay en mi clase, esta se logra gracias a la instancia dada al llamar al serializador # self.validated_data => esta es la data ya validada luego de llamar al metodo is_valid() en el controlador, si no se llama a este metodo este atributo va a ser None # FORMA 1 # FORMA 2 # cuando yo uso el mismo campo con su nombre que le voy a pasar como recurso al serializador ya no es necesario ponerlo como parametro del serializador # productoId = ProductoSerializer(read_only=True) # https://www.django-rest-framework.org/api-guide/serializers/#additional-keyword-arguments # la configuracion adicional que yo le pueda poner a los campos de mi modelo se la pongo en el atributo llamado extra_kwargs, le puedo modificar parametros del mismo modelo como su longitud maxima (max_length) o logitud minima (min_length) # FORMA 1 # para evitar que me muestre de nuevo ese productoId lo quito de la lista # exclude = ['productoId', 'almacenId'] # este serializador lo voy a usar para cuando quiera devolver de mis productos sus almacenes # este serializador lo voy a usar para cuando quiera devolver de mis almacenes sus productos # esto es una relacion inversa porque yo a partir del padre estoy devolviendo a todos sus hijos que le pertenecen y necesito para ello el campo related_name definido en la foreign key # https://www.django-rest-framework.org/api-guide/fields/ # un Serializer si se hereda es automaticamente un diccionario # no solamente se usa serializadores para modelos, tambien se pueden usar para validar campos independientes de algun modelo # solamente cuando nosotros queremos usar una lista sin importar que contenga usamos el serializer.ListField, si muy por el contrario queremos usar otro serializador (herencia) tenemos que 
simplemente llamarlo y con poner como parametro "many=True" ya se convertirá en una Lista y recordar que todo serializador es al final un diccionario # siempre que yo quiera usar una relacion en un serializer debo de indicar que many=True puesto que al tener el padre uno o muchos hijos va a devolver una lista de todos los hijos y para que lo itere el serializador
| 2.336441
| 2
|
pdb2pqr-1.9.0/contrib/ZSI-2.1-a1/test/wsdl2py/test_TerraService.py
|
Acpharis/protein_prep
| 0
|
6628166
|
#!/usr/bin/env python
############################################################################
# <NAME>, LBNL
# See LBNLCopyright for copyright notice!
###########################################################################
import sys, unittest
from ServiceTest import ServiceTestCase, ServiceTestSuite
import re
from ZSI import EvaluateException
"""
Unittest for contacting the TerraService Web service.
WSDL: http://terraservice.net/TerraService.asmx?WSDL
"""
# Test-harness configuration consumed by ServiceTestCase.
CONFIG_FILE = 'config.txt'
CONFIG_SECTION = 'complex_types'
SERVICE_NAME = 'TerraService'
PORT_NAME = 'TerraServiceSoap'
# Expected serialization-failure message when a non-string City is supplied
# (see TerraServiceTestFailures.test_ConvertPlaceToLonLatPt_x1).
EXCEPTION_STRING_SERIALIZE = r"Serializing ConvertPlaceToLonLatPt xmlns=\"http://terraserver-usa.com/terraserver/\"._place, Exception Serializing place xmlns=\"http://terraserver-usa.com/terraserver/\"._City, AttributeError 'int' object has no attribute \'replace\'"
SERIALIZE_PATTERN = re.compile(EXCEPTION_STRING_SERIALIZE)
class TerraServiceTest(ServiceTestCase):
    """Test case for TerraService Web service.

    Each test builds the typed input message for one WSDL operation,
    populates it, and invokes the service through ServiceTestCase.RPC.
    """
    name = "test_TerraService"

    def test_ConvertPlaceToLonLatPt(self):
        operationName = 'ConvertPlaceToLonLatPt'
        request = self.getInputMessageInstance(operationName)
        request._place = self._moduleDict[self._typeModuleName].ns1.Place_Def()
        request._place._City = 'Oak Harbor'
        request._place._State = 'Washington'
        request._place._Country = 'United States'
        response = self.RPC(operationName, request)

    def test_ConvertLonLatPtToNearestPlace(self):
        operationName = 'ConvertLonLatPtToNearestPlace'
        request = self.getInputMessageInstance(operationName)
        # NOTE(review): _place looks unused by this operation (it takes a
        # point); confirm the generated message carries it before removing.
        request._place = self._moduleDict[self._typeModuleName].ns1.Place_Def()
        request._point = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._point._Lon = -122.643
        request._point._Lat = 48.297
        response = self.RPC(operationName, request)

    def test_ConvertLonLatPtToUtmPt(self):
        operationName = 'ConvertLonLatPtToUtmPt'
        request = self.getInputMessageInstance(operationName)
        request._point = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._point._Lon = -122.643
        request._point._Lat = 48.297
        response = self.RPC(operationName, request)

    def test_ConvertUtmPtToLonLatPt(self):
        operationName = 'ConvertUtmPtToLonLatPt'
        request = self.getInputMessageInstance(operationName)
        request._utm = self._moduleDict[self._typeModuleName].ns1.UtmPt_Def()
        request._utm._X = 526703.512403
        request._utm._Y = 5348595.96493
        request._utm._Zone = 10
        response = self.RPC(operationName, request)

    def test_CountPlacesInRect(self):
        operationName = 'CountPlacesInRect'
        request = self.getInputMessageInstance(operationName)
        request._upperleft = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._upperleft._Lon = -122.647
        request._upperleft._Lat = 48.293
        request._lowerright = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._lowerright._Lon = request._upperleft._Lon + 1.0
        # Bug fix: latitude was derived from _Lon; mirror the correct
        # pattern used in test_GetAreaFromRect.
        request._lowerright._Lat = request._upperleft._Lat - 1.0
        request._ptype = "HillMountain"
        response = self.RPC(operationName, request)

    def test_GetAreaFromPt(self):
        operationName = 'GetAreaFromPt'
        request = self.getInputMessageInstance(operationName)
        request._center = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._center._Lon = -122.647
        request._center._Lat = 48.293
        request._theme = 'Topo'
        request._scale = "Scale2m"
        request._displayPixWidth = 2
        request._displayPixHeight = 2
        response = self.RPC(operationName, request)

    def test_GetAreaFromRect(self):
        operationName = 'GetAreaFromRect'
        request = self.getInputMessageInstance(operationName)
        request._upperLeft = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._upperLeft._Lon = -122.647
        request._upperLeft._Lat = 48.293
        request._lowerRight = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._lowerRight._Lon = request._upperLeft._Lon + 1.0
        request._lowerRight._Lat = request._upperLeft._Lat - 1.0
        request._theme = 'Topo'
        request._scale = "Scale2m"
        response = self.RPC(operationName, request)

    def test_GetAreaFromTileId(self):
        operationName = 'GetAreaFromTileId'
        request = self.getInputMessageInstance(operationName)
        # Renamed from `id` (shadowed the builtin).
        tile_id = self._moduleDict[self._typeModuleName].ns1.TileId_Def()
        tile_id._Theme = 'Topo'
        tile_id._Scale = "Scale2m"
        tile_id._Scene = 8
        tile_id._X = 20
        # Bug fix: attribute was written as lowercase `_y`; every other use
        # of this type sets `_Y` (see test_GetTile / test_GetTileMetaFromTileId).
        tile_id._Y = 20
        request._id = tile_id
        request._displayPixWidth = 2
        request._displayPixHeight = 2
        response = self.RPC(operationName, request)

    def test_GetLatLonMetrics(self):
        operationName = 'GetLatLonMetrics'
        request = self.getInputMessageInstance(operationName)
        request._point = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._point._Lon = -122.647
        request._point._Lat = 48.293
        response = self.RPC(operationName, request)

    # GetPlaceFacts / GetPlaceList skipped here: derived type (enum) problem,
    # and an inconsistent timeout for the latter (exercised in
    # TerraServiceTestFailures instead).

    def test_GetPlaceListInRect(self):
        operationName = 'GetPlaceListInRect'
        request = self.getInputMessageInstance(operationName)
        request._upperleft = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._upperleft._Lon = -123.0
        request._upperleft._Lat = 44.0
        request._lowerright = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        # The rectangle needs to be small, otherwise different items are
        # returned on each call.
        request._lowerright._Lon = -122.8
        request._lowerright._Lat = 43.8
        request._ptype = "HillMountain"
        request._MaxItems = 3
        response = self.RPC(operationName, request)

    def test_GetTheme(self):
        operationName = 'GetTheme'
        request = self.getInputMessageInstance(operationName)
        request._theme = 'Topo'
        response = self.RPC(operationName, request)

    def test_GetTile(self):
        operationName = 'GetTile'
        request = self.getInputMessageInstance(operationName)
        request._id = self._moduleDict[self._typeModuleName].ns1.TileId_Def()
        request._id._Theme = 'Topo'
        request._id._Scale = 'Scale2m'
        request._id._Scene = 8
        request._id._X = 20
        request._id._Y = 20
        response = self.RPC(operationName, request)

    def test_GetTileMetaFromLonLatPt(self):
        operationName = 'GetTileMetaFromLonLatPt'
        request = self.getInputMessageInstance(operationName)
        request._theme = 'Topo'
        request._point = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._point._Lon = -122.64
        request._point._Lat = 48.29
        request._scale = "Scale4m"
        response = self.RPC(operationName, request)

    def test_GetTileMetaFromTileId(self):
        operationName = 'GetTileMetaFromTileId'
        request = self.getInputMessageInstance(operationName)
        request._id = self._moduleDict[self._typeModuleName].ns1.TileId_Def()
        request._id._Theme = 'Topo'
        request._id._Scale = 'Scale2m'
        request._id._Scene = 8
        request._id._X = 20
        request._id._Y = 20
        response = self.RPC(operationName, request)
class TerraServiceTestFailures(ServiceTestCase):
    """Operations expected to fail (bad input or known evaluation problems).

    Modernized `except X, e:` (Python-2-only syntax) to `except X as e:`,
    which behaves identically and is also valid on Python 2.6+ and 3.x.
    """
    name = "test_TerraService"

    def test_ConvertPlaceToLonLatPt_x1(self):
        """Serialization must fail: _City is an int, not a string."""
        operationName = 'ConvertPlaceToLonLatPt'
        request = self.getInputMessageInstance(operationName)
        request._place = self._moduleDict[self._typeModuleName].ns1.Place_Def()
        request._place._City = 1
        request._place._State = 'Washington'
        request._place._Country = 'United States'
        try:
            response = self.RPC(operationName, request)
        except Exception as msg:
            # Only swallow the specific serialization error we anticipate;
            # anything else is a real failure and is re-raised.
            exceptionString = str(msg)
            if SERIALIZE_PATTERN.match(exceptionString):
                pass
            else:
                raise

    def test_GetPlaceFacts(self):
        operationName = 'GetPlaceFacts'
        request = self.getInputMessageInstance(operationName)
        request._place = self._moduleDict[self._typeModuleName].ns1.Place_Def()
        request._place._City = 'Seattle'
        request._place._State = 'Washington'
        request._place._Country = 'United States'
        try:
            response = self.RPC(operationName, request)
        except EvaluateException as ex:
            # Known response-evaluation problem; tolerated deliberately.
            pass

    def test_GetPlaceList(self):
        operationName = 'GetPlaceList'
        request = self.getInputMessageInstance(operationName)
        request._placeName = 'New York'
        request._MaxItems = 5
        request._imagePresence = 0
        try:
            response = self.RPC(operationName, request)
        except EvaluateException as ex:
            # Known response-evaluation problem; tolerated deliberately.
            pass
def makeTestSuite():
    """Assemble the suite of all TerraService test cases."""
    combined = ServiceTestSuite()
    for case_class in (TerraServiceTest, TerraServiceTestFailures):
        combined.addTest(unittest.makeSuite(case_class, 'test_'))
    return combined
# Script entry point: run the combined suite defined by makeTestSuite().
if __name__ == "__main__" :
    unittest.TestProgram(defaultTest="makeTestSuite")
|
#!/usr/bin/env python
############################################################################
# <NAME>, LBNL
# See LBNLCopyright for copyright notice!
###########################################################################
import sys, unittest
from ServiceTest import ServiceTestCase, ServiceTestSuite
import re
from ZSI import EvaluateException
"""
Unittest for contacting the TerraService Web service.
WSDL: http://terraservice.net/TerraService.asmx?WSDL
"""
CONFIG_FILE = 'config.txt'
CONFIG_SECTION = 'complex_types'
SERVICE_NAME = 'TerraService'
PORT_NAME = 'TerraServiceSoap'
EXCEPTION_STRING_SERIALIZE = r"Serializing ConvertPlaceToLonLatPt xmlns=\"http://terraserver-usa.com/terraserver/\"._place, Exception Serializing place xmlns=\"http://terraserver-usa.com/terraserver/\"._City, AttributeError 'int' object has no attribute \'replace\'"
SERIALIZE_PATTERN = re.compile(EXCEPTION_STRING_SERIALIZE)
class TerraServiceTest(ServiceTestCase):
    """Test case for TerraService Web service.

    Each test builds a typed request from the WSDL-generated types module
    (self._moduleDict[self._typeModuleName]) and calls the remote operation
    via ServiceTestCase.RPC; a fault from the service fails the test.
    """
    name = "test_TerraService"

    def test_ConvertPlaceToLonLatPt(self):
        operationName = 'ConvertPlaceToLonLatPt'
        request = self.getInputMessageInstance(operationName)
        request._place = self._moduleDict[self._typeModuleName].ns1.Place_Def()
        request._place._City = 'Oak Harbor'
        request._place._State = 'Washington'
        request._place._Country = 'United States'
        response = self.RPC(operationName, request)

    def test_ConvertLonLatPtToNearestPlace(self):
        operationName = 'ConvertLonLatPtToNearestPlace'
        request = self.getInputMessageInstance(operationName)
        request._place = self._moduleDict[self._typeModuleName].ns1.Place_Def()
        request._point = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._point._Lon = -122.643
        request._point._Lat = 48.297
        response = self.RPC(operationName, request)

    def test_ConvertLonLatPtToUtmPt(self):
        operationName = 'ConvertLonLatPtToUtmPt'
        request = self.getInputMessageInstance(operationName)
        request._point = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._point._Lon = -122.643
        request._point._Lat = 48.297
        response = self.RPC(operationName, request)

    def test_ConvertUtmPtToLonLatPt(self):
        operationName = 'ConvertUtmPtToLonLatPt'
        request = self.getInputMessageInstance(operationName)
        request._utm = self._moduleDict[self._typeModuleName].ns1.UtmPt_Def()
        request._utm._X = 526703.512403
        request._utm._Y = 5348595.96493
        request._utm._Zone = 10
        response = self.RPC(operationName, request)

    def test_CountPlacesInRect(self):
        operationName = 'CountPlacesInRect'
        request = self.getInputMessageInstance(operationName)
        request._upperleft = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._upperleft._Lon = -122.647
        request._upperleft._Lat = 48.293
        request._lowerright = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._lowerright._Lon = request._upperleft._Lon + 1.0
        # Bug fix: the lower-right latitude was derived from the upper-left
        # *longitude* (cf. test_GetAreaFromRect, which uses _Lat - 1.0),
        # producing a nonsensical bounding rectangle.
        request._lowerright._Lat = request._upperleft._Lat - 1.0
        request._ptype = "HillMountain"
        response = self.RPC(operationName, request)

    def test_GetAreaFromPt(self):
        operationName = 'GetAreaFromPt'
        request = self.getInputMessageInstance(operationName)
        request._center = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._center._Lon = -122.647
        request._center._Lat = 48.293
        request._theme = 'Topo'
        request._scale = "Scale2m"
        request._displayPixWidth = 2
        request._displayPixHeight = 2
        response = self.RPC(operationName, request)

    def test_GetAreaFromRect(self):
        operationName = 'GetAreaFromRect'
        request = self.getInputMessageInstance(operationName)
        request._upperLeft = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._upperLeft._Lon = -122.647
        request._upperLeft._Lat = 48.293
        request._lowerRight = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._lowerRight._Lon = request._upperLeft._Lon + 1.0
        request._lowerRight._Lat = request._upperLeft._Lat - 1.0
        request._theme = 'Topo'
        request._scale = "Scale2m"
        response = self.RPC(operationName, request)

    def test_GetAreaFromTileId(self):
        operationName = 'GetAreaFromTileId'
        request = self.getInputMessageInstance(operationName)
        # Renamed from `id` to avoid shadowing the builtin.
        tile_id = self._moduleDict[self._typeModuleName].ns1.TileId_Def()
        tile_id._Theme = 'Topo'
        tile_id._Scale = "Scale2m"
        tile_id._Scene = 8
        tile_id._X = 20
        # Bug fix: the Y index was assigned to a misspelled lowercase `_y`
        # (cf. test_GetTile / test_GetTileMetaFromTileId, which use `_Y`),
        # so the serialized request carried no Y tile coordinate.
        tile_id._Y = 20
        request._id = tile_id
        request._displayPixWidth = 2
        request._displayPixHeight = 2
        response = self.RPC(operationName, request)

    def test_GetLatLonMetrics(self):
        operationName = 'GetLatLonMetrics'
        request = self.getInputMessageInstance(operationName)
        request._point = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._point._Lon = -122.647
        request._point._Lat = 48.293
        response = self.RPC(operationName, request)

    # derived type (enum) problem
    # skipping it for now
    # derived type (enum) problem
    # also inconsistent timeout problem for this call
    def test_GetPlaceListInRect(self):
        operationName = 'GetPlaceListInRect'
        request = self.getInputMessageInstance(operationName)
        request._upperleft = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._upperleft._Lon = -123.0
        request._upperleft._Lat = 44.0
        request._lowerright = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        # needs to be small, otherwise different items
        # returned each time
        request._lowerright._Lon = -122.8
        request._lowerright._Lat = 43.8
        request._ptype = "HillMountain"
        request._MaxItems = 3
        response = self.RPC(operationName, request)

    def test_GetTheme(self):
        operationName = 'GetTheme'
        request = self.getInputMessageInstance(operationName)
        request._theme = 'Topo'
        response = self.RPC(operationName, request)

    def test_GetTile(self):
        operationName = 'GetTile'
        request = self.getInputMessageInstance(operationName)
        request._id = self._moduleDict[self._typeModuleName].ns1.TileId_Def()
        request._id._Theme = 'Topo'
        request._id._Scale = 'Scale2m'
        request._id._Scene = 8
        request._id._X = 20
        request._id._Y = 20
        response = self.RPC(operationName, request)

    def test_GetTileMetaFromLonLatPt(self):
        operationName = 'GetTileMetaFromLonLatPt'
        request = self.getInputMessageInstance(operationName)
        request._theme = 'Topo'
        request._point = self._moduleDict[self._typeModuleName].ns1.LonLatPt_Def()
        request._point._Lon = -122.64
        request._point._Lat = 48.29
        request._scale = "Scale4m"
        response = self.RPC(operationName, request)

    def test_GetTileMetaFromTileId(self):
        operationName = 'GetTileMetaFromTileId'
        request = self.getInputMessageInstance(operationName)
        request._id = self._moduleDict[self._typeModuleName].ns1.TileId_Def()
        request._id._Theme = 'Topo'
        request._id._Scale = 'Scale2m'
        request._id._Scene = 8
        request._id._X = 20
        request._id._Y = 20
        response = self.RPC(operationName, request)
class TerraServiceTestFailures(ServiceTestCase):
    """Negative tests: calls whose serialization or response parsing must fail.

    NOTE: uses Python 2 `except E, name` syntax, matching the rest of the file.
    """
    name = "test_TerraService"

    def test_ConvertPlaceToLonLatPt_x1(self):
        """
        This test should fail
        """
        operationName = 'ConvertPlaceToLonLatPt'
        request = self.getInputMessageInstance(operationName)
        request._place = self._moduleDict[self._typeModuleName].ns1.Place_Def()
        # _City is deliberately an int; serialization is expected to reject it.
        request._place._City = 1
        request._place._State = 'Washington'
        request._place._Country = 'United States'
        try:
            response = self.RPC(operationName, request)
        except Exception, msg:
            exceptionString = str(msg)
            # Only the expected serializer failure is tolerated; anything
            # else re-raises and fails the test.
            if SERIALIZE_PATTERN.match(exceptionString):
                pass
            else:
                raise

    def test_GetPlaceFacts(self):
        operationName = 'GetPlaceFacts'
        request = self.getInputMessageInstance(operationName)
        request._place = self._moduleDict[self._typeModuleName].ns1.Place_Def()
        request._place._City = 'Seattle'
        request._place._State = 'Washington'
        request._place._Country = 'United States'
        try:
            response = self.RPC(operationName, request)
        except EvaluateException, ex:
            # Response parsing is expected to raise; swallow it.
            pass

    def test_GetPlaceList(self):
        operationName = 'GetPlaceList'
        request = self.getInputMessageInstance(operationName)
        request._placeName = 'New York'
        request._MaxItems = 5
        request._imagePresence = 0
        try:
            response = self.RPC(operationName, request)
        except EvaluateException, ex:
            # Response parsing is expected to raise; swallow it.
            pass
def makeTestSuite():
    """Assemble both TerraService test cases into one suite for unittest."""
    suite = ServiceTestSuite()
    for case_class in (TerraServiceTest, TerraServiceTestFailures):
        suite.addTest(unittest.makeSuite(case_class, 'test_'))
    return suite
if __name__ == "__main__" :
    # Delegate to unittest's CLI runner, using makeTestSuite as the default.
    unittest.TestProgram(defaultTest="makeTestSuite")
|
en
| 0.396761
|
#!/usr/bin/env python ############################################################################ # <NAME>, LBNL # See LBNLCopyright for copyright notice! ########################################################################### Unittest for contacting the TerraService Web service. WSDL: http://terraservice.net/TerraService.asmx?WSDL Test case for TerraService Web service # derived type (enum) problem # skipping it for now # derived type (enum) problem # also inconsistent timeout problem for this call # needs to be small, otherwise different items # returned each time This test should fail
| 2.457571
| 2
|
pytorch_utils.py
|
cswin/CADA
| 5
|
6628167
|
<gh_stars>1-10
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def lr_poly(base_lr, iter, max_iter, power):
    """Polynomial learning-rate decay: base_lr * (1 - iter / max_iter) ** power."""
    decay_factor = (1.0 - float(iter) / max_iter) ** power
    return base_lr * decay_factor
def adjust_learning_rate(optimizer, i_iter, args):
    """Apply the poly-decayed LR to the optimizer; a second param group gets 10x."""
    new_lr = lr_poly(args.learning_rate, i_iter, args.num_steps, args.power)
    groups = optimizer.param_groups
    groups[0]['lr'] = new_lr
    if len(groups) > 1:
        groups[1]['lr'] = new_lr * 10
def adjust_learning_rate_D(optimizer, i_iter, args):
    """Discriminator variant of adjust_learning_rate (uses args.learning_rate_D)."""
    new_lr = lr_poly(args.learning_rate_D, i_iter, args.num_steps, args.power)
    groups = optimizer.param_groups
    groups[0]['lr'] = new_lr
    if len(groups) > 1:
        groups[1]['lr'] = new_lr * 10
def calc_mse_loss(item1, item2, batch_size):
    """Sum of per-element squared errors between item1 and item2, divided by batch_size.

    Uses an unreduced MSE so the .sum() runs over every element.
    Fix: `reduce=False` is deprecated in modern PyTorch; `reduction='none'`
    is the supported, behavior-identical spelling.
    """
    criterion = nn.MSELoss(reduction='none')
    return criterion(item1, item2).sum() / batch_size
def calc_l1_loss(item1, item2, batch_size, gpu):
    # Target is cast to float and moved to the selected GPU; `Variable` is a
    # deprecated no-op wrapper in modern PyTorch (tensors are Variables now).
    item2 = Variable(item2.float()).cuda(gpu)
    # NOTE(review): nn.L1Loss() defaults to *mean* reduction, so .sum() on the
    # scalar result is a no-op — this returns MAE / batch_size, unlike
    # calc_mse_loss which sums per-element errors. Confirm that is intended.
    criterion = nn.L1Loss()
    return criterion(item1, item2).sum() / batch_size
class LossMulti(nn.Module):
    """Blend of cross-entropy and a soft (log-)Jaccard loss for segmentation.

    loss = (1 - jaccard_weight) * CE(outputs, targets)
           - jaccard_weight * sum_c log(J_c),  J_c = soft IoU of class c.

    Args:
        jaccard_weight: blend factor; 0 (default) disables the Jaccard term.
        class_weights: optional per-class weights forwarded to cross_entropy.
        num_classes: number of classes iterated for the Jaccard term.
    """

    def __init__(self, jaccard_weight=0, class_weights=None, num_classes=1):
        # Bug fix: nn.Module.__init__ was never called, which breaks module
        # bookkeeping (e.g. registering this loss inside another nn.Module).
        super(LossMulti, self).__init__()
        if class_weights is not None:
            self.nll_weight = class_weights
        else:
            self.nll_weight = None
        self.jaccard_weight = jaccard_weight
        self.num_classes = num_classes

    def __call__(self, outputs, targets):
        loss = (1 - self.jaccard_weight) * F.cross_entropy(outputs, targets, weight=self.nll_weight)
        if self.jaccard_weight:
            eps = 1e-15  # guards empty-class division and log(0)
            # Fix: explicit dim=1 (the class channel); the implicit-dim call is
            # deprecated and resolved to dim=1 for 4-D inputs anyway.
            outputs = F.softmax(outputs, dim=1)
            for cls in range(self.num_classes):
                jaccard_target = (targets == cls).float()
                jaccard_output = outputs[:, cls]
                intersection = (jaccard_output * jaccard_target).sum()
                union = jaccard_output.sum() + jaccard_target.sum()
                loss -= torch.log((intersection + eps) / (union - intersection + eps)) * self.jaccard_weight
        return loss
def Weighted_Jaccard_loss (label, pred, class_weights=None, gpu=0):
    """
    Combined cross-entropy + soft-Jaccard loss (jaccard_weight=0.5, 3 classes)
    for semantic segmentation; optionally class-weighted.
    """
    # out shape batch_size x channels x h x w -> batch_size x channels x h x w
    # label shape h x w x 1 x batch_size -> batch_size x 1 x h x w
    # Targets must be long for F.cross_entropy; moved to the selected GPU.
    label = Variable(label.long()).cuda(gpu)
    # NOTE(review): `class_weights != 0` is truthy for any non-empty list; the
    # comparison only exists to skip an explicit scalar-0 sentinel — confirm.
    if class_weights is not None and class_weights != 0:
        class_weights = torch.Tensor(class_weights)
        class_weights = Variable(class_weights).cuda(gpu)
        criterion = LossMulti(jaccard_weight=0.5, class_weights=class_weights,num_classes=3)#.cuda(gpu)
    else:
        criterion = LossMulti(jaccard_weight=0.5, num_classes=3) # .cuda(gpu)
    return criterion(pred, label)
def dice_loss(true, logits, eps=1e-7):
    """Soft Sørensen–Dice loss: 1 minus the mean soft Dice coefficient.

    Args:
        true: ground-truth labels of shape [B, 1, H, W].
        logits: raw model outputs of shape [B, C, H, W].
        eps: denominator stabiliser.

    Returns:
        Scalar tensor (1 - mean Dice); minimizing it maximizes Dice.
        Adapted from kevinzakka/pytorch-goodies.
    """
    n_cls = logits.shape[1]
    if n_cls == 1:
        # Binary case: build a 2-channel one-hot with the positive channel
        # first so it lines up with [sigmoid(logits), 1 - sigmoid(logits)].
        one_hot = torch.eye(n_cls + 1)[true.squeeze(1)].permute(0, 3, 1, 2).float()
        one_hot = torch.cat([one_hot[:, 1:2, :, :], one_hot[:, 0:1, :, :]], dim=1)
        pos = torch.sigmoid(logits)
        probas = torch.cat([pos, 1 - pos], dim=1)
    else:
        one_hot = torch.eye(n_cls)[true.squeeze(1)].permute(0, 3, 1, 2).float()
        probas = F.softmax(logits, dim=1)
    one_hot = one_hot.type(logits.type())
    sum_dims = (0,) + tuple(range(2, true.ndimension()))
    inter = torch.sum(probas * one_hot, sum_dims)
    card = torch.sum(probas + one_hot, sum_dims)
    return 1 - (2. * inter / (card + eps)).mean()
|
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def lr_poly(base_lr, iter, max_iter, power):
return base_lr * ((1 - float(iter) / max_iter) ** (power))
def adjust_learning_rate(optimizer, i_iter, args):
lr = lr_poly(args.learning_rate, i_iter, args.num_steps, args.power)
optimizer.param_groups[0]['lr'] = lr
if len(optimizer.param_groups) > 1:
optimizer.param_groups[1]['lr'] = lr * 10
def adjust_learning_rate_D(optimizer, i_iter, args):
lr = lr_poly(args.learning_rate_D, i_iter, args.num_steps, args.power)
optimizer.param_groups[0]['lr'] = lr
if len(optimizer.param_groups) > 1:
optimizer.param_groups[1]['lr'] = lr * 10
def calc_mse_loss(item1, item2, batch_size):
    """Sum of per-element squared errors between item1 and item2, divided by batch_size.

    Uses an unreduced MSE so the .sum() runs over every element.
    Fix: `reduce=False` is deprecated in modern PyTorch; `reduction='none'`
    is the supported, behavior-identical spelling.
    """
    criterion = nn.MSELoss(reduction='none')
    return criterion(item1, item2).sum() / batch_size
def calc_l1_loss(item1, item2, batch_size, gpu):
item2 = Variable(item2.float()).cuda(gpu)
criterion = nn.L1Loss()
return criterion(item1, item2).sum() / batch_size
class LossMulti(nn.Module):
    """Blend of cross-entropy and a soft (log-)Jaccard loss for segmentation.

    loss = (1 - jaccard_weight) * CE(outputs, targets)
           - jaccard_weight * sum_c log(J_c),  J_c = soft IoU of class c.

    Args:
        jaccard_weight: blend factor; 0 (default) disables the Jaccard term.
        class_weights: optional per-class weights forwarded to cross_entropy.
        num_classes: number of classes iterated for the Jaccard term.
    """

    def __init__(self, jaccard_weight=0, class_weights=None, num_classes=1):
        # Bug fix: nn.Module.__init__ was never called, which breaks module
        # bookkeeping (e.g. registering this loss inside another nn.Module).
        super(LossMulti, self).__init__()
        if class_weights is not None:
            self.nll_weight = class_weights
        else:
            self.nll_weight = None
        self.jaccard_weight = jaccard_weight
        self.num_classes = num_classes

    def __call__(self, outputs, targets):
        loss = (1 - self.jaccard_weight) * F.cross_entropy(outputs, targets, weight=self.nll_weight)
        if self.jaccard_weight:
            eps = 1e-15  # guards empty-class division and log(0)
            # Fix: explicit dim=1 (the class channel); the implicit-dim call is
            # deprecated and resolved to dim=1 for 4-D inputs anyway.
            outputs = F.softmax(outputs, dim=1)
            for cls in range(self.num_classes):
                jaccard_target = (targets == cls).float()
                jaccard_output = outputs[:, cls]
                intersection = (jaccard_output * jaccard_target).sum()
                union = jaccard_output.sum() + jaccard_target.sum()
                loss -= torch.log((intersection + eps) / (union - intersection + eps)) * self.jaccard_weight
        return loss
def Weighted_Jaccard_loss (label, pred, class_weights=None, gpu=0):
"""
This function returns cross entropy loss for semantic segmentation
"""
# out shape batch_size x channels x h x w -> batch_size x channels x h x w
# label shape h x w x 1 x batch_size -> batch_size x 1 x h x w
label = Variable(label.long()).cuda(gpu)
if class_weights is not None and class_weights != 0:
class_weights = torch.Tensor(class_weights)
class_weights = Variable(class_weights).cuda(gpu)
criterion = LossMulti(jaccard_weight=0.5, class_weights=class_weights,num_classes=3)#.cuda(gpu)
else:
criterion = LossMulti(jaccard_weight=0.5, num_classes=3) # .cuda(gpu)
return criterion(pred, label)
def dice_loss(true, logits, eps=1e-7):
"""Computes the Sørensen–Dice loss.
Note that PyTorch optimizers minimize a loss. In this
case, we would like to maximize the dice loss so we
return the negated dice loss.
Args:
true: a tensor of shape [B, 1, H, W].
logits: a tensor of shape [B, C, H, W]. Corresponds to
the raw output or logits of the model.
eps: added to the denominator for numerical stability.
Returns:
dice_loss: the Sørensen–Dice loss.
https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py
"""
num_classes = logits.shape[1]
if num_classes == 1:
true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
true_1_hot_f = true_1_hot[:, 0:1, :, :]
true_1_hot_s = true_1_hot[:, 1:2, :, :]
true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
pos_prob = torch.sigmoid(logits)
neg_prob = 1 - pos_prob
probas = torch.cat([pos_prob, neg_prob], dim=1)
else:
true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
probas = F.softmax(logits, dim=1)
true_1_hot = true_1_hot.type(logits.type())
dims = (0,) + tuple(range(2, true.ndimension()))
intersection = torch.sum(probas * true_1_hot, dims)
cardinality = torch.sum(probas + true_1_hot, dims)
dice_loss = (2. * intersection / (cardinality + eps)).mean()
return (1 - dice_loss)
|
en
| 0.65396
|
#Variable(class_weights.float()).cuda() #.exp() This function returns cross entropy loss for semantic segmentation # out shape batch_size x channels x h x w -> batch_size x channels x h x w # label shape h x w x 1 x batch_size -> batch_size x 1 x h x w #.cuda(gpu) # .cuda(gpu) Computes the Sørensen–Dice loss. Note that PyTorch optimizers minimize a loss. In this case, we would like to maximize the dice loss so we return the negated dice loss. Args: true: a tensor of shape [B, 1, H, W]. logits: a tensor of shape [B, C, H, W]. Corresponds to the raw output or logits of the model. eps: added to the denominator for numerical stability. Returns: dice_loss: the Sørensen–Dice loss. https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py
| 2.457918
| 2
|
face_detection.py
|
Ankush1099/Face-Detection-
| 0
|
6628168
|
#Face Recognition
#Importing the libraries
import cv2
#Loading the cascades (the Haar feature XML files must sit next to this script)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
#Defining the function that will do the detections
def detect(gray, frame):
    """Draw rectangles around detected faces (blue) and their eyes (green).

    Args:
        gray: grayscale image used for Haar-cascade detection.
        frame: original BGR image, annotated in place.

    Returns:
        The annotated frame.
    """
    # Bug fix: the parameter was misspelled `frane`, so the body silently read
    # the *global* `frame` from the capture loop instead of its own argument.
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # tuples of (x, y, w, h)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Restrict the (cheaper) eye search to each detected face region.
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 3)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    return frame
#Doing some face recognition with the webcam
video_capture = cv2.VideoCapture(0)  # default camera
while True:
    _, frame = video_capture.read()
    # Haar cascades operate on grayscale input.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    canvas = detect(gray, frame)
    cv2.imshow('video', canvas)
    # Quit on 'q'; waitKey also pumps the GUI event loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()
|
#Face Recognition
#Importing the libraries
import cv2
#Loading the cascades
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
#Defining the function that will do the detections
def detect(gray, frame):
    """Draw rectangles around detected faces (blue) and their eyes (green).

    Args:
        gray: grayscale image used for Haar-cascade detection.
        frame: original BGR image, annotated in place.

    Returns:
        The annotated frame.
    """
    # Bug fix: the parameter was misspelled `frane`, so the body silently read
    # the *global* `frame` from the capture loop instead of its own argument.
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # tuples of (x, y, w, h)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Restrict the (cheaper) eye search to each detected face region.
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 3)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    return frame
#Doing some face recognition with the webcam
video_capture = cv2.VideoCapture(0)
while True:
_, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
canvas = detect(gray, frame)
cv2.imshow('video', canvas)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
|
en
| 0.746858
|
#Face Recognition #Importing the libraries #Loading the cascades #Defining the function that will do the detections #faces are tuples that will contain coordinate x,y,w,h #Doing some face recognition with the webcam
| 3.314415
| 3
|
src/system_sensors.py
|
leelooauto/system_sensors
| 0
|
6628169
|
#!/usr/bin/env python3
from os import error
import sys
import time
import yaml
import signal
import argparse
import threading
import paho.mqtt.client as mqtt
from sensors import *
# Module-level state shared by the MQTT callbacks and the update job.
mqttClient = None
global poll_interval  # NOTE(review): `global` at module level is a no-op
deviceName = None
settings = {}
class ProgramKilled(Exception):
    # Raised from the signal handler to unwind the main loop for cleanup.
    pass
def signal_handler(signum, frame):
    # SIGTERM/SIGINT handler: convert the signal into an exception so the main
    # loop's except clause can publish 'offline' and shut down cleanly.
    raise ProgramKilled
class Job(threading.Thread):
    """Background thread that calls `execute` every `interval` until stopped."""

    def __init__(self, interval, execute, *args, **kwargs):
        threading.Thread.__init__(self)
        self.daemon = False  # keep the process alive until stop() is called
        self.stopped = threading.Event()
        self.interval = interval  # datetime.timedelta between runs
        self.execute = execute
        self.args = args
        self.kwargs = kwargs

    def stop(self):
        # Signal the run loop to exit, then wait for the thread to finish.
        self.stopped.set()
        self.join()

    def run(self):
        # Event.wait doubles as the sleep; it returns True (ending the loop)
        # once stop() sets the event.
        while not self.stopped.wait(self.interval.total_seconds()):
            self.execute(*self.args, **self.kwargs)
def update_sensors():
    """Publish one MQTT state message containing all enabled sensor readings."""
    # The payload is a JSON object assembled by hand as a string.
    payload_str = f'{{'
    for sensor, attr in sensors.items():
        # skip sensors that have been disabled
        if settings['sensors'][sensor] == False:
            continue
        payload_str += f'"{sensor}": "{attr["function"]()}",'
    payload_str = payload_str[:-1]  # drop trailing comma (or '{' if none enabled)
    payload_str += f'}}'
    # NOTE(review): `attr` here is whatever the *last* iterated sensor was, so
    # the topic's sensor_type depends on dict order — confirm all sensors share
    # one sensor_type, and that at least one sensor is always enabled.
    mqttClient.publish(
        topic=f'system-sensors/{attr["sensor_type"]}/{deviceName}/state',
        payload=payload_str,
        qos=1,
        retain=False,
    )
def send_config_message(mqttClient):
    """Publish a Home Assistant MQTT-discovery config entry per enabled sensor,
    then announce this device as online on its availability topic."""
    write_message_to_console('send config message')
    for sensor, attr in sensors.items():
        # Disabled sensors get no discovery entry.
        if settings['sensors'][sensor] == False:
            continue
        mqttClient.publish(
            topic=f'homeassistant/{attr["sensor_type"]}/{deviceName}/{sensor}/config',
            # JSON discovery payload built by hand; optional fields
            # (device_class, unit, icon) are emitted only when defined.
            payload = (f'{{'
            + (f'"device_class":"{attr["class"]}",' if 'class' in attr else '')
            + f'"name":"{deviceNameDisplay} {attr["name"]}",'
            + f'"state_topic":"system-sensors/sensor/{deviceName}/state",'
            + (f'"unit_of_measurement":"{attr["unit"]}",' if 'unit' in attr else '')
            + f'"value_template":"{{{{value_json.{sensor}}}}}",'
            + f'"unique_id":"{deviceName}_sensor_{sensor}",'
            + f'"availability_topic":"system-sensors/sensor/{deviceName}/availability",'
            + f'"device":{{"identifiers":["{deviceName}_sensor"],'
            + f'"name":"{deviceNameDisplay} Sensors","model":"RPI {deviceNameDisplay}", "manufacturer":"RPI"}}'
            + (f',"icon":"mdi:{attr["icon"]}"' if 'icon' in attr else '')
            + f'}}'
            ),
            qos=1,
            retain=True,
        )
    mqttClient.publish(f'system-sensors/sensor/{deviceName}/availability', 'online', retain=True)
def _parser():
"""Generate argument parser"""
parser = argparse.ArgumentParser()
parser.add_argument('settings', help='path to the settings file')
return parser
def set_defaults(settings):
    """Fill in optional settings (interval, port, per-sensor toggles) in place."""
    global poll_interval
    set_default_timezone(pytz.timezone(settings['timezone']))
    # Default poll interval: 60 seconds.
    poll_interval = settings['update_interval'] if 'update_interval' in settings else 60
    if 'port' not in settings['mqtt']:
        settings['mqtt']['port'] = 1883  # standard unencrypted MQTT port
    if 'sensors' not in settings:
        settings['sensors'] = {}
    # Any sensor not mentioned in the config defaults to enabled.
    for sensor in sensors:
        if sensor not in settings['sensors']:
            settings['sensors'][sensor] = True
    if 'external_drives' not in settings['sensors']:
        settings['sensors']['external_drives'] = {}
def check_settings(settings):
    """Validate required settings; exit with a console message when invalid.

    Also degrades gracefully: disables sensors whose optional libraries
    (rpi_bad_power, apt) failed to import.
    """
    values_to_check = ['mqtt', 'timezone', 'deviceName', 'client_id']
    for value in values_to_check:
        if value not in settings:
            # Bug fix: this message was missing its f-prefix and printed the
            # literal text "{value}" instead of the missing key's name.
            write_message_to_console(f'{value} not defined in settings.yaml! Please check the documentation')
            sys.exit()
    if 'hostname' not in settings['mqtt']:
        write_message_to_console('hostname not defined in settings.yaml! Please check the documentation')
        sys.exit()
    if 'user' in settings['mqtt'] and 'password' not in settings['mqtt']:
        write_message_to_console('password not defined in settings.yaml! Please check the documentation')
        sys.exit()
    if 'power_status' in settings['sensors'] and rpi_power_disabled:
        write_message_to_console('Unable to import rpi_bad_power library. Power supply info will not be shown.')
        settings['sensors']['power_status'] = False
    if 'updates' in settings['sensors'] and apt_disabled:
        write_message_to_console('Unable to import apt package. Available updates will not be shown.')
        settings['sensors']['updates'] = False
    if 'power_integer_state' in settings:
        write_message_to_console('power_integer_state is deprecated please remove this option power state is now a binary_sensor!')
def add_drives():
    """Register a disk-usage sensor entry for every configured external drive."""
    for drive in settings['sensors']['external_drives']:
        # check if drives exist?
        # NOTE(review): entries added here carry no "function" key — confirm
        # update_sensors() handles these entries.
        sensors[f'disk_use_{drive.lower()}'] = {
            'name': f'Disk Use {drive}',
            'unit': '%',
            'icon': 'harddisk'
        }
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: subscribe to HA status and announce availability."""
    if rc == 0:
        write_message_to_console('Connected to broker')
        # Home Assistant publishes 'online' here after a restart; see on_message.
        client.subscribe('hass/status')
        mqttClient.publish(f'system-sensors/sensor/{deviceName}/availability', 'online', retain=True)
    elif rc == 5:
        # Return code 5: connection refused — not authorised.
        write_message_to_console('Authentication failed.\n Exiting.')
        sys.exit()
    else:
        write_message_to_console('Connection failed')
def on_message(client, userdata, message):
    """Re-send the discovery config when Home Assistant announces it is online."""
    print (f'Message received: {message.payload.decode()}' )
    if(message.payload.decode() == 'online'):
        send_config_message(client)
if __name__ == '__main__':
    args = _parser().parse_args()
    with open(args.settings) as f:
        settings = yaml.safe_load(f)
    # are these arguments necessary?
    set_defaults(settings)
    check_settings(settings)
    add_drives()
    # MQTT topic id: lowercase, no spaces; the display name keeps its casing.
    deviceName = settings['deviceName'].replace(' ', '').lower()
    deviceNameDisplay = settings['deviceName']
    mqttClient = mqtt.Client(client_id=settings['client_id'])
    mqttClient.on_connect = on_connect #attach function to callback
    mqttClient.on_message = on_message
    # Last-will: the broker marks us offline if the connection drops uncleanly.
    mqttClient.will_set(f'system-sensors/sensor/{deviceName}/availability', 'offline', retain=True)
    if 'user' in settings['mqtt']:
        mqttClient.username_pw_set(
            settings['mqtt']['user'], settings['mqtt']['password']
        )
    # Translate termination signals into ProgramKilled (see signal_handler).
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    while True:
        try:
            mqttClient.connect(settings['mqtt']['hostname'], settings['mqtt']['port'])
            break
        except ConnectionRefusedError:
            # sleep for 2 minutes if broker is unavailable and retry.
            # Make this value configurable?
            # this feels like a dirty hack. Is there some other way to do this?
            time.sleep(120)
        except OSError:
            # sleep for 10 minutes if broker is not reachable, i.e. network is down
            # Make this value configurable?
            # this feels like a dirty hack. Is there some other way to do this?
            time.sleep(600)
    try:
        send_config_message(mqttClient)
        update_sensors()
    except:
        # NOTE(review): bare except hides the actual failure; log the exception.
        write_message_to_console(f'something went wrong') # say what went wrong
    job = Job(interval=dt.timedelta(seconds=poll_interval), execute=update_sensors)
    job.start()
    mqttClient.loop_start()
    while True:
        try:
            sys.stdout.flush()
            time.sleep(1)
        except ProgramKilled:
            write_message_to_console('Program killed: running cleanup code')
            # Cleanly mark the device offline before disconnecting.
            mqttClient.publish(f'system-sensors/sensor/{deviceName}/availability', 'offline', retain=True)
            mqttClient.disconnect()
            mqttClient.loop_stop()
            sys.stdout.flush()
            job.stop()
            break
|
#!/usr/bin/env python3
from os import error
import sys
import time
import yaml
import signal
import argparse
import threading
import paho.mqtt.client as mqtt
from sensors import *
mqttClient = None
global poll_interval
deviceName = None
settings = {}
class ProgramKilled(Exception):
pass
def signal_handler(signum, frame):
raise ProgramKilled
class Job(threading.Thread):
def __init__(self, interval, execute, *args, **kwargs):
threading.Thread.__init__(self)
self.daemon = False
self.stopped = threading.Event()
self.interval = interval
self.execute = execute
self.args = args
self.kwargs = kwargs
def stop(self):
self.stopped.set()
self.join()
def run(self):
while not self.stopped.wait(self.interval.total_seconds()):
self.execute(*self.args, **self.kwargs)
def update_sensors():
payload_str = f'{{'
for sensor, attr in sensors.items():
# skip sensors that have been disabled
if settings['sensors'][sensor] == False:
continue
payload_str += f'"{sensor}": "{attr["function"]()}",'
payload_str = payload_str[:-1]
payload_str += f'}}'
mqttClient.publish(
topic=f'system-sensors/{attr["sensor_type"]}/{deviceName}/state',
payload=payload_str,
qos=1,
retain=False,
)
def send_config_message(mqttClient):
write_message_to_console('send config message')
for sensor, attr in sensors.items():
if settings['sensors'][sensor] == False:
continue
mqttClient.publish(
topic=f'homeassistant/{attr["sensor_type"]}/{deviceName}/{sensor}/config',
payload = (f'{{'
+ (f'"device_class":"{attr["class"]}",' if 'class' in attr else '')
+ f'"name":"{deviceNameDisplay} {attr["name"]}",'
+ f'"state_topic":"system-sensors/sensor/{deviceName}/state",'
+ (f'"unit_of_measurement":"{attr["unit"]}",' if 'unit' in attr else '')
+ f'"value_template":"{{{{value_json.{sensor}}}}}",'
+ f'"unique_id":"{deviceName}_sensor_{sensor}",'
+ f'"availability_topic":"system-sensors/sensor/{deviceName}/availability",'
+ f'"device":{{"identifiers":["{deviceName}_sensor"],'
+ f'"name":"{deviceNameDisplay} Sensors","model":"RPI {deviceNameDisplay}", "manufacturer":"RPI"}}'
+ (f',"icon":"mdi:{attr["icon"]}"' if 'icon' in attr else '')
+ f'}}'
),
qos=1,
retain=True,
)
mqttClient.publish(f'system-sensors/sensor/{deviceName}/availability', 'online', retain=True)
def _parser():
"""Generate argument parser"""
parser = argparse.ArgumentParser()
parser.add_argument('settings', help='path to the settings file')
return parser
def set_defaults(settings):
global poll_interval
set_default_timezone(pytz.timezone(settings['timezone']))
poll_interval = settings['update_interval'] if 'update_interval' in settings else 60
if 'port' not in settings['mqtt']:
settings['mqtt']['port'] = 1883
if 'sensors' not in settings:
settings['sensors'] = {}
for sensor in sensors:
if sensor not in settings['sensors']:
settings['sensors'][sensor] = True
if 'external_drives' not in settings['sensors']:
settings['sensors']['external_drives'] = {}
def check_settings(settings):
    """Validate required settings; exit with a console message when invalid.

    Also degrades gracefully: disables sensors whose optional libraries
    (rpi_bad_power, apt) failed to import.
    """
    values_to_check = ['mqtt', 'timezone', 'deviceName', 'client_id']
    for value in values_to_check:
        if value not in settings:
            # Bug fix: this message was missing its f-prefix and printed the
            # literal text "{value}" instead of the missing key's name.
            write_message_to_console(f'{value} not defined in settings.yaml! Please check the documentation')
            sys.exit()
    if 'hostname' not in settings['mqtt']:
        write_message_to_console('hostname not defined in settings.yaml! Please check the documentation')
        sys.exit()
    if 'user' in settings['mqtt'] and 'password' not in settings['mqtt']:
        write_message_to_console('password not defined in settings.yaml! Please check the documentation')
        sys.exit()
    if 'power_status' in settings['sensors'] and rpi_power_disabled:
        write_message_to_console('Unable to import rpi_bad_power library. Power supply info will not be shown.')
        settings['sensors']['power_status'] = False
    if 'updates' in settings['sensors'] and apt_disabled:
        write_message_to_console('Unable to import apt package. Available updates will not be shown.')
        settings['sensors']['updates'] = False
    if 'power_integer_state' in settings:
        write_message_to_console('power_integer_state is deprecated please remove this option power state is now a binary_sensor!')
def add_drives():
for drive in settings['sensors']['external_drives']:
# check if drives exist?
sensors[f'disk_use_{drive.lower()}'] = {
'name': f'Disk Use {drive}',
'unit': '%',
'icon': 'harddisk'
}
def on_connect(client, userdata, flags, rc):
if rc == 0:
write_message_to_console('Connected to broker')
client.subscribe('hass/status')
mqttClient.publish(f'system-sensors/sensor/{deviceName}/availability', 'online', retain=True)
elif rc == 5:
write_message_to_console('Authentication failed.\n Exiting.')
sys.exit()
else:
write_message_to_console('Connection failed')
def on_message(client, userdata, message):
print (f'Message received: {message.payload.decode()}' )
if(message.payload.decode() == 'online'):
send_config_message(client)
if __name__ == '__main__':
args = _parser().parse_args()
with open(args.settings) as f:
settings = yaml.safe_load(f)
# are these arguments necessary?
set_defaults(settings)
check_settings(settings)
add_drives()
deviceName = settings['deviceName'].replace(' ', '').lower()
deviceNameDisplay = settings['deviceName']
mqttClient = mqtt.Client(client_id=settings['client_id'])
mqttClient.on_connect = on_connect #attach function to callback
mqttClient.on_message = on_message
mqttClient.will_set(f'system-sensors/sensor/{deviceName}/availability', 'offline', retain=True)
if 'user' in settings['mqtt']:
mqttClient.username_pw_set(
settings['mqtt']['user'], settings['mqtt']['password']
)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
while True:
try:
mqttClient.connect(settings['mqtt']['hostname'], settings['mqtt']['port'])
break
except ConnectionRefusedError:
# sleep for 2 minutes if broker is unavailable and retry.
# Make this value configurable?
# this feels like a dirty hack. Is there some other way to do this?
time.sleep(120)
except OSError:
# sleep for 10 minutes if broker is not reachable, i.e. network is down
# Make this value configurable?
# this feels like a dirty hack. Is there some other way to do this?
time.sleep(600)
try:
send_config_message(mqttClient)
update_sensors()
except:
write_message_to_console(f'something went wrong') # say what went wrong
job = Job(interval=dt.timedelta(seconds=poll_interval), execute=update_sensors)
job.start()
mqttClient.loop_start()
while True:
try:
sys.stdout.flush()
time.sleep(1)
except ProgramKilled:
write_message_to_console('Program killed: running cleanup code')
mqttClient.publish(f'system-sensors/sensor/{deviceName}/availability', 'offline', retain=True)
mqttClient.disconnect()
mqttClient.loop_stop()
sys.stdout.flush()
job.stop()
break
|
en
| 0.83924
|
#!/usr/bin/env python3 # skip sensors that have been disabled Generate argument parser # check if drives exist? # are these arguments necessary? #attach function to callback # sleep for 2 minutes if broker is unavailable and retry. # Make this value configurable? # this feels like a dirty hack. Is there some other way to do this? # sleep for 10 minutes if broker is not reachable, i.e. network is down # Make this value configurable? # this feels like a dirty hack. Is there some other way to do this? # say what went wrong
| 2.664771
| 3
|
test/__init__.py
|
movermeyer/nibbler-python
| 0
|
6628170
|
import unittest
class BaseTestCase(unittest.TestCase):
    """Shared fixtures: lists of valid and invalid e-mail address samples.

    NOTE(review): several addresses were redacted upstream to the literal
    '<EMAIL>' placeholder, so some entries duplicate each other.
    """

    def setUp(self):
        # Valid email addresses:
        self.valid_addresses = [
            '<EMAIL>',
            '<EMAIL>',
            '<EMAIL>',
            'dis<EMAIL>',
            '<EMAIL>',
            '"much.more unusual"@<EMAIL>',
            '"very.unusual.@.unusual.com"@example.<EMAIL>',
            ('"very.(),:;<>[]\\".VERY.\\"very@\\\\ \\"very\\".unusual"'
             '@strange.example.com'),
            'postbox@com',
            'admin@mailserver1',
            '!#$%&\'*+-/=?^_`{}|~@<EMAIL>',
            '"()<>[]:,;@\\\\\\"!#$%&\'*+-/=?^_`{}| ~.a"@<EMAIL>',
            '" "@example.org',
            'abc."defghi".<EMAIL>',
            'test...<EMAIL>'
        ]
        # invalid email addresses:
        self.invalid_addresses = [
            'Abc.example.com',
            '<EMAIL>',
            'a"b(c)d,e:f;g<h>i[<EMAIL>',
            'just"not"<EMAIL>',
            'this is"<EMAIL>',
            'this\\ still\\"<EMAIL>',
            'abc"defghi"<EMAIL>'
        ]
|
import unittest
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Valid email addresses:
self.valid_addresses = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'dis<EMAIL>',
'<EMAIL>',
'"much.more unusual"@<EMAIL>',
'"very.unusual.@.unusual.com"@example.<EMAIL>',
('"very.(),:;<>[]\\".VERY.\\"very@\\\\ \\"very\\".unusual"'
'@strange.example.com'),
'postbox@com',
'admin@mailserver1',
'!#$%&\'*+-/=?^_`{}|~@<EMAIL>',
'"()<>[]:,;@\\\\\\"!#$%&\'*+-/=?^_`{}| ~.a"@<EMAIL>',
'" "@example.org',
'abc."defghi".<EMAIL>',
'test...<EMAIL>'
]
# invalid email addresses:
self.invalid_addresses = [
'Abc.example.com',
'<EMAIL>',
'a"b(c)d,e:f;g<h>i[<EMAIL>',
'just"not"<EMAIL>',
'this is"<EMAIL>',
'this\\ still\\"<EMAIL>',
'abc"defghi"<EMAIL>'
]
|
en
| 0.141612
|
# Valid email addresses: #$%&\'*+-/=?^_`{}|~@<EMAIL>', #$%&\'*+-/=?^_`{}| ~.a"@<EMAIL>', # invalid email addresses:
| 3.252594
| 3
|
Sapphire/Parse7.py
|
Rhodolite/Parser-py
| 0
|
6628171
|
#
# Copyright (c) 2017 <NAME>. All rights reserved.
#
@gem('Sapphire.Parse7')
def gem():
require_gem('Sapphire.Core')
require_gem('Sapphire.Expression')
require_gem('Sapphire.Match')
require_gem('Sapphire.Statement')
show = false
def parse7_expression(m):
[
name, left_parenthesis, single_quote, right_parenthesis,
] = m.group('name', 'left_parenthesis', 'single_quote', 'OLD__right_parenthesis')
expression = conjure_identifier(name)
if left_parenthesis is none:
return expression
if single_quote is none:
return CallExpression(
expression,
Arguments_0(
conjure_left_parenthesis(left_parenthesis),
conjure_right_parenthesis(right_parenthesis),
),
)
return CallExpression(
expression,
Arguments_1(
conjure_left_parenthesis(left_parenthesis),
SingleQuote(single_quote),
conjure_right_parenthesis(right_parenthesis),
),
)
def parse7_statement_class(m0, s):
if m is none:
raise_unknown_line()
[
name1, left_parenthesis, name2, right_parenthesis__colon, newline,
] = m.group('name1', 'left_parenthesis', 'name2', 'ow__right_parenthesis__colon__ow', 'newline')
parameters = ParameterColon_1(
conjure_left_parenthesis(left_parenthesis),
conjure_identifier(name2),
OperatorRightParenthesisColon(right_parenthesis__colon),
)
return ClassHeader(KeywordClass(m0.group('indented') + m0.group('keyword__ow')), name1, parameters, newline)
def parse7_statement_decorator_header(m0, s):
if m is none:
raise_unknown_line()
return DecoratorHeader(
OperatorAtSign(m0.group('indented') + m0.group('keyword__ow')),
parse7_expression(m),
conjure_token_newline(m.group('ow_comment_newline')),
)
def parse7_statement_define_header(m0, s):
if m is none:
raise_unknown_line()
[
name1, left_parenthesis, name2, right_parenthesis__colon, comment_newline,
] = m.group('name1', 'left_parenthesis', 'name2', 'ow__right_parenthesis__colon__ow', 'comment_newline')
if name2 is none:
parameters = ParameterColon_0(left_parenthesis + right_parenthesis__colon)
else:
parameters = ParameterColon_1(
conjure_left_parenthesis(left_parenthesis),
conjure_identifier(name2),
OperatorRightParenthesisColon(right_parenthesis__colon),
)
return FunctionHeader(
KeywordFunction(m0.group('indented') + m0.group('keyword__ow')),
name1,
parameters,
conjure_token_newline(comment_newline),
)
def parse7_statement_from(m0, s):
if m is none:
raise_unknown_line()
[
name1, dot, name2, w_import_w, name3, w_as_w, name4, comma
] = m.group('name1', 'ow_dot_ow', 'name2', 'w_import_w', 'name3', 'w_as_w', 'name4', 'ow_comma_ow')
if dot is none:
module = conjure_identifier(name1)
else:
module = MemberExpression_1(conjure_identifier(name1), conjure_dot(dot), conjure_identifier(name2))
as_fragment = FromAsFragment(conjure_identifier(name3), conjure_keyword_as(w_as_w), conjure_identifier(name4))
if comma is none:
return StatementFromImport(
KeywordFrom(m0.group('indented') + m0.group('keyword__ow')),
module,
KeywordImport(w_import_w),
as_fragment,
conjure_token_newline(m.group('ow_comment_newline')),
)
if m2 is none:
return raise_unknown_line()
[
name1, w_as_w, name2, comma_2
] = m2.group('name1', 'w_as_w', 'name2', 'ow_comma_ow')
as_fragment_2 = FromAsFragment(conjure_identifier(name1), conjure_keyword_as(w_as_w), conjure_identifier(name2))
if comma_2 is none:
return StatementFromImport(
KeywordFrom(m0.group('indented') + m0.group('keyword__ow')),
module,
KeywordImport(w_import_w),
CommaExpression_1(as_fragment, conjure_comma(comma), as_fragment_2),
conjure_token_newline(m2.group('ow_comment_newline')),
)
raise_runtime_error('parse7_statement_from: incomplete')
def parse7_statement_import(m0, s):
if m is none:
raise_unknown_line()
return StatementImport_1(
KeywordImport(m0.group('indented') + m0.group('keyword__ow')),
conjure_identifier(m.group('name1')),
conjure_token_newline(m.group('ow_comment_newline')),
)
def parse7_statement_return(m0, s):
if m is none:
raise_unknown_line()
return ReturnStatement_1(
conjure_keyword_return(m0.group('indented') + m0.group('keyword__ow')),
parse7_expression(m),
conjure_token_newline(m.group('ow_comment_newline')),
)
find_parse7_line = {
'class' : parse7_statement_class,
'def' : parse7_statement_define_header,
'from' : parse7_statement_from,
'import' : parse7_statement_import,
'return' : parse7_statement_return,
'@' : parse7_statement_decorator_header,
}.__getitem__
@share
def parse7_python_from_path(path):
data = read_text_from_path(path)
many = []
append = many.append
iterate_lines = z_initialize(data)
for s in iterate_lines:
if m is none:
raise_unknown_line()
[keyword, name] = m.group('keyword', 'name')
if keyword is not none:
assert name is none
append(find_parse7_line(keyword)(m, s))
continue
[indented, comment, newline_2] = m.group('indented', 'comment', 'newline_2')
assert newline_2 is not none
if comment is not none:
if indented is '':
append(Comment(comment, newline_2))
continue
append(IndentedComment(indented, comment, newline_2))
continue
append(EmptyLine(indented + newline_2))
continue
if show:
for v in many:
line('%r', v)
with create_StringOutput() as f:
w = f.write
for v in many:
v.write(w)
if data != f.result:
with FileOutput('oops.txt') as f:
f.write(f.result)
raise_runtime_error('mismatch on %r: output saved in %r', path, 'oops.txt')
|
#
# Copyright (c) 2017 <NAME>. All rights reserved.
#
@gem('Sapphire.Parse7')
def gem():
require_gem('Sapphire.Core')
require_gem('Sapphire.Expression')
require_gem('Sapphire.Match')
require_gem('Sapphire.Statement')
show = false
def parse7_expression(m):
[
name, left_parenthesis, single_quote, right_parenthesis,
] = m.group('name', 'left_parenthesis', 'single_quote', 'OLD__right_parenthesis')
expression = conjure_identifier(name)
if left_parenthesis is none:
return expression
if single_quote is none:
return CallExpression(
expression,
Arguments_0(
conjure_left_parenthesis(left_parenthesis),
conjure_right_parenthesis(right_parenthesis),
),
)
return CallExpression(
expression,
Arguments_1(
conjure_left_parenthesis(left_parenthesis),
SingleQuote(single_quote),
conjure_right_parenthesis(right_parenthesis),
),
)
def parse7_statement_class(m0, s):
if m is none:
raise_unknown_line()
[
name1, left_parenthesis, name2, right_parenthesis__colon, newline,
] = m.group('name1', 'left_parenthesis', 'name2', 'ow__right_parenthesis__colon__ow', 'newline')
parameters = ParameterColon_1(
conjure_left_parenthesis(left_parenthesis),
conjure_identifier(name2),
OperatorRightParenthesisColon(right_parenthesis__colon),
)
return ClassHeader(KeywordClass(m0.group('indented') + m0.group('keyword__ow')), name1, parameters, newline)
def parse7_statement_decorator_header(m0, s):
if m is none:
raise_unknown_line()
return DecoratorHeader(
OperatorAtSign(m0.group('indented') + m0.group('keyword__ow')),
parse7_expression(m),
conjure_token_newline(m.group('ow_comment_newline')),
)
def parse7_statement_define_header(m0, s):
if m is none:
raise_unknown_line()
[
name1, left_parenthesis, name2, right_parenthesis__colon, comment_newline,
] = m.group('name1', 'left_parenthesis', 'name2', 'ow__right_parenthesis__colon__ow', 'comment_newline')
if name2 is none:
parameters = ParameterColon_0(left_parenthesis + right_parenthesis__colon)
else:
parameters = ParameterColon_1(
conjure_left_parenthesis(left_parenthesis),
conjure_identifier(name2),
OperatorRightParenthesisColon(right_parenthesis__colon),
)
return FunctionHeader(
KeywordFunction(m0.group('indented') + m0.group('keyword__ow')),
name1,
parameters,
conjure_token_newline(comment_newline),
)
def parse7_statement_from(m0, s):
if m is none:
raise_unknown_line()
[
name1, dot, name2, w_import_w, name3, w_as_w, name4, comma
] = m.group('name1', 'ow_dot_ow', 'name2', 'w_import_w', 'name3', 'w_as_w', 'name4', 'ow_comma_ow')
if dot is none:
module = conjure_identifier(name1)
else:
module = MemberExpression_1(conjure_identifier(name1), conjure_dot(dot), conjure_identifier(name2))
as_fragment = FromAsFragment(conjure_identifier(name3), conjure_keyword_as(w_as_w), conjure_identifier(name4))
if comma is none:
return StatementFromImport(
KeywordFrom(m0.group('indented') + m0.group('keyword__ow')),
module,
KeywordImport(w_import_w),
as_fragment,
conjure_token_newline(m.group('ow_comment_newline')),
)
if m2 is none:
return raise_unknown_line()
[
name1, w_as_w, name2, comma_2
] = m2.group('name1', 'w_as_w', 'name2', 'ow_comma_ow')
as_fragment_2 = FromAsFragment(conjure_identifier(name1), conjure_keyword_as(w_as_w), conjure_identifier(name2))
if comma_2 is none:
return StatementFromImport(
KeywordFrom(m0.group('indented') + m0.group('keyword__ow')),
module,
KeywordImport(w_import_w),
CommaExpression_1(as_fragment, conjure_comma(comma), as_fragment_2),
conjure_token_newline(m2.group('ow_comment_newline')),
)
raise_runtime_error('parse7_statement_from: incomplete')
def parse7_statement_import(m0, s):
if m is none:
raise_unknown_line()
return StatementImport_1(
KeywordImport(m0.group('indented') + m0.group('keyword__ow')),
conjure_identifier(m.group('name1')),
conjure_token_newline(m.group('ow_comment_newline')),
)
def parse7_statement_return(m0, s):
if m is none:
raise_unknown_line()
return ReturnStatement_1(
conjure_keyword_return(m0.group('indented') + m0.group('keyword__ow')),
parse7_expression(m),
conjure_token_newline(m.group('ow_comment_newline')),
)
find_parse7_line = {
'class' : parse7_statement_class,
'def' : parse7_statement_define_header,
'from' : parse7_statement_from,
'import' : parse7_statement_import,
'return' : parse7_statement_return,
'@' : parse7_statement_decorator_header,
}.__getitem__
@share
def parse7_python_from_path(path):
data = read_text_from_path(path)
many = []
append = many.append
iterate_lines = z_initialize(data)
for s in iterate_lines:
if m is none:
raise_unknown_line()
[keyword, name] = m.group('keyword', 'name')
if keyword is not none:
assert name is none
append(find_parse7_line(keyword)(m, s))
continue
[indented, comment, newline_2] = m.group('indented', 'comment', 'newline_2')
assert newline_2 is not none
if comment is not none:
if indented is '':
append(Comment(comment, newline_2))
continue
append(IndentedComment(indented, comment, newline_2))
continue
append(EmptyLine(indented + newline_2))
continue
if show:
for v in many:
line('%r', v)
with create_StringOutput() as f:
w = f.write
for v in many:
v.write(w)
if data != f.result:
with FileOutput('oops.txt') as f:
f.write(f.result)
raise_runtime_error('mismatch on %r: output saved in %r', path, 'oops.txt')
|
en
| 0.86582
|
# # Copyright (c) 2017 <NAME>. All rights reserved. #
| 2.4331
| 2
|
ansible/modules/cloud/amazon/efs_facts.py
|
EnjoyLifeFund/py36pkgs
| 0
|
6628172
|
<gh_stars>0
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: efs_facts
short_description: Get information about Amazon EFS file systems
description:
- Module searches Amazon EFS file systems
version_added: "2.2"
requirements: [ boto3 ]
author:
- "<NAME> (@ryansydnor)"
options:
name:
description:
- Creation Token of Amazon EFS file system.
required: false
default: None
id:
description:
- ID of Amazon EFS.
required: false
default: None
tags:
description:
- List of tags of Amazon EFS. Should be defined as dictionary
required: false
default: None
targets:
description:
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
- SubnetId - Mandatory. The ID of the subnet to add the mount target in.
- IpAddress - Optional. A valid IPv4 address within the address range of the specified subnet.
- SecurityGroups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified."
required: false
default: None
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
# find all existing efs
- efs_facts:
register: result
- efs_facts:
name: myTestNameTag
- efs_facts:
id: fs-1234abcd
# Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
- efs_facts:
tags:
name: myTestNameTag
targets:
- subnet-1a2b3c4d
- sg-4d3c2b1a
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned:
type: datetime
sample: 2015-11-16 07:30:57-05:00
creation_token:
description: EFS creation token
returned:
type: UUID
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
file_system_id:
description: ID of the file system
returned:
type: unique ID
sample: fs-xxxxxxxx
life_cycle_state:
description: state of the EFS file system
returned:
type: str
sample: creating, available, deleting, deleted
mount_point:
description: url of file system
returned:
type: str
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
mount_targets:
description: list of mount targets
returned:
type: list of dicts
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned:
type: str
sample: my-efs
number_of_mount_targets:
description: the number of targets mounted
returned:
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned:
type: str
sample: XXXXXXXXXXXX
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned:
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned:
type: str
sample: "generalPurpose"
tags:
description: tags on the efs instance
returned:
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
from time import sleep
from collections import defaultdict
try:
from botocore.exceptions import ClientError
import boto3
HAS_BOTO3 = True
except ImportError as e:
HAS_BOTO3 = False
class EFSConnection(object):
STATE_CREATING = 'creating'
STATE_AVAILABLE = 'available'
STATE_DELETING = 'deleting'
STATE_DELETED = 'deleted'
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = boto3_conn(module, conn_type='client',
resource='efs', region=region,
**aws_connect_params)
except Exception as e:
module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
self.region = region
def get_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
items = iterate_all(
'FileSystems',
self.connection.describe_file_systems,
**kwargs
)
for item in items:
item['CreationTime'] = str(item['CreationTime'])
"""
Suffix of network path to be used as NFS device for mount. More detail here:
http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
"""
item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
if 'Timestamp' in item['SizeInBytes']:
item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
if item['LifeCycleState'] == self.STATE_AVAILABLE:
item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
else:
item['Tags'] = {}
item['MountTargets'] = []
yield item
def get_tags(self, **kwargs):
"""
Returns tag list for selected instance of EFS
"""
tags = iterate_all(
'Tags',
self.connection.describe_tags,
**kwargs
)
return dict((tag['Key'], tag['Value']) for tag in tags)
def get_mount_targets(self, **kwargs):
"""
Returns mount targets for selected instance of EFS
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
**kwargs
)
for target in targets:
if target['LifeCycleState'] == self.STATE_AVAILABLE:
target['SecurityGroups'] = list(self.get_security_groups(
MountTargetId=target['MountTargetId']
))
else:
target['SecurityGroups'] = []
yield target
def get_security_groups(self, **kwargs):
"""
Returns security groups for selected instance of EFS
"""
return iterate_all(
'SecurityGroups',
self.connection.describe_mount_target_security_groups,
**kwargs
)
def iterate_all(attr, map_method, **kwargs):
"""
Method creates iterator from boto result set
"""
args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
wait = 1
while True:
try:
data = map_method(**args)
for elm in data[attr]:
yield elm
if 'NextMarker' in data:
args['Marker'] = data['Nextmarker']
continue
break
except ClientError as e:
if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
sleep(wait)
wait = wait * 2
continue
def prefix_to_attr(attr_id):
"""
Helper method to convert ID prefix to mount target attribute
"""
attr_by_prefix = {
'fsmt-': 'MountTargetId',
'subnet-': 'SubnetId',
'eni-': 'NetworkInterfaceId',
'sg-': 'SecurityGroups'
}
prefix = first_or_default(filter(
lambda pref: str(attr_id).startswith(pref),
attr_by_prefix.keys()
))
if prefix:
return attr_by_prefix[prefix]
return 'IpAddress'
def first_or_default(items, default=None):
"""
Helper method to fetch first element of list (if exists)
"""
for item in items:
return item
return default
def has_tags(available, required):
"""
Helper method to determine if tag requested already exists
"""
for key, value in required.items():
if key not in available or value != available[key]:
return False
return True
def has_targets(available, required):
"""
Helper method to determine if mount tager requested already exists
"""
grouped = group_list_of_dict(available)
for (value, field) in required:
if field not in grouped or value not in grouped[field]:
return False
return True
def group_list_of_dict(array):
"""
Helper method to group list of dict to dict with all possible values
"""
result = defaultdict(list)
for item in array:
for key, value in item.items():
result[key] += value if isinstance(value, list) else [value]
return result
def main():
"""
Module action handler
"""
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
id=dict(required=False, type='str', default=None),
name=dict(required=False, type='str', default=None),
tags=dict(required=False, type="dict", default={}),
targets=dict(required=False, type="list", default=[])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = EFSConnection(module, region, **aws_connect_params)
name = module.params.get('name')
fs_id = module.params.get('id')
tags = module.params.get('tags')
targets = module.params.get('targets')
file_systems_info = connection.get_file_systems(FileSystemId=fs_id, CreationToken=name)
if tags:
file_systems_info = filter(lambda item: has_tags(item['Tags'], tags), file_systems_info)
if targets:
targets = [(item, prefix_to_attr(item)) for item in targets]
file_systems_info = filter(lambda item:
has_targets(item['MountTargets'], targets), file_systems_info)
file_systems_info = [camel_dict_to_snake_dict(x) for x in file_systems_info]
module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: efs_facts
short_description: Get information about Amazon EFS file systems
description:
- Module searches Amazon EFS file systems
version_added: "2.2"
requirements: [ boto3 ]
author:
- "<NAME> (@ryansydnor)"
options:
name:
description:
- Creation Token of Amazon EFS file system.
required: false
default: None
id:
description:
- ID of Amazon EFS.
required: false
default: None
tags:
description:
- List of tags of Amazon EFS. Should be defined as dictionary
required: false
default: None
targets:
description:
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
- SubnetId - Mandatory. The ID of the subnet to add the mount target in.
- IpAddress - Optional. A valid IPv4 address within the address range of the specified subnet.
- SecurityGroups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified."
required: false
default: None
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
# find all existing efs
- efs_facts:
register: result
- efs_facts:
name: myTestNameTag
- efs_facts:
id: fs-1234abcd
# Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
- efs_facts:
tags:
name: myTestNameTag
targets:
- subnet-1a2b3c4d
- sg-4d3c2b1a
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned:
type: datetime
sample: 2015-11-16 07:30:57-05:00
creation_token:
description: EFS creation token
returned:
type: UUID
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
file_system_id:
description: ID of the file system
returned:
type: unique ID
sample: fs-xxxxxxxx
life_cycle_state:
description: state of the EFS file system
returned:
type: str
sample: creating, available, deleting, deleted
mount_point:
description: url of file system
returned:
type: str
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
mount_targets:
description: list of mount targets
returned:
type: list of dicts
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned:
type: str
sample: my-efs
number_of_mount_targets:
description: the number of targets mounted
returned:
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned:
type: str
sample: XXXXXXXXXXXX
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned:
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned:
type: str
sample: "generalPurpose"
tags:
description: tags on the efs instance
returned:
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
from time import sleep
from collections import defaultdict
try:
from botocore.exceptions import ClientError
import boto3
HAS_BOTO3 = True
except ImportError as e:
HAS_BOTO3 = False
class EFSConnection(object):
STATE_CREATING = 'creating'
STATE_AVAILABLE = 'available'
STATE_DELETING = 'deleting'
STATE_DELETED = 'deleted'
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = boto3_conn(module, conn_type='client',
resource='efs', region=region,
**aws_connect_params)
except Exception as e:
module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
self.region = region
def get_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
items = iterate_all(
'FileSystems',
self.connection.describe_file_systems,
**kwargs
)
for item in items:
item['CreationTime'] = str(item['CreationTime'])
"""
Suffix of network path to be used as NFS device for mount. More detail here:
http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
"""
item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
if 'Timestamp' in item['SizeInBytes']:
item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
if item['LifeCycleState'] == self.STATE_AVAILABLE:
item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
else:
item['Tags'] = {}
item['MountTargets'] = []
yield item
def get_tags(self, **kwargs):
"""
Returns tag list for selected instance of EFS
"""
tags = iterate_all(
'Tags',
self.connection.describe_tags,
**kwargs
)
return dict((tag['Key'], tag['Value']) for tag in tags)
def get_mount_targets(self, **kwargs):
"""
Returns mount targets for selected instance of EFS
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
**kwargs
)
for target in targets:
if target['LifeCycleState'] == self.STATE_AVAILABLE:
target['SecurityGroups'] = list(self.get_security_groups(
MountTargetId=target['MountTargetId']
))
else:
target['SecurityGroups'] = []
yield target
def get_security_groups(self, **kwargs):
"""
Returns security groups for selected instance of EFS
"""
return iterate_all(
'SecurityGroups',
self.connection.describe_mount_target_security_groups,
**kwargs
)
def iterate_all(attr, map_method, **kwargs):
"""
Method creates iterator from boto result set
"""
args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
wait = 1
while True:
try:
data = map_method(**args)
for elm in data[attr]:
yield elm
if 'NextMarker' in data:
args['Marker'] = data['Nextmarker']
continue
break
except ClientError as e:
if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
sleep(wait)
wait = wait * 2
continue
def prefix_to_attr(attr_id):
"""
Helper method to convert ID prefix to mount target attribute
"""
attr_by_prefix = {
'fsmt-': 'MountTargetId',
'subnet-': 'SubnetId',
'eni-': 'NetworkInterfaceId',
'sg-': 'SecurityGroups'
}
prefix = first_or_default(filter(
lambda pref: str(attr_id).startswith(pref),
attr_by_prefix.keys()
))
if prefix:
return attr_by_prefix[prefix]
return 'IpAddress'
def first_or_default(items, default=None):
"""
Helper method to fetch first element of list (if exists)
"""
for item in items:
return item
return default
def has_tags(available, required):
"""
Helper method to determine if tag requested already exists
"""
for key, value in required.items():
if key not in available or value != available[key]:
return False
return True
def has_targets(available, required):
"""
Helper method to determine if mount tager requested already exists
"""
grouped = group_list_of_dict(available)
for (value, field) in required:
if field not in grouped or value not in grouped[field]:
return False
return True
def group_list_of_dict(array):
"""
Helper method to group list of dict to dict with all possible values
"""
result = defaultdict(list)
for item in array:
for key, value in item.items():
result[key] += value if isinstance(value, list) else [value]
return result
def main():
"""
Module action handler
"""
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
id=dict(required=False, type='str', default=None),
name=dict(required=False, type='str', default=None),
tags=dict(required=False, type="dict", default={}),
targets=dict(required=False, type="list", default=[])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = EFSConnection(module, region, **aws_connect_params)
name = module.params.get('name')
fs_id = module.params.get('id')
tags = module.params.get('tags')
targets = module.params.get('targets')
file_systems_info = connection.get_file_systems(FileSystemId=fs_id, CreationToken=name)
if tags:
file_systems_info = filter(lambda item: has_tags(item['Tags'], tags), file_systems_info)
if targets:
targets = [(item, prefix_to_attr(item)) for item in targets]
file_systems_info = filter(lambda item:
has_targets(item['MountTargets'], targets), file_systems_info)
file_systems_info = [camel_dict_to_snake_dict(x) for x in file_systems_info]
module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
en
| 0.761336
|
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. --- module: efs_facts short_description: Get information about Amazon EFS file systems description: - Module searches Amazon EFS file systems version_added: "2.2" requirements: [ boto3 ] author: - "<NAME> (@ryansydnor)" options: name: description: - Creation Token of Amazon EFS file system. required: false default: None id: description: - ID of Amazon EFS. required: false default: None tags: description: - List of tags of Amazon EFS. Should be defined as dictionary required: false default: None targets: description: - "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes: - SubnetId - Mandatory. The ID of the subnet to add the mount target in. - IpAddress - Optional. A valid IPv4 address within the address range of the specified subnet. - SecurityGroups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified." 
required: false default: None extends_documentation_fragment: - aws # find all existing efs - efs_facts: register: result - efs_facts: name: myTestNameTag - efs_facts: id: fs-1234abcd # Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a' - efs_facts: tags: name: myTestNameTag targets: - subnet-1a2b3c4d - sg-4d3c2b1a creation_time: description: timestamp of creation date returned: type: datetime sample: 2015-11-16 07:30:57-05:00 creation_token: description: EFS creation token returned: type: UUID sample: console-88609e04-9a0e-4a2e-912c-feaa99509961 file_system_id: description: ID of the file system returned: type: unique ID sample: fs-xxxxxxxx life_cycle_state: description: state of the EFS file system returned: type: str sample: creating, available, deleting, deleted mount_point: description: url of file system returned: type: str sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/ mount_targets: description: list of mount targets returned: type: list of dicts sample: [ { "file_system_id": "fs-a7ad440e", "ip_address": "172.31.17.173", "life_cycle_state": "available", "mount_target_id": "fsmt-d8907871", "network_interface_id": "eni-6e387e26", "owner_id": "740748460359", "security_groups": [ "sg-a30b22c6" ], "subnet_id": "subnet-e265c895" }, ... 
] name: description: name of the file system returned: type: str sample: my-efs number_of_mount_targets: description: the number of targets mounted returned: type: int sample: 3 owner_id: description: AWS account ID of EFS owner returned: type: str sample: XXXXXXXXXXXX size_in_bytes: description: size of the file system in bytes as of a timestamp returned: type: dict sample: { "timestamp": "2015-12-21 13:59:59-05:00", "value": 12288 } performance_mode: description: performance mode of the file system returned: type: str sample: "generalPurpose" tags: description: tags on the efs instance returned: type: dict sample: { "name": "my-efs", "key": "Value" } Returns generator of file systems including all attributes of FS Suffix of network path to be used as NFS device for mount. More detail here: http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html Returns tag list for selected instance of EFS Returns mount targets for selected instance of EFS Returns security groups for selected instance of EFS Method creates iterator from boto result set Helper method to convert ID prefix to mount target attribute Helper method to fetch first element of list (if exists) Helper method to determine if tag requested already exists Helper method to determine if mount tager requested already exists Helper method to group list of dict to dict with all possible values Module action handler
| 1.623327
| 2
|
libs/automic.py
|
ufopilot/AutomicTerminal
| 0
|
6628173
|
import base64
import automic_rest as aut
from . settings import Settings
class Automic():
    """Thin convenience wrapper around the ``automic_rest`` client.

    Credentials and per-system REST settings are read from the Settings
    store; *client* selects the Automic client id used for API calls.
    """

    def __init__(self, system=None, client=None, user=None, password=None):
        # NOTE(review): the user/password arguments are ignored in favour
        # of the Settings store -- kept in the signature for compatibility.
        self.settings = Settings()
        self.user = self.settings.items['user']
        self.password = self.settings.items['password']
        self.client = client
        self.system = system.lower()
        self.sslverify = self.settings.items['systems'][self.system]['rest_sslverify']
        self.sslcert = self.settings.items['systems'][self.system]['rest_sslcert']
        self.noproxy = self.settings.items['systems'][self.system]['rest_noproxy']

    def isBase64(self, s):
        """Return True when *s* is a canonical base64 encoding.

        Bug fix: the original computed the round-trip comparison but
        discarded the result, so any string that merely decoded without
        raising was reported as base64.
        """
        try:
            return base64.b64encode(base64.b64decode(s)).decode() == s
        except Exception:
            return False

    def connect(self):
        """Open the REST connection for the configured system.

        Returns True on success, False on any error (connection problems
        or missing/invalid settings).
        """
        try:
            url = self.settings.items['systems'][self.system]['rest_url']
            # Stored passwords may be base64 wrapped; unwrap if so.
            if self.isBase64(self.password):
                password = base64.b64decode(self.password).decode("utf-8")
            else:
                # Bug fix: this line held a '<PASSWORD>' redaction
                # placeholder (a syntax error); use the stored password.
                password = self.password
            credentials = self.user + ':' + password
            auth = base64.b64encode(credentials.encode()).decode()
            aut.connection(
                url=url,
                auth=auth,  # base64 userid:password
                noproxy=self.noproxy,  # default False
                sslverify=self.sslverify,  # default True
                cert=self.sslcert,  # default None
                timeout=60  # default 3600
            )
            return True
        except Exception:  # narrowed from bare except; keep best-effort contract
            return False

    def list_executions(self):
        """Return execution data for the client, or None on any error."""
        try:
            return aut.listExecutions(client_id=self.client).response['data']
        except Exception:
            return None

    def list_agents(self):
        """Return agent data for the client, or None on any error."""
        try:
            return aut.listAgents(client_id=self.client).response['data']
        except Exception:
            return None

    def health_check(self):
        """Return the raw health-check response, or None on any error."""
        try:
            return aut.healthCheck(client_id=self.client).response
        except Exception:
            return None
|
import base64
import automic_rest as aut
from . settings import Settings
class Automic():
def __init__(self, system=None, client=None, user=None, password=None):
self.settings = Settings()
self.user = self.settings.items['user']
self.password = self.settings.items['password']
self.client = client
self.system = system.lower()
self.sslverify = self.settings.items['systems'][self.system]['rest_sslverify']
self.sslcert = self.settings.items['systems'][self.system]['rest_sslcert']
self.noproxy = self.settings.items['systems'][self.system]['rest_noproxy']
def isBase64(self, s):
try:
base64.b64encode(base64.b64decode(s)) == s
return True
except Exception:
return False
def connect(self):
try:
url = self.settings.items['systems'][self.system]['rest_url']
if self.isBase64(self.password):
password = base64.b64decode(self.password).decode("utf-8")
else:
password = <PASSWORD>
credentials = self.user + ':' + password
auth = base64.b64encode(credentials.encode()).decode()
aut.connection(
url=url,
auth=auth, # base64 userid:password
noproxy=self.noproxy, # defalut False
sslverify=self.sslverify, # default True
cert=self.sslcert, # default None
timeout=60 # default 3600
)
return True
except:
return False
def list_executions(self):
try:
return aut.listExecutions(client_id=self.client).response['data']
except:
return None
def list_agents(self):
try:
return aut.listAgents(client_id=self.client).response['data']
except:
return None
def health_check(self):
try:
return aut.healthCheck(client_id=self.client).response
except:
return None
|
en
| 0.078466
|
# base64 userid:password # defalut False # default True # default None # default 3600
| 2.466646
| 2
|
notion_database/migrations/0001_initial.py
|
marcphilippebeaujean-abertay/recur-notion
| 2
|
6628174
|
# Generated by Django 4.0.1 on 2022-01-20 06:54
import django.core.serializers.json
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the NotionDatabase model."""
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="NotionDatabase",
            fields=[
                (
                    # Notion's own database id doubles as the primary key.
                    "database_id",
                    # NOTE(review): blank/null are documented as booleans;
                    # None is falsy and behaves like False -- confirm intent.
                    models.CharField(
                        blank=None,
                        max_length=255,
                        null=None,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                (
                    "database_name",
                    models.CharField(blank=None, max_length=255, null=None),
                ),
                (
                    # Raw Notion property schema, stored via Django's
                    # extended JSON encoder.
                    "properties_schema_json",
                    models.JSONField(
                        default=dict,
                        encoder=django.core.serializers.json.DjangoJSONEncoder,
                    ),
                ),
            ],
        ),
    ]
|
# Generated by Django 4.0.1 on 2022-01-20 06:54
import django.core.serializers.json
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the NotionDatabase model."""
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="NotionDatabase",
            fields=[
                (
                    # Notion's own database id doubles as the primary key.
                    "database_id",
                    # NOTE(review): blank/null are documented as booleans;
                    # None is falsy and behaves like False -- confirm intent.
                    models.CharField(
                        blank=None,
                        max_length=255,
                        null=None,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                (
                    "database_name",
                    models.CharField(blank=None, max_length=255, null=None),
                ),
                (
                    # Raw Notion property schema, stored via Django's
                    # extended JSON encoder.
                    "properties_schema_json",
                    models.JSONField(
                        default=dict,
                        encoder=django.core.serializers.json.DjangoJSONEncoder,
                    ),
                ),
            ],
        ),
    ]
|
en
| 0.885175
|
# Generated by Django 4.0.1 on 2022-01-20 06:54
| 1.832095
| 2
|
experiment_gener.py
|
firstgenius/Sorts
| 0
|
6628175
|
from random import randint
def random_generator_lst(n):
    """Build a list of n random integers drawn from [-100000, 100000]."""
    return [randint(-100000, 100000) for _ in range(n)]
def increase_generator_lst(n):
    """Return the already-sorted list [0, 1, ..., n-1]."""
    # Idiom: list(range(n)) replaces the manual append loop.
    return list(range(n))
def decrease_generator_lst(n):
    """Return the reverse-sorted list [n, n-1, ..., 1]."""
    # Idiom: list(range(n, 0, -1)) replaces the manual append loop.
    return list(range(n, 0, -1))
def repeated_generator_lst(n):
    """Build a list of n random integers drawn from {1, 2, 3}."""
    return [randint(1, 3) for _ in range(n)]
def main_genertor(index, number):
    """Dispatch to one of the list generators by *index* (0-2; anything
    else falls back to the repeated-values generator)."""
    dispatch = {
        0: random_generator_lst,
        1: increase_generator_lst,
        2: decrease_generator_lst,
    }
    return dispatch.get(index, repeated_generator_lst)(number)
# Demo when run as a script: print a short list of repeated values.
if __name__ == '__main__':
    print(repeated_generator_lst(2**3))
|
from random import randint
def random_generator_lst(n):
gen_lst = []
for i in range(n):
gen_lst.append(randint(-100000, 100000))
return gen_lst
def increase_generator_lst(n):
    """Return the already-sorted list [0, 1, ..., n-1]."""
    # Idiom: list(range(n)) replaces the manual append loop.
    return list(range(n))
def decrease_generator_lst(n):
    """Return the reverse-sorted list [n, n-1, ..., 1]."""
    # Idiom: list(range(n, 0, -1)) replaces the manual append loop.
    return list(range(n, 0, -1))
def repeated_generator_lst(n):
    """Return a list of n random integers drawn from {1, 2, 3}."""
    gen_lst = []
    for i in range(n):
        gen_lst.append(randint(1, 3))
    return gen_lst
def main_genertor(index, number):
    """Dispatch to one of the list generators by *index* (0-2; anything
    else falls back to the repeated-values generator)."""
    if index == 0:
        return random_generator_lst(number)
    elif index == 1:
        return increase_generator_lst(number)
    elif index == 2:
        return decrease_generator_lst(number)
    else:
        return repeated_generator_lst(number)
# Demo when run as a script: print a short list of repeated values.
if __name__ == '__main__':
    print(repeated_generator_lst(2**3))
|
none
| 1
| 3.529111
| 4
|
|
test/functional/feature_maxreorgdepth.py
|
MiracleCity/MiracleCity
| 0
|
6628176
|
<filename>test/functional/feature_maxreorgdepth.py
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Miracle Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Max Reorg Test
"""
import sys
import time
from test_framework.test_framework import MiracleTestFramework
from test_framework.util import *
from test_framework.mininode import *
class MaxReorgTest(MiracleTestFramework):
    """Exercise the max-reorg-depth rule.

    An adversary node mines a longer private chain while disconnected;
    after reconnecting, the test asserts whether the honest nodes reorg
    onto it, depending on --height, --tip_age and --should_reorg.
    """
    def set_test_params(self):
        """Configure node count and record the chain's reorg parameters."""
        self.setup_clean_chain = True
        self.num_nodes = 6
        # Allow overriding the peer count from the command line.
        if len(sys.argv) > 1:
            self.num_nodes = int(sys.argv[1])
        # NOTE(review): informational mirrors of the consensus constants;
        # the commented extra_args below would pass them to the nodes.
        self.max_reorg_depth = 60
        self.min_reorg_peers = 4
        self.min_reorg_age = 60 * 60 * 12
        # self.extra_args = [[f"-maxreorg={self.max_reorg_depth}", f"-minreorgpeers={self.min_reorg_peers}", f"-minreorgage={self.min_reorg_age}"] for i in range(self.num_nodes)]
    def add_options(self, parser):
        """Register the scenario's command line options."""
        parser.add_option("--height", dest="height", default=65,
                          help="The height of good branch when adversary surprises.")
        parser.add_option("--tip_age", dest="tip_age", default=60*5,
                          help="Age of tip of non-adversaries at time of reorg.")
        parser.add_option("--should_reorg", dest="should_reorg", default=0,
                          help="Whether a reorg is expected (0 or 1).")
    def setup_network(self):
        """Make this a fully connected network"""
        self.log.info("Running setup_network")
        self.setup_nodes()
        # Connect every node to every other
        connect_all_nodes_bi(self.nodes)
        self.sync_all()
    def reorg_test(self):
        """Run the adversarial reorg scenario and assert the final state."""
        height = int(self.options.height)
        peers = self.num_nodes
        tip_age = int(self.options.tip_age)
        should_reorg = int(self.options.should_reorg)
        self.log.info(f"Doing a reorg test with height: {height}, peers: {peers}, tip_age: {tip_age}. " + \
            f"Should reorg? *{should_reorg}*")
        asset_name = "MOON_STONES"
        adversary = self.nodes[0]
        subject = self.nodes[-1]
        # enough to activate assets
        start = 432
        self.log.info(f"Setting all node times to {tip_age} seconds ago...")
        now = int(round(time.time()))
        set_node_times(self.nodes, now - tip_age)
        self.log.info(f"Mining {start} starter blocks on all nodes and syncing...")
        subject.generate(round(start/2))
        self.sync_all()
        adversary.generate(round(start/2))
        self.sync_all()
        self.log.info("Stopping adversary node...")
        self.stop_node(0)
        self.log.info(f"Subject is issuing asset: {asset_name}...")
        subject.issue(asset_name)
        self.log.info(f"Miners are mining {height} blocks...")
        subject.generate(height)
        wait_until(lambda: [n.getblockcount() for n in self.nodes[1:]] == [height+start] * (peers-1))
        print([start] + [n.getblockcount() for n in self.nodes[1:]])
        self.log.info("Restarting adversary node...")
        self.start_node(0)
        self.log.info(f"Adversary is issuing asset: {asset_name}...")
        adversary.issue(asset_name)
        self.log.info(f"Adversary is mining {height*2} (2 x {height}) blocks over the next ~{tip_age} seconds...")
        # Spread the adversary's blocks across the tip_age window so the
        # private chain's timestamps advance to the present.
        interval = round(tip_age / (height * 2)) + 1
        for i in range(0, height*2):
            set_node_times(self.nodes, (now - tip_age) + ((i+1) * interval))
            adversary.generate(1)
        assert(adversary.getblockcount() - start == (subject.getblockcount() - start) * 2)
        besttimes = [n.getblock(n.getbestblockhash())['time'] for n in self.nodes]
        print(besttimes)
        print(f"adversary: {besttimes[0]}; subject: {besttimes[-1]}; difference: {besttimes[0] - besttimes[-1]}; expected gte: {tip_age}")
        assert(besttimes[0] - besttimes[-1] >= tip_age)
        print([n.getblockcount() for n in self.nodes])
        self.log.info("Reconnecting the network and syncing the chain...")
        for i in range(1, peers):
            connect_nodes_bi(self.nodes, 0, i)
        expected_height = start + height
        subject_owns_asset = True
        if should_reorg > 0:
            self.log.info(f"Expected a reorg -- blockcount should be {expected_height} and subject should own {asset_name} (waiting 5 seconds)...")
            expected_height += height
            subject_owns_asset = False
        else:
            self.log.info(f"Didn't expect a reorg -- blockcount should remain {expected_height} and both subject and adversary should own {asset_name} (waiting 5 seconds)...")
        # A timeout here is acceptable in the no-reorg case; the asserts
        # below check the final heights either way.
        try:
            wait_until(lambda: [n.getblockcount() for n in self.nodes] == [expected_height] * peers, timeout=5)
        except:
            pass
        print([n.getblockcount() for n in self.nodes])
        assert_equal(subject.getblockcount(), expected_height)
        assert_contains_pair(asset_name + '!', 1, adversary.listmyassets())
        if subject_owns_asset:
            assert_contains_pair(asset_name + '!', 1, subject.listmyassets())
        else:
            assert_does_not_contain_key(asset_name + '!', subject.listmyassets())
    def run_test(self):
        """Framework entry point: log the parameters and run the scenario."""
        self.log.info(f"Number of peers: {self.num_nodes}")
        self.log.info(f"Chain params: max_reorg_depth: {self.max_reorg_depth}, " + \
            f"max_reorg_peers: {self.min_reorg_peers}, " + \
            f"min_reorg_age: {self.min_reorg_age}.")
        self.reorg_test()
# Standard test-framework entry point.
if __name__ == '__main__':
    MaxReorgTest().main()
|
<filename>test/functional/feature_maxreorgdepth.py
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Miracle Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Max Reorg Test
"""
import sys
import time
from test_framework.test_framework import MiracleTestFramework
from test_framework.util import *
from test_framework.mininode import *
class MaxReorgTest(MiracleTestFramework):
    """Exercise the max-reorg-depth rule.

    An adversary node mines a longer private chain while disconnected;
    after reconnecting, the test asserts whether the honest nodes reorg
    onto it, depending on --height, --tip_age and --should_reorg.
    """
    def set_test_params(self):
        """Configure node count and record the chain's reorg parameters."""
        self.setup_clean_chain = True
        self.num_nodes = 6
        # Allow overriding the peer count from the command line.
        if len(sys.argv) > 1:
            self.num_nodes = int(sys.argv[1])
        # NOTE(review): informational mirrors of the consensus constants;
        # the commented extra_args below would pass them to the nodes.
        self.max_reorg_depth = 60
        self.min_reorg_peers = 4
        self.min_reorg_age = 60 * 60 * 12
        # self.extra_args = [[f"-maxreorg={self.max_reorg_depth}", f"-minreorgpeers={self.min_reorg_peers}", f"-minreorgage={self.min_reorg_age}"] for i in range(self.num_nodes)]
    def add_options(self, parser):
        """Register the scenario's command line options."""
        parser.add_option("--height", dest="height", default=65,
                          help="The height of good branch when adversary surprises.")
        parser.add_option("--tip_age", dest="tip_age", default=60*5,
                          help="Age of tip of non-adversaries at time of reorg.")
        parser.add_option("--should_reorg", dest="should_reorg", default=0,
                          help="Whether a reorg is expected (0 or 1).")
    def setup_network(self):
        """Make this a fully connected network"""
        self.log.info("Running setup_network")
        self.setup_nodes()
        # Connect every node to every other
        connect_all_nodes_bi(self.nodes)
        self.sync_all()
    def reorg_test(self):
        """Run the adversarial reorg scenario and assert the final state."""
        height = int(self.options.height)
        peers = self.num_nodes
        tip_age = int(self.options.tip_age)
        should_reorg = int(self.options.should_reorg)
        self.log.info(f"Doing a reorg test with height: {height}, peers: {peers}, tip_age: {tip_age}. " + \
            f"Should reorg? *{should_reorg}*")
        asset_name = "MOON_STONES"
        adversary = self.nodes[0]
        subject = self.nodes[-1]
        # enough to activate assets
        start = 432
        self.log.info(f"Setting all node times to {tip_age} seconds ago...")
        now = int(round(time.time()))
        set_node_times(self.nodes, now - tip_age)
        self.log.info(f"Mining {start} starter blocks on all nodes and syncing...")
        subject.generate(round(start/2))
        self.sync_all()
        adversary.generate(round(start/2))
        self.sync_all()
        self.log.info("Stopping adversary node...")
        self.stop_node(0)
        self.log.info(f"Subject is issuing asset: {asset_name}...")
        subject.issue(asset_name)
        self.log.info(f"Miners are mining {height} blocks...")
        subject.generate(height)
        wait_until(lambda: [n.getblockcount() for n in self.nodes[1:]] == [height+start] * (peers-1))
        print([start] + [n.getblockcount() for n in self.nodes[1:]])
        self.log.info("Restarting adversary node...")
        self.start_node(0)
        self.log.info(f"Adversary is issuing asset: {asset_name}...")
        adversary.issue(asset_name)
        self.log.info(f"Adversary is mining {height*2} (2 x {height}) blocks over the next ~{tip_age} seconds...")
        # Spread the adversary's blocks across the tip_age window so the
        # private chain's timestamps advance to the present.
        interval = round(tip_age / (height * 2)) + 1
        for i in range(0, height*2):
            set_node_times(self.nodes, (now - tip_age) + ((i+1) * interval))
            adversary.generate(1)
        assert(adversary.getblockcount() - start == (subject.getblockcount() - start) * 2)
        besttimes = [n.getblock(n.getbestblockhash())['time'] for n in self.nodes]
        print(besttimes)
        print(f"adversary: {besttimes[0]}; subject: {besttimes[-1]}; difference: {besttimes[0] - besttimes[-1]}; expected gte: {tip_age}")
        assert(besttimes[0] - besttimes[-1] >= tip_age)
        print([n.getblockcount() for n in self.nodes])
        self.log.info("Reconnecting the network and syncing the chain...")
        for i in range(1, peers):
            connect_nodes_bi(self.nodes, 0, i)
        expected_height = start + height
        subject_owns_asset = True
        if should_reorg > 0:
            self.log.info(f"Expected a reorg -- blockcount should be {expected_height} and subject should own {asset_name} (waiting 5 seconds)...")
            expected_height += height
            subject_owns_asset = False
        else:
            self.log.info(f"Didn't expect a reorg -- blockcount should remain {expected_height} and both subject and adversary should own {asset_name} (waiting 5 seconds)...")
        # A timeout here is acceptable in the no-reorg case; the asserts
        # below check the final heights either way.
        try:
            wait_until(lambda: [n.getblockcount() for n in self.nodes] == [expected_height] * peers, timeout=5)
        except:
            pass
        print([n.getblockcount() for n in self.nodes])
        assert_equal(subject.getblockcount(), expected_height)
        assert_contains_pair(asset_name + '!', 1, adversary.listmyassets())
        if subject_owns_asset:
            assert_contains_pair(asset_name + '!', 1, subject.listmyassets())
        else:
            assert_does_not_contain_key(asset_name + '!', subject.listmyassets())
    def run_test(self):
        """Framework entry point: log the parameters and run the scenario."""
        self.log.info(f"Number of peers: {self.num_nodes}")
        self.log.info(f"Chain params: max_reorg_depth: {self.max_reorg_depth}, " + \
            f"max_reorg_peers: {self.min_reorg_peers}, " + \
            f"min_reorg_age: {self.min_reorg_age}.")
        self.reorg_test()
# Standard test-framework entry point.
if __name__ == '__main__':
    MaxReorgTest().main()
|
en
| 0.607086
|
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Copyright (c) 2017-2018 The Miracle Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. Max Reorg Test # self.extra_args = [[f"-maxreorg={self.max_reorg_depth}", f"-minreorgpeers={self.min_reorg_peers}", f"-minreorgage={self.min_reorg_age}"] for i in range(self.num_nodes)] Make this a fully connected network # Connect every node to every other # enough to activate assets
| 2.199921
| 2
|
test/test_callback_server.py
|
Tianhao-Gu/JobRunner
| 0
|
6628177
|
<filename>test/test_callback_server.py<gh_stars>0
# Import the Sanic app, usually created with Sanic(__name__)
from JobRunner.callback_server import app
import json
from queue import Queue
from unittest.mock import patch
_TOKEN = 'bogus'
def _post(data):
    """POST *data* to the callback server root and return the response."""
    auth_headers = {"Authorization": _TOKEN}
    server_args = {'access_log': False}
    result = app.test_client.post(
        '/', server_kwargs=server_args, headers=auth_headers, data=data)
    return result[1]
def test_index_returns_200():
    """A plain GET on the root endpoint answers with HTTP 200."""
    result = app.test_client.get('/')
    response = result[1]
    assert response.status == 200
def test_index_post_empty():
    """POSTing an empty body yields an empty JSON object."""
    response = _post(None)
    print(response.json)
    assert {} == response.json
def test_index_post():
    """End-to-end exercise of submit / check_job / provenance / output."""
    out_q = Queue()
    in_q = Queue()
    conf = {
        'token': _TOKEN,
        'out_q': out_q,
        'in_q': in_q
    }
    app.config.update(conf)
    # A '_test_submit' call should queue a submit message and return a job id.
    data = json.dumps({'method': 'bogus._test_submit'})
    response = _post(data)
    assert 'result' in response.json
    job_id = response.json['result']
    mess = out_q.get()
    assert 'submit' in mess
    # No output queued yet, so the job reports unfinished.
    data = json.dumps({'method': 'bogus._check_job', 'params': [job_id]})
    response = _post(data)
    assert 'result' in response.json
    assert response.json['result'][0]['finished'] is False
    # Provenance is None until the runner pushes it onto the input queue.
    data = json.dumps({'method': 'bogus.get_provenance', 'params': [job_id]})
    response = _post(data)
    assert 'result' in response.json
    assert response.json['result'][0] is None
    in_q.put(['prov', job_id, 'bogus'])
    response = _post(data)
    assert 'result' in response.json
    assert response.json['result'][0] == 'bogus'
    # Once output is queued the job reports finished and carries the output.
    in_q.put(['output', job_id, {'foo': 'bar'}])
    data = json.dumps({'method': 'bogus._check_job', 'params': [job_id]})
    response = _post(data)
    assert 'result' in response.json
    assert response.json['result'][0]['finished'] is True
    assert 'foo' in response.json['result'][0]
@patch('JobRunner.callback_server.uuid', autospec=True)
def test_index_submit_sync(mock_uuid):
    """A synchronous method call returns the queued output directly."""
    out_q = Queue()
    in_q = Queue()
    conf = {
        'token': _TOKEN,
        'out_q': out_q,
        'in_q': in_q
    }
    app.config.update(conf)
    # Pin the generated job id so the pre-queued output matches it.
    mock_uuid.uuid1.return_value = 'bogus'
    data = json.dumps({'method': 'bogus.test'})
    in_q.put(['output', 'bogus', {'foo': 'bar'}])
    response = _post(data)
    assert 'finished' in response.json
    assert 'foo' in response.json
|
<filename>test/test_callback_server.py<gh_stars>0
# Import the Sanic app, usually created with Sanic(__name__)
from JobRunner.callback_server import app
import json
from queue import Queue
from unittest.mock import patch
_TOKEN = 'bogus'
def _post(data):
    """POST *data* to the callback server root and return the response."""
    header = {"Authorization": _TOKEN}
    # Silence Sanic's per-request access log during tests.
    sa = {'access_log': False}
    return app.test_client.post('/',
                                server_kwargs=sa,
                                headers=header, data=data)[1]
def test_index_returns_200():
    """A plain GET on the root endpoint answers with HTTP 200."""
    response = app.test_client.get('/')[1]
    assert response.status == 200
def test_index_post_empty():
    """POSTing an empty body yields an empty JSON object."""
    response = _post(None)
    print(response.json)
    assert response.json == {}
def test_index_post():
    """End-to-end exercise of submit / check_job / provenance / output."""
    out_q = Queue()
    in_q = Queue()
    conf = {
        'token': _TOKEN,
        'out_q': out_q,
        'in_q': in_q
    }
    app.config.update(conf)
    # A '_test_submit' call should queue a submit message and return a job id.
    data = json.dumps({'method': 'bogus._test_submit'})
    response = _post(data)
    assert 'result' in response.json
    job_id = response.json['result']
    mess = out_q.get()
    assert 'submit' in mess
    # No output queued yet, so the job reports unfinished.
    data = json.dumps({'method': 'bogus._check_job', 'params': [job_id]})
    response = _post(data)
    assert 'result' in response.json
    assert response.json['result'][0]['finished'] is False
    # Provenance is None until the runner pushes it onto the input queue.
    data = json.dumps({'method': 'bogus.get_provenance', 'params': [job_id]})
    response = _post(data)
    assert 'result' in response.json
    assert response.json['result'][0] is None
    in_q.put(['prov', job_id, 'bogus'])
    response = _post(data)
    assert 'result' in response.json
    assert response.json['result'][0] == 'bogus'
    # Once output is queued the job reports finished and carries the output.
    in_q.put(['output', job_id, {'foo': 'bar'}])
    data = json.dumps({'method': 'bogus._check_job', 'params': [job_id]})
    response = _post(data)
    assert 'result' in response.json
    assert response.json['result'][0]['finished'] is True
    assert 'foo' in response.json['result'][0]
@patch('JobRunner.callback_server.uuid', autospec=True)
def test_index_submit_sync(mock_uuid):
    """A synchronous method call returns the queued output directly."""
    out_q = Queue()
    in_q = Queue()
    conf = {
        'token': _TOKEN,
        'out_q': out_q,
        'in_q': in_q
    }
    app.config.update(conf)
    # Pin the generated job id so the pre-queued output matches it.
    mock_uuid.uuid1.return_value = 'bogus'
    data = json.dumps({'method': 'bogus.test'})
    in_q.put(['output', 'bogus', {'foo': 'bar'}])
    response = _post(data)
    assert 'finished' in response.json
    assert 'foo' in response.json
|
en
| 0.904072
|
# Import the Sanic app, usually created with Sanic(__name__)
| 2.267965
| 2
|
onigurumacffi.py
|
asottile/onigurumacffi
| 11
|
6628178
|
import enum
import re
from typing import Any
from typing import Optional
from typing import Tuple
import _onigurumacffi
_ffi = _onigurumacffi.ffi
_lib = _onigurumacffi.lib
# Matches backreferences like \1 whose backslash is not itself escaped:
# group 1 is the preceding run of escaped backslashes, group 2 the group number.
_BACKREF_RE = re.compile(r'((?<!\\)(?:\\\\)*)\\([0-9]+)')
class OnigError(RuntimeError):
    """Raised when an oniguruma C API call reports an error code."""
    pass
class OnigSearchOption(enum.IntEnum):
    """Option flags for match/search, mirroring the ONIG_OPTION_* constants."""
    NONE = _lib.ONIG_OPTION_NONE
    NOTBOL = _lib.ONIG_OPTION_NOTBOL
    NOTEOL = _lib.ONIG_OPTION_NOTEOL
    POSIX_REGION = _lib.ONIG_OPTION_POSIX_REGION
    CHECK_VALIDITY_OF_STRING = _lib.ONIG_OPTION_CHECK_VALIDITY_OF_STRING
    NOT_BEGIN_STRING = _lib.ONIG_OPTION_NOT_BEGIN_STRING
    NOT_BEGIN_POSITION = _lib.ONIG_OPTION_NOT_BEGIN_POSITION
    NOT_END_STRING = _lib.ONIG_OPTION_NOT_END_STRING
def _err(code: int, *args: Any) -> str:
    """Render an oniguruma error *code* (plus optional error info) as text."""
    buf = _ffi.new('OnigUChar[ONIG_MAX_ERROR_MESSAGE_LEN]')
    length = _lib.onig_error_code_to_str(buf, code, *args)
    return bytes(buf[0:length]).decode()
def _check(code: int, *args: Any) -> None:
    """Raise OnigError if *code* is a failure (negative) return value."""
    if code < 0:
        raise OnigError(_err(code, *args))
# Initialise the C library once at import time; record the library version.
_check(_lib.onigcffi_initialize())
__onig_version__ = _ffi.string(_lib.onig_version()).decode()
class _Match:
    """A single regex match; offsets are stored as bytes into the subject."""

    __slots__ = ('_s_b', '_begs', '_ends')

    def __init__(
        self,
        s_b: bytes,
        begs: Tuple[int, ...],
        ends: Tuple[int, ...],
    ) -> None:
        self._s_b = s_b
        self._begs = begs
        self._ends = ends

    def __repr__(self) -> str:
        return f'<onigurumacffi._Match span={self.span()} match={self[0]!r}>'

    def group(self, n: int = 0) -> str:
        """Return the text of capture group *n* (0 is the whole match)."""
        beg, end = self._begs[n], self._ends[n]
        return self._s_b[beg:end].decode()

    __getitem__ = group

    def start(self, n: int = 0) -> int:
        """Character (not byte) offset where group *n* starts."""
        prefix = self._s_b[:self._begs[n]]
        return len(prefix.decode())

    def end(self, n: int = 0) -> int:
        """Character (not byte) offset where group *n* ends."""
        prefix = self._s_b[:self._ends[n]]
        return len(prefix.decode())

    def span(self, n: int = 0) -> Tuple[int, int]:
        """(start, end) character offsets of group *n*."""
        return (self.start(n), self.end(n))

    def expand(self, s: str) -> str:
        """Substitute backreferences (\\1, \\2, ...) in *s* with group text."""
        return _BACKREF_RE.sub(lambda m: f'{m[1]}{self[int(m[2])]}', s)

    @property
    def string(self) -> str:
        """The full subject string, decoded."""
        return self._s_b.decode()
def _start_params(s: str, start: int) -> Tuple[bytes, int]:
    """Encode *s* to utf-8 and translate character offset *start* to bytes."""
    encoded = s.encode()
    byte_start = len(s[:start].encode())
    return encoded, byte_start
def _region() -> Any:
    """Allocate an OnigRegion whose lifetime is tied to the returned handle."""
    return _ffi.gc(_lib.onig_region_new(), _lib.onigcffi_region_free)
def _match_ret(ret: int, s_b: bytes, region: Any) -> Optional[_Match]:
    """Convert a C match/search return value into a _Match, or None on miss."""
    if ret == _lib.ONIG_MISMATCH:
        return None
    else:
        _check(ret)
        # Copy the per-group begin/end byte offsets out of the C region.
        begs = tuple(region[0].beg[0:region[0].num_regs])
        ends = tuple(region[0].end[0:region[0].num_regs])
        return _Match(s_b, begs, ends)
class _Pattern:
    """A compiled oniguruma regex wrapping the underlying ``regex_t``."""
    def __init__(self, pattern: str, regex_t: Any) -> None:
        self._pattern = pattern
        # Attach the C-side destructor so the regex is freed with the object.
        self._regex_t = _ffi.gc(regex_t, _lib.onig_free)
    def __repr__(self) -> str:
        return f'{__name__}.compile({self._pattern!r})'
    def number_of_captures(self) -> int:
        """Return how many capture groups the pattern defines."""
        return _lib.onig_number_of_captures(self._regex_t)
    def match(
            self,
            s: str,
            start: int = 0,
            flags: OnigSearchOption = OnigSearchOption.NONE,
    ) -> Optional[_Match]:
        """Match the pattern anchored at character offset *start* in *s*."""
        s_b, start_b = _start_params(s, start)
        region = _region()
        ret = _lib.onigcffi_match(
            self._regex_t, s_b, len(s_b), start_b, region, flags,
        )
        return _match_ret(ret, s_b, region)
    def search(
            self,
            s: str,
            start: int = 0,
            flags: OnigSearchOption = OnigSearchOption.NONE,
    ) -> Optional[_Match]:
        """Search for the pattern at or after character offset *start* in *s*."""
        s_b, start_b = _start_params(s, start)
        region = _region()
        ret = _lib.onigcffi_search(
            self._regex_t, s_b, len(s_b), start_b, region, flags,
        )
        return _match_ret(ret, s_b, region)
class _RegSet:
    """A set of compiled patterns that can be searched simultaneously."""
    def __init__(self, patterns: Tuple[str, ...], regset_t: Any) -> None:
        self._patterns = patterns
        # Attach the C-side destructor so the set is freed with the object.
        self._regset_t = _ffi.gc(regset_t, _lib.onig_regset_free)
    def __repr__(self) -> str:
        patterns = ', '.join(repr(pattern) for pattern in self._patterns)
        return f'{__name__}.compile_regset({patterns})'
    def search(
            self,
            s: str,
            start: int = 0,
            flags: OnigSearchOption = OnigSearchOption.NONE,
    ) -> Tuple[int, Optional[_Match]]:
        """Search all patterns in *s* from character offset *start*.

        Returns (pattern index, match); the match is None on mismatch.
        """
        s_b, start_b = _start_params(s, start)
        region = _ffi.new('OnigRegion*[1]')
        idx = _lib.onigcffi_regset_search(
            self._regset_t, s_b, len(s_b), start_b, region, flags,
        )
        return idx, _match_ret(idx, s_b, region[0])
def _compile_regex_t(pattern: str, dest: Any) -> None:
    """Compile *pattern* into the regex_t slot at *dest*; raise OnigError on failure."""
    pattern_b = pattern.encode()
    err_info = _ffi.new('OnigErrorInfo[1]')
    ret = _lib.onigcffi_new(dest, pattern_b, len(pattern_b), err_info)
    _check(ret, err_info)
def compile(pattern: str) -> _Pattern:
    """Compile *pattern* into a _Pattern (public API, mirrors re.compile)."""
    regex = _ffi.new('regex_t*[1]')
    _compile_regex_t(pattern, regex)
    return _Pattern(pattern, regex[0])
def compile_regset(*patterns: str) -> _RegSet:
    """Compile each pattern and bundle them into a single _RegSet."""
    regexes = _ffi.new('regex_t*[]', len(patterns))
    for i, pattern in enumerate(patterns):
        _compile_regex_t(pattern, regexes + i)
    regset = _ffi.new('OnigRegSet*[1]')
    _check(_lib.onig_regset_new(regset, len(patterns), regexes))
    return _RegSet(patterns, regset[0])
|
import enum
import re
from typing import Any
from typing import Optional
from typing import Tuple
import _onigurumacffi
_ffi = _onigurumacffi.ffi
_lib = _onigurumacffi.lib
# Matches backreferences like \1 whose backslash is not itself escaped:
# group 1 is the preceding run of escaped backslashes, group 2 the group number.
_BACKREF_RE = re.compile(r'((?<!\\)(?:\\\\)*)\\([0-9]+)')
class OnigError(RuntimeError):
    """Raised when an oniguruma C API call reports an error code."""
    pass
class OnigSearchOption(enum.IntEnum):
    """Option flags for match/search, mirroring the ONIG_OPTION_* constants."""
    NONE = _lib.ONIG_OPTION_NONE
    NOTBOL = _lib.ONIG_OPTION_NOTBOL
    NOTEOL = _lib.ONIG_OPTION_NOTEOL
    POSIX_REGION = _lib.ONIG_OPTION_POSIX_REGION
    CHECK_VALIDITY_OF_STRING = _lib.ONIG_OPTION_CHECK_VALIDITY_OF_STRING
    NOT_BEGIN_STRING = _lib.ONIG_OPTION_NOT_BEGIN_STRING
    NOT_BEGIN_POSITION = _lib.ONIG_OPTION_NOT_BEGIN_POSITION
    NOT_END_STRING = _lib.ONIG_OPTION_NOT_END_STRING
def _err(code: int, *args: Any) -> str:
    """Render an oniguruma error *code* (plus optional error info) as text."""
    buf = _ffi.new('OnigUChar[ONIG_MAX_ERROR_MESSAGE_LEN]')
    length = _lib.onig_error_code_to_str(buf, code, *args)
    return bytes(buf[0:length]).decode()
def _check(code: int, *args: Any) -> None:
    """Raise OnigError if *code* is a failure (negative) return value."""
    if code < 0:
        raise OnigError(_err(code, *args))
# Initialise the C library once at import time; record the library version.
_check(_lib.onigcffi_initialize())
__onig_version__ = _ffi.string(_lib.onig_version()).decode()
class _Match:
    """A single regex match; offsets are stored as bytes into the subject."""

    __slots__ = ('_s_b', '_begs', '_ends')

    def __init__(
        self,
        s_b: bytes,
        begs: Tuple[int, ...],
        ends: Tuple[int, ...],
    ) -> None:
        self._s_b = s_b
        self._begs = begs
        self._ends = ends

    def __repr__(self) -> str:
        return f'<onigurumacffi._Match span={self.span()} match={self[0]!r}>'

    def group(self, n: int = 0) -> str:
        """Return the text of capture group *n* (0 is the whole match)."""
        beg, end = self._begs[n], self._ends[n]
        return self._s_b[beg:end].decode()

    __getitem__ = group

    def start(self, n: int = 0) -> int:
        """Character (not byte) offset where group *n* starts."""
        prefix = self._s_b[:self._begs[n]]
        return len(prefix.decode())

    def end(self, n: int = 0) -> int:
        """Character (not byte) offset where group *n* ends."""
        prefix = self._s_b[:self._ends[n]]
        return len(prefix.decode())

    def span(self, n: int = 0) -> Tuple[int, int]:
        """(start, end) character offsets of group *n*."""
        return (self.start(n), self.end(n))

    def expand(self, s: str) -> str:
        """Substitute backreferences (\\1, \\2, ...) in *s* with group text."""
        return _BACKREF_RE.sub(lambda m: f'{m[1]}{self[int(m[2])]}', s)

    @property
    def string(self) -> str:
        """The full subject string, decoded."""
        return self._s_b.decode()
def _start_params(s: str, start: int) -> Tuple[bytes, int]:
    """Encode *s* to utf-8 and translate character offset *start* to bytes."""
    encoded = s.encode()
    byte_start = len(s[:start].encode())
    return encoded, byte_start
def _region() -> Any:
    """Allocate an OnigRegion whose lifetime is tied to the returned handle."""
    return _ffi.gc(_lib.onig_region_new(), _lib.onigcffi_region_free)
def _match_ret(ret: int, s_b: bytes, region: Any) -> Optional[_Match]:
    """Convert a C match/search result plus its region into a ``_Match``.

    Returns None on ``ONIG_MISMATCH``; any other negative code raises OnigError.
    """
    if ret == _lib.ONIG_MISMATCH:
        return None
    else:
        _check(ret)
    # Copy the per-group begin/end byte offsets out of the C region struct.
    begs = tuple(region[0].beg[0:region[0].num_regs])
    ends = tuple(region[0].end[0:region[0].num_regs])
    return _Match(s_b, begs, ends)
class _Pattern:
    """A compiled oniguruma regex wrapping an owned ``regex_t*``."""

    def __init__(self, pattern: str, regex_t: Any) -> None:
        self._pattern = pattern
        # Tie the C regex's lifetime to this object via the cffi GC hook.
        self._regex_t = _ffi.gc(regex_t, _lib.onig_free)

    def __repr__(self) -> str:
        return f'{__name__}.compile({self._pattern!r})'

    def number_of_captures(self) -> int:
        """Number of capture groups in the pattern."""
        return _lib.onig_number_of_captures(self._regex_t)

    def match(
            self,
            s: str,
            start: int = 0,
            flags: OnigSearchOption = OnigSearchOption.NONE,
    ) -> Optional[_Match]:
        """Match anchored at character offset *start*; None on mismatch."""
        s_b, start_b = _start_params(s, start)
        region = _region()
        ret = _lib.onigcffi_match(
            self._regex_t, s_b, len(s_b), start_b, region, flags,
        )
        return _match_ret(ret, s_b, region)

    def search(
            self,
            s: str,
            start: int = 0,
            flags: OnigSearchOption = OnigSearchOption.NONE,
    ) -> Optional[_Match]:
        """Find the first match at or after *start*; None if there is none."""
        s_b, start_b = _start_params(s, start)
        region = _region()
        ret = _lib.onigcffi_search(
            self._regex_t, s_b, len(s_b), start_b, region, flags,
        )
        return _match_ret(ret, s_b, region)
class _RegSet:
    """A compiled set of regexes searched simultaneously (onig regset)."""

    def __init__(self, patterns: Tuple[str, ...], regset_t: Any) -> None:
        self._patterns = patterns
        # Tie the C regset's lifetime to this object via the cffi GC hook.
        self._regset_t = _ffi.gc(regset_t, _lib.onig_regset_free)

    def __repr__(self) -> str:
        patterns = ', '.join(repr(pattern) for pattern in self._patterns)
        return f'{__name__}.compile_regset({patterns})'

    def search(
            self,
            s: str,
            start: int = 0,
            flags: OnigSearchOption = OnigSearchOption.NONE,
    ) -> Tuple[int, Optional[_Match]]:
        """Search all patterns at once.

        Returns ``(pattern_index, match)``; on mismatch the match is None.
        """
        s_b, start_b = _start_params(s, start)
        region = _ffi.new('OnigRegion*[1]')
        idx = _lib.onigcffi_regset_search(
            self._regset_t, s_b, len(s_b), start_b, region, flags,
        )
        return idx, _match_ret(idx, s_b, region[0])
def _compile_regex_t(pattern: str, dest: Any) -> None:
    """Compile *pattern* into the ``regex_t*`` slot at *dest*.

    Raises OnigError (with the library's error info) on failure.
    """
    pattern_b = pattern.encode()
    err_info = _ffi.new('OnigErrorInfo[1]')
    ret = _lib.onigcffi_new(dest, pattern_b, len(pattern_b), err_info)
    _check(ret, err_info)
def compile(pattern: str) -> _Pattern:
    """Compile *pattern* into a reusable ``_Pattern``.

    NOTE: intentionally shadows the ``compile`` builtin to mirror ``re.compile``.
    """
    regex = _ffi.new('regex_t*[1]')
    _compile_regex_t(pattern, regex)
    return _Pattern(pattern, regex[0])
def compile_regset(*patterns: str) -> _RegSet:
    """Compile each pattern and bundle them into a ``_RegSet`` for joint search."""
    regexes = _ffi.new('regex_t*[]', len(patterns))
    for i, pattern in enumerate(patterns):
        # ``regexes + i`` is pointer arithmetic: the i-th slot of the array.
        _compile_regex_t(pattern, regexes + i)
    regset = _ffi.new('OnigRegSet*[1]')
    _check(_lib.onig_regset_new(regset, len(patterns), regexes))
    return _RegSet(patterns, regset[0])
|
none
| 1
| 2.44557
| 2
|
|
AMmodel/transducer_wrap.py
|
ishine/TensorflowASR-1
| 1
|
6628179
|
import os
import tensorflow as tf
from utils.tools import shape_list, get_shape_invariants, merge_repeated
from utils.text_featurizers import TextFeaturizer
from AMmodel.layers.time_frequency import Melspectrogram, Spectrogram
from AMmodel.layers.LayerNormLstmCell import LayerNormLSTMCell
class TransducerPrediction(tf.keras.Model):
    """Prediction (decoder) network of the transducer.

    Embeds the label history [B, U] and runs it through a stack of
    layer-normalised LSTM cells, producing [B, U, lstm_units].
    """

    def __init__(self,
                 vocabulary_size: int,
                 embed_dim: int,
                 embed_dropout: float = 0,
                 num_lstms: int = 1,
                 lstm_units: int = 512,
                 name="transducer_prediction",
                 **kwargs):
        super(TransducerPrediction, self).__init__(name=name, **kwargs)
        self.embed = tf.keras.layers.Embedding(
            input_dim=vocabulary_size, output_dim=embed_dim, mask_zero=False)
        self.do = tf.keras.layers.Dropout(embed_dropout)
        # All cells share the same unit count (required for beam search).
        self.lstm_cells = [
            LayerNormLSTMCell(units=lstm_units,
                              dropout=embed_dropout,
                              recurrent_dropout=embed_dropout)
            for _ in range(num_lstms)
        ]
        self.decoder_lstms = tf.keras.layers.RNN(
            self.lstm_cells, return_sequences=True, return_state=True)

    def get_initial_state(self, input_sample):
        """Zero initial state matching *input_sample*'s batch dimension."""
        return self.decoder_lstms.get_initial_state(input_sample)

    def call(self,
             inputs,
             training=False,
             p_memory_states=None,
             **kwargs):
        """Run label ids [B, U] through embedding + LSTM stack -> [B, U, P]."""
        outputs = self.embed(inputs, training=training)
        outputs = self.do(outputs, training=training)
        if p_memory_states is None:  # None means "start from the zero state"
            p_memory_states = self.get_initial_state(outputs)
        outputs = self.decoder_lstms(
            outputs, training=training, initial_state=p_memory_states)
        # RNN returns (sequence_output, *states); only the sequence is used.
        return outputs[0]

    def get_config(self):
        conf = super(TransducerPrediction, self).get_config()
        conf.update(self.embed.get_config())
        conf.update(self.do.get_config())
        # Fix: the cells live in ``self.lstm_cells``; the original iterated a
        # non-existent ``self.lstms`` and raised AttributeError.
        for lstm in self.lstm_cells:
            conf.update(lstm.get_config())
        return conf
class TransducerJoint(tf.keras.Model):
    """Joint network: projects encoder and prediction outputs into a shared
    space, combines them additively, and scores the vocabulary.
    """

    def __init__(self,
                 vocabulary_size: int,
                 joint_dim: int = 1024,
                 name="tranducer_joint",
                 **kwargs):
        super(TransducerJoint, self).__init__(name=name, **kwargs)
        self.ffn_enc = tf.keras.layers.Dense(joint_dim)
        self.ffn_pred = tf.keras.layers.Dense(joint_dim)
        self.ffn_out = tf.keras.layers.Dense(vocabulary_size)

    def call(self, inputs, training=False, **kwargs):
        """Combine enc [B, T, E] and pred [B, U, P] into logits [B, T, U, V]."""
        enc, pred = inputs
        projected_enc = self.ffn_enc(enc, training=training)      # [B, T, joint_dim]
        projected_pred = self.ffn_pred(pred, training=training)   # [B, U, joint_dim]
        # Broadcast-add across the time (T) and label (U) axes.
        combined = tf.expand_dims(projected_enc, axis=2) + tf.expand_dims(projected_pred, axis=1)
        activated = tf.nn.tanh(combined)
        return self.ffn_out(activated, training=training)

    def get_config(self):
        conf = super(TransducerJoint, self).get_config()
        for dense in (self.ffn_enc, self.ffn_pred, self.ffn_out):
            conf.update(dense.get_config())
        return conf
class Transducer(tf.keras.Model):
    """Transducer model wrapper.

    Bundles an acoustic encoder, a label prediction network and a joint
    network, with optional in-graph Mel/linear spectrogram extraction and
    a per-frame greedy decoding loop for inference.
    """

    def __init__(self,
                 encoder: tf.keras.Model,
                 vocabulary_size: int,
                 embed_dim: int = 512,
                 embed_dropout: float = 0,
                 num_lstms: int = 1,
                 lstm_units: int = 320,
                 joint_dim: int = 1024,
                 # NOTE(review): the default ``speech_config=dict`` is the dict
                 # *type*, not an instance -- callers must pass a real config dict.
                 name="transducer", speech_config=dict,
                 **kwargs):
        super(Transducer, self).__init__(name=name, **kwargs)
        self.encoder = encoder
        self.predict_net = TransducerPrediction(
            vocabulary_size=vocabulary_size,
            embed_dim=embed_dim,
            embed_dropout=embed_dropout,
            num_lstms=num_lstms,
            lstm_units=lstm_units,
            name=f"{name}_prediction"
        )
        self.joint_net = TransducerJoint(
            vocabulary_size=vocabulary_size,
            joint_dim=joint_dim,
            name=f"{name}_joint"
        )
        self.speech_config = speech_config
        self.mel_layer = None
        if speech_config['use_mel_layer']:
            if speech_config['mel_layer_type'] == 'Melspectrogram':
                self.mel_layer = Melspectrogram(sr=speech_config['sample_rate'],
                                                n_mels=speech_config['num_feature_bins'],
                                                # hop size in samples, derived from the stride in ms
                                                n_hop=int(
                                                    speech_config['stride_ms'] * speech_config['sample_rate'] // 1000),
                                                n_dft=1024,
                                                trainable_fb=speech_config['trainable_kernel']
                                                )
            else:
                self.mel_layer = Spectrogram(
                    n_hop=int(speech_config['stride_ms'] * speech_config['sample_rate'] // 1000),
                    n_dft=1024,
                    trainable_kernel=speech_config['trainable_kernel']
                )
            self.mel_layer.trainable = speech_config['trainable_kernel']
        self.wav_info = speech_config['add_wav_info']
        if self.wav_info:
            assert speech_config['use_mel_layer'] == True, 'shold set use_mel_layer is True'
        # State used by the greedy decoder.
        self.kept_decode = None
        self.startid = 0
        self.endid = 1
        self.max_iter = 10

    def _build(self, sample_shape):
        """Trigger weight creation by running a dummy forward pass on real-shaped data."""
        features = tf.random.normal(shape=sample_shape)
        predicted = tf.constant([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]])
        return self([features, predicted], training=True)

    def save_seperate(self, path_to_dir: str):
        """Save the encoder, prediction and joint nets as separate models."""
        self.encoder.save(os.path.join(path_to_dir, "encoder"))
        self.predict_net.save(os.path.join(path_to_dir, "prediction"))
        self.joint_net.save(os.path.join(path_to_dir, "joint"))

    def summary(self, line_length=None, **kwargs):
        """Print sub-network summaries before the full-model summary."""
        self.predict_net.summary(line_length=line_length, **kwargs)
        self.joint_net.summary(line_length=line_length, **kwargs)
        super(Transducer, self).summary(line_length=line_length, **kwargs)

    def call(self, inputs, training=False):
        """Forward pass: (features, predicted label ids) -> joint logits."""
        features, predicted = inputs
        if self.mel_layer is not None:
            if self.wav_info:
                wav = features  # keep the raw waveform alongside the features
                features = self.mel_layer(features)
            else:
                features = self.mel_layer(features)
        # NOTE(review): if wav_info is True while mel_layer is None, ``wav``
        # below is unbound; the __init__ assert makes that unreachable in
        # practice -- confirm.
        if self.wav_info:
            enc = self.encoder([features, wav], training=training)
        else:
            enc = self.encoder(features, training=training)
        pred = self.predict_net(predicted, training=training)
        outputs = self.joint_net([enc, pred], training=training)
        return outputs

    def add_featurizers(self,
                        text_featurizer: TextFeaturizer):
        """Attach the text featurizer used by the decoding loops."""
        self.text_featurizer = text_featurizer

    def return_pb_function(self, shape):
        """Build and attach ``self.recognize_pb``: a tf.function that greedily
        decodes every utterance in the batch and concatenates the label ids,
        separating utterances with the featurizer's stop token."""
        @tf.function(input_signature=[
            tf.TensorSpec(shape, dtype=tf.float32),   # features
            tf.TensorSpec([None, 1], dtype=tf.int32),  # lengths (accepted but unused here)
        ])
        def recognize_pb(features, lengths):
            b_i = tf.constant(0, dtype=tf.int32)
            B = tf.shape(features)[0]
            decoded = tf.constant([], dtype=tf.int32)

            def _cond(b_i, B, features, decoded): return tf.less(b_i, B)

            def _body(b_i, B, features, decoded):
                yseq = self.perform_greedy(tf.expand_dims(features[b_i], axis=0),
                                           streaming=False)
                # Append the stop token so utterance boundaries survive the concat.
                yseq = tf.concat([yseq, tf.constant([[self.text_featurizer.stop]], tf.int32)], axis=-1)
                decoded = tf.concat([decoded, yseq[0]], axis=0)
                return b_i + 1, B, features, decoded

            _, _, _, decoded = tf.while_loop(
                _cond,
                _body,
                loop_vars=(b_i, B, features, decoded),
                shape_invariants=(
                    tf.TensorShape([]),
                    tf.TensorShape([]),
                    get_shape_invariants(features),
                    tf.TensorShape([None])  # decoded grows each iteration
                )
            )
            return [decoded]

        self.recognize_pb = recognize_pb

    @tf.function(experimental_relax_shapes=True)
    def perform_greedy(self,
                       features,
                       streaming=False):
        """Greedy per-frame decoding of a single utterance (batch size 1).

        Returns a [1, L] int tensor of label ids (including the start token).
        """
        if self.wav_info:
            wav = features
        if self.mel_layer is not None:
            features = self.mel_layer(features)
        decoded = tf.constant([self.text_featurizer.start])
        if self.kept_decode is not None:
            # Resume from a previously kept prefix -- presumably for
            # streaming use; confirm against callers.
            decoded = self.kept_decode
        if self.wav_info:
            enc = self.encoder([features, wav], training=False)  # [1, T, E]
        else:
            enc = self.encoder(features, training=False)  # [1, T, E]
        enc = tf.squeeze(enc, axis=0)  # [T, E]
        T = tf.cast(tf.shape(enc)[0], dtype=tf.int32)
        i = tf.constant(0, dtype=tf.int32)

        def _cond(enc, i, decoded, T):
            return tf.less(i, T)

        def _body(enc, i, decoded, T):
            hi = tf.reshape(enc[i], [1, 1, -1])  # [1, 1, E]
            # Re-runs the prediction net on the whole prefix every frame
            # (no cached LSTM state is carried between frames).
            y = self.predict_net(
                inputs=tf.reshape(decoded, [1, -1]),
                p_memory_states=None,
                training=False
            )
            y = y[:, -1:]  # keep only the last label position
            # [1, 1, E] + [1, 1, P] => [1, 1, 1, V]
            ytu = tf.nn.log_softmax(self.joint_net([hi, y], training=False))
            ytu = tf.squeeze(ytu, axis=None)  # [1, 1, 1, V] => [V]
            n_predict = tf.argmax(ytu, axis=-1, output_type=tf.int32)
            n_predict = tf.reshape(n_predict, [1])

            def return_no_blank():
                return tf.concat([decoded, n_predict], axis=0)

            # NOTE(review): Python ``and`` between tensor comparisons relies on
            # autograph / __bool__ behaviour here; tf.logical_and would be the
            # explicit graph-safe form -- confirm this works as intended.
            decoded = tf.cond(
                n_predict != self.text_featurizer.blank and n_predict != 0,
                true_fn=return_no_blank,
                false_fn=lambda: decoded
            )
            return enc, i + 1, decoded, T

        _, _, decoded, _ = tf.while_loop(
            _cond,
            _body,
            loop_vars=(enc, i, decoded, T),
            shape_invariants=(
                tf.TensorShape([None, None]),
                tf.TensorShape([]),
                tf.TensorShape([None]),  # decoded grows each iteration
                tf.TensorShape([])
            )
        )
        return tf.expand_dims(decoded, axis=0)

    def recognize(self, features):
        """Greedy-decode *features*; returns [1, L] label ids."""
        decoded = self.perform_greedy(features)
        return decoded

    def get_config(self):
        if self.mel_layer is not None:
            conf = self.mel_layer.get_config()
            conf.update(self.encoder.get_config())
        else:
            conf = self.encoder.get_config()
        conf.update(self.predict_net.get_config())
        conf.update(self.joint_net.get_config())
        return conf
|
import os
import tensorflow as tf
from utils.tools import shape_list, get_shape_invariants, merge_repeated
from utils.text_featurizers import TextFeaturizer
from AMmodel.layers.time_frequency import Melspectrogram, Spectrogram
from AMmodel.layers.LayerNormLstmCell import LayerNormLSTMCell
class TransducerPrediction(tf.keras.Model):
    """Prediction (decoder) network of the transducer.

    Embeds the label history [B, U] and runs it through a stack of
    layer-normalised LSTM cells, producing [B, U, lstm_units].
    """

    def __init__(self,
                 vocabulary_size: int,
                 embed_dim: int,
                 embed_dropout: float = 0,
                 num_lstms: int = 1,
                 lstm_units: int = 512,
                 name="transducer_prediction",
                 **kwargs):
        super(TransducerPrediction, self).__init__(name=name, **kwargs)
        self.embed = tf.keras.layers.Embedding(
            input_dim=vocabulary_size, output_dim=embed_dim, mask_zero=False)
        self.do = tf.keras.layers.Dropout(embed_dropout)
        # All cells share the same unit count (required for beam search).
        self.lstm_cells = [
            LayerNormLSTMCell(units=lstm_units,
                              dropout=embed_dropout,
                              recurrent_dropout=embed_dropout)
            for _ in range(num_lstms)
        ]
        self.decoder_lstms = tf.keras.layers.RNN(
            self.lstm_cells, return_sequences=True, return_state=True)

    def get_initial_state(self, input_sample):
        """Zero initial state matching *input_sample*'s batch dimension."""
        return self.decoder_lstms.get_initial_state(input_sample)

    def call(self,
             inputs,
             training=False,
             p_memory_states=None,
             **kwargs):
        """Run label ids [B, U] through embedding + LSTM stack -> [B, U, P]."""
        outputs = self.embed(inputs, training=training)
        outputs = self.do(outputs, training=training)
        if p_memory_states is None:  # None means "start from the zero state"
            p_memory_states = self.get_initial_state(outputs)
        outputs = self.decoder_lstms(
            outputs, training=training, initial_state=p_memory_states)
        # RNN returns (sequence_output, *states); only the sequence is used.
        return outputs[0]

    def get_config(self):
        conf = super(TransducerPrediction, self).get_config()
        conf.update(self.embed.get_config())
        conf.update(self.do.get_config())
        # Fix: the cells live in ``self.lstm_cells``; the original iterated a
        # non-existent ``self.lstms`` and raised AttributeError.
        for lstm in self.lstm_cells:
            conf.update(lstm.get_config())
        return conf
class TransducerJoint(tf.keras.Model):
    """Joint network: projects encoder and prediction outputs into a shared
    space, combines them additively, and scores the vocabulary.
    """

    def __init__(self,
                 vocabulary_size: int,
                 joint_dim: int = 1024,
                 name="tranducer_joint",
                 **kwargs):
        super(TransducerJoint, self).__init__(name=name, **kwargs)
        self.ffn_enc = tf.keras.layers.Dense(joint_dim)
        self.ffn_pred = tf.keras.layers.Dense(joint_dim)
        self.ffn_out = tf.keras.layers.Dense(vocabulary_size)

    def call(self, inputs, training=False, **kwargs):
        """Combine enc [B, T, E] and pred [B, U, P] into logits [B, T, U, V]."""
        enc, pred = inputs
        projected_enc = self.ffn_enc(enc, training=training)      # [B, T, joint_dim]
        projected_pred = self.ffn_pred(pred, training=training)   # [B, U, joint_dim]
        # Broadcast-add across the time (T) and label (U) axes.
        combined = tf.expand_dims(projected_enc, axis=2) + tf.expand_dims(projected_pred, axis=1)
        activated = tf.nn.tanh(combined)
        return self.ffn_out(activated, training=training)

    def get_config(self):
        conf = super(TransducerJoint, self).get_config()
        for dense in (self.ffn_enc, self.ffn_pred, self.ffn_out):
            conf.update(dense.get_config())
        return conf
class Transducer(tf.keras.Model):
    """Transducer model wrapper.

    Bundles an acoustic encoder, a label prediction network and a joint
    network, with optional in-graph Mel/linear spectrogram extraction and
    a per-frame greedy decoding loop for inference.
    """

    def __init__(self,
                 encoder: tf.keras.Model,
                 vocabulary_size: int,
                 embed_dim: int = 512,
                 embed_dropout: float = 0,
                 num_lstms: int = 1,
                 lstm_units: int = 320,
                 joint_dim: int = 1024,
                 # NOTE(review): the default ``speech_config=dict`` is the dict
                 # *type*, not an instance -- callers must pass a real config dict.
                 name="transducer", speech_config=dict,
                 **kwargs):
        super(Transducer, self).__init__(name=name, **kwargs)
        self.encoder = encoder
        self.predict_net = TransducerPrediction(
            vocabulary_size=vocabulary_size,
            embed_dim=embed_dim,
            embed_dropout=embed_dropout,
            num_lstms=num_lstms,
            lstm_units=lstm_units,
            name=f"{name}_prediction"
        )
        self.joint_net = TransducerJoint(
            vocabulary_size=vocabulary_size,
            joint_dim=joint_dim,
            name=f"{name}_joint"
        )
        self.speech_config = speech_config
        self.mel_layer = None
        if speech_config['use_mel_layer']:
            if speech_config['mel_layer_type'] == 'Melspectrogram':
                self.mel_layer = Melspectrogram(sr=speech_config['sample_rate'],
                                                n_mels=speech_config['num_feature_bins'],
                                                # hop size in samples, derived from the stride in ms
                                                n_hop=int(
                                                    speech_config['stride_ms'] * speech_config['sample_rate'] // 1000),
                                                n_dft=1024,
                                                trainable_fb=speech_config['trainable_kernel']
                                                )
            else:
                self.mel_layer = Spectrogram(
                    n_hop=int(speech_config['stride_ms'] * speech_config['sample_rate'] // 1000),
                    n_dft=1024,
                    trainable_kernel=speech_config['trainable_kernel']
                )
            self.mel_layer.trainable = speech_config['trainable_kernel']
        self.wav_info = speech_config['add_wav_info']
        if self.wav_info:
            assert speech_config['use_mel_layer'] == True, 'shold set use_mel_layer is True'
        # State used by the greedy decoder.
        self.kept_decode = None
        self.startid = 0
        self.endid = 1
        self.max_iter = 10

    def _build(self, sample_shape):
        """Trigger weight creation by running a dummy forward pass on real-shaped data."""
        features = tf.random.normal(shape=sample_shape)
        predicted = tf.constant([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]])
        return self([features, predicted], training=True)

    def save_seperate(self, path_to_dir: str):
        """Save the encoder, prediction and joint nets as separate models."""
        self.encoder.save(os.path.join(path_to_dir, "encoder"))
        self.predict_net.save(os.path.join(path_to_dir, "prediction"))
        self.joint_net.save(os.path.join(path_to_dir, "joint"))

    def summary(self, line_length=None, **kwargs):
        """Print sub-network summaries before the full-model summary."""
        self.predict_net.summary(line_length=line_length, **kwargs)
        self.joint_net.summary(line_length=line_length, **kwargs)
        super(Transducer, self).summary(line_length=line_length, **kwargs)

    def call(self, inputs, training=False):
        """Forward pass: (features, predicted label ids) -> joint logits."""
        features, predicted = inputs
        if self.mel_layer is not None:
            if self.wav_info:
                wav = features  # keep the raw waveform alongside the features
                features = self.mel_layer(features)
            else:
                features = self.mel_layer(features)
        # NOTE(review): if wav_info is True while mel_layer is None, ``wav``
        # below is unbound; the __init__ assert makes that unreachable in
        # practice -- confirm.
        if self.wav_info:
            enc = self.encoder([features, wav], training=training)
        else:
            enc = self.encoder(features, training=training)
        pred = self.predict_net(predicted, training=training)
        outputs = self.joint_net([enc, pred], training=training)
        return outputs

    def add_featurizers(self,
                        text_featurizer: TextFeaturizer):
        """Attach the text featurizer used by the decoding loops."""
        self.text_featurizer = text_featurizer

    def return_pb_function(self, shape):
        """Build and attach ``self.recognize_pb``: a tf.function that greedily
        decodes every utterance in the batch and concatenates the label ids,
        separating utterances with the featurizer's stop token."""
        @tf.function(input_signature=[
            tf.TensorSpec(shape, dtype=tf.float32),   # features
            tf.TensorSpec([None, 1], dtype=tf.int32),  # lengths (accepted but unused here)
        ])
        def recognize_pb(features, lengths):
            b_i = tf.constant(0, dtype=tf.int32)
            B = tf.shape(features)[0]
            decoded = tf.constant([], dtype=tf.int32)

            def _cond(b_i, B, features, decoded): return tf.less(b_i, B)

            def _body(b_i, B, features, decoded):
                yseq = self.perform_greedy(tf.expand_dims(features[b_i], axis=0),
                                           streaming=False)
                # Append the stop token so utterance boundaries survive the concat.
                yseq = tf.concat([yseq, tf.constant([[self.text_featurizer.stop]], tf.int32)], axis=-1)
                decoded = tf.concat([decoded, yseq[0]], axis=0)
                return b_i + 1, B, features, decoded

            _, _, _, decoded = tf.while_loop(
                _cond,
                _body,
                loop_vars=(b_i, B, features, decoded),
                shape_invariants=(
                    tf.TensorShape([]),
                    tf.TensorShape([]),
                    get_shape_invariants(features),
                    tf.TensorShape([None])  # decoded grows each iteration
                )
            )
            return [decoded]

        self.recognize_pb = recognize_pb

    @tf.function(experimental_relax_shapes=True)
    def perform_greedy(self,
                       features,
                       streaming=False):
        """Greedy per-frame decoding of a single utterance (batch size 1).

        Returns a [1, L] int tensor of label ids (including the start token).
        """
        if self.wav_info:
            wav = features
        if self.mel_layer is not None:
            features = self.mel_layer(features)
        decoded = tf.constant([self.text_featurizer.start])
        if self.kept_decode is not None:
            # Resume from a previously kept prefix -- presumably for
            # streaming use; confirm against callers.
            decoded = self.kept_decode
        if self.wav_info:
            enc = self.encoder([features, wav], training=False)  # [1, T, E]
        else:
            enc = self.encoder(features, training=False)  # [1, T, E]
        enc = tf.squeeze(enc, axis=0)  # [T, E]
        T = tf.cast(tf.shape(enc)[0], dtype=tf.int32)
        i = tf.constant(0, dtype=tf.int32)

        def _cond(enc, i, decoded, T):
            return tf.less(i, T)

        def _body(enc, i, decoded, T):
            hi = tf.reshape(enc[i], [1, 1, -1])  # [1, 1, E]
            # Re-runs the prediction net on the whole prefix every frame
            # (no cached LSTM state is carried between frames).
            y = self.predict_net(
                inputs=tf.reshape(decoded, [1, -1]),
                p_memory_states=None,
                training=False
            )
            y = y[:, -1:]  # keep only the last label position
            # [1, 1, E] + [1, 1, P] => [1, 1, 1, V]
            ytu = tf.nn.log_softmax(self.joint_net([hi, y], training=False))
            ytu = tf.squeeze(ytu, axis=None)  # [1, 1, 1, V] => [V]
            n_predict = tf.argmax(ytu, axis=-1, output_type=tf.int32)
            n_predict = tf.reshape(n_predict, [1])

            def return_no_blank():
                return tf.concat([decoded, n_predict], axis=0)

            # NOTE(review): Python ``and`` between tensor comparisons relies on
            # autograph / __bool__ behaviour here; tf.logical_and would be the
            # explicit graph-safe form -- confirm this works as intended.
            decoded = tf.cond(
                n_predict != self.text_featurizer.blank and n_predict != 0,
                true_fn=return_no_blank,
                false_fn=lambda: decoded
            )
            return enc, i + 1, decoded, T

        _, _, decoded, _ = tf.while_loop(
            _cond,
            _body,
            loop_vars=(enc, i, decoded, T),
            shape_invariants=(
                tf.TensorShape([None, None]),
                tf.TensorShape([]),
                tf.TensorShape([None]),  # decoded grows each iteration
                tf.TensorShape([])
            )
        )
        return tf.expand_dims(decoded, axis=0)

    def recognize(self, features):
        """Greedy-decode *features*; returns [1, L] label ids."""
        decoded = self.perform_greedy(features)
        return decoded

    def get_config(self):
        if self.mel_layer is not None:
            conf = self.mel_layer.get_config()
            conf.update(self.encoder.get_config())
        else:
            conf = self.encoder.get_config()
        conf.update(self.predict_net.get_config())
        conf.update(self.joint_net.get_config())
        return conf
|
en
| 0.768446
|
# lstms units must equal (for using beam search) # @tf.function(experimental_relax_shapes=True) # inputs has shape [B, U] # Zeros mean no initial_state # n_memory_states = [] # for i, lstm in enumerate(self.lstms): # new_memory_states = outputs[1:] # n_memory_states.append(new_memory_states) # return shapes [B, T, P], ([num_lstms, B, P], [num_lstms, B, P]) if using lstm # , new_memory_states # @tf.function(experimental_relax_shapes=True) # enc has shape [B, T, E] # pred has shape [B, U, P] # [B, T ,E] => [B, T, V] # [B, U, P] => [B, U, V] # => [B, T, U, V] Transducer Model Warper # Call on real data for building model # @tf.function(experimental_relax_shapes=True) # print(inputs.shape) # features # features # [1, T, E] # [1, T, E] # [T, E] # [1, 1, E] # [1, 1] # [1, 1, P], [1, P], [1, P] # [1, 1, E] + [1, 1, P] => [1, 1, 1, V] # [1, 1, 1, V] => [V] # => argmax []
| 2.278417
| 2
|
test/aws_permission_verification.py
|
LucidumInc/update-manager
| 0
|
6628180
|
<reponame>LucidumInc/update-manager<filename>test/aws_permission_verification.py
import yaml
class EnvironmentTest:
    """Collects per-AWS-service permission state for each configured role
    account and prints a simple ok / not-ok report.

    NOTE(review): every ``_get_<service>_state`` currently returns the full
    account list as both 'ok' and 'not-ok' -- they look like placeholders for
    real permission probes; confirm before relying on the report.
    """

    # AWS services covered by the report, in output order.
    SERVICES = (
        'cloudtrail', 'cloudwatch', 'config', 'dynamodb', 'ec2', 'ecs',
        'eks', 'elasticloadbalancing', 'guardduty', 'iam', 'inspector',
        'kms', 'lambda', 'logs', 'organizations', 'pricing', 'route53',
        's3', 'securityhub', 'ssm', 'sts', 'tag',
    )

    def __init__(self):
        self.yaml_configuration_file = '/usr/lucidum/connector-aws_latest/external/settings.yml'
        self.accounts = self._get_accounts()
        # One "<service>_state" attribute per service, matching the original
        # hand-written attribute names (cloudtrail_state, ec2_state, ...).
        for service in self.SERVICES:
            getter = getattr(self, f'_get_{service}_state')
            setattr(self, f'{service}_state', getter())

    def make_report(self):
        """Print the ok / not-ok account lists for every service.

        Fix: the original output omitted the trailing colon for the
        ``elasticloadbalancing`` and ``guardduty`` labels; labels are now
        uniform across all services.
        """
        for service in self.SERVICES:
            state = getattr(self, f'{service}_state')
            print(f"{service}:")
            print(" ok:" + str(state['ok']))
            print(" not-ok:" + str(state['not-ok']))

    def _service_state(self):
        # Placeholder shared by all per-service getters: reports every
        # account as both ok and not-ok.
        return {'ok': self.accounts, 'not-ok': self.accounts}

    # Per-service getters kept as named methods so existing callers and
    # subclass overrides continue to work.
    _get_cloudtrail_state = _service_state
    _get_cloudwatch_state = _service_state
    _get_config_state = _service_state
    _get_dynamodb_state = _service_state
    _get_ec2_state = _service_state
    _get_ecs_state = _service_state
    _get_eks_state = _service_state
    _get_elasticloadbalancing_state = _service_state
    _get_guardduty_state = _service_state
    _get_iam_state = _service_state
    _get_inspector_state = _service_state
    _get_kms_state = _service_state
    _get_lambda_state = _service_state
    _get_logs_state = _service_state
    _get_organizations_state = _service_state
    _get_pricing_state = _service_state
    _get_route53_state = _service_state
    _get_s3_state = _service_state
    _get_securityhub_state = _service_state
    _get_ssm_state = _service_state
    _get_sts_state = _service_state
    _get_tag_state = _service_state

    def _get_accounts(self):
        """Load the role account list from the connector settings YAML."""
        with open(self.yaml_configuration_file) as aws_yaml:
            accounts = yaml.load(aws_yaml, Loader=yaml.FullLoader)
        return accounts['global']['aws_server']['role_accounts']
# Manual smoke test: build and print the report against the local settings file.
if __name__ == '__main__':
    test = EnvironmentTest()
    test.make_report()
|
import yaml
class EnvironmentTest():
def __init__(self):
self.yaml_configuration_file = '/usr/lucidum/connector-aws_latest/external/settings.yml'
self.accounts = self._get_accounts()
self.cloudtrail_state = self._get_cloudtrail_state()
self.cloudwatch_state = self._get_cloudwatch_state()
self.config_state = self._get_config_state()
self.dynamodb_state = self._get_dynamodb_state()
self.ec2_state = self._get_ec2_state()
self.ecs_state = self._get_ecs_state()
self.eks_state = self._get_eks_state()
self.elasticloadbalancing_state = self._get_elasticloadbalancing_state()
self.guardduty_state = self._get_guardduty_state()
self.iam_state = self._get_iam_state()
self.inspector_state = self._get_inspector_state()
self.kms_state = self._get_kms_state()
self.lambda_state = self._get_lambda_state()
self.logs_state = self._get_logs_state()
self.organizations_state = self._get_organizations_state()
self.pricing_state = self._get_pricing_state()
self.route53_state = self._get_route53_state()
self.s3_state = self._get_s3_state()
self.securityhub_state = self._get_securityhub_state()
self.ssm_state = self._get_ssm_state()
self.sts_state = self._get_sts_state()
self.tag_state = self._get_tag_state()
def make_report(self):
print("cloudtrail:")
print(" ok:" + str(self.cloudtrail_state['ok']))
print(" not-ok:" + str(self.cloudtrail_state['not-ok']))
print("cloudwatch:")
print(" ok:" + str(self.cloudwatch_state['ok']))
print(" not-ok:" + str(self.cloudwatch_state['not-ok']))
print("config:")
print(" ok:" + str(self.config_state['ok']))
print(" not-ok:" + str(self.config_state['not-ok']))
print("dynamodb:")
print(" ok:" + str(self.dynamodb_state['ok']))
print(" not-ok:" + str(self.dynamodb_state['not-ok']))
print("ec2:")
print(" ok:" + str(self.ec2_state['ok']))
print(" not-ok:" + str(self.ec2_state['not-ok']))
print("ecs:")
print(" ok:" + str(self.ecs_state['ok']))
print(" not-ok:" + str(self.ecs_state['not-ok']))
print("eks:")
print(" ok:" + str(self.eks_state['ok']))
print(" not-ok:" + str(self.eks_state['not-ok']))
print("elasticloadbalancing")
print(" ok:" + str(self.elasticloadbalancing_state['ok']))
print(" not-ok:" + str(self.elasticloadbalancing_state['not-ok']))
print("guardduty")
print(" ok:" + str(self.guardduty_state['ok']))
print(" not-ok:" + str(self.guardduty_state['not-ok']))
print("iam:")
print(" ok:" + str(self.iam_state['ok']))
print(" not-ok:" + str(self.iam_state['not-ok']))
print("inspector:")
print(" ok:" + str(self.inspector_state['ok']))
print(" not-ok:" + str(self.inspector_state['not-ok']))
print("kms:")
print(" ok:" + str(self.kms_state['ok']))
print(" not-ok:" + str(self.kms_state['not-ok']))
print("lambda:")
print(" ok:" + str(self.lambda_state['ok']))
print(" not-ok:" + str(self.lambda_state['not-ok']))
print("logs:")
print(" ok:" + str(self.logs_state['ok']))
print(" not-ok:" + str(self.logs_state['not-ok']))
print("organizations:")
print(" ok:" + str(self.organizations_state['ok']))
print(" not-ok:" + str(self.organizations_state['not-ok']))
print("pricing:")
print(" ok:" + str(self.pricing_state['ok']))
print(" not-ok:" + str(self.pricing_state['not-ok']))
print("route53:")
print(" ok:" + str(self.route53_state['ok']))
print(" not-ok:" + str(self.route53_state['not-ok']))
print("s3:")
print(" ok:" + str(self.s3_state['ok']))
print(" not-ok:" + str(self.s3_state['not-ok']))
print("securityhub:")
print(" ok:" + str(self.securityhub_state['ok']))
print(" not-ok:" + str(self.securityhub_state['not-ok']))
print("ssm:")
print(" ok:" + str(self.ssm_state['ok']))
print(" not-ok:" + str(self.ssm_state['not-ok']))
print("sts:")
print(" ok:" + str(self.sts_state['ok']))
print(" not-ok:" + str(self.sts_state['not-ok']))
print("tag:")
print(" ok:" + str(self.tag_state['ok']))
print(" not-ok:" + str(self.tag_state['not-ok']))
def _get_cloudtrail_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_cloudwatch_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_config_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_dynamodb_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_ec2_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_ecs_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_eks_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_elasticloadbalancing_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_guardduty_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_iam_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_inspector_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_kms_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_lambda_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_logs_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_organizations_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_pricing_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_route53_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_s3_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_securityhub_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_ssm_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_sts_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_tag_state(self):
return { 'ok': self.accounts, 'not-ok': self.accounts }
def _get_accounts(self):
    # Parse the YAML configuration file and return the list of role accounts
    # configured under global -> aws_server.
    with open(self.yaml_configuration_file) as aws_yaml:
        configuration = yaml.load(aws_yaml, Loader=yaml.FullLoader)
    return configuration['global']['aws_server']['role_accounts']
if __name__ == '__main__':
    # Run the full environment check and print the per-service report.
    runner = EnvironmentTest()
    runner.make_report()
|
none
| 1
| 1.997129
| 2
|
|
ctpn/layers/models.py
|
adolf69/keras-ctpn
| 0
|
6628181
|
# -*- coding: utf-8 -*-
"""
File Name: models
Description : 模型
Author : mick.yi
date: 2019/3/13
"""
import keras
from keras import layers
from keras import Input, Model
import tensorflow as tf
from .base_net import resnet50
from .anchor import CtpnAnchor
from .target import CtpnTarget
from .losses import ctpn_cls_loss, ctpn_regress_loss, side_regress_loss
from .text_proposals import TextProposal
def ctpn_net(config, stage='train'):
    """Build the CTPN keras model.

    :param config: configuration object; fields read here include IMAGE_SHAPE,
        MAX_GT_INSTANCES, ANCHORS_HEIGHT/ANCHORS_WIDTH, NET_STRIDE,
        IMAGES_PER_GPU and the TEXT_PROPOSALS_* thresholds.
    :param stage: ``'train'`` wires target assignment plus the three loss
        layers; any other value wires the text-proposal inference head.
    :return: keras ``Model`` — training outputs are the loss tensors,
        inference outputs are (text_boxes, text_scores, image_meta).
    """
    # Network inputs (fixed-batch variants kept commented for reference):
    # input_image = Input(batch_shape=(config.IMAGES_PER_GPU,) + config.IMAGE_SHAPE, name='input_image')
    # input_image_meta = Input(batch_shape=(config.IMAGES_PER_GPU, 12), name='input_image_meta')
    # gt_class_ids = Input(batch_shape=(config.IMAGES_PER_GPU, config.MAX_GT_INSTANCES, 2), name='gt_class_ids')
    # gt_boxes = Input(batch_shape=(config.IMAGES_PER_GPU, config.MAX_GT_INSTANCES, 5), name='gt_boxes')
    input_image = Input(shape=config.IMAGE_SHAPE, name='input_image')
    input_image_meta = Input(shape=(12,), name='input_image_meta')
    gt_class_ids = Input(shape=(config.MAX_GT_INSTANCES, 2), name='gt_class_ids')
    gt_boxes = Input(shape=(config.MAX_GT_INSTANCES, 5), name='gt_boxes')
    # Prediction heads on top of the ResNet-50 backbone.
    base_features = resnet50(input_image)
    num_anchors = len(config.ANCHORS_HEIGHT)
    predict_class_logits, predict_deltas, predict_side_deltas = ctpn(base_features, num_anchors, 64, 256)
    # Anchor generation for every feature-map position.
    anchors, valid_anchors_indices = CtpnAnchor(config.ANCHORS_HEIGHT, config.ANCHORS_WIDTH, config.NET_STRIDE,
                                                name='gen_ctpn_anchors')(base_features)
    if stage == 'train':
        targets = CtpnTarget(config.IMAGES_PER_GPU,
                             train_anchors_num=config.TRAIN_ANCHORS_PER_IMAGE,
                             positive_ratios=config.ANCHOR_POSITIVE_RATIO,
                             max_gt_num=config.MAX_GT_INSTANCES,
                             name='ctpn_target')([gt_boxes, gt_class_ids, anchors, valid_anchors_indices])
        deltas, class_ids, anchors_indices = targets[:3]
        # Loss layers (regression, side refinement, classification).
        regress_loss = layers.Lambda(lambda x: ctpn_regress_loss(*x),
                                     name='ctpn_regress_loss')([predict_deltas, deltas, anchors_indices])
        side_loss = layers.Lambda(lambda x: side_regress_loss(*x),
                                  name='side_regress_loss')([predict_side_deltas, deltas, anchors_indices])
        cls_loss = layers.Lambda(lambda x: ctpn_cls_loss(*x),
                                 name='ctpn_class_loss')([predict_class_logits, class_ids, anchors_indices])
        model = Model(inputs=[input_image, gt_boxes, gt_class_ids],
                      outputs=[regress_loss, cls_loss, side_loss])
    else:
        text_boxes, text_scores, text_class_logits = TextProposal(config.IMAGES_PER_GPU,
                                                                  score_threshold=config.TEXT_PROPOSALS_MIN_SCORE,
                                                                  output_box_num=config.TEXT_PROPOSALS_MAX_NUM,
                                                                  iou_threshold=config.TEXT_PROPOSALS_NMS_THRESH,
                                                                  use_side_refine=config.USE_SIDE_REFINE,
                                                                  name='text_proposals')(
            [predict_deltas, predict_side_deltas, predict_class_logits, anchors, valid_anchors_indices])
        image_meta = layers.Lambda(lambda x: x)(input_image_meta)  # pass image meta through unchanged
        model = Model(inputs=[input_image, input_image_meta], outputs=[text_boxes, text_scores, image_meta])
    return model
def ctpn(base_features, num_anchors, rnn_units=128, fc_units=512):
    """CTPN head: 3x3 conv, bidirectional GRU along the width, per-anchor outputs.

    :param base_features: backbone feature map, (B,H,W,C)
    :param num_anchors: number of anchors per feature-map position
    :param rnn_units: hidden units of each GRU direction
    :param fc_units: output channels of the 1x1 "fully connected" conv
    :return: (class_logits, predict_deltas, predict_side_deltas)
    """
    x = layers.Conv2D(512, kernel_size=(3, 3), padding='same', name='pre_fc')(base_features)  # [B,H,W,512]
    # Run the RNN along the width axis (TimeDistributed over the height dim).
    rnn_forward = layers.TimeDistributed(layers.GRU(rnn_units, return_sequences=True, kernel_initializer='he_normal'),
                                         name='gru_forward')(x)
    rnn_backward = layers.TimeDistributed(
        layers.GRU(rnn_units, return_sequences=True, kernel_initializer='he_normal', go_backwards=True),
        name='gru_backward')(x)
    rnn_output = layers.Concatenate(name='gru_concat')([rnn_forward, rnn_backward])  # (B,H,W,256)
    # 1x1 conv used as a fully connected layer.
    fc_output = layers.Conv2D(fc_units, kernel_size=(1, 1), activation='relu', name='fc_output')(
        rnn_output)  # (B,H,W,512)
    # Classification: text / non-text logits per anchor.
    class_logits = layers.Conv2D(2 * num_anchors, kernel_size=(1, 1), name='cls')(fc_output)
    class_logits = layers.Reshape(target_shape=(-1, 2), name='cls_reshape')(class_logits)
    # Regression of the vertical center coordinate and height.
    predict_deltas = layers.Conv2D(2 * num_anchors, kernel_size=(1, 1), name='deltas')(fc_output)
    predict_deltas = layers.Reshape(target_shape=(-1, 2), name='deltas_reshape')(predict_deltas)
    # Side refinement (only an x offset needs to be predicted).
    predict_side_deltas = layers.Conv2D(num_anchors, kernel_size=(1, 1), name='side_deltas')(fc_output)
    predict_side_deltas = layers.Reshape(target_shape=(-1, 1), name='side_deltas_reshape')(
        predict_side_deltas)
    return class_logits, predict_deltas, predict_side_deltas
def get_layer(model, name):
    """Return the layer of *model* whose ``name`` matches, or ``None``."""
    matches = (candidate for candidate in model.layers if candidate.name == name)
    return next(matches, None)
def compile(keras_model, config, loss_names=None):
    """Compile the model: SGD optimizer, named loss layers, L2 regularization.

    :param keras_model: keras model whose loss layers are referenced by name
    :param config: configuration (LEARNING_RATE, LEARNING_MOMENTUM,
        GRADIENT_CLIP_NORM, LOSS_WEIGHTS, WEIGHT_DECAY)
    :param loss_names: names of the Lambda loss layers to attach; defaults to
        no extra losses. (Was a mutable default ``[]`` — replaced with the
        ``None`` sentinel per Python best practice; behavior is unchanged.)
    :return: None (the model is compiled in place)
    """
    if loss_names is None:
        loss_names = []
    # Optimizer.
    optimizer = keras.optimizers.SGD(
        lr=config.LEARNING_RATE, momentum=config.LEARNING_MOMENTUM,
        clipnorm=config.GRADIENT_CLIP_NORM)
    # Clear previously registered losses first to avoid duplicates on re-compile.
    keras_model._losses = []
    keras_model._per_input_losses = {}
    for name in loss_names:
        layer = get_layer(keras_model, name)
        if layer is None or layer.output in keras_model.losses:
            continue
        loss = (tf.reduce_mean(layer.output, keepdims=True)
                * config.LOSS_WEIGHTS.get(name, 1.))
        keras_model.add_loss(loss)
    # Add L2 regularization, skipping batch-norm gamma and beta weights.
    reg_losses = [
        keras.regularizers.l2(config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
        for w in keras_model.trainable_weights
        if 'gamma' not in w.name and 'beta' not in w.name]
    keras_model.add_loss(tf.add_n(reg_losses))
    # Compile with dummy (None) losses; the real ones were attached via add_loss.
    keras_model.compile(
        optimizer=optimizer,
        loss=[None] * len(keras_model.outputs))
    # Register one metric per loss layer so it shows up in training logs.
    for name in loss_names:
        if name in keras_model.metrics_names:
            continue
        layer = get_layer(keras_model, name)
        if layer is None:
            continue
        keras_model.metrics_names.append(name)
        loss = (
            tf.reduce_mean(layer.output, keepdims=True)
            * config.LOSS_WEIGHTS.get(name, 1.))
        keras_model.add_metric(loss, name)
def add_metrics(keras_model, metric_name_list, metric_tensor_list):
    """Attach named metrics to *keras_model*.

    :param keras_model: the model to extend
    :param metric_name_list: metric names, paired positionally with the tensors
    :param metric_tensor_list: metric tensors (each reduced to a scalar mean)
    :return: None
    """
    for metric_name, metric_tensor in zip(metric_name_list, metric_tensor_list):
        keras_model.metrics_names.append(metric_name)
        keras_model.add_metric(tf.reduce_mean(metric_tensor, keepdims=False))
|
# -*- coding: utf-8 -*-
"""
File Name: models
Description : 模型
Author : mick.yi
date: 2019/3/13
"""
import keras
from keras import layers
from keras import Input, Model
import tensorflow as tf
from .base_net import resnet50
from .anchor import CtpnAnchor
from .target import CtpnTarget
from .losses import ctpn_cls_loss, ctpn_regress_loss, side_regress_loss
from .text_proposals import TextProposal
def ctpn_net(config, stage='train'):
# 网络构建
# input_image = Input(batch_shape=(config.IMAGES_PER_GPU,) + config.IMAGE_SHAPE, name='input_image')
# input_image_meta = Input(batch_shape=(config.IMAGES_PER_GPU, 12), name='input_image_meta')
# gt_class_ids = Input(batch_shape=(config.IMAGES_PER_GPU, config.MAX_GT_INSTANCES, 2), name='gt_class_ids')
# gt_boxes = Input(batch_shape=(config.IMAGES_PER_GPU, config.MAX_GT_INSTANCES, 5), name='gt_boxes')
input_image = Input(shape=config.IMAGE_SHAPE, name='input_image')
input_image_meta = Input(shape=(12,), name='input_image_meta')
gt_class_ids = Input(shape=(config.MAX_GT_INSTANCES, 2), name='gt_class_ids')
gt_boxes = Input(shape=(config.MAX_GT_INSTANCES, 5), name='gt_boxes')
# 预测
base_features = resnet50(input_image)
num_anchors = len(config.ANCHORS_HEIGHT)
predict_class_logits, predict_deltas, predict_side_deltas = ctpn(base_features, num_anchors, 64, 256)
# anchors生成
anchors, valid_anchors_indices = CtpnAnchor(config.ANCHORS_HEIGHT, config.ANCHORS_WIDTH, config.NET_STRIDE,
name='gen_ctpn_anchors')(base_features)
if stage == 'train':
targets = CtpnTarget(config.IMAGES_PER_GPU,
train_anchors_num=config.TRAIN_ANCHORS_PER_IMAGE,
positive_ratios=config.ANCHOR_POSITIVE_RATIO,
max_gt_num=config.MAX_GT_INSTANCES,
name='ctpn_target')([gt_boxes, gt_class_ids, anchors, valid_anchors_indices])
deltas, class_ids, anchors_indices = targets[:3]
# 损失函数
regress_loss = layers.Lambda(lambda x: ctpn_regress_loss(*x),
name='ctpn_regress_loss')([predict_deltas, deltas, anchors_indices])
side_loss = layers.Lambda(lambda x: side_regress_loss(*x),
name='side_regress_loss')([predict_side_deltas, deltas, anchors_indices])
cls_loss = layers.Lambda(lambda x: ctpn_cls_loss(*x),
name='ctpn_class_loss')([predict_class_logits, class_ids, anchors_indices])
model = Model(inputs=[input_image, gt_boxes, gt_class_ids],
outputs=[regress_loss, cls_loss, side_loss])
else:
text_boxes, text_scores, text_class_logits = TextProposal(config.IMAGES_PER_GPU,
score_threshold=config.TEXT_PROPOSALS_MIN_SCORE,
output_box_num=config.TEXT_PROPOSALS_MAX_NUM,
iou_threshold=config.TEXT_PROPOSALS_NMS_THRESH,
use_side_refine=config.USE_SIDE_REFINE,
name='text_proposals')(
[predict_deltas, predict_side_deltas, predict_class_logits, anchors, valid_anchors_indices])
image_meta = layers.Lambda(lambda x: x)(input_image_meta) # 原样返回
model = Model(inputs=[input_image, input_image_meta], outputs=[text_boxes, text_scores, image_meta])
return model
def ctpn(base_features, num_anchors, rnn_units=128, fc_units=512):
"""
ctpn网络
:param base_features: (B,H,W,C)
:param num_anchors: anchors个数
:param rnn_units:
:param fc_units:
:return:
"""
x = layers.Conv2D(512, kernel_size=(3, 3), padding='same', name='pre_fc')(base_features) # [B,H,W,512]
# 沿着宽度方式做rnn
rnn_forward = layers.TimeDistributed(layers.GRU(rnn_units, return_sequences=True, kernel_initializer='he_normal'),
name='gru_forward')(x)
rnn_backward = layers.TimeDistributed(
layers.GRU(rnn_units, return_sequences=True, kernel_initializer='he_normal', go_backwards=True),
name='gru_backward')(x)
rnn_output = layers.Concatenate(name='gru_concat')([rnn_forward, rnn_backward]) # (B,H,W,256)
# conv实现fc
fc_output = layers.Conv2D(fc_units, kernel_size=(1, 1), activation='relu', name='fc_output')(
rnn_output) # (B,H,W,512)
# 分类
class_logits = layers.Conv2D(2 * num_anchors, kernel_size=(1, 1), name='cls')(fc_output)
class_logits = layers.Reshape(target_shape=(-1, 2), name='cls_reshape')(class_logits)
# 中心点垂直坐标和高度回归
predict_deltas = layers.Conv2D(2 * num_anchors, kernel_size=(1, 1), name='deltas')(fc_output)
predict_deltas = layers.Reshape(target_shape=(-1, 2), name='deltas_reshape')(predict_deltas)
# 侧边精调(只需要预测x偏移即可)
predict_side_deltas = layers.Conv2D(num_anchors, kernel_size=(1, 1), name='side_deltas')(fc_output)
predict_side_deltas = layers.Reshape(target_shape=(-1, 1), name='side_deltas_reshape')(
predict_side_deltas)
return class_logits, predict_deltas, predict_side_deltas
def get_layer(model, name):
for layer in model.layers:
if layer.name == name:
return layer
return None
def compile(keras_model, config, loss_names=[]):
"""
编译模型,增加损失函数,L2正则化以
:param keras_model:
:param config:
:param loss_names: 损失函数列表
:return:
"""
# 优化目标
optimizer = keras.optimizers.SGD(
lr=config.LEARNING_RATE, momentum=config.LEARNING_MOMENTUM,
clipnorm=config.GRADIENT_CLIP_NORM)
# 增加损失函数,首先清除之前的,防止重复
keras_model._losses = []
keras_model._per_input_losses = {}
for name in loss_names:
layer = get_layer(keras_model, name)
if layer is None or layer.output in keras_model.losses:
continue
loss = (tf.reduce_mean(layer.output, keepdims=True)
* config.LOSS_WEIGHTS.get(name, 1.))
keras_model.add_loss(loss)
# 增加L2正则化
# 跳过批标准化层的 gamma 和 beta 权重
reg_losses = [
keras.regularizers.l2(config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
keras_model.add_loss(tf.add_n(reg_losses))
# 编译
keras_model.compile(
optimizer=optimizer,
loss=[None] * len(keras_model.outputs)) # 使用虚拟损失
# 为每个损失函数增加度量
for name in loss_names:
if name in keras_model.metrics_names:
continue
layer = get_layer(keras_model, name)
if layer is None:
continue
keras_model.metrics_names.append(name)
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* config.LOSS_WEIGHTS.get(name, 1.))
keras_model.add_metric(loss, name)
def add_metrics(keras_model, metric_name_list, metric_tensor_list):
"""
增加度量
:param keras_model: 模型
:param metric_name_list: 度量名称列表
:param metric_tensor_list: 度量张量列表
:return: 无
"""
for name, tensor in zip(metric_name_list, metric_tensor_list):
keras_model.metrics_names.append(name)
keras_model.add_metric(tf.reduce_mean(tensor, keepdims=False))
|
zh
| 0.446755
|
# -*- coding: utf-8 -*- File Name: models Description : 模型 Author : mick.yi date: 2019/3/13 # 网络构建 # input_image = Input(batch_shape=(config.IMAGES_PER_GPU,) + config.IMAGE_SHAPE, name='input_image') # input_image_meta = Input(batch_shape=(config.IMAGES_PER_GPU, 12), name='input_image_meta') # gt_class_ids = Input(batch_shape=(config.IMAGES_PER_GPU, config.MAX_GT_INSTANCES, 2), name='gt_class_ids') # gt_boxes = Input(batch_shape=(config.IMAGES_PER_GPU, config.MAX_GT_INSTANCES, 5), name='gt_boxes') # 预测 # anchors生成 # 损失函数 # 原样返回 ctpn网络 :param base_features: (B,H,W,C) :param num_anchors: anchors个数 :param rnn_units: :param fc_units: :return: # [B,H,W,512] # 沿着宽度方式做rnn # (B,H,W,256) # conv实现fc # (B,H,W,512) # 分类 # 中心点垂直坐标和高度回归 # 侧边精调(只需要预测x偏移即可) 编译模型,增加损失函数,L2正则化以 :param keras_model: :param config: :param loss_names: 损失函数列表 :return: # 优化目标 # 增加损失函数,首先清除之前的,防止重复 # 增加L2正则化 # 跳过批标准化层的 gamma 和 beta 权重 # 编译 # 使用虚拟损失 # 为每个损失函数增加度量 增加度量 :param keras_model: 模型 :param metric_name_list: 度量名称列表 :param metric_tensor_list: 度量张量列表 :return: 无
| 2.403418
| 2
|
05_Practice1/Step02/yj.py
|
StudyForCoding/BEAKJOON
| 0
|
6628182
|
# Read three burger prices and two drink prices from stdin; a set menu
# (cheapest burger + cheapest drink) is discounted by 50.
burger = [int(input()) for _ in range(3)]
drink = [int(input()) for _ in range(2)]
print(min(burger) + min(drink) - 50)
|
burger = []
drink = []
for i in range(3):
a = int(input())
burger.append(a)
for i in range(2):
b = int(input())
drink.append(b)
print(min(burger)+min(drink)-50)
|
none
| 1
| 3.734303
| 4
|
|
tests/test_table.py
|
vpv11110000/pyss
| 0
|
6628183
|
<gh_stars>0
# #!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long,missing-docstring,bad-whitespace
import sys
import os
import unittest
import random
DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep
sys.path.append(DIRNAME_MODULE)
sys.path.append(DIRNAME_MODULE + "pyss" + os.sep)
from pyss import pyssobject
from pyss.pyss_model import PyssModel
from pyss.segment import Segment
from pyss.generate import Generate
from pyss.terminate import Terminate
from pyss import logger
from pyss.table import Table
from pyss.handle import Handle
from pyss.enter import Enter
from pyss.leave import Leave
from pyss.table import Table
from pyss.advance import Advance
from pyss.preempt import Preempt
from pyss.g_return import GReturn
from pyss.facility import Facility
from pyss.seize import Seize
from pyss.release import Release
from pyss.transfer import Transfer
from pyss.test import Test
from pyss.queue import Queue
from pyss.depart import Depart
from pyss.split import Split
from pyss.transact import Transact
from pyss.pyss_const import *
class TestTable(unittest.TestCase):
    """Unit tests for pyss.table.Table construction and transact handling."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_init_001(self):
        # A Table must be attached to a model: None is rejected.
        with self.assertRaises(pyssobject.ErrorIsNone) as context:
            Table(None, tableName="T1", argFunc=lambda o, t: "P1", limitUpFirst=1.0, widthInt=1.0, countInt=20)

    def test_init_002(self):
        m = PyssModel(optionz=None)
        m[OPTIONS].setAllFalse()
        # Construction with a valid model succeeds.
        Table(m, tableName="T1", argFunc=lambda o, t: "P1", limitUpFirst=1.0, widthInt=1.0, countInt=20)

    def test_001(self):
        logger.info("--- test_001 ----------------------------------")
        m = PyssModel(optionz=None)
        m[OPTIONS].setAllFalse()
        # All-None arguments must raise.
        with self.assertRaises(Exception) as e:
            Table(m, tableName=None, argFunc=None, limitUpFirst=None, widthInt=None, countInt=None)

    def test_002(self):
        logger.info("--- test_002 ----------------------------------")
        m = PyssModel(optionz=None)
        m[OPTIONS].setAllFalse()
        # A zero interval width is invalid and must raise.
        with self.assertRaises(Exception) as e:
            Table(m, tableName="table_stat", argFunc="P1", limitUpFirst=1, widthInt=0, countInt=2)

    def test_003(self):
        logger.info("--- test_003 ----------------------------------")
        m = PyssModel(optionz=None)
        m[OPTIONS].setAllFalse()

        def argFunc(owner, transact):
            return random.randint(1, 9)

        # No explicit interval layout: the table must still accept transacts.
        tbl = Table(m, tableName="table_stat", argFunc=argFunc, limitUpFirst=None, widthInt=None, countInt=None)
        timeCreated = 0
        while timeCreated < 1000:
            t = Transact(None, timeCreated, priority=0)
            tbl.handleTransact(t, coef=1)
            timeCreated += 1
        logger.info(tbl.table2str())

    def test_004(self):
        logger.info("--- test_004 ----------------------------------")
        m = PyssModel(optionz=None)
        m[OPTIONS].setAllFalse()
        ARGUMENT = "ARGUMENT"

        def argFunc(owner, tranzact):
            return tranzact[ARGUMENT]

        tbl = Table(m, tableName="table_stat", argFunc=argFunc, limitUpFirst=1.0, widthInt=1.0, countInt=10)
        # `xrange` is Python 2 only and raises NameError on Python 3; `range`
        # behaves identically for this bounded loop on both versions.
        for timeCreated in range(1, 7):
            t = Transact(None, timeCreated, priority=0)
            t[NUM] = timeCreated
            t[ARGUMENT] = timeCreated % 7
            tbl.handleTransact(t, coef=1)
        # Arguments 1..6 each land in exactly one interval; nothing overflows.
        self.assertEqual(tbl[INTERVALS][POSITIVE_INFINITY], 0, "tbl[INTERVALS][POSITIVE_INFINITY], 0")
        self.assertEqual(tbl[INTERVALS][NEGATIVE_INFINITY], 0, "tbl[INTERVALS][NEGATIVE_INFINITY], 0")
        self.assertListEqual(tbl[LIST], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0])
        l = tbl[LIST]
        for z in l:
            y = tbl[INTERVALS][z]
            if (z > 1) and (z < 8):
                self.assertEqual(y, 1, "x=%f y=%f" % (z, y))
            else:
                self.assertEqual(y, 0)

    def test_005(self):
        logger.info("--- test_005 ----------------------------------")
        m = PyssModel(optionz=None)
        m[OPTIONS].setAllFalse()

        def argFunc(owner, transact):
            return random.randint(1, 8)

        tbl = Table(m, tableName="table_stat", argFunc=argFunc, limitUpFirst=2, widthInt=1, countInt=8)
        timeCreated = 0
        while timeCreated < 1000:
            t = Transact(None, timeCreated, priority=0)
            tbl.handleTransact(t, coef=1)
            timeCreated += 1
        logger.info(tbl.table2str())

    def test_006(self):
        logger.info("--- test_006 ----------------------------------")
        m = PyssModel(optionz=None)
        m[OPTIONS].setAllFalse()

        def argFunc(owner, transact):
            return random.randint(1, 2)

        # countInt=1: values above the single interval fall into the overflow bin.
        tbl = Table(m, tableName="table_stat", argFunc=argFunc, limitUpFirst=2, widthInt=1, countInt=1)
        timeCreated = 0
        while timeCreated < 1000:
            t = Transact(None, timeCreated, priority=0)
            tbl.handleTransact(t, coef=1)
            timeCreated += 1
        logger.info(tbl.table2str())
if __name__ == '__main__':
    # Run this module's tests directly; the module name is given explicitly.
    unittest.main(module="test_table")
|
# #!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long,missing-docstring,bad-whitespace
import sys
import os
import unittest
import random
DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep
sys.path.append(DIRNAME_MODULE)
sys.path.append(DIRNAME_MODULE + "pyss" + os.sep)
from pyss import pyssobject
from pyss.pyss_model import PyssModel
from pyss.segment import Segment
from pyss.generate import Generate
from pyss.terminate import Terminate
from pyss import logger
from pyss.table import Table
from pyss.handle import Handle
from pyss.enter import Enter
from pyss.leave import Leave
from pyss.table import Table
from pyss.advance import Advance
from pyss.preempt import Preempt
from pyss.g_return import GReturn
from pyss.facility import Facility
from pyss.seize import Seize
from pyss.release import Release
from pyss.transfer import Transfer
from pyss.test import Test
from pyss.queue import Queue
from pyss.depart import Depart
from pyss.split import Split
from pyss.transact import Transact
from pyss.pyss_const import *
class TestTable(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_init_001(self):
#
with self.assertRaises(pyssobject.ErrorIsNone) as context:
Table(None, tableName="T1", argFunc=lambda o, t: "P1", limitUpFirst=1.0, widthInt=1.0, countInt=20)
def test_init_002(self):
m = PyssModel(optionz=None)
m[OPTIONS].setAllFalse()
#
Table(m, tableName="T1", argFunc=lambda o, t: "P1", limitUpFirst=1.0, widthInt=1.0, countInt=20)
def test_001(self):
logger.info("--- test_001 ----------------------------------")
m = PyssModel(optionz=None)
m[OPTIONS].setAllFalse()
with self.assertRaises(Exception) as e:
Table(m, tableName=None, argFunc=None, limitUpFirst=None, widthInt=None, countInt=None)
def test_002(self):
logger.info("--- test_002 ----------------------------------")
m = PyssModel(optionz=None)
m[OPTIONS].setAllFalse()
with self.assertRaises(Exception) as e:
Table(m, tableName="table_stat", argFunc="P1", limitUpFirst=1, widthInt=0, countInt=2)
def test_003(self):
logger.info("--- test_003 ----------------------------------")
m = PyssModel(optionz=None)
m[OPTIONS].setAllFalse()
def argFunc(owner, transact):
return random.randint(1, 9)
tbl = Table(m, tableName="table_stat", argFunc=argFunc, limitUpFirst=None, widthInt=None, countInt=None)
timeCreated = 0
while timeCreated < 1000:
t = Transact(None, timeCreated, priority=0)
# t[P1]=0.1
tbl.handleTransact(t, coef=1)
timeCreated += 1
logger.info(tbl.table2str())
def test_004(self):
logger.info("--- test_004 ----------------------------------")
m = PyssModel(optionz=None)
m[OPTIONS].setAllFalse()
ARGUMENT = "ARGUMENT"
def argFunc(owner, tranzact):
return tranzact[ARGUMENT]
tbl = Table(m, tableName="table_stat", argFunc=argFunc, limitUpFirst=1.0, widthInt=1.0, countInt=10)
for timeCreated in xrange(1, 7):
t = Transact(None, timeCreated, priority=0)
t[NUM] = timeCreated
t[ARGUMENT] = timeCreated % 7
# t[P1]=0.1
tbl.handleTransact(t, coef=1)
self.assertEqual(tbl[INTERVALS][POSITIVE_INFINITY], 0, "tbl[INTERVALS][POSITIVE_INFINITY], 0")
self.assertEqual(tbl[INTERVALS][NEGATIVE_INFINITY], 0, "tbl[INTERVALS][NEGATIVE_INFINITY], 0")
self.assertListEqual(tbl[LIST], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0])
l = tbl[LIST]
for z in l:
y = tbl[INTERVALS][z]
if (z>1) and (z < 8):
self.assertEqual(y, 1, "x=%f y=%f" % (z, y))
else:
self.assertEqual(y, 0)
def test_005(self):
logger.info("--- test_005 ----------------------------------")
m = PyssModel(optionz=None)
m[OPTIONS].setAllFalse()
def argFunc(owner, transact):
return random.randint(1, 8)
tbl = Table(m, tableName="table_stat", argFunc=argFunc, limitUpFirst=2, widthInt=1, countInt=8)
timeCreated = 0
while timeCreated < 1000:
t = Transact(None, timeCreated, priority=0)
# t[P1]=0.1
tbl.handleTransact(t, coef=1)
timeCreated += 1
logger.info(tbl.table2str())
def test_006(self):
logger.info("--- test_006 ----------------------------------")
m = PyssModel(optionz=None)
m[OPTIONS].setAllFalse()
def argFunc(owner, transact):
return random.randint(1, 2)
tbl = Table(m, tableName="table_stat", argFunc=argFunc, limitUpFirst=2, widthInt=1, countInt=1)
timeCreated = 0
while timeCreated < 1000:
t = Transact(None, timeCreated, priority=0)
# t[P1]=0.1
tbl.handleTransact(t, coef=1)
timeCreated += 1
logger.info(tbl.table2str())
if __name__ == '__main__':
unittest.main(module="test_table")
|
en
| 0.507895
|
# #!/usr/bin/python # -*- coding: utf-8 -*- # pylint: disable=line-too-long,missing-docstring,bad-whitespace # # # t[P1]=0.1 # t[P1]=0.1 # t[P1]=0.1 # t[P1]=0.1
| 2.01126
| 2
|
src/tools/fuse/src/elektra_fuse/__init__.py
|
dev2718/libelektra
| 188
|
6628184
|
import argparse, logging, logging.handlers, sys
from pathlib import Path
from fuse import FUSE
import kdb
from .rootlevel_resolver import RootlevelResolver
from . import elektra_util
def main():
    """Parse CLI arguments, configure logging, and mount the Elektra FUSE filesystem."""
    parser = argparse.ArgumentParser()
    parser.add_argument('mountpoint')
    parser.add_argument('-f', '--foreground', default = False)
    parser.add_argument('-p', '--parent_key', default = "/")
    parser.add_argument('-l', '--logger', default = "stdout", choices = ["syslog", "stdout", "none"])
    parser.add_argument('-ll', '--loglevel', default = "DEBUG", choices = ["INFO", "DEBUG", "ERROR", "CRITICAL", "FATAL", "WARN"])
    parser.add_argument('-a', '--allow-other', default = True)
    parser.add_argument('-nt', '--nothreads', default = True)
    args = parser.parse_args()

    # Validate parent_key: it must be a valid key in the cascading namespace.
    # NOTE: the original raised NameError *inside* the KeyInvalidName handler,
    # which a sibling `except NameError` cannot catch — the friendly error
    # message below was skipped for invalid key names. Handling both in one
    # clause routes every invalid-key case through the same exit path.
    try:
        parent_key = kdb.Key(args.parent_key)
        if not parent_key.isValid() or not parent_key.isCascading():
            raise NameError
        elektra_util.parent_key = parent_key
    except (kdb.kdb.KeyInvalidName, NameError):
        print("parent_key needs to be a valid key in the cascading namespace", file=sys.stderr)
        # Was sys.exit("1"), which prints "1" to stderr; exit status 1 is intended.
        sys.exit(1)

    # Configure the logging backend.
    logging.basicConfig(level = getattr(logging, args.loglevel))
    logger = logging.getLogger()
    if args.logger == "syslog":
        logger.addHandler(logging.handlers.SysLogHandler(address = '/dev/log'))
    elif args.logger == "none":
        logger.propagate = False
    elif args.logger == "stdout":
        pass

    FUSE(RootlevelResolver(args.mountpoint), args.mountpoint, foreground = args.foreground, allow_other = args.allow_other, nothreads = args.nothreads)


# The guard must follow the definition of main(); in the original it preceded
# it, which raises NameError when this file is executed directly as a script.
if __name__ == '__main__':
    main()
|
import argparse, logging, logging.handlers, sys
from pathlib import Path
from fuse import FUSE
import kdb
from .rootlevel_resolver import RootlevelResolver
from . import elektra_util
if __name__ == '__main__':
main()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('mountpoint')
parser.add_argument('-f', '--foreground', default = False)
parser.add_argument('-p', '--parent_key', default = "/")
parser.add_argument('-l', '--logger', default = "stdout", choices = ["syslog", "stdout", "none"])
parser.add_argument('-ll', '--loglevel', default = "DEBUG", choices = ["INFO", "DEBUG", "ERROR", "CRITICAL", "FATAL", "WARN"])
parser.add_argument('-a', '--allow-other', default = True)
parser.add_argument('-nt', '--nothreads', default = True)
args = parser.parse_args()
#validate parent_key
try:
parent_key = kdb.Key(args.parent_key)
if not parent_key.isValid() or not parent_key.isCascading():
raise NameError
elektra_util.parent_key = parent_key
except kdb.kdb.KeyInvalidName:
raise NameError
except NameError:
print("parent_key needs to be a valid key in the cascading namespace", file=sys.stderr)
sys.exit("1")
#configure logging
logging.basicConfig(level = getattr(logging, args.loglevel))
logger = logging.getLogger()
if args.logger == "syslog":
logger.addHandler(logging.handlers.SysLogHandler(address = '/dev/log'))
elif args.logger == "none":
logger.propagate = False
elif args.logger == "stdout":
pass
FUSE(RootlevelResolver(args.mountpoint), args.mountpoint, foreground = args.foreground, allow_other = args.allow_other, nothreads = args.nothreads)
|
en
| 0.13503
|
#validate parent_key #configure logging
| 2.016932
| 2
|
concierge/admin.py
|
silverfix/django-concierge
| 2
|
6628185
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, absolute_import
from django.contrib import admin
from django.contrib.admin import register
from django.contrib.auth import admin as auth_admin, models as auth_models
from django.contrib.auth.forms import UserChangeForm, AdminPasswordChangeForm
from django.utils.translation import ugettext_lazy as _
from . import models
from . import forms
admin.site.unregister(auth_models.Group)
@register(models.User)
class UserAdmin(auth_admin.UserAdmin):
    """Admin configuration for the project's email-based User model."""

    # Identify and sort users by email rather than username.
    ordering = ['email']
    list_display = ['email', 'is_staff', 'is_active']
    fieldsets = [
        (None, {'fields': ('email', 'password')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser', 'user_permissions')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
    ]
    add_fieldsets = [
        (None, {
            'classes': ('wide',),
            # NOTE(review): '<PASSWORD>' appears to be dataset anonymization of
            # the original field names (presumably 'password1'/'password2') —
            # confirm against the upstream repository before relying on this.
            'fields': ('email', '<PASSWORD>', '<PASSWORD>'),
        }),
    ]
    add_form = forms.SignupForm
    form = UserChangeForm
    change_password_form = AdminPasswordChangeForm
    list_filter = ['is_staff', 'is_superuser', 'is_active']
    search_fields = ['email']
    filter_horizontal = ['user_permissions']
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, absolute_import
from django.contrib import admin
from django.contrib.admin import register
from django.contrib.auth import admin as auth_admin, models as auth_models
from django.contrib.auth.forms import UserChangeForm, AdminPasswordChangeForm
from django.utils.translation import ugettext_lazy as _
from . import models
from . import forms
admin.site.unregister(auth_models.Group)
@register(models.User)
class UserAdmin(auth_admin.UserAdmin):
ordering = ['email']
list_display = ['email', 'is_staff', 'is_active']
fieldsets = [
(None, {'fields': ('email', 'password')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
]
add_fieldsets = [
(None, {
'classes': ('wide',),
'fields': ('email', '<PASSWORD>', '<PASSWORD>'),
}),
]
add_form = forms.SignupForm
form = UserChangeForm
change_password_form = AdminPasswordChangeForm
list_filter = ['is_staff', 'is_superuser', 'is_active']
search_fields = ['email']
filter_horizontal = ['user_permissions']
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 1.779876
| 2
|
qiskit/circuit/library/grover_operator.py
|
WiFisunset/qiskit-terra
| 1
|
6628186
|
<reponame>WiFisunset/qiskit-terra
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Grover operator."""
from typing import List, Optional, Union
import numpy
from qiskit.circuit import QuantumCircuit, QuantumRegister, AncillaRegister
from qiskit.quantum_info import Statevector, Operator, DensityMatrix
from .standard_gates import MCXGate
class GroverOperator(QuantumCircuit):
    r"""The Grover operator.
    Grover's search algorithm [1, 2] consists of repeated applications of the so-called
    Grover operator used to amplify the amplitudes of the desired output states.
    This operator, :math:`\mathcal{Q}`, consists of the phase oracle, :math:`\mathcal{S}_f`,
    zero phase-shift or zero reflection, :math:`\mathcal{S}_0`, and an
    input state preparation :math:`\mathcal{A}`:
    .. math::
        \mathcal{Q} = \mathcal{A} \mathcal{S}_0 \mathcal{A}^\dagger \mathcal{S}_f
    In the standard Grover search we have :math:`\mathcal{A} = H^{\otimes n}`:
    .. math::
        \mathcal{Q} = H^{\otimes n} \mathcal{S}_0 H^{\otimes n} \mathcal{S}_f
                    = D \mathcal{S_f}
    The operation :math:`D = H^{\otimes n} \mathcal{S}_0 H^{\otimes n}` is also referred to as
    diffusion operator. In this formulation we can see that Grover's operator consists of two
    steps: first, the phase oracle multiplies the good states by -1 (with :math:`\mathcal{S}_f`)
    and then the whole state is reflected around the mean (with :math:`D`).
    This class allows setting a different state preparation, as in quantum amplitude
    amplification (a generalization of Grover's algorithm), :math:`\mathcal{A}` might not be
    a layer of Hadamard gates [3].
    The action of the phase oracle :math:`\mathcal{S}_f` is defined as
    .. math::
        \mathcal{S}_f: |x\rangle \mapsto (-1)^{f(x)}|x\rangle
    where :math:`f(x) = 1` if :math:`x` is a good state and 0 otherwise. To highlight the fact
    that this oracle flips the phase of the good states and does not flip the state of a result
    qubit, we call :math:`\mathcal{S}_f` a phase oracle.
    Note that you can easily construct a phase oracle from a bitflip oracle by sandwiching the
    controlled X gate on the result qubit by a X and H gate. For instance
    .. parsed-literal::
        Bitflip oracle     Phaseflip oracle
        q_0: ──■──         q_0: ────────────■────────────
             ┌─┴─┐             ┌───┐┌───┐┌─┴─┐┌───┐┌───┐
        out: ┤ X ├        out: ┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├
             └───┘             └───┘└───┘└───┘└───┘└───┘
    There is some flexibility in defining the oracle and :math:`\mathcal{A}` operator. Before the
    Grover operator is applied in Grover's algorithm, the qubits are first prepared with one
    application of the :math:`\mathcal{A}` operator (or Hadamard gates in the standard formulation).
    Thus, we always have operation of the form
    :math:`\mathcal{A} \mathcal{S}_f \mathcal{A}^\dagger`. Therefore it is possible to move
    bitflip logic into :math:`\mathcal{A}` and leaving the oracle only to do phaseflips via Z gates
    based on the bitflips. One possible use-case for this are oracles that do not uncompute the
    state qubits.
    The zero reflection :math:`\mathcal{S}_0` is usually defined as
    .. math::
        \mathcal{S}_0 = 2 |0\rangle^{\otimes n} \langle 0|^{\otimes n} - \mathbb{I}_n
    where :math:`\mathbb{I}_n` is the identity on :math:`n` qubits.
    By default, this class implements the negative version
    :math:`2 |0\rangle^{\otimes n} \langle 0|^{\otimes n} - \mathbb{I}_n`, since this can simply
    be implemented with a multi-controlled Z sandwiched by X gates on the target qubit and the
    introduced global phase does not matter for Grover's algorithm.
    Examples:
        >>> from qiskit.circuit import QuantumCircuit
        >>> from qiskit.circuit.library import GroverOperator
        >>> oracle = QuantumCircuit(2)
        >>> oracle.z(0)  # good state = first qubit is |1>
        >>> grover_op = GroverOperator(oracle, insert_barriers=True)
        >>> grover_op.draw()
                 ┌───┐ ░ ┌───┐ ░ ┌───┐          ┌───┐      ░ ┌───┐
        state_0: ┤ Z ├─░─┤ H ├─░─┤ X ├───────■──┤ X ├──────░─┤ H ├
                 └───┘ ░ ├───┤ ░ ├───┤┌───┐┌─┴─┐├───┤┌───┐ ░ ├───┤
        state_1: ──────░─┤ H ├─░─┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├─░─┤ H ├
                       ░ └───┘ ░ └───┘└───┘└───┘└───┘└───┘ ░ └───┘
        >>> oracle = QuantumCircuit(1)
        >>> oracle.z(0)  # the qubit state |1> is the good state
        >>> state_preparation = QuantumCircuit(1)
        >>> state_preparation.ry(0.2, 0)  # non-uniform state preparation
        >>> grover_op = GroverOperator(oracle, state_preparation)
        >>> grover_op.draw()
                 ┌───┐┌──────────┐┌───┐┌───┐┌───┐┌─────────┐
        state_0: ┤ Z ├┤ RY(-0.2) ├┤ X ├┤ Z ├┤ X ├┤ RY(0.2) ├
                 └───┘└──────────┘└───┘└───┘└───┘└─────────┘
        >>> oracle = QuantumCircuit(4)
        >>> oracle.z(3)
        >>> reflection_qubits = [0, 3]
        >>> state_preparation = QuantumCircuit(4)
        >>> state_preparation.cry(0.1, 0, 3)
        >>> state_preparation.ry(0.5, 3)
        >>> grover_op = GroverOperator(oracle, state_preparation,
        ...                            reflection_qubits=reflection_qubits)
        >>> grover_op.draw()
                                              ┌───┐          ┌───┐
        state_0: ──────────────────────■──────┤ X ├───────■──┤ X ├──────────■────────────────
                                       │      └───┘       │  └───┘          │
        state_1: ──────────────────────┼──────────────────┼─────────────────┼────────────────
                                       │                  │                 │
        state_2: ──────────────────────┼──────────────────┼─────────────────┼────────────────
                 ┌───┐┌──────────┐┌────┴─────┐┌───┐┌───┐┌─┴─┐┌───┐┌───┐┌────┴────┐┌─────────┐
        state_3: ┤ Z ├┤ RY(-0.5) ├┤ RY(-0.1) ├┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├┤ RY(0.1) ├┤ RY(0.5) ├
                 └───┘└──────────┘└──────────┘└───┘└───┘└───┘└───┘└───┘└─────────┘└─────────┘
        >>> mark_state = Statevector.from_label('011')
        >>> diffuse_operator = 2 * DensityMatrix.from_label('000') - Operator.from_label('III')
        >>> grover_op = GroverOperator(oracle=mark_state, zero_reflection=diffuse_operator)
        >>> grover_op.draw(fold=70)
                 ┌─────────────────┐      ┌───┐                          »
        state_0: ┤0                ├──────┤ H ├──────────────────────────»
                 │                 │┌─────┴───┴─────┐     ┌───┐          »
        state_1: ┤1 UCRZ(0,pi,0,0) ├┤0              ├─────┤ H ├──────────»
                 │                 ││  UCRZ(pi/2,0) │┌────┴───┴────┐┌───┐»
        state_2: ┤2                ├┤1              ├┤ UCRZ(-pi/4) ├┤ H ├»
                 └─────────────────┘└───────────────┘└─────────────┘└───┘»
        «         ┌─────────────────┐      ┌───┐
        «state_0: ┤0                ├──────┤ H ├─────────────────────────
        «         │                 │┌─────┴───┴─────┐    ┌───┐
        «state_1: ┤1 UCRZ(pi,0,0,0) ├┤0              ├────┤ H ├──────────
        «         │                 ││  UCRZ(pi/2,0) │┌───┴───┴────┐┌───┐
        «state_2: ┤2                ├┤1              ├┤ UCRZ(pi/4) ├┤ H ├
        «         └─────────────────┘└───────────────┘└────────────┘└───┘
    References:
        [1]: L. K. Grover (1996), A fast quantum mechanical algorithm for database search,
             `arXiv:quant-ph/9605043 <https://arxiv.org/abs/quant-ph/9605043>`_.
        [2]: I. Chuang & M. Nielsen, Quantum Computation and Quantum Information,
             Cambridge: Cambridge University Press, 2000. Chapter 6.1.2.
        [3]: Brassard, G., Hoyer, P., Mosca, M., & Tapp, A. (2000).
             Quantum Amplitude Amplification and Estimation.
             `arXiv:quant-ph/0005055 <http://arxiv.org/abs/quant-ph/0005055>`_.
    """
    def __init__(
        self,
        oracle: Union[QuantumCircuit, Statevector],
        state_preparation: Optional[QuantumCircuit] = None,
        zero_reflection: Optional[Union[QuantumCircuit, DensityMatrix, Operator]] = None,
        reflection_qubits: Optional[List[int]] = None,
        insert_barriers: bool = False,
        mcx_mode: str = "noancilla",
        name: str = "Q",
    ) -> None:
        r"""
        Args:
            oracle: The phase oracle implementing a reflection about the bad state. Note that this
                is not a bitflip oracle, see the docstring for more information.
            state_preparation: The operator preparing the good and bad state.
                For Grover's algorithm, this is a n-qubit Hadamard gate and for amplitude
                amplification or estimation the operator :math:`\mathcal{A}`.
            zero_reflection: The reflection about the zero state, :math:`\mathcal{S}_0`.
            reflection_qubits: Qubits on which the zero reflection acts on.
            insert_barriers: Whether barriers should be inserted between the reflections and A.
            mcx_mode: The mode to use for building the default zero reflection.
            name: The name of the circuit.
        """
        super().__init__(name=name)
        # store inputs
        if isinstance(oracle, Statevector):
            from qiskit.circuit.library import Diagonal  # pylint: disable=cyclic-import
            oracle = Diagonal((-1) ** oracle.data)
        self._oracle = oracle
        if isinstance(zero_reflection, (Operator, DensityMatrix)):
            from qiskit.circuit.library import Diagonal  # pylint: disable=cyclic-import
            zero_reflection = Diagonal(zero_reflection.data.diagonal())
        self._zero_reflection = zero_reflection
        self._reflection_qubits = reflection_qubits
        self._state_preparation = state_preparation
        self._insert_barriers = insert_barriers
        self._mcx_mode = mcx_mode
        # build circuit
        self._build()
    @property
    def reflection_qubits(self):
        """Reflection qubits, on which S0 is applied (if S0 is not user-specified)."""
        if self._reflection_qubits is not None:
            return self._reflection_qubits
        num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
        return list(range(num_state_qubits))
    @property
    def zero_reflection(self) -> QuantumCircuit:
        """The subcircuit implementing the reflection about 0."""
        if self._zero_reflection is not None:
            return self._zero_reflection
        num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
        return _zero_reflection(num_state_qubits, self.reflection_qubits, self._mcx_mode)
    @property
    def state_preparation(self) -> QuantumCircuit:
        """The subcircuit implementing the A operator or Hadamards."""
        if self._state_preparation is not None:
            return self._state_preparation
        num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
        hadamards = QuantumCircuit(num_state_qubits, name="H")
        # apply Hadamards only on reflection qubits, rest will cancel out
        hadamards.h(self.reflection_qubits)
        return hadamards
    @property
    def oracle(self):
        """The oracle implementing a reflection about the bad state."""
        return self._oracle
    def _build(self):
        # Compose Q = A S_0 A^dagger S_f; circuits are applied left-to-right,
        # so the oracle S_f comes first, then A^dagger, S_0 and A.
        num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
        self.add_register(QuantumRegister(num_state_qubits, name="state"))
        num_ancillas = numpy.max(
            [
                self.oracle.num_ancillas,
                self.zero_reflection.num_ancillas,
                self.state_preparation.num_ancillas,
            ]
        )
        if num_ancillas > 0:
            self.add_register(AncillaRegister(num_ancillas, name="ancilla"))
        self.compose(self.oracle, list(range(self.oracle.num_qubits)), inplace=True)
        if self._insert_barriers:
            self.barrier()
        self.compose(
            self.state_preparation.inverse(),
            list(range(self.state_preparation.num_qubits)),
            inplace=True,
        )
        if self._insert_barriers:
            self.barrier()
        self.compose(
            self.zero_reflection, list(range(self.zero_reflection.num_qubits)), inplace=True
        )
        if self._insert_barriers:
            self.barrier()
        self.compose(
            self.state_preparation, list(range(self.state_preparation.num_qubits)), inplace=True
        )
        # minus sign
        self.global_phase = numpy.pi
# TODO use the oracle compiler or the bit string oracle
def _zero_reflection(
    num_state_qubits: int, qubits: List[int], mcx_mode: Optional[str] = None
) -> QuantumCircuit:
    """Build the reflection about |0> acting on ``qubits``, named ``S_0``."""
    state_register = QuantumRegister(num_state_qubits, "state")
    circuit = QuantumCircuit(state_register, name="S_0")
    # The multi-controlled X may need ancillas, depending on the chosen mode.
    ancilla_count = MCXGate.get_num_ancilla_qubits(len(qubits) - 1, mcx_mode)
    ancillas = []
    if ancilla_count > 0:
        ancilla_register = AncillaRegister(ancilla_count, "ancilla")
        circuit.add_register(ancilla_register)
        ancillas = ancilla_register[:]
    circuit.x(qubits)
    if len(qubits) > 1:
        # Multi-controlled Z, realized as H - MCX - H on the last qubit.
        target = qubits[-1]
        circuit.h(target)
        circuit.mcx(qubits[:-1], target, ancillas, mode=mcx_mode)
        circuit.h(target)
    else:
        # MCX does not allow 0 control qubits, therefore this is separate
        circuit.z(0)
    circuit.x(qubits)
    return circuit
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Grover operator."""
from typing import List, Optional, Union
import numpy
from qiskit.circuit import QuantumCircuit, QuantumRegister, AncillaRegister
from qiskit.quantum_info import Statevector, Operator, DensityMatrix
from .standard_gates import MCXGate
class GroverOperator(QuantumCircuit):
    r"""The Grover operator.
    Grover's search algorithm [1, 2] consists of repeated applications of the so-called
    Grover operator used to amplify the amplitudes of the desired output states.
    This operator, :math:`\mathcal{Q}`, consists of the phase oracle, :math:`\mathcal{S}_f`,
    zero phase-shift or zero reflection, :math:`\mathcal{S}_0`, and an
    input state preparation :math:`\mathcal{A}`:
    .. math::
        \mathcal{Q} = \mathcal{A} \mathcal{S}_0 \mathcal{A}^\dagger \mathcal{S}_f
    In the standard Grover search we have :math:`\mathcal{A} = H^{\otimes n}`:
    .. math::
        \mathcal{Q} = H^{\otimes n} \mathcal{S}_0 H^{\otimes n} \mathcal{S}_f
                    = D \mathcal{S_f}
    The operation :math:`D = H^{\otimes n} \mathcal{S}_0 H^{\otimes n}` is also referred to as
    diffusion operator. In this formulation we can see that Grover's operator consists of two
    steps: first, the phase oracle multiplies the good states by -1 (with :math:`\mathcal{S}_f`)
    and then the whole state is reflected around the mean (with :math:`D`).
    This class allows setting a different state preparation, as in quantum amplitude
    amplification (a generalization of Grover's algorithm), :math:`\mathcal{A}` might not be
    a layer of Hadamard gates [3].
    The action of the phase oracle :math:`\mathcal{S}_f` is defined as
    .. math::
        \mathcal{S}_f: |x\rangle \mapsto (-1)^{f(x)}|x\rangle
    where :math:`f(x) = 1` if :math:`x` is a good state and 0 otherwise. To highlight the fact
    that this oracle flips the phase of the good states and does not flip the state of a result
    qubit, we call :math:`\mathcal{S}_f` a phase oracle.
    Note that you can easily construct a phase oracle from a bitflip oracle by sandwiching the
    controlled X gate on the result qubit by a X and H gate. For instance
    .. parsed-literal::
        Bitflip oracle     Phaseflip oracle
        q_0: ──■──         q_0: ────────────■────────────
             ┌─┴─┐             ┌───┐┌───┐┌─┴─┐┌───┐┌───┐
        out: ┤ X ├        out: ┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├
             └───┘             └───┘└───┘└───┘└───┘└───┘
    There is some flexibility in defining the oracle and :math:`\mathcal{A}` operator. Before the
    Grover operator is applied in Grover's algorithm, the qubits are first prepared with one
    application of the :math:`\mathcal{A}` operator (or Hadamard gates in the standard formulation).
    Thus, we always have operation of the form
    :math:`\mathcal{A} \mathcal{S}_f \mathcal{A}^\dagger`. Therefore it is possible to move
    bitflip logic into :math:`\mathcal{A}` and leaving the oracle only to do phaseflips via Z gates
    based on the bitflips. One possible use-case for this are oracles that do not uncompute the
    state qubits.
    The zero reflection :math:`\mathcal{S}_0` is usually defined as
    .. math::
        \mathcal{S}_0 = 2 |0\rangle^{\otimes n} \langle 0|^{\otimes n} - \mathbb{I}_n
    where :math:`\mathbb{I}_n` is the identity on :math:`n` qubits.
    By default, this class implements the negative version
    :math:`2 |0\rangle^{\otimes n} \langle 0|^{\otimes n} - \mathbb{I}_n`, since this can simply
    be implemented with a multi-controlled Z sandwiched by X gates on the target qubit and the
    introduced global phase does not matter for Grover's algorithm.
    Examples:
        >>> from qiskit.circuit import QuantumCircuit
        >>> from qiskit.circuit.library import GroverOperator
        >>> oracle = QuantumCircuit(2)
        >>> oracle.z(0)  # good state = first qubit is |1>
        >>> grover_op = GroverOperator(oracle, insert_barriers=True)
        >>> grover_op.draw()
                 ┌───┐ ░ ┌───┐ ░ ┌───┐          ┌───┐      ░ ┌───┐
        state_0: ┤ Z ├─░─┤ H ├─░─┤ X ├───────■──┤ X ├──────░─┤ H ├
                 └───┘ ░ ├───┤ ░ ├───┤┌───┐┌─┴─┐├───┤┌───┐ ░ ├───┤
        state_1: ──────░─┤ H ├─░─┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├─░─┤ H ├
                       ░ └───┘ ░ └───┘└───┘└───┘└───┘└───┘ ░ └───┘
        >>> oracle = QuantumCircuit(1)
        >>> oracle.z(0)  # the qubit state |1> is the good state
        >>> state_preparation = QuantumCircuit(1)
        >>> state_preparation.ry(0.2, 0)  # non-uniform state preparation
        >>> grover_op = GroverOperator(oracle, state_preparation)
        >>> grover_op.draw()
                 ┌───┐┌──────────┐┌───┐┌───┐┌───┐┌─────────┐
        state_0: ┤ Z ├┤ RY(-0.2) ├┤ X ├┤ Z ├┤ X ├┤ RY(0.2) ├
                 └───┘└──────────┘└───┘└───┘└───┘└─────────┘
        >>> oracle = QuantumCircuit(4)
        >>> oracle.z(3)
        >>> reflection_qubits = [0, 3]
        >>> state_preparation = QuantumCircuit(4)
        >>> state_preparation.cry(0.1, 0, 3)
        >>> state_preparation.ry(0.5, 3)
        >>> grover_op = GroverOperator(oracle, state_preparation,
        ...                            reflection_qubits=reflection_qubits)
        >>> grover_op.draw()
                                              ┌───┐          ┌───┐
        state_0: ──────────────────────■──────┤ X ├───────■──┤ X ├──────────■────────────────
                                       │      └───┘       │  └───┘          │
        state_1: ──────────────────────┼──────────────────┼─────────────────┼────────────────
                                       │                  │                 │
        state_2: ──────────────────────┼──────────────────┼─────────────────┼────────────────
                 ┌───┐┌──────────┐┌────┴─────┐┌───┐┌───┐┌─┴─┐┌───┐┌───┐┌────┴────┐┌─────────┐
        state_3: ┤ Z ├┤ RY(-0.5) ├┤ RY(-0.1) ├┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├┤ RY(0.1) ├┤ RY(0.5) ├
                 └───┘└──────────┘└──────────┘└───┘└───┘└───┘└───┘└───┘└─────────┘└─────────┘
        >>> mark_state = Statevector.from_label('011')
        >>> diffuse_operator = 2 * DensityMatrix.from_label('000') - Operator.from_label('III')
        >>> grover_op = GroverOperator(oracle=mark_state, zero_reflection=diffuse_operator)
        >>> grover_op.draw(fold=70)
                 ┌─────────────────┐      ┌───┐                          »
        state_0: ┤0                ├──────┤ H ├──────────────────────────»
                 │                 │┌─────┴───┴─────┐     ┌───┐          »
        state_1: ┤1 UCRZ(0,pi,0,0) ├┤0              ├─────┤ H ├──────────»
                 │                 ││  UCRZ(pi/2,0) │┌────┴───┴────┐┌───┐»
        state_2: ┤2                ├┤1              ├┤ UCRZ(-pi/4) ├┤ H ├»
                 └─────────────────┘└───────────────┘└─────────────┘└───┘»
        «         ┌─────────────────┐      ┌───┐
        «state_0: ┤0                ├──────┤ H ├─────────────────────────
        «         │                 │┌─────┴───┴─────┐    ┌───┐
        «state_1: ┤1 UCRZ(pi,0,0,0) ├┤0              ├────┤ H ├──────────
        «         │                 ││  UCRZ(pi/2,0) │┌───┴───┴────┐┌───┐
        «state_2: ┤2                ├┤1              ├┤ UCRZ(pi/4) ├┤ H ├
        «         └─────────────────┘└───────────────┘└────────────┘└───┘
    References:
        [1]: L. K. Grover (1996), A fast quantum mechanical algorithm for database search,
             `arXiv:quant-ph/9605043 <https://arxiv.org/abs/quant-ph/9605043>`_.
        [2]: I. Chuang & M. Nielsen, Quantum Computation and Quantum Information,
             Cambridge: Cambridge University Press, 2000. Chapter 6.1.2.
        [3]: Brassard, G., Hoyer, P., Mosca, M., & Tapp, A. (2000).
             Quantum Amplitude Amplification and Estimation.
             `arXiv:quant-ph/0005055 <http://arxiv.org/abs/quant-ph/0005055>`_.
    """
    def __init__(
        self,
        oracle: Union[QuantumCircuit, Statevector],
        state_preparation: Optional[QuantumCircuit] = None,
        zero_reflection: Optional[Union[QuantumCircuit, DensityMatrix, Operator]] = None,
        reflection_qubits: Optional[List[int]] = None,
        insert_barriers: bool = False,
        mcx_mode: str = "noancilla",
        name: str = "Q",
    ) -> None:
        r"""
        Args:
            oracle: The phase oracle implementing a reflection about the bad state. Note that this
                is not a bitflip oracle, see the docstring for more information.
            state_preparation: The operator preparing the good and bad state.
                For Grover's algorithm, this is a n-qubit Hadamard gate and for amplitude
                amplification or estimation the operator :math:`\mathcal{A}`.
            zero_reflection: The reflection about the zero state, :math:`\mathcal{S}_0`.
            reflection_qubits: Qubits on which the zero reflection acts on.
            insert_barriers: Whether barriers should be inserted between the reflections and A.
            mcx_mode: The mode to use for building the default zero reflection.
            name: The name of the circuit.
        """
        super().__init__(name=name)
        # store inputs
        if isinstance(oracle, Statevector):
            from qiskit.circuit.library import Diagonal  # pylint: disable=cyclic-import
            oracle = Diagonal((-1) ** oracle.data)
        self._oracle = oracle
        if isinstance(zero_reflection, (Operator, DensityMatrix)):
            from qiskit.circuit.library import Diagonal  # pylint: disable=cyclic-import
            zero_reflection = Diagonal(zero_reflection.data.diagonal())
        self._zero_reflection = zero_reflection
        self._reflection_qubits = reflection_qubits
        self._state_preparation = state_preparation
        self._insert_barriers = insert_barriers
        self._mcx_mode = mcx_mode
        # build circuit
        self._build()
    @property
    def reflection_qubits(self):
        """Reflection qubits, on which S0 is applied (if S0 is not user-specified)."""
        if self._reflection_qubits is not None:
            return self._reflection_qubits
        num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
        return list(range(num_state_qubits))
    @property
    def zero_reflection(self) -> QuantumCircuit:
        """The subcircuit implementing the reflection about 0."""
        if self._zero_reflection is not None:
            return self._zero_reflection
        num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
        return _zero_reflection(num_state_qubits, self.reflection_qubits, self._mcx_mode)
    @property
    def state_preparation(self) -> QuantumCircuit:
        """The subcircuit implementing the A operator or Hadamards."""
        if self._state_preparation is not None:
            return self._state_preparation
        num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
        hadamards = QuantumCircuit(num_state_qubits, name="H")
        # apply Hadamards only on reflection qubits, rest will cancel out
        hadamards.h(self.reflection_qubits)
        return hadamards
    @property
    def oracle(self):
        """The oracle implementing a reflection about the bad state."""
        return self._oracle
    def _build(self):
        # Compose Q = A S_0 A^dagger S_f; circuits are applied left-to-right,
        # so the oracle S_f comes first, then A^dagger, S_0 and A.
        num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
        self.add_register(QuantumRegister(num_state_qubits, name="state"))
        num_ancillas = numpy.max(
            [
                self.oracle.num_ancillas,
                self.zero_reflection.num_ancillas,
                self.state_preparation.num_ancillas,
            ]
        )
        if num_ancillas > 0:
            self.add_register(AncillaRegister(num_ancillas, name="ancilla"))
        self.compose(self.oracle, list(range(self.oracle.num_qubits)), inplace=True)
        if self._insert_barriers:
            self.barrier()
        self.compose(
            self.state_preparation.inverse(),
            list(range(self.state_preparation.num_qubits)),
            inplace=True,
        )
        if self._insert_barriers:
            self.barrier()
        self.compose(
            self.zero_reflection, list(range(self.zero_reflection.num_qubits)), inplace=True
        )
        if self._insert_barriers:
            self.barrier()
        self.compose(
            self.state_preparation, list(range(self.state_preparation.num_qubits)), inplace=True
        )
        # minus sign
        self.global_phase = numpy.pi
# TODO use the oracle compiler or the bit string oracle
def _zero_reflection(
    num_state_qubits: int, qubits: List[int], mcx_mode: Optional[str] = None
) -> QuantumCircuit:
    """Build the reflection about |0> acting on ``qubits``, named ``S_0``."""
    state_register = QuantumRegister(num_state_qubits, "state")
    circuit = QuantumCircuit(state_register, name="S_0")
    # The multi-controlled X may need ancillas, depending on the chosen mode.
    ancilla_count = MCXGate.get_num_ancilla_qubits(len(qubits) - 1, mcx_mode)
    ancillas = []
    if ancilla_count > 0:
        ancilla_register = AncillaRegister(ancilla_count, "ancilla")
        circuit.add_register(ancilla_register)
        ancillas = ancilla_register[:]
    circuit.x(qubits)
    if len(qubits) > 1:
        # Multi-controlled Z, realized as H - MCX - H on the last qubit.
        target = qubits[-1]
        circuit.h(target)
        circuit.mcx(qubits[:-1], target, ancillas, mode=mcx_mode)
        circuit.h(target)
    else:
        # MCX does not allow 0 control qubits, therefore this is separate
        circuit.z(0)
    circuit.x(qubits)
    return circuit
|
en
| 0.544176
|
# This code is part of Qiskit. # # (C) Copyright IBM 2017, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. The Grover operator. The Grover operator. Grover's search algorithm [1, 2] consists of repeated applications of the so-called Grover operator used to amplify the amplitudes of the desired output states. This operator, :math:`\mathcal{Q}`, consists of the phase oracle, :math:`\mathcal{S}_f`, zero phase-shift or zero reflection, :math:`\mathcal{S}_0`, and an input state preparation :math:`\mathcal{A}`: .. math:: \mathcal{Q} = \mathcal{A} \mathcal{S}_0 \mathcal{A}^\dagger \mathcal{S}_f In the standard Grover search we have :math:`\mathcal{A} = H^{\otimes n}`: .. math:: \mathcal{Q} = H^{\otimes n} \mathcal{S}_0 H^{\otimes n} \mathcal{S}_f = D \mathcal{S_f} The operation :math:`D = H^{\otimes n} \mathcal{S}_0 H^{\otimes n}` is also referred to as diffusion operator. In this formulation we can see that Grover's operator consists of two steps: first, the phase oracle multiplies the good states by -1 (with :math:`\mathcal{S}_f`) and then the whole state is reflected around the mean (with :math:`D`). This class allows setting a different state preparation, as in quantum amplitude amplification (a generalization of Grover's algorithm), :math:`\mathcal{A}` might not be a layer of Hardamard gates [3]. The action of the phase oracle :math:`\mathcal{S}_f` is defined as .. math:: \mathcal{S}_f: |x\rangle \mapsto (-1)^{f(x)}|x\rangle where :math:`f(x) = 1` if :math:`x` is a good state and 0 otherwise. 
To highlight the fact that this oracle flips the phase of the good states and does not flip the state of a result qubit, we call :math:`\mathcal{S}_f` a phase oracle. Note that you can easily construct a phase oracle from a bitflip oracle by sandwiching the controlled X gate on the result qubit by a X and H gate. For instance .. parsed-literal:: Bitflip oracle Phaseflip oracle q_0: ──■── q_0: ────────────■──────────── ┌─┴─┐ ┌───┐┌───┐┌─┴─┐┌───┐┌───┐ out: ┤ X ├ out: ┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├ └───┘ └───┘└───┘└───┘└───┘└───┘ There is some flexibility in defining the oracle and :math:`\mathcal{A}` operator. Before the Grover operator is applied in Grover's algorithm, the qubits are first prepared with one application of the :math:`\mathcal{A}` operator (or Hadamard gates in the standard formulation). Thus, we always have operation of the form :math:`\mathcal{A} \mathcal{S}_f \mathcal{A}^\dagger`. Therefore it is possible to move bitflip logic into :math:`\mathcal{A}` and leaving the oracle only to do phaseflips via Z gates based on the bitflips. One possible use-case for this are oracles that do not uncompute the state qubits. The zero reflection :math:`\mathcal{S}_0` is usually defined as .. math:: \mathcal{S}_0 = 2 |0\rangle^{\otimes n} \langle 0|^{\otimes n} - \mathbb{I}_n where :math:`\mathbb{I}_n` is the identity on :math:`n` qubits. By default, this class implements the negative version :math:`2 |0\rangle^{\otimes n} \langle 0|^{\otimes n} - \mathbb{I}_n`, since this can simply be implemented with a multi-controlled Z sandwiched by X gates on the target qubit and the introduced global phase does not matter for Grover's algorithm. 
Examples: >>> from qiskit.circuit import QuantumCircuit >>> from qiskit.circuit.library import GroverOperator >>> oracle = QuantumCircuit(2) >>> oracle.z(0) # good state = first qubit is |1> >>> grover_op = GroverOperator(oracle, insert_barriers=True) >>> grover_op.draw() ┌───┐ ░ ┌───┐ ░ ┌───┐ ┌───┐ ░ ┌───┐ state_0: ┤ Z ├─░─┤ H ├─░─┤ X ├───────■──┤ X ├──────░─┤ H ├ └───┘ ░ ├───┤ ░ ├───┤┌───┐┌─┴─┐├───┤┌───┐ ░ ├───┤ state_1: ──────░─┤ H ├─░─┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├─░─┤ H ├ ░ └───┘ ░ └───┘└───┘└───┘└───┘└───┘ ░ └───┘ >>> oracle = QuantumCircuit(1) >>> oracle.z(0) # the qubit state |1> is the good state >>> state_preparation = QuantumCircuit(1) >>> state_preparation.ry(0.2, 0) # non-uniform state preparation >>> grover_op = GroverOperator(oracle, state_preparation) >>> grover_op.draw() ┌───┐┌──────────┐┌───┐┌───┐┌───┐┌─────────┐ state_0: ┤ Z ├┤ RY(-0.2) ├┤ X ├┤ Z ├┤ X ├┤ RY(0.2) ├ └───┘└──────────┘└───┘└───┘└───┘└─────────┘ >>> oracle = QuantumCircuit(4) >>> oracle.z(3) >>> reflection_qubits = [0, 3] >>> state_preparation = QuantumCircuit(4) >>> state_preparation.cry(0.1, 0, 3) >>> state_preparation.ry(0.5, 3) >>> grover_op = GroverOperator(oracle, state_preparation, ... 
reflection_qubits=reflection_qubits) >>> grover_op.draw() ┌───┐ ┌───┐ state_0: ──────────────────────■──────┤ X ├───────■──┤ X ├──────────■──────────────── │ └───┘ │ └───┘ │ state_1: ──────────────────────┼──────────────────┼─────────────────┼──────────────── │ │ │ state_2: ──────────────────────┼──────────────────┼─────────────────┼──────────────── ┌───┐┌──────────┐┌────┴─────┐┌───┐┌───┐┌─┴─┐┌───┐┌───┐┌────┴────┐┌─────────┐ state_3: ┤ Z ├┤ RY(-0.5) ├┤ RY(-0.1) ├┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├┤ RY(0.1) ├┤ RY(0.5) ├ └───┘└──────────┘└──────────┘└───┘└───┘└───┘└───┘└───┘└─────────┘└─────────┘ >>> mark_state = Statevector.from_label('011') >>> diffuse_operator = 2 * DensityMatrix.from_label('000') - Operator.from_label('III') >>> grover_op = GroverOperator(oracle=mark_state, zero_reflection=diffuse_operator) >>> grover_op.draw(fold=70) ┌─────────────────┐ ┌───┐ » state_0: ┤0 ├──────┤ H ├──────────────────────────» │ │┌─────┴───┴─────┐ ┌───┐ » state_1: ┤1 UCRZ(0,pi,0,0) ├┤0 ├─────┤ H ├──────────» │ ││ UCRZ(pi/2,0) │┌────┴───┴────┐┌───┐» state_2: ┤2 ├┤1 ├┤ UCRZ(-pi/4) ├┤ H ├» └─────────────────┘└───────────────┘└─────────────┘└───┘» « ┌─────────────────┐ ┌───┐ «state_0: ┤0 ├──────┤ H ├───────────────────────── « │ │┌─────┴───┴─────┐ ┌───┐ «state_1: ┤1 UCRZ(pi,0,0,0) ├┤0 ├────┤ H ├────────── « │ ││ UCRZ(pi/2,0) │┌───┴───┴────┐┌───┐ «state_2: ┤2 ├┤1 ├┤ UCRZ(pi/4) ├┤ H ├ « └─────────────────┘└───────────────┘└────────────┘└───┘ References: [1]: <NAME> (1996), A fast quantum mechanical algorithm for database search, `arXiv:quant-ph/9605043 <https://arxiv.org/abs/quant-ph/9605043>`_. [2]: <NAME> & <NAME>, Quantum Computation and Quantum Information, Cambridge: Cambridge University Press, 2000. Chapter 6.1.2. [3]: <NAME>., <NAME>., <NAME>., & <NAME>. (2000). Quantum Amplitude Amplification and Estimation. `arXiv:quant-ph/0005055 <http://arxiv.org/abs/quant-ph/0005055>`_. Args: oracle: The phase oracle implementing a reflection about the bad state. 
Note that this is not a bitflip oracle, see the docstring for more information. state_preparation: The operator preparing the good and bad state. For Grover's algorithm, this is a n-qubit Hadamard gate and for amplitude amplification or estimation the operator :math:`\mathcal{A}`. zero_reflection: The reflection about the zero state, :math:`\mathcal{S}_0`. reflection_qubits: Qubits on which the zero reflection acts on. insert_barriers: Whether barriers should be inserted between the reflections and A. mcx_mode: The mode to use for building the default zero reflection. name: The name of the circuit. # store inputs # pylint: disable=cyclic-import # pylint: disable=cyclic-import # build circuit Reflection qubits, on which S0 is applied (if S0 is not user-specified). The subcircuit implementing the reflection about 0. The subcircuit implementing the A operator or Hadamards. # apply Hadamards only on reflection qubits, rest will cancel out The oracle implementing a reflection about the bad state. # minus sign # TODO use the oracle compiler or the bit string oracle # MCX does not allow 0 control qubits, therefore this is separate
| 2.834606
| 3
|
tests/test_downloadermiddleware_robotstxt.py
|
eliasdorneles/scrapy
| 1
|
6628187
|
from __future__ import absolute_import
import re
from twisted.internet import reactor, error
from twisted.internet.defer import Deferred
from twisted.python import failure
from twisted.trial import unittest
from scrapy.downloadermiddlewares.robotstxt import RobotsTxtMiddleware
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.http import Request, Response, TextResponse
from scrapy.settings import Settings
from tests import mock
class RobotsTxtMiddlewareTest(unittest.TestCase):
def setUp(self):
self.crawler = mock.MagicMock()
self.crawler.settings = Settings()
self.crawler.engine.download = mock.MagicMock()
    def tearDown(self):
        # Drop the crawler so the next test rebuilds its mocks in setUp.
        del self.crawler
def test_robotstxt_settings(self):
self.crawler.settings = Settings()
self.crawler.settings.set('USER_AGENT', 'CustomAgent')
self.assertRaises(NotConfigured, RobotsTxtMiddleware, self.crawler)
def _get_successful_crawler(self):
crawler = self.crawler
crawler.settings.set('ROBOTSTXT_OBEY', True)
ROBOTS = re.sub(b'^\s+(?m)', b'', b'''
User-Agent: *
Disallow: /admin/
Disallow: /static/
''')
response = TextResponse('http://site.local/robots.txt', body=ROBOTS)
def return_response(request, spider):
deferred = Deferred()
reactor.callFromThread(deferred.callback, response)
return deferred
crawler.engine.download.side_effect = return_response
return crawler
    def test_robotstxt(self):
        """Disallowed paths are ignored once robots.txt has been fetched."""
        middleware = RobotsTxtMiddleware(self._get_successful_crawler())
        # There is a bit of neglect in robotstxt.py: robots.txt is fetched asynchronously,
        # and it is actually fetched only *after* first process_request completes.
        # So, first process_request will always succeed.
        # We defer test() because otherwise robots.txt download mock will be called after assertRaises failure.
        self.assertNotIgnored(Request('http://site.local'), middleware)
        def test(r):
            # By now the (mocked) robots.txt download has run on the reactor.
            self.assertNotIgnored(Request('http://site.local/allowed'), middleware)
            self.assertIgnored(Request('http://site.local/admin/main'), middleware)
            self.assertIgnored(Request('http://site.local/static/'), middleware)
        deferred = Deferred()
        deferred.addCallback(test)
        reactor.callFromThread(deferred.callback, None)
        return deferred
def test_robotstxt_meta(self):
middleware = RobotsTxtMiddleware(self._get_successful_crawler())
meta = {'dont_obey_robotstxt': True}
self.assertNotIgnored(Request('http://site.local', meta=meta), middleware)
def test(r):
self.assertNotIgnored(Request('http://site.local/allowed', meta=meta), middleware)
self.assertNotIgnored(Request('http://site.local/admin/main', meta=meta), middleware)
self.assertNotIgnored(Request('http://site.local/static/', meta=meta), middleware)
deferred = Deferred()
deferred.addCallback(test)
reactor.callFromThread(deferred.callback, None)
return deferred
def _get_garbage_crawler(self):
crawler = self.crawler
crawler.settings.set('ROBOTSTXT_OBEY', True)
response = Response('http://site.local/robots.txt', body=b'GIF89a\xd3\x00\xfe\x00\xa2')
def return_response(request, spider):
deferred = Deferred()
reactor.callFromThread(deferred.callback, response)
return deferred
crawler.engine.download.side_effect = return_response
return crawler
def test_robotstxt_garbage(self):
# garbage response should be discarded, equal 'allow all'
middleware = RobotsTxtMiddleware(self._get_garbage_crawler())
middleware._logerror = mock.MagicMock()
middleware.process_request(Request('http://site.local'), None)
self.assertNotIgnored(Request('http://site.local'), middleware)
def test(r):
self.assertNotIgnored(Request('http://site.local/allowed'), middleware)
self.assertNotIgnored(Request('http://site.local/admin/main'), middleware)
self.assertNotIgnored(Request('http://site.local/static/'), middleware)
deferred = Deferred()
deferred.addCallback(test)
deferred.addErrback(lambda _: self.assertIsNone(middleware._logerror.assert_any_call()))
reactor.callFromThread(deferred.callback, None)
return deferred
def _get_emptybody_crawler(self):
crawler = self.crawler
crawler.settings.set('ROBOTSTXT_OBEY', True)
response = Response('http://site.local/robots.txt')
def return_response(request, spider):
deferred = Deferred()
reactor.callFromThread(deferred.callback, response)
return deferred
crawler.engine.download.side_effect = return_response
return crawler
def test_robotstxt_empty_response(self):
# empty response should equal 'allow all'
middleware = RobotsTxtMiddleware(self._get_emptybody_crawler())
self.assertNotIgnored(Request('http://site.local'), middleware)
def test(r):
self.assertNotIgnored(Request('http://site.local/allowed'), middleware)
self.assertNotIgnored(Request('http://site.local/admin/main'), middleware)
self.assertNotIgnored(Request('http://site.local/static/'), middleware)
deferred = Deferred()
deferred.addCallback(test)
reactor.callFromThread(deferred.callback, None)
return deferred
def test_robotstxt_error(self):
self.crawler.settings.set('ROBOTSTXT_OBEY', True)
err = error.DNSLookupError('Robotstxt address not found')
def return_failure(request, spider):
deferred = Deferred()
reactor.callFromThread(deferred.errback, failure.Failure(err))
return deferred
self.crawler.engine.download.side_effect = return_failure
middleware = RobotsTxtMiddleware(self.crawler)
middleware._logerror = mock.MagicMock()
middleware.process_request(Request('http://site.local'), None)
deferred = Deferred()
deferred.addErrback(lambda _: self.assertIsNone(middleware._logerror.assert_any_call()))
reactor.callFromThread(deferred.callback, None)
return deferred
def assertNotIgnored(self, request, middleware):
spider = None # not actually used
self.assertIsNone(middleware.process_request(request, spider))
def assertIgnored(self, request, middleware):
spider = None # not actually used
self.assertRaises(IgnoreRequest, middleware.process_request, request, spider)
|
from __future__ import absolute_import
import re
from twisted.internet import reactor, error
from twisted.internet.defer import Deferred
from twisted.python import failure
from twisted.trial import unittest
from scrapy.downloadermiddlewares.robotstxt import RobotsTxtMiddleware
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.http import Request, Response, TextResponse
from scrapy.settings import Settings
from tests import mock
class RobotsTxtMiddlewareTest(unittest.TestCase):
    """Tests for RobotsTxtMiddleware with a mocked crawler engine.

    The engine's ``download`` is replaced with a side effect that fires a
    Deferred from the reactor thread, emulating the asynchronous fetch of
    robots.txt performed by the middleware.
    """

    def setUp(self):
        self.crawler = mock.MagicMock()
        self.crawler.settings = Settings()
        self.crawler.engine.download = mock.MagicMock()

    def tearDown(self):
        del self.crawler

    def test_robotstxt_settings(self):
        # With ROBOTSTXT_OBEY unset the middleware must refuse to activate.
        self.crawler.settings = Settings()
        self.crawler.settings.set('USER_AGENT', 'CustomAgent')
        self.assertRaises(NotConfigured, RobotsTxtMiddleware, self.crawler)

    def _get_successful_crawler(self):
        """Return a crawler serving a robots.txt that blocks /admin/ and /static/."""
        crawler = self.crawler
        crawler.settings.set('ROBOTSTXT_OBEY', True)
        # The inline flag (?m) must appear at the start of the pattern:
        # placing it elsewhere is deprecated since Python 3.6 and raises
        # re.error since Python 3.11.  The raw bytes literal also avoids the
        # invalid-escape warning for '\s' in a plain bytes literal.
        ROBOTS = re.sub(rb'(?m)^\s+', b'', b'''
        User-Agent: *
        Disallow: /admin/
        Disallow: /static/
        ''')
        response = TextResponse('http://site.local/robots.txt', body=ROBOTS)
        def return_response(request, spider):
            deferred = Deferred()
            reactor.callFromThread(deferred.callback, response)
            return deferred
        crawler.engine.download.side_effect = return_response
        return crawler

    def test_robotstxt(self):
        middleware = RobotsTxtMiddleware(self._get_successful_crawler())
        # There is a bit of neglect in robotstxt.py: robots.txt is fetched
        # asynchronously, and it is actually fetched only *after* the first
        # process_request completes, so the first process_request always
        # succeeds.  test() is deferred so that it runs after the mocked
        # robots.txt download has been processed.
        self.assertNotIgnored(Request('http://site.local'), middleware)
        def test(r):
            self.assertNotIgnored(Request('http://site.local/allowed'), middleware)
            self.assertIgnored(Request('http://site.local/admin/main'), middleware)
            self.assertIgnored(Request('http://site.local/static/'), middleware)
        deferred = Deferred()
        deferred.addCallback(test)
        reactor.callFromThread(deferred.callback, None)
        return deferred

    def test_robotstxt_meta(self):
        # dont_obey_robotstxt in request meta bypasses the middleware.
        middleware = RobotsTxtMiddleware(self._get_successful_crawler())
        meta = {'dont_obey_robotstxt': True}
        self.assertNotIgnored(Request('http://site.local', meta=meta), middleware)
        def test(r):
            self.assertNotIgnored(Request('http://site.local/allowed', meta=meta), middleware)
            self.assertNotIgnored(Request('http://site.local/admin/main', meta=meta), middleware)
            self.assertNotIgnored(Request('http://site.local/static/', meta=meta), middleware)
        deferred = Deferred()
        deferred.addCallback(test)
        reactor.callFromThread(deferred.callback, None)
        return deferred

    def _get_garbage_crawler(self):
        """Return a crawler whose robots.txt response is binary garbage."""
        crawler = self.crawler
        crawler.settings.set('ROBOTSTXT_OBEY', True)
        response = Response('http://site.local/robots.txt', body=b'GIF89a\xd3\x00\xfe\x00\xa2')
        def return_response(request, spider):
            deferred = Deferred()
            reactor.callFromThread(deferred.callback, response)
            return deferred
        crawler.engine.download.side_effect = return_response
        return crawler

    def test_robotstxt_garbage(self):
        # A garbage response should be discarded, i.e. treated as 'allow all'.
        middleware = RobotsTxtMiddleware(self._get_garbage_crawler())
        middleware._logerror = mock.MagicMock()
        middleware.process_request(Request('http://site.local'), None)
        self.assertNotIgnored(Request('http://site.local'), middleware)
        def test(r):
            self.assertNotIgnored(Request('http://site.local/allowed'), middleware)
            self.assertNotIgnored(Request('http://site.local/admin/main'), middleware)
            self.assertNotIgnored(Request('http://site.local/static/'), middleware)
        deferred = Deferred()
        deferred.addCallback(test)
        # NOTE(review): this errback only runs if test() itself fails; the
        # success path never inspects _logerror.
        deferred.addErrback(lambda _: self.assertIsNone(middleware._logerror.assert_any_call()))
        reactor.callFromThread(deferred.callback, None)
        return deferred

    def _get_emptybody_crawler(self):
        """Return a crawler whose robots.txt response has an empty body."""
        crawler = self.crawler
        crawler.settings.set('ROBOTSTXT_OBEY', True)
        response = Response('http://site.local/robots.txt')
        def return_response(request, spider):
            deferred = Deferred()
            reactor.callFromThread(deferred.callback, response)
            return deferred
        crawler.engine.download.side_effect = return_response
        return crawler

    def test_robotstxt_empty_response(self):
        # An empty response should be equivalent to 'allow all'.
        middleware = RobotsTxtMiddleware(self._get_emptybody_crawler())
        self.assertNotIgnored(Request('http://site.local'), middleware)
        def test(r):
            self.assertNotIgnored(Request('http://site.local/allowed'), middleware)
            self.assertNotIgnored(Request('http://site.local/admin/main'), middleware)
            self.assertNotIgnored(Request('http://site.local/static/'), middleware)
        deferred = Deferred()
        deferred.addCallback(test)
        reactor.callFromThread(deferred.callback, None)
        return deferred

    def test_robotstxt_error(self):
        # A failed robots.txt fetch must not block crawling.
        self.crawler.settings.set('ROBOTSTXT_OBEY', True)
        err = error.DNSLookupError('Robotstxt address not found')
        def return_failure(request, spider):
            deferred = Deferred()
            reactor.callFromThread(deferred.errback, failure.Failure(err))
            return deferred
        self.crawler.engine.download.side_effect = return_failure
        middleware = RobotsTxtMiddleware(self.crawler)
        middleware._logerror = mock.MagicMock()
        middleware.process_request(Request('http://site.local'), None)
        deferred = Deferred()
        # NOTE(review): as above, the errback only fires on failure of the
        # Deferred chain, so the _logerror check is never exercised on the
        # success path.
        deferred.addErrback(lambda _: self.assertIsNone(middleware._logerror.assert_any_call()))
        reactor.callFromThread(deferred.callback, None)
        return deferred

    def assertNotIgnored(self, request, middleware):
        """Assert that process_request lets *request* through (returns None)."""
        spider = None  # not actually used by the middleware
        self.assertIsNone(middleware.process_request(request, spider))

    def assertIgnored(self, request, middleware):
        """Assert that process_request raises IgnoreRequest for *request*."""
        spider = None  # not actually used by the middleware
        self.assertRaises(IgnoreRequest, middleware.process_request, request, spider)
|
en
| 0.860766
|
User-Agent: * Disallow: /admin/ Disallow: /static/ # There is a bit of neglect in robotstxt.py: robots.txt is fetched asynchronously, # and it is actually fetched only *after* first process_request completes. # So, first process_request will always succeed. # We defer test() because otherwise robots.txt download mock will be called after assertRaises failure. # garbage response should be discarded, equal 'allow all' # empty response should equal 'allow all' # not actually used # not actually used
| 2.25334
| 2
|
cata/constants.py
|
seblee97/student_teacher_catastrophic
| 2
|
6628188
|
LABEL_TASK_BOUNDARIES = "label_task_boundaries"
LEARNER_CONFIGURATION = "learner_configuration"
CONTINUAL = "continual"
META = "meta"
TEACHER_CONFIGURATION = "teacher_configuration"
OVERLAPPING = "overlapping"
NUM_TEACHERS = "num_teachers"
LOSS_TYPE = "loss_type"
REGRESSION = "regression"
CLASSIFICATION = "classification"
TASK = "task"
TOTAL_TRAINING_STEPS = "total_training_steps"
TRAIN_BATCH_SIZE = "train_batch_size"
LEARNING_RATE = "learning_rate"
LOSS_FUNCTION = "loss_function"
MSE = "mse"
BCE = "bce"
SCALE_HEAD_LR = "scale_head_lr"
SCALE_HIDDEN_LR = "scale_hidden_lr"
TIMESTEP = "timestep"
ODE_TIMESTEP = "ode_timestep"
TRAIN_HIDDEN_LAYERS = "train_hidden_layers"
TRAIN_HEAD_LAYER = "train_head_layer"
TRAINING = "training"
INPUT_SOURCE = "input_source"
IID_GAUSSIAN = "iid_gaussian"
MNIST_STREAM = "mnist_stream"
DATA = "data"
VERBOSE = "verbose"
VERBOSE_TB = "verbose_tb"
LOG_FREQUENCY = "log_frequency"
CHECKPOINT_FREQUENCY = "checkpoint_frequency"
LOG_TO_DF = "log_to_df"
MERGE_AT_CHECKPOINT = "merge_at_checkpoint"
SAVE_WEIGHTS_AT_SWITCH = "save_weights_at_switch"
SAVE_INITIAL_WEIGHTS = "save_initial_weights"
LOGGING = "logging"
TEST_BATCH_SIZE = "test_batch_size"
TEST_FREQUENCY = "test_frequency"
OVERLAP_FREQUENCY = "overlap_frequency"
TESTING = "testing"
INPUT_DIMENSION = "input_dimension"
STUDENT_HIDDEN_LAYERS = "student_hidden_layers"
TEACHER_HIDDEN_LAYERS = "teacher_hidden_layers"
OUTPUT_DIMENSION = "output_dimension"
STUDENT_NONLINEARITY = "student_nonlinearity"
SCALED_ERF = "scaled_erf"
RELU = "relu"
SIGMOID = "sigmoid"
LINEAR = "linear"
TEACHER_NONLINEARITIES = "teacher_nonlinearities"
NORMALISE_TEACHERS = "normalise_teachers"
TEACHER_INITIALISATION_STD = "teacher_initialisation_std"
STUDENT_INITIALISATION_STD = "student_initialisation_std"
UNIT_NORM_TEACHER_HEAD = "unit_norm_teacher_head"
INITIALISE_STUDENT_OUTPUTS = "initialise_student_outputs"
SOFT_COMMITTEE = "soft_committee"
TEACHER_BIAS_PARAMETERS = "teacher_bias_parameters"
STUDENT_BIAS_PARAMETERS = "student_bias_parameters"
SYMMETRIC_STUDENT_INITIALISATION = "symmetric_student_initialisation"
MODEL = "model"
STOPPING_CONDITION = "stopping_condition"
FIXED_PERIOD = "fixed_period"
THRESHOLD = "threshold"
LOSS_THRESHOLDS = "loss_thresholds"
CURRICULUM = "curriculum"
OVERLAP_TYPES = "overlap_types"
TEACHER_FEATURES_COPY = "teacher_features_copy"
COPY = "copy"
ROTATION = "rotation"
OVERLAP_ROTATIONS = "overlap_rotations"
NOT_APPLICABLE = "n/a"
OVERLAP_PERCENTAGES = "overlap_percentages"
TEACHER_NOISES = "teacher_noises"
TEACHERS = "teachers"
EXPERIMENT_NAME = "experiment_name"
USE_GPU = "use_gpu"
SEED = "seed"
NETWORK_SIMULATION = "network_simulation"
ODE_SIMULATION = "ode_simulation"
READOUT_ROTATION = "readout_rotation"
READOUT_ROTATION_MAGNITUDE = "readout_rotation_magnitude"
FEATURE_ROTATION = "feature_rotation"
FEATURE_ROTATION_MAGNITUDE = "feature_rotation_magnitude"
FEATURE_COPY_PERCENTAGE = "feature_copy_percentage"
STUDENT = "student"
# NOTE(review): MODEL is re-declared here with the same value as the earlier
# MODEL = "model" assignment above; the duplicate is harmless but redundant.
MODEL = "model"
ROTATION_MAGNITUDE = "rotation_magnitude"
HIDDEN_DIMENSIONS = "hidden_dimensions"
BIAS = "bias"
NONLINEARITY = "nonlinearity"
INITIALISATION_STD = "initialisation_std"
STUDENT_HEAD_WEIGHTS = "student_head_weights"
TEACHER_HEAD_WEIGHTS = "teacher_head_weights"
STUDENT_SELF_OVERLAP = "student_self_overlap"
TEACHER_SELF_OVERLAP = "teacher_self_overlap"
TEACHER_CROSS_OVERLAPS = "teacher_cross_overlaps"
STUDENT_TEACHER_OVERLAPS = "student_teacher_overlaps"
IMPLEMENTATION = "implementation"
CPP = "cpp"
PYTHON = "python"
ODE_RUN = "ode_run"
X = "x"
MEAN = "mean"
VARIANCE = "variance"
DATASET_SIZE = "dataset_size"
INF = "inf"
ODE_CSV = "ode_log.csv"
NETWORK_CSV = "network_log.csv"
GENERALISATION_ERROR = "generalisation_error"
GENERALISATION_ERROR_LABEL = r"$\epsilon$"
LOG_GENERALISATION_ERROR = "log_generalisation_error"
LOG_GENERALISATION_ERROR_LABEL = r"$\log{\epsilon}$"
STUDENT_HEAD = "student_head"
STUDENT_HEAD_LABEL = r"$h$"
TEACHER_HEAD = "teacher_head"
TEACHER_HEAD_LABEL = r"$v$"
STUDENT_SELF = "student_self"
STUDENT_SELF_LABEL = r"$Q$"
STUDENT_TEACHER = "student_teacher"
STUDENT_TEACHER_0 = "student_teacher_0"
STUDENT_TEACHER_0_LABEL = r"$R$"
STUDENT_TEACHER_1 = "student_teacher_1"
STUDENT_TEACHER_1_LABEL = r"$U$"
ODE = "ode"
SIM = "sim"
ODE_PDF = "ode_spec.pdf"
NETWORK_PDF = "network_spec.pdf"
OVERLAY_PDF = "overlay.pdf"
DASHED = "dashed"
SOLID = "solid"
STEP = "steps"
PRIVATE_CURRENT_TEACHER = "_current_teacher"
FREEZE_FEATURES = "freeze_features"
LOG_OVERLAPS = "log_overlaps"
EXPERIMENT_DEVICE = "experiment_device"
USING_GPU = "using_gpu"
GPU_ID = "gpu_id"
SWITCH_STEPS = "switch_steps"
SPLIT_LOGGING = "split_logging"
STUDENT_WEIGHTS = "student_weights"
SAVE_WEIGHT_FREQUENCY = "save_weight_frequency"
CHECKPOINT_PATH = "checkpoint_path"
EXPERIMENT_TIMESTAMP = "experiment_timestamp"
RESULTS = "results"
RESULTS_PATH = "results_path"
PARALLEL = "parallel"
SERIAL = "serial"
FORGETTING_PLOT = "forgetting_plot.pdf"
TRANSFER_PLOT = "transfer_plot.pdf"
PLASMA = "plasma"
VIRIDIS = "viridis"
FORGETTING_VS_V_PLOT = "forgetting_vs_v.pdf"
TRANSFER_VS_V_PLOT = "transfer_vs_v.pdf"
FORGETTING_RATE_PLOT = "forgetting_rate.pdf"
TRANSFER_RATE_PLOT = "transfer_rate.pdf"
WEIGHT = "weight"
OVERLAP = "overlap"
BOTH_ROTATION = "both_rotation"
FEATURE_ROTATION_ALPHA = "feature_rotation_alpha"
READOUT_ROTATION_ALPHA = "readout_rotation_alpha"
SCALE_FORWARD_BY_HIDDEN = "scale_forward_by_hidden"
SCALE_TEACHER_FORWARD_BY_HIDDEN = "scale_teacher_forward_by_hidden"
SCALE_STUDENT_FORWARD_BY_HIDDEN = "scale_student_forward_by_hidden"
FORWARD_SCALING = "forward_scaling"
SAVE_TEACHER_WEIGHTS = "save_teacher_weights"
TEACHER_WEIGHT_SAVE_PATH = "teacher_weights"
LEFT = "left"
RIGHT = "right"
APPLY_NONLINEARITY_ON_OUTPUT = "apply_nonlinearity_on_output"
CONSOLIDATE = "consolidate"
CONSOLIDATION = "consolidation"
CONSOLIDATION_TYPE = "consolidation_type"
EWC = "ewc"
IMPORTANCE = "importance"
TYPE = "type"
QUADRATIC = "quadratic"
SYNAPTIC_INTELLIGENCE = "synaptic_intelligence"
INTERLEAVE = "interleave"
INTERLEAVE_PERIOD = "interleave_period"
INTERLEAVE_DURATION = "interleave_duration"
TEACHER_INDEX = "teacher_index"
WEIGHT_NORMALISATION = "weight_normalisation"
NODE_CONSOLIDATION = "node_consolidation"
COPY_HEAD_AT_SWITCH = "copy_head_at_switch"
NONLINEARITIES = "nonlinearities"
NOISE_STDS = "noise_stds"
NODE_CONSOLIDATION_HESSIAN = "node_consolidation_hessian"
STUDENT_OLD_STUDENT = "student_old_student"
STUDENT_OLD_STUDENT_LABEL = "Q*"
SINGLE = "single"
# NOTE(review): STEP was earlier bound to "steps"; this rebinding silently
# changes the module attribute to "step".  Confirm which spelling consumers
# expect before deduplicating one of the two assignments.
STEP = "step"
NODE_SHARING = "node_sharing"
NUM_SHARED_NODES = "num_shared_nodes"
CLUSTER = "cluster"
LOSS = "loss"
CONSOLIDATION_PENALTY = "consolidation_penalty"
EVEN_ODD_MAPPING = {0: 0, 1: 1, 2: 0, 3: 1, 4: 0, 5: 1, 6: 0, 7: 1, 8: 0, 9: 1}
GREATER_FIVE_MAPPING = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1}
# Hard-coded subplot layouts for different numbers of graphs
GRAPH_LAYOUTS = {
1: (1, 1),
2: (1, 2),
3: (1, 3),
4: (2, 2),
5: (2, 3),
6: (2, 3),
7: (2, 4),
8: (2, 4),
9: (3, 3),
10: (2, 5),
11: (3, 4),
12: (3, 4),
13: (4, 4),
14: (4, 4),
15: (4, 4),
16: (4, 4),
}
TEACHER_SHADES = ["#2A9D8F", "#E9C46A"]
STUDENT_SHADES = ["#264653", "#E9C46A", "#878E88", "#76BED0"]
ORANGE_SHADES = [
"#E9C46A",
"#F4A261",
"#E76F51",
"#D5B942",
"#D9D375",
"#EDFBC1",
"#FC9E4F",
"#F17105",
]
TORQUOISE_SHADES = [
"#2A9D8F",
"#4E8098",
"#17301C",
"#4B644A",
"#89A894",
"#1C3738",
"#32746D",
"#01200F",
]
BLUE_SHADES = ["#5465ff", "#788bff", "#9bb1ff", "#bfd7ff", "#e2fdff"]
GREEN_SHADES = ["#143601", "#245501", "#538d22", "#73a942", "#aad576"]
MNIST_TRAIN_SET_SIZE = 60000
MNIST_TEST_SET_SIZE = 10000
MNIST_FLATTENED_DIM = 784
|
LABEL_TASK_BOUNDARIES = "label_task_boundaries"
LEARNER_CONFIGURATION = "learner_configuration"
CONTINUAL = "continual"
META = "meta"
TEACHER_CONFIGURATION = "teacher_configuration"
OVERLAPPING = "overlapping"
NUM_TEACHERS = "num_teachers"
LOSS_TYPE = "loss_type"
REGRESSION = "regression"
CLASSIFICATION = "classification"
TASK = "task"
TOTAL_TRAINING_STEPS = "total_training_steps"
TRAIN_BATCH_SIZE = "train_batch_size"
LEARNING_RATE = "learning_rate"
LOSS_FUNCTION = "loss_function"
MSE = "mse"
BCE = "bce"
SCALE_HEAD_LR = "scale_head_lr"
SCALE_HIDDEN_LR = "scale_hidden_lr"
TIMESTEP = "timestep"
ODE_TIMESTEP = "ode_timestep"
TRAIN_HIDDEN_LAYERS = "train_hidden_layers"
TRAIN_HEAD_LAYER = "train_head_layer"
TRAINING = "training"
INPUT_SOURCE = "input_source"
IID_GAUSSIAN = "iid_gaussian"
MNIST_STREAM = "mnist_stream"
DATA = "data"
VERBOSE = "verbose"
VERBOSE_TB = "verbose_tb"
LOG_FREQUENCY = "log_frequency"
CHECKPOINT_FREQUENCY = "checkpoint_frequency"
LOG_TO_DF = "log_to_df"
MERGE_AT_CHECKPOINT = "merge_at_checkpoint"
SAVE_WEIGHTS_AT_SWITCH = "save_weights_at_switch"
SAVE_INITIAL_WEIGHTS = "save_initial_weights"
LOGGING = "logging"
TEST_BATCH_SIZE = "test_batch_size"
TEST_FREQUENCY = "test_frequency"
OVERLAP_FREQUENCY = "overlap_frequency"
TESTING = "testing"
INPUT_DIMENSION = "input_dimension"
STUDENT_HIDDEN_LAYERS = "student_hidden_layers"
TEACHER_HIDDEN_LAYERS = "teacher_hidden_layers"
OUTPUT_DIMENSION = "output_dimension"
STUDENT_NONLINEARITY = "student_nonlinearity"
SCALED_ERF = "scaled_erf"
RELU = "relu"
SIGMOID = "sigmoid"
LINEAR = "linear"
TEACHER_NONLINEARITIES = "teacher_nonlinearities"
NORMALISE_TEACHERS = "normalise_teachers"
TEACHER_INITIALISATION_STD = "teacher_initialisation_std"
STUDENT_INITIALISATION_STD = "student_initialisation_std"
UNIT_NORM_TEACHER_HEAD = "unit_norm_teacher_head"
INITIALISE_STUDENT_OUTPUTS = "initialise_student_outputs"
SOFT_COMMITTEE = "soft_committee"
TEACHER_BIAS_PARAMETERS = "teacher_bias_parameters"
STUDENT_BIAS_PARAMETERS = "student_bias_parameters"
SYMMETRIC_STUDENT_INITIALISATION = "symmetric_student_initialisation"
MODEL = "model"
STOPPING_CONDITION = "stopping_condition"
FIXED_PERIOD = "fixed_period"
THRESHOLD = "threshold"
LOSS_THRESHOLDS = "loss_thresholds"
CURRICULUM = "curriculum"
OVERLAP_TYPES = "overlap_types"
TEACHER_FEATURES_COPY = "teacher_features_copy"
COPY = "copy"
ROTATION = "rotation"
OVERLAP_ROTATIONS = "overlap_rotations"
NOT_APPLICABLE = "n/a"
OVERLAP_PERCENTAGES = "overlap_percentages"
TEACHER_NOISES = "teacher_noises"
TEACHERS = "teachers"
EXPERIMENT_NAME = "experiment_name"
USE_GPU = "use_gpu"
SEED = "seed"
NETWORK_SIMULATION = "network_simulation"
ODE_SIMULATION = "ode_simulation"
READOUT_ROTATION = "readout_rotation"
READOUT_ROTATION_MAGNITUDE = "readout_rotation_magnitude"
FEATURE_ROTATION = "feature_rotation"
FEATURE_ROTATION_MAGNITUDE = "feature_rotation_magnitude"
FEATURE_COPY_PERCENTAGE = "feature_copy_percentage"
STUDENT = "student"
# NOTE(review): MODEL is re-declared here with the same value as the earlier
# MODEL = "model" assignment above; the duplicate is harmless but redundant.
MODEL = "model"
ROTATION_MAGNITUDE = "rotation_magnitude"
HIDDEN_DIMENSIONS = "hidden_dimensions"
BIAS = "bias"
NONLINEARITY = "nonlinearity"
INITIALISATION_STD = "initialisation_std"
STUDENT_HEAD_WEIGHTS = "student_head_weights"
TEACHER_HEAD_WEIGHTS = "teacher_head_weights"
STUDENT_SELF_OVERLAP = "student_self_overlap"
TEACHER_SELF_OVERLAP = "teacher_self_overlap"
TEACHER_CROSS_OVERLAPS = "teacher_cross_overlaps"
STUDENT_TEACHER_OVERLAPS = "student_teacher_overlaps"
IMPLEMENTATION = "implementation"
CPP = "cpp"
PYTHON = "python"
ODE_RUN = "ode_run"
X = "x"
MEAN = "mean"
VARIANCE = "variance"
DATASET_SIZE = "dataset_size"
INF = "inf"
ODE_CSV = "ode_log.csv"
NETWORK_CSV = "network_log.csv"
GENERALISATION_ERROR = "generalisation_error"
GENERALISATION_ERROR_LABEL = r"$\epsilon$"
LOG_GENERALISATION_ERROR = "log_generalisation_error"
LOG_GENERALISATION_ERROR_LABEL = r"$\log{\epsilon}$"
STUDENT_HEAD = "student_head"
STUDENT_HEAD_LABEL = r"$h$"
TEACHER_HEAD = "teacher_head"
TEACHER_HEAD_LABEL = r"$v$"
STUDENT_SELF = "student_self"
STUDENT_SELF_LABEL = r"$Q$"
STUDENT_TEACHER = "student_teacher"
STUDENT_TEACHER_0 = "student_teacher_0"
STUDENT_TEACHER_0_LABEL = r"$R$"
STUDENT_TEACHER_1 = "student_teacher_1"
STUDENT_TEACHER_1_LABEL = r"$U$"
ODE = "ode"
SIM = "sim"
ODE_PDF = "ode_spec.pdf"
NETWORK_PDF = "network_spec.pdf"
OVERLAY_PDF = "overlay.pdf"
DASHED = "dashed"
SOLID = "solid"
STEP = "steps"
PRIVATE_CURRENT_TEACHER = "_current_teacher"
FREEZE_FEATURES = "freeze_features"
LOG_OVERLAPS = "log_overlaps"
EXPERIMENT_DEVICE = "experiment_device"
USING_GPU = "using_gpu"
GPU_ID = "gpu_id"
SWITCH_STEPS = "switch_steps"
SPLIT_LOGGING = "split_logging"
STUDENT_WEIGHTS = "student_weights"
SAVE_WEIGHT_FREQUENCY = "save_weight_frequency"
CHECKPOINT_PATH = "checkpoint_path"
EXPERIMENT_TIMESTAMP = "experiment_timestamp"
RESULTS = "results"
RESULTS_PATH = "results_path"
PARALLEL = "parallel"
SERIAL = "serial"
FORGETTING_PLOT = "forgetting_plot.pdf"
TRANSFER_PLOT = "transfer_plot.pdf"
PLASMA = "plasma"
VIRIDIS = "viridis"
FORGETTING_VS_V_PLOT = "forgetting_vs_v.pdf"
TRANSFER_VS_V_PLOT = "transfer_vs_v.pdf"
FORGETTING_RATE_PLOT = "forgetting_rate.pdf"
TRANSFER_RATE_PLOT = "transfer_rate.pdf"
WEIGHT = "weight"
OVERLAP = "overlap"
BOTH_ROTATION = "both_rotation"
FEATURE_ROTATION_ALPHA = "feature_rotation_alpha"
READOUT_ROTATION_ALPHA = "readout_rotation_alpha"
SCALE_FORWARD_BY_HIDDEN = "scale_forward_by_hidden"
SCALE_TEACHER_FORWARD_BY_HIDDEN = "scale_teacher_forward_by_hidden"
SCALE_STUDENT_FORWARD_BY_HIDDEN = "scale_student_forward_by_hidden"
FORWARD_SCALING = "forward_scaling"
SAVE_TEACHER_WEIGHTS = "save_teacher_weights"
TEACHER_WEIGHT_SAVE_PATH = "teacher_weights"
LEFT = "left"
RIGHT = "right"
APPLY_NONLINEARITY_ON_OUTPUT = "apply_nonlinearity_on_output"
CONSOLIDATE = "consolidate"
CONSOLIDATION = "consolidation"
CONSOLIDATION_TYPE = "consolidation_type"
EWC = "ewc"
IMPORTANCE = "importance"
TYPE = "type"
QUADRATIC = "quadratic"
SYNAPTIC_INTELLIGENCE = "synaptic_intelligence"
INTERLEAVE = "interleave"
INTERLEAVE_PERIOD = "interleave_period"
INTERLEAVE_DURATION = "interleave_duration"
TEACHER_INDEX = "teacher_index"
WEIGHT_NORMALISATION = "weight_normalisation"
NODE_CONSOLIDATION = "node_consolidation"
COPY_HEAD_AT_SWITCH = "copy_head_at_switch"
NONLINEARITIES = "nonlinearities"
NOISE_STDS = "noise_stds"
NODE_CONSOLIDATION_HESSIAN = "node_consolidation_hessian"
STUDENT_OLD_STUDENT = "student_old_student"
STUDENT_OLD_STUDENT_LABEL = "Q*"
SINGLE = "single"
# NOTE(review): STEP was earlier bound to "steps"; this rebinding silently
# changes the module attribute to "step".  Confirm which spelling consumers
# expect before deduplicating one of the two assignments.
STEP = "step"
NODE_SHARING = "node_sharing"
NUM_SHARED_NODES = "num_shared_nodes"
CLUSTER = "cluster"
LOSS = "loss"
CONSOLIDATION_PENALTY = "consolidation_penalty"
EVEN_ODD_MAPPING = {0: 0, 1: 1, 2: 0, 3: 1, 4: 0, 5: 1, 6: 0, 7: 1, 8: 0, 9: 1}
GREATER_FIVE_MAPPING = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1}
# Hard-coded subplot layouts for different numbers of graphs
GRAPH_LAYOUTS = {
1: (1, 1),
2: (1, 2),
3: (1, 3),
4: (2, 2),
5: (2, 3),
6: (2, 3),
7: (2, 4),
8: (2, 4),
9: (3, 3),
10: (2, 5),
11: (3, 4),
12: (3, 4),
13: (4, 4),
14: (4, 4),
15: (4, 4),
16: (4, 4),
}
TEACHER_SHADES = ["#2A9D8F", "#E9C46A"]
STUDENT_SHADES = ["#264653", "#E9C46A", "#878E88", "#76BED0"]
ORANGE_SHADES = [
"#E9C46A",
"#F4A261",
"#E76F51",
"#D5B942",
"#D9D375",
"#EDFBC1",
"#FC9E4F",
"#F17105",
]
TORQUOISE_SHADES = [
"#2A9D8F",
"#4E8098",
"#17301C",
"#4B644A",
"#89A894",
"#1C3738",
"#32746D",
"#01200F",
]
BLUE_SHADES = ["#5465ff", "#788bff", "#9bb1ff", "#bfd7ff", "#e2fdff"]
GREEN_SHADES = ["#143601", "#245501", "#538d22", "#73a942", "#aad576"]
MNIST_TRAIN_SET_SIZE = 60000
MNIST_TEST_SET_SIZE = 10000
MNIST_FLATTENED_DIM = 784
|
en
| 0.589746
|
# Hard-coded subplot layouts for different numbers of graphs
| 1.697621
| 2
|
yardstick/benchmark/core/runner.py
|
alexnemes/yardstick_enc
| 1
|
6628189
|
<reponame>alexnemes/yardstick_enc
##############################################################################
# Copyright (c) 2015 <NAME> and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
""" Handler for yardstick command 'runner' """
from __future__ import absolute_import
from __future__ import print_function
from yardstick.benchmark.runners.base import Runner
from yardstick.benchmark.core import print_hbar
class Runners(object):
    """Runner commands.

    Set of commands to discover and display runner types.
    """

    def list_all(self, args):
        """List existing runner types"""
        types = Runner.get_types()
        print_hbar(78)
        print("| %-16s | %-60s" % ("Type", "Description"))
        print_hbar(78)
        for rtype in types:
            # Guard against runner classes without a docstring, which would
            # otherwise raise AttributeError on None.split().
            description = (rtype.__doc__ or "").split("\n")[0]
            print("| %-16s | %-60s" % (rtype.__execution_type__, description))
        print_hbar(78)

    def show(self, args):
        """Show details of a specific runner type"""
        rtype = Runner.get_cls(args.type[0])
        print(rtype.__doc__)
|
##############################################################################
# Copyright (c) 2015 <NAME> and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
""" Handler for yardstick command 'runner' """
from __future__ import absolute_import
from __future__ import print_function
from yardstick.benchmark.runners.base import Runner
from yardstick.benchmark.core import print_hbar
class Runners(object):
    """Runner commands.
    Set of commands to discover and display runner types.
    """
    def list_all(self, args):
        """List existing runner types"""
        types = Runner.get_types()
        print_hbar(78)
        print("| %-16s | %-60s" % ("Type", "Description"))
        print_hbar(78)
        for rtype in types:
            # NOTE(review): assumes every runner class has a docstring;
            # rtype.__doc__ is None otherwise and .split() would raise
            # AttributeError — confirm against registered runner types.
            print("| %-16s | %-60s" % (rtype.__execution_type__,
                                       rtype.__doc__.split("\n")[0]))
        print_hbar(78)
    def show(self, args):
        """Show details of a specific runner type"""
        rtype = Runner.get_cls(args.type[0])
        print(rtype.__doc__)
|
en
| 0.604777
|
############################################################################## # Copyright (c) 2015 <NAME> and others. # # All rights reserved. This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## Handler for yardstick command 'runner' Runner commands. Set of commands to discover and display runner types. List existing runner types Show details of a specific runner type
| 2.549711
| 3
|
tests/test_pages/test_inline.py
|
inducer/courseflow
| 0
|
6628190
|
<reponame>inducer/courseflow<filename>tests/test_pages/test_inline.py
__copyright__ = "Copyright (C) 2018 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from django.test import TestCase
import pytest
from course.content import get_repo_blob
from course.flow import get_page_behavior
from tests.base_test_mixins import SingleCourseQuizPageTestMixin
from tests.test_sandbox import (
SingleCoursePageSandboxTestBaseMixin
)
from tests.constants import PAGE_ERRORS
from tests.utils import mock
INLINE_MULTI_MARKDOWN_SINGLE = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
answers:
blank1:
type: ShortAnswer
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
"""
INLINE_MULTI_MARKDOWN_TWO_NOT_REQUIRED = r"""
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
A quarter equals [[choice1]].
answers:
blank1:
type: ShortAnswer
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <regex>(?:bar)?\s+
- <plain> BAR
- <plain>bar
choice1:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- ~CORRECT~ 0.25
- <div><p>This_should_be_wrapped_by_p_tag</p></div>
- [0.25]
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
One dollar is [[blank2]].
answers:
blank1:
type: ShortAnswer
%(attr1)s
correct_answer:
- <plain> BAR
- <plain>bar
blank2:
type: ShortAnswer
%(attr2)s
correct_answer:
- type: float
rtol: 0.00001
value: 1
- <plain> one
"""
INLINE_MULTI_MARKDOWN_FLOAT_WITHOUT_TOL = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
One dollar is [[blank2]].
answers:
blank1:
type: ShortAnswer
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
blank2:
type: ShortAnswer
width: 3em
prepended_text: "$"
hint: Blank with prepended text
correct_answer:
- type: float
value: 1
"""
INLINE_MULTI_MARKDOWN_NOT_ALLOWED_EMBEDDED_QTYPE = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
answers:
blank1:
type: SomeQuestionType
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_QUESTION_NOT_STRUCT = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
answers:
blank1: Something
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_HAS_NO_EXTRA_HTML = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
[[blank1]][[blank2]]
answers:
blank1:
type: ShortAnswer
correct_answer:
- <plain> BAR
- <plain>bar
blank2:
type: ShortAnswer
correct_answer:
- <plain> BAR
- <plain>bar
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_NO_CORRECT_ANSWER = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
answers:
blank1:
type: ShortAnswer
correct_answer: []
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_TEXT_Q_NO_STRINGIFIABLE_CORRECT_ANSWER = r"""
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
answers:
blank1:
type: ShortAnswer
correct_answer:
- <regex>(?:foo\s+)?\s
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_CHOICE_Q_NO_CORRECT_ANSWER = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[choice]] are often used in code examples.
answers:
choice:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- 0.25
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_CHOICE_QUESTION = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[choice]] are often used in code examples.
answers:
choice:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- ~CORRECT~ 0.25
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_NAMING_ERROR = r"""
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
A quarter equals [[1choice]].
answers:
blank1:
type: ShortAnswer
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <regex>(?:bar)?\s+
- <plain> BAR
- <plain>bar
choice:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- ~CORRECT~ 0.25
- <div><p>This_should_be_wrapped_by_p_tag</p></div>
- [0.25]
"""
INLINE_MULTI_MARKDOWN_ANSWERS_NAMING_ERROR = r"""
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
A quarter equals [[choice1]].
answers:
blank1:
type: ShortAnswer
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <regex>(?:bar)?\s+
- <plain> BAR
- <plain>bar
choice1:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- ~CORRECT~ 0.25
- <div><p>This_should_be_wrapped_by_p_tag</p></div>
- [0.25]
2choice:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- ~CORRECT~ 0.25
- <div><p>This_should_be_wrapped_by_p_tag</p></div>
- [0.25]
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_NAMING_DUPLICATED = r"""
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]][[blank1]] are often used in code examples.
A quarter equals [[choice1]][[choice1]].
answers:
blank1:
type: ShortAnswer
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <regex>(?:bar)?\s+
- <plain> BAR
- <plain>bar
choice1:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- ~CORRECT~ 0.25
"""
INLINE_MULTI_MARKDOWN_REDUNDANT = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
answer_explanation: This is an explanation.
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
answers:
blank1:
type: ShortAnswer
width: 4em
required: True
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
blank_2:
type: ShortAnswer
width: 10em
hint: <ol><li>with no hint title</li><li>HTML is OK</li><ol>
correct_answer:
- <plain> "1/5"
- type: float
value: 1/5
rtol: 0.00001
- <plain> 0.2
"""
INLINE_MULTI_EMBEDDED_WITH_MARKDOWN = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
answer_explanation: This is an explanation.
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
<img src="media:images/classroom.jpeg">
answers:
blank1:
type: ShortAnswer
width: 4em
required: True
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
"""
INLINE_MULTI_MARKDOWN_NO_ANSWER_FIELD = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
abcd
answers:
blank1:
type: ShortAnswer
width: 4em
required: True
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
"""
INLINE_MULTI_MARKDOWN_HAS_UNPAIRED_WRAPPER = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
[[[[blank1]]]]
answers:
blank1:
type: ShortAnswer
width: 4em
required: True
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
"""
INLINE_MULTI_MARKDOWN_FEWER = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.(old version)
question: |
Foo and [[blank1]] are often used in code examples, or
tutorials. $\\frac{1}{5}$ is equivalent to [[blank_2]].
The correct answer for this choice question is [[choice_a]].
The Upper case of "foo" is [[choice2]].
One dollar is [[blank3]], and five percent is [[blank4]].
answers:
blank1:
type: ShortAnswer
width: 4em
required: True
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
blank_2:
type: ShortAnswer
width: 10em
hint: <ol><li>with no hint title</li><li>HTML is OK</li><ol>
correct_answer:
- <plain> "1/5"
- type: float
value: 1/5
rtol: 0.00001
- <plain> 0.2
choice_a:
type: ChoicesAnswer
required: True
choices:
- ~CORRECT~ Correct
- Wrong
choice2:
type: ChoicesAnswer
choices:
- ~CORRECT~ FOO
- BAR
- fOO
blank3:
type: ShortAnswer
width: 3em
prepended_text: "$"
hint: Blank with prepended text
correct_answer:
- type: float
value: 1
rtol: 0.00001
- <plain> "1"
blank4:
type: ShortAnswer
width: 3em
appended_text: "%"
hint: Blank with appended text
correct_answer:
- type: float
value: 5
rtol: 0.00001
- <plain> "5"
"""
def get_repo_blob_side_effect(repo, full_name, commit_sha, allow_tree=True):
    """Stand-in for ``course.content.get_repo_blob`` in tests.

    For the one (file, commit) pair under test, return a fake blob whose
    ``data`` holds the "old" inline-multi question YAML; any other lookup
    is delegated to the real ``get_repo_blob``.
    """
    is_faked_lookup = (
        full_name == "questions/multi-question-example.yml"
        and commit_sha == b"ec41a2de73a99e6022060518cb5c5c162b88cdf5")
    if is_faked_lookup:
        class Blob:
            pass
        fake_blob = Blob()
        fake_blob.data = INLINE_MULTI_MARKDOWN_FEWER.encode()
        return fake_blob
    return get_repo_blob(repo, full_name, commit_sha, allow_tree)
def get_page_behavior_not_show_correctness_side_effect(page,
        permissions,
        session_in_progress,
        answer_was_graded,
        generates_grade,
        is_unenrolled_session,
        viewing_prior_version=False):
    """Wrap ``course.flow.get_page_behavior``, forcing correctness hidden.

    Delegates all arguments unchanged, then flips ``show_correctness``
    off on the returned behavior object before handing it back.
    """
    behavior = get_page_behavior(
        page, permissions, session_in_progress, answer_was_graded,
        generates_grade, is_unenrolled_session, viewing_prior_version)
    behavior.show_correctness = False
    return behavior
class InlineMultiQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase):
    """Page-sandbox tests for InlineMultiQuestion validation and grading.

    Each test feeds one of the YAML fixtures above into the page sandbox
    and asserts either the exact validation error/warning text or the
    correctness fraction produced by a submission.
    """
    def test_single(self):
        """A page with a single blank: that blank is forced required."""
        markdown = INLINE_MULTI_MARKDOWN_SINGLE
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(resp, None)
        # When there's more than one field, that field is force_required.
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, "This field is required.")
    def test_negative_width(self):
        """A negative width attribute on a blank fails page validation."""
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "width: -4em",
                       "attr2": "width: 5em"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "blank1: 'width': unrecogonized width attribute string: '-4em'")
    def test_negative_weight(self):
        """A negative weight attribute on a blank fails page validation."""
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "weight: 15",
                       "attr2": "weight: -5"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "blank2: 'weight' must be a non-negative value, got '-5' instead")
    def test_two_not_required(self):
        """Two non-required fields: empty/partial submissions are graded."""
        markdown = INLINE_MULTI_MARKDOWN_TWO_NOT_REQUIRED
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(resp, None)
        # because this choice was wrapped by p tag before markdown handling
        self.assertContains(
            resp, "<p>This_should_be_wrapped_by_p_tag</p>", html=True)
        self.assertContains(resp, "[0.25]")
        # When there's more than one fields, can submit with no answer
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 0)
        # partial answer
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': ['Bar']})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 0.5)
        # full answer, choice wrong answer
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': 'Bar', 'choice1': 4})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 0.5)
        # full answer, all correct
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': 'Bar', 'choice1': 2})
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 1)
    def test_submit_validation_error(self):
        """A float matcher without tolerance warns; bad input errors."""
        markdown = INLINE_MULTI_MARKDOWN_FLOAT_WITHOUT_TOL
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(
            resp,
            "Float match should have either rtol or "
            "atol--otherwise it will match any number")
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': 'Bar', 'blank2': 'abc'})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(
            resp, "TypeError: Cannot convert expression to float")
    def test_not_allowed_embedded_question_type(self):
        """An unknown embedded question type fails page validation."""
        markdown = INLINE_MULTI_MARKDOWN_NOT_ALLOWED_EMBEDDED_QTYPE
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "unknown embedded question type 'SomeQuestionType'")
    def test_embedded_question_not_struct(self):
        """An embedded answer that is not a mapping fails validation."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_QUESTION_NOT_STRUCT
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "Embedded question 'blank1' must be a struct")
    def test_embedded_question_no_extra_html(self):
        """Adjacent blanks render with no extra HTML between them."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_HAS_NO_EXTRA_HTML
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        # There's no html string between rendered blank1 field and blank2 field
        self.assertIn('</div> <div id="div_id_blank2"', resp.content.decode())
    def test_embedded_weight_count(self):
        """Per-blank weights determine each blank's share of the grade."""
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "weight: 15",
                       "attr2": "weight: 5"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(resp, None)
        # no answer
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 0)
        # partial answer
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': ['Bar']})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 0.75)
        # blank2 has not weight set
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "weight: 15",
                       "attr2": ""})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': ['Bar']})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 1)
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank2': 'One'})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 0)
    def test_embedded_width_attr(self):
        """Width strings: %, pt accepted; bad values/units are rejected."""
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "width: 15",
                       "attr2": "width: 85 %"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(resp, None)
        self.assertIn("width: 8.5em", resp.context["form"].as_p())
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "width: 15pt",
                       "attr2": "width: 5pt"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(resp, None)
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "width: one",
                       "attr2": "width: 5 pt"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "unrecogonized width attribute string: 'one'")
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "width: 15 pt",
                       "attr2": "width: 5 km"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "unsupported length unit 'km'")
    def test_embedded_question_no_correct_answer(self):
        """A text blank with an empty correct_answer list is invalid."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_NO_CORRECT_ANSWER
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "blank1: at least one answer must be provided")
    def test_embedded_text_question_no_stringifiable_correct_answer(self):
        """A blank whose matchers can't yield a plain-text answer fails."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_TEXT_Q_NO_STRINGIFIABLE_CORRECT_ANSWER  # noqa
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "blank1: no matcher is able to provide a plain-text "
            "correct answer")
    def test_embedded_choice_question_no_correct_answer(self):
        """A choice blank with no ~CORRECT~ choice is invalid."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_CHOICE_Q_NO_CORRECT_ANSWER
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            " more correct answer(s) expected for question 'choice', "
            "0 found")
    def test_embedded_choice_not_stringifiable(self):
        """A choice object whose str() raises produces a page error."""
        expected_page_error = (
            "'choice' choice 2: unable to convert to string")
        class BadChoice:
            def __str__(self):
                raise Exception
        from relate.utils import dict_to_struct
        # Hand-built page desc mirroring what dict_to_struct would produce
        # for INLINE_MULTI_MARKDOWN_EMBEDDED_CHOICE_QUESTION, but with one
        # choice replaced by an unstringifiable object.
        fake_page_desc = dict_to_struct(
            {'type': 'InlineMultiQuestion', 'id': 'inlinemulti',
             'prompt':
                 '\n# An InlineMultiQuestion example\n\nComplete the '
                 'following paragraph.\n',
             'question': '\nFoo and [[choice]] are often used in code '
                         'examples.\n',
             '_field_names': [
                 'type', 'id', 'prompt', 'question', 'answers', 'value'],
             'answers': {'_field_names': ['choice'],
                         'choice': {
                             '_field_names': ['type',
                                              'choices'],
                             'type': 'ChoicesAnswer',
                             'choices': [0.2,
                                         BadChoice(),
                                         '~CORRECT~ 0.25']}},
             'value': 10}
        )
        with mock.patch("relate.utils.dict_to_struct") as mock_dict_to_struct:
            mock_dict_to_struct.return_value = fake_page_desc
            markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_CHOICE_QUESTION
            resp = (
                self.get_page_sandbox_preview_response(markdown))
            self.assertEqual(resp.status_code, 200)
            self.assertSandboxNotHasValidPage(resp)
            self.assertResponseContextContains(resp, PAGE_ERRORS,
                                               expected_page_error)
    def test_embedded_question_no_answer_field_defined(self):
        """A question body containing no [[...]] fields is invalid."""
        markdown = INLINE_MULTI_MARKDOWN_NO_ANSWER_FIELD
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "InlineMultiQuestion requires at least one answer field to "
            "be defined.")
    def test_embedded_naming_error(self):
        """An embedded field name starting with a digit fails validation."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_NAMING_ERROR
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "ValidationError")
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "could not instantiate flow page")
    def test_answers_naming_error(self):
        """An answers key starting with a digit fails validation."""
        markdown = INLINE_MULTI_MARKDOWN_ANSWERS_NAMING_ERROR
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "invalid answers name '2choice'. A valid name should start "
            "with letters. Alphanumeric with underscores. Do not use "
            "spaces.")
    def test_embedded_naming_duplicated(self):
        """Repeated [[field]] names in the body fail validation."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_NAMING_DUPLICATED
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "embedded question name 'blank1', 'choice1' not unique.")
    def test_has_unpaired_wrapper(self):
        """Unbalanced '[[' wrappers in the body fail validation."""
        markdown = INLINE_MULTI_MARKDOWN_HAS_UNPAIRED_WRAPPER
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "question has unpaired '[['.")
    def test_redundant(self):
        """Answers for nonexistent fields warn but the page still grades."""
        markdown = INLINE_MULTI_MARKDOWN_REDUNDANT
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(
            resp,
            "redundant answers 'blank_2' provided for non-existing "
            "question(s).")
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': 'Bar'})
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, "This is an explanation.")
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 1)
    def test_embedded_question_with_markdown(self):
        """media: URLs in the question body expand to course media URLs."""
        self.post_update_course_content(
            commit_sha=b"4124e0c23e369d6709a670398167cb9c2fe52d35")
        markdown = INLINE_MULTI_EMBEDDED_WITH_MARKDOWN
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertContains(
            resp, '<img src="/course/test-course/media/4124e0c23e369d6709a6'
                  '70398167cb9c2fe52d35/images/classroom.jpeg">', html=True)
@pytest.mark.slow
class InlineMultiPageUpdateTest(SingleCourseQuizPageTestMixin, TestCase):
    """Flow-level InlineMultiQuestion tests across course-content updates."""
    # Page id of the inline-multi question inside the sample quiz flow.
    page_id = "inlinemulti"
    def setUp(self):
        super().setUp()
    def test_quiz_inline_not_show_correctness(self):
        """With show_correctness forced off, per-field correctness is hidden
        in the feedback, yet the session still grades normally."""
        self.start_flow(self.flow_id)
        with mock.patch("course.flow.get_page_behavior") as mock_get_bhv:
            mock_get_bhv.side_effect = (
                get_page_behavior_not_show_correctness_side_effect)
            submit_answer_response, _ = (
                self.submit_page_answer_by_page_id_and_test(
                    self.page_id, do_grading=False))
        self.assertEqual(submit_answer_response.status_code, 200)
        # 7 answer
        self.assertContains(submit_answer_response, 'correctness="1"', count=0)
        self.assertContains(submit_answer_response, 'correctness="0"', count=0)
        self.end_flow()
        self.assertSessionScoreEqual(10)
    # {{{ Test bug fix in https://github.com/inducer/relate/pull/262
    def test_add_new_question(self):
        """Test bug fix in https://github.com/inducer/relate/pull/262
        """
        # Serve the "old" 6-field page for the first commit via the fake
        # repo-blob side effect, submit against it, then switch to the
        # real 7-field page and re-render.
        with mock.patch("course.content.get_repo_blob") as mock_get_repo_blob:
            mock_get_repo_blob.side_effect = get_repo_blob_side_effect
            self.post_update_course_content(
                commit_sha=b"ec41a2de73a99e6022060518cb5c5c162b88cdf5")
            self.start_flow(self.flow_id)
            resp = self.client.get(
                self.get_page_url_by_page_id(page_id=self.page_id))
            self.assertEqual(resp.status_code, 200)
            self.assertContains(resp, "(old version)")
            answer_data = {
                'blank1': 'Bar', 'blank_2': '0.2', 'blank3': '1',
                'blank4': '5', 'choice2': '0', 'choice_a': '0'}
            submit_answer_response, _ = (
                self.submit_page_answer_by_page_id_and_test(
                    self.page_id, answer_data=answer_data, expected_grades=10))
            # 6 correct answer
            self.assertContains(submit_answer_response,
                                'correctness="1"', count=6)
        self.post_update_course_content(
            commit_sha=b"4124e0c23e369d6709a670398167cb9c2fe52d35")
        resp = self.client.get(
            self.get_page_url_by_page_id(page_id=self.page_id))
        self.assertEqual(resp.status_code, 200)
        # 7 answer
        self.assertContains(resp, 'correctness="1"', count=7)
# vim: fdm=marker
|
__copyright__ = "Copyright (C) 2018 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from django.test import TestCase
import pytest
from course.content import get_repo_blob
from course.flow import get_page_behavior
from tests.base_test_mixins import SingleCourseQuizPageTestMixin
from tests.test_sandbox import (
SingleCoursePageSandboxTestBaseMixin
)
from tests.constants import PAGE_ERRORS
from tests.utils import mock
INLINE_MULTI_MARKDOWN_SINGLE = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
answers:
blank1:
type: ShortAnswer
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
"""
INLINE_MULTI_MARKDOWN_TWO_NOT_REQUIRED = r"""
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
A quarter equals [[choice1]].
answers:
blank1:
type: ShortAnswer
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <regex>(?:bar)?\s+
- <plain> BAR
- <plain>bar
choice1:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- ~CORRECT~ 0.25
- <div><p>This_should_be_wrapped_by_p_tag</p></div>
- [0.25]
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
One dollar is [[blank2]].
answers:
blank1:
type: ShortAnswer
%(attr1)s
correct_answer:
- <plain> BAR
- <plain>bar
blank2:
type: ShortAnswer
%(attr2)s
correct_answer:
- type: float
rtol: 0.00001
value: 1
- <plain> one
"""
INLINE_MULTI_MARKDOWN_FLOAT_WITHOUT_TOL = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
One dollar is [[blank2]].
answers:
blank1:
type: ShortAnswer
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
blank2:
type: ShortAnswer
width: 3em
prepended_text: "$"
hint: Blank with prepended text
correct_answer:
- type: float
value: 1
"""
INLINE_MULTI_MARKDOWN_NOT_ALLOWED_EMBEDDED_QTYPE = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
answers:
blank1:
type: SomeQuestionType
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_QUESTION_NOT_STRUCT = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
answers:
blank1: Something
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_HAS_NO_EXTRA_HTML = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
[[blank1]][[blank2]]
answers:
blank1:
type: ShortAnswer
correct_answer:
- <plain> BAR
- <plain>bar
blank2:
type: ShortAnswer
correct_answer:
- <plain> BAR
- <plain>bar
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_NO_CORRECT_ANSWER = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
answers:
blank1:
type: ShortAnswer
correct_answer: []
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_TEXT_Q_NO_STRINGIFIABLE_CORRECT_ANSWER = r"""
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
answers:
blank1:
type: ShortAnswer
correct_answer:
- <regex>(?:foo\s+)?\s
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_CHOICE_Q_NO_CORRECT_ANSWER = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[choice]] are often used in code examples.
answers:
choice:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- 0.25
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_CHOICE_QUESTION = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[choice]] are often used in code examples.
answers:
choice:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- ~CORRECT~ 0.25
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_NAMING_ERROR = r"""
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
A quarter equals [[1choice]].
answers:
blank1:
type: ShortAnswer
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <regex>(?:bar)?\s+
- <plain> BAR
- <plain>bar
choice:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- ~CORRECT~ 0.25
- <div><p>This_should_be_wrapped_by_p_tag</p></div>
- [0.25]
"""
INLINE_MULTI_MARKDOWN_ANSWERS_NAMING_ERROR = r"""
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
A quarter equals [[choice1]].
answers:
blank1:
type: ShortAnswer
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <regex>(?:bar)?\s+
- <plain> BAR
- <plain>bar
choice1:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- ~CORRECT~ 0.25
- <div><p>This_should_be_wrapped_by_p_tag</p></div>
- [0.25]
2choice:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- ~CORRECT~ 0.25
- <div><p>This_should_be_wrapped_by_p_tag</p></div>
- [0.25]
"""
INLINE_MULTI_MARKDOWN_EMBEDDED_NAMING_DUPLICATED = r"""
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]][[blank1]] are often used in code examples.
A quarter equals [[choice1]][[choice1]].
answers:
blank1:
type: ShortAnswer
width: 4em
required: False
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <regex>(?:bar)?\s+
- <plain> BAR
- <plain>bar
choice1:
type: ChoicesAnswer
choices:
- 0.2
- 1/6
- ~CORRECT~ 0.25
"""
INLINE_MULTI_MARKDOWN_REDUNDANT = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
answer_explanation: This is an explanation.
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
answers:
blank1:
type: ShortAnswer
width: 4em
required: True
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
blank_2:
type: ShortAnswer
width: 10em
hint: <ol><li>with no hint title</li><li>HTML is OK</li><ol>
correct_answer:
- <plain> "1/5"
- type: float
value: 1/5
rtol: 0.00001
- <plain> 0.2
"""
INLINE_MULTI_EMBEDDED_WITH_MARKDOWN = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
answer_explanation: This is an explanation.
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
Foo and [[blank1]] are often used in code examples.
<img src="media:images/classroom.jpeg">
answers:
blank1:
type: ShortAnswer
width: 4em
required: True
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
"""
INLINE_MULTI_MARKDOWN_NO_ANSWER_FIELD = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
abcd
answers:
blank1:
type: ShortAnswer
width: 4em
required: True
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
"""
INLINE_MULTI_MARKDOWN_HAS_UNPAIRED_WRAPPER = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.
question: |
[[[[blank1]]]]
answers:
blank1:
type: ShortAnswer
width: 4em
required: True
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
"""
INLINE_MULTI_MARKDOWN_FEWER = """
type: InlineMultiQuestion
id: inlinemulti
value: 10
prompt: |
# An InlineMultiQuestion example
Complete the following paragraph.(old version)
question: |
Foo and [[blank1]] are often used in code examples, or
tutorials. $\\frac{1}{5}$ is equivalent to [[blank_2]].
The correct answer for this choice question is [[choice_a]].
The Upper case of "foo" is [[choice2]].
One dollar is [[blank3]], and five percent is [[blank4]].
answers:
blank1:
type: ShortAnswer
width: 4em
required: True
hint: Tex can be rendered in hint, e.g. $x_1$.
hint_title: Hint
correct_answer:
- <plain> BAR
- <plain>bar
blank_2:
type: ShortAnswer
width: 10em
hint: <ol><li>with no hint title</li><li>HTML is OK</li><ol>
correct_answer:
- <plain> "1/5"
- type: float
value: 1/5
rtol: 0.00001
- <plain> 0.2
choice_a:
type: ChoicesAnswer
required: True
choices:
- ~CORRECT~ Correct
- Wrong
choice2:
type: ChoicesAnswer
choices:
- ~CORRECT~ FOO
- BAR
- fOO
blank3:
type: ShortAnswer
width: 3em
prepended_text: "$"
hint: Blank with prepended text
correct_answer:
- type: float
value: 1
rtol: 0.00001
- <plain> "1"
blank4:
type: ShortAnswer
width: 3em
appended_text: "%"
hint: Blank with appended text
correct_answer:
- type: float
value: 5
rtol: 0.00001
- <plain> "5"
"""
def get_repo_blob_side_effect(repo, full_name, commit_sha, allow_tree=True):
    """Mock side effect for ``course.content.get_repo_blob``.

    For the one specific commit of ``questions/multi-question-example.yml``
    under test, serve the INLINE_MULTI_MARKDOWN_FEWER fixture instead of the
    real repository blob; every other lookup falls through to the genuine
    ``get_repo_blob``.
    """
    is_faked_lookup = (
        full_name == "questions/multi-question-example.yml"
        and commit_sha == b"ec41a2de73a99e6022060518cb5c5c162b88cdf5")
    if not is_faked_lookup:
        return get_repo_blob(repo, full_name, commit_sha, allow_tree)

    class FakeBlob:
        # Minimal stand-in exposing only the ``data`` attribute callers read.
        pass

    fake_blob = FakeBlob()
    fake_blob.data = INLINE_MULTI_MARKDOWN_FEWER.encode()
    return fake_blob
def get_page_behavior_not_show_correctness_side_effect(
        page, permissions, session_in_progress, answer_was_graded,
        generates_grade, is_unenrolled_session, viewing_prior_version=False):
    """Mock side effect for ``course.flow.get_page_behavior`` that forces
    ``show_correctness`` off while leaving all other behavior flags intact.
    """
    behavior = get_page_behavior(
        page, permissions, session_in_progress, answer_was_graded,
        generates_grade, is_unenrolled_session, viewing_prior_version)
    behavior.show_correctness = False
    return behavior
class InlineMultiQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase):
    """Sandbox tests for the InlineMultiQuestion page type.

    Each test renders one of the INLINE_MULTI_MARKDOWN_* fixtures in the
    course page sandbox and checks validation errors/warnings, rendering,
    and grading feedback.
    """
    def test_single(self):
        """A single-field page validates, and its lone field is required."""
        markdown = INLINE_MULTI_MARKDOWN_SINGLE
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(resp, None)
        # When there's more than one field, that field is force_required.
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, "This field is required.")
    def test_negative_width(self):
        """A negative width attribute is rejected at validation time."""
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "width: -4em",
                       "attr2": "width: 5em"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "blank1: 'width': unrecogonized width attribute string: '-4em'")
    def test_negative_weight(self):
        """A negative per-field weight is rejected at validation time."""
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "weight: 15",
                       "attr2": "weight: -5"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "blank2: 'weight' must be a non-negative value, got '-5' instead")
    def test_two_not_required(self):
        """With two optional fields, partial/empty submissions grade partially."""
        markdown = INLINE_MULTI_MARKDOWN_TWO_NOT_REQUIRED
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(resp, None)
        # because this choice was wrapped by p tag before markdown handling
        self.assertContains(
            resp, "<p>This_should_be_wrapped_by_p_tag</p>", html=True)
        self.assertContains(resp, "[0.25]")
        # When there's more than one fields, can submit with no answer
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 0)
        # partial answer
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': ['Bar']})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 0.5)
        # full answer, choice wrong answer
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': 'Bar', 'choice1': 4})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 0.5)
        # full answer, all correct
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': 'Bar', 'choice1': 2})
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 1)
    def test_submit_validation_error(self):
        """A float matcher without tolerance warns, and a non-numeric
        submission surfaces the conversion error on the form."""
        markdown = INLINE_MULTI_MARKDOWN_FLOAT_WITHOUT_TOL
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(
            resp,
            "Float match should have either rtol or "
            "atol--otherwise it will match any number")
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': 'Bar', 'blank2': 'abc'})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(
            resp, "TypeError: Cannot convert expression to float")
    def test_not_allowed_embedded_question_type(self):
        """An unknown embedded question type fails page validation."""
        markdown = INLINE_MULTI_MARKDOWN_NOT_ALLOWED_EMBEDDED_QTYPE
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "unknown embedded question type 'SomeQuestionType'")
    def test_embedded_question_not_struct(self):
        """A non-mapping answer definition fails page validation."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_QUESTION_NOT_STRUCT
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "Embedded question 'blank1' must be a struct")
    def test_embedded_question_no_extra_html(self):
        """Adjacent fields render back to back with no stray HTML between."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_HAS_NO_EXTRA_HTML
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        # There's no html string between rendered blank1 field and blank2 field
        self.assertIn('</div> <div id="div_id_blank2"', resp.content.decode())
    def test_embedded_weight_count(self):
        """Per-field weights (explicit or defaulted) drive partial credit."""
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "weight: 15",
                       "attr2": "weight: 5"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(resp, None)
        # no answer
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 0)
        # partial answer
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': ['Bar']})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 0.75)
        # blank2 has not weight set
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "weight: 15",
                       "attr2": ""})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': ['Bar']})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 1)
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank2': 'One'})
        self.assertEqual(resp.status_code, 200)
        self.assertFormErrorLoose(resp, None)
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 0)
    def test_embedded_width_attr(self):
        """Width attributes: unit-less, %, pt are accepted; garbage and
        unsupported units are rejected with specific errors."""
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "width: 15",
                       "attr2": "width: 85 %"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(resp, None)
        self.assertIn("width: 8.5em", resp.context["form"].as_p())
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "width: 15pt",
                       "attr2": "width: 5pt"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(resp, None)
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "width: one",
                       "attr2": "width: 5 pt"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "unrecogonized width attribute string: 'one'")
        markdown = (INLINE_MULTI_MARKDOWN_EMBEDDED_ATTR_PATTERN
                    % {"attr1": "width: 15 pt",
                       "attr2": "width: 5 km"})
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "unsupported length unit 'km'")
    def test_embedded_question_no_correct_answer(self):
        """A ShortAnswer with an empty correct_answer list fails validation."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_NO_CORRECT_ANSWER
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "blank1: at least one answer must be provided")
    def test_embedded_text_question_no_stringifiable_correct_answer(self):
        """Regex-only matchers cannot show a plain-text correct answer."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_TEXT_Q_NO_STRINGIFIABLE_CORRECT_ANSWER  # noqa
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "blank1: no matcher is able to provide a plain-text "
            "correct answer")
    def test_embedded_choice_question_no_correct_answer(self):
        """A ChoicesAnswer with no ~CORRECT~ choice fails validation."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_CHOICE_Q_NO_CORRECT_ANSWER
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            " more correct answer(s) expected for question 'choice', "
            "0 found")
    def test_embedded_choice_not_stringifiable(self):
        """A choice object whose str() raises produces a page error.

        dict_to_struct is patched so a BadChoice instance can be smuggled
        into the parsed page description.
        """
        expected_page_error = (
            "'choice' choice 2: unable to convert to string")
        class BadChoice:
            def __str__(self):
                raise Exception
        from relate.utils import dict_to_struct
        fake_page_desc = dict_to_struct(
            {'type': 'InlineMultiQuestion', 'id': 'inlinemulti',
             'prompt':
                 '\n# An InlineMultiQuestion example\n\nComplete the '
                 'following paragraph.\n',
             'question': '\nFoo and [[choice]] are often used in code '
                         'examples.\n',
             '_field_names': [
                 'type', 'id', 'prompt', 'question', 'answers', 'value'],
             'answers': {'_field_names': ['choice'],
                         'choice': {
                             '_field_names': ['type',
                                              'choices'],
                             'type': 'ChoicesAnswer',
                             'choices': [0.2,
                                         BadChoice(),
                                         '~CORRECT~ 0.25']}},
             'value': 10}
        )
        with mock.patch("relate.utils.dict_to_struct") as mock_dict_to_struct:
            mock_dict_to_struct.return_value = fake_page_desc
            markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_CHOICE_QUESTION
            resp = (
                self.get_page_sandbox_preview_response(markdown))
            self.assertEqual(resp.status_code, 200)
            self.assertSandboxNotHasValidPage(resp)
            self.assertResponseContextContains(resp, PAGE_ERRORS,
                                               expected_page_error)
    def test_embedded_question_no_answer_field_defined(self):
        """A question body with no [[...]] field at all fails validation."""
        markdown = INLINE_MULTI_MARKDOWN_NO_ANSWER_FIELD
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "InlineMultiQuestion requires at least one answer field to "
            "be defined.")
    def test_embedded_naming_error(self):
        """A field name starting with a digit raises a ValidationError."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_NAMING_ERROR
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "ValidationError")
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "could not instantiate flow page")
    def test_answers_naming_error(self):
        """An invalid key in the answers dict yields a naming error."""
        markdown = INLINE_MULTI_MARKDOWN_ANSWERS_NAMING_ERROR
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "invalid answers name '2choice'. A valid name should start "
            "with letters. Alphanumeric with underscores. Do not use "
            "spaces.")
    def test_embedded_naming_duplicated(self):
        """Repeating an embedded name in the question body is rejected."""
        markdown = INLINE_MULTI_MARKDOWN_EMBEDDED_NAMING_DUPLICATED
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "embedded question name 'blank1', 'choice1' not unique.")
    def test_has_unpaired_wrapper(self):
        """Unbalanced '[[' wrappers are rejected."""
        markdown = INLINE_MULTI_MARKDOWN_HAS_UNPAIRED_WRAPPER
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxNotHasValidPage(resp)
        self.assertResponseContextContains(
            resp, PAGE_ERRORS,
            "question has unpaired '[['.")
    def test_redundant(self):
        """Unreferenced answers only warn; grading and explanation still work."""
        markdown = INLINE_MULTI_MARKDOWN_REDUNDANT
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertSandboxWarningTextContain(
            resp,
            "redundant answers 'blank_2' provided for non-existing "
            "question(s).")
        resp = self.get_page_sandbox_submit_answer_response(
            markdown,
            answer_data={'blank1': 'Bar'})
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, "This is an explanation.")
        self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 1)
    def test_embedded_question_with_markdown(self):
        """A media: image reference expands to the course media URL."""
        self.post_update_course_content(
            commit_sha=b"4124e0c23e369d6709a670398167cb9c2fe52d35")
        markdown = INLINE_MULTI_EMBEDDED_WITH_MARKDOWN
        resp = self.get_page_sandbox_preview_response(markdown)
        self.assertEqual(resp.status_code, 200)
        self.assertSandboxHasValidPage(resp)
        self.assertContains(
            resp, '<img src="/course/test-course/media/4124e0c23e369d6709a6'
                  '70398167cb9c2fe52d35/images/classroom.jpeg">', html=True)
@pytest.mark.slow
class InlineMultiPageUpdateTest(SingleCourseQuizPageTestMixin, TestCase):
    """Flow-level tests for InlineMultiQuestion across course-content updates."""
    page_id = "inlinemulti"
    # NOTE(review): the previous setUp() override only called super().setUp()
    # and was removed as dead code; inherited setup behavior is unchanged.
    def test_quiz_inline_not_show_correctness(self):
        """When show_correctness is forced off, no per-field correctness
        markers appear in the submit response, but the session still scores."""
        self.start_flow(self.flow_id)
        with mock.patch("course.flow.get_page_behavior") as mock_get_bhv:
            mock_get_bhv.side_effect = (
                get_page_behavior_not_show_correctness_side_effect)
            submit_answer_response, _ = (
                self.submit_page_answer_by_page_id_and_test(
                    self.page_id, do_grading=False))
        self.assertEqual(submit_answer_response.status_code, 200)
        # 7 answer fields, none of which may expose correctness
        self.assertContains(submit_answer_response, 'correctness="1"', count=0)
        self.assertContains(submit_answer_response, 'correctness="0"', count=0)
        self.end_flow()
        self.assertSessionScoreEqual(10)
    # {{{ Test bug fix in https://github.com/inducer/relate/pull/262
    def test_add_new_question(self):
        """Test bug fix in https://github.com/inducer/relate/pull/262

        A session started against an old page version (6 fields, served via
        get_repo_blob_side_effect) must still render and regrade correctly
        after the course content is updated to a version with 7 fields.
        """
        with mock.patch("course.content.get_repo_blob") as mock_get_repo_blob:
            mock_get_repo_blob.side_effect = get_repo_blob_side_effect
            self.post_update_course_content(
                commit_sha=b"ec41a2de73a99e6022060518cb5c5c162b88cdf5")
            self.start_flow(self.flow_id)
            resp = self.client.get(
                self.get_page_url_by_page_id(page_id=self.page_id))
            self.assertEqual(resp.status_code, 200)
            self.assertContains(resp, "(old version)")
            answer_data = {
                'blank1': 'Bar', 'blank_2': '0.2', 'blank3': '1',
                'blank4': '5', 'choice2': '0', 'choice_a': '0'}
            submit_answer_response, _ = (
                self.submit_page_answer_by_page_id_and_test(
                    self.page_id, answer_data=answer_data, expected_grades=10))
            # 6 correct answer
            self.assertContains(submit_answer_response,
                                'correctness="1"', count=6)
        self.post_update_course_content(
            commit_sha=b"4124e0c23e369d6709a670398167cb9c2fe52d35")
        resp = self.client.get(
            self.get_page_url_by_page_id(page_id=self.page_id))
        self.assertEqual(resp.status_code, 200)
        # 7 answer
        self.assertContains(resp, 'correctness="1"', count=7)
# vim: fdm=marker
|
en
| 0.658487
|
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]] are often used in code examples. answers: blank1: type: ShortAnswer width: 4em required: False hint: Tex can be rendered in hint, e.g. $x_1$. hint_title: Hint correct_answer: - <plain> BAR - <plain>bar type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]] are often used in code examples. A quarter equals [[choice1]]. answers: blank1: type: ShortAnswer width: 4em required: False hint: Tex can be rendered in hint, e.g. $x_1$. 
hint_title: Hint correct_answer: - <regex>(?:bar)?\s+ - <plain> BAR - <plain>bar choice1: type: ChoicesAnswer choices: - 0.2 - 1/6 - ~CORRECT~ 0.25 - <div><p>This_should_be_wrapped_by_p_tag</p></div> - [0.25] type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]] are often used in code examples. One dollar is [[blank2]]. answers: blank1: type: ShortAnswer %(attr1)s correct_answer: - <plain> BAR - <plain>bar blank2: type: ShortAnswer %(attr2)s correct_answer: - type: float rtol: 0.00001 value: 1 - <plain> one type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]] are often used in code examples. One dollar is [[blank2]]. answers: blank1: type: ShortAnswer width: 4em required: False hint: Tex can be rendered in hint, e.g. $x_1$. hint_title: Hint correct_answer: - <plain> BAR - <plain>bar blank2: type: ShortAnswer width: 3em prepended_text: "$" hint: Blank with prepended text correct_answer: - type: float value: 1 type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]] are often used in code examples. answers: blank1: type: SomeQuestionType width: 4em required: False hint: Tex can be rendered in hint, e.g. $x_1$. hint_title: Hint correct_answer: - <plain> BAR - <plain>bar type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]] are often used in code examples. answers: blank1: Something type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. 
question: | [[blank1]][[blank2]] answers: blank1: type: ShortAnswer correct_answer: - <plain> BAR - <plain>bar blank2: type: ShortAnswer correct_answer: - <plain> BAR - <plain>bar type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]] are often used in code examples. answers: blank1: type: ShortAnswer correct_answer: [] type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]] are often used in code examples. answers: blank1: type: ShortAnswer correct_answer: - <regex>(?:foo\s+)?\s type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[choice]] are often used in code examples. answers: choice: type: ChoicesAnswer choices: - 0.2 - 1/6 - 0.25 type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[choice]] are often used in code examples. answers: choice: type: ChoicesAnswer choices: - 0.2 - 1/6 - ~CORRECT~ 0.25 type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]] are often used in code examples. A quarter equals [[1choice]]. answers: blank1: type: ShortAnswer width: 4em required: False hint: Tex can be rendered in hint, e.g. $x_1$. hint_title: Hint correct_answer: - <regex>(?:bar)?\s+ - <plain> BAR - <plain>bar choice: type: ChoicesAnswer choices: - 0.2 - 1/6 - ~CORRECT~ 0.25 - <div><p>This_should_be_wrapped_by_p_tag</p></div> - [0.25] type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]] are often used in code examples. A quarter equals [[choice1]]. 
answers: blank1: type: ShortAnswer width: 4em required: False hint: Tex can be rendered in hint, e.g. $x_1$. hint_title: Hint correct_answer: - <regex>(?:bar)?\s+ - <plain> BAR - <plain>bar choice1: type: ChoicesAnswer choices: - 0.2 - 1/6 - ~CORRECT~ 0.25 - <div><p>This_should_be_wrapped_by_p_tag</p></div> - [0.25] 2choice: type: ChoicesAnswer choices: - 0.2 - 1/6 - ~CORRECT~ 0.25 - <div><p>This_should_be_wrapped_by_p_tag</p></div> - [0.25] type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]][[blank1]] are often used in code examples. A quarter equals [[choice1]][[choice1]]. answers: blank1: type: ShortAnswer width: 4em required: False hint: Tex can be rendered in hint, e.g. $x_1$. hint_title: Hint correct_answer: - <regex>(?:bar)?\s+ - <plain> BAR - <plain>bar choice1: type: ChoicesAnswer choices: - 0.2 - 1/6 - ~CORRECT~ 0.25 type: InlineMultiQuestion id: inlinemulti value: 10 answer_explanation: This is an explanation. prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]] are often used in code examples. answers: blank1: type: ShortAnswer width: 4em required: True hint: Tex can be rendered in hint, e.g. $x_1$. hint_title: Hint correct_answer: - <plain> BAR - <plain>bar blank_2: type: ShortAnswer width: 10em hint: <ol><li>with no hint title</li><li>HTML is OK</li><ol> correct_answer: - <plain> "1/5" - type: float value: 1/5 rtol: 0.00001 - <plain> 0.2 type: InlineMultiQuestion id: inlinemulti value: 10 answer_explanation: This is an explanation. prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | Foo and [[blank1]] are often used in code examples. <img src="media:images/classroom.jpeg"> answers: blank1: type: ShortAnswer width: 4em required: True hint: Tex can be rendered in hint, e.g. $x_1$. 
hint_title: Hint correct_answer: - <plain> BAR - <plain>bar type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | abcd answers: blank1: type: ShortAnswer width: 4em required: True hint: Tex can be rendered in hint, e.g. $x_1$. hint_title: Hint correct_answer: - <plain> BAR - <plain>bar type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph. question: | [[[[blank1]]]] answers: blank1: type: ShortAnswer width: 4em required: True hint: Tex can be rendered in hint, e.g. $x_1$. hint_title: Hint correct_answer: - <plain> BAR - <plain>bar type: InlineMultiQuestion id: inlinemulti value: 10 prompt: | # An InlineMultiQuestion example Complete the following paragraph.(old version) question: | Foo and [[blank1]] are often used in code examples, or tutorials. $\\frac{1}{5}$ is equivalent to [[blank_2]]. The correct answer for this choice question is [[choice_a]]. The Upper case of "foo" is [[choice2]]. One dollar is [[blank3]], and five percent is [[blank4]]. answers: blank1: type: ShortAnswer width: 4em required: True hint: Tex can be rendered in hint, e.g. $x_1$. 
hint_title: Hint correct_answer: - <plain> BAR - <plain>bar blank_2: type: ShortAnswer width: 10em hint: <ol><li>with no hint title</li><li>HTML is OK</li><ol> correct_answer: - <plain> "1/5" - type: float value: 1/5 rtol: 0.00001 - <plain> 0.2 choice_a: type: ChoicesAnswer required: True choices: - ~CORRECT~ Correct - Wrong choice2: type: ChoicesAnswer choices: - ~CORRECT~ FOO - BAR - fOO blank3: type: ShortAnswer width: 3em prepended_text: "$" hint: Blank with prepended text correct_answer: - type: float value: 1 rtol: 0.00001 - <plain> "1" blank4: type: ShortAnswer width: 3em appended_text: "%" hint: Blank with appended text correct_answer: - type: float value: 5 rtol: 0.00001 - <plain> "5" # Fake the inline multiple question yaml for specific commit # When there's more than one field, that field is force_required. # because this choice was wrapped by p tag before markdown handling # When there's more than one fields, can submit with no answer # partial answer # full answer, choice wrong answer # full answer, all correct # There's no html string between rendered blank1 field and blank2 field # no answer # partial answer # blank2 has not weight set # noqa # An InlineMultiQuestion example\n\nComplete the ' # 7 answer # {{{ Test bug fix in https://github.com/inducer/relate/pull/262 Test bug fix in https://github.com/inducer/relate/pull/262 # 6 correct answer # 7 answer # vim: fdm=marker
| 1.579315
| 2
|
old-stuff-for-reference/nightjar-base/nightjar-src/python-src/nightjar/backend/api/tests/__init__.py
|
groboclown/nightjar-mesh
| 3
|
6628191
|
<gh_stars>1-10
"""
Tests for the data_store module.
"""
|
"""
Tests for the data_store module.
"""
|
it
| 0.330068
|
Tests for the data_store module.
| 1.055975
| 1
|
pcdet/models/backbones_3d/vfe/__init__.py
|
HenryLittle/OpenPCDet-HL
| 0
|
6628192
|
from .mean_vfe import MeanVFE
from .pillar_vfe import PillarVFE
from .image_vfe import ImageVFE
from .vfe_template import VFETemplate
from .fusion_vfe import ImageResNetVFE, ImageMaskRCNNVFE
# NOTE: despite the name, __all__ here is a dict used as a registry mapping
# config NAME strings to voxel-feature-encoder classes, not a conventional
# list of exported names.  Presumably consumed by the backbone builder to
# instantiate the VFE from config — confirm at the call site before changing.
__all__ = {
    'VFETemplate': VFETemplate,
    'MeanVFE': MeanVFE,
    'PillarVFE': PillarVFE,
    'ImageVFE': ImageVFE,
    'ImageResNetVFE': ImageResNetVFE,
    'ImageMaskRCNNVFE': ImageMaskRCNNVFE,
}
|
from .mean_vfe import MeanVFE
from .pillar_vfe import PillarVFE
from .image_vfe import ImageVFE
from .vfe_template import VFETemplate
from .fusion_vfe import ImageResNetVFE, ImageMaskRCNNVFE
__all__ = {
'VFETemplate': VFETemplate,
'MeanVFE': MeanVFE,
'PillarVFE': PillarVFE,
'ImageVFE': ImageVFE,
'ImageResNetVFE': ImageResNetVFE,
'ImageMaskRCNNVFE': ImageMaskRCNNVFE,
}
|
none
| 1
| 0.994543
| 1
|
|
challenges/trees/shortest_unique_prefix.py
|
lukasmartinelli/sharpen
| 13
|
6628193
|
<reponame>lukasmartinelli/sharpen
"""
Find shortest unique prefix to represent each word in the list.
Input: [zebra, dog, duck, dove]
Output: {z, dog, du, dov}
where we can see that
zebra = z
dog = dog
duck = du
dove = dov
Approach: Build a trie from the words first.
root
/ \
zebra d
/ / \
uck o
/ \
ve g
"""
def insert(node, word):
    """Insert *word* into the compressed trie rooted at *node*, in place.

    Each node stores a possibly multi-character edge label in ``char``.
    When an insertion diverges in the middle of such a label, the node is
    split: it keeps only the first character of its label and the remainder
    is pushed down into a new child node.
    """
    # Base case: nothing left of the word to insert.
    if len(word) == 0:
        return
    # Descend into the child whose label shares the word's first character,
    # consuming exactly one character of the word per level.
    for child in node.children:
        if child.char[0] == word[0]:
            insert(child, word[1:])
            return
    # No child matches.  If this node carries a multi-character label, split
    # it first (keep one char here, push the rest into a new child), then
    # retry the insertion against the now single-character node.
    if len(node.char) > 1:
        replacement_node = TrieNode(node.char[1:])
        node.children.append(replacement_node)
        node.char = node.char[0]
        insert(node, word)
        return
    # Otherwise the remaining suffix becomes a brand-new leaf child.
    new_node = TrieNode(word)
    node.children.append(new_node)
def find_prefix(node, prefix, word):
    """Locate *word* in the trie under *node* and return the list of edge
    labels making up its shortest unique prefix ([] if absent).

    The result collects every traversed node's label plus the first
    character of the terminal child, so ``''.join(result)`` is the prefix.
    """
    for candidate in node.children:
        if candidate.char == word:
            # Exact remaining match: one more character disambiguates.
            return prefix + [node.char, candidate.char[0]]
        if candidate.char[0] == word[0]:
            # Shared first character: descend, consuming one character.
            return find_prefix(candidate, prefix + [node.char], word[1:])
    return []
class TrieNode():
    """A node of a compressed (radix-style) trie.

    ``char`` is the (possibly multi-character) edge label leading to this
    node; ``children`` holds the child TrieNode objects.
    """

    def __init__(self, c):
        self.char = c
        self.children = []

    def __repr__(self):
        return f'<TrieNode {self.char}>'
def create_trie(words):
    """Build and return a compressed trie containing every word in *words*."""
    trie_root = TrieNode('')
    for current_word in words:
        insert(trie_root, current_word)
    return trie_root
def shortest_unique_prefix(words):
    """Return the shortest unique prefix of each word, in input order."""
    root = create_trie(words)
    prefixes = []
    for current_word in words:
        labels = find_prefix(root, prefix=[], word=current_word)
        prefixes.append(''.join(labels))
    return prefixes
def test_shortest_unique_prefix_no_words():
    """An empty word list yields an empty prefix list."""
    assert shortest_unique_prefix([]) == []
def test_shortest_unique_prefix_one_word():
    """A lone word is distinguished by its first character alone."""
    assert shortest_unique_prefix(['zebra']) == ['z']
def test_shortest_unique_prefix():
    """Canonical example from the module docstring."""
    words = ['zebra', 'dog', 'duck', 'dove']
    assert shortest_unique_prefix(words) == ['z', 'dog', 'du', 'dov']
def test_shortest_unique_prefix_same_start():
    """Words sharing a two-character prefix need three characters each."""
    words = ['bearcat', 'bert']
    assert shortest_unique_prefix(words) == ['bea', 'ber']
def test_create_trie_same_start():
    """Inserting a diverging word splits the multi-character node."""
    words = ['bearcat', 'bert']
    trie = create_trie(words)
    assert len(trie.children) == 1
    assert trie.children[0].char == 'b'
    assert trie.children[0].children[0].char == 'e'
    assert trie.children[0].children[0].children[0].char == 'arcat'
    assert trie.children[0].children[0].children[1].char == 'rt'
def test_create_trie():
    """Trie structure for the canonical example: compressed labels kept."""
    words = ['zebra', 'dog', 'duck', 'dove']
    trie = create_trie(words)
    assert len(trie.children) == 2
    assert trie.children[0].char == 'zebra'
    assert trie.children[1].char == 'd'
    assert len(trie.children[0].children) == 0
    assert len(trie.children[1].children) == 2
    assert trie.children[1].children[0].char == 'o'
    assert trie.children[1].children[1].char == 'uck'
    assert trie.children[1].children[0].children[0].char == 'g'
    assert trie.children[1].children[0].children[1].char == 've'
|
"""
Find shortest unique prefix to represent each word in the list.
Input: [zebra, dog, duck, dove]
Output: {z, dog, du, dov}
where we can see that
zebra = z
dog = dog
duck = du
dove = dov
Approach: Build a trie from the words first.
root
/ \
zebra d
/ / \
uck o
/ \
ve g
"""
def insert(node, word):
    """Insert *word* into the compressed trie rooted at *node*.

    Edges may carry multi-character labels. When a new word shares only the
    first character of this node's label, the node is split in place: its
    first character stays here and the remainder is pushed into a new child.
    """
    if len(word) == 0:
        return
    for child in node.children:
        if child.char[0] == word[0]:
            # First characters match: descend along this edge, consuming one
            # character of the word.
            insert(child, word[1:])
            return
    if len(node.char) > 1:
        # No child matches and this node holds a multi-character label:
        # split it so a sibling branch can be attached, then retry.
        replacement_node = TrieNode(node.char[1:])
        node.children.append(replacement_node)
        node.char = node.char[0]
        insert(node, word)
        return
    new_node = TrieNode(word)
    node.children.append(new_node)
def find_prefix(node, prefix, word):
    """Collect the shortest unique prefix for *word* under *node*.

    Returns the characters of the prefix as a list (join to get the
    string), or an empty list when *word* is not present in the trie.
    """
    for branch in node.children:
        if branch.char == word:
            # The remaining word exactly matches this edge label: the path
            # collected so far plus this edge's first character is unique.
            return prefix + [node.char, branch.char[0]]
        if branch.char[0] == word[0]:
            # Follow the matching edge, consuming one character of the word.
            return find_prefix(branch, prefix + [node.char], word[1:])
    return []
class TrieNode():
    """Node of a compressed trie: an edge label plus child nodes."""

    def __init__(self, c):
        self.char = c        # edge label leading into this node
        self.children = []   # child TrieNode instances

    def __repr__(self):
        return f'<TrieNode {self.char}>'
def create_trie(words):
    """Build a compressed trie containing every word in *words*."""
    root = TrieNode('')
    for entry in words:
        insert(root, entry)
    return root
def shortest_unique_prefix(words):
    """Return the shortest unique prefix of each word, in input order."""
    root = create_trie(words)
    return [''.join(find_prefix(root, prefix=[], word=entry))
            for entry in words]
# Tests pinning both the computed prefixes and the exact trie shape.
def test_shortest_unique_prefix_no_words():
    # An empty input yields an empty result.
    assert shortest_unique_prefix([]) == []
def test_shortest_unique_prefix_one_word():
    # A lone word is identified by its first character alone.
    assert shortest_unique_prefix(['zebra']) == ['z']
def test_shortest_unique_prefix():
    words = ['zebra', 'dog', 'duck', 'dove']
    assert shortest_unique_prefix(words) == ['z', 'dog', 'du', 'dov']
def test_shortest_unique_prefix_same_start():
    # Words sharing a long common prefix diverge at their first difference.
    words = ['bearcat', 'bert']
    assert shortest_unique_prefix(words) == ['bea', 'ber']
def test_create_trie_same_start():
    # 'bearcat' is split twice while inserting 'bert': b -> e -> {arcat, rt}.
    words = ['bearcat', 'bert']
    trie = create_trie(words)
    assert len(trie.children) == 1
    assert trie.children[0].char == 'b'
    assert trie.children[0].children[0].char == 'e'
    assert trie.children[0].children[0].children[0].char == 'arcat'
    assert trie.children[0].children[0].children[1].char == 'rt'
def test_create_trie():
    # 'zebra' stays one edge; the d-branch splits into o -> {g, ve} and uck.
    words = ['zebra', 'dog', 'duck', 'dove']
    trie = create_trie(words)
    assert len(trie.children) == 2
    assert trie.children[0].char == 'zebra'
    assert trie.children[1].char == 'd'
    assert len(trie.children[0].children) == 0
    assert len(trie.children[1].children) == 2
    assert trie.children[1].children[0].char == 'o'
    assert trie.children[1].children[1].char == 'uck'
    assert trie.children[1].children[0].children[0].char == 'g'
    assert trie.children[1].children[0].children[1].char == 've'
|
en
| 0.740124
|
Find shortest unique prefix to represent each word in the list. Input: [zebra, dog, duck, dove] Output: {z, dog, du, dov} where we can see that zebra = z dog = dog duck = du dove = dov Approach: Build a trie from the words first. root / \ zebra d / / \ uck o / \ ve g
| 3.858325
| 4
|
harfbuzz_metrics.py
|
mawillcockson/barcode-wheel
| 0
|
6628194
|
<reponame>mawillcockson/barcode-wheel
"""freetype_metrics.py but for HarfBuzz"""
import sys
import svgwrite
from collections import namedtuple
import pathlib
import barcode_wheel
def main():
    """Render font-metric debug boxes for *barcode_wheel* into an SVG file.

    The SVG is named after this script (``.svg`` suffix); an optional
    command-line argument overrides the sample text.
    """
    file_name = pathlib.Path(sys.argv[0]).with_suffix(".svg")
    drawing = svgwrite.Drawing(filename=str(file_name), size=("100%", "100%"))
    Window = namedtuple("Window", "width, height")
    window = Window(100, 100)
    # Default sample mixes ascenders, descenders and a diacritic.
    text = "Ty,gM`Ǖ" if len(sys.argv) <= 1 else sys.argv[1]
    font_family = "sans-serif"
    # Orange backdrop marking the full target window.
    drawing.add(
        drawing.rect(
            insert=(0, 0),
            size=window,
            fill="orange",
            opacity=0.5,
        )
    )
    scaled_bounding_box = barcode_wheel.scaled_text_bounding_box(
        target_box=window,
        string=text,
        font_family=font_family,
    )
    bounding_box = barcode_wheel.text_bounding_box(text, font_family=font_family)
    scaled_dimensions = barcode_wheel.box_in_box(starting_box=(bounding_box.width, bounding_box.height), target_box=window)
    # Grey box showing where the scaled text should land.
    # NOTE(review): width comes from `scaled_dimensions` but height from
    # `scaled_bounding_box` — looks inconsistent; confirm this mix is intended.
    drawing.add(
        drawing.rect(
            insert=(scaled_dimensions.x_offset, scaled_dimensions.y_offset),
            size=(scaled_dimensions.width, scaled_bounding_box.height),
            fill="grey",
            opacity=0.5,
        )
    )
    # Define the filled-text symbol once, then reference it via <use>.
    tfa = drawing.defs.add(
        barcode_wheel.text_filled_area(text, font_family)
    )
    drawing.add(
        drawing.use(
            href=tfa,
            insert=(0, 0),
            size=window,
        )
    )
    # CSS styling
    drawing.defs.add(
        drawing.style(
            f"""
            svg {{
                margin: 0px;
                padding: 0px;
            }}
            """
        )
    )
    # 10% margin on every side of the window.
    drawing.viewbox(-window.width * 0.1, -window.height * 0.1, window.width * 1.2, window.height * 1.2)
    drawing.fit()
    drawing.save(pretty=True)
if __name__ == "__main__":
    main()
|
"""freetype_metrics.py but for HarfBuzz"""
import sys
import svgwrite
from collections import namedtuple
import pathlib
import barcode_wheel
def main():
    """Render font-metric debug boxes for *barcode_wheel* into an SVG file.

    The SVG is named after this script (``.svg`` suffix); an optional
    command-line argument overrides the sample text.
    """
    file_name = pathlib.Path(sys.argv[0]).with_suffix(".svg")
    drawing = svgwrite.Drawing(filename=str(file_name), size=("100%", "100%"))
    Window = namedtuple("Window", "width, height")
    window = Window(100, 100)
    # Default sample mixes ascenders, descenders and a diacritic.
    text = "Ty,gM`Ǖ" if len(sys.argv) <= 1 else sys.argv[1]
    font_family = "sans-serif"
    # Orange backdrop marking the full target window.
    drawing.add(
        drawing.rect(
            insert=(0, 0),
            size=window,
            fill="orange",
            opacity=0.5,
        )
    )
    scaled_bounding_box = barcode_wheel.scaled_text_bounding_box(
        target_box=window,
        string=text,
        font_family=font_family,
    )
    bounding_box = barcode_wheel.text_bounding_box(text, font_family=font_family)
    scaled_dimensions = barcode_wheel.box_in_box(starting_box=(bounding_box.width, bounding_box.height), target_box=window)
    # Grey box showing where the scaled text should land.
    # NOTE(review): width comes from `scaled_dimensions` but height from
    # `scaled_bounding_box` — looks inconsistent; confirm this mix is intended.
    drawing.add(
        drawing.rect(
            insert=(scaled_dimensions.x_offset, scaled_dimensions.y_offset),
            size=(scaled_dimensions.width, scaled_bounding_box.height),
            fill="grey",
            opacity=0.5,
        )
    )
    # Define the filled-text symbol once, then reference it via <use>.
    tfa = drawing.defs.add(
        barcode_wheel.text_filled_area(text, font_family)
    )
    drawing.add(
        drawing.use(
            href=tfa,
            insert=(0, 0),
            size=window,
        )
    )
    # CSS styling
    drawing.defs.add(
        drawing.style(
            f"""
            svg {{
                margin: 0px;
                padding: 0px;
            }}
            """
        )
    )
    # 10% margin on every side of the window.
    drawing.viewbox(-window.width * 0.1, -window.height * 0.1, window.width * 1.2, window.height * 1.2)
    drawing.fit()
    drawing.save(pretty=True)
if __name__ == "__main__":
    main()
|
en
| 0.182387
|
freetype_metrics.py but for HarfBuzz # CSS styling svg {{ margin: 0px; padding: 0px; }}
| 2.385805
| 2
|
async/p192.py
|
ls-2018/tips
| 2
|
6628195
|
<reponame>ls-2018/tips
# Demo: detect callbacks that take too long to run (asyncio debug mode).
import asyncio
import logging
import sys
import time
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
def slow():
    # Deliberately blocks the event loop for 1.5 s — longer than the
    # slow_callback_duration threshold set below.
    time.sleep(1.5)
    print('over')
async def main():
    loop = asyncio.get_running_loop()
    # In debug mode, callbacks running longer than 1 second are reported.
    loop.slow_callback_duration = 1
    loop.call_soon(slow)
asyncio.run(main(), debug=True)
|
# Demo: detect callbacks that take too long to run (asyncio debug mode).
import asyncio
import logging
import sys
import time
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
def slow():
    # Deliberately blocks the event loop for 1.5 s — longer than the
    # slow_callback_duration threshold set below.
    time.sleep(1.5)
    print('over')
async def main():
    loop = asyncio.get_running_loop()
    # In debug mode, callbacks running longer than 1 second are reported.
    loop.slow_callback_duration = 1
    loop.call_soon(slow)
asyncio.run(main(), debug=True)
|
zh
| 0.49356
|
# 发现回调时间长运行的回调函数 # logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
| 3.209054
| 3
|
synergy/mx/freerun_action_handler.py
|
mushkevych/scheduler
| 15
|
6628196
|
__author__ = '<NAME>'
import json
from synergy.db.model.freerun_process_entry import FreerunProcessEntry
from synergy.db.dao.freerun_process_dao import FreerunProcessDao
from synergy.mx.base_request_handler import valid_action_request
from synergy.mx.abstract_action_handler import AbstractActionHandler
from synergy.scheduler.scheduler_constants import STATE_MACHINE_FREERUN
class FreerunActionHandler(AbstractActionHandler):
    """MX action handler for freerun (ad-hoc) process entries.

    Each instance serves one request; the target entry is identified by the
    (process_name, entry_name) pair taken from the request arguments.
    """

    def __init__(self, request, **values):
        super(FreerunActionHandler, self).__init__(request, **values)
        self.process_name = self.request_arguments.get('process_name')
        self.entry_name = self.request_arguments.get('entry_name')
        self.freerun_process_dao = FreerunProcessDao(self.logger)
        # Both identifiers are required for any freerun action.
        self.is_request_valid = bool(self.process_name and self.entry_name)
        if self.is_request_valid:
            self.process_name = self.process_name.strip()
            self.entry_name = self.entry_name.strip()
            self.is_requested_state_on = self.request_arguments.get('is_on') == 'on'

    def _parse_arguments(self):
        """Decode the request's 'arguments' field into a dict.

        Returns {} when the field is empty; bytes payloads are
        unicode-escape decoded before the JSON parse.
        """
        arguments = self.request_arguments['arguments']
        if not arguments:
            return {}
        if isinstance(arguments, bytes):
            arguments = arguments.decode('unicode-escape')
        return json.loads(arguments)

    @AbstractActionHandler.thread_handler.getter
    def thread_handler(self):
        handler_key = (self.process_name, self.entry_name)
        return self.scheduler.freerun_handlers[handler_key]

    @AbstractActionHandler.process_entry.getter
    def process_entry(self):
        return self.thread_handler.process_entry

    @AbstractActionHandler.uow_id.getter
    def uow_id(self):
        return self.process_entry.related_unit_of_work

    @valid_action_request
    def cancel_uow(self):
        """Cancel the unit of work associated with this process entry."""
        freerun_state_machine = self.scheduler.timetable.state_machines[STATE_MACHINE_FREERUN]
        freerun_state_machine.cancel_uow(self.process_entry)
        return self.reply_ok()

    @valid_action_request
    def get_event_log(self):
        """Return the event log recorded for this process entry."""
        return {'event_log': self.process_entry.event_log}

    @valid_action_request
    def create_entry(self):
        """Create and register a new freerun process entry."""
        process_entry = FreerunProcessEntry()
        process_entry.process_name = self.process_name
        process_entry.entry_name = self.entry_name
        process_entry.arguments = self._parse_arguments()
        process_entry.description = self.request_arguments['description']
        process_entry.is_on = self.is_requested_state_on
        process_entry.trigger_frequency = self.request_arguments['trigger_frequency']
        self.freerun_process_dao.update(process_entry)
        self.scheduler._register_process_entry(process_entry, self.scheduler.fire_freerun_worker)
        return self.reply_ok()

    @valid_action_request
    def delete_entry(self):
        """Deactivate and remove an existing freerun process entry."""
        handler_key = (self.process_name, self.entry_name)
        self.thread_handler.deactivate()
        self.freerun_process_dao.remove(handler_key)
        del self.scheduler.freerun_handlers[handler_key]
        self.logger.info(f'MX: Deleted FreerunThreadHandler for {handler_key}')
        return self.reply_ok()

    @valid_action_request
    def update_entry(self):
        """Update an existing entry; apply interval and on/off changes."""
        is_interval_changed = self.process_entry.trigger_frequency != self.request_arguments['trigger_frequency']
        # Bug fix: capture the pre-update state. The comparison below used to
        # run *after* is_on had been overwritten with the requested state, so
        # it was always False and the trigger was never (de)activated.
        was_on = self.process_entry.is_on
        self.process_entry.arguments = self._parse_arguments()
        self.process_entry.description = self.request_arguments['description']
        self.process_entry.is_on = self.is_requested_state_on
        self.process_entry.trigger_frequency = self.request_arguments['trigger_frequency']
        self.freerun_process_dao.update(self.process_entry)
        if is_interval_changed:
            self.change_interval()
        if was_on != self.is_requested_state_on:
            if self.is_requested_state_on:
                self.activate_trigger()
            else:
                self.deactivate_trigger()
        return self.reply_ok()
|
__author__ = '<NAME>'
import json
from synergy.db.model.freerun_process_entry import FreerunProcessEntry
from synergy.db.dao.freerun_process_dao import FreerunProcessDao
from synergy.mx.base_request_handler import valid_action_request
from synergy.mx.abstract_action_handler import AbstractActionHandler
from synergy.scheduler.scheduler_constants import STATE_MACHINE_FREERUN
class FreerunActionHandler(AbstractActionHandler):
    """MX action handler for freerun (ad-hoc) process entries.

    Each instance serves one request; the target entry is identified by the
    (process_name, entry_name) pair taken from the request arguments.
    """

    def __init__(self, request, **values):
        super(FreerunActionHandler, self).__init__(request, **values)
        self.process_name = self.request_arguments.get('process_name')
        self.entry_name = self.request_arguments.get('entry_name')
        self.freerun_process_dao = FreerunProcessDao(self.logger)
        # Both identifiers are required for any freerun action.
        self.is_request_valid = bool(self.process_name and self.entry_name)
        if self.is_request_valid:
            self.process_name = self.process_name.strip()
            self.entry_name = self.entry_name.strip()
            self.is_requested_state_on = self.request_arguments.get('is_on') == 'on'

    def _parse_arguments(self):
        """Decode the request's 'arguments' field into a dict.

        Returns {} when the field is empty; bytes payloads are
        unicode-escape decoded before the JSON parse.
        """
        arguments = self.request_arguments['arguments']
        if not arguments:
            return {}
        if isinstance(arguments, bytes):
            arguments = arguments.decode('unicode-escape')
        return json.loads(arguments)

    @AbstractActionHandler.thread_handler.getter
    def thread_handler(self):
        handler_key = (self.process_name, self.entry_name)
        return self.scheduler.freerun_handlers[handler_key]

    @AbstractActionHandler.process_entry.getter
    def process_entry(self):
        return self.thread_handler.process_entry

    @AbstractActionHandler.uow_id.getter
    def uow_id(self):
        return self.process_entry.related_unit_of_work

    @valid_action_request
    def cancel_uow(self):
        """Cancel the unit of work associated with this process entry."""
        freerun_state_machine = self.scheduler.timetable.state_machines[STATE_MACHINE_FREERUN]
        freerun_state_machine.cancel_uow(self.process_entry)
        return self.reply_ok()

    @valid_action_request
    def get_event_log(self):
        """Return the event log recorded for this process entry."""
        return {'event_log': self.process_entry.event_log}

    @valid_action_request
    def create_entry(self):
        """Create and register a new freerun process entry."""
        process_entry = FreerunProcessEntry()
        process_entry.process_name = self.process_name
        process_entry.entry_name = self.entry_name
        process_entry.arguments = self._parse_arguments()
        process_entry.description = self.request_arguments['description']
        process_entry.is_on = self.is_requested_state_on
        process_entry.trigger_frequency = self.request_arguments['trigger_frequency']
        self.freerun_process_dao.update(process_entry)
        self.scheduler._register_process_entry(process_entry, self.scheduler.fire_freerun_worker)
        return self.reply_ok()

    @valid_action_request
    def delete_entry(self):
        """Deactivate and remove an existing freerun process entry."""
        handler_key = (self.process_name, self.entry_name)
        self.thread_handler.deactivate()
        self.freerun_process_dao.remove(handler_key)
        del self.scheduler.freerun_handlers[handler_key]
        self.logger.info(f'MX: Deleted FreerunThreadHandler for {handler_key}')
        return self.reply_ok()

    @valid_action_request
    def update_entry(self):
        """Update an existing entry; apply interval and on/off changes."""
        is_interval_changed = self.process_entry.trigger_frequency != self.request_arguments['trigger_frequency']
        # Bug fix: capture the pre-update state. The comparison below used to
        # run *after* is_on had been overwritten with the requested state, so
        # it was always False and the trigger was never (de)activated.
        was_on = self.process_entry.is_on
        self.process_entry.arguments = self._parse_arguments()
        self.process_entry.description = self.request_arguments['description']
        self.process_entry.is_on = self.is_requested_state_on
        self.process_entry.trigger_frequency = self.request_arguments['trigger_frequency']
        self.freerun_process_dao.update(self.process_entry)
        if is_interval_changed:
            self.change_interval()
        if was_on != self.is_requested_state_on:
            if self.is_requested_state_on:
                self.activate_trigger()
            else:
                self.deactivate_trigger()
        return self.reply_ok()
|
none
| 1
| 1.861231
| 2
|
|
gala/potential/potential/util.py
|
ltlancas/gala
| 1
|
6628197
|
# coding: utf-8
""" Utilities for Potential classes """
from __future__ import division, print_function
# Third-party
import numpy as np
# Project
from .core import PotentialBase
__all__ = ['from_equation']
# def _classnamify(s):
# s = [x.lower() for x in str(s).split()]
# words = []
# for word in s:
# words.append(word.capitalize())
# return "".join(words)
def from_equation(expr, vars, pars, name=None, hessian=False):
    r"""
    Create a potential class from an expression for the potential.
    .. note::
        This utility requires having `Sympy <http://www.sympy.org/>`_ installed.
    .. warning::
        These potentials are *not* pickle-able and cannot be written
        out to YAML files (using `~gala.potential.PotentialBase.save()`)
    Parameters
    ----------
    expr : :class:`sympy.core.expr.Expr`, str
        Either a ``Sympy`` expression, or a string that can be converted to
        a ``Sympy`` expression.
    vars : iterable
        An iterable of variable names in the expression.
    pars : iterable
        An iterable of parameter names in the expression.
    name : str (optional)
        The name of the potential class returned.
    hessian : bool (optional)
        Generate a function to compute the Hessian.
    Returns
    -------
    CustomPotential : `~gala.potential.PotentialBase`
        A potential class that represents the input equation. To instantiate the
        potential, use just like a normal class with parameters.
    Examples
    --------
    Here we'll create a potential class for the harmonic oscillator
    potential, :math:`\Phi(x) = \frac{1}{2}\,k\,x^2`::
        >>> Potential = from_equation("1/2*k*x**2", vars="x", pars="k",
        ...                           name='HarmonicOscillator')
        >>> p1 = Potential(k=1.)
        >>> p1
        <HarmonicOscillatorPotential: k=1.00 (dimensionless)>
    The potential class (and object) is a fully-fledged subclass of
    `~gala.potential.PotentialBase` and therefore has many useful methods.
    For example, to integrate an orbit::
        >>> orbit = p1.integrate_orbit([1.,0], dt=0.01, n_steps=1000)
    """
    try:
        import sympy
        from sympy.utilities.lambdify import lambdify
    except ImportError:
        raise ImportError("sympy is required to use 'from_equation()' "
                          "potential class creation.")
    # convert all input to Sympy objects
    expr = sympy.sympify(expr)
    vars = [sympy.sympify(v) for v in vars]
    var_names = [v.name for v in vars]
    pars = [sympy.sympify(p) for p in pars]
    par_names = [p.name for p in pars]
    ndim = len(vars)
    # Energy / value
    energyfunc = lambdify(vars + pars, expr, dummify=False, modules='numpy')
    # Gradient
    # One lambdified partial derivative per coordinate, in `vars` order.
    gradfuncs = []
    for var in vars:
        gradfuncs.append(lambdify(vars + pars, sympy.diff(expr,var), dummify=False, modules='numpy'))
    # Generated class; closes over energyfunc / gradfuncs / var_names above.
    class CustomPotential(PotentialBase):
        def __init__(self, units=None, **kwargs):
            # Every declared parameter must be supplied at instantiation.
            for par in par_names:
                if par not in kwargs:
                    raise ValueError("You must specify a value for "
                                     "parameter '{}'.".format(par))
            super(CustomPotential,self).__init__(units=units,
                                                 parameters=kwargs,
                                                 ndim=ndim)
        def _energy(self, w, t=0.):
            # Build the kwargs the lambdified function expects: parameter
            # values first, then one column of `w` per coordinate variable.
            kw = self.parameters.copy()
            for k,v in kw.items():
                kw[k] = v.value
            for i,name in enumerate(var_names):
                kw[name] = w[:,i]
            return np.array(energyfunc(**kw))
        def _gradient(self, w, t=0.):
            kw = self.parameters.copy()
            for k,v in kw.items():
                kw[k] = v.value
            for i,name in enumerate(var_names):
                kw[name] = w[:,i]
            # Stack per-coordinate derivatives, transpose to (n, ndim).
            grad = np.vstack([f(**kw)[np.newaxis] for f in gradfuncs])
            return grad.T
    if name is not None:
        # name = _classnamify(name)
        if "potential" not in name.lower():
            name = name + "Potential"
        CustomPotential.__name__ = str(name)
    # Hessian
    if hessian:
        # ndim*ndim second derivatives, row-major in (var1, var2).
        hessfuncs = []
        for var1 in vars:
            for var2 in vars:
                hessfuncs.append(lambdify(vars + pars, sympy.diff(expr,var1,var2),
                                          dummify=False, modules='numpy'))
        def _hessian(self, w, t):
            kw = self.parameters.copy()
            for k,v in kw.items():
                kw[k] = v.value
            for i,name in enumerate(var_names):
                kw[name] = w[:,i]
            # expand = [np.newaxis] * w[i].ndim
            # This ain't pretty, bub
            # NOTE(review): `i` below is the index left over from the loop
            # above (the last variable) — this relies on every column of `w`
            # having the same length; confirm before refactoring.
            arrs = []
            for f in hessfuncs:
                hess_arr = np.array(f(**kw))
                if hess_arr.shape != w[:,i].shape:
                    # Constant second derivative: broadcast to full length.
                    hess_arr = np.tile(hess_arr, reps=w[:,i].shape)
                arrs.append(hess_arr)
            hess = np.vstack(arrs)
            return hess.reshape((ndim,ndim,len(w[:,i])))
        CustomPotential._hessian = _hessian
    # Generated classes cannot be serialized back out; disable save().
    CustomPotential.save = None
    return CustomPotential
|
# coding: utf-8
""" Utilities for Potential classes """
from __future__ import division, print_function
# Third-party
import numpy as np
# Project
from .core import PotentialBase
__all__ = ['from_equation']
# def _classnamify(s):
# s = [x.lower() for x in str(s).split()]
# words = []
# for word in s:
# words.append(word.capitalize())
# return "".join(words)
def from_equation(expr, vars, pars, name=None, hessian=False):
    r"""
    Create a potential class from an expression for the potential.
    .. note::
        This utility requires having `Sympy <http://www.sympy.org/>`_ installed.
    .. warning::
        These potentials are *not* pickle-able and cannot be written
        out to YAML files (using `~gala.potential.PotentialBase.save()`)
    Parameters
    ----------
    expr : :class:`sympy.core.expr.Expr`, str
        Either a ``Sympy`` expression, or a string that can be converted to
        a ``Sympy`` expression.
    vars : iterable
        An iterable of variable names in the expression.
    pars : iterable
        An iterable of parameter names in the expression.
    name : str (optional)
        The name of the potential class returned.
    hessian : bool (optional)
        Generate a function to compute the Hessian.
    Returns
    -------
    CustomPotential : `~gala.potential.PotentialBase`
        A potential class that represents the input equation. To instantiate the
        potential, use just like a normal class with parameters.
    Examples
    --------
    Here we'll create a potential class for the harmonic oscillator
    potential, :math:`\Phi(x) = \frac{1}{2}\,k\,x^2`::
        >>> Potential = from_equation("1/2*k*x**2", vars="x", pars="k",
        ...                           name='HarmonicOscillator')
        >>> p1 = Potential(k=1.)
        >>> p1
        <HarmonicOscillatorPotential: k=1.00 (dimensionless)>
    The potential class (and object) is a fully-fledged subclass of
    `~gala.potential.PotentialBase` and therefore has many useful methods.
    For example, to integrate an orbit::
        >>> orbit = p1.integrate_orbit([1.,0], dt=0.01, n_steps=1000)
    """
    try:
        import sympy
        from sympy.utilities.lambdify import lambdify
    except ImportError:
        raise ImportError("sympy is required to use 'from_equation()' "
                          "potential class creation.")
    # convert all input to Sympy objects
    expr = sympy.sympify(expr)
    vars = [sympy.sympify(v) for v in vars]
    var_names = [v.name for v in vars]
    pars = [sympy.sympify(p) for p in pars]
    par_names = [p.name for p in pars]
    ndim = len(vars)
    # Energy / value
    energyfunc = lambdify(vars + pars, expr, dummify=False, modules='numpy')
    # Gradient
    # One lambdified partial derivative per coordinate, in `vars` order.
    gradfuncs = []
    for var in vars:
        gradfuncs.append(lambdify(vars + pars, sympy.diff(expr,var), dummify=False, modules='numpy'))
    # Generated class; closes over energyfunc / gradfuncs / var_names above.
    class CustomPotential(PotentialBase):
        def __init__(self, units=None, **kwargs):
            # Every declared parameter must be supplied at instantiation.
            for par in par_names:
                if par not in kwargs:
                    raise ValueError("You must specify a value for "
                                     "parameter '{}'.".format(par))
            super(CustomPotential,self).__init__(units=units,
                                                 parameters=kwargs,
                                                 ndim=ndim)
        def _energy(self, w, t=0.):
            # Build the kwargs the lambdified function expects: parameter
            # values first, then one column of `w` per coordinate variable.
            kw = self.parameters.copy()
            for k,v in kw.items():
                kw[k] = v.value
            for i,name in enumerate(var_names):
                kw[name] = w[:,i]
            return np.array(energyfunc(**kw))
        def _gradient(self, w, t=0.):
            kw = self.parameters.copy()
            for k,v in kw.items():
                kw[k] = v.value
            for i,name in enumerate(var_names):
                kw[name] = w[:,i]
            # Stack per-coordinate derivatives, transpose to (n, ndim).
            grad = np.vstack([f(**kw)[np.newaxis] for f in gradfuncs])
            return grad.T
    if name is not None:
        # name = _classnamify(name)
        if "potential" not in name.lower():
            name = name + "Potential"
        CustomPotential.__name__ = str(name)
    # Hessian
    if hessian:
        # ndim*ndim second derivatives, row-major in (var1, var2).
        hessfuncs = []
        for var1 in vars:
            for var2 in vars:
                hessfuncs.append(lambdify(vars + pars, sympy.diff(expr,var1,var2),
                                          dummify=False, modules='numpy'))
        def _hessian(self, w, t):
            kw = self.parameters.copy()
            for k,v in kw.items():
                kw[k] = v.value
            for i,name in enumerate(var_names):
                kw[name] = w[:,i]
            # expand = [np.newaxis] * w[i].ndim
            # This ain't pretty, bub
            # NOTE(review): `i` below is the index left over from the loop
            # above (the last variable) — this relies on every column of `w`
            # having the same length; confirm before refactoring.
            arrs = []
            for f in hessfuncs:
                hess_arr = np.array(f(**kw))
                if hess_arr.shape != w[:,i].shape:
                    # Constant second derivative: broadcast to full length.
                    hess_arr = np.tile(hess_arr, reps=w[:,i].shape)
                arrs.append(hess_arr)
            hess = np.vstack(arrs)
            return hess.reshape((ndim,ndim,len(w[:,i])))
        CustomPotential._hessian = _hessian
    # Generated classes cannot be serialized back out; disable save().
    CustomPotential.save = None
    return CustomPotential
|
en
| 0.636766
|
# coding: utf-8 Utilities for Potential classes # Third-party # Project # def _classnamify(s): # s = [x.lower() for x in str(s).split()] # words = [] # for word in s: # words.append(word.capitalize()) # return "".join(words) Create a potential class from an expression for the potential. .. note:: This utility requires having `Sympy <http://www.sympy.org/>`_ installed. .. warning:: These potentials are *not* pickle-able and cannot be written out to YAML files (using `~gala.potential.PotentialBase.save()`) Parameters ---------- expr : :class:`sympy.core.expr.Expr`, str Either a ``Sympy`` expression, or a string that can be converted to a ``Sympy`` expression. vars : iterable An iterable of variable names in the expression. pars : iterable An iterable of parameter names in the expression. name : str (optional) The name of the potential class returned. hessian : bool (optional) Generate a function to compute the Hessian. Returns ------- CustomPotential : `~gala.potential.PotentialBase` A potential class that represents the input equation. To instantiate the potential, use just like a normal class with parameters. Examples -------- Here we'll create a potential class for the harmonic oscillator potential, :math:`\Phi(x) = \frac{1}{2}\,k\,x^2`:: >>> Potential = from_equation("1/2*k*x**2", vars="x", pars="k", ... name='HarmonicOscillator') >>> p1 = Potential(k=1.) >>> p1 <HarmonicOscillatorPotential: k=1.00 (dimensionless)> The potential class (and object) is a fully-fledged subclass of `~gala.potential.PotentialBase` and therefore has many useful methods. For example, to integrate an orbit:: >>> orbit = p1.integrate_orbit([1.,0], dt=0.01, n_steps=1000) # convert all input to Sympy objects # Energy / value # Gradient # name = _classnamify(name) # Hessian # expand = [np.newaxis] * w[i].ndim # This ain't pretty, bub
| 3.404757
| 3
|
vilya/views/api/repos/issues.py
|
mubashshirjamal/code
| 1,582
|
6628198
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
from vilya.libs import api_errors
from vilya.models.project_issue import ProjectIssue
from vilya.views.api.utils import RestAPIUI
class IssuesUI(RestAPIUI):
    """REST endpoint for a repository's issue collection."""
    _q_exports = []
    _q_methods = ['get', 'post']

    def __init__(self, repo):
        self.repo = repo

    def get(self, request):
        return {}

    def post(self, request):
        return {}

    def _q_lookup(self, request, issue_number):
        """Traverse to a single issue by its per-project number."""
        repo = self.repo
        issue = ProjectIssue.get(project_id=repo.id,
                                 number=issue_number)
        if not issue:
            raise api_errors.NotFoundError('project issue')
        # Bug fix: IssueUI.__init__ takes (repo, issue); the extra `request`
        # argument previously passed here raised TypeError on every lookup.
        return IssueUI(repo, issue)
class IssueUI(RestAPIUI):
    """REST endpoint for a single project issue."""
    _q_exports = ['milestone']
    _q_methods = ['get']
    def __init__(self, repo, issue):
        self.repo = repo
        self.issue = issue
    def get(self, request):
        # Serialize the issue for the API response.
        return self.issue.as_dict()
    @property
    def milestone(self):
        # Sub-resource handler for the /milestone path segment.
        return MilestoneUI(self.issue)
class MilestoneUI(RestAPIUI):
    """REST endpoint for an issue's milestone sub-resource.

    All handlers are currently stubs returning empty responses.
    """
    _q_exports = []
    _q_methods = ['get', 'post', 'delete']
    def __init__(self, issue):
        self.issue = issue
    def get(self, request):
        return {}
    def post(self, request):
        return {}
    def delete(self, request):
        return {}
|
# -*- coding: utf-8 -*-
from vilya.libs import api_errors
from vilya.models.project_issue import ProjectIssue
from vilya.views.api.utils import RestAPIUI
class IssuesUI(RestAPIUI):
    """REST endpoint for a repository's issue collection."""
    _q_exports = []
    _q_methods = ['get', 'post']

    def __init__(self, repo):
        self.repo = repo

    def get(self, request):
        return {}

    def post(self, request):
        return {}

    def _q_lookup(self, request, issue_number):
        """Traverse to a single issue by its per-project number."""
        repo = self.repo
        issue = ProjectIssue.get(project_id=repo.id,
                                 number=issue_number)
        if not issue:
            raise api_errors.NotFoundError('project issue')
        # Bug fix: IssueUI.__init__ takes (repo, issue); the extra `request`
        # argument previously passed here raised TypeError on every lookup.
        return IssueUI(repo, issue)
class IssueUI(RestAPIUI):
    """REST endpoint for a single project issue."""
    _q_exports = ['milestone']
    _q_methods = ['get']
    def __init__(self, repo, issue):
        self.repo = repo
        self.issue = issue
    def get(self, request):
        # Serialize the issue for the API response.
        return self.issue.as_dict()
    @property
    def milestone(self):
        # Sub-resource handler for the /milestone path segment.
        return MilestoneUI(self.issue)
class MilestoneUI(RestAPIUI):
    """REST endpoint for an issue's milestone sub-resource.

    All handlers are currently stubs returning empty responses.
    """
    _q_exports = []
    _q_methods = ['get', 'post', 'delete']
    def __init__(self, issue):
        self.issue = issue
    def get(self, request):
        return {}
    def post(self, request):
        return {}
    def delete(self, request):
        return {}
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 2.179425
| 2
|
test/test_ihate.py
|
Profpatsch/beets
| 0
|
6628199
|
<reponame>Profpatsch/beets<gh_stars>0
"""Tests for the 'ihate' plugin"""
from _common import unittest
from beets import importer
from beets.library import Item
from beetsplug.ihate import IHatePlugin
class IHatePluginTest(unittest.TestCase):
    """Exercise IHatePlugin.do_i_hate_this against a singleton import task."""

    def test_hate(self):
        test_item = Item(
            genre='TestGenre',
            album=u'TestAlbum',
            artist=u'TestArtist')
        task = importer.SingletonImportTask(test_item)
        # Each case pairs a pattern collection with the expected verdict.
        cases = [
            # Empty query should let it pass.
            ({}, False),
            # 1 query match.
            (["artist:bad_artist", "artist:TestArtist"], True),
            # 2 query matches, either should trigger.
            (["album:test", "artist:testartist"], True),
            # Query is blocked by AND clause.
            (["album:notthis genre:testgenre"], False),
            # Both queries are blocked by AND clause with unmatched condition.
            (["album:notthis genre:testgenre",
              "artist:testartist album:notthis"], False),
            # Only one query should fire.
            (["album:testalbum genre:testgenre",
              "artist:testartist album:notthis"], True),
        ]
        for match_pattern, should_hate in cases:
            result = IHatePlugin.do_i_hate_this(task, match_pattern)
            if should_hate:
                self.assertTrue(result)
            else:
                self.assertFalse(result)
def suite():
    # Collect every test defined in this module for the runner below.
    return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
"""Tests for the 'ihate' plugin"""
from _common import unittest
from beets import importer
from beets.library import Item
from beetsplug.ihate import IHatePlugin
class IHatePluginTest(unittest.TestCase):
    """Exercise IHatePlugin.do_i_hate_this against a singleton import task."""

    def test_hate(self):
        test_item = Item(
            genre='TestGenre',
            album=u'TestAlbum',
            artist=u'TestArtist')
        task = importer.SingletonImportTask(test_item)
        # Each case pairs a pattern collection with the expected verdict.
        cases = [
            # Empty query should let it pass.
            ({}, False),
            # 1 query match.
            (["artist:bad_artist", "artist:TestArtist"], True),
            # 2 query matches, either should trigger.
            (["album:test", "artist:testartist"], True),
            # Query is blocked by AND clause.
            (["album:notthis genre:testgenre"], False),
            # Both queries are blocked by AND clause with unmatched condition.
            (["album:notthis genre:testgenre",
              "artist:testartist album:notthis"], False),
            # Only one query should fire.
            (["album:testalbum genre:testgenre",
              "artist:testartist album:notthis"], True),
        ]
        for match_pattern, should_hate in cases:
            result = IHatePlugin.do_i_hate_this(task, match_pattern)
            if should_hate:
                self.assertTrue(result)
            else:
                self.assertFalse(result)
def suite():
    # Collect every test defined in this module for the runner below.
    return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
en
| 0.936845
|
Tests for the 'ihate' plugin # Empty query should let it pass. # 1 query match. # 2 query matches, either should trigger. # Query is blocked by AND clause. # Both queries are blocked by AND clause with unmatched condition. # Only one query should fire.
| 2.318341
| 2
|
src/gardena/devices/base_device.py
|
codacy-badger/py-smart-gardena
| 10
|
6628200
|
<gh_stars>1-10
from gardena.base_gardena_class import BaseGardenaClass
class BaseDevice(BaseGardenaClass):
    """Base class informations about gardena devices."""

    # Common device attributes; "N/A" until a message supplies a real value.
    id = "N/A"
    type = "N/A"
    battery_level = "N/A"
    battery_state = "N/A"
    name = "N/A"
    rf_link_level = "N/A"
    rf_link_state = "N/A"
    serial = "N/A"
    callbacks = []

    def __init__(self, smart_system, device_map):
        self.smart_system = smart_system
        # Bug fix: `callbacks` used to be only a mutable class attribute, so
        # every device instance shared (and accumulated) the same callback
        # list. Each instance now owns its own list.
        self.callbacks = []
        # Only one common field
        self.id = device_map["COMMON"][0]["id"]
        for messages_list in device_map.values():
            for message in messages_list:
                self.update_data(message)

    def add_callback(self, callback):
        """Register *callback* to be invoked after every data update."""
        self.callbacks.append(callback)

    def update_data(self, device_map):
        """Apply one message to this device and notify registered callbacks."""
        if device_map["type"] == "COMMON":
            self.update_common_data(device_map)
        # NOTE(review): update_device_specific_data is expected to be
        # provided by subclasses — it is not defined on this base class.
        self.update_device_specific_data(device_map)
        for callback in self.callbacks:
            callback(self)

    def update_common_data(self, common_map):
        """Copy the COMMON-message attributes onto this device."""
        self.set_attribute_value("battery_level", common_map, "batteryLevel")
        self.set_attribute_value("battery_state", common_map, "batteryState")
        self.set_attribute_value("name", common_map, "name")
        self.set_attribute_value("rf_link_level", common_map, "rfLinkLevel")
        self.set_attribute_value("rf_link_state", common_map, "rfLinkState")
        self.set_attribute_value("serial", common_map, "serial")

    def set_attribute_value(self, field_name, attributes_map, attribute_name):
        """Set *field_name* from the message's attribute, if present."""
        if attribute_name in attributes_map["attributes"]:
            setattr(
                self, field_name, attributes_map["attributes"][attribute_name]["value"]
            )
|
from gardena.base_gardena_class import BaseGardenaClass
class BaseDevice(BaseGardenaClass):
"""Base class informations about gardena devices"""
id = "N/A"
type = "N/A"
battery_level = "N/A"
battery_state = "N/A"
name = "N/A"
rf_link_level = "N/A"
rf_link_state = "N/A"
serial = "N/A"
callbacks = []
def __init__(self, smart_system, device_map):
self.smart_system = smart_system
# Only one common field
self.id = device_map["COMMON"][0]["id"]
for messages_list in device_map.values():
for message in messages_list:
self.update_data(message)
def add_callback(self, callback):
self.callbacks.append(callback)
def update_data(self, device_map):
if device_map["type"] == "COMMON":
self.update_common_data(device_map)
self.update_device_specific_data(device_map)
for callback in self.callbacks:
callback(self)
def update_common_data(self, common_map):
self.set_attribute_value("battery_level", common_map, "batteryLevel")
self.set_attribute_value("battery_state", common_map, "batteryState")
self.set_attribute_value("name", common_map, "name")
self.set_attribute_value("rf_link_level", common_map, "rfLinkLevel")
self.set_attribute_value("rf_link_state", common_map, "rfLinkState")
self.set_attribute_value("serial", common_map, "serial")
def set_attribute_value(self, field_name, attributes_map, attribute_name):
if attribute_name in attributes_map["attributes"]:
setattr(
self, field_name, attributes_map["attributes"][attribute_name]["value"]
)
|
en
| 0.777413
|
Base class informations about gardena devices # Only one common field
| 2.778723
| 3
|
kernel/components/intersection/__init__.py
|
rinceyuan/WeFe
| 39
|
6628201
|
<gh_stars>10-100
from kernel.components.intersection.dh.dh_intersect_promoter import DhIntersectionPromoter
from kernel.components.intersection.dh.dh_intersect_provider import DhIntersectionProvider
from kernel.components.intersection.dhkey.dh_key_intersect_promoter import DhKeyIntersectionPromoter
from kernel.components.intersection.dhkey.dh_key_intersect_provider import DhKeyIntersectionProvider
__all__ = ['DhKeyIntersectionPromoter',
'DhKeyIntersectionProvider',
'DhIntersectionPromoter',
'DhIntersectionProvider']
|
from kernel.components.intersection.dh.dh_intersect_promoter import DhIntersectionPromoter
from kernel.components.intersection.dh.dh_intersect_provider import DhIntersectionProvider
from kernel.components.intersection.dhkey.dh_key_intersect_promoter import DhKeyIntersectionPromoter
from kernel.components.intersection.dhkey.dh_key_intersect_provider import DhKeyIntersectionProvider
__all__ = ['DhKeyIntersectionPromoter',
'DhKeyIntersectionProvider',
'DhIntersectionPromoter',
'DhIntersectionProvider']
|
none
| 1
| 1.198763
| 1
|
|
src/rascore/util/constants/gene.py
|
mitch-parker/rascore
| 7
|
6628202
|
# -*- coding: utf-8 -*-
"""
Copyright 2022 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
hras_name = "HRAS"
kras_name = "KRAS"
nras_name = "NRAS"
gene_class_lst = [kras_name, hras_name, nras_name]
swiss_id_lst = ["RASK_HUMAN", "RASN_HUMAN", "RASH_HUMAN"]
uniprot_acc_lst = ["P01116", "P01116-2", "P01112", "P01111"]
gene_class_dict = {
"GTPase HRas": hras_name,
"GTPase KRas": kras_name,
"GTPase NRas": nras_name,
}
|
# -*- coding: utf-8 -*-
"""
Copyright 2022 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
hras_name = "HRAS"
kras_name = "KRAS"
nras_name = "NRAS"
gene_class_lst = [kras_name, hras_name, nras_name]
swiss_id_lst = ["RASK_HUMAN", "RASN_HUMAN", "RASH_HUMAN"]
uniprot_acc_lst = ["P01116", "P01116-2", "P01112", "P01111"]
gene_class_dict = {
"GTPase HRas": hras_name,
"GTPase KRas": kras_name,
"GTPase NRas": nras_name,
}
|
en
| 0.85495
|
# -*- coding: utf-8 -*- Copyright 2022 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| 1.270099
| 1
|
paddlex/ppcls/arch/backbone/legendary_models/mobilenet_v1.py
|
cheneyveron/PaddleX
| 8
|
6628203
|
<reponame>cheneyveron/PaddleX
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from paddle import ParamAttr
import paddle.nn as nn
from paddle.nn import Conv2D, BatchNorm, Linear, ReLU, Flatten
from paddle.nn import AdaptiveAvgPool2D
from paddle.nn.initializer import KaimingNormal
from paddlex.ppcls.arch.backbone.base.theseus_layer import TheseusLayer
from paddlex.ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
# Download locations for the official pretrained checkpoints, keyed by the
# factory-function names defined at the bottom of this file.
MODEL_URLS = {
    "MobileNetV1_x0_25":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_25_pretrained.pdparams",
    "MobileNetV1_x0_5":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_5_pretrained.pdparams",
    "MobileNetV1_x0_75":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_75_pretrained.pdparams",
    "MobileNetV1":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_pretrained.pdparams"
}
# Public API: exactly the factory names above.
# NOTE(review): this is a dict_keys view, not a list — works for
# `import *` but is unconventional for __all__.
__all__ = MODEL_URLS.keys()
class ConvBNLayer(TheseusLayer):
    """Conv2D -> BatchNorm -> ReLU, the basic unit of MobileNetV1."""

    def __init__(self,
                 num_channels,
                 filter_size,
                 num_filters,
                 stride,
                 padding,
                 num_groups=1):
        super().__init__()
        # No conv bias: BatchNorm directly follows and supplies the shift.
        self.conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            weight_attr=ParamAttr(initializer=KaimingNormal()),
            bias_attr=False)
        self.bn = BatchNorm(num_filters)
        self.relu = ReLU()

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return self.relu(out)
class DepthwiseSeparable(TheseusLayer):
    """Depthwise separable block: depthwise 3x3 conv then pointwise 1x1 conv."""

    def __init__(self, num_channels, num_filters1, num_filters2, num_groups,
                 stride, scale):
        super().__init__()
        # Channel count between the two convolutions, after width scaling.
        scaled_mid = int(num_filters1 * scale)
        self.depthwise_conv = ConvBNLayer(
            num_channels=num_channels,
            num_filters=scaled_mid,
            filter_size=3,
            stride=stride,
            padding=1,
            num_groups=int(num_groups * scale))
        self.pointwise_conv = ConvBNLayer(
            num_channels=scaled_mid,
            filter_size=1,
            num_filters=int(num_filters2 * scale),
            stride=1,
            padding=0)

    def forward(self, x):
        return self.pointwise_conv(self.depthwise_conv(x))
class MobileNet(TheseusLayer):
    """
    MobileNet
    Args:
        scale: float=1.0. The coefficient that controls the size of network parameters.
        class_num: int=1000. The number of classes.
    Returns:
        model: nn.Layer. Specific MobileNet model depends on args.
    """
    def __init__(self, scale=1.0, class_num=1000, return_patterns=None):
        super().__init__()
        self.scale = scale
        # Stem: 3x3 stride-2 convolution halving the spatial resolution.
        self.conv = ConvBNLayer(
            num_channels=3,
            filter_size=3,
            num_filters=int(32 * scale),
            stride=2,
            padding=1)
        #num_channels, num_filters1, num_filters2, num_groups, stride
        # NOTE: only column 0 (input channels) is pre-scaled here; the other
        # columns are scaled inside DepthwiseSeparable itself.
        self.cfg = [[int(32 * scale), 32, 64, 32, 1],
                    [int(64 * scale), 64, 128, 64, 2],
                    [int(128 * scale), 128, 128, 128, 1],
                    [int(128 * scale), 128, 256, 128, 2],
                    [int(256 * scale), 256, 256, 256, 1],
                    [int(256 * scale), 256, 512, 256, 2],
                    [int(512 * scale), 512, 512, 512, 1],
                    [int(512 * scale), 512, 512, 512, 1],
                    [int(512 * scale), 512, 512, 512, 1],
                    [int(512 * scale), 512, 512, 512, 1],
                    [int(512 * scale), 512, 512, 512, 1],
                    [int(512 * scale), 512, 1024, 512, 2],
                    [int(1024 * scale), 1024, 1024, 1024, 1]]
        self.blocks = nn.Sequential(*[
            DepthwiseSeparable(
                num_channels=params[0],
                num_filters1=params[1],
                num_filters2=params[2],
                num_groups=params[3],
                stride=params[4],
                scale=scale) for params in self.cfg
        ])
        # Head: global average pooling -> flatten -> linear classifier.
        self.avg_pool = AdaptiveAvgPool2D(1)
        self.flatten = Flatten(start_axis=1, stop_axis=-1)
        self.fc = Linear(
            int(1024 * scale),
            class_num,
            weight_attr=ParamAttr(initializer=KaimingNormal()))
        if return_patterns is not None:
            # TheseusLayer feature: expose intermediate outputs matching the
            # given patterns through a forward post-hook.
            self.update_res(return_patterns)
            self.register_forward_post_hook(self._return_dict_hook)
    def forward(self, x):
        x = self.conv(x)
        x = self.blocks(x)
        x = self.avg_pool(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x
def _load_pretrained(pretrained, model, model_url, use_ssld):
if pretrained is False:
pass
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
)
def MobileNetV1_x0_25(pretrained=False, use_ssld=False, **kwargs):
    """Build MobileNetV1 with width multiplier 0.25.

    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `MobileNetV1_x0_25` model depends on args.
    """
    net = MobileNet(scale=0.25, **kwargs)
    _load_pretrained(pretrained, net, MODEL_URLS["MobileNetV1_x0_25"], use_ssld)
    return net
def MobileNetV1_x0_5(pretrained=False, use_ssld=False, **kwargs):
    """Build MobileNetV1 with width multiplier 0.5.

    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `MobileNetV1_x0_5` model depends on args.
    """
    net = MobileNet(scale=0.5, **kwargs)
    _load_pretrained(pretrained, net, MODEL_URLS["MobileNetV1_x0_5"], use_ssld)
    return net
def MobileNetV1_x0_75(pretrained=False, use_ssld=False, **kwargs):
    """Build MobileNetV1 with width multiplier 0.75.

    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `MobileNetV1_x0_75` model depends on args.
    """
    net = MobileNet(scale=0.75, **kwargs)
    _load_pretrained(pretrained, net, MODEL_URLS["MobileNetV1_x0_75"], use_ssld)
    return net
def MobileNetV1(pretrained=False, use_ssld=False, **kwargs):
    """Build the full-width (scale 1.0) MobileNetV1.

    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `MobileNetV1` model depends on args.
    """
    net = MobileNet(scale=1.0, **kwargs)
    _load_pretrained(pretrained, net, MODEL_URLS["MobileNetV1"], use_ssld)
    return net
|
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from paddle import ParamAttr
import paddle.nn as nn
from paddle.nn import Conv2D, BatchNorm, Linear, ReLU, Flatten
from paddle.nn import AdaptiveAvgPool2D
from paddle.nn.initializer import KaimingNormal
from paddlex.ppcls.arch.backbone.base.theseus_layer import TheseusLayer
from paddlex.ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
MODEL_URLS = {
"MobileNetV1_x0_25":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_25_pretrained.pdparams",
"MobileNetV1_x0_5":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_5_pretrained.pdparams",
"MobileNetV1_x0_75":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_75_pretrained.pdparams",
"MobileNetV1":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_pretrained.pdparams"
}
__all__ = MODEL_URLS.keys()
class ConvBNLayer(TheseusLayer):
def __init__(self,
num_channels,
filter_size,
num_filters,
stride,
padding,
num_groups=1):
super().__init__()
self.conv = Conv2D(
in_channels=num_channels,
out_channels=num_filters,
kernel_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
weight_attr=ParamAttr(initializer=KaimingNormal()),
bias_attr=False)
self.bn = BatchNorm(num_filters)
self.relu = ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class DepthwiseSeparable(TheseusLayer):
def __init__(self, num_channels, num_filters1, num_filters2, num_groups,
stride, scale):
super().__init__()
self.depthwise_conv = ConvBNLayer(
num_channels=num_channels,
num_filters=int(num_filters1 * scale),
filter_size=3,
stride=stride,
padding=1,
num_groups=int(num_groups * scale))
self.pointwise_conv = ConvBNLayer(
num_channels=int(num_filters1 * scale),
filter_size=1,
num_filters=int(num_filters2 * scale),
stride=1,
padding=0)
def forward(self, x):
x = self.depthwise_conv(x)
x = self.pointwise_conv(x)
return x
class MobileNet(TheseusLayer):
"""
MobileNet
Args:
scale: float=1.0. The coefficient that controls the size of network parameters.
class_num: int=1000. The number of classes.
Returns:
model: nn.Layer. Specific MobileNet model depends on args.
"""
def __init__(self, scale=1.0, class_num=1000, return_patterns=None):
super().__init__()
self.scale = scale
self.conv = ConvBNLayer(
num_channels=3,
filter_size=3,
num_filters=int(32 * scale),
stride=2,
padding=1)
#num_channels, num_filters1, num_filters2, num_groups, stride
self.cfg = [[int(32 * scale), 32, 64, 32, 1],
[int(64 * scale), 64, 128, 64, 2],
[int(128 * scale), 128, 128, 128, 1],
[int(128 * scale), 128, 256, 128, 2],
[int(256 * scale), 256, 256, 256, 1],
[int(256 * scale), 256, 512, 256, 2],
[int(512 * scale), 512, 512, 512, 1],
[int(512 * scale), 512, 512, 512, 1],
[int(512 * scale), 512, 512, 512, 1],
[int(512 * scale), 512, 512, 512, 1],
[int(512 * scale), 512, 512, 512, 1],
[int(512 * scale), 512, 1024, 512, 2],
[int(1024 * scale), 1024, 1024, 1024, 1]]
self.blocks = nn.Sequential(*[
DepthwiseSeparable(
num_channels=params[0],
num_filters1=params[1],
num_filters2=params[2],
num_groups=params[3],
stride=params[4],
scale=scale) for params in self.cfg
])
self.avg_pool = AdaptiveAvgPool2D(1)
self.flatten = Flatten(start_axis=1, stop_axis=-1)
self.fc = Linear(
int(1024 * scale),
class_num,
weight_attr=ParamAttr(initializer=KaimingNormal()))
if return_patterns is not None:
self.update_res(return_patterns)
self.register_forward_post_hook(self._return_dict_hook)
def forward(self, x):
x = self.conv(x)
x = self.blocks(x)
x = self.avg_pool(x)
x = self.flatten(x)
x = self.fc(x)
return x
def _load_pretrained(pretrained, model, model_url, use_ssld):
if pretrained is False:
pass
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
)
def MobileNetV1_x0_25(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV1_x0_25
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV1_x0_25` model depends on args.
"""
model = MobileNet(scale=0.25, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1_x0_25"],
use_ssld)
return model
def MobileNetV1_x0_5(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV1_x0_5
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV1_x0_5` model depends on args.
"""
model = MobileNet(scale=0.5, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1_x0_5"],
use_ssld)
return model
def MobileNetV1_x0_75(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV1_x0_75
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV1_x0_75` model depends on args.
"""
model = MobileNet(scale=0.75, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1_x0_75"],
use_ssld)
return model
def MobileNetV1(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV1
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV1` model depends on args.
"""
model = MobileNet(scale=1.0, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1"], use_ssld)
return model
|
en
| 0.596675
|
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. MobileNet Args: scale: float=1.0. The coefficient that controls the size of network parameters. class_num: int=1000. The number of classes. Returns: model: nn.Layer. Specific MobileNet model depends on args. #num_channels, num_filters1, num_filters2, num_groups, stride MobileNetV1_x0_25 Args: pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. If str, means the path of the pretrained model. use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. Returns: model: nn.Layer. Specific `MobileNetV1_x0_25` model depends on args. MobileNetV1_x0_5 Args: pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. If str, means the path of the pretrained model. use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. Returns: model: nn.Layer. Specific `MobileNetV1_x0_5` model depends on args. MobileNetV1_x0_75 Args: pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. If str, means the path of the pretrained model. use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. Returns: model: nn.Layer. Specific `MobileNetV1_x0_75` model depends on args. MobileNetV1 Args: pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. 
If str, means the path of the pretrained model. use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. Returns: model: nn.Layer. Specific `MobileNetV1` model depends on args.
| 1.414943
| 1
|
applications/ShapeOptimizationApplication/tests/algorithm_penalized_projection_test/run_test.py
|
AndreaVoltan/MyKratos7.0
| 2
|
6628204
|
<reponame>AndreaVoltan/MyKratos7.0
# Making KratosMultiphysics backward compatible with python 2.6 and 2.7
from __future__ import print_function, absolute_import, division
# Import Kratos core and apps
from KratosMultiphysics import *
from KratosMultiphysics.ShapeOptimizationApplication import *
# Additional imports
from KratosMultiphysics.KratosUnittest import TestCase
import KratosMultiphysics.kratos_utilities as kratos_utilities
import csv, os
# Read parameters
with open("parameters.json",'r') as parameter_file:
parameters = Parameters(parameter_file.read())
model = Model()
# =======================================================================================================
# Define external analyzer
# =======================================================================================================
# The external analyzer provides a response to constrain the distance of a specific node to a given target
from analyzer_base import AnalyzerBaseClass
class CustomAnalyzer(AnalyzerBaseClass):
    """External analyzer providing a "distance" response.

    The response value is the squared distance of one constrained node to a
    fixed target position; the gradient is non-zero only for that node.
    """

    # --------------------------------------------------------------------------------------------------
    def __init__(self):
        self.constrained_node_id = 975
        self.target_x = 1.15655
        self.target_y = 9.93289
        self.target_z = 5.28392

    # --------------------------------------------------------------------------------------------------
    def AnalyzeDesignAndReportToCommunicator(self, current_design, optimization_iteration, communicator):
        # Report value and/or gradient only when the optimizer asks for it.
        if communicator.isRequestingValueOf("distance"):
            communicator.reportValue("distance", self.__CalculateValue(current_design))
        if communicator.isRequestingGradientOf("distance"):
            communicator.reportGradient("distance", self.__CalculateGradient(current_design))

    # --------------------------------------------------------------------------
    def __CalculateValue(self, current_design):
        node = current_design.GetNodes()[self.constrained_node_id]
        dx = node.X0 - self.target_x
        dy = node.Y0 - self.target_y
        dz = node.Z0 - self.target_z
        return dx ** 2 + dy ** 2 + dz ** 2

    # --------------------------------------------------------------------------
    def __CalculateGradient(self, current_design):
        constrained = current_design.GetNodes()[self.constrained_node_id]
        response_gradient = {}
        for design_node in current_design.Nodes:
            if design_node.Id == self.constrained_node_id:
                gradient = [2 * (constrained.X0 - self.target_x),
                            2 * (constrained.Y0 - self.target_y),
                            2 * (constrained.Z0 - self.target_z)]
            else:
                gradient = [0.0, 0.0, 0.0]
            response_gradient[design_node.Id] = gradient
        return response_gradient
# =======================================================================================================
# Perform optimization
# =======================================================================================================
# Create optimizer and perform optimization
import optimizer_factory
optimizer = optimizer_factory.CreateOptimizer(parameters["optimization_settings"], model, CustomAnalyzer())
optimizer.Optimize()
# =======================================================================================================
# Test results and clean directory
# =======================================================================================================
output_directory = parameters["optimization_settings"]["output"]["output_directory"].GetString()
optimization_log_filename = parameters["optimization_settings"]["output"]["optimization_log_filename"].GetString() + ".csv"
optimization_model_part_name = parameters["optimization_settings"]["model_settings"]["model_part_name"].GetString()
# Testing
original_directory = os.getcwd()
os.chdir(output_directory)
with open(optimization_log_filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
last_line = None
for line in reader:
if not line:
continue
else:
last_line = line
resulting_iteration = float(last_line[0].strip())
resulting_improvement = float(last_line[2].strip())
resulting_constraint_value = float(last_line[4].strip())
# # Check against specifications
TestCase().assertEqual(resulting_iteration, 8)
TestCase().assertAlmostEqual(resulting_improvement, -1.09262E+01, 4)
TestCase().assertAlmostEqual(resulting_constraint_value, 2.76773E-02, 4)
os.chdir(original_directory)
# Cleaning
kratos_utilities.DeleteDirectoryIfExisting("__pycache__")
kratos_utilities.DeleteDirectoryIfExisting(output_directory)
kratos_utilities.DeleteFileIfExisting(os.path.basename(original_directory)+".post.lst")
kratos_utilities.DeleteFileIfExisting(optimization_model_part_name+".time")
# =======================================================================================================
|
# Making KratosMultiphysics backward compatible with python 2.6 and 2.7
from __future__ import print_function, absolute_import, division
# Import Kratos core and apps
from KratosMultiphysics import *
from KratosMultiphysics.ShapeOptimizationApplication import *
# Additional imports
from KratosMultiphysics.KratosUnittest import TestCase
import KratosMultiphysics.kratos_utilities as kratos_utilities
import csv, os
# Read parameters
with open("parameters.json",'r') as parameter_file:
parameters = Parameters(parameter_file.read())
model = Model()
# =======================================================================================================
# Define external analyzer
# =======================================================================================================
# The external analyzer provides a response to constrain the distance of a specific node to a given target
from analyzer_base import AnalyzerBaseClass
class CustomAnalyzer(AnalyzerBaseClass):
# --------------------------------------------------------------------------------------------------
def __init__( self ):
self.constrained_node_id =975
self.target_x = 1.15655
self.target_y = 9.93289
self.target_z = 5.28392
# --------------------------------------------------------------------------------------------------
def AnalyzeDesignAndReportToCommunicator(self, current_design, optimization_iteration, communicator):
if communicator.isRequestingValueOf("distance"):
communicator.reportValue("distance", self.__CalculateValue(current_design))
if communicator.isRequestingGradientOf("distance"):
communicator.reportGradient("distance", self.__CalculateGradient(current_design))
# --------------------------------------------------------------------------
def __CalculateValue( self, current_design ):
constrained_node = current_design.GetNodes()[self.constrained_node_id]
distance = [0,0,0]
distance[0] = constrained_node.X0 - self.target_x
distance[1] = constrained_node.Y0 - self.target_y
distance[2] = constrained_node.Z0 - self.target_z
return distance[0]**2 + distance[1]**2 + distance[2]**2
# --------------------------------------------------------------------------
def __CalculateGradient( self, current_design ):
constrained_node = current_design.GetNodes()[self.constrained_node_id]
response_gradient = {}
for node in current_design.Nodes:
local_gradient = [0,0,0]
if node.Id == self.constrained_node_id:
local_gradient[0] = 2*(constrained_node.X0 - self.target_x)
local_gradient[1] = 2*(constrained_node.Y0 - self.target_y)
local_gradient[2] = 2*(constrained_node.Z0 - self.target_z)
else:
local_gradient[0] = 0.0
local_gradient[1] = 0.0
local_gradient[2] = 0.0
response_gradient[node.Id] = local_gradient
return response_gradient
# =======================================================================================================
# Perform optimization
# =======================================================================================================
# Create optimizer and perform optimization
import optimizer_factory
optimizer = optimizer_factory.CreateOptimizer(parameters["optimization_settings"], model, CustomAnalyzer())
optimizer.Optimize()
# =======================================================================================================
# Test results and clean directory
# =======================================================================================================
output_directory = parameters["optimization_settings"]["output"]["output_directory"].GetString()
optimization_log_filename = parameters["optimization_settings"]["output"]["optimization_log_filename"].GetString() + ".csv"
optimization_model_part_name = parameters["optimization_settings"]["model_settings"]["model_part_name"].GetString()
# Testing
original_directory = os.getcwd()
os.chdir(output_directory)
with open(optimization_log_filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
last_line = None
for line in reader:
if not line:
continue
else:
last_line = line
resulting_iteration = float(last_line[0].strip())
resulting_improvement = float(last_line[2].strip())
resulting_constraint_value = float(last_line[4].strip())
# # Check against specifications
TestCase().assertEqual(resulting_iteration, 8)
TestCase().assertAlmostEqual(resulting_improvement, -1.09262E+01, 4)
TestCase().assertAlmostEqual(resulting_constraint_value, 2.76773E-02, 4)
os.chdir(original_directory)
# Cleaning
kratos_utilities.DeleteDirectoryIfExisting("__pycache__")
kratos_utilities.DeleteDirectoryIfExisting(output_directory)
kratos_utilities.DeleteFileIfExisting(os.path.basename(original_directory)+".post.lst")
kratos_utilities.DeleteFileIfExisting(optimization_model_part_name+".time")
# =======================================================================================================
|
en
| 0.365951
|
# Making KratosMultiphysics backward compatible with python 2.6 and 2.7 # Import Kratos core and apps # Additional imports # Read parameters # ======================================================================================================= # Define external analyzer # ======================================================================================================= # The external analyzer provides a response to constrain the distance of a specific node to a given target # -------------------------------------------------------------------------------------------------- # -------------------------------------------------------------------------------------------------- # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # ======================================================================================================= # Perform optimization # ======================================================================================================= # Create optimizer and perform optimization # ======================================================================================================= # Test results and clean directory # ======================================================================================================= # Testing # # Check against specifications # Cleaning # =======================================================================================================
| 2.204788
| 2
|
src/1.DataPreprocessing/slice_extraction.py
|
AdrianArnaiz/Brain-MRI-Autoencoder
| 18
|
6628205
|
<reponame>AdrianArnaiz/Brain-MRI-Autoencoder
"""Extract 2D slices from IXI MRI volumes using DeepBrainSliceExtractor.

Loads precomputed brain metadata and the train/val/test volume splits from
pickle files next to this script, then runs the extractor over every *.gz
volume in the IXI-T1 folder, saving the slices as PNG images.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
# Path improvement configuration
from os.path import dirname
import os
import sys
import numpy as np
import pickle as pkl

script_path = dirname(__file__)
sys.path.append(script_path)
from deep_brain_slice_extractor import DeepBrainSliceExtractor

# os.path.join replaces manual `+ os.path.sep +` concatenation: it is
# portable and stays correct when dirname(__file__) is an empty string.
_experiments_dir = os.path.join(script_path, '..', '2.Experiments')

with open(os.path.join(script_path, 'deepbrain_image_data.pickle'), 'rb') as f:
    db_image_data = pkl.load(f)
with open(os.path.join(_experiments_dir, 'data_test_volumes_df.pkl'), 'rb') as f:
    test_vols = pkl.load(f)
with open(os.path.join(_experiments_dir, 'data_train_val_volumes_df.pkl'), 'rb') as f:
    train_val_vols = pkl.load(f)

OUTFORMAT = 'png'  # output image format for the extracted slices
SAVE_PATH = os.path.join(script_path, '..', 'IXI-T1', 'PNG') + os.path.sep

# Keep only the IXI volume identifiers from the split dataframes.
test_vols = test_vols.IXI_ID.values
train_val_vols = train_val_vols.IXI_ID.values

se = DeepBrainSliceExtractor(
    volume_folder=os.path.join(script_path, '..', 'IXI-T1', '*.gz'),
    save_img_path=SAVE_PATH,
    pretrained=True,
    img_data=db_image_data,
    trainval_ids=train_val_vols,
    test_ids=test_vols,
    out_format=OUTFORMAT,
)
se.transform()
|
""" Python script for extract slices from IXI volumes.
We will use DeepBrainSliceExtractor class
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
# Path improvement configuration
from os.path import dirname
import os
import sys
import numpy as np
import pickle as pkl
script_path = dirname(__file__)
sys.path.append(script_path)
from deep_brain_slice_extractor import DeepBrainSliceExtractor
with open(script_path+os.path.sep+'deepbrain_image_data.pickle', 'rb') as f:
db_image_data = pkl.load(f)
with open(script_path+os.path.sep+'..'+os.path.sep+'2.Experiments'+os.path.sep+'data_test_volumes_df.pkl', 'rb') as f:
test_vols = pkl.load(f)
with open(script_path+os.path.sep+'..'+os.path.sep+'2.Experiments'+os.path.sep+'data_train_val_volumes_df.pkl', 'rb') as f:
train_val_vols = pkl.load(f)
OUTFORMAT = 'png'
SAVE_PATH =script_path+os.path.sep+'..'+os.path.sep+'IXI-T1'+os.path.sep+'PNG'+os.path.sep
test_vols = test_vols.IXI_ID.values
train_val_vols = train_val_vols.IXI_ID.values
se = DeepBrainSliceExtractor(volume_folder = script_path+os.path.sep+'..'+os.path.sep+'IXI-T1'+os.path.sep+'*.gz',
save_img_path = SAVE_PATH,
pretrained=True,
img_data=db_image_data,
trainval_ids=train_val_vols,
test_ids=test_vols,
out_format=OUTFORMAT)
se.transform()
|
en
| 0.573708
|
Python script for extract slices from IXI volumes. We will use DeepBrainSliceExtractor class # Path improvement configuration
| 2.310735
| 2
|
python/playingthechanges/ptc_fetch.py
|
mbland/google-code-archive
| 1
|
6628206
|
<filename>python/playingthechanges/ptc_fetch.py
#! /usr/bin/python
# coding=UTF-8
"""
Fetches the MP3 files from playingthechanges.com to import into iTunes.
Author: <NAME> (<EMAIL>)
http://mike-bland.com/
Date: 2014-03-13
License: Creative Commons Attribution 4.0 International (CC By 4.0)
http://creativecommons.org/licenses/by/4.0/deed.en_US
Grabs all the MP3 links from the http://playingthechanges.com/ page and
downloads each file into the current directory, then updates the tag info for
each MP3.
If you don't have the requests module installed, you may need to
install pip, the Python Package Index installer:
https://pypi.python.org/pypi
http://www.pip-installer.org/en/latest/installing.html
Then:
$ sudo pip install requests
Requires the id3lib tools. For OS X, install Homebrew: http://brew.sh/
Then:
$ brew install id3lib
Written with hints from:
http://ubuntuforums.org/showthread.php?t=1542894
http://docs.python-requests.org/en/latest/user/quickstart/
More info:
http://mike-bland.com/2014/03/17/playing-the-changes-hack-continued.html
"""
import contextlib
import os
import os.path
import re
import requests
import subprocess
import sys
# Base URL that the MP3 files are scraped from.
PTC_COM='http://www.playingthechanges.com'
# Sort weight for each chord root: walking the circle of fourths up from C.
ROOT_WEIGHTS = {
    'C': 0,
    'F': 1,
    'Bb': 2,
    'Eb': 3,
    'Ab': 4,
    'Db': 5,
    'Fsharp': 6,
    'B': 7,
    'E': 8,
    'A': 9,
    'D': 10,
    'G': 11,
}
# Sort weight for each chord suffix (quality); this is the primary sort key.
SUFFIX_WEIGHTS = {
    'Maj7': 0,
    'min7': 1,
    '7': 2,
    'min7b5': 3,
    '7b9b13': 4,
    '7b913': 5,
}
# Display names used in iTunes track titles for chord roots.
# I'd intended to use the proper unicode flat (U+266D) and sharp (U+266F),
# but iTunes doesn't grok them.
ROOT_REWRITES = {
    'C': 'C',
    'F': 'F',
    'Bb': 'Bb',
    'Eb': 'Eb',
    'Ab': 'Ab',
    'Db': 'Db',
    'Fsharp': 'F#',
    'B': 'B',
    'E': 'E',
    'A': 'A',
    'D': 'D',
    'G': 'G',
}
# Display names used in iTunes track titles for chord suffixes.
SUFFIX_REWRITES = {
    'Maj7': 'Maj7',
    'min7': '-7',
    '7': '7',
    'min7b5': '-7(b5)',
    '7b9b13': '7(b9,b13)',
    '7b913': '7(b9,13)',
}
def FetchPtcFiles():
    """Scrapes and fetches the list of MP3 files from playingthechanges.com.

    Downloads every MP3 linked from the site index page into the current
    working directory, streaming each file in 1 MiB chunks.
    """
    # contextlib.closing releases the HTTP response even on error.
    with contextlib.closing(requests.get('%s/' % PTC_COM)) as index_page:
        mp3_links = re.findall('downloads/.*\.mp3', index_page.text)
        for i, link in enumerate(mp3_links):
            # Python 2 print statement -- this script targets Python 2.
            print 'Fetching %2d of %d: %s' % (i + 1, len(mp3_links), link)
            with contextlib.closing(requests.get('%s/%s' % (PTC_COM, link))) as mp3:
                with open(os.path.basename(link), 'wb') as fd:
                    # Stream in 1 MiB (1<<20 byte) chunks to bound memory use.
                    for chunk in mp3.iter_content(1<<20):
                        fd.write(chunk)
class BadChordFileNameException(Exception):
    """Raised when a chord file name does not match the expected format."""
def SplitFileName(file_name):
    """Splits a chord file name into its (root, suffix) components.

    Args:
        file_name: corresponds to a chord file from playingthechanges.com
    Returns:
        a (chord root, chord suffix) tuple
    Raises:
        BadChordFileNameException: if the file does not end with .mp3, or if
            either component is not a known key of ROOT_WEIGHTS /
            SUFFIX_WEIGHTS.
    """
    mp3_ext = '.mp3'
    if not file_name.endswith(mp3_ext):
        raise BadChordFileNameException('Bad chord file name: %s' % file_name)
    # The root is one letter, optionally followed by 'b' (flat) or by the
    # literal word 'sharp' (e.g. "Fsharp"); everything up to the extension
    # after that is the suffix.
    if file_name.startswith('sharp', 1):
        split_at = 6
    elif file_name[1] == 'b':
        split_at = 2
    else:
        split_at = 1
    root = file_name[:split_at]
    suffix = file_name[split_at:-len(mp3_ext)]
    if root not in ROOT_WEIGHTS:
        raise BadChordFileNameException(
            'Unknown chord root in file name: %s' % file_name)
    if suffix not in SUFFIX_WEIGHTS:
        raise BadChordFileNameException(
            'Unknown chord suffix in file name: %s' % file_name)
    return (root, suffix)
def CompareChordFileNames(lhs, rhs):
    """Defines an ordering for split chord file names.

    Suffix order weight trumps root order. Root order is defined by walking
    the circle of fourths up from C. Both are defined in ROOT_WEIGHTS and
    SUFFIX_WEIGHTS.

    Args:
        lhs: left-hand tuple of (root, suffix)
        rhs: right-hand tuple of (root, suffix)
    Returns:
        -1 if lhs < rhs
        0 if lhs == rhs
        1 if lhs > rhs
    """
    # Python 2 cmp(): suffix weight is the primary key; root weight breaks
    # ties via the short-circuiting `or`.
    return (cmp(SUFFIX_WEIGHTS[lhs[1]], SUFFIX_WEIGHTS[rhs[1]]) or
            cmp(ROOT_WEIGHTS[lhs[0]], ROOT_WEIGHTS[rhs[0]]))
def ChordName(file_name):
    """Generates the chord name from the (root, suffix) file name tuple."""
    # The rewrite tables map file-name tokens to display text (Fsharp -> F#,
    # min7 -> -7, etc.).
    return u'%s%s' % (ROOT_REWRITES[file_name[0]], SUFFIX_REWRITES[file_name[1]])
def UpdateMp3Tags():
    """Tags every .mp3 in the current directory via the id3tag CLI tool.

    Files are numbered in CompareChordFileNames order, so tracks sort by
    chord suffix first, then by the circle of fourths.
    """
    mp3s = [SplitFileName(i) for i in os.listdir('.') if i.endswith('.mp3')]
    # Python 2 list.sort() accepting a raw comparison function.
    mp3s.sort(CompareChordFileNames)
    for i, mp3 in enumerate(mp3s):
        mp3_file = '%s%s.mp3' % mp3
        print 'Updating: %s' % mp3_file
        command = ['/usr/local/bin/id3tag',
                   '--artist=<NAME>',
                   '--album=Playing the Changes',
                   '--song=%s' % ChordName(mp3),
                   '--track=%d' % (i + 1),
                   '--total=%d' % len(mp3s),
                   mp3_file]
        return_code = subprocess.call(command)
        if return_code:
            # Abort on the first tagging failure, reporting the full command.
            print >> sys.stderr, ('Error updating %s (return code %d) with '
                'command: %s' % (mp3_file, return_code, ' '.join(command)))
            sys.exit(return_code)
    print "Updated %d mp3%s" % (len(mp3s), len(mp3s) != 1 and 's' or '')

if __name__ == '__main__':
    FetchPtcFiles()
    UpdateMp3Tags()
|
<filename>python/playingthechanges/ptc_fetch.py
#! /usr/bin/python
# coding=UTF-8
"""
Fetches the MP3 files from playingthechanges.com to import into iTunes.
Author: <NAME> (<EMAIL>)
http://mike-bland.com/
Date: 2014-03-13
License: Creative Commons Attribution 4.0 International (CC By 4.0)
http://creativecommons.org/licenses/by/4.0/deed.en_US
Grabs all the MP3 links from the http://playingthechanges.com/ page and
downloads each file into the current directory, then updates the tag info for
each MP3.
If you don't have the requests module installed, you may need to
install pip, the Python Package Index installer:
https://pypi.python.org/pypi
http://www.pip-installer.org/en/latest/installing.html
Then:
$ sudo pip install requests
Requires the id3lib tools. For OS X, install Homebrew: http://brew.sh/
Then:
$ brew install id3lib
Written with hints from:
http://ubuntuforums.org/showthread.php?t=1542894
http://docs.python-requests.org/en/latest/user/quickstart/
More info:
http://mike-bland.com/2014/03/17/playing-the-changes-hack-continued.html
"""
import contextlib
import os
import os.path
import re
import requests
import subprocess
import sys
PTC_COM='http://www.playingthechanges.com'
ROOT_WEIGHTS = {
'C': 0,
'F': 1,
'Bb': 2,
'Eb': 3,
'Ab': 4,
'Db': 5,
'Fsharp': 6,
'B': 7,
'E': 8,
'A': 9,
'D': 10,
'G': 11,
}
SUFFIX_WEIGHTS = {
'Maj7': 0,
'min7': 1,
'7': 2,
'min7b5': 3,
'7b9b13': 4,
'7b913': 5,
}
# I'd intended to use the proper unicode flat (U+266D) and sharp (U+266F),
# but iTunes doesn't grok them.
ROOT_REWRITES = {
'C': 'C',
'F': 'F',
'Bb': 'Bb',
'Eb': 'Eb',
'Ab': 'Ab',
'Db': 'Db',
'Fsharp': 'F#',
'B': 'B',
'E': 'E',
'A': 'A',
'D': 'D',
'G': 'G',
}
SUFFIX_REWRITES = {
'Maj7': 'Maj7',
'min7': '-7',
'7': '7',
'min7b5': '-7(b5)',
'7b9b13': '7(b9,b13)',
'7b913': '7(b9,13)',
}
def FetchPtcFiles():
"""Scrapes and fetches the list of MP3 files from playingthechanges.com."""
with contextlib.closing(requests.get('%s/' % PTC_COM)) as index_page:
mp3_links = re.findall('downloads/.*\.mp3', index_page.text)
for i, link in enumerate(mp3_links):
print 'Fetching %2d of %d: %s' % (i + 1, len(mp3_links), link)
with contextlib.closing(requests.get('%s/%s' % (PTC_COM, link))) as mp3:
with open(os.path.basename(link), 'wb') as fd:
for chunk in mp3.iter_content(1<<20):
fd.write(chunk)
class BadChordFileNameException(Exception):
"""Raised when a chord file name does not match the expected format."""
pass
def SplitFileName(file_name):
"""Returns the tuple (root, suffix) based on a chord's file name.
Args:
file_name: corresponds to a chord file from playingthechanges.com
Returns:
a (chord root, chord suffix) tuple
Raises:
BadChordFileNameException: if the file does not end with .mp3 or if either
the chord root or chord suffix does not correspond to an expected value
within ROOT_WEIGHTS and SUFFIX_WEIGHTS, respectively
"""
kMp3Suffix = '.mp3'
if not file_name.endswith(kMp3Suffix):
raise BadChordFileNameException('Bad chord file name: %s' % file_name)
suffix_start = 1
if file_name[1] == 'b':
suffix_start = 2
elif file_name.startswith('sharp', 1):
suffix_start = 6
root = file_name[:suffix_start]
suffix = file_name[suffix_start:-len(kMp3Suffix)]
if root not in ROOT_WEIGHTS:
raise BadChordFileNameException('Unknown chord root in file name: %s' %
file_name)
if suffix not in SUFFIX_WEIGHTS:
raise BadChordFileNameException('Unknown chord suffix in file name: %s' %
file_name)
return (root, suffix)
def CompareChordFileNames(lhs, rhs):
"""Defines an ordering for split chord file names.
Suffix order weight trumps root order. Root order is defined by walking the
circle of fourths up from C. Both are defined in ROOT_WEIGHTS and
SUFFIX_WEIGHTS.
Args:
lhs: left-hand tuple of (root, suffix)
rhs: right-hand tuple of (root, suffix)
Returns:
-1 if lhs < rhs
0 if lhs == rhs
1 if lhs > rhs
"""
return (cmp(SUFFIX_WEIGHTS[lhs[1]], SUFFIX_WEIGHTS[rhs[1]]) or
cmp(ROOT_WEIGHTS[lhs[0]], ROOT_WEIGHTS[rhs[0]]))
def ChordName(file_name):
"""Generates the chord name from the (root, suffix) file name tuple."""
return u'%s%s' % (ROOT_REWRITES[file_name[0]], SUFFIX_REWRITES[file_name[1]])
def UpdateMp3Tags():
mp3s = [SplitFileName(i) for i in os.listdir('.') if i.endswith('.mp3')]
mp3s.sort(CompareChordFileNames)
for i, mp3 in enumerate(mp3s):
mp3_file = '%s%s.mp3' % mp3
print 'Updating: %s' % mp3_file
command = ['/usr/local/bin/id3tag',
'--artist=<NAME>',
'--album=Playing the Changes',
'--song=%s' % ChordName(mp3),
'--track=%d' % (i + 1),
'--total=%d' % len(mp3s),
mp3_file]
return_code = subprocess.call(command)
if return_code:
print >> sys.stderr, ('Error updating %s (return code %d) with '
'command: %s' % (mp3_file, return_code, ' '.join(command)))
sys.exit(return_code)
print "Updated %d mp3%s" % (len(mp3s), len(mp3s) != 1 and 's' or '')
if __name__ == '__main__':
FetchPtcFiles()
UpdateMp3Tags()
|
en
| 0.69008
|
#! /usr/bin/python # coding=UTF-8 Fetches the MP3 files from playingthechanges.com to import into iTunes. Author: <NAME> (<EMAIL>) http://mike-bland.com/ Date: 2014-03-13 License: Creative Commons Attribution 4.0 International (CC By 4.0) http://creativecommons.org/licenses/by/4.0/deed.en_US Grabs all the MP3 links from the http://playingthechanges.com/ page and downloads each file into the current directory, then updates the tag info for each MP3. If you don't have the requests module installed, you may need to install pip, the Python Package Index installer: https://pypi.python.org/pypi http://www.pip-installer.org/en/latest/installing.html Then: $ sudo pip install requests Requires the id3lib tools. For OS X, install Homebrew: http://brew.sh/ Then: $ brew install id3lib Written with hints from: http://ubuntuforums.org/showthread.php?t=1542894 http://docs.python-requests.org/en/latest/user/quickstart/ More info: http://mike-bland.com/2014/03/17/playing-the-changes-hack-continued.html # I'd intended to use the proper unicode flat (U+266D) and sharp (U+266F), # but iTunes doesn't grok them. #', Scrapes and fetches the list of MP3 files from playingthechanges.com. Raised when a chord file name does not match the expected format. Returns the tuple (root, suffix) based on a chord's file name. Args: file_name: corresponds to a chord file from playingthechanges.com Returns: a (chord root, chord suffix) tuple Raises: BadChordFileNameException: if the file does not end with .mp3 or if either the chord root or chord suffix does not correspond to an expected value within ROOT_WEIGHTS and SUFFIX_WEIGHTS, respectively Defines an ordering for split chord file names. Suffix order weight trumps root order. Root order is defined by walking the circle of fourths up from C. Both are defined in ROOT_WEIGHTS and SUFFIX_WEIGHTS. 
Args: lhs: left-hand tuple of (root, suffix) rhs: right-hand tuple of (root, suffix) Returns: -1 if lhs < rhs 0 if lhs == rhs 1 if lhs > rhs Generates the chord name from the (root, suffix) file name tuple.
| 2.737031
| 3
|
dump_to_sqlite.py
|
hargup/stackexchange-dump-to-postgres
| 0
|
6628207
|
<reponame>hargup/stackexchange-dump-to-postgres
import sqlite3
import os
import xml.etree.cElementTree as etree
import logging
ANATHOMY = {
'badges': {
'Id': 'INTEGER',
'UserId': 'INTEGER',
'Class': 'INTEGER',
'Name': 'TEXT',
'Date': 'DATETIME',
'TagBased': 'BOOLEAN',
},
'comments': {
'Id': 'INTEGER',
'PostId': 'INTEGER',
'Score': 'INTEGER',
'Text': 'TEXT',
'CreationDate': 'DATETIME',
'UserId': 'INTEGER',
'UserDisplayName': 'TEXT'
},
'posts': {
'Id': 'INTEGER',
'PostTypeId': 'INTEGER', # 1: Question, 2: Answer
'ParentId': 'INTEGER', # (only present if PostTypeId is 2)
'AcceptedAnswerId': 'INTEGER', # (only present if PostTypeId is 1)
'CreationDate': 'DATETIME',
'Score': 'INTEGER',
'ViewCount': 'INTEGER',
'Body': 'TEXT',
'OwnerUserId': 'INTEGER', # (present only if user has not been deleted)
'OwnerDisplayName': 'TEXT',
'LastEditorUserId': 'INTEGER',
'LastEditorDisplayName': 'TEXT', # ="<NAME>"
'LastEditDate': 'DATETIME', #="2009-03-05T22:28:34.823"
'LastActivityDate': 'DATETIME', #="2009-03-11T12:51:01.480"
'CommunityOwnedDate': 'DATETIME', #(present only if post is community wikied)
'Title': 'TEXT',
'Tags': 'TEXT',
'AnswerCount': 'INTEGER',
'CommentCount': 'INTEGER',
'FavoriteCount': 'INTEGER',
'ClosedDate': 'DATETIME',
'ContentLicense': 'TEXT'
},
'votes': {
'Id': 'INTEGER',
'PostId': 'INTEGER',
'UserId': 'INTEGER',
'VoteTypeId': 'INTEGER',
# - 1: AcceptedByOriginator
# - 2: UpMod
# - 3: DownMod
# - 4: Offensive
# - 5: Favorite
# - 6: Close
# - 7: Reopen
# - 8: BountyStart
# - 9: BountyClose
# - 10: Deletion
# - 11: Undeletion
# - 12: Spam
# - 13: InformModerator
'CreationDate': 'DATETIME',
'BountyAmount': 'INTEGER'
},
'posthistory': {
'Id': 'INTEGER',
'PostHistoryTypeId': 'INTEGER',
'PostId': 'INTEGER',
'RevisionGUID': 'TEXT',
'CreationDate': 'DATETIME',
'UserId': 'INTEGER',
'UserDisplayName': 'TEXT',
'Comment': 'TEXT',
'Text': 'TEXT'
},
'postlinks': {
'Id': 'INTEGER',
'CreationDate': 'DATETIME',
'PostId': 'INTEGER',
'RelatedPostId': 'INTEGER',
'PostLinkTypeId': 'INTEGER',
'LinkTypeId': 'INTEGER'
},
'users': {
'Id': 'INTEGER',
'Reputation': 'INTEGER',
'CreationDate': 'DATETIME',
'DisplayName': 'TEXT',
'LastAccessDate': 'DATETIME',
'WebsiteUrl': 'TEXT',
'Location': 'TEXT',
'Age': 'INTEGER',
'AboutMe': 'TEXT',
'Views': 'INTEGER',
'UpVotes': 'INTEGER',
'DownVotes': 'INTEGER',
'AccountId': 'INTEGER',
'ProfileImageUrl': 'TEXT'
},
'tags': {
'Id': 'INTEGER',
'TagName': 'TEXT',
'Count': 'INTEGER',
'ExcerptPostId': 'INTEGER',
'WikiPostId': 'INTEGER'
}
}
def dump_files(file_names, anathomy,
               dump_path='.',
               dump_database_name='so-dump.db',
               create_query='CREATE TABLE IF NOT EXISTS {table} ({fields})',
               insert_query='INSERT INTO {table} ({columns}) VALUES ({values})',
               log_filename='so-parser.log'):
    """Imports Stack Exchange XML dump files into an SQLite database.

    Args:
        file_names: base names (without ".xml") of the dump files to import;
            each must have a matching schema entry in ``anathomy``.
        anathomy: mapping of table name -> {column name -> SQLite type}.
        dump_path: directory containing the .xml files; the database and
            log file are created here as well.
        dump_database_name: file name of the SQLite database to create.
        create_query: SQL template for table creation.
        insert_query: SQL template for row insertion (uses ? placeholders).
        log_filename: name of the log file written inside ``dump_path``.
    """
    logging.basicConfig(filename=os.path.join(dump_path, log_filename),
                        level=logging.INFO)
    db = sqlite3.connect(os.path.join(dump_path, dump_database_name))
    try:
        # `file_name` renamed from `file`, which shadowed the builtin.
        for file_name in file_names:
            print("Opening {0}.xml".format(file_name))
            with open(os.path.join(dump_path, file_name + '.xml')) as xml_file:
                # iterparse streams element-by-element so huge dumps do not
                # have to fit in memory at once.
                tree = etree.iterparse(xml_file)
                table_name = file_name.lower()
                sql_create = create_query.format(
                    table=table_name,
                    fields=", ".join('{0} {1}'.format(name, col_type)
                                     for name, col_type
                                     in anathomy[table_name].items()))
                print('Creating table {0}'.format(table_name))
                try:
                    logging.info(sql_create)
                    db.execute(sql_create)
                except Exception as e:
                    # The table may already exist; log and keep going.
                    logging.warning(e)
                count = 0
                for _event, row in tree:
                    try:
                        # The root element has no attributes and is skipped;
                        # each <row .../> element becomes one table row.
                        if row.attrib:
                            logging.debug(row.attrib.keys())
                            query = insert_query.format(
                                table=table_name,
                                columns=', '.join(row.attrib.keys()),
                                values=', '.join('?' * len(row.attrib)))
                            vals = []
                            for key, val in row.attrib.items():
                                col_type = anathomy[table_name][key]
                                if col_type == 'INTEGER':
                                    vals.append(int(val))
                                elif col_type == 'BOOLEAN':
                                    vals.append(1 if val == "TRUE" else 0)
                                else:
                                    vals.append(val)
                            db.execute(query, vals)
                            count += 1
                            if count % 1000 == 0:
                                print("{}".format(count))
                    except Exception as e:
                        # Best-effort import: skip bad rows, but record why
                        # (the original swallowed the reason entirely).
                        logging.warning("Skipping row in %s: %s", table_name, e)
                        print("x", end="")
                    finally:
                        # Drop the parsed element to keep memory flat.
                        row.clear()
                print("\n")
                db.commit()
                del tree
    finally:
        # The original leaked the connection; always close it.
        db.close()

if __name__ == '__main__':
    dump_files(['posts'], ANATHOMY)
|
import sqlite3
import os
import xml.etree.cElementTree as etree
import logging
ANATHOMY = {
'badges': {
'Id': 'INTEGER',
'UserId': 'INTEGER',
'Class': 'INTEGER',
'Name': 'TEXT',
'Date': 'DATETIME',
'TagBased': 'BOOLEAN',
},
'comments': {
'Id': 'INTEGER',
'PostId': 'INTEGER',
'Score': 'INTEGER',
'Text': 'TEXT',
'CreationDate': 'DATETIME',
'UserId': 'INTEGER',
'UserDisplayName': 'TEXT'
},
'posts': {
'Id': 'INTEGER',
'PostTypeId': 'INTEGER', # 1: Question, 2: Answer
'ParentId': 'INTEGER', # (only present if PostTypeId is 2)
'AcceptedAnswerId': 'INTEGER', # (only present if PostTypeId is 1)
'CreationDate': 'DATETIME',
'Score': 'INTEGER',
'ViewCount': 'INTEGER',
'Body': 'TEXT',
'OwnerUserId': 'INTEGER', # (present only if user has not been deleted)
'OwnerDisplayName': 'TEXT',
'LastEditorUserId': 'INTEGER',
'LastEditorDisplayName': 'TEXT', # ="<NAME>"
'LastEditDate': 'DATETIME', #="2009-03-05T22:28:34.823"
'LastActivityDate': 'DATETIME', #="2009-03-11T12:51:01.480"
'CommunityOwnedDate': 'DATETIME', #(present only if post is community wikied)
'Title': 'TEXT',
'Tags': 'TEXT',
'AnswerCount': 'INTEGER',
'CommentCount': 'INTEGER',
'FavoriteCount': 'INTEGER',
'ClosedDate': 'DATETIME',
'ContentLicense': 'TEXT'
},
'votes': {
'Id': 'INTEGER',
'PostId': 'INTEGER',
'UserId': 'INTEGER',
'VoteTypeId': 'INTEGER',
# - 1: AcceptedByOriginator
# - 2: UpMod
# - 3: DownMod
# - 4: Offensive
# - 5: Favorite
# - 6: Close
# - 7: Reopen
# - 8: BountyStart
# - 9: BountyClose
# - 10: Deletion
# - 11: Undeletion
# - 12: Spam
# - 13: InformModerator
'CreationDate': 'DATETIME',
'BountyAmount': 'INTEGER'
},
'posthistory': {
'Id': 'INTEGER',
'PostHistoryTypeId': 'INTEGER',
'PostId': 'INTEGER',
'RevisionGUID': 'TEXT',
'CreationDate': 'DATETIME',
'UserId': 'INTEGER',
'UserDisplayName': 'TEXT',
'Comment': 'TEXT',
'Text': 'TEXT'
},
'postlinks': {
'Id': 'INTEGER',
'CreationDate': 'DATETIME',
'PostId': 'INTEGER',
'RelatedPostId': 'INTEGER',
'PostLinkTypeId': 'INTEGER',
'LinkTypeId': 'INTEGER'
},
'users': {
'Id': 'INTEGER',
'Reputation': 'INTEGER',
'CreationDate': 'DATETIME',
'DisplayName': 'TEXT',
'LastAccessDate': 'DATETIME',
'WebsiteUrl': 'TEXT',
'Location': 'TEXT',
'Age': 'INTEGER',
'AboutMe': 'TEXT',
'Views': 'INTEGER',
'UpVotes': 'INTEGER',
'DownVotes': 'INTEGER',
'AccountId': 'INTEGER',
'ProfileImageUrl': 'TEXT'
},
'tags': {
'Id': 'INTEGER',
'TagName': 'TEXT',
'Count': 'INTEGER',
'ExcerptPostId': 'INTEGER',
'WikiPostId': 'INTEGER'
}
}
def dump_files(file_names, anathomy,
dump_path='.',
dump_database_name='so-dump.db',
create_query='CREATE TABLE IF NOT EXISTS {table} ({fields})',
insert_query='INSERT INTO {table} ({columns}) VALUES ({values})',
log_filename='so-parser.log'):
logging.basicConfig(filename=os.path.join(dump_path, log_filename), level=logging.INFO)
db = sqlite3.connect(os.path.join(dump_path, dump_database_name))
for file in file_names:
print("Opening {0}.xml".format(file))
with open(os.path.join(dump_path, file + '.xml')) as xml_file:
tree = etree.iterparse(xml_file)
table_name = file.lower()
sql_create = create_query.format(
table=table_name,
fields=", ".join(['{0} {1}'.format(name, type) for name, type in anathomy[table_name].items()]))
print('Creating table {0}'.format(table_name))
try:
logging.info(sql_create)
db.execute(sql_create)
except Exception as e:
logging.warning(e)
count = 0
for events, row in tree:
# print(tree)
try:
if row.attrib.values():
# print("Row has attributes")
# print(row.attrib.keys())
logging.debug(row.attrib.keys())
query = insert_query.format(
table=table_name,
columns=', '.join(row.attrib.keys()),
values=('?, ' * len(row.attrib.keys()))[:-2])
vals = []
for key, val in row.attrib.items():
if anathomy[table_name][key] == 'INTEGER':
vals.append(int(val))
elif anathomy[table_name][key] == 'BOOLEAN':
vals.append(1 if val=="TRUE" else 0)
else:
vals.append(val)
db.execute(query, vals)
count += 1
if (count % 1000 == 0):
print("{}".format(count))
except Exception as e:
# print(e)
# logging.warning(e)
print("x", end="")
finally:
row.clear()
print("\n")
db.commit()
del (tree)
if __name__ == '__main__':
dump_files(['posts'], ANATHOMY)
|
en
| 0.762276
|
# 1: Question, 2: Answer # (only present if PostTypeId is 2) # (only present if PostTypeId is 1) # (present only if user has not been deleted) # ="<NAME>" #="2009-03-05T22:28:34.823" #="2009-03-11T12:51:01.480" #(present only if post is community wikied) # - 1: AcceptedByOriginator # - 2: UpMod # - 3: DownMod # - 4: Offensive # - 5: Favorite # - 6: Close # - 7: Reopen # - 8: BountyStart # - 9: BountyClose # - 10: Deletion # - 11: Undeletion # - 12: Spam # - 13: InformModerator # print(tree) # print("Row has attributes") # print(row.attrib.keys()) # print(e) # logging.warning(e)
| 2.306658
| 2
|
lib/app.py
|
steverice/SlackTeamStatus
| 1
|
6628208
|
import os
import subprocess
from io import BytesIO
from os.path import expanduser
from pathlib import Path
from typing import Dict
from typing import List
from urllib.parse import ParseResult
from urllib.parse import urlparse
from urllib.request import urlopen
import emoji_data_python
import yaml
from lib.anybar_client import AnyBarClient
from lib.slack_client import SlackClient
from PIL import Image
from PIL import UnidentifiedImageError
from tqdm import tqdm
MENUBAR_IMAGE_SIZE_2X = (44, 44)
WORK_POOL_SIZE = os.cpu_count()
EMOJI_DOWNLOAD_PATH = Path(os.path.join(expanduser("~"), ".AnyBar"))
CONFIG_PATH = Path(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "config.yml")
)
SKIN_TONES = {
"skin-tone-2": "1F3FB",
"skin-tone-3": "1F3FC",
"skin-tone-4": "1F3FD",
"skin-tone-5": "1F3FE",
"skin-tone-6": "1F3FF",
}
class SlackTeamStatus(object):
    """Mirrors Slack users' presence and status emoji into AnyBar icons.

    One AnyBar menu-bar instance is launched per configured Slack user.
    Presence / status-change events from the Slack RTM API then update the
    matching icon, using the user's status emoji (custom or standard), the
    user's avatar, or a plain red/green dot as a fallback.
    """

    _slack = None  # lazily created SlackClient; see the `slack` property

    def __init__(self, logger, use_emoji=True, use_avatars=True, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.use_emoji = use_emoji
        self.use_avatars = use_avatars
        self.logger = logger
        # NOTE(fix): these were mutable *class* attributes, silently shared
        # between every instance; they are now created per instance.  The
        # default config key is "users" (not "teammates") to match the
        # `users` property below, which reads config["slack"]["users"].
        self.config: dict = {"slack": {"token": None, "users": None}}
        self.anybar: Dict[str, tuple] = {}
        self.custom_emoji: Dict[str, str] = {}
        self.user_avatars: Dict[str, str] = {}

    def read_config(self) -> bool:
        """Loads config.yml into self.config; returns False if it is absent."""
        if not Path.exists(CONFIG_PATH):
            return False
        with open(CONFIG_PATH, "r") as stream:
            config = yaml.safe_load(stream)
        assert config, "empty config"
        self.config = config
        return True

    def save_config(self):
        """Writes the current configuration back to config.yml."""
        self.logger.info("Saving configuration file")
        with open(CONFIG_PATH, "w") as stream:
            yaml.dump(self.config, stream)

    @property
    def slack(self):
        """Lazily constructed SlackClient bound to the configured token."""
        if not self._slack:
            self._slack = SlackClient(token=self.token)
        return self._slack

    @property
    def token(self) -> str:
        token = self.config["slack"]["token"]
        assert token, "missing slack token"
        return token

    @token.setter
    def token(self, token: str):
        self.config["slack"]["token"] = token

    @property
    def users(self) -> List[str]:
        users = self.config["slack"]["users"]
        assert users, "missing slack users"
        return users

    @users.setter
    def users(self, users: List[str]):
        self.config["slack"]["users"] = users

    def get_status_mapping(self) -> Dict[str, str]:
        """Maps a Slack presence string to a fallback AnyBar icon color."""
        mapping = {
            "away": "red",
            "active": "green",
        }
        assert mapping, "missing status mapping"
        assert mapping["away"], "missing away status mapping"
        assert mapping["active"], "missing active status mapping"
        return mapping

    def local_emoji_path(self, emoji_name: str):
        """Returns the on-disk cache path for an emoji's retina (2x) PNG."""
        return os.path.join(EMOJI_DOWNLOAD_PATH, emoji_name + "@2x.png")

    def update_emoji(self, url: str, emoji_name: str = None):
        """Downloads `url`, resizes it for the menu bar, and caches it as PNG.

        Skips the download when the target file already exists.  If
        `emoji_name` is omitted it is derived from the URL path.
        """
        parsed_emoji_name, extension = self.parse_emoji_url(url)
        if emoji_name is None:
            emoji_name = parsed_emoji_name
        local_path = self.local_emoji_path(emoji_name)
        if not Path.exists(Path(local_path)):
            image_data = BytesIO(urlopen(url).read())
            try:
                img = Image.open(image_data)
                resized = img.resize(MENUBAR_IMAGE_SIZE_2X)
                resized.convert("RGBA").save(local_path, "PNG")
            except UnidentifiedImageError:
                self.logger.warning("Unidentified image at %s", url)
            except Exception as e:
                # Best effort: one bad emoji must not stop the app.
                self.logger.exception(e)

    def update_emoji_map(self, args):
        """map() adapter for update_emoji over (url, [name]) tuples."""
        return self.update_emoji(args[0], args[1] if 1 < len(args) else None)

    def full_emoji_name(self, emoji_name: str, variation: str = None):
        """Combines an emoji name with an optional skin-tone variation."""
        if variation is not None:
            return "-".join((emoji_name, variation))
        return emoji_name

    def update_standard_emoji(self, emoji_name: str, skin_variation: str = None):
        """Downloads the Apple rendering of a standard (non-custom) emoji."""
        emoji_data = emoji_data_python.find_by_shortname(emoji_name)
        if not emoji_data:
            self.logger.warning("emoji %s not found", emoji_name)
            return
        elif len(emoji_data) > 1:
            self.logger.warning(
                "multiple emoji found for %s: %s", emoji_name, emoji_data
            )
        emoji_data = emoji_data[0]
        if skin_variation and SKIN_TONES[skin_variation] in emoji_data.skin_variations:
            emoji_data = emoji_data.skin_variations[SKIN_TONES[skin_variation]]
            emoji_name = self.full_emoji_name(emoji_name, skin_variation)
        if not emoji_data.has_img_apple:
            self.logger.warning("No Apple emoji found for %s", emoji_name)
        url = (
            "https://raw.githubusercontent.com/iamcal/emoji-data/master/img-apple-64/"
            + emoji_data.image
        )
        self.update_emoji(url, emoji_name)

    def parse_emoji_url(self, url: str) -> "tuple[str, str]":
        """Splits an emoji image URL into (emoji name, file extension)."""
        parsed_url: ParseResult = urlparse(url)
        path_parts = parsed_url.path.split("/")
        extension = path_parts[-1].split(".")[-1]
        emoji_name = path_parts[-2]
        return emoji_name, extension

    def check_if_exists(self, emoji_name, url):
        """Records a custom emoji; returns (url, name) if it needs downloading.

        Aliases and already-cached emoji return None.
        """
        self.custom_emoji[emoji_name] = url
        if url.startswith("alias:"):
            return None
        _, extension = self.parse_emoji_url(url)
        local_path = self.local_emoji_path(emoji_name)
        if not Path.exists(Path(local_path)):
            return (url, emoji_name)
        return None

    def check_if_exists_map(self, args):
        """map() adapter for check_if_exists over (name, url) pairs."""
        return self.check_if_exists(*args)

    def get_custom_emoji(self):
        """Fetches the workspace's custom emoji list and downloads new ones."""
        data = self.slack.web_client.emoji_list()
        emoji_to_download = list(
            filter(None, map(self.check_if_exists_map, data["emoji"].items()))
        )
        num_emoji = len(emoji_to_download)
        # list() drives the lazy map; tqdm renders a progress bar.
        list(
            tqdm(
                map(self.update_emoji_map, emoji_to_download),
                desc="Downloading Custom Emoji",
                unit="emoji",
                total=num_emoji,
            )
        )

    def resolve_aliases(self, emoji_name: str):
        """Follows `alias:` chains until a concrete emoji name is reached."""
        if emoji_name not in self.custom_emoji:
            return emoji_name  # This is a standard emoji
        if self.custom_emoji[emoji_name].startswith("alias"):
            aliased_emoji = self.custom_emoji[emoji_name].split(":")[-1]
            return self.resolve_aliases(aliased_emoji)
        return emoji_name

    def launch_anybar(self, port: int):
        """Starts one AnyBar.app process listening on the given port.

        Raises:
            RuntimeError: if the AnyBar application cannot be located.
        """
        anybar_loc = subprocess.run(
            ["mdfind", 'kMDItemCFBundleIdentifier = "tonsky.AnyBar"'],
            check=True,
            capture_output=True,
        )
        # NOTE(fix): the not-found check now runs before the result is used.
        if not anybar_loc.stdout:
            raise RuntimeError(
                "Could not find AnyBar application. Please install https://github.com/tonsky/AnyBar first."
            )
        path_to_anybar_dir = anybar_loc.stdout.decode().strip()
        path_to_anybar_cmd = os.path.join(
            path_to_anybar_dir, "Contents", "MacOS", "AnyBar"
        )
        anybar_instance = subprocess.Popen(
            [path_to_anybar_cmd], env={"ANYBAR_PORT": str(port)}
        )
        return anybar_instance

    def pre_download_emoji(self):
        """Ensures the cache dir exists, then syncs the custom emoji set."""
        self.ensure_emoji_path()
        self.get_custom_emoji()

    def ensure_emoji_path(self):
        """Creates the local emoji cache directory if needed."""
        Path.mkdir(EMOJI_DOWNLOAD_PATH, exist_ok=True)

    def status_update(self, **payload):
        """RTM callback: updates a user's AnyBar icon on a presence change."""
        # NOTE(fix): the original passed `payload` as an extra positional arg
        # with no %s placeholder, which the logging module discards.
        self.logger.debug("Received status update event: %s", payload)
        user_id = payload["data"]["user"]
        presence = payload["data"]["presence"]
        user_info_res = self.slack.web_client.users_info(user=user_id)
        assert user_info_res["ok"], "bad response"
        user_name = user_info_res["user"]["name"]
        status_text = user_info_res["user"]["profile"]["status_text"]
        status_emoji = user_info_res["user"]["profile"]["status_emoji"]
        self.logger.info(
            "New status for %s: (%s) %s %s",
            user_name,
            presence,
            status_emoji,
            status_text,
        )
        if self.use_avatars:
            if user_id not in self.user_avatars:
                self.user_avatars[user_id] = user_info_res["user"]["profile"][
                    "image_48"
                ]
            if self.user_avatars[user_id]:
                # Avatars are cached under the user id, like an emoji.
                self.update_emoji(self.user_avatars[user_id], user_id)
        variation = None
        if self.use_emoji and status_emoji:
            # Status emoji look like ":name:" or ":name::skin-tone-N:".
            emoji_parts = status_emoji.split(":")
            if len(emoji_parts) == 3:  # Standard emoji
                status_emoji = emoji_parts[1]
            elif len(emoji_parts) == 5:  # Skin tone variant
                status_emoji = emoji_parts[1]
                variation = emoji_parts[3]
            else:
                self.logger.error("Unable to parse emoji %s", status_emoji)
            new_status = self.resolve_aliases(status_emoji)
            if new_status not in self.custom_emoji:
                self.update_standard_emoji(new_status, variation)
                new_status = self.full_emoji_name(new_status, variation)
        elif presence == "active" and self.use_avatars:
            new_status = user_id
        else:
            new_status = self.get_status_mapping()[presence]
        self.logger.debug("Setting %s icon to %s", user_name, new_status)
        self.anybar[user_id][0].update_status(new_status)

    def emoji_update(self, **payload: dict):
        """RTM callback: downloads any newly added custom emoji."""
        self.logger.debug("Received emoji update event: %s", payload)
        if payload["data"]["subtype"] == "add":
            emoji_name = payload["data"]["name"]
            self.logger.info("Adding new emoji %s", emoji_name)
            self.update_emoji(payload["data"]["value"], emoji_name)

    def start(self):
        """Launches the AnyBar instances and blocks on the Slack RTM loop."""
        if self.use_emoji:
            self.ensure_emoji_path()
        anybar_port = 1738
        for user in self.users:
            anybar_instance = self.launch_anybar(port=anybar_port)
            anybar_client = AnyBarClient(port=anybar_port)
            anybar_port += 1
            self.anybar[user] = (anybar_client, anybar_instance)
        self.slack.add_callback("presence_change", self.status_update)
        self.slack.add_callback("emoji_changed", self.emoji_update)

        def subscribe(**payload: dict):
            # Presence events are only delivered after an explicit
            # subscription, which must follow the "hello" handshake.
            self.slack.subscribe_to_presence(self.users)

        self.slack.add_callback("hello", subscribe)
        self.logger.info("SlackTeamStatus updater running. Press Ctrl-C to exit.")
        self.slack.connect()
|
import os
import subprocess
from io import BytesIO
from os.path import expanduser
from pathlib import Path
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from urllib.parse import ParseResult
from urllib.parse import urlparse
from urllib.request import urlopen

import emoji_data_python
import yaml
from PIL import Image
from PIL import UnidentifiedImageError
from tqdm import tqdm

from lib.anybar_client import AnyBarClient
from lib.slack_client import SlackClient
# Target pixel size for menu-bar images; "@2x" files are treated as retina.
MENUBAR_IMAGE_SIZE_2X = (44, 44)
# Parallelism hint for bulk emoji downloads.
WORK_POOL_SIZE = os.cpu_count()
# AnyBar loads custom images from ~/.AnyBar.
EMOJI_DOWNLOAD_PATH = Path(os.path.join(expanduser("~"), ".AnyBar"))
# config.yml lives one directory above this module.
CONFIG_PATH = Path(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "config.yml")
)
# Slack skin-tone modifier shortnames -> Unicode codepoint suffixes used by
# the emoji-data image file names.
SKIN_TONES = {
    "skin-tone-2": "1F3FB",
    "skin-tone-3": "1F3FC",
    "skin-tone-4": "1F3FD",
    "skin-tone-5": "1F3FE",
    "skin-tone-6": "1F3FF",
}
class SlackTeamStatus(object):
    """Mirror Slack teammates' presence and status as AnyBar menu-bar icons.

    One AnyBar instance is launched per configured user; Slack RTM events
    (``presence_change`` / ``emoji_changed``) drive icon updates.  Status
    emoji and user avatars are downloaded to ``EMOJI_DOWNLOAD_PATH`` as
    44x44 ("@2x") PNGs that AnyBar can display.
    """

    # Lazily constructed SlackClient; see the ``slack`` property.
    _slack = None
    use_emoji: bool = True
    use_avatars: bool = True

    def __init__(self, logger, use_emoji=True, use_avatars=True, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.use_emoji = use_emoji
        self.use_avatars = use_avatars
        self.logger = logger
        # Per-instance state.  These used to be mutable *class* attributes,
        # which are shared between all instances (a classic Python pitfall).
        # The default config key is "users" to match the ``users`` property
        # (the old default used "teammates", which nothing ever read).
        self.config: dict = {"slack": {"token": None, "users": None}}
        # user id -> (AnyBarClient, subprocess.Popen) pair
        self.anybar: Dict[str, tuple] = {}
        # custom emoji name -> source URL (or "alias:<name>")
        self.custom_emoji: Dict[str, str] = {}
        # user id -> avatar image URL
        self.user_avatars: Dict[str, str] = {}

    def read_config(self) -> bool:
        """Load ``CONFIG_PATH`` into ``self.config``; return False if absent."""
        if not Path.exists(CONFIG_PATH):
            return False
        with open(CONFIG_PATH, "r") as stream:
            config = yaml.safe_load(stream)
            assert config, "empty config"
            self.config = config
        return True

    def save_config(self):
        """Write the current configuration back to ``CONFIG_PATH``."""
        self.logger.info("Saving configuration file")
        with open(CONFIG_PATH, "w") as stream:
            yaml.dump(self.config, stream)

    @property
    def slack(self):
        """Lazily create and cache the SlackClient."""
        if not self._slack:
            self._slack = SlackClient(token=self.token)
        return self._slack

    @property
    def token(self) -> str:
        """Slack API token from the config; asserts it is set."""
        token = self.config["slack"]["token"]
        assert token, "missing slack token"
        return token

    @token.setter
    def token(self, token: str):
        self.config["slack"]["token"] = token

    @property
    def users(self) -> List[str]:
        """Slack user IDs to track; asserts the list is non-empty."""
        users = self.config["slack"]["users"]
        assert users, "missing slack users"
        return users

    @users.setter
    def users(self, users: List[str]):
        self.config["slack"]["users"] = users

    def get_status_mapping(self) -> Dict[str, str]:
        """Map a Slack presence value to a built-in AnyBar icon name."""
        return {
            "away": "red",
            "active": "green",
        }

    def local_emoji_path(self, emoji_name: str):
        """Return the on-disk cache path for *emoji_name*'s @2x image."""
        return os.path.join(EMOJI_DOWNLOAD_PATH, emoji_name + "@2x.png")

    def update_emoji(self, url: str, emoji_name: Optional[str] = None):
        """Download *url*, resize it to menu-bar size and cache it locally.

        If *emoji_name* is omitted it is derived from the URL path.
        Already-downloaded emoji are skipped.
        """
        parsed_emoji_name, _ = self.parse_emoji_url(url)
        if emoji_name is None:
            emoji_name = parsed_emoji_name
        local_path = self.local_emoji_path(emoji_name)
        if not Path.exists(Path(local_path)):
            image_data = BytesIO(urlopen(url).read())
            try:
                img = Image.open(image_data)
                resized = img.resize(MENUBAR_IMAGE_SIZE_2X)
                # AnyBar expects PNG; normalise whatever format was fetched.
                resized.convert("RGBA").save(local_path, "PNG")
            except UnidentifiedImageError:
                self.logger.warning("Unidentified image at %s", url)
            except Exception as e:
                self.logger.exception(e)

    def update_emoji_map(self, args):
        """Adapter so ``update_emoji`` can be driven by ``map``."""
        return self.update_emoji(args[0], args[1] if len(args) > 1 else None)

    def full_emoji_name(self, emoji_name: str, variation: Optional[str] = None):
        """Return the cache name for an emoji, including any skin-tone suffix."""
        if variation is not None:
            return "-".join((emoji_name, variation))
        return emoji_name

    def update_standard_emoji(self, emoji_name: str, skin_variation: Optional[str] = None):
        """Download a standard (non-custom) emoji image from the emoji-data set."""
        emoji_data = emoji_data_python.find_by_shortname(emoji_name)
        if not emoji_data:
            self.logger.warning("emoji %s not found", emoji_name)
            return
        elif len(emoji_data) > 1:
            self.logger.warning(
                "multiple emoji found for %s: %s", emoji_name, emoji_data
            )
        emoji_data = emoji_data[0]
        if skin_variation and SKIN_TONES[skin_variation] in emoji_data.skin_variations:
            emoji_data = emoji_data.skin_variations[SKIN_TONES[skin_variation]]
            emoji_name = self.full_emoji_name(emoji_name, skin_variation)
        if not emoji_data.has_img_apple:
            self.logger.warning("No Apple emoji found for %s", emoji_name)
        url = (
            "https://raw.githubusercontent.com/iamcal/emoji-data/master/img-apple-64/"
            + emoji_data.image
        )
        self.update_emoji(url, emoji_name)

    def parse_emoji_url(self, url: str) -> Tuple[str, str]:
        """Return ``(emoji_name, extension)`` parsed from a Slack emoji URL.

        Slack emoji URLs look like ``.../<emoji_name>/<hash>.<ext>``.
        (The old annotation ``(str, str)`` was not a valid type.)
        """
        parsed_url: ParseResult = urlparse(url)
        path_parts = parsed_url.path.split("/")
        extension = path_parts[-1].split(".")[-1]
        emoji_name = path_parts[-2]
        return emoji_name, extension

    def check_if_exists(self, emoji_name, url):
        """Record the emoji; return ``(url, emoji_name)`` if it needs downloading."""
        self.custom_emoji[emoji_name] = url
        if url.startswith("alias:"):
            # Aliases point at another emoji; nothing to download.
            return None
        local_path = self.local_emoji_path(emoji_name)
        if not Path.exists(Path(local_path)):
            return (url, emoji_name)
        return None

    def check_if_exists_map(self, args):
        """Adapter so ``check_if_exists`` can be driven by ``map``."""
        return self.check_if_exists(*args)

    def get_custom_emoji(self):
        """Fetch the workspace's custom emoji list and download missing images."""
        data = self.slack.web_client.emoji_list()
        emoji_to_download = list(
            filter(None, map(self.check_if_exists_map, data["emoji"].items()))
        )
        # The lazy map is consumed via list() purely to drive the progress bar.
        list(
            tqdm(
                map(self.update_emoji_map, emoji_to_download),
                desc="Downloading Custom Emoji",
                unit="emoji",
                total=len(emoji_to_download),
            )
        )

    def resolve_aliases(self, emoji_name: str):
        """Follow ``alias:`` chains until a concrete emoji name is reached."""
        if emoji_name not in self.custom_emoji:
            return emoji_name  # This is a standard emoji
        if self.custom_emoji[emoji_name].startswith("alias"):
            aliased_emoji = self.custom_emoji[emoji_name].split(":")[-1]
            return self.resolve_aliases(aliased_emoji)
        return emoji_name

    def launch_anybar(self, port: int):
        """Start one AnyBar.app instance listening on *port*; return its process.

        Raises RuntimeError when Spotlight cannot locate AnyBar.
        """
        anybar_loc = subprocess.run(
            ["mdfind", 'kMDItemCFBundleIdentifier = "tonsky.AnyBar"'],
            check=True,
            capture_output=True,
        )
        found = anybar_loc.stdout.decode().strip()
        if not found:
            raise RuntimeError(
                "Could not find AnyBar application. Please install https://github.com/tonsky/AnyBar first."
            )
        # mdfind can return several matches (one per line); use the first.
        path_to_anybar_cmd = os.path.join(
            found.splitlines()[0], "Contents", "MacOS", "AnyBar"
        )
        # Inherit the parent environment (AnyBar needs e.g. HOME to find its
        # ~/.AnyBar image directory) and add the port it should listen on.
        anybar_instance = subprocess.Popen(
            [path_to_anybar_cmd], env={**os.environ, "ANYBAR_PORT": str(port)}
        )
        return anybar_instance

    def pre_download_emoji(self):
        """Ensure the cache directory exists and pre-fetch all custom emoji."""
        self.ensure_emoji_path()
        self.get_custom_emoji()

    def ensure_emoji_path(self):
        """Create the emoji cache directory if it does not exist."""
        Path.mkdir(EMOJI_DOWNLOAD_PATH, exist_ok=True)

    def status_update(self, **payload):
        """RTM ``presence_change`` callback: update the user's AnyBar icon.

        Icon priority: status emoji (when ``use_emoji``), then the user's
        avatar while active (when ``use_avatars``), then a plain
        red/green presence dot.
        """
        # Fixed: format string previously lacked a %s placeholder.
        self.logger.debug("Received status update event: %s", payload)
        user_id = payload["data"]["user"]
        presence = payload["data"]["presence"]
        user_info_res = self.slack.web_client.users_info(user=user_id)
        assert user_info_res["ok"], "bad response"
        user_name = user_info_res["user"]["name"]
        status_text = user_info_res["user"]["profile"]["status_text"]
        status_emoji = user_info_res["user"]["profile"]["status_emoji"]
        self.logger.info(
            "New status for %s: (%s) %s %s",
            user_name,
            presence,
            status_emoji,
            status_text,
        )
        if self.use_avatars:
            if user_id not in self.user_avatars:
                self.user_avatars[user_id] = user_info_res["user"]["profile"][
                    "image_48"
                ]
            if self.user_avatars[user_id]:
                # update_emoji skips the download when already cached.
                self.update_emoji(self.user_avatars[user_id], user_id)
        variation = None
        if self.use_emoji and status_emoji:
            emoji_parts = status_emoji.split(":")
            if len(emoji_parts) == 3:  # Standard emoji, e.g. ":wave:"
                status_emoji = emoji_parts[1]
            elif len(emoji_parts) == 5:  # Skin tone variant, e.g. ":wave::skin-tone-2:"
                status_emoji = emoji_parts[1]
                variation = emoji_parts[3]
            else:
                self.logger.error("Unable to parse emoji %s", status_emoji)
            new_status = self.resolve_aliases(status_emoji)
            if new_status not in self.custom_emoji:
                # Not a workspace emoji: fetch the standard image set's copy.
                self.update_standard_emoji(new_status, variation)
                new_status = self.full_emoji_name(new_status, variation)
        elif presence == "active" and self.use_avatars:
            # No status emoji: show the user's avatar while they are active.
            new_status = user_id
        else:
            new_status = self.get_status_mapping()[presence]
        self.logger.debug("Setting %s icon to %s", user_name, new_status)
        self.anybar[user_id][0].update_status(new_status)

    def emoji_update(self, **payload: dict):
        """RTM ``emoji_changed`` callback: download newly added custom emoji."""
        # Fixed: format string previously lacked a %s placeholder.
        self.logger.debug("Received emoji update event: %s", payload)
        if payload["data"]["subtype"] == "add":
            emoji_name = payload["data"]["name"]
            self.logger.info("Adding new emoji %s", emoji_name)
            self.update_emoji(payload["data"]["value"], emoji_name)

    def start(self):
        """Launch AnyBar instances, register Slack callbacks and block on RTM."""
        if self.use_emoji:
            self.ensure_emoji_path()
        # Each user gets a dedicated AnyBar on its own consecutive port.
        anybar_port = 1738
        for user in self.users:
            anybar_instance = self.launch_anybar(port=anybar_port)
            anybar_client = AnyBarClient(port=anybar_port)
            anybar_port += 1
            self.anybar[user] = (anybar_client, anybar_instance)
        self.slack.add_callback("presence_change", self.status_update)
        self.slack.add_callback("emoji_changed", self.emoji_update)

        def subscribe(**payload: dict):
            # Presence events only flow after an explicit subscription,
            # made once the RTM "hello" event arrives.
            self.slack.subscribe_to_presence(self.users)

        self.slack.add_callback("hello", subscribe)
        self.logger.info("SlackTeamStatus updater running. Press Ctrl-C to exit.")
        self.slack.connect()
|
en
| 0.527719
|
# This is a standard emoji # Standard emoji # Skin tone variant
| 1.953409
| 2
|
udemy Model Predictive Control/tempCodeRunnerFile.py
|
davWilk/udemy_courses
| 0
|
6628209
|
<gh_stars>0
# if psi_t_1 > 2*pi:
# psi_t_1 = psi_t_1 % (2*pi)
|
# if psi_t_1 > 2*pi:
# psi_t_1 = psi_t_1 % (2*pi)
|
eu
| 0.205783
|
# if psi_t_1 > 2*pi: # psi_t_1 = psi_t_1 % (2*pi)
| 1.762331
| 2
|
ansible/roles/jupyterhub/files/jupyterhub_config_lti11.py
|
SebastianM-C/illumidesk
| 0
|
6628210
|
# JupyterHub configuration enabling LTI 1.1 launches for IllumiDesk.
import os
from illumidesk.apis.setup_course_service import get_current_service_definitions
from illumidesk.authenticators.authenticator import LTI11Authenticator
from illumidesk.authenticators.authenticator import setup_course_hook
from illumidesk.grades.handlers import SendGradesHandler
from illumidesk.spawners.spawners import IllumiDeskDockerSpawner
# get_config() and load_subconfig() are injected by JupyterHub/traitlets
# when this file is executed as configuration.
c = get_config()
# load the base configuration file (with common settings)
load_subconfig('/etc/jupyterhub/jupyterhub_config_base.py') # noqa: F821
##########################################
# BEGIN JUPYTERHUB APPLICATION
##########################################
# LTI 1.1 authenticator class.
c.JupyterHub.authenticator_class = LTI11Authenticator
# Spawn end-user container and enable extensions by role
c.JupyterHub.spawner_class = IllumiDeskDockerSpawner
##########################################
# END JUPYTERHUB APPLICATION
##########################################
##########################################
# BEGIN LTI 1.1 AUTHENTICATOR
##########################################
# Consumer key -> shared secret; falls back to test credentials when the
# LTI_* environment variables are unset (development only).
c.LTIAuthenticator.consumers = {
    os.environ.get('LTI_CONSUMER_KEY')
    or 'ild_test_consumer_key': os.environ.get('LTI_SHARED_SECRET')
    or 'ild_test_shared_secret'
}
# Custom Handlers
# the first one is used to send grades to LMS
# this url pattern was changed to accept spaces in the assignment name
c.JupyterHub.extra_handlers = [
    (r'/submit-grades/(?P<course_id>[a-zA-Z0-9-_]+)/(?P<assignment_name>.*)$', SendGradesHandler,),
]
##########################################
# END LTI 1.1 AUTHENTICATOR
##########################################
##########################################
# BEGIN GENERAL AUTHENTICATION
##########################################
# Post auth hook to setup course
c.Authenticator.post_auth_hook = setup_course_hook
##########################################
# END GENERAL AUTHENTICATION
##########################################
##########################################
# SETUP COURSE SERVICE
##########################################
# Dynamic config to setup new courses
extra_services = get_current_service_definitions()
# load k/v's when starting jupyterhub
c.JupyterHub.load_groups.update(extra_services['load_groups'])
c.JupyterHub.services.extend(extra_services['services'])
##########################################
# END SETUP COURSE SERVICE
##########################################
|
# JupyterHub configuration enabling LTI 1.1 launches for IllumiDesk.
import os
from illumidesk.apis.setup_course_service import get_current_service_definitions
from illumidesk.authenticators.authenticator import LTI11Authenticator
from illumidesk.authenticators.authenticator import setup_course_hook
from illumidesk.grades.handlers import SendGradesHandler
from illumidesk.spawners.spawners import IllumiDeskDockerSpawner
# get_config() and load_subconfig() are injected by JupyterHub/traitlets
# when this file is executed as configuration.
c = get_config()
# load the base configuration file (with common settings)
load_subconfig('/etc/jupyterhub/jupyterhub_config_base.py') # noqa: F821
##########################################
# BEGIN JUPYTERHUB APPLICATION
##########################################
# LTI 1.1 authenticator class.
c.JupyterHub.authenticator_class = LTI11Authenticator
# Spawn end-user container and enable extensions by role
c.JupyterHub.spawner_class = IllumiDeskDockerSpawner
##########################################
# END JUPYTERHUB APPLICATION
##########################################
##########################################
# BEGIN LTI 1.1 AUTHENTICATOR
##########################################
# Consumer key -> shared secret; falls back to test credentials when the
# LTI_* environment variables are unset (development only).
c.LTIAuthenticator.consumers = {
    os.environ.get('LTI_CONSUMER_KEY')
    or 'ild_test_consumer_key': os.environ.get('LTI_SHARED_SECRET')
    or 'ild_test_shared_secret'
}
# Custom Handlers
# the first one is used to send grades to LMS
# this url pattern was changed to accept spaces in the assignment name
c.JupyterHub.extra_handlers = [
    (r'/submit-grades/(?P<course_id>[a-zA-Z0-9-_]+)/(?P<assignment_name>.*)$', SendGradesHandler,),
]
##########################################
# END LTI 1.1 AUTHENTICATOR
##########################################
##########################################
# BEGIN GENERAL AUTHENTICATION
##########################################
# Post auth hook to setup course
c.Authenticator.post_auth_hook = setup_course_hook
##########################################
# END GENERAL AUTHENTICATION
##########################################
##########################################
# SETUP COURSE SERVICE
##########################################
# Dynamic config to setup new courses
extra_services = get_current_service_definitions()
# load k/v's when starting jupyterhub
c.JupyterHub.load_groups.update(extra_services['load_groups'])
c.JupyterHub.services.extend(extra_services['services'])
##########################################
# END SETUP COURSE SERVICE
##########################################
|
de
| 0.563156
|
# load the base configuration file (with common settings) # noqa: F821 ########################################## # BEGIN JUPYTERHUB APPLICATION ########################################## # LTI 1.1 authenticator class. # Spawn end-user container and enable extensions by role ########################################## # END JUPYTERHUB APPLICATION ########################################## ########################################## # BEGIN LTI 1.1 AUTHENTICATOR ########################################## # Custom Handlers # the first one is used to send grades to LMS # this url pattern was changed to accept spaces in the assignment name ########################################## # END LTI 1.1 AUTHENTICATOR ########################################## ########################################## # BEGIN GENERAL AUTHENTICATION ########################################## # Post auth hook to setup course ########################################## # END GENERAL AUTHENTICATION ########################################## ########################################## # SETUP COURSE SERVICE ########################################## # Dynamic config to setup new courses # load k/v's when starting jupyterhub ########################################## # END SETUP COURSE SERVICE ##########################################
| 1.659603
| 2
|
python_pillow_numpy/520.blur.diagonal.py
|
takenobu-hs/pixel-manipulation-examples
| 0
|
6628211
|
"""Apply a diagonal motion-blur to an image using a normalised eye kernel."""
from PIL import Image
import numpy as np

#-- read and convert
im1 = Image.open('../images/img002.png').convert('RGB')
im1nd = np.array(im1)
width, height = im1.size
im3 = np.zeros_like(im1nd)

#-- filter coeff
d = 2  # kernel radius; the window is (2d+1) x (2d+1)
size = d * 2 + 1
# Diagonal kernel: ones on the main diagonal, normalised to sum to 1.
# (Was hard-coded as np.eye(5) / 5, silently decoupled from ``d``; an
# unused ``n`` variable was also removed.)
k = np.eye(size) / size

#-- canvas loop (a border of width ``d`` is left black)
for y in range(d, height-d):
    for x in range(d, width-d):
        #-- get window
        w = im1nd[y-d:y+d+1, x-d:x+d+1]
        #-- filter each channel and clamp to the valid 8-bit range
        wr = np.clip((w[:,:, 0] * k).sum(), 0, 255)
        wg = np.clip((w[:,:, 1] * k).sum(), 0, 255)
        wb = np.clip((w[:,:, 2] * k).sum(), 0, 255)
        #-- put
        im3[y, x, 0] = wr
        im3[y, x, 1] = wg
        im3[y, x, 2] = wb

#-- save to png
Image.fromarray(np.uint8(im3)).save('z520.png')
|
"""Apply a diagonal motion-blur to an image using a normalised eye kernel."""
from PIL import Image
import numpy as np

#-- read and convert
im1 = Image.open('../images/img002.png').convert('RGB')
im1nd = np.array(im1)
width, height = im1.size
im3 = np.zeros_like(im1nd)

#-- filter coeff
d = 2  # kernel radius; the window is (2d+1) x (2d+1)
size = d * 2 + 1
# Diagonal kernel: ones on the main diagonal, normalised to sum to 1.
# (Was hard-coded as np.eye(5) / 5, silently decoupled from ``d``; an
# unused ``n`` variable was also removed.)
k = np.eye(size) / size

#-- canvas loop (a border of width ``d`` is left black)
for y in range(d, height-d):
    for x in range(d, width-d):
        #-- get window
        w = im1nd[y-d:y+d+1, x-d:x+d+1]
        #-- filter each channel and clamp to the valid 8-bit range
        wr = np.clip((w[:,:, 0] * k).sum(), 0, 255)
        wg = np.clip((w[:,:, 1] * k).sum(), 0, 255)
        wb = np.clip((w[:,:, 2] * k).sum(), 0, 255)
        #-- put
        im3[y, x, 0] = wr
        im3[y, x, 1] = wg
        im3[y, x, 2] = wb

#-- save to png
Image.fromarray(np.uint8(im3)).save('z520.png')
|
pt
| 0.405706
|
#-- read and convert #-- filter coeff #-- canvas loop #-- get window #-- filter #-- put #-- save to png
| 3.084611
| 3
|
pre_commit_hooks/_version.py
|
devanshshukla99/pre-commit-hook-prohibit-string
| 0
|
6628212
|
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
# PEP 440 version string and its split (major, minor, local) form.
version = '0.1.dev4+g713f51e'
version_tuple = (0, 1, 'dev4+g713f51e')
|
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
# PEP 440 version string and its split (major, minor, local) form.
version = '0.1.dev4+g713f51e'
version_tuple = (0, 1, 'dev4+g713f51e')
|
en
| 0.964309
|
# coding: utf-8 # file generated by setuptools_scm # don't change, don't track in version control
| 0.998511
| 1
|
chatBotStable/actions.py
|
JKhan01/SM446_TeamXYZ
| 0
|
6628213
|
<reponame>JKhan01/SM446_TeamXYZ
# This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
# This is a simple example for a custom action which utters "Hello World!"
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from modules.Bitbucket import bitbucketActions
from modules.ErrorSearch import searchStack
import json
from functions import *
from jira_package import *
from g5 import *
from g6 import *
obj = bitbucketActions()
class CommitByUserForm(FormAction):
    """Form that collects repo/owner/user slots and utters that user's commits."""

    def name(self) -> Text:
        return "commit_by_user_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        # Slots the form must fill before ``submit`` runs.
        # (Dead commented-out slot logic removed.)
        return ["repo_name","owner_name","user_name"]

    def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        returnAnswer = obj.get_commit_by_user(tracker.get_slot('repo_name'),
                            tracker.get_slot('owner_name'), tracker.get_slot('user_name'))
        # Tag the payload so the frontend knows which integration produced it.
        returnAnswer['type'] = 'bitbucket'
        txt = json.dumps(returnAnswer)
        dispatcher.utter_message(text=txt)
        return []


class CommitByBranchForm(FormAction):
    """Form that collects repo/owner/branch slots and utters that branch's commits."""

    def name(self) -> Text:
        return "commit_by_branch_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        # Slots the form must fill before ``submit`` runs.
        return ["repo_name","owner_name","branch_name"]

    def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        returnAnswer = obj.get_commit_by_branch(tracker.get_slot('repo_name'),
                            tracker.get_slot('owner_name'), tracker.get_slot('branch_name'))
        returnAnswer['type'] = 'bitbucket'
        txt = json.dumps(returnAnswer)
        dispatcher.utter_message(text=txt)
        return []


class CommitMsgForm(FormAction):
    """Form that collects repo/owner/message slots and utters matching commits."""

    def name(self) -> Text:
        return "commit_msg_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        # Slots the form must fill before ``submit`` runs.
        return ["repo_name","owner_name","message"]

    def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        returnAnswer = obj.get_commit_by_msg(tracker.get_slot('repo_name'),
                            tracker.get_slot('owner_name'), tracker.get_slot('message'))
        returnAnswer['type'] = 'bitbucket'
        txt = json.dumps(returnAnswer)
        dispatcher.utter_message(text=txt)
        return []
class WatcherListForm(FormAction):
    """Form that collects repo/owner slots and utters the repo's watchers."""
    def name(self) -> Text:
        return "watcher_list_form"
    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        # Slots the form must fill before ``submit`` runs.
        return ["repo_name","owner_name"]
    def submit(self,dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        returnAnswer = obj.get_watchers(tracker.get_slot('repo_name'),tracker.get_slot('owner_name'))
        # Tag the payload so the frontend knows which integration produced it.
        returnAnswer['type'] = 'bitbucket'
        txt = json.dumps(returnAnswer)
        dispatcher.utter_message(text=txt)
        return []
class ErrorSearchForm(FormAction):
    """Form that searches Stack Overflow for the error in the ``error_query`` slot.

    NOTE: the previous custom ``__init__`` set an unused ``error_query``
    attribute and never called ``super().__init__()``, skipping FormAction's
    own initialisation; it has been removed.
    """

    def name(self) -> Text:
        return "error_search_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ["error_query"]

    def submit(self,dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        obj = searchStack()
        returnVar = {}
        returnVar['reply'] = obj.searchStack(tracker.get_slot("error_query"))
        returnVar['status'] = 200
        # Tag the payload so the frontend knows which integration produced it.
        returnVar['type'] = 'stackoverflow'
        returnVar = json.dumps(returnVar)
        dispatcher.utter_message(text=returnVar)
        return []
class BranchListForm(FormAction):
    """Form that collects repo/owner slots and utters the repo's branches."""
    def name(self):
        return "branch_list_form"
    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ["repo_name","owner_name"]
    def submit(self,dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        returnAnswer = obj.get_branches(tracker.get_slot('repo_name'),tracker.get_slot('owner_name'))
        # Tag the payload so the frontend knows which integration produced it.
        returnAnswer['type'] = 'bitbucket'
        txt = json.dumps(returnAnswer)
        dispatcher.utter_message(text=txt)
        return []
class RepoListForm(FormAction):
    """Form that collects an owner slot and utters that owner's repositories."""
    def name(self):
        return "repo_list_form"
    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ["owner_name"]
    def submit(self,dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        # debug trace of the requested owner
        print (f"Target Repo: {tracker.get_slot('owner_name')}")
        returnAnswer = obj.get_repos(tracker.get_slot('owner_name'))
        returnAnswer['type'] = 'bitbucket'
        txt = json.dumps(returnAnswer)
        dispatcher.utter_message(text=txt)
        return []
# Information about all the spaces
class InfoAllSpaces(Action):
    """Action that utters details of every Confluence space."""
    def name(self) -> Text:
        return "action_info_of_all_spaces"
    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        t = get_all_spaces()
        # Round-trip through JSON to obtain a serialisable message payload.
        tx = json.dumps(t, indent=4)
        txt = json.loads(tx)
        dispatcher.utter_message(text=txt)
        return []
# Create a new space
class CreateSpace(FormAction):
    """Form that collects a key and space name, then creates a Confluence space."""

    def name(self) -> Text:
        return "create_space_form"

    def slot_mappings(self):
        # Fill slots from the matching entity when present, else free text.
        return {"space": [self.from_entity(entity="space"),
                          self.from_text()],
                "key": [self.from_entity(entity="key"),
                        self.from_text()]}

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """ The required entries for this function """
        return ["key", "space"]

    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        key = str(tracker.get_slot('key'))
        space = str(tracker.get_slot('space'))
        # The original wrapped this call in a nested ``run`` function that
        # was invoked with class objects as ignored arguments; call the
        # helper directly instead.
        create_space(key, space)
        dispatcher.utter_message(text="Space Created")
        return []
# Info of a specific space
class InfoSpace(Action):
    """Action that utters details of the space identified by the ``key`` slot."""
    def name(self) -> Text:
        return "action_space_info"
    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        a = str(tracker.get_slot("key"))
        t = get_info_space(a)
        # Round-trip through JSON to obtain a serialisable message payload.
        tx = json.dumps(t, indent = 2)
        txt = json.loads(tx)
        dispatcher.utter_message(text=txt)
        return []
# Get pages in a space
class GetPagesInSpace(Action):
    """Action that utters the pages inside the space named by the ``space`` slot."""
    def name(self) -> Text:
        return "action_get_pages_in_a_space"
    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        a = str(tracker.get_slot("space"))
        t = get_pages_in_a_space(a)
        tx = json.dumps(t, indent=4)
        txt = json.loads(tx)
        dispatcher.utter_message(text=txt)
        return []
# Create a new page
class CreatePage(FormAction):
    """Form that collects space, title and body, then creates a Confluence page."""

    def name(self) -> Text:
        return "create_page_form"

    def slot_mappings(self):
        # Fill slots from entities when present, otherwise free text; the
        # page body may also come from the dedicated ``body_entry`` intent.
        return {"space": [self.from_entity(entity="space"),
                          self.from_text()],
                "title": [self.from_entity(entity="title"),
                          self.from_text()],
                "body": [self.from_entity(entity="body", intent="body_entry"),
                         self.from_text()]}

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """ The required entries for this function """
        return ["space", "title", "body"]

    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        space = str(tracker.get_slot('space'))
        title = str(tracker.get_slot('title'))
        body = str(tracker.get_slot("body"))
        # The original wrapped this call in a nested ``run`` function that
        # was invoked with class objects as ignored arguments; call the
        # helper directly instead.  Dead commented-out code removed.
        create_page(space, title, body)
        dispatcher.utter_message(text="Page Created")
        return []
# Delete a Page
class DeletePage(Action):
    """Action that deletes the page identified by the ``page_id`` slot."""
    def name(self) -> Text:
        return "action_delete_page"
    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        # Slot arrives as text; the Confluence API expects a numeric id.
        a = int(str(tracker.get_slot("page_id")))
        delete_page(a)
        dispatcher.utter_message(text="Page Deleted")
        return []
# Get Page info using id
class GetPageInfoById(Action):
    """Action that utters metadata for the page identified by ``page_id``."""
    def name(self) -> Text:
        return "action_get_page_info_by_id"
    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        a = int(str(tracker.get_slot("page_id")))
        t = page_info_by_id(a)
        # Round-trip through JSON to obtain a serialisable message payload.
        tx = json.dumps(t, indent = 2)
        txt = json.loads(tx)
        dispatcher.utter_message(text=txt)
        return []
# Export Page as PDF
class ExportPageAsPdf(FormAction):
    """Form that collects a page id and target file name, then exports the page as PDF."""

    def name(self) -> Text:
        return "export_page_as_pdf_form"

    def slot_mappings(self):
        # Fill slots from the matching entity when present, else free text.
        return {"page_id": [self.from_entity(entity="page_id"),
                            self.from_text()],
                "file_name": [self.from_entity(entity="file_name"),
                              self.from_text()]}

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """ The required entries for this function """
        return ["page_id", "file_name"]

    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        page_id = str(tracker.get_slot('page_id'))
        file_name = str(tracker.get_slot('file_name'))
        # The original wrapped this call in a nested ``run`` function that
        # was invoked with class objects as ignored arguments; call the
        # helper directly instead.
        export_page_as_pdf(page_id, file_name)
        dispatcher.utter_message(text="Page Exported")
        return []
class GetUserAllProject(FormAction):
def name(self) -> Text:
return "get_all_project_name_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return []
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_all_project_name()
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetUserInGroup(FormAction):
def name(self) -> Text:
return "get_user_in_group_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['group_name']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_users_in_group(tracker.get_slot('group_name'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetIssueProject(FormAction):
def name(self) -> Text:
return "get_issue_in_project_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['project_name']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_issues_in_project(tracker.get_slot('project_name'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetIssue(FormAction):
def name(self) -> Text:
return "get_issue_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['issue_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_issue(tracker.get_slot('issue_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetEpic(FormAction):
    """Form that looks up a Jira epic by its summary and utters it as JSON."""

    def name(self) -> Text:
        # Form identifier referenced by the Rasa domain/stories.
        return "get_epic_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['epic_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_issue(tracker.get_slot('epic_summary'))))
        return []


class GetTask(FormAction):
    """Form that looks up a Jira task by its summary and utters it as JSON."""

    def name(self) -> Text:
        return "get_task_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['task_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_issue(tracker.get_slot('task_summary'))))
        return []


class GetStatusOfIssue(FormAction):
    """Form that reports the status of an issue identified by its summary."""

    def name(self) -> Text:
        return "get_status_of_issue_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['issue_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_status_of_issue(tracker.get_slot('issue_summary'))))
        return []


class GetStatusOfEpic(FormAction):
    """Form that reports the status of an epic identified by its summary."""

    def name(self) -> Text:
        return "get_status_of_epic_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['epic_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_status_of_issue(tracker.get_slot('epic_summary'))))
        return []


class GetStatusOfTask(FormAction):
    """Form that reports the status of a task identified by its summary."""

    def name(self) -> Text:
        return "get_status_of_task_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['task_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_status_of_issue(tracker.get_slot('task_summary'))))
        return []
class GetIssueVersion(FormAction):
    """Form that reports the version info of an issue given its summary."""

    def name(self) -> Text:
        return "get_issue_version_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['issue_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_issue_version(tracker.get_slot('issue_summary'))))
        return []


class GetEpicVersion(FormAction):
    """Form that reports the version info of an epic given its summary."""

    def name(self) -> Text:
        return "get_epic_version_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['epic_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_issue_version(tracker.get_slot('epic_summary'))))
        return []


class GetTaskVersion(FormAction):
    """Form that reports the version info of a task given its summary."""

    def name(self) -> Text:
        return "get_task_version_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['task_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_issue_version(tracker.get_slot('task_summary'))))
        return []
class GetCommentIssue(FormAction):
    """Form that utters the comments on an issue given its summary."""

    def name(self) -> Text:
        return "get_comment_issue_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['issue_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_comments_in_issue(tracker.get_slot('issue_summary'))))
        return []


class GetCommentEpic(FormAction):
    """Form that utters the comments on an epic given its summary."""

    def name(self) -> Text:
        return "get_comment_epic_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['epic_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_comments_in_issue(tracker.get_slot('epic_summary'))))
        return []


class GetCommentTask(FormAction):
    """Form that utters the comments on a task given its summary."""

    def name(self) -> Text:
        return "get_comment_task_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['task_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_comments_in_issue(tracker.get_slot('task_summary'))))
        return []
class GetWorklogIssue(FormAction):
    """Form that utters the worklog of an issue given its summary."""

    def name(self) -> Text:
        return "get_worklog_issue_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['issue_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_worklog_in_issue(tracker.get_slot('issue_summary'))))
        return []


class GetWorklogTask(FormAction):
    """Form that utters the worklog of a task given its summary."""

    def name(self) -> Text:
        return "get_worklog_task_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['task_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_worklog_in_issue(tracker.get_slot('task_summary'))))
        return []


class GetWorklogEpic(FormAction):
    """Form that utters the worklog of an epic given its summary."""

    def name(self) -> Text:
        return "get_worklog_epic_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['epic_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_worklog_in_issue(tracker.get_slot('epic_summary'))))
        return []
class GetLatestInboxEmail(Action):
    """Action that utters the latest email(s) from the inbox."""

    def name(self) -> Text:
        return "action_get_latest_email_in_inbox"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        # NOTE(review): assumes the latest user message is a bare integer
        # (how many mails to fetch); int() raises otherwise — confirm upstream.
        count = int(tracker.latest_message.get('text'))
        dispatcher.utter_message(text=LatestMailInInbox(count))
        return []
class GetLatestUserEmail(Action):
    """Action that utters the latest email(s) received from a given sender."""

    def name(self) -> Text:
        return "action_get_latest_email_from_user"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        sender = str(tracker.get_slot("query"))
        # NOTE(review): assumes the latest user message is a bare integer
        # (mail count); int() raises otherwise — confirm upstream.
        count = int(tracker.latest_message.get('text'))
        dispatcher.utter_message(text=GetLatestMailFromUser(sender, count))
        return []


class GetLatestLabelEmail(Action):
    """Action that utters the latest email(s) under a given label."""

    def name(self) -> Text:
        return "action_get_latest_email_from_label"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        label = str(tracker.get_slot("query"))
        # NOTE(review): same bare-integer assumption as above — confirm upstream.
        count = int(tracker.latest_message.get('text'))
        dispatcher.utter_message(text=GetLatestMailFromLabel(label, count))
        return []
class SendEmail(FormAction):
    """Form that collects receiver, subject and body, then sends the email."""

    def name(self) -> Text:
        return "send_email_form"

    def slot_mappings(self):
        # Fill each slot from its entity if extracted, otherwise from free text.
        return {"email_body": [self.from_entity(entity="email_body"),
                               self.from_text()],
                "receiver": [self.from_entity(entity="receiver"),
                             self.from_text()],
                "subject": [self.from_entity(entity="subject"),
                            self.from_text()]}

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """The required entries for this function."""
        print("required_slots(tracker : Tracker)")
        return ["receiver", "subject", "email_body"]

    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        body = str(tracker.get_slot("email_body"))
        receiver = str(tracker.get_slot("receiver"))
        subject = str(tracker.get_slot("subject"))
        # Fixed: previously this defined a nested `run` function and invoked it
        # with class objects as throwaway arguments; call the helper directly.
        SendMail(body, receiver, subject)
        dispatcher.utter_message(text="Email Sent")
        return []
class SendEmailWithAttachments(FormAction):
    """Form that sends an email with a file attachment."""

    def name(self) -> Text:
        return "send_email_with_attachments_form"

    def slot_mappings(self):
        # Fill each slot from its entity if extracted, otherwise from free text.
        return {"email_body": [self.from_entity(entity="email_body"),
                               self.from_text()],
                "receiver": [self.from_entity(entity="receiver"),
                             self.from_text()],
                "subject": [self.from_entity(entity="subject"),
                            self.from_text()],
                "file_dir": [self.from_entity(entity="file_dir"),
                             self.from_text()],
                "filename": [self.from_entity(entity="filename"),
                             self.from_text()]}

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """The required entries for this function."""
        print("required_slots(tracker : Tracker)")
        return ["receiver", "subject", "email_body", "file_dir", "filename"]

    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        body = str(tracker.get_slot("email_body"))
        receiver = str(tracker.get_slot("receiver"))
        subject = str(tracker.get_slot("subject"))
        file_dir = str(tracker.get_slot("file_dir"))
        filename = str(tracker.get_slot("filename"))
        # Fixed: previously wrapped in a pointless nested `run` function that was
        # invoked with class objects as throwaway arguments; call directly.
        SendMailWithAttachments(body, receiver, subject, file_dir, filename)
        dispatcher.utter_message(text="Email Sent")
        return []
|
# This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
# This is a simple example for a custom action which utters "Hello World!"
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from modules.Bitbucket import bitbucketActions
from modules.ErrorSearch import searchStack
import json
from functions import *
from jira_package import *
from g5 import *
from g6 import *
# Shared Bitbucket API client used by all the bitbucket form actions below.
obj = bitbucketActions()
class CommitByUserForm(FormAction):
    """Form that fetches the commits made by a given user in a repository."""

    def name(self) -> Text:
        return "commit_by_user_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ["repo_name", "owner_name", "user_name"]

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        answer = obj.get_commit_by_user(tracker.get_slot('repo_name'),
                                        tracker.get_slot('owner_name'),
                                        tracker.get_slot('user_name'))
        answer['type'] = 'bitbucket'
        dispatcher.utter_message(text=json.dumps(answer))
        return []


class CommitByBranchForm(FormAction):
    """Form that fetches the commits on a given branch of a repository."""

    def name(self) -> Text:
        return "commit_by_branch_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ["repo_name", "owner_name", "branch_name"]

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        answer = obj.get_commit_by_branch(tracker.get_slot('repo_name'),
                                          tracker.get_slot('owner_name'),
                                          tracker.get_slot('branch_name'))
        answer['type'] = 'bitbucket'
        dispatcher.utter_message(text=json.dumps(answer))
        return []


class CommitMsgForm(FormAction):
    """Form that searches a repository's commits by commit message."""

    def name(self) -> Text:
        return "commit_msg_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ["repo_name", "owner_name", "message"]

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        answer = obj.get_commit_by_msg(tracker.get_slot('repo_name'),
                                       tracker.get_slot('owner_name'),
                                       tracker.get_slot('message'))
        answer['type'] = 'bitbucket'
        dispatcher.utter_message(text=json.dumps(answer))
        return []
class WatcherListForm(FormAction):
    """Form that lists the watchers of a Bitbucket repository."""

    def name(self) -> Text:
        return "watcher_list_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ["repo_name", "owner_name"]

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        answer = obj.get_watchers(tracker.get_slot('repo_name'),
                                  tracker.get_slot('owner_name'))
        answer['type'] = 'bitbucket'
        dispatcher.utter_message(text=json.dumps(answer))
        return []
class ErrorSearchForm(FormAction):
    """Form that searches Stack Overflow for a user-supplied error message."""

    def __init__(self):
        # Fixed: the base FormAction initializer was never invoked.
        super().__init__()
        self.error_query = ""

    def name(self) -> Text:
        return "error_search_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ["error_query"]

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        # Renamed local (was `obj`): no longer shadows the module-level
        # bitbucket client of the same name.
        searcher = searchStack()
        result = {}
        result['reply'] = searcher.searchStack(tracker.get_slot("error_query"))
        result['status'] = 200
        result['type'] = 'stackoverflow'
        dispatcher.utter_message(text=json.dumps(result))
        return []
class BranchListForm(FormAction):
    """Form that lists the branches of a Bitbucket repository."""

    def name(self):
        return "branch_list_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ["repo_name", "owner_name"]

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        answer = obj.get_branches(tracker.get_slot('repo_name'),
                                  tracker.get_slot('owner_name'))
        answer['type'] = 'bitbucket'
        dispatcher.utter_message(text=json.dumps(answer))
        return []
class RepoListForm(FormAction):
    """Form that lists every repository owned by a given account."""

    def name(self):
        return "repo_list_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ["owner_name"]

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        # Fixed: the debug line mislabelled the owner slot as "Target Repo".
        print(f"Target owner: {tracker.get_slot('owner_name')}")
        answer = obj.get_repos(tracker.get_slot('owner_name'))
        answer['type'] = 'bitbucket'
        dispatcher.utter_message(text=json.dumps(answer))
        return []
# Information about all the spaces
class InfoAllSpaces(Action):
    """Action that utters information about every Confluence space."""

    def name(self) -> Text:
        return "action_info_of_all_spaces"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        spaces = get_all_spaces()
        # Fixed: the JSON string was parsed back into a dict (json.loads) and
        # that object passed as `text`; utter the serialized string instead,
        # consistent with the other actions in this file.
        dispatcher.utter_message(text=json.dumps(spaces, indent=4))
        return []
# Create a new space
class CreateSpace(FormAction):
    """Form that collects a key and a name, then creates a Confluence space."""

    def name(self) -> Text:
        return "create_space_form"

    def slot_mappings(self):
        # Fill each slot from its entity if extracted, otherwise from free text.
        return {"space": [self.from_entity(entity="space"),
                          self.from_text()],
                "key": [self.from_entity(entity="key"),
                        self.from_text()]}

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """The required entries for this function."""
        print("required_slots(tracker : Tracker)")
        return ["key", "space"]

    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        key = str(tracker.get_slot('key'))
        space = str(tracker.get_slot('space'))
        # Fixed: previously wrapped in a pointless nested `run` function that was
        # invoked with class objects as throwaway arguments; call directly.
        create_space(key, space)
        dispatcher.utter_message(text="Space Created")
        return []
# Info of a specific space
class InfoSpace(Action):
    """Action that utters the details of one Confluence space (by key slot)."""

    def name(self) -> Text:
        return "action_space_info"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        key = str(tracker.get_slot("key"))
        info = get_info_space(key)
        # Fixed: previously the JSON string was json.loads()-ed back into an
        # object before being passed as `text`; send the string itself.
        dispatcher.utter_message(text=json.dumps(info, indent=2))
        return []
# Get pages in a space
class GetPagesInSpace(Action):
    """Action that utters the pages contained in a Confluence space."""

    def name(self) -> Text:
        return "action_get_pages_in_a_space"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        space = str(tracker.get_slot("space"))
        pages = get_pages_in_a_space(space)
        # Fixed: previously the JSON string was parsed back into an object
        # before being passed as `text`; send the serialized string.
        dispatcher.utter_message(text=json.dumps(pages, indent=4))
        return []
# Create a new page
class CreatePage(FormAction):
    """Form that collects space, title and body, then creates a Confluence page."""

    def name(self) -> Text:
        return "create_page_form"

    def slot_mappings(self):
        # Fill each slot from its entity if extracted, otherwise from free text;
        # the body is only taken from its entity under the `body_entry` intent.
        return {"space": [self.from_entity(entity="space"),
                          self.from_text()],
                "title": [self.from_entity(entity="title"),
                          self.from_text()],
                "body": [self.from_entity(entity="body", intent="body_entry"),
                         self.from_text()]}

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """The required entries for this function."""
        print("required_slots(tracker : Tracker)")
        return ["space", "title", "body"]

    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        space = str(tracker.get_slot('space'))
        title = str(tracker.get_slot('title'))
        body = str(tracker.get_slot("body"))
        # Fixed: previously wrapped in a pointless nested `run` function that was
        # invoked with class objects as throwaway arguments; call directly.
        create_page(space, title, body)
        dispatcher.utter_message(text="Page Created")
        return []
# Delete a Page
class DeletePage(Action):
    """Action that deletes a Confluence page identified by the page_id slot."""

    def name(self) -> Text:
        return "action_delete_page"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        # int() accepts the slot value directly; the str() wrapper was redundant.
        page_id = int(tracker.get_slot("page_id"))
        delete_page(page_id)
        dispatcher.utter_message(text="Page Deleted")
        return []
# Get Page info using id
class GetPageInfoById(Action):
    """Action that utters the details of a Confluence page given its id."""

    def name(self) -> Text:
        return "action_get_page_info_by_id"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        # int() accepts the slot value directly; the str() wrapper was redundant.
        page_id = int(tracker.get_slot("page_id"))
        info = page_info_by_id(page_id)
        # Fixed: previously the JSON string was parsed back into an object
        # before being passed as `text`; send the serialized string.
        dispatcher.utter_message(text=json.dumps(info, indent=2))
        return []
# Export Page as PDF
class ExportPageAsPdf(FormAction):
    """Form that exports a Confluence page (by id) to a local PDF file."""

    def name(self) -> Text:
        return "export_page_as_pdf_form"

    def slot_mappings(self):
        # Fill each slot from its entity if extracted, otherwise from free text.
        return {"page_id": [self.from_entity(entity="page_id"),
                            self.from_text()],
                "file_name": [self.from_entity(entity="file_name"),
                              self.from_text()]}

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """The required entries for this function."""
        print("required_slots(tracker : Tracker)")
        return ["page_id", "file_name"]

    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        page_id = str(tracker.get_slot('page_id'))
        file_name = str(tracker.get_slot('file_name'))
        # Fixed: previously wrapped in a pointless nested `run` function that was
        # invoked with class objects as throwaway arguments; call directly.
        export_page_as_pdf(page_id, file_name)
        dispatcher.utter_message(text="Page Exported")
        return []
class GetUserAllProject(FormAction):
    """Form that lists every Jira project; it requires no slots."""

    def name(self) -> Text:
        return "get_all_project_name_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return []

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(text=json.dumps(get_all_project_name()))
        return []


class GetUserInGroup(FormAction):
    """Form that lists the users belonging to a named Jira group."""

    def name(self) -> Text:
        return "get_user_in_group_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['group_name']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_users_in_group(tracker.get_slot('group_name'))))
        return []


class GetIssueProject(FormAction):
    """Form that lists the issues contained in a named project."""

    def name(self) -> Text:
        return "get_issue_in_project_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['project_name']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_issues_in_project(tracker.get_slot('project_name'))))
        return []


class GetIssue(FormAction):
    """Form that looks up a Jira issue by its summary and utters it as JSON."""

    def name(self) -> Text:
        return "get_issue_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['issue_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_issue(tracker.get_slot('issue_summary'))))
        return []
class GetEpic(FormAction):
    """Form that looks up a Jira epic by its summary and utters it as JSON."""

    def name(self) -> Text:
        return "get_epic_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['epic_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_issue(tracker.get_slot('epic_summary'))))
        return []


class GetTask(FormAction):
    """Form that looks up a Jira task by its summary and utters it as JSON."""

    def name(self) -> Text:
        return "get_task_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['task_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_issue(tracker.get_slot('task_summary'))))
        return []


class GetStatusOfIssue(FormAction):
    """Form that reports the status of an issue identified by its summary."""

    def name(self) -> Text:
        return "get_status_of_issue_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['issue_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_status_of_issue(tracker.get_slot('issue_summary'))))
        return []


class GetStatusOfEpic(FormAction):
    """Form that reports the status of an epic identified by its summary."""

    def name(self) -> Text:
        return "get_status_of_epic_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['epic_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_status_of_issue(tracker.get_slot('epic_summary'))))
        return []


class GetStatusOfTask(FormAction):
    """Form that reports the status of a task identified by its summary."""

    def name(self) -> Text:
        return "get_status_of_task_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['task_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_status_of_issue(tracker.get_slot('task_summary'))))
        return []
class GetIssueVersion(FormAction):
    """Form that reports the version info of an issue given its summary."""

    def name(self) -> Text:
        return "get_issue_version_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['issue_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_issue_version(tracker.get_slot('issue_summary'))))
        return []


class GetEpicVersion(FormAction):
    """Form that reports the version info of an epic given its summary."""

    def name(self) -> Text:
        return "get_epic_version_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['epic_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_issue_version(tracker.get_slot('epic_summary'))))
        return []


class GetTaskVersion(FormAction):
    """Form that reports the version info of a task given its summary."""

    def name(self) -> Text:
        return "get_task_version_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['task_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_issue_version(tracker.get_slot('task_summary'))))
        return []
class GetCommentIssue(FormAction):
    """Form that utters the comments on an issue given its summary."""

    def name(self) -> Text:
        return "get_comment_issue_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['issue_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_comments_in_issue(tracker.get_slot('issue_summary'))))
        return []


class GetCommentEpic(FormAction):
    """Form that utters the comments on an epic given its summary."""

    def name(self) -> Text:
        return "get_comment_epic_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['epic_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_comments_in_issue(tracker.get_slot('epic_summary'))))
        return []


class GetCommentTask(FormAction):
    """Form that utters the comments on a task given its summary."""

    def name(self) -> Text:
        return "get_comment_task_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['task_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_comments_in_issue(tracker.get_slot('task_summary'))))
        return []
class GetWorklogIssue(FormAction):
    """Form that utters the worklog of an issue given its summary."""

    def name(self) -> Text:
        return "get_worklog_issue_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['issue_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_worklog_in_issue(tracker.get_slot('issue_summary'))))
        return []


class GetWorklogTask(FormAction):
    """Form that utters the worklog of a task given its summary."""

    def name(self) -> Text:
        return "get_worklog_task_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['task_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_worklog_in_issue(tracker.get_slot('task_summary'))))
        return []


class GetWorklogEpic(FormAction):
    """Form that utters the worklog of an epic given its summary."""

    def name(self) -> Text:
        return "get_worklog_epic_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        return ['epic_summary']

    def submit(self, dispatcher: CollectingDispatcher, tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        dispatcher.utter_message(text="Parameters Submitted")
        dispatcher.utter_message(
            text=json.dumps(get_worklog_in_issue(tracker.get_slot('epic_summary'))))
        return []
class GetLatestInboxEmail(Action):
    """Action that utters the latest email(s) from the inbox."""

    def name(self) -> Text:
        return "action_get_latest_email_in_inbox"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        # NOTE(review): assumes the latest user message is a bare integer
        # (how many mails to fetch); int() raises otherwise — confirm upstream.
        count = int(tracker.latest_message.get('text'))
        dispatcher.utter_message(text=LatestMailInInbox(count))
        return []
class GetLatestUserEmail(Action):
    """Action that utters the latest email(s) received from a given sender."""

    def name(self) -> Text:
        return "action_get_latest_email_from_user"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        sender = str(tracker.get_slot("query"))
        # NOTE(review): assumes the latest user message is a bare integer
        # (mail count); int() raises otherwise — confirm upstream.
        count = int(tracker.latest_message.get('text'))
        dispatcher.utter_message(text=GetLatestMailFromUser(sender, count))
        return []


class GetLatestLabelEmail(Action):
    """Action that utters the latest email(s) under a given label."""

    def name(self) -> Text:
        return "action_get_latest_email_from_label"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        label = str(tracker.get_slot("query"))
        # NOTE(review): same bare-integer assumption as above — confirm upstream.
        count = int(tracker.latest_message.get('text'))
        dispatcher.utter_message(text=GetLatestMailFromLabel(label, count))
        return []
class SendEmail(FormAction):
    """Form that collects receiver, subject and body, then sends the email."""

    def name(self) -> Text:
        return "send_email_form"

    def slot_mappings(self):
        # Fill each slot from its entity if extracted, otherwise from free text.
        return {"email_body": [self.from_entity(entity="email_body"),
                               self.from_text()],
                "receiver": [self.from_entity(entity="receiver"),
                             self.from_text()],
                "subject": [self.from_entity(entity="subject"),
                            self.from_text()]}

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """The required entries for this function."""
        print("required_slots(tracker : Tracker)")
        return ["receiver", "subject", "email_body"]

    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        body = str(tracker.get_slot("email_body"))
        receiver = str(tracker.get_slot("receiver"))
        subject = str(tracker.get_slot("subject"))
        # Fixed: previously this defined a nested `run` function and invoked it
        # with class objects as throwaway arguments; call the helper directly.
        SendMail(body, receiver, subject)
        dispatcher.utter_message(text="Email Sent")
        return []
class SendEmailWithAttachments(FormAction):
    """Rasa form action: collect recipient, subject, body and attachment
    location, then send the email with the attachment."""

    def name(self) -> Text:
        # Form name referenced from the Rasa domain.
        return "send_email_with_attachments_form"

    def slot_mappings(self):
        # type: () -> Dict[Text: Union[Dict, List[Dict]]]
        # Fill each slot from its entity when extracted, else from free text.
        return {"email_body": [self.from_entity(entity="email_body"),
                               self.from_text()],
                "receiver": [self.from_entity(entity="receiver"),
                             self.from_text()],
                "subject": [self.from_entity(entity="subject"),
                            self.from_text()],
                "file_dir": [self.from_entity(entity="file_dir"),
                             self.from_text()],
                "filename": [self.from_entity(entity="filename"),
                             self.from_text()]}

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """ The required entries for this function """
        print("required_slots(tracker : Tracker)")
        return ["receiver", "subject", "email_body", "file_dir", "filename"]

    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        """Send the email with its attachment once all slots are filled.

        The original wrapped the send in a nested `run` function invoked
        with class objects as arguments; the indirection added nothing and
        has been removed — behavior is unchanged.
        """
        email_body = str(tracker.get_slot("email_body"))
        receiver = str(tracker.get_slot("receiver"))
        subject = str(tracker.get_slot("subject"))
        file_dir = str(tracker.get_slot("file_dir"))
        filename = str(tracker.get_slot("filename"))
        SendMailWithAttachments(email_body, receiver, subject, file_dir, filename)
        dispatcher.utter_message(text="Email Sent")
        return []
|
en
| 0.437236
|
# This files contains your custom actions which can be used to run # custom Python code. # # See this guide on how to implement these action: # https://rasa.com/docs/rasa/core/actions/#custom-actions/ # This is a simple example for a custom action which utters "Hello World!" # if (tracker.get_slot("bitbucket_action")): # if ("watchers" in tracker.get_slot("bitbucket_action") or "list of watchers" in tracker.get_slot("bitbucket_action")): # return ["bitbucket_action","repo_name","owner_name"] # if (tracker.get_slot("search_keys")): # if ("who" or "who all" in tracker.get_slot("search_keys")): # return ["bitbucket_action","repo_name","owner_name"] # if (tracker.get_slot("bitbucket_action")): # if ("watchers" in tracker.get_slot("bitbucket_action") or "list of watchers" in tracker.get_slot("bitbucket_action")): # return ["bitbucket_action","repo_name","owner_name"] # if (tracker.get_slot("search_keys")): # if ("who" or "who all" in tracker.get_slot("search_keys")): # return ["bitbucket_action","repo_name","owner_name"] # if (tracker.get_slot("bitbucket_action")): # if ("watchers" in tracker.get_slot("bitbucket_action") or "list of watchers" in tracker.get_slot("bitbucket_action")): # return ["bitbucket_action","repo_name","owner_name"] # if (tracker.get_slot("search_keys")): # if ("who" or "who all" in tracker.get_slot("search_keys")): # return ["bitbucket_action","repo_name","owner_name"] # Information about all the spaces # Create a new space # type: () -> Dict[Text: Union[Dict, List[Dict]]] The required entries for this function #dispatcher.utter_message(text="Kya baat hai!!") #dispatcher.utter_message(template="utter_submit") #return [t] #t = # txt = json.loads(t) # Info of a specific space # Get pages in a space # Create a new page # type: () -> Dict[Text: Union[Dict, List[Dict]]] # def validate_body( # self, value:Text, # dispatcher: CollectingDispatcher, # tracker: Tracker, # domain: Dict[Text, Any]) -> List[Dict[Text, Any]]: The required entries for this 
function #dispatcher.utter_message(text="Kya baat hai!!") #dispatcher.utter_message(template="utter_submit") #dispatcher.utter_message(text="Page Created") # Delete a Page # Get Page info using id # Export Page as PDF # type: () -> Dict[Text: Union[Dict, List[Dict]]] The required entries for this function #dispatcher.utter_message(text="Kya baat hai!!") #dispatcher.utter_message(template="utter_submit") ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name ## return the same form name # tx = json.dumps(t, indent = 4) # txt = json.loads(tx) # txtt = json.dumps(txt, indent = 2) # @staticmethod # def required_slots(tracker: Tracker) -> List[Text]: # """ The required entries for this function """ # print("required_slots(tracker : Tracker)") # return ["query"] #tx = json.dumps(t, indent = 4) # txt = json.loads(tx) # txtt = json.dumps(txt, indent = 2) # @staticmethod # def required_slots(tracker: Tracker) -> List[Text]: # """ The required entries for this function """ # print("required_slots(tracker : Tracker)") # return ["query"] #tx = json.dumps(t, indent = 4) # txt = json.loads(tx) # txtt = json.dumps(txt, indent = 2) # type: () -> Dict[Text: Union[Dict, List[Dict]]] The required entries for this function # type: () -> Dict[Text: Union[Dict, List[Dict]]] The required entries for this function
| 2.600415
| 3
|
examples/TestTorchfly/MNIST/model.py
|
ECS-251-W2020/final-project-TorchFly
| 0
|
6628214
|
from abc import ABC, abstractmethod
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Any, Dict
class FlyModule(nn.Module, ABC):
    """Abstract torch module base: stores a config object and requires
    subclasses to implement `forward` returning a dict of tensors."""
    def __init__(self, *args, **kwargs):
        super().__init__()
        # NOTE(review): assumes the config is the first positional argument;
        # raises IndexError when constructed without one — confirm callers.
        self.config = args[0]
    @abstractmethod
    def forward(self, *args, **kwargs) -> Dict[str, torch.Tensor]:
        pass
class CNNNet(FlyModule):
    """Small two-conv-layer MNIST classifier.

    `forward` consumes a batch dict with "input" (images) and "target"
    (labels) and returns {"loss", "output"} where output holds
    log-softmax class scores.
    """

    def __init__(self, config):
        super().__init__(config.model)
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        images, labels = batch["input"], batch["target"]
        # Conv stack: conv1 -> relu -> conv2 -> 2x2 max-pool -> dropout.
        feats = F.relu(self.conv1(images))
        feats = self.dropout1(F.max_pool2d(self.conv2(feats), 2))
        # Classifier head on the flattened feature map.
        hidden = self.dropout2(F.relu(self.fc1(torch.flatten(feats, 1))))
        log_probs = F.log_softmax(self.fc2(hidden), dim=1)
        return {
            "loss": F.nll_loss(log_probs, labels),
            "output": log_probs,
        }
|
from abc import ABC, abstractmethod
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Any, Dict
class FlyModule(nn.Module, ABC):
    """Abstract torch module base: stores a config object and requires
    subclasses to implement `forward` returning a dict of tensors."""
    def __init__(self, *args, **kwargs):
        super().__init__()
        # NOTE(review): assumes the config is the first positional argument;
        # raises IndexError when constructed without one — confirm callers.
        self.config = args[0]
    @abstractmethod
    def forward(self, *args, **kwargs) -> Dict[str, torch.Tensor]:
        pass
class CNNNet(FlyModule):
    """Two-conv-layer MNIST classifier returning NLL loss and
    log-softmax class scores."""
    def __init__(self, config):
        super().__init__(config.model)
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        # 9216 input features — presumably 64 channels x 12 x 12 after the
        # conv/pool stack on 28x28 input; TODO confirm expected input size.
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)
    def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        """Compute log-softmax output and NLL loss for a batch dict with
        "input" and "target" keys."""
        x = batch["input"]
        target = batch["target"]
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=1)
        loss = F.nll_loss(output, target)
        results = {
            "loss": loss,
            "output": output
        }
        return results
|
none
| 1
| 2.822729
| 3
|
|
Python/StringToIntegerAtoi.py
|
TonnyL/Windary
| 205
|
6628215
|
# -*- coding: UTF-8 -*-
#
# Implement atoi to convert a string to an integer.
#
# Hint: Carefully consider all possible input cases. If you want a challenge,
# please do not see below and ask yourself what are the possible input cases.
#
# Notes: It is intended for this problem to be specified vaguely (ie, no given input specs).
# You are responsible to gather all the input requirements up front.
#
# Python, Python 3 all accepted.
class StringToIntegerAtoi:
    """LeetCode 8: convert a string to a 32-bit signed integer (atoi)."""

    def myAtoi(self, str):
        """Parse optional leading whitespace, an optional sign, then digits.

        The result is clamped to the signed 32-bit range; any non-digit
        character stops the scan, and a string with no leading digits
        yields 0.

        Fixes two defects in the original: a single-character "9" fell
        into the `return 0` branch (inclusive comparison `s[0] >= '9'`),
        and the overflow guard clamped in-range values such as
        "2147483645" to INT_MAX.

        :type str: str
        :rtype: int
        """
        INT_MAX = 2147483647
        INT_MIN = -2147483648

        s = str.strip()
        if not s:
            return 0

        # Optional single leading sign.
        i = 0
        sign = 1
        if s[0] in ('+', '-'):
            sign = -1 if s[0] == '-' else 1
            i = 1

        result = 0
        while i < len(s) and '0' <= s[i] <= '9':
            result = result * 10 + (ord(s[i]) - ord('0'))
            # Clamp as soon as the magnitude leaves the 32-bit range.
            if sign == 1 and result > INT_MAX:
                return INT_MAX
            if sign == -1 and -result < INT_MIN:
                return INT_MIN
            i += 1
        return sign * result
|
# -*- coding: UTF-8 -*-
#
# Implement atoi to convert a string to an integer.
#
# Hint: Carefully consider all possible input cases. If you want a challenge,
# please do not see below and ask yourself what are the possible input cases.
#
# Notes: It is intended for this problem to be specified vaguely (ie, no given input specs).
# You are responsible to gather all the input requirements up front.
#
# Python, Python 3 all accepted.
class StringToIntegerAtoi:
    """LeetCode 8 (atoi) — see NOTE(review) comments for known defects."""
    def myAtoi(self, str):
        """
        :type str: str
        :rtype: int
        """
        s = str.strip()
        length = len(s)
        if length == 0:
            return 0
        if length == 1:
            # NOTE(review): the inclusive comparisons send the single digits
            # '0' and '9' into the `return 0` branch — "9" wrongly yields 0.
            if s[0] <= '0' or s[0] >= '9':
                return 0
            else:
                return int(s)
        int_max_value = 2147483647
        int_min_value = -2147483648
        if s[0] == '+':
            plus = True
        else:
            plus = False
        if s[0] == '-':
            minus = True
        else:
            minus = False
        if plus or minus:
            start_index = 1
        else:
            start_index = 0
        result = 0
        for i in range(start_index, length):
            if ord('0') <= ord(s[i]) <= ord('9'):
                # NOTE(review): this overflow guard clamps too early — e.g.
                # "2147483645" is in range but returns 2147483647 here.
                if int_max_value // 10 - (ord(s[i]) - ord('0')) <= result:
                    if minus and result * 10 + (ord(s[i]) - ord('0')) == int_max_value:
                        return -int_max_value
                    if minus:
                        return int_min_value
                    else:
                        return int_max_value
                result = result * 10 + (ord(s[i]) - ord('0'))
            else:
                # A non-digit character terminates the scan.
                if minus:
                    return -result
                else:
                    return result
        if minus:
            return -result
        else:
            return result
|
en
| 0.68676
|
# -*- coding: UTF-8 -*- # # Implement atoi to convert a string to an integer. # # Hint: Carefully consider all possible input cases. If you want a challenge, # please do not see below and ask yourself what are the possible input cases. # # Notes: It is intended for this problem to be specified vaguely (ie, no given input specs). # You are responsible to gather all the input requirements up front. # # Python, Python 3 all accepted. :type str: str :rtype: int
| 3.247653
| 3
|
jamon/game/common/writer.py
|
jrburga/JamOn
| 3
|
6628216
|
#####################################################################
#
# writer.py
#
# Copyright (c) 2015, <NAME>
#
# Released under the MIT License (http://opensource.org/licenses/MIT)
#
#####################################################################
import numpy as np
import os.path
import wave
from audio import Audio
class AudioWriter(object):
    """Captures mono audio buffers and saves them as a .wav or .npy file.

    NOTE(review): Python 2 code (print statements) — kept as-is.
    """
    def __init__(self, filebase, output_wave=True):
        # filebase: path prefix for output files; a numeric suffix is appended.
        # output_wave: True -> write a WAV file, False -> save a raw numpy array.
        super(AudioWriter, self).__init__()
        self.active = False
        self.buffers = []
        self.filebase = filebase
        self.output_wave = output_wave
    def add_audio(self, data, num_channels) :
        # Append one frame of samples while capture is active.
        if self.active:
            # only use a single channel if we are in stereo
            if num_channels == 2:
                data = data[0::2]
            self.buffers.append(data)
    def toggle(self) :
        # Flip between capturing and idle.
        if self.active:
            self.stop()
        else:
            self.start()
    def start(self) :
        # Begin a fresh capture, discarding any previously buffered audio.
        if not self.active:
            print 'AudioWriter: start capture'
            self.active = True
            self.buffers = []
    def stop(self) :
        # End capture and flush the accumulated samples to disk.
        if self.active:
            print 'AudioWriter: stop capture'
            self.active = False
            output = combine_buffers(self.buffers)
            if len(output) == 0:
                print 'AudioWriter: empty buffers. Nothing to write'
                return
            ext = 'wav' if self.output_wave else 'npy'
            filename = self._get_filename(ext)
            print 'AudioWriter: saving', len(output), 'samples in', filename
            if self.output_wave:
                write_wave_file(output, 1, filename)
            else:
                np.save(filename, output)
    # look for a filename that does not exist yet.
    def _get_filename(self, ext) :
        suffix = 1
        while(True) :
            filename = '%s%d.%s' % (self.filebase, suffix, ext)
            if not os.path.exists(filename) :
                return filename
            else:
                suffix += 1
def write_wave_file(buf, num_channels, name):
    """Write float samples to a 16-bit PCM WAV file.

    buf: 1-D numpy float array, presumably in [-1, 1) — scaled to int16.
    num_channels: channel count stamped into the WAV header.
    name: output file path.
    """
    f = wave.open(name, 'w')
    f.setnchannels(num_channels)
    f.setsampwidth(2)  # 16-bit samples
    f.setframerate(Audio.sample_rate)
    buf = buf * (2**15)
    buf = buf.astype(np.int16)
    # ndarray.tostring() was removed in NumPy 1.20; tobytes() is the
    # long-standing equivalent.
    f.writeframes(buf.tobytes())
    # Close explicitly rather than relying on interpreter finalization.
    f.close()
# create single buffer from an array of buffers:
def combine_buffers(buffers):
    """Concatenate a list of sample buffers into one float32 numpy array."""
    total = sum(len(chunk) for chunk in buffers)
    # Allocate the destination once, then copy each chunk into place.
    merged = np.empty(total, dtype=np.float32)
    offset = 0
    for chunk in buffers:
        n = len(chunk)
        merged[offset:offset + n] = chunk
        offset += n
    return merged
|
#####################################################################
#
# writer.py
#
# Copyright (c) 2015, <NAME>
#
# Released under the MIT License (http://opensource.org/licenses/MIT)
#
#####################################################################
import numpy as np
import os.path
import wave
from audio import Audio
class AudioWriter(object):
    """Captures mono audio buffers and saves them as a .wav or .npy file.

    NOTE(review): Python 2 code (print statements) — kept as-is.
    """
    def __init__(self, filebase, output_wave=True):
        # filebase: path prefix for output files; a numeric suffix is appended.
        # output_wave: True -> write a WAV file, False -> save a raw numpy array.
        super(AudioWriter, self).__init__()
        self.active = False
        self.buffers = []
        self.filebase = filebase
        self.output_wave = output_wave
    def add_audio(self, data, num_channels) :
        # Append one frame of samples while capture is active.
        if self.active:
            # only use a single channel if we are in stereo
            if num_channels == 2:
                data = data[0::2]
            self.buffers.append(data)
    def toggle(self) :
        # Flip between capturing and idle.
        if self.active:
            self.stop()
        else:
            self.start()
    def start(self) :
        # Begin a fresh capture, discarding any previously buffered audio.
        if not self.active:
            print 'AudioWriter: start capture'
            self.active = True
            self.buffers = []
    def stop(self) :
        # End capture and flush the accumulated samples to disk.
        if self.active:
            print 'AudioWriter: stop capture'
            self.active = False
            output = combine_buffers(self.buffers)
            if len(output) == 0:
                print 'AudioWriter: empty buffers. Nothing to write'
                return
            ext = 'wav' if self.output_wave else 'npy'
            filename = self._get_filename(ext)
            print 'AudioWriter: saving', len(output), 'samples in', filename
            if self.output_wave:
                write_wave_file(output, 1, filename)
            else:
                np.save(filename, output)
    # look for a filename that does not exist yet.
    def _get_filename(self, ext) :
        suffix = 1
        while(True) :
            filename = '%s%d.%s' % (self.filebase, suffix, ext)
            if not os.path.exists(filename) :
                return filename
            else:
                suffix += 1
def write_wave_file(buf, num_channels, name):
    # Write float samples (presumably in [-1, 1) — TODO confirm) to a
    # 16-bit PCM WAV file at Audio.sample_rate.
    f = wave.open(name, 'w')
    f.setnchannels(num_channels)
    f.setsampwidth(2)  # 16-bit samples
    f.setframerate(Audio.sample_rate)
    buf = buf * (2**15)  # scale floats to int16 range
    buf = buf.astype(np.int16)
    # NOTE(review): ndarray.tostring() was removed in NumPy 1.20 — should
    # be tobytes(). The wave handle is also never closed explicitly.
    f.writeframes(buf.tostring())
# create single buffer from an array of buffers:
def combine_buffers(buffers):
    """Concatenate a list of sample buffers into one float32 numpy array."""
    size = 0
    for b in buffers:
        size += len(b)
    # create a single output buffer of the right size
    output = np.empty( size, dtype=np.float32 )
    f = 0
    for b in buffers:
        output[f:f+len(b)] = b
        f += len(b)
    return output
|
en
| 0.396942
|
##################################################################### # # writer.py # # Copyright (c) 2015, <NAME> # # Released under the MIT License (http://opensource.org/licenses/MIT) # ##################################################################### # only use a single channel if we are in stereo # look for a filename that does not exist yet. # create single buffer from an array of buffers: # create a single output buffer of the right size
| 3.141351
| 3
|
src/paginateProcessDataTrainTestFiles.py
|
aws-samples/aim317-uncover-insights-customer-conversations
| 0
|
6628217
|
<filename>src/paginateProcessDataTrainTestFiles.py
import boto3
import os
import io
import pandas as pd
def lambda_handler(event, context):
    """AWS Lambda entry point: reshape the raw Comprehend training CSV into
    a header-less (label, text) file and upload it back to S3."""
    s3 = boto3.client('s3')
    # Pull the raw training CSV from the configured bucket.
    raw_data = s3.get_object(Bucket=os.environ['comprehendBucket'], Key='comprehend/train/aim317-cust-class-train-data.csv')
    raw_content = pd.read_csv(io.BytesIO(raw_data['Body'].read()))
    print(raw_content)
    # Cast labels to strings before writing.
    raw_content['label'] = raw_content['label'].astype(str)
    selected_columns = ['label', 'text']
    selected_data = raw_content[selected_columns]
    DSTTRAINFILE='/tmp/comprehend-train.csv'
    # No header/index; backslash-escape embedded quotes instead of
    # doubling them (doublequote=False).
    selected_data.to_csv(path_or_buf=DSTTRAINFILE,
                         header=False,
                         index=False,
                         escapechar='\\',
                         doublequote=False,
                         quotechar='"')
    s3 = boto3.client('s3')
    prefix = 'comprehend-custom-classifier'
    bucket = os.environ['comprehendBucket']
    s3.upload_file(DSTTRAINFILE, bucket, prefix+'/comprehend-train.csv')
|
<filename>src/paginateProcessDataTrainTestFiles.py
import boto3
import os
import io
import pandas as pd
def lambda_handler(event, context):
    """AWS Lambda entry point: reshape the raw Comprehend training CSV into
    a header-less (label, text) file and upload it back to S3."""
    s3 = boto3.client('s3')
    # Pull the raw training CSV from the configured bucket.
    raw_data = s3.get_object(Bucket=os.environ['comprehendBucket'], Key='comprehend/train/aim317-cust-class-train-data.csv')
    raw_content = pd.read_csv(io.BytesIO(raw_data['Body'].read()))
    print(raw_content)
    # Cast labels to strings before writing.
    raw_content['label'] = raw_content['label'].astype(str)
    selected_columns = ['label', 'text']
    selected_data = raw_content[selected_columns]
    DSTTRAINFILE='/tmp/comprehend-train.csv'
    # No header/index; backslash-escape embedded quotes instead of
    # doubling them (doublequote=False).
    selected_data.to_csv(path_or_buf=DSTTRAINFILE,
                         header=False,
                         index=False,
                         escapechar='\\',
                         doublequote=False,
                         quotechar='"')
    s3 = boto3.client('s3')
    prefix = 'comprehend-custom-classifier'
    bucket = os.environ['comprehendBucket']
    s3.upload_file(DSTTRAINFILE, bucket, prefix+'/comprehend-train.csv')
|
none
| 1
| 2.44084
| 2
|
|
magicauth/forms.py
|
JMIdeaMaker/django-magicauth
| 36
|
6628218
|
from django import forms
from django.contrib.auth import get_user_model
from django.utils.module_loading import import_string
from django.core.validators import RegexValidator
from django.core.exceptions import ValidationError
from django_otp import user_has_device, devices_for_user
from magicauth import settings as magicauth_settings
from magicauth.models import MagicToken
email_unknown_callback = import_string(magicauth_settings.EMAIL_UNKNOWN_CALLBACK)
class EmailForm(forms.Form):
    """Login form asking only for an email address."""
    email = forms.EmailField()
    def clean_email(self):
        """Lower-case the address; fire the unknown-email callback when no
        user matches the configured email field."""
        user_email = self.cleaned_data["email"]
        user_email = user_email.lower()
        email_field = magicauth_settings.EMAIL_FIELD
        # Case-insensitive lookup on the configured email field.
        field_lookup = {f"{email_field}__iexact": user_email}
        if not get_user_model().objects.filter(**field_lookup).exists():
            # NOTE(review): callback only — no ValidationError is raised
            # here, so an unknown email still passes form validation.
            email_unknown_callback(user_email)
        return user_email
class OTPForm(forms.Form):
    """Form validating a one-time-password token against the user's
    registered OTP devices."""
    OTP_NUM_DIGITS = magicauth_settings.OTP_NUM_DIGITS
    otp_token = forms.CharField(
        max_length=OTP_NUM_DIGITS,
        min_length=OTP_NUM_DIGITS,
        # NOTE(review): regex hard-codes 6 digits and may disagree with a
        # configured OTP_NUM_DIGITS != 6 — confirm.
        validators=[RegexValidator(r"^\d{6}$")],
        label=f"Entrez le code à {OTP_NUM_DIGITS} chiffres généré par votre téléphone ou votre carte OTP",
        widget=forms.TextInput(attrs={"autocomplete": "off"}),
    )
    def __init__(self, user, *args, **kwargs):
        # user: the account whose registered devices will verify the token.
        super(OTPForm, self).__init__(*args, **kwargs)
        self.user = user
    def clean_otp_token(self):
        """Accept the token if any of the user's OTP devices verifies it."""
        otp_token = self.cleaned_data["otp_token"]
        user = self.user
        if not user_has_device(user):
            raise ValidationError("Le système n'a pas trouvé d'appareil (carte OTP ou générateur sur téléphone) pour votre compte. Contactez le support pour en ajouter un.")
        for device in devices_for_user(user):
            if device.verify_is_allowed() and device.verify_token(otp_token):
                return otp_token
        raise ValidationError("Ce code n'est pas valide.")
|
from django import forms
from django.contrib.auth import get_user_model
from django.utils.module_loading import import_string
from django.core.validators import RegexValidator
from django.core.exceptions import ValidationError
from django_otp import user_has_device, devices_for_user
from magicauth import settings as magicauth_settings
from magicauth.models import MagicToken
email_unknown_callback = import_string(magicauth_settings.EMAIL_UNKNOWN_CALLBACK)
class EmailForm(forms.Form):
    """Login form asking only for an email address."""
    email = forms.EmailField()
    def clean_email(self):
        """Lower-case the address; fire the unknown-email callback when no
        user matches the configured email field."""
        user_email = self.cleaned_data["email"]
        user_email = user_email.lower()
        email_field = magicauth_settings.EMAIL_FIELD
        # Case-insensitive lookup on the configured email field.
        field_lookup = {f"{email_field}__iexact": user_email}
        if not get_user_model().objects.filter(**field_lookup).exists():
            # NOTE(review): callback only — no ValidationError is raised
            # here, so an unknown email still passes form validation.
            email_unknown_callback(user_email)
        return user_email
class OTPForm(forms.Form):
    """Form validating a one-time-password token against the user's
    registered OTP devices."""
    OTP_NUM_DIGITS = magicauth_settings.OTP_NUM_DIGITS
    otp_token = forms.CharField(
        max_length=OTP_NUM_DIGITS,
        min_length=OTP_NUM_DIGITS,
        # NOTE(review): regex hard-codes 6 digits and may disagree with a
        # configured OTP_NUM_DIGITS != 6 — confirm.
        validators=[RegexValidator(r"^\d{6}$")],
        label=f"Entrez le code à {OTP_NUM_DIGITS} chiffres généré par votre téléphone ou votre carte OTP",
        widget=forms.TextInput(attrs={"autocomplete": "off"}),
    )
    def __init__(self, user, *args, **kwargs):
        # user: the account whose registered devices will verify the token.
        super(OTPForm, self).__init__(*args, **kwargs)
        self.user = user
    def clean_otp_token(self):
        """Accept the token if any of the user's OTP devices verifies it."""
        otp_token = self.cleaned_data["otp_token"]
        user = self.user
        if not user_has_device(user):
            raise ValidationError("Le système n'a pas trouvé d'appareil (carte OTP ou générateur sur téléphone) pour votre compte. Contactez le support pour en ajouter un.")
        for device in devices_for_user(user):
            if device.verify_is_allowed() and device.verify_token(otp_token):
                return otp_token
        raise ValidationError("Ce code n'est pas valide.")
|
none
| 1
| 2.179513
| 2
|
|
dp/kth-largest-element-in-an-array.py
|
Neulana/leetcode
| 2
|
6628219
|
<gh_stars>1-10
"""
在未排序的数组中找到第 k 个最大的元素。请注意,你需要找的是数组排序后的第 k 个最大的元素,而不是第 k 个不同的元素。
示例 1:
输入: [3,2,1,5,6,4] 和 k = 2
输出: 5
示例 2:
输入: [3,2,3,1,2,4,5,5,6] 和 k = 4
输出: 4
说明:
你可以假设 k 总是有效的,且 1 ≤ k ≤ 数组的长度。
"""
import random
class Solution(object):
    def findKthLargest(self, nums, k):
        """Return the k-th largest element of nums via randomized quickselect.

        Assumes k is valid (1 <= k <= len(nums)), per the problem statement.
        Mutates nums in place while partitioning.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        def split(lo, hi):
            # Move a random pivot to the end, then sweep elements >= pivot
            # to the front so larger values sit at smaller indices.
            choice = random.randrange(lo, hi + 1)
            pivot = nums[choice]
            nums[choice], nums[hi] = nums[hi], nums[choice]
            boundary = lo
            for j in range(lo, hi):
                if nums[j] >= pivot:
                    nums[j], nums[boundary] = nums[boundary], nums[j]
                    boundary += 1
            nums[hi], nums[boundary] = nums[boundary], nums[hi]
            return boundary

        # Iterative quickselect: shrink the window that contains index k-1
        # of the descending order.
        lo, hi, target = 0, len(nums) - 1, k - 1
        while lo < hi:
            mid = split(lo, hi)
            if mid == target:
                return nums[mid]
            if mid < target:
                lo = mid + 1
            else:
                hi = mid - 1
        return nums[lo]
def partition(start, end, nums):
    """Partition nums[start:end+1] around a random pivot, descending.

    After the call, elements >= the pivot sit left of the returned index
    and elements <= it sit to the right. Mutates nums in place.
    """
    choice = random.randrange(start, end + 1)
    pivot = nums[choice]
    nums[choice], nums[end] = nums[end], nums[choice]
    boundary = start
    for idx in range(start, end):
        if nums[idx] >= pivot:
            nums[boundary], nums[idx] = nums[idx], nums[boundary]
            boundary += 1
    nums[end], nums[boundary] = nums[boundary], nums[end]
    return boundary
|
"""
在未排序的数组中找到第 k 个最大的元素。请注意,你需要找的是数组排序后的第 k 个最大的元素,而不是第 k 个不同的元素。
示例 1:
输入: [3,2,1,5,6,4] 和 k = 2
输出: 5
示例 2:
输入: [3,2,3,1,2,4,5,5,6] 和 k = 4
输出: 4
说明:
你可以假设 k 总是有效的,且 1 ≤ k ≤ 数组的长度。
"""
import random
class Solution(object):
    def findKthLargest(self, nums, k):
        """Return the k-th largest element via randomized quickselect.

        Mutates nums in place while partitioning.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        def quickselect(start, end, nums, k):
            # Recurse into the side of the partition containing index k.
            if start == end:
                return nums[start]
            mid = partition(start, end, nums)
            if mid == k:
                return nums[mid]
            elif k > mid:
                return quickselect(mid + 1, end, nums, k)
            else:
                return quickselect(start, mid - 1, nums, k)
        def partition(start, end, nums):
            # Random pivot; values >= pivot are swept left so the window is
            # ordered descending around the returned index.
            p = random.randrange(start, end + 1)
            pv = nums[p]
            nums[end], nums[p] = nums[p], nums[end]
            mid = start
            for i in range(start, end):
                if nums[i] >= pv:
                    nums[i], nums[mid] = nums[mid], nums[i]
                    mid += 1
            nums[mid], nums[end] = nums[end], nums[mid]
            return mid
        # The k-th largest lives at index k-1 of the descending order.
        ret = quickselect(0, len(nums) - 1, nums, k - 1)
        return ret
def partition(start, end, nums):
    # Module-level duplicate of the inner helper above (appears unused in
    # this view). Random-pivot partition, descending: elements >= the pivot
    # end up left of the returned index. Mutates nums in place.
    p = random.randrange(start, end + 1)
    pv = nums[p]
    nums[end], nums[p] = nums[p], nums[end]
    mid = start
    for i in range(start, end):
        if nums[i] >= pv:
            nums[i], nums[mid] = nums[mid], nums[i]
            mid += 1
    nums[mid], nums[end] = nums[end], nums[mid]
    return mid
|
zh
| 0.964521
|
在未排序的数组中找到第 k 个最大的元素。请注意,你需要找的是数组排序后的第 k 个最大的元素,而不是第 k 个不同的元素。 示例 1: 输入: [3,2,1,5,6,4] 和 k = 2 输出: 5 示例 2: 输入: [3,2,3,1,2,4,5,5,6] 和 k = 4 输出: 4 说明: 你可以假设 k 总是有效的,且 1 ≤ k ≤ 数组的长度。 :type A: List[int] :type k: int :rtype: int
| 3.553932
| 4
|
test/sampleData/raspberrypi/ExampleSpiRegister.py
|
polfeliu/cyanobyte
| 70
|
6628220
|
# Copyright (C) 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Auto-generated file for ExampleSpi v0.1.0.
# Generated from peripherals/examplespi.yaml using Cyanobyte Codegen v0.1.0
"""
Class for ExampleSpi
"""
import smbus
import spidev
class ExampleSpiRegister:
    """
    Example of a package using SPI

    Auto-generated register accessor exposing each register over both
    I2C (smbus) and SPI (spidev).
    """
    device_address = 0
    REGISTER_REGISTERW = 0
    REGISTER_REGISTERX = 1
    REGISTER_REGISTERY = 2
    REGISTER_REGISTERZ = 3

    def __init__(self):
        # Initialize connection to peripheral
        self.bus = smbus.SMBus(1)
        self.spi = spidev.SpiDev()
        self.device_address = 0
        bus = 0  # Only SPI bus 0 is available
        device = 1  # Chip select, 0 / 1 depending on connection
        self.spi.open(bus, device)
        self.spi.max_speed_hz = 16000
        self.spi.bits_per_word = 8
        self.spi.mode = 0b10

    def get_registerw(self):
        """
        An 8-bit register
        """
        val = self.bus.read_byte_data(
            self.device_address,
            self.REGISTER_REGISTERW
        )
        return val

    def set_registerw(self, data):
        """
        An 8-bit register
        """
        self.bus.write_byte_data(
            self.device_address,
            self.REGISTER_REGISTERW,
            data
        )

    def get_registerx(self):
        """
        A 16-bit register
        """
        val = self.bus.read_word_data(
            self.device_address,
            self.REGISTER_REGISTERX
        )
        return val

    def set_registerx(self, data):
        """
        A 16-bit register
        """
        self.bus.write_word_data(
            self.device_address,
            self.REGISTER_REGISTERX,
            data
        )

    def get_registery(self):
        """
        A 32-bit register
        """
        byte_list = self.bus.read_i2c_block_data(
            self.device_address,
            self.REGISTER_REGISTERY,
            4
        )
        # Fold the 4 bytes big-endian into one 32-bit value.
        val = 0
        val = val << 8 | byte_list[0]
        val = val << 8 | byte_list[1]
        val = val << 8 | byte_list[2]
        val = val << 8 | byte_list[3]
        return val

    def set_registery(self, data):
        """
        A 32-bit register
        """
        # BUG FIX: the generated code did `buffer = []` followed by
        # `buffer[0] = ...`, which raises IndexError on an empty list.
        # Build the four big-endian bytes directly instead.
        buffer = [
            (data >> 24) & 0xFF,
            (data >> 16) & 0xFF,
            (data >> 8) & 0xFF,
            (data >> 0) & 0xFF,
        ]
        self.bus.write_i2c_block_data(
            self.device_address,
            self.REGISTER_REGISTERY,
            buffer
        )

    def get_registerz(self):
        """
        A dummy register that has no data
        """
        val = self.bus.read_byte_data(
            self.device_address,
            self.REGISTER_REGISTERZ
        )
        return val

    def set_registerz(self):
        """
        A dummy register that has no data
        """
        self.bus.write_i2c_block_data(
            self.device_address,
            self.REGISTER_REGISTERZ,
            []
        )

    def spi_read_registerw(self):
        """
        An 8-bit register
        """
        # Simple read request msg
        msg = [self.device_address, self.REGISTER_REGISTERW]
        result = self.spi.xfer2(msg)
        return result

    def spi_write_registerw(self, data):
        """
        An 8-bit register
        """
        # Build request msg
        msg = [self.device_address, self.REGISTER_REGISTERW]
        msg = msg + data
        result = self.spi.xfer2(msg)
        return result

    def spi_read_registerx(self):
        """
        A 16-bit register
        """
        # Simple read request msg
        msg = [self.device_address, self.REGISTER_REGISTERX]
        result = self.spi.xfer2(msg)
        return result

    def spi_write_registerx(self, data):
        """
        A 16-bit register
        """
        # Build request msg
        msg = [self.device_address, self.REGISTER_REGISTERX]
        msg = msg + data
        result = self.spi.xfer2(msg)
        return result

    def spi_read_registery(self):
        """
        A 32-bit register
        """
        # Simple read request msg
        msg = [self.device_address, self.REGISTER_REGISTERY]
        result = self.spi.xfer2(msg)
        return result

    def spi_write_registery(self, data):
        """
        A 32-bit register
        """
        # Build request msg
        msg = [self.device_address, self.REGISTER_REGISTERY]
        msg = msg + data
        result = self.spi.xfer2(msg)
        return result

    def spi_read_registerz(self):
        """
        A dummy register that has no data
        """
        # Simple read request msg
        msg = [self.device_address, self.REGISTER_REGISTERZ]
        result = self.spi.xfer2(msg)
        return result

    def spi_write_registerz(self):
        """
        A dummy register that has no data
        """
        # Build request msg
        msg = [self.device_address, self.REGISTER_REGISTERZ]
        result = self.spi.xfer2(msg)
        return result
|
# Copyright (C) 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Auto-generated file for ExampleSpi v0.1.0.
# Generated from peripherals/examplespi.yaml using Cyanobyte Codegen v0.1.0
"""
Class for ExampleSpi
"""
import smbus
import spidev
class ExampleSpiRegister:
"""
Example of a package using SPI
"""
device_address = 0
REGISTER_REGISTERW = 0
REGISTER_REGISTERX = 1
REGISTER_REGISTERY = 2
REGISTER_REGISTERZ = 3
def __init__(self):
# Initialize connection to peripheral
self.bus = smbus.SMBus(1)
self.spi = spidev.SpiDev()
self.device_address = 0
bus = 0 # Only SPI bus 0 is available
device = 1 # Chip select, 0 / 1 depending on connection
self.spi.open(bus, device)
self.spi.max_speed_hz = 16000
self.spi.bits_per_word = 8
self.spi.mode = 0b10
def get_registerw(self):
"""
An 8-bit register
"""
val = self.bus.read_byte_data(
self.device_address,
self.REGISTER_REGISTERW
)
return val
def set_registerw(self, data):
"""
An 8-bit register
"""
self.bus.write_byte_data(
self.device_address,
self.REGISTER_REGISTERW,
data
)
def get_registerx(self):
"""
A 16-bit register
"""
val = self.bus.read_word_data(
self.device_address,
self.REGISTER_REGISTERX
)
return val
def set_registerx(self, data):
"""
A 16-bit register
"""
self.bus.write_word_data(
self.device_address,
self.REGISTER_REGISTERX,
data
)
def get_registery(self):
"""
A 32-bit register
"""
byte_list = self.bus.read_i2c_block_data(
self.device_address,
self.REGISTER_REGISTERY,
4
)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = val << 8 | byte_list[2]
val = val << 8 | byte_list[3]
return val
def set_registery(self, data):
"""
A 32-bit register
"""
buffer = []
buffer[0] = (data >> 24) & 0xFF
buffer[1] = (data >> 16) & 0xFF
buffer[2] = (data >> 8) & 0xFF
buffer[3] = (data >> 0) & 0xFF
self.bus.write_i2c_block_data(
self.device_address,
self.REGISTER_REGISTERY,
buffer
)
def get_registerz(self):
"""
A dummy register that has no data
"""
val = self.bus.read_byte_data(
self.device_address,
self.REGISTER_REGISTERZ
)
return val
def set_registerz(self):
"""
A dummy register that has no data
"""
self.bus.write_i2c_block_data(
self.device_address,
self.REGISTER_REGISTERZ,
[]
)
def spi_read_registerw(self):
"""
An 8-bit register
"""
# Simple read request msg
msg = [self.device_address, self.REGISTER_REGISTERW]
result = self.spi.xfer2(msg)
return result
def spi_write_registerw(self, data):
"""
An 8-bit register
"""
# Build request msg
msg = [self.device_address, self.REGISTER_REGISTERW]
msg = msg + data
result = self.spi.xfer2(msg)
return result
def spi_read_registerx(self):
"""
A 16-bit register
"""
# Simple read request msg
msg = [self.device_address, self.REGISTER_REGISTERX]
result = self.spi.xfer2(msg)
return result
def spi_write_registerx(self, data):
"""
A 16-bit register
"""
# Build request msg
msg = [self.device_address, self.REGISTER_REGISTERX]
msg = msg + data
result = self.spi.xfer2(msg)
return result
def spi_read_registery(self):
"""
A 32-bit register
"""
# Simple read request msg
msg = [self.device_address, self.REGISTER_REGISTERY]
result = self.spi.xfer2(msg)
return result
def spi_write_registery(self, data):
"""
A 32-bit register
"""
# Build request msg
msg = [self.device_address, self.REGISTER_REGISTERY]
msg = msg + data
result = self.spi.xfer2(msg)
return result
def spi_read_registerz(self):
"""
A dummy register that has no data
"""
# Simple read request msg
msg = [self.device_address, self.REGISTER_REGISTERZ]
result = self.spi.xfer2(msg)
return result
def spi_write_registerz(self):
    """
    A dummy register that has no data

    Send a bare write request (device address + register number only)
    for REGISTERZ over SPI and return the raw transfer result.
    """
    request = [self.device_address, self.REGISTER_REGISTERZ]
    return self.spi.xfer2(request)
|
en
| 0.800418
|
# Copyright (C) 2019 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Auto-generated file for ExampleSpi v0.1.0. # Generated from peripherals/examplespi.yaml using Cyanobyte Codegen v0.1.0 Class for ExampleSpi Example of a package using SPI # Initialize connection to peripheral # Only SPI bus 0 is available # Chip select, 0 / 1 depending on connection An 8-bit register An 8-bit register A 16-bit register A 16-bit register A 32-bit register A 32-bit register A dummy register that has no data A dummy register that has no data An 8-bit register # Simple read request msg An 8-bit register # Build request msg A 16-bit register # Simple read request msg A 16-bit register # Build request msg A 32-bit register # Simple read request msg A 32-bit register # Build request msg A dummy register that has no data # Simple read request msg A dummy register that has no data # Build request msg
| 2.343322
| 2
|
guiengine_test.py
|
megatron0000/ces22-xadrez
| 0
|
6628221
|
import unittest
from guiengine import *
def initpygame():
    """Initialize pygame and open an 800x600 display for tests that need one."""
    pygame.init()
    pygame.display.set_mode((800, 600))
class TestEventBus(unittest.TestCase):
    """Tests for EventBus publish/subscribe, disabling, and the active-bus singleton."""

    def gen_cb(self, number):
        """Return a callback that records `number` in self.called when invoked with data == 1."""
        def cb(data):
            if data == 1:
                self.called[number] = True
        return cb

    def setUp(self):
        self.bus = EventBus()
        self.called = {}

    def tearDown(self):
        # Reset the global active bus so tests don't leak state into each other.
        EventBus.active(None)

    def test_emission(self):
        """Every listener registered for an event fires on emit."""
        self.bus.on('event_1', self.gen_cb(1))
        self.bus.on('event_1', self.gen_cb(2))
        self.bus.on('event_2', self.gen_cb(3))
        self.bus.emit('event_1', 1)
        self.bus.emit('event_2', 1)
        self.assertEqual(self.called.get(1), True)
        self.assertEqual(self.called.get(2), True)
        self.assertEqual(self.called.get(3), True)

    def test_disable(self):
        """disable(event, cb) removes one listener; disable(event) removes all."""
        cb1 = self.gen_cb(1)
        cb2 = self.gen_cb(2)
        cb3 = self.gen_cb(3)
        self.bus.on('event_1', cb1)
        self.bus.on('event_1', cb2)
        self.bus.on('event_1', cb3)
        self.bus.disable('event_1', cb2)
        self.bus.emit('event_1', 1)
        self.assertEqual(self.called.get(1), True)
        self.assertEqual(self.called.get(2), None)
        self.assertEqual(self.called.get(3), True)
        self.called = {}
        self.bus.disable('event_1')
        self.bus.emit('event_1', 1)
        self.assertEqual(self.called.get(1), None)
        self.assertEqual(self.called.get(2), None)
        self.assertEqual(self.called.get(3), None)

    def test_disable_all(self):
        """disable_all() silences every event on the bus."""
        self.bus.on('event_1', self.gen_cb(1))
        self.bus.on('event_1', self.gen_cb(2))
        self.bus.on('event_2', self.gen_cb(3))
        self.bus.disable_all()
        self.bus.emit('event_1', 1)
        self.bus.emit('event_2', 1)
        self.assertEqual(self.called.get(1), None)
        self.assertEqual(self.called.get(2), None)
        # BUG FIX: the original asserted get(2) twice by copy-paste,
        # so callback 3 (the 'event_2' listener) was never checked.
        self.assertEqual(self.called.get(3), None)

    def test_active(self):
        """EventBus.active() is a settable process-wide singleton accessor."""
        inst = EventBus()
        # EventBus.active(None)
        # self.assertIsNone(EventBus.active())
        EventBus.active(inst)
        self.assertEqual(EventBus.active(), inst)
class TestBusProxy(unittest.TestCase):
    """Tests that BusProxy forwards on/emit/disable to the currently active EventBus."""

    def setUp(self):
        self.bus = EventBus()
        EventBus.active(self.bus)
        self.proxy = BusProxy()
        self.called = {}

    def gen_cb(self, number):
        """Return a callback that records `number` in self.called when invoked with data == 1."""
        def cb(data):
            if data == 1:
                self.called[number] = True
        return cb

    def test_emission(self):
        """Listeners registered directly on the bus and via the proxy both fire."""
        self.bus.on('event_1', self.gen_cb(1))
        self.proxy.on('event_1', self.gen_cb(2))
        self.proxy.emit('event_1', 1)
        self.assertTrue(self.called.get(1))
        self.assertTrue(self.called.get(2))

    def test_disable(self):
        """Disabling through the proxy removes only the targeted listener."""
        cb0 = self.gen_cb(1)
        cb1 = self.gen_cb(2)
        cb2 = self.gen_cb(3)
        cb3 = self.gen_cb(4)
        self.proxy.on('event_1', cb0)
        self.proxy.on('event_1', cb3)
        self.proxy.on('event_2', cb1)
        self.bus.on('event_1', cb2)
        self.proxy.disable('event_1', cb0)
        self.proxy.emit('event_1', 1)
        self.proxy.emit('event_2', 1)
        # cb0 was disabled; the other three (proxy- and bus-registered) still fire.
        self.assertIsNone(self.called.get(1))
        self.assertTrue(self.called.get(2))
        self.assertTrue(self.called.get(3))
        self.assertTrue(self.called.get(4))
class TestOuterBus(unittest.TestCase):
    """Tests that OuterBus translates pygame mouse events into engine bus events."""

    class Listeners:
        """Records which mouse events fired and validates their payloads."""
        def __init__(self, outer):
            """
            :type outer TestOuterBus
            """
            self.outer = outer
            self.moved = False
            self.button_down = False
            self.button_up = False

        def onmousemove(self, data):
            self.moved = True
            self.outer.assertEqual(data, (50, 50))

        def onbuttondown(self, data):
            self.button_down = True
            self.outer.assertEqual(data, (40, 60))

        def onbuttonup(self, data):
            self.button_up = True
            self.outer.assertEqual(data, (40, 60))

    def setUp(self):
        # Synthetic pygame events with known positions, matched by the listeners above.
        self.outer_bus = OuterBus()
        self.mousemove = pygame.event.Event(pygame.MOUSEMOTION, {
            'pos': (50, 50),
            'rel': (-10, 30),
            'buttons': (False, False, False)
        })
        self.buttondown = pygame.event.Event(pygame.MOUSEBUTTONDOWN, {
            'button': 1,
            'pos': (40, 60)
        })
        self.buttonup = pygame.event.Event(pygame.MOUSEBUTTONUP, {
            'button': 1,
            'pos': (40, 60)
        })
        self.listeners = self.Listeners(self)

    def _launch(self, listen_on_bus):
        """Post the synthetic pygame events and subscribe the listeners on `listen_on_bus`."""
        pygame.event.post(self.mousemove)
        pygame.event.post(self.buttondown)
        pygame.event.post(self.buttonup)
        listen_on_bus.on(Event.MOUSEMOVE, self.listeners.onmousemove)
        listen_on_bus.on(Event.MOUSEUP, self.listeners.onbuttonup)
        listen_on_bus.on(Event.MOUSEDOWN, self.listeners.onbuttondown)

    def test_emit_refresh(self):
        """refresh() drains the pygame queue and emits the corresponding bus events."""
        self._launch(self.outer_bus)
        self.outer_bus.refresh()
        self.assertTrue(self.listeners.moved)
        self.assertTrue(self.listeners.button_up)
        self.assertTrue(self.listeners.button_down)

    def test_redirect(self):
        """redirect(bus) makes refresh() emit onto the given bus instead."""
        bus = EventBus()
        self.outer_bus.redirect(bus)
        self._launch(bus)
        self.outer_bus.refresh()
        self.assertTrue(self.listeners.moved)
        self.assertTrue(self.listeners.button_up)
        self.assertTrue(self.listeners.button_down)
class TestMouseAware(unittest.TestCase):
    """
    TODO: Test MouseAware... or not...
    """
    def test_nothing(self):
        # Intentional placeholder so the suite still loads this class.
        pass
class TestResourceBank(unittest.TestCase):
    """Tests for the ResourceBank singleton resource loader and its cache."""

    def setUp(self):
        initpygame()
        self.bank = ResourceBank.instance()
        self.paths = ['resources/Cburnett V2 improved/PNGs/square brown dark_png.png',
                      'resources/Cburnett V2 improved/PNGs/square brown light_png.png']

    def test_instance(self):
        """instance() always returns the same singleton object."""
        self.assertEqual(self.bank, ResourceBank.instance())

    def test_image(self):
        self.assertIsInstance(self.bank.image(self.paths[0]), pygame.Surface)

    def test_sound(self):
        sound = self.bank.sound('Music/Music.ogg')
        self.assertIsInstance(sound, pygame.mixer.Sound)

    def test_font(self):
        font = self.bank.font(None, 12)
        self.assertIsInstance(font, pygame.font.Font)

    def test_caching(self):
        """Repeated loads hit the cache; cached=False forces a fresh object."""
        self.assertEqual(self.bank.image(self.paths[0]), self.bank.image(self.paths[0]))
        self.assertNotEqual(self.bank.image(self.paths[1]),
                            self.bank.image(self.paths[1], cached=False))
class TestImage(unittest.TestCase):
    """Tests for Image in-place scaling."""

    def setUp(self):
        initpygame()
        self.image = Image('resources/Cburnett V2 improved/PNGs/square brown dark_png.png')

    def test_scale(self):
        old_w = self.image.width
        old_h = self.image.height
        new_rf = self.image.scale(2)
        self.assertEqual(self.image.width, old_w * 2)
        self.assertEqual(self.image.height, old_h * 2)
        # scale() mutates in place and returns the same Image (fluent API).
        self.assertEqual(new_rf, self.image)
class TestText(unittest.TestCase):
    """Tests for Text surface caching and cache invalidation."""

    def setUp(self):
        self.c1 = 'Hola'
        self.c2 = 'Adios'
        self.txt = Text(self.c1, 12, None, (0, 0, 0), (255, 255, 255))

    def test_to_surface(self):
        # Must return the same surface on repeated calls, except when the
        # content or the colour changes (which invalidates the cache).
        s1 = self.txt.to_surface()
        s2 = self.txt.to_surface()
        self.txt.content(self.c2)
        s3 = self.txt.to_surface()
        s4 = self.txt.to_surface()
        self.txt.color((0, 0, 255))
        s5 = self.txt.to_surface()
        s6 = self.txt.to_surface()
        self.assertIs(s1, s2)
        self.assertIsNot(s1, s3)
        self.assertIs(s3, s4)
        self.assertIsNot(s3, s5)
        self.assertIs(s5, s6)
class TestRootDrawContext(unittest.TestCase):
    """Tests that the root draw context blits at absolute coordinates."""

    def setUp(self):
        self.ctx = RootDrawContext(Surface((500, 500)))
        self.img = Image('resources/Cburnett V2 improved/PNGs/square brown dark_png.png') \
            .scale(1 / 10)

    def test_blit(self):
        rect = self.ctx.blit(self.img, (30, 30))
        self.assertEqual(rect.x, 30)
        self.assertEqual(rect.y, 30)
class TestDrawContext(unittest.TestCase):
    """Tests that nested sub-contexts accumulate their origin offsets."""

    def setUp(self):
        initpygame()
        self.root = RootDrawContext(Surface((500, 500)))
        self.img = Image('resources/Cburnett V2 improved/PNGs/square brown dark_png.png') \
            .scale(1 / 10)

    def test_sub_blit(self):
        # Two nested sub() offsets plus the blit position should all add up.
        sub = self.root.sub((40, 40)).sub((60, 60))
        rect = sub.blit(self.img, (50, 50))
        self.assertEqual(rect.x, 40 + 60 + 50)
        self.assertEqual(rect.y, 40 + 60 + 50)
class TestSound(unittest.TestCase):
    """Smoke test: chained Sound play/stop calls must not raise."""

    def setUp(self):
        self.sound = Sound('Music/Music.ogg')

    def test_not_throws(self):
        self.sound.play(-1).stop().play(0).play(0).play(3).stop()
class TestEmptySound(unittest.TestCase):
    """Smoke test: the null-object EmptySound accepts the full fluent API."""

    def test_nothing(self):
        EmptySound().play(-1).play(0).stop().play(2).play(3).stop()
class TestRenderizable(unittest.TestCase):
    """Tests Renderizable's initial bounds and bus wiring."""

    def setUp(self):
        self.ren = Renderizable((10, 20))

    def test_bounds(self):
        # A fresh Renderizable sits at its origin with zero size.
        self.assertEqual(self.ren.bounds.x, 10)
        self.assertEqual(self.ren.bounds.y, 20)
        self.assertEqual(self.ren.bounds.width, 0)
        self.assertEqual(self.ren.bounds.height, 0)

    def test_bus(self):
        self.assertIsInstance(self.ren._bus, BusProxy)
class TestFigureNode(unittest.TestCase):
    """Tests that FigureNode blits its image at the node position on render."""

    class MockDrawContext(DrawContext):
        """Draw context that only records whether blit hit the expected position."""
        def __init__(self):
            self.blitted = False

        def blit(self, imagelike, xy):
            if xy == (10, 20):
                self.blitted = True

    def setUp(self):
        self.fig = FigureNode((10, 20), Image(
            'resources/Cburnett V2 improved/PNGs/square brown dark_png.png'
        ).scale(1 / 10))

    def test_update_render(self):
        mock = self.MockDrawContext()
        self.fig.update_render(mock, 0.01)
        self.assertTrue(mock.blitted)
class TestLayer(unittest.TestCase):
    """Tests that Layer propagates logic/render/destroy to its children."""

    class MockNode(Renderizable):
        """Child node that records which lifecycle methods were called."""
        def __init__(self, bounds):
            super().__init__(bounds.topleft)
            self.logic = False
            self.render = False
            self.destroyed = False
            self.bounds = bounds

        def update_logic(self, dt):
            self.logic = True

        def update_render(self, draw_context: DrawContext, dt):
            self.render = True

        def destroy(self):
            self.destroyed = True

    def setUp(self):
        self.layer = Layer((10, 10))
        self.c1 = TestLayer.MockNode(Rect((10, 10), (30, 40)))
        self.c2 = TestLayer.MockNode(Rect((20, 10), (30, 40)))
        self.layer._add_child(self.c1)
        self.layer._add_child(self.c2)

    def test_update_logic(self):
        self.layer.update_logic(0.01)
        self.assertTrue(self.c1.logic)
        self.assertTrue(self.c2.logic)

    def test_update_render(self):
        self.layer.update_render(RootDrawContext(Surface((10, 10))), 0.01)
        self.assertTrue(self.c1.render)
        self.assertTrue(self.c2.render)
        # Layer bounds grow to the union of the children's rects.
        self.assertEqual(self.layer.bounds, Rect((10, 10), (40, 40)))

    def test_remove_child(self):
        self.layer._remove_child(self.c2)
        self.layer.update_logic(0.01)
        self.assertTrue(self.c1.logic)
        self.assertFalse(self.c2.logic)

    def test_destroy(self):
        self.layer.destroy()
        self.assertTrue(self.c1.destroyed)
        self.assertTrue(self.c2.destroyed)
class TestScene(unittest.TestCase):
    """Smoke test: setting a background music on a Scene must not raise."""

    def setUp(self):
        self.scene = Scene()

    def test_bgm(self):
        sound = Sound('Music/Music.ogg')
        self.scene._bgm(sound)
class TestSceneManager(unittest.TestCase):
    """Tests the lazy scene-switch behaviour of SceneManager.tick."""

    class MockDrawContext(DrawContext):
        """A DrawContext whose drawing primitives are all no-ops."""
        def __init__(self):
            pass

        def sub(self, origin):
            pass

        def blit(self, imagelike, xy: tuple):
            pass

        def circle(self, center, radius):
            pass

        def line(self, xy1, xy2):
            pass

        def fill(self, color):
            pass

    class MockScene(Scene):
        """Scene that counts its cycles and immediately requests a scene change."""
        def __init__(self, outer):
            super().__init__()
            self.outer = outer

        def update_logic(self, dt):
            self.outer.logic += 1
            # Even with this event, update_render will still be called,
            # because the scene switch is lazy (it only happens on the next tick).
            self._bus.emit(Event.SCENE_CHANGE, lambda: self.outer.second_scene)

        def update_render(self, draw_context: DrawContext, dt):
            self.outer.render += 1

        def destroy(self):
            self.outer.destroyed = True

    class SecondMockScene(Scene):
        """Replacement scene; decrements the counters MockScene incremented."""
        def __init__(self, outer):
            # NOTE(review): does not call super().__init__() — confirm Scene
            # tolerates that being skipped here.
            self.outer = outer

        def update_logic(self, dt):
            self.outer.logic -= 1

        def update_render(self, draw_context: DrawContext, dt):
            self.outer.render -= 1

    def setUp(self):
        self.ctx = self.MockDrawContext()
        self.bus = EventBus()
        EventBus.active(self.bus)
        self.scene = self.MockScene(self)
        self.second_scene = self.SecondMockScene(self)
        self.mgr = SceneManager(self.ctx, self.bus, lambda: self.scene)
        self.logic = 0
        self.render = 0
        # Set when MockScene is destroyed, at which point
        # SecondMockScene should replace it.
        self.destroyed = False

    def test_tick(self):
        # First tick runs MockScene once (counters go to 1)...
        self.mgr.tick(0.01)
        self.assertEqual(self.logic, 1)
        self.assertEqual(self.render, 1)
        # ...second tick performs the switch and runs SecondMockScene (back to 0).
        self.mgr.tick(0.01)
        self.assertTrue(self.destroyed)
        self.assertEqual(self.logic, 0)
        self.assertEqual(self.render, 0)
class TestGameObject(unittest.TestCase):
    """End-to-end test of the GameObject game loop (100 frames then quit)."""

    class MockScene(Scene):
        """Scene that counts logic/render cycles and quits after 100 logic ticks."""
        def __init__(self):
            super().__init__()
            self.cycles = {
                'logic': 0,
                'render': 0
            }

        def update_render(self, draw_context: DrawContext, dt):
            self.cycles['render'] += 1

        def update_logic(self, dt):
            self._bus.emit(Event.REQ_ANIM_FRAME)
            self.cycles['logic'] += 1
            if self.cycles['logic'] == 100:
                self._bus.emit(Event.QUIT, None)

    class MockDisplay(Display):
        """Display stub that counts flips and hands out a no-op draw context."""
        def __init__(self):
            self.flipped = 0

        def draw_context(self):
            return TestSceneManager.MockDrawContext()

        def resolution(self, width, height):
            pass

        def flip(self):
            self.flipped += 1

    def create_scene(self):
        """Scene factory passed to GameObject; keeps a reference for the asserts."""
        self.scene = self.MockScene()
        return self.scene

    def setUp(self):
        self.display = self.MockDisplay()
        # Careful here! A Scene must not be instantiated before GameObject,
        # because GameObject defines a bus while the Scene asks for one.
        self.game_object = GameObject(self.display, self.create_scene)

    def test_gameloop(self):
        self.game_object.gameloop()
        self.assertEqual(self.scene.cycles, {
            'logic': 100,
            'render': 100
        })
        self.assertEqual(self.display.flipped, 100)
if __name__ == '__main__':
unittest.main()
|
import unittest
from guiengine import *
def initpygame():
pygame.init()
pygame.display.set_mode((800, 600))
class TestEventBus(unittest.TestCase):
def gen_cb(self, number):
def cb(data):
if data == 1:
self.called[number] = True
return cb
def setUp(self):
self.bus = EventBus()
self.called = {}
def tearDown(self):
EventBus.active(None)
def test_emission(self):
self.bus.on('event_1', self.gen_cb(1))
self.bus.on('event_1', self.gen_cb(2))
self.bus.on('event_2', self.gen_cb(3))
self.bus.emit('event_1', 1)
self.bus.emit('event_2', 1)
self.assertEqual(self.called.get(1), True)
self.assertEqual(self.called.get(2), True)
self.assertEqual(self.called.get(3), True)
def test_disable(self):
cb1 = self.gen_cb(1)
cb2 = self.gen_cb(2)
cb3 = self.gen_cb(3)
self.bus.on('event_1', cb1)
self.bus.on('event_1', cb2)
self.bus.on('event_1', cb3)
self.bus.disable('event_1', cb2)
self.bus.emit('event_1', 1)
self.assertEqual(self.called.get(1), True)
self.assertEqual(self.called.get(2), None)
self.assertEqual(self.called.get(3), True)
self.called = {}
self.bus.disable('event_1')
self.bus.emit('event_1', 1)
self.assertEqual(self.called.get(1), None)
self.assertEqual(self.called.get(2), None)
self.assertEqual(self.called.get(3), None)
def test_disable_all(self):
self.bus.on('event_1', self.gen_cb(1))
self.bus.on('event_1', self.gen_cb(2))
self.bus.on('event_2', self.gen_cb(3))
self.bus.disable_all()
self.bus.emit('event_1', 1)
self.bus.emit('event_2', 1)
self.assertEqual(self.called.get(1), None)
self.assertEqual(self.called.get(2), None)
self.assertEqual(self.called.get(2), None)
def test_active(self):
inst = EventBus()
# EventBus.active(None)
# self.assertIsNone(EventBus.active())
EventBus.active(inst)
self.assertEqual(EventBus.active(), inst)
class TestBusProxy(unittest.TestCase):
def setUp(self):
self.bus = EventBus()
EventBus.active(self.bus)
self.proxy = BusProxy()
self.called = {}
def gen_cb(self, number):
def cb(data):
if data == 1:
self.called[number] = True
return cb
def test_emission(self):
self.bus.on('event_1', self.gen_cb(1))
self.proxy.on('event_1', self.gen_cb(2))
self.proxy.emit('event_1', 1)
self.assertTrue(self.called.get(1))
self.assertTrue(self.called.get(2))
def test_disable(self):
cb0 = self.gen_cb(1)
cb1 = self.gen_cb(2)
cb2 = self.gen_cb(3)
cb3 = self.gen_cb(4)
self.proxy.on('event_1', cb0)
self.proxy.on('event_1', cb3)
self.proxy.on('event_2', cb1)
self.bus.on('event_1', cb2)
self.proxy.disable('event_1', cb0)
self.proxy.emit('event_1', 1)
self.proxy.emit('event_2', 1)
self.assertIsNone(self.called.get(1))
self.assertTrue(self.called.get(2))
self.assertTrue(self.called.get(3))
self.assertTrue(self.called.get(4))
class TestOuterBus(unittest.TestCase):
class Listeners:
def __init__(self, outer):
"""
:type outer TestOuterBus
"""
self.outer = outer
self.moved = False
self.button_down = False
self.button_up = False
def onmousemove(self, data):
self.moved = True
self.outer.assertEqual(data, (50, 50))
def onbuttondown(self, data):
self.button_down = True
self.outer.assertEqual(data, (40, 60))
def onbuttonup(self, data):
self.button_up = True
self.outer.assertEqual(data, (40, 60))
def setUp(self):
self.outer_bus = OuterBus()
self.mousemove = pygame.event.Event(pygame.MOUSEMOTION, {
'pos': (50, 50),
'rel': (-10, 30),
'buttons': (False, False, False)
})
self.buttondown = pygame.event.Event(pygame.MOUSEBUTTONDOWN, {
'button': 1,
'pos': (40, 60)
})
self.buttonup = pygame.event.Event(pygame.MOUSEBUTTONUP, {
'button': 1,
'pos': (40, 60)
})
self.listeners = self.Listeners(self)
def _launch(self, listen_on_bus):
pygame.event.post(self.mousemove)
pygame.event.post(self.buttondown)
pygame.event.post(self.buttonup)
listen_on_bus.on(Event.MOUSEMOVE, self.listeners.onmousemove)
listen_on_bus.on(Event.MOUSEUP, self.listeners.onbuttonup)
listen_on_bus.on(Event.MOUSEDOWN, self.listeners.onbuttondown)
def test_emit_refresh(self):
self._launch(self.outer_bus)
self.outer_bus.refresh()
self.assertTrue(self.listeners.moved)
self.assertTrue(self.listeners.button_up)
self.assertTrue(self.listeners.button_down)
def test_redirect(self):
bus = EventBus()
self.outer_bus.redirect(bus)
self._launch(bus)
self.outer_bus.refresh()
self.assertTrue(self.listeners.moved)
self.assertTrue(self.listeners.button_up)
self.assertTrue(self.listeners.button_down)
class TestMouseAware(unittest.TestCase):
"""
TODO: Testar MouseAware... ou não...
"""
def test_nothing(self):
pass
class TestResourceBank(unittest.TestCase):
def setUp(self):
initpygame()
self.bank = ResourceBank.instance()
self.paths = ['resources/Cburnett V2 improved/PNGs/square brown dark_png.png',
'resources/Cburnett V2 improved/PNGs/square brown light_png.png']
def test_instance(self):
self.assertEqual(self.bank, ResourceBank.instance())
def test_image(self):
self.assertIsInstance(self.bank.image(self.paths[0]), pygame.Surface)
def test_sound(self):
sound = self.bank.sound('Music/Music.ogg')
self.assertIsInstance(sound, pygame.mixer.Sound)
def test_font(self):
font = self.bank.font(None, 12)
self.assertIsInstance(font, pygame.font.Font)
def test_caching(self):
self.assertEqual(self.bank.image(self.paths[0]), self.bank.image(self.paths[0]))
self.assertNotEqual(self.bank.image(self.paths[1]),
self.bank.image(self.paths[1], cached=False))
class TestImage(unittest.TestCase):
def setUp(self):
initpygame()
self.image = Image('resources/Cburnett V2 improved/PNGs/square brown dark_png.png')
def test_scale(self):
old_w = self.image.width
old_h = self.image.height
new_rf = self.image.scale(2)
self.assertEqual(self.image.width, old_w * 2)
self.assertEqual(self.image.height, old_h * 2)
self.assertEqual(new_rf, self.image)
class TestText(unittest.TestCase):
def setUp(self):
self.c1 = 'Hola'
self.c2 = 'Adios'
self.txt = Text(self.c1, 12, None, (0, 0, 0), (255, 255, 255))
def test_to_surface(self):
# Deve retornar a mesma várias vezes, a não ser quando conteúdo ou cor mudar
s1 = self.txt.to_surface()
s2 = self.txt.to_surface()
self.txt.content(self.c2)
s3 = self.txt.to_surface()
s4 = self.txt.to_surface()
self.txt.color((0, 0, 255))
s5 = self.txt.to_surface()
s6 = self.txt.to_surface()
self.assertIs(s1, s2)
self.assertIsNot(s1, s3)
self.assertIs(s3, s4)
self.assertIsNot(s3, s5)
self.assertIs(s5, s6)
class TestRootDrawContext(unittest.TestCase):
def setUp(self):
self.ctx = RootDrawContext(Surface((500, 500)))
self.img = Image('resources/Cburnett V2 improved/PNGs/square brown dark_png.png') \
.scale(1 / 10)
def test_blit(self):
rect = self.ctx.blit(self.img, (30, 30))
self.assertEqual(rect.x, 30)
self.assertEqual(rect.y, 30)
class TestDrawContext(unittest.TestCase):
def setUp(self):
initpygame()
self.root = RootDrawContext(Surface((500, 500)))
self.img = Image('resources/Cburnett V2 improved/PNGs/square brown dark_png.png') \
.scale(1 / 10)
def test_sub_blit(self):
sub = self.root.sub((40, 40)).sub((60, 60))
rect = sub.blit(self.img, (50, 50))
self.assertEqual(rect.x, 40 + 60 + 50)
self.assertEqual(rect.y, 40 + 60 + 50)
class TestSound(unittest.TestCase):
def setUp(self):
self.sound = Sound('Music/Music.ogg')
def test_not_throws(self):
self.sound.play(-1).stop().play(0).play(0).play(3).stop()
class TestEmptySound(unittest.TestCase):
def test_nothing(self):
EmptySound().play(-1).play(0).stop().play(2).play(3).stop()
class TestRenderizable(unittest.TestCase):
def setUp(self):
self.ren = Renderizable((10, 20))
def test_bounds(self):
self.assertEqual(self.ren.bounds.x, 10)
self.assertEqual(self.ren.bounds.y, 20)
self.assertEqual(self.ren.bounds.width, 0)
self.assertEqual(self.ren.bounds.height, 0)
def test_bus(self):
self.assertIsInstance(self.ren._bus, BusProxy)
class TestFigureNode(unittest.TestCase):
class MockDrawContext(DrawContext):
def __init__(self):
self.blitted = False
def blit(self, imagelike, xy):
if xy == (10, 20):
self.blitted = True
def setUp(self):
self.fig = FigureNode((10, 20), Image(
'resources/Cburnett V2 improved/PNGs/square brown dark_png.png'
).scale(1 / 10))
def test_update_render(self):
mock = self.MockDrawContext()
self.fig.update_render(mock, 0.01)
self.assertTrue(mock.blitted)
class TestLayer(unittest.TestCase):
class MockNode(Renderizable):
def __init__(self, bounds):
super().__init__(bounds.topleft)
self.logic = False
self.render = False
self.destroyed = False
self.bounds = bounds
def update_logic(self, dt):
self.logic = True
def update_render(self, draw_context: DrawContext, dt):
self.render = True
def destroy(self):
self.destroyed = True
def setUp(self):
self.layer = Layer((10, 10))
self.c1 = TestLayer.MockNode(Rect((10, 10), (30, 40)))
self.c2 = TestLayer.MockNode(Rect((20, 10), (30, 40)))
self.layer._add_child(self.c1)
self.layer._add_child(self.c2)
def test_update_logic(self):
self.layer.update_logic(0.01)
self.assertTrue(self.c1.logic)
self.assertTrue(self.c2.logic)
def test_update_render(self):
self.layer.update_render(RootDrawContext(Surface((10, 10))), 0.01)
self.assertTrue(self.c1.render)
self.assertTrue(self.c2.render)
self.assertEqual(self.layer.bounds, Rect((10, 10), (40, 40)))
def test_remove_child(self):
self.layer._remove_child(self.c2)
self.layer.update_logic(0.01)
self.assertTrue(self.c1.logic)
self.assertFalse(self.c2.logic)
def test_destroy(self):
self.layer.destroy()
self.assertTrue(self.c1.destroyed)
self.assertTrue(self.c2.destroyed)
class TestScene(unittest.TestCase):
def setUp(self):
self.scene = Scene()
def test_bgm(self):
sound = Sound('Music/Music.ogg')
self.scene._bgm(sound)
class TestSceneManager(unittest.TestCase):
class MockDrawContext(DrawContext):
def __init__(self):
pass
def sub(self, origin):
pass
def blit(self, imagelike, xy: tuple):
pass
def circle(self, center, radius):
pass
def line(self, xy1, xy2):
pass
def fill(self, color):
pass
class MockScene(Scene):
def __init__(self, outer):
super().__init__()
self.outer = outer
def update_logic(self, dt):
self.outer.logic += 1
# Mesmo com esse evento, update_render ainda será chamado
# porque a troca de cenas é "lazy" (só acontece quando dou tick)
self._bus.emit(Event.SCENE_CHANGE, lambda: self.outer.second_scene)
def update_render(self, draw_context: DrawContext, dt):
self.outer.render += 1
def destroy(self):
self.outer.destroyed = True
class SecondMockScene(Scene):
def __init__(self, outer):
self.outer = outer
def update_logic(self, dt):
self.outer.logic -= 1
def update_render(self, draw_context: DrawContext, dt):
self.outer.render -= 1
def setUp(self):
self.ctx = self.MockDrawContext()
self.bus = EventBus()
EventBus.active(self.bus)
self.scene = self.MockScene(self)
self.second_scene = self.SecondMockScene(self)
self.mgr = SceneManager(self.ctx, self.bus, lambda: self.scene)
self.logic = 0
self.render = 0
# Marca quando a MockScene é destruída, momento no qual
# a SecondMockScene deve substituí-la
self.destroyed = False
def test_tick(self):
self.mgr.tick(0.01)
self.assertEqual(self.logic, 1)
self.assertEqual(self.render, 1)
self.mgr.tick(0.01)
self.assertTrue(self.destroyed)
self.assertEqual(self.logic, 0)
self.assertEqual(self.render, 0)
class TestGameObject(unittest.TestCase):
class MockScene(Scene):
def __init__(self):
super().__init__()
self.cycles = {
'logic': 0,
'render': 0
}
def update_render(self, draw_context: DrawContext, dt):
self.cycles['render'] += 1
def update_logic(self, dt):
self._bus.emit(Event.REQ_ANIM_FRAME)
self.cycles['logic'] += 1
if self.cycles['logic'] == 100:
self._bus.emit(Event.QUIT, None)
class MockDisplay(Display):
def __init__(self):
self.flipped = 0
def draw_context(self):
return TestSceneManager.MockDrawContext()
def resolution(self, width, height):
pass
def flip(self):
self.flipped += 1
def create_scene(self):
self.scene = self.MockScene()
return self.scene
def setUp(self):
self.display = self.MockDisplay()
# Atenção aqui ! Não posso instanciar uma Scene antes de GameObject,
# porque este define um bus, enquanto a outra pede um bus
self.game_object = GameObject(self.display, self.create_scene)
def test_gameloop(self):
self.game_object.gameloop()
self.assertEqual(self.scene.cycles, {
'logic': 100,
'render': 100
})
self.assertEqual(self.display.flipped, 100)
if __name__ == '__main__':
unittest.main()
|
pt
| 0.9725
|
# EventBus.active(None) # self.assertIsNone(EventBus.active()) :type outer TestOuterBus TODO: Testar MouseAware... ou não... # Deve retornar a mesma várias vezes, a não ser quando conteúdo ou cor mudar # Mesmo com esse evento, update_render ainda será chamado # porque a troca de cenas é "lazy" (só acontece quando dou tick) # Marca quando a MockScene é destruída, momento no qual # a SecondMockScene deve substituí-la # Atenção aqui ! Não posso instanciar uma Scene antes de GameObject, # porque este define um bus, enquanto a outra pede um bus
| 2.795155
| 3
|
homeassistant/components/notify/__init__.py
|
TastyPi/home-assistant
| 13
|
6628222
|
"""
Provides functionality to notify people.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/notify/
"""
import logging
import os
from functools import partial
import voluptuous as vol
import homeassistant.bootstrap as bootstrap
import homeassistant.helpers.config_validation as cv
from homeassistant.config import load_yaml_config_file
from homeassistant.const import CONF_NAME, CONF_PLATFORM
from homeassistant.helpers import config_per_platform
from homeassistant.util import slugify
_LOGGER = logging.getLogger(__name__)
# Platform specific data
ATTR_DATA = 'data'
# Text to notify user of
ATTR_MESSAGE = 'message'
# Target of the notification (user, device, etc)
ATTR_TARGET = 'target'
# Title of notification
ATTR_TITLE = 'title'
ATTR_TITLE_DEFAULT = "Home Assistant"
DOMAIN = 'notify'
SERVICE_NOTIFY = 'notify'
PLATFORM_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): cv.string,
vol.Optional(CONF_NAME): cv.string,
}, extra=vol.ALLOW_EXTRA)
NOTIFY_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_TITLE): cv.template,
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DATA): dict,
})
def send_message(hass, message, title=None, data=None):
    """Send a notification message.

    Builds the service payload from the given message and the optional
    title/data, then calls the ``notify.notify`` service on `hass`.
    """
    service_data = {ATTR_MESSAGE: message}
    if title is not None:
        service_data[ATTR_TITLE] = title
    if data is not None:
        service_data[ATTR_DATA] = data
    hass.services.call(DOMAIN, SERVICE_NOTIFY, service_data)
def setup(hass, config):
    """Setup the notify services.

    For each configured notify platform: load its implementation, obtain a
    service object, and register one ``notify.<name>`` service — plus one
    service per named target when the platform exposes ``targets``.
    Returns True if at least one platform was set up successfully.
    """
    success = False
    descriptions = load_yaml_config_file(
        os.path.join(os.path.dirname(__file__), 'services.yaml'))
    # Maps slugified per-target service name -> platform-specific target,
    # shared with every handler through the closure below.
    targets = {}
    for platform, p_config in config_per_platform(config, DOMAIN):
        notify_implementation = bootstrap.prepare_setup_platform(
            hass, config, DOMAIN, platform)
        if notify_implementation is None:
            _LOGGER.error("Unknown notification service specified")
            continue
        notify_service = notify_implementation.get_service(hass, p_config)
        if notify_service is None:
            _LOGGER.error("Failed to initialize notification service %s",
                          platform)
            continue
        def notify_message(notify_service, call):
            """Handle sending notification message service calls."""
            kwargs = {}
            message = call.data[ATTR_MESSAGE]
            title = call.data.get(ATTR_TITLE)
            if title:
                # message/title are templates; attach hass before rendering.
                title.hass = hass
                kwargs[ATTR_TITLE] = title.render()
            # A per-target service implies its fixed target; otherwise fall
            # back to an explicit target from the call data, if any.
            if targets.get(call.service) is not None:
                kwargs[ATTR_TARGET] = [targets[call.service]]
            elif call.data.get(ATTR_TARGET) is not None:
                kwargs[ATTR_TARGET] = call.data.get(ATTR_TARGET)
            message.hass = hass
            kwargs[ATTR_MESSAGE] = message.render()
            kwargs[ATTR_DATA] = call.data.get(ATTR_DATA)
            notify_service.send_message(**kwargs)
        # Bind the current notify_service now, avoiding the late-binding
        # closure pitfall inside this loop.
        service_call_handler = partial(notify_message, notify_service)
        if hasattr(notify_service, 'targets'):
            platform_name = (p_config.get(CONF_NAME) or platform)
            for name, target in notify_service.targets.items():
                target_name = slugify('{}_{}'.format(platform_name, name))
                targets[target_name] = target
                hass.services.register(DOMAIN, target_name,
                                       service_call_handler,
                                       descriptions.get(SERVICE_NOTIFY),
                                       schema=NOTIFY_SERVICE_SCHEMA)
        platform_name = (p_config.get(CONF_NAME) or SERVICE_NOTIFY)
        platform_name_slug = slugify(platform_name)
        hass.services.register(
            DOMAIN, platform_name_slug, service_call_handler,
            descriptions.get(SERVICE_NOTIFY), schema=NOTIFY_SERVICE_SCHEMA)
        success = True
    return success
class BaseNotificationService(object):
    """An abstract class for notification services."""

    def send_message(self, message, **kwargs):
        """Send a message.

        kwargs can contain ATTR_TITLE to specify a title, and may also
        carry ATTR_TARGET / ATTR_DATA as populated by the notify service
        handler. Platform implementations must override this method.
        """
        raise NotImplementedError
|
"""
Provides functionality to notify people.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/notify/
"""
import logging
import os
from functools import partial
import voluptuous as vol
import homeassistant.bootstrap as bootstrap
import homeassistant.helpers.config_validation as cv
from homeassistant.config import load_yaml_config_file
from homeassistant.const import CONF_NAME, CONF_PLATFORM
from homeassistant.helpers import config_per_platform
from homeassistant.util import slugify
_LOGGER = logging.getLogger(__name__)
# Platform specific data
ATTR_DATA = 'data'
# Text to notify user of
ATTR_MESSAGE = 'message'
# Target of the notification (user, device, etc)
ATTR_TARGET = 'target'
# Title of notification
ATTR_TITLE = 'title'
ATTR_TITLE_DEFAULT = "Home Assistant"
DOMAIN = 'notify'
SERVICE_NOTIFY = 'notify'
PLATFORM_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): cv.string,
vol.Optional(CONF_NAME): cv.string,
}, extra=vol.ALLOW_EXTRA)
NOTIFY_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_TITLE): cv.template,
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DATA): dict,
})
def send_message(hass, message, title=None, data=None):
"""Send a notification message."""
info = {
ATTR_MESSAGE: message
}
if title is not None:
info[ATTR_TITLE] = title
if data is not None:
info[ATTR_DATA] = data
hass.services.call(DOMAIN, SERVICE_NOTIFY, info)
def setup(hass, config):
"""Setup the notify services."""
success = False
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
targets = {}
for platform, p_config in config_per_platform(config, DOMAIN):
notify_implementation = bootstrap.prepare_setup_platform(
hass, config, DOMAIN, platform)
if notify_implementation is None:
_LOGGER.error("Unknown notification service specified")
continue
notify_service = notify_implementation.get_service(hass, p_config)
if notify_service is None:
_LOGGER.error("Failed to initialize notification service %s",
platform)
continue
def notify_message(notify_service, call):
"""Handle sending notification message service calls."""
kwargs = {}
message = call.data[ATTR_MESSAGE]
title = call.data.get(ATTR_TITLE)
if title:
title.hass = hass
kwargs[ATTR_TITLE] = title.render()
if targets.get(call.service) is not None:
kwargs[ATTR_TARGET] = [targets[call.service]]
elif call.data.get(ATTR_TARGET) is not None:
kwargs[ATTR_TARGET] = call.data.get(ATTR_TARGET)
message.hass = hass
kwargs[ATTR_MESSAGE] = message.render()
kwargs[ATTR_DATA] = call.data.get(ATTR_DATA)
notify_service.send_message(**kwargs)
service_call_handler = partial(notify_message, notify_service)
if hasattr(notify_service, 'targets'):
platform_name = (p_config.get(CONF_NAME) or platform)
for name, target in notify_service.targets.items():
target_name = slugify('{}_{}'.format(platform_name, name))
targets[target_name] = target
hass.services.register(DOMAIN, target_name,
service_call_handler,
descriptions.get(SERVICE_NOTIFY),
schema=NOTIFY_SERVICE_SCHEMA)
platform_name = (p_config.get(CONF_NAME) or SERVICE_NOTIFY)
platform_name_slug = slugify(platform_name)
hass.services.register(
DOMAIN, platform_name_slug, service_call_handler,
descriptions.get(SERVICE_NOTIFY), schema=NOTIFY_SERVICE_SCHEMA)
success = True
return success
class BaseNotificationService(object):
"""An abstract class for notification services."""
def send_message(self, message, **kwargs):
"""Send a message.
kwargs can contain ATTR_TITLE to specify a title.
"""
raise NotImplementedError
|
en
| 0.727835
|
Provides functionality to notify people. For more details about this component, please refer to the documentation at https://home-assistant.io/components/notify/ # Platform specific data # Text to notify user of # Target of the notification (user, device, etc) # Title of notification Send a notification message. Setup the notify services. Handle sending notification message service calls. An abstract class for notification services. Send a message. kwargs can contain ATTR_TITLE to specify a title.
| 2.619767
| 3
|
napalm_ebayjunos/__init__.py
|
eBay/pynetforce
| 16
|
6628223
|
from junos_ebay import JunOsEbayDriver
|
from junos_ebay import JunOsEbayDriver
|
none
| 1
| 1.053681
| 1
|
|
ai_gym_train/gym_linefollower/__init__.py
|
michalnand/line_follower_rl
| 2
|
6628224
|
<filename>ai_gym_train/gym_linefollower/__init__.py
from gym_linefollower.linefollower_bot import *
from gym_linefollower.linefollower_env import *
from gym_linefollower.motors import *
from gym_linefollower.observation import *
from gym_linefollower.track_load import *
|
<filename>ai_gym_train/gym_linefollower/__init__.py
from gym_linefollower.linefollower_bot import *
from gym_linefollower.linefollower_env import *
from gym_linefollower.motors import *
from gym_linefollower.observation import *
from gym_linefollower.track_load import *
|
none
| 1
| 1.374831
| 1
|
|
api/migrations/versions/472bf293e0a1_.py
|
cclauss/Baobab
| 52
|
6628225
|
<gh_stars>10-100
"""empty message
Revision ID: <KEY>
Revises: ('79c61673a487', '7e8bffa88454')
Create Date: 2019-06-18 11:32:36.203013
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = ('79c61673a487', '7e8bffa88454')
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
"""empty message
Revision ID: <KEY>
Revises: ('79c61673a487', '7e8bffa88454')
Create Date: 2019-06-18 11:32:36.203013
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = ('79c61673a487', '7e8bffa88454')
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
en
| 0.251734
|
empty message Revision ID: <KEY> Revises: ('79c61673a487', '7e8bffa88454') Create Date: 2019-06-18 11:32:36.203013 # revision identifiers, used by Alembic.
| 1.011595
| 1
|
argparse_schema.py
|
FebruaryBreeze/argparse-schema
| 0
|
6628226
|
<reponame>FebruaryBreeze/argparse-schema
import argparse
import json
import sys
from pathlib import Path
from typing import Any, Optional, Sequence, Union
class Kwargs:
def __init__(self):
self.type = None
self.default: Any = None
self.required: bool = False
self.help: Optional[str] = None
self.action: Optional[str] = None
self.choices: Optional[list] = None
def parse(schema: Union[dict, str, Path], args: Optional[Sequence[str]] = None) -> dict:
if not isinstance(schema, dict):
with open(str(schema)) as f:
schema: dict = json.load(f)
assert 'type' in schema and schema['type'] == 'object'
assert 'properties' in schema
required_set = set(schema.get('required', []))
type_map = {
'string': str,
'integer': int,
'number': float,
'boolean': bool
}
parser = argparse.ArgumentParser(description=schema.get('description'))
for name, value in schema.get('properties', {}).items():
assert isinstance(value, dict)
kwargs = Kwargs()
kwargs.default = value.get('default')
kwargs.help = value.get('description')
kwargs.required = name in required_set
if kwargs.default is not None:
kwargs.help = f'{kwargs.help}, [{kwargs.default}] in default'
if 'enum' in value:
enum_list = value['enum']
assert len(enum_list) > 0, "Enum List is Empty"
arg_type = type(enum_list[0])
assert all(arg_type is type(item) for item in enum_list), f"Items in [{enum_list}] with Different Types"
kwargs.type = arg_type
kwargs.choices = enum_list
else:
kwargs.type = type_map[value.get('type')]
del kwargs.choices
positional = value.get('positional')
if positional:
del kwargs.required
else:
name = f'--{name}'
if kwargs.type is bool:
assert not kwargs.default, "boolean have to be False in default"
kwargs.default = False
kwargs.action = 'store_true'
del kwargs.type
else:
del kwargs.action
parser.add_argument(name, **vars(kwargs))
return vars(parser.parse_args(args=args))
def main(): # pragma: no cover
schema_path = parse(schema={
'type': 'object',
'properties': {
'schema_path': {
'type': 'string',
'positional': True,
'description': 'argparse schema file path'
}
},
'required': [
'schema_path'
],
})['schema_path']
sys.argv[0] = 'YOUR-COMMAND'
print(f'Show help for schema file [{schema_path}]:')
parse(schema=schema_path, args=['-h'])
if __name__ == '__main__': # pragma: no cover
main()
|
import argparse
import json
import sys
from pathlib import Path
from typing import Any, Optional, Sequence, Union
class Kwargs:
def __init__(self):
self.type = None
self.default: Any = None
self.required: bool = False
self.help: Optional[str] = None
self.action: Optional[str] = None
self.choices: Optional[list] = None
def parse(schema: Union[dict, str, Path], args: Optional[Sequence[str]] = None) -> dict:
if not isinstance(schema, dict):
with open(str(schema)) as f:
schema: dict = json.load(f)
assert 'type' in schema and schema['type'] == 'object'
assert 'properties' in schema
required_set = set(schema.get('required', []))
type_map = {
'string': str,
'integer': int,
'number': float,
'boolean': bool
}
parser = argparse.ArgumentParser(description=schema.get('description'))
for name, value in schema.get('properties', {}).items():
assert isinstance(value, dict)
kwargs = Kwargs()
kwargs.default = value.get('default')
kwargs.help = value.get('description')
kwargs.required = name in required_set
if kwargs.default is not None:
kwargs.help = f'{kwargs.help}, [{kwargs.default}] in default'
if 'enum' in value:
enum_list = value['enum']
assert len(enum_list) > 0, "Enum List is Empty"
arg_type = type(enum_list[0])
assert all(arg_type is type(item) for item in enum_list), f"Items in [{enum_list}] with Different Types"
kwargs.type = arg_type
kwargs.choices = enum_list
else:
kwargs.type = type_map[value.get('type')]
del kwargs.choices
positional = value.get('positional')
if positional:
del kwargs.required
else:
name = f'--{name}'
if kwargs.type is bool:
assert not kwargs.default, "boolean have to be False in default"
kwargs.default = False
kwargs.action = 'store_true'
del kwargs.type
else:
del kwargs.action
parser.add_argument(name, **vars(kwargs))
return vars(parser.parse_args(args=args))
def main(): # pragma: no cover
schema_path = parse(schema={
'type': 'object',
'properties': {
'schema_path': {
'type': 'string',
'positional': True,
'description': 'argparse schema file path'
}
},
'required': [
'schema_path'
],
})['schema_path']
sys.argv[0] = 'YOUR-COMMAND'
print(f'Show help for schema file [{schema_path}]:')
parse(schema=schema_path, args=['-h'])
if __name__ == '__main__': # pragma: no cover
main()
|
en
| 0.478815
|
# pragma: no cover # pragma: no cover
| 2.784801
| 3
|
src/pythere/__main__.py
|
clint-lawrence/pythere
| 0
|
6628227
|
"""
1. Put you code into script/__main__.py
2. List any dependencies in script/requirements.txt (Optional)
3. Run "pythere user@remotehost script/"
Pythere bundles any files in the script folder and execute on remote host.
If script/requirements.txt exists, the listed dependencies will
be available. (Only pure python packages will be guaranteed to work)
"""
import argparse
import fabric
import zipapp
import shutil
import subprocess
import sys
import getpass
from pathlib import Path
def main():
parser = argparse.ArgumentParser()
parser.add_argument("remotehost", help="target machine to connect to")
parser.add_argument("script", help="python file/folder to run remotely")
parser.add_argument(
"--requirements", "-r", help="Requirements to bundle with script"
)
parser.add_argument(
"target_args",
nargs=argparse.REMAINDER,
help="arguments to pass to script.py when executing on the remotehost.",
)
args = parser.parse_args()
script_dir = Path(args.script)
assert script_dir.is_dir()
build_dir = Path.cwd() / "build"
executable = script_dir.with_suffix(".pyz")
clean(build_dir)
prepare(build_dir, script_dir, args.requirements)
build(build_dir, executable)
copy_and_run(executable, args.remotehost)
def clean(build_dir):
if build_dir.exists():
shutil.rmtree(build_dir)
def prepare(build_dir, script_dir, requirements):
shutil.copytree(script_dir, build_dir)
pip_args = [sys.executable, "-m", "pip", "install", "--target", build_dir]
if requirements:
pip_args.append("-r")
pip_args.append(args.requirements)
subprocess.run(pip_args)
else:
requirements_path = script_dir / "requirements.txt"
if requirements_path.exists():
pip_args.append("-r")
pip_args.append(str(requirements_path))
subprocess.run(pip_args)
def build(build_dir, executable):
zipapp.create_archive(build_dir, target=executable)
def copy_and_run(executable, remotehost):
user, host = remotehost.split("@")
print(user, host)
pw = getpass.getpass(f"Enter password for {remotehost}:")
connect_kwargs = {
"password": pw,
}
remote = fabric.Connection(host, user, connect_kwargs=connect_kwargs)
with remote:
remote.put(executable, "pythere_target.pyz")
remote.run("python pythere_target.pyz", pty=True)
if __name__ == "__main__":
main()
|
"""
1. Put you code into script/__main__.py
2. List any dependencies in script/requirements.txt (Optional)
3. Run "pythere user@remotehost script/"
Pythere bundles any files in the script folder and execute on remote host.
If script/requirements.txt exists, the listed dependencies will
be available. (Only pure python packages will be guaranteed to work)
"""
import argparse
import fabric
import zipapp
import shutil
import subprocess
import sys
import getpass
from pathlib import Path
def main():
parser = argparse.ArgumentParser()
parser.add_argument("remotehost", help="target machine to connect to")
parser.add_argument("script", help="python file/folder to run remotely")
parser.add_argument(
"--requirements", "-r", help="Requirements to bundle with script"
)
parser.add_argument(
"target_args",
nargs=argparse.REMAINDER,
help="arguments to pass to script.py when executing on the remotehost.",
)
args = parser.parse_args()
script_dir = Path(args.script)
assert script_dir.is_dir()
build_dir = Path.cwd() / "build"
executable = script_dir.with_suffix(".pyz")
clean(build_dir)
prepare(build_dir, script_dir, args.requirements)
build(build_dir, executable)
copy_and_run(executable, args.remotehost)
def clean(build_dir):
if build_dir.exists():
shutil.rmtree(build_dir)
def prepare(build_dir, script_dir, requirements):
shutil.copytree(script_dir, build_dir)
pip_args = [sys.executable, "-m", "pip", "install", "--target", build_dir]
if requirements:
pip_args.append("-r")
pip_args.append(args.requirements)
subprocess.run(pip_args)
else:
requirements_path = script_dir / "requirements.txt"
if requirements_path.exists():
pip_args.append("-r")
pip_args.append(str(requirements_path))
subprocess.run(pip_args)
def build(build_dir, executable):
zipapp.create_archive(build_dir, target=executable)
def copy_and_run(executable, remotehost):
user, host = remotehost.split("@")
print(user, host)
pw = getpass.getpass(f"Enter password for {remotehost}:")
connect_kwargs = {
"password": pw,
}
remote = fabric.Connection(host, user, connect_kwargs=connect_kwargs)
with remote:
remote.put(executable, "pythere_target.pyz")
remote.run("python pythere_target.pyz", pty=True)
if __name__ == "__main__":
main()
|
en
| 0.752616
|
1. Put you code into script/__main__.py 2. List any dependencies in script/requirements.txt (Optional) 3. Run "pythere user@remotehost script/" Pythere bundles any files in the script folder and execute on remote host. If script/requirements.txt exists, the listed dependencies will be available. (Only pure python packages will be guaranteed to work)
| 2.502644
| 3
|
Python/SonicController.py
|
johnmwright/GaragePi
| 3
|
6628228
|
import RPi.GPIO as GPIO
import time
class SonicController:
SPEED_OF_SOUND = 34000 #cm/s
def __init__(self, triggerPin, echoPin):
self.triggerPin = triggerPin
self.echoPin = echoPin
print("Initializing Ultrasonic Range Finder")
GPIO.setup(self.triggerPin, GPIO.OUT, pull_up_down = GPIO.PUD_DOWN)
GPIO.setup(self.echoPin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.output(self.triggerPin, False)
print("Waiting For Sensor To Settle")
time.sleep(2)
def _readDistanceOnce(self):
print(" Distance Measurement In Progress")
READING_TIMEOUT = 2 #sec
maxTime = time.time() + READING_TIMEOUT
GPIO.output(self.triggerPin, True)
time.sleep(0.00001)
GPIO.output(self.triggerPin, False)
pulse_start = time.time()
while GPIO.input(self.echoPin)==0 and pulse_start < maxTime:
pulse_start = time.time()
pulse_end = time.time()
while GPIO.input(self.echoPin)==1 and pulse_end < maxTime:
pulse_end = time.time()
if pulse_end > maxTime:
print(" PULSE READ TIMED OUT")
pulse_duration = pulse_end - pulse_start
roundtrip_duration = pulse_duration * self.SPEED_OF_SOUND
one_way_distance = roundtrip_duration/2
print(" Distance: {0:0.2f} cm".format(one_way_distance))
return one_way_distance
def readDistance(self):
#
# Take multiple readings in order to counter the affects of
# bad data due to non-realtime OS. Take a bunch of readings,
# throw out the min and max, then average the rest.
#
numReadingsToTake = 8
print(" Taking {} Distance Measurements".format(numReadingsToTake))
measurements = []
for x in range(0, numReadingsToTake):
thisReading = self._readDistanceOnce()
measurements.append(thisReading)
maxReading = max(measurements)
minReading = min(measurements)
measurements.remove(maxReading)
measurements.remove(minReading)
average = sum(measurements)/len(measurements)
print(" Average Distance: {0:0.2f} cm".format(average))
return average
def teardown(self):
print("Tearing down Ultrasonic Range Finder")
GPIO.output(self.triggerPin, False)
|
import RPi.GPIO as GPIO
import time
class SonicController:
SPEED_OF_SOUND = 34000 #cm/s
def __init__(self, triggerPin, echoPin):
self.triggerPin = triggerPin
self.echoPin = echoPin
print("Initializing Ultrasonic Range Finder")
GPIO.setup(self.triggerPin, GPIO.OUT, pull_up_down = GPIO.PUD_DOWN)
GPIO.setup(self.echoPin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.output(self.triggerPin, False)
print("Waiting For Sensor To Settle")
time.sleep(2)
def _readDistanceOnce(self):
print(" Distance Measurement In Progress")
READING_TIMEOUT = 2 #sec
maxTime = time.time() + READING_TIMEOUT
GPIO.output(self.triggerPin, True)
time.sleep(0.00001)
GPIO.output(self.triggerPin, False)
pulse_start = time.time()
while GPIO.input(self.echoPin)==0 and pulse_start < maxTime:
pulse_start = time.time()
pulse_end = time.time()
while GPIO.input(self.echoPin)==1 and pulse_end < maxTime:
pulse_end = time.time()
if pulse_end > maxTime:
print(" PULSE READ TIMED OUT")
pulse_duration = pulse_end - pulse_start
roundtrip_duration = pulse_duration * self.SPEED_OF_SOUND
one_way_distance = roundtrip_duration/2
print(" Distance: {0:0.2f} cm".format(one_way_distance))
return one_way_distance
def readDistance(self):
#
# Take multiple readings in order to counter the affects of
# bad data due to non-realtime OS. Take a bunch of readings,
# throw out the min and max, then average the rest.
#
numReadingsToTake = 8
print(" Taking {} Distance Measurements".format(numReadingsToTake))
measurements = []
for x in range(0, numReadingsToTake):
thisReading = self._readDistanceOnce()
measurements.append(thisReading)
maxReading = max(measurements)
minReading = min(measurements)
measurements.remove(maxReading)
measurements.remove(minReading)
average = sum(measurements)/len(measurements)
print(" Average Distance: {0:0.2f} cm".format(average))
return average
def teardown(self):
print("Tearing down Ultrasonic Range Finder")
GPIO.output(self.triggerPin, False)
|
en
| 0.842303
|
#cm/s #sec # # Take multiple readings in order to counter the affects of # bad data due to non-realtime OS. Take a bunch of readings, # throw out the min and max, then average the rest. #
| 3.246076
| 3
|
Build_Web_With_Flask/Building web applications with Flask_Code/chapter04/chapter04/ex2.py
|
abacuspix/NFV_project
| 0
|
6628229
|
<gh_stars>0
# coding:utf-8
from wtforms import Form, ValidationError
from wtforms import StringField, PasswordField
from wtforms.validators import Length, InputRequired
from werkzeug.datastructures import MultiDict
import re
def is_proper_username(form, field):
if not re.match(r"^\w+$", field.data):
msg = '%s should have any of these characters only: a-z0-9_' % field.name
raise ValidationError(msg)
class LoginForm(Form):
username = StringField(
u'Username:', [InputRequired(), is_proper_username, Length(min=3, max=40)])
password = PasswordField(
u'Password:', [InputRequired(), Length(min=5, max=12)])
@staticmethod
def validate_password(form, field):
data = field.data
if not re.findall('.*[a-z].*', data):
msg = '%s should have at least one lowercase character' % field.name
raise ValidationError(msg)
# has at least one uppercase character
if not re.findall('.*[A-Z].*', data):
msg = '%s should have at least one uppercase character' % field.name
raise ValidationError(msg)
# has at least one number
if not re.findall('.*[0-9].*', data):
msg = '%s should have at least one number' % field.name
raise ValidationError(msg)
# has at least one special character
if not re.findall('.*[^ a-zA-Z0-9].*', data):
msg = '%s should have at least one special character' % field.name
raise ValidationError(msg)
form = LoginForm({})
print form.validate()
print form.errors
|
# coding:utf-8
from wtforms import Form, ValidationError
from wtforms import StringField, PasswordField
from wtforms.validators import Length, InputRequired
from werkzeug.datastructures import MultiDict
import re
def is_proper_username(form, field):
if not re.match(r"^\w+$", field.data):
msg = '%s should have any of these characters only: a-z0-9_' % field.name
raise ValidationError(msg)
class LoginForm(Form):
username = StringField(
u'Username:', [InputRequired(), is_proper_username, Length(min=3, max=40)])
password = PasswordField(
u'Password:', [InputRequired(), Length(min=5, max=12)])
@staticmethod
def validate_password(form, field):
data = field.data
if not re.findall('.*[a-z].*', data):
msg = '%s should have at least one lowercase character' % field.name
raise ValidationError(msg)
# has at least one uppercase character
if not re.findall('.*[A-Z].*', data):
msg = '%s should have at least one uppercase character' % field.name
raise ValidationError(msg)
# has at least one number
if not re.findall('.*[0-9].*', data):
msg = '%s should have at least one number' % field.name
raise ValidationError(msg)
# has at least one special character
if not re.findall('.*[^ a-zA-Z0-9].*', data):
msg = '%s should have at least one special character' % field.name
raise ValidationError(msg)
form = LoginForm({})
print form.validate()
print form.errors
|
en
| 0.988085
|
# coding:utf-8 # has at least one uppercase character # has at least one number # has at least one special character
| 3.07591
| 3
|
pyclick/click_models/CM.py
|
gaudel/ranking_bandits
| 3
|
6628230
|
#
# Copyright (C) 2015 <NAME>
#
# Full copyright notice can be found in LICENSE.
#
from __future__ import division
from enum import Enum
from pyclick.click_models.ClickModel import ClickModel
from pyclick.click_models.Inference import MLEInference
from pyclick.click_models.Param import ParamMLE
from pyclick.click_models.ParamContainer import QueryDocumentParamContainer
__author__ = '<NAME>, <NAME>'
class CM(ClickModel):
"""
The cascade click model (CM) according to the following paper:
Craswell, Nick and Zoeter, Onno and Taylor, Michael and <NAME>.
An experimental comparison of click position-bias models.
Proceedings of WSDM, pages 87-94, 2008.
CM contains the set of attractiveness parameters,
which depend on a query and a document.
"""
PROB_MIN = 0.000001
"""The minimum probability for the cases, where the CM model cannot compute any probability."""
param_names = Enum('CMParamNames', 'attr')
"""The names of the CM parameters."""
def __init__(self):
self.params = {self.param_names.attr: QueryDocumentParamContainer(CMAttrMLE)}
self._inference = MLEInference()
def get_conditional_click_probs(self, search_session):
click_ranks = [rank for rank, click in enumerate(search_session.get_clicks()) if click]
first_click_rank = click_ranks[0] if len(click_ranks) else len(search_session.web_results)
click_probs = self.get_full_click_probs(search_session)
for rank, result in enumerate(search_session.web_results):
if rank <= first_click_rank:
if not result.click:
click_probs[rank] = 1 - click_probs[rank]
else:
click_probs[rank] = self.PROB_MIN
return click_probs
def get_full_click_probs(self, search_session):
session_params = self.get_session_params(search_session)
click_probs = []
exam = 1
for rank, result in enumerate(search_session.web_results):
attr = session_params[rank][self.param_names.attr].value()
click_prob = attr * exam
click_probs.append(click_prob)
exam *= 1 - attr
return click_probs
def predict_relevance(self, query, search_result):
return self.params[self.param_names.ctr].get(query, search_result).value()
class CMAttrMLE(ParamMLE):
"""
The attractiveness parameter of the CM model.
The value of the parameter is inferred using the MLE algorithm.
"""
def update(self, search_session, rank):
if not any(search_session.get_clicks()[:rank]):
self._numerator += search_session.web_results[rank].click
self._denominator += 1
|
#
# Copyright (C) 2015 <NAME>
#
# Full copyright notice can be found in LICENSE.
#
from __future__ import division
from enum import Enum
from pyclick.click_models.ClickModel import ClickModel
from pyclick.click_models.Inference import MLEInference
from pyclick.click_models.Param import ParamMLE
from pyclick.click_models.ParamContainer import QueryDocumentParamContainer
__author__ = '<NAME>, <NAME>'
class CM(ClickModel):
"""
The cascade click model (CM) according to the following paper:
Craswell, Nick and Zoeter, Onno and Taylor, Michael and <NAME>.
An experimental comparison of click position-bias models.
Proceedings of WSDM, pages 87-94, 2008.
CM contains the set of attractiveness parameters,
which depend on a query and a document.
"""
PROB_MIN = 0.000001
"""The minimum probability for the cases, where the CM model cannot compute any probability."""
param_names = Enum('CMParamNames', 'attr')
"""The names of the CM parameters."""
def __init__(self):
self.params = {self.param_names.attr: QueryDocumentParamContainer(CMAttrMLE)}
self._inference = MLEInference()
def get_conditional_click_probs(self, search_session):
click_ranks = [rank for rank, click in enumerate(search_session.get_clicks()) if click]
first_click_rank = click_ranks[0] if len(click_ranks) else len(search_session.web_results)
click_probs = self.get_full_click_probs(search_session)
for rank, result in enumerate(search_session.web_results):
if rank <= first_click_rank:
if not result.click:
click_probs[rank] = 1 - click_probs[rank]
else:
click_probs[rank] = self.PROB_MIN
return click_probs
def get_full_click_probs(self, search_session):
session_params = self.get_session_params(search_session)
click_probs = []
exam = 1
for rank, result in enumerate(search_session.web_results):
attr = session_params[rank][self.param_names.attr].value()
click_prob = attr * exam
click_probs.append(click_prob)
exam *= 1 - attr
return click_probs
def predict_relevance(self, query, search_result):
return self.params[self.param_names.ctr].get(query, search_result).value()
class CMAttrMLE(ParamMLE):
"""
The attractiveness parameter of the CM model.
The value of the parameter is inferred using the MLE algorithm.
"""
def update(self, search_session, rank):
if not any(search_session.get_clicks()[:rank]):
self._numerator += search_session.web_results[rank].click
self._denominator += 1
|
en
| 0.706594
|
# # Copyright (C) 2015 <NAME> # # Full copyright notice can be found in LICENSE. # The cascade click model (CM) according to the following paper: Craswell, Nick and Zoeter, Onno and Taylor, Michael and <NAME>. An experimental comparison of click position-bias models. Proceedings of WSDM, pages 87-94, 2008. CM contains the set of attractiveness parameters, which depend on a query and a document. The minimum probability for the cases, where the CM model cannot compute any probability. The names of the CM parameters. The attractiveness parameter of the CM model. The value of the parameter is inferred using the MLE algorithm.
| 2.454369
| 2
|
TestGame.py
|
maythetsan13/PythonExercises
| 0
|
6628231
|
<reponame>maythetsan13/PythonExercises
print("Hello")
print("may")
|
print("Hello")
print("may")
|
none
| 1
| 1.679951
| 2
|
|
src/sentry/runner/commands/exec.py
|
AlexWayfer/sentry
| 4
|
6628232
|
<filename>src/sentry/runner/commands/exec.py
"""
sentry.runner.commands.exec
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import six
import sys
import click
# If this changes, make sure to also update in the `__doc__`
SCRIPT_TEMPLATE = u"""\
%(header)s
try:
%(body)s
except Exception:
import traceback
traceback.print_exc()
raise ScriptError('Failed to execute script {!r}'.format(%(filename)r))
"""
@click.command(
name='exec', context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True,
)
)
@click.option('-c', default='', help='Read script from string.')
@click.argument('file', default=None, required=False)
def exec_(c, file):
"""
Execute a script.
Also compatible with hashbang `#!/usr/bin/env sentry exec`
For convenience, the following preample is attached to scripts:
\b
from sentry.runner import configure; configure()
from django.conf import settings
from sentry.models import *
Examples:
\b
$ sentry exec -c 'print(Project.objects.count())'
$ echo 'print(Project.objects.count())' | sentry exec
$ sentry exec something.py
Note: All scripts are assumed utf-8.
"""
# Can't have both a file and command, when passing both
# -c takes priority and rest is ignored. This mimics
# `python -c` behavior.
if c and file:
file = None
# If we specify neither, read from stdin
if not (c or file):
file = '-'
if file:
if file == '-':
file = '<string>'
c = click.get_text_stream('stdin').read()
else:
try:
with open(file, 'rb') as fp:
c = fp.read().decode('utf8')
except (IOError, OSError) as e:
raise click.ClickException(six.text_type(e))
else:
file = '<string>'
header = []
if 'from __future__' in c:
body = []
state = 0
for line in c.splitlines():
if line.startswith('from __future__'):
state = 1
elif line and not line.startswith('#', '"', "'") and state == 1:
state = 2
if state == 2:
body.append(line)
else:
header.append(line)
body = '\n'.join(body)
else:
header = []
body = c
if 'from sentry.runner import configure' not in c:
header.extend(
[
'from sentry.runner import configure; configure()',
'from django.conf import settings',
'from sentry.models import *',
]
)
header.append('class ScriptError(Exception): pass')
script = SCRIPT_TEMPLATE % {
# Need to reindent the code to fit inside the `try` block
'body': body.replace('\n', '\n' + (' ' * 4)),
'header': '\n'.join(header),
'filename': file,
}
# Chop off `exec` from `sys.argv` so scripts can handle
# this as exepcted.
sys.argv = sys.argv[1:]
# globals context
g = {
# Inject `__name__ = '__main__' for scripts
'__name__': '__main__',
'__file__': '<script>',
}
# we use globals as locals due to:
# http://stackoverflow.com/a/2906198/154651
six.exec_(compile(script, file, 'exec'), g, g)
|
<filename>src/sentry/runner/commands/exec.py
"""
sentry.runner.commands.exec
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import six
import sys
import click
# If this changes, make sure to also update in the `__doc__`
SCRIPT_TEMPLATE = u"""\
%(header)s
try:
%(body)s
except Exception:
import traceback
traceback.print_exc()
raise ScriptError('Failed to execute script {!r}'.format(%(filename)r))
"""
@click.command(
name='exec', context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True,
)
)
@click.option('-c', default='', help='Read script from string.')
@click.argument('file', default=None, required=False)
def exec_(c, file):
"""
Execute a script.
Also compatible with hashbang `#!/usr/bin/env sentry exec`
For convenience, the following preample is attached to scripts:
\b
from sentry.runner import configure; configure()
from django.conf import settings
from sentry.models import *
Examples:
\b
$ sentry exec -c 'print(Project.objects.count())'
$ echo 'print(Project.objects.count())' | sentry exec
$ sentry exec something.py
Note: All scripts are assumed utf-8.
"""
# Can't have both a file and command, when passing both
# -c takes priority and rest is ignored. This mimics
# `python -c` behavior.
if c and file:
file = None
# If we specify neither, read from stdin
if not (c or file):
file = '-'
if file:
if file == '-':
file = '<string>'
c = click.get_text_stream('stdin').read()
else:
try:
with open(file, 'rb') as fp:
c = fp.read().decode('utf8')
except (IOError, OSError) as e:
raise click.ClickException(six.text_type(e))
else:
file = '<string>'
header = []
if 'from __future__' in c:
body = []
state = 0
for line in c.splitlines():
if line.startswith('from __future__'):
state = 1
elif line and not line.startswith('#', '"', "'") and state == 1:
state = 2
if state == 2:
body.append(line)
else:
header.append(line)
body = '\n'.join(body)
else:
header = []
body = c
if 'from sentry.runner import configure' not in c:
header.extend(
[
'from sentry.runner import configure; configure()',
'from django.conf import settings',
'from sentry.models import *',
]
)
header.append('class ScriptError(Exception): pass')
script = SCRIPT_TEMPLATE % {
# Need to reindent the code to fit inside the `try` block
'body': body.replace('\n', '\n' + (' ' * 4)),
'header': '\n'.join(header),
'filename': file,
}
# Chop off `exec` from `sys.argv` so scripts can handle
# this as exepcted.
sys.argv = sys.argv[1:]
# globals context
g = {
# Inject `__name__ = '__main__' for scripts
'__name__': '__main__',
'__file__': '<script>',
}
# we use globals as locals due to:
# http://stackoverflow.com/a/2906198/154651
six.exec_(compile(script, file, 'exec'), g, g)
|
en
| 0.741611
|
sentry.runner.commands.exec ~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2016 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. # If this changes, make sure to also update in the `__doc__` \ %(header)s try: %(body)s except Exception: import traceback traceback.print_exc() raise ScriptError('Failed to execute script {!r}'.format(%(filename)r)) Execute a script. Also compatible with hashbang `#!/usr/bin/env sentry exec` For convenience, the following preample is attached to scripts: \b from sentry.runner import configure; configure() from django.conf import settings from sentry.models import * Examples: \b $ sentry exec -c 'print(Project.objects.count())' $ echo 'print(Project.objects.count())' | sentry exec $ sentry exec something.py Note: All scripts are assumed utf-8. # Can't have both a file and command, when passing both # -c takes priority and rest is ignored. This mimics # `python -c` behavior. # If we specify neither, read from stdin # Need to reindent the code to fit inside the `try` block # Chop off `exec` from `sys.argv` so scripts can handle # this as exepcted. # globals context # Inject `__name__ = '__main__' for scripts # we use globals as locals due to: # http://stackoverflow.com/a/2906198/154651
| 2.251971
| 2
|
solutions/python3/problem58.py
|
tjyiiuan/LeetCode
| 0
|
6628233
|
<filename>solutions/python3/problem58.py
# -*- coding: utf-8 -*-
"""
58. Length of Last Word
Given a string s consists of upper/lower-case alphabets and empty space characters ' ',
return the length of last word (last word means the last appearing word if we loop from left to right) in the string.
If the last word does not exist, return 0.
Note: A word is defined as a maximal substring consisting of non-space characters only.
"""
class Solution:
    """Solver for LeetCode 58: Length of Last Word."""

    def lengthOfLastWord(self, s: str) -> int:
        """Return the length of the last word in *s*, or 0 if none exists.

        Scans right-to-left: trailing spaces are skipped, then non-space
        characters are counted until the next space ends the word.
        """
        length = 0
        in_word = False
        for ch in reversed(s):
            if ch != " ":
                in_word = True
                length += 1
            elif in_word:
                # First space after counting started: the last word is done.
                return length
        return length
|
<filename>solutions/python3/problem58.py
# -*- coding: utf-8 -*-
"""
58. Length of Last Word
Given a string s consists of upper/lower-case alphabets and empty space characters ' ',
return the length of last word (last word means the last appearing word if we loop from left to right) in the string.
If the last word does not exist, return 0.
Note: A word is defined as a maximal substring consisting of non-space characters only.
"""
class Solution:
    def lengthOfLastWord(self, s: str) -> int:
        """Return the length of the last word in s, or 0 if there is none.

        Walks the string right-to-left so trailing spaces are skipped
        before counting begins.
        """
        last_word = False  # becomes True once a non-space char is seen
        count = 0
        ind = len(s) - 1
        while ind >= 0:
            char = s[ind]
            if char == " ":
                # A space seen after the word started terminates the scan.
                if last_word:
                    return count
            else:
                last_word = True
                count += 1
            ind -= 1
        return count
|
en
| 0.808311
|
# -*- coding: utf-8 -*- 58. Length of Last Word Given a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word (last word means the last appearing word if we loop from left to right) in the string. If the last word does not exist, return 0. Note: A word is defined as a maximal substring consisting of non-space characters only.
| 3.952168
| 4
|
RobotMbed/src/test/resources/function/microbit_text_join_test.py
|
KevinLiu1010/openroberta-lab
| 1
|
6628234
|
<reponame>KevinLiu1010/openroberta-lab<filename>RobotMbed/src/test/resources/function/microbit_text_join_test.py
import microbit
import random
import math
# Sentinel exceptions emitted by the OpenRoberta code generator to emulate
# 'break' / 'continue' across its generated loop constructs.
class BreakOutOfALoop(Exception): pass
class ContinueLoop(Exception): pass
timer1 = microbit.running_time()  # ms since board start, used as a timer base
item = "a"
def run():
    """Join the string forms of the listed values into the global `item`."""
    global timer1, item
    item = "".join(str(arg) for arg in ["sadf", "sdf"])
def main():
    """Entry point: execute run() and re-raise any failure unchanged."""
    try:
        run()
    except Exception as e:
        raise
if __name__ == "__main__":
    main()
|
import microbit
import random
import math
class BreakOutOfALoop(Exception): pass
class ContinueLoop(Exception): pass
timer1 = microbit.running_time()
item = "a"
def run():
global timer1, item
item = "".join(str(arg) for arg in ["sadf", "sdf"])
def main():
try:
run()
except Exception as e:
raise
if __name__ == "__main__":
main()
|
none
| 1
| 2.776121
| 3
|
|
tools/mytools/ARIA/src/py/aria/OrderedDict.py
|
fmareuil/Galaxy_test_pasteur
| 0
|
6628235
|
<reponame>fmareuil/Galaxy_test_pasteur
"""
ARIA -- Ambiguous Restraints for Iterative Assignment
A software for automated NOE assignment
Version 2.3
Copyright (C) <NAME>, <NAME>, <NAME>,
<NAME>, and <NAME>
All rights reserved.
NO WARRANTY. This software package is provided 'as is' without warranty of
any kind, expressed or implied, including, but not limited to the implied
warranties of merchantability and fitness for a particular purpose or
a warranty of non-infringement.
Distribution of substantively modified versions of this module is
prohibited without the explicit permission of the copyright holders.
$Author: bardiaux $
$Revision: 1.1.1.1 $
$Date: 2010/03/23 15:27:24 $
"""
## TODO: get rid of UserDict
UserDict = dict
class OrderedDict(UserDict):
    """A dict that tracks key insertion order in an explicit `order` list.

    keys() hands back the order list itself when one exists; values() and
    items() return lazy `map` objects over it (legacy interface preserved).
    """
    def __init__(self, order = None):
        UserDict.__init__(self)
        self.order = order
    def keys(self):
        # Prefer the explicit ordering; fall back to the dict's own keys.
        return self.order if self.order is not None else UserDict.keys(self)
    def values(self):
        return map(self.__getitem__, self.keys())
    def items(self):
        return map(lambda key: (key, self[key]), self.keys())
    def __setitem__(self, key, value):
        # Lazily create the order list; record each key only once.
        if self.order is None:
            self.order = []
        if key not in self.order:
            self.order.append(key)
        UserDict.__setitem__(self, key, value)
    def __delitem__(self, name):
        if self.order is not None and name in self.order:
            self.order.remove(name)
        UserDict.__delitem__(self, name)
|
"""
ARIA -- Ambiguous Restraints for Iterative Assignment
A software for automated NOE assignment
Version 2.3
Copyright (C) <NAME>, <NAME>, <NAME>,
<NAME>, and <NAME>
All rights reserved.
NO WARRANTY. This software package is provided 'as is' without warranty of
any kind, expressed or implied, including, but not limited to the implied
warranties of merchantability and fitness for a particular purpose or
a warranty of non-infringement.
Distribution of substantively modified versions of this module is
prohibited without the explicit permission of the copyright holders.
$Author: bardiaux $
$Revision: 1.1.1.1 $
$Date: 2010/03/23 15:27:24 $
"""
## TODO: get rid of UserDict
UserDict = dict
class OrderedDict(UserDict):
    """Dict subclass that remembers key insertion order in `self.order`.

    NOTE(review): under Python 3, values() and items() return lazy `map`
    objects (Python 2 returned lists) -- callers must wrap in list() if
    they need sequence behaviour.
    """
    def __init__(self, order = None):
        UserDict.__init__(self)
        # Optional pre-seeded key ordering; created lazily on first insert.
        self.order = order
    def keys(self):
        # Explicit ordering wins; otherwise defer to the plain dict view.
        if self.order is not None:
            return self.order
        else:
            return UserDict.keys(self)
    def values(self):
        # Default-arg binding (s=self) is a legacy closure idiom.
        return map(lambda k, s = self: s[k], self.keys())
    def items(self):
        return map(lambda k, s = self: (k, s[k]), self.keys())
    def __setitem__(self, key, value):
        if self.order is None:
            self.order = []
        if key not in self.order:
            # Record each key only once, on first insertion.
            self.order.append(key)
        UserDict.__setitem__(self, key, value)
    def __delitem__(self, name):
        if self.order is not None:
            if name in self.order:
                self.order.remove(name)
        UserDict.__delitem__(self, name)
|
en
| 0.78598
|
ARIA -- Ambiguous Restraints for Iterative Assignment A software for automated NOE assignment Version 2.3 Copyright (C) <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> All rights reserved. NO WARRANTY. This software package is provided 'as is' without warranty of any kind, expressed or implied, including, but not limited to the implied warranties of merchantability and fitness for a particular purpose or a warranty of non-infringement. Distribution of substantively modified versions of this module is prohibited without the explicit permission of the copyright holders. $Author: bardiaux $ $Revision: 1.1.1.1 $ $Date: 2010/03/23 15:27:24 $ ## TODO: get rid of UserDict
| 2.873028
| 3
|
Regular_expression/RE_check.py
|
waixd001/python_program_storage
| 0
|
6628236
|
# -*- coding: utf-8 -*-
import numpy as np

# Bit string to be checked against the transition table below.
code = "1111011000111110110"

# Transition table of the regular grammar: each row is (state, production).
# BUG FIX: np.array is a function and must be *called* with parentheses --
# the original `np.array[...]` subscripted it and raised a TypeError.
RE = np.array([['S', '0S'],
               ['S', '1A'],
               ['S', '0'],
               ['A', '1'],
               ['A', '1S'],
               ['A', '0B'],
               ['B', '1A'],
               ['B', '0B']])
print(RE)
|
# -*- coding: utf-8 -*-
import numpy as np

# Bit string to be checked against the transition table below.
code = "1111011000111110110"

# Transition table of the regular grammar: each row is (state, production).
# BUG FIX: np.array is a function and must be *called* with parentheses --
# the original `np.array[...]` subscripted it and raised a TypeError.
RE = np.array([['S', '0S'],
               ['S', '1A'],
               ['S', '0'],
               ['A', '1'],
               ['A', '1S'],
               ['A', '0B'],
               ['B', '1A'],
               ['B', '0B']])
print(RE)
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 3.109059
| 3
|
setup.py
|
caramdache/data-importer
| 0
|
6628237
|
# encoding: utf-8
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import data_importer
def readme():
    """Return the long description: README.md converted to reST via pandoc,
    falling back to a hard-coded summary if conversion or reading fails."""
    try:
        # Best effort: pandoc's exit status is deliberately ignored; a
        # missing README.rst is caught by the except below.
        os.system('pandoc --from=markdown --to=rst README.md -o README.rst')
        with open('README.rst') as f:
            return f.read()
    except Exception:
        return '''**Django Data Importer** is a tool which allow you to transform easily a CSV, XML, XLS and XLSX file into a python object or a django model instance. It is based on the django-style declarative model.'''
class PyTest(TestCommand):
    # Custom `python setup.py test` command that delegates to pytest.
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = ['data_importer', 'tests', '--cov=data_importer', '-vrsx']
        self.test_suite = True
    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)
setup(
    name='data-importer',
    url='https://github.com/valdergallo/data-importer',
    download_url='https://github.com/valdergallo/data-importer/tarball/{0!s}/'.format(data_importer.__version__),
    author="valdergallo",
    author_email='<EMAIL>',
    keywords='Django Data Importer XLS XLSX CSV XML',
    description='Simple library to easily import data with Django',
    license='BSD',
    long_description=readme(),
    classifiers=[
        'Framework :: Django',
        'Operating System :: OS Independent',
        'Topic :: Utilities'
    ],
    version=data_importer.__version__,
    install_requires=[
        'django>=1.4',
        'openpyxl==2.4.0',
        'xlrd==1.0.0',
        'six==1.10.0',
    ],
    tests_require=[
        'pytest>=3.0.0',
        'pytest-django==2.9.1',
        'pytest-cov==2.3.1',
        'openpyxl==2.4.0',
        'xlrd>=1.0.0',
        'django>=1.4',
        'six==1.10.0',
        'mock==2.0.0',
    ],
    cmdclass={'test': PyTest},
    zip_safe=False,
    platforms='any',
    package_dir={'': '.'},
    packages=find_packages('.', exclude=['tests', '*.tests', 'docs', 'example', 'media']),
    package_data={
        '': ['templates/data_importer.html', 'templates/my_upload.html']
    }
)
|
# encoding: utf-8
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import data_importer
def readme():
try:
os.system('pandoc --from=markdown --to=rst README.md -o README.rst')
with open('README.rst') as f:
return f.read()
except Exception:
return '''**Django Data Importer** is a tool which allow you to transform easily a CSV, XML, XLS and XLSX file into a python object or a django model instance. It is based on the django-style declarative model.'''
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['data_importer', 'tests', '--cov=data_importer', '-vrsx']
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='data-importer',
url='https://github.com/valdergallo/data-importer',
download_url='https://github.com/valdergallo/data-importer/tarball/{0!s}/'.format(data_importer.__version__),
author="valdergallo",
author_email='<EMAIL>',
keywords='Django Data Importer XLS XLSX CSV XML',
description='Simple library to easily import data with Django',
license='BSD',
long_description=readme(),
classifiers=[
'Framework :: Django',
'Operating System :: OS Independent',
'Topic :: Utilities'
],
version=data_importer.__version__,
install_requires=[
'django>=1.4',
'openpyxl==2.4.0',
'xlrd==1.0.0',
'six==1.10.0',
],
tests_require=[
'pytest>=3.0.0',
'pytest-django==2.9.1',
'pytest-cov==2.3.1',
'openpyxl==2.4.0',
'xlrd>=1.0.0',
'django>=1.4',
'six==1.10.0',
'mock==2.0.0',
],
cmdclass={'test': PyTest},
zip_safe=False,
platforms='any',
package_dir={'': '.'},
packages=find_packages('.', exclude=['tests', '*.tests', 'docs', 'example', 'media']),
package_data={
'': ['templates/data_importer.html', 'templates/my_upload.html']
}
)
|
en
| 0.892281
|
# encoding: utf-8 **Django Data Importer** is a tool which allow you to transform easily a CSV, XML, XLS and XLSX file into a python object or a django model instance. It is based on the django-style declarative model. # import here, cause outside the eggs aren't loaded
| 1.950457
| 2
|
posts/tests.py
|
je-ss-y/Insta-memories
| 0
|
6628238
|
from django.test import TestCase
from .models import Image,Profile,Comment
from django.contrib.auth.models import User
# Create your tests here.
class ImageTestClass(TestCase):
    """Unit tests for the Image model."""

    def setUp(self):
        """Create a user and an (unsaved) Image instance for each test."""
        self.user = User.objects.create(username='jessy')
        self.image = Image(
            image='https://www.italymagazine.com/sites/default/files/styles/624xauto/public/feature-story/leader/bolzano-lead.jpg?itok=SsNNvkdk',
            photoname='person',
            caption='hello',
            pub_date='2019-9-2',
        )

    def test_instance(self):
        """Objects built in setUp are instances of their model classes."""
        # BUG FIX: isinstance() takes (object, class); the original called
        # it with a single, non-existent attribute (self.image.Image).
        self.assertTrue(isinstance(self.image, Image))
        self.assertTrue(isinstance(self.user, User))

    def save_instance(self):
        """Saving the image persists it to the database.

        NOTE(review): this name lacks a `test_` prefix, so the runner will
        not discover it automatically -- confirm whether that is intended.
        """
        self.image.save_image()  # assumes Image defines save_image() -- TODO confirm
        images = Image.objects.all()
        self.assertTrue(len(images) > 0)
class ProfileClass(TestCase):
    """Unit tests for the Profile model."""

    def setUp(self):
        """Create a user and an associated profile for each test."""
        # BUG FIX: the original referenced undefined names `jessica` and
        # `creating`; create a real User and pass concrete string values.
        self.user = User.objects.create(username='jessica')
        self.profile = Profile.objects.create(
            id=1,
            user=self.user,
            bio='creating',
            profile_photo='https://www.italymagazine.com/sites/default/files/styles/624xauto/public/feature-story/leader/bolzano-lead.jpg?itok=SsNNvkdk',
        )

    def test_instance(self):
        """The profile built in setUp is an instance of Profile."""
        # BUG FIX: isinstance() takes (object, class).
        self.assertTrue(isinstance(self.profile, Profile))

    def save_instance(self):
        """Saving the profile persists it to the database.

        NOTE(review): name lacks `test_` prefix, so it is not auto-run.
        """
        # BUG FIX: this class has no self.image -- exercise the profile.
        self.profile.save()
        profiles = Profile.objects.all()
        self.assertTrue(len(profiles) > 0)
|
from django.test import TestCase
from .models import Image,Profile,Comment
from django.contrib.auth.models import User
# Create your tests here.
class ImageTestClass(TestCase):
# set up method
def setUp(self):
self.user=User.objects.create(username='jessy')
# self.profile=Profile.objects.create(id=1,user=jessica,bio=creating,profile_photo="")
self.image=Image(image='https://www.italymagazine.com/sites/default/files/styles/624xauto/public/feature-story/leader/bolzano-lead.jpg?itok=SsNNvkdk',photoname='person',caption='hello', pub_date='2019-9-2')
#testing instance
def test_instance(self):
self.assertTrue(isinstance(self.image.Image))
# self.assertTrue(isinstance(self.profile.Profile))
self.assertTrue(isinstance(self.user.User))
def save_instance(self):
self.image.save_image()
images=Image.objects.all()
self .assertTrue(len(images)>0)
class ProfileClass(TestCase):
# set up method
def setUp(self):
self.profile=Profile.objects.create(id=1,user=jessica,bio=creating,profile_photo="https://www.italymagazine.com/sites/default/files/styles/624xauto/public/feature-story/leader/bolzano-lead.jpg?itok=SsNNvkdk")
#testing instance
def test_instance(self):
self.assertTrue(isinstance(self.profile.Profile))
def save_instance(self):
self.image.save_image()
images=Image.objects.all()
self .assertTrue(len(images)>0)
|
en
| 0.500054
|
# Create your tests here. # set up method # self.profile=Profile.objects.create(id=1,user=jessica,bio=creating,profile_photo="") #testing instance # self.assertTrue(isinstance(self.profile.Profile)) # set up method #testing instance
| 2.48617
| 2
|
Savage Chickens/savage_chickens.py
|
CAVIND46016/Web-Comics-Scraping
| 5
|
6628239
|
<reponame>CAVIND46016/Web-Comics-Scraping
"""
Web scrapes the comic website and creates a pdf version of it.
"""
import urllib.request as urllib2
import http
import os
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from fpdf import FPDF
# <NAME> | <NAME> - Cartoons on Sticky Notes by <NAME>
COMIC_URL = "http://www.savagechickens.com/category/cartoons"
DIRNAME = os.path.dirname(__file__)
IMAGE_REPOSITORY = os.path.join(DIRNAME, 'images')
# Fixing the 'IncompleteRead' bug using http
# https://stackoverflow.com/questions/14149100/incompleteread-using-httplib
http.client.HTTPConnection._http_vsn = 10
http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
# firefox browser object
BROWSER = webdriver.Firefox()
def scrape(web_url, pdf, idx, pg_no):
    """Scrape one listing page of comics, append each image to `pdf`,
    then recurse into the previous-entries page until none remains.

    web_url -- page URL to load in the shared Firefox BROWSER
    pdf     -- FPDF document the downloaded images are appended to
    idx     -- running index used to name downloaded image files
    pg_no   -- running page counter (progress output only)
    Returns 0 when the chain ends or the page cannot be loaded.
    """
    try:
        BROWSER.set_page_load_timeout(200)
        BROWSER.get(web_url)
    except http.client.RemoteDisconnected:
        print("Error 404: {} not found.".format(web_url))
        return 0
    # Wait until the pagination element confirms the page has rendered.
    WebDriverWait(BROWSER, 200).until(EC.presence_of_element_located\
        ((By.ID, "pagination")))
    soup = BeautifulSoup(BROWSER.page_source, "html.parser")
    div_class_entry_content = soup.find_all("div", attrs={"class":"entry_content"})
    for img_tag in div_class_entry_content:
        img_src = img_tag.find("img")['src']
        img_name = os.path.join(IMAGE_REPOSITORY, "sc{}.jpg".format(idx))
        urllib2.urlretrieve(img_src, img_name)
        # One full A4 page (210 x 297 mm) per comic image.
        pdf.add_page()
        pdf.image(img_name, 0, 0, 210, 297)
        idx += 1
    print("Page no: {}".format(pg_no))
    pg_no += 1
    span_class_prev_entry = soup.find("span", attrs={"class":"previous-entries"})
    if not span_class_prev_entry:
        return 0
    prev_page_url = span_class_prev_entry.find("a")['href']
    #Recursive logic: follow the "previous entries" link to the next page.
    scrape(prev_page_url, pdf, idx, pg_no)
def main():
    """Entry-point for the function.

    Builds the PDF metadata, scrapes the comic archive starting at
    COMIC_URL, closes the shared browser and writes savage_chickens.pdf.
    """
    pdf = FPDF()
    pdf.set_display_mode('fullwidth')
    pdf.set_creator('<NAME>')
    pdf.set_author('<NAME>')
    scrape(COMIC_URL, pdf, idx=1, pg_no=1)
    BROWSER.quit()
    pdf.output("savage_chickens.pdf", "F")  # "F": write to a local file
    print("PDF created successfully.")
if __name__ == "__main__":
    main()
|
"""
Web scrapes the comic website and creates a pdf version of it.
"""
import urllib.request as urllib2
import http
import os
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from fpdf import FPDF
# <NAME> | <NAME> - Cartoons on Sticky Notes by <NAME>
COMIC_URL = "http://www.savagechickens.com/category/cartoons"
DIRNAME = os.path.dirname(__file__)
IMAGE_REPOSITORY = os.path.join(DIRNAME, 'images')
# Fixing the 'IncompleteRead' bug using http
# https://stackoverflow.com/questions/14149100/incompleteread-using-httplib
http.client.HTTPConnection._http_vsn = 10
http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
# firefox browser object
BROWSER = webdriver.Firefox()
def scrape(web_url, pdf, idx, pg_no):
"""
Web scraping logic
"""
try:
BROWSER.set_page_load_timeout(200)
BROWSER.get(web_url)
except http.client.RemoteDisconnected:
print("Error 404: {} not found.".format(web_url))
return 0
WebDriverWait(BROWSER, 200).until(EC.presence_of_element_located\
((By.ID, "pagination")))
soup = BeautifulSoup(BROWSER.page_source, "html.parser")
div_class_entry_content = soup.find_all("div", attrs={"class":"entry_content"})
for img_tag in div_class_entry_content:
img_src = img_tag.find("img")['src']
img_name = os.path.join(IMAGE_REPOSITORY, "sc{}.jpg".format(idx))
urllib2.urlretrieve(img_src, img_name)
pdf.add_page()
pdf.image(img_name, 0, 0, 210, 297)
idx += 1
print("Page no: {}".format(pg_no))
pg_no += 1
span_class_prev_entry = soup.find("span", attrs={"class":"previous-entries"})
if not span_class_prev_entry:
return 0
prev_page_url = span_class_prev_entry.find("a")['href']
#Recursive logic
scrape(prev_page_url, pdf, idx, pg_no)
def main():
"""
Entry-point for the function.
"""
pdf = FPDF()
pdf.set_display_mode('fullwidth')
pdf.set_creator('<NAME>')
pdf.set_author('<NAME>')
scrape(COMIC_URL, pdf, idx=1, pg_no=1)
BROWSER.quit()
pdf.output("savage_chickens.pdf", "F")
print("PDF created successfully.")
if __name__ == "__main__":
main()
|
en
| 0.780106
|
Web scrapes the comic website and creates a pdf version of it. # <NAME> | <NAME> - Cartoons on Sticky Notes by <NAME> # Fixing the 'IncompleteRead' bug using http # https://stackoverflow.com/questions/14149100/incompleteread-using-httplib # firefox browser object Web scraping logic #Recursive logic Entry-point for the function.
| 3.395773
| 3
|
octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py
|
buty4649/octavia
| 0
|
6628240
|
<reponame>buty4649/octavia
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from taskflow.types import failure
import tenacity
from octavia.api.drivers import utils as provider_utils
from octavia.common import constants
from octavia.common import data_models as o_data_models
from octavia.common import exceptions
from octavia.controller.worker.v2.tasks import network_tasks
from octavia.network import base as net_base
from octavia.network import data_models
from octavia.tests.common import constants as t_constants
import octavia.tests.unit.base as base
AMPHORA_ID = 7
COMPUTE_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
SUBNET_ID = uuidutils.generate_uuid()
NETWORK_ID = uuidutils.generate_uuid()
IP_ADDRESS = "172.24.41.1"
VIP = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID,
subnet_id=t_constants.MOCK_SUBNET_ID,
qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1)
VIP2 = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID2,
subnet_id=t_constants.MOCK_SUBNET_ID2,
qos_policy_id=t_constants.MOCK_QOS_POLICY_ID2)
LB = o_data_models.LoadBalancer(vip=VIP)
LB2 = o_data_models.LoadBalancer(vip=VIP2)
FIRST_IP = {"ip_address": IP_ADDRESS, "subnet_id": SUBNET_ID}
FIXED_IPS = [FIRST_IP]
INTERFACE = data_models.Interface(id=uuidutils.generate_uuid(),
compute_id=COMPUTE_ID, fixed_ips=FIXED_IPS,
port_id=PORT_ID)
AMPS_DATA = [o_data_models.Amphora(id=t_constants.MOCK_AMP_ID1,
vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID1,
vrrp_ip=t_constants.MOCK_VRRP_IP1),
o_data_models.Amphora(id=t_constants.MOCK_AMP_ID2,
vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID2,
vrrp_ip=t_constants.MOCK_VRRP_IP2)
]
UPDATE_DICT = {constants.TOPOLOGY: None}
_session_mock = mock.MagicMock()
class TestException(Exception):
    """Exception carrying an arbitrary payload, used to assert propagation."""

    def __init__(self, value):
        # Keep the payload so tests can inspect what was raised.
        self.value = value

    def __str__(self):
        # Mirror the payload's repr rather than the default message.
        return "{!r}".format(self.value)
@mock.patch('octavia.common.utils.get_network_driver')
class TestNetworkTasks(base.TestCase):
    def setUp(self):
        """Build the shared mocks and config used by every test.

        Creates both object-style mocks (db_*) and the dict-style
        amphora/load-balancer payloads the v2 tasks consume.
        """
        network_tasks.LOG = mock.MagicMock()
        self.db_amphora_mock = mock.MagicMock()
        self.db_load_balancer_mock = mock.MagicMock()
        self.vip_mock = mock.MagicMock()
        self.vip_mock.subnet_id = SUBNET_ID
        self.db_load_balancer_mock.vip = self.vip_mock
        self.db_load_balancer_mock.amphorae = []
        self.db_amphora_mock.id = AMPHORA_ID
        self.db_amphora_mock.compute_id = COMPUTE_ID
        self.db_amphora_mock.status = constants.AMPHORA_ALLOCATED
        self.boot_net_id = NETWORK_ID
        # Fixture-managed config: restored automatically after each test.
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group="controller_worker",
                    amp_boot_network_list=[self.boot_net_id])
        conf.config(group="networking", max_retries=1)
        self.amphora_mock = {constants.ID: AMPHORA_ID,
                             constants.COMPUTE_ID: COMPUTE_ID,
                             constants.LB_NETWORK_IP: IP_ADDRESS,
                             }
        self.load_balancer_mock = {
            constants.LOADBALANCER_ID: uuidutils.generate_uuid(),
            constants.VIP_SUBNET_ID: VIP.subnet_id,
            constants.VIP_PORT_ID: VIP.port_id,
            constants.VIP_ADDRESS: VIP.ip_address,
            constants.VIP_QOS_POLICY_ID: t_constants.MOCK_QOS_POLICY_ID1
        }
        conf = oslo_fixture.Config(cfg.CONF)
        conf.config(group="controller_worker",
                    amp_boot_network_list=[self.boot_net_id])
        super().setUp()
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
    @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
    def test_calculate_amphora_delta(self, mock_get_session, mock_lb_repo_get,
                                     mock_get_net_driver):
        """CalculateAmphoraDelta adds the member's network and removes the
        stale NIC, both with and without a supplied vrrp_port dict."""
        LB_ID = uuidutils.generate_uuid()
        DELETE_NETWORK_ID = uuidutils.generate_uuid()
        MEMBER_NETWORK_ID = uuidutils.generate_uuid()
        MEMBER_SUBNET_ID = uuidutils.generate_uuid()
        VRRP_PORT_ID = uuidutils.generate_uuid()
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        member_mock = mock.MagicMock()
        member_mock.subnet_id = MEMBER_SUBNET_ID
        pool_mock = mock.MagicMock()
        pool_mock.members = [member_mock]
        lb_mock = mock.MagicMock()
        lb_mock.pools = [pool_mock]
        lb_dict = {constants.LOADBALANCER_ID: LB_ID}
        amphora_dict = {constants.ID: AMPHORA_ID,
                        constants.COMPUTE_ID: COMPUTE_ID,
                        constants.VRRP_PORT_ID: VRRP_PORT_ID}
        vrrp_port_mock = mock.MagicMock()
        vrrp_port_mock.network_id = self.boot_net_id
        vrrp_port_dict = {constants.NETWORK_ID: self.boot_net_id}
        mock_subnet = mock.MagicMock()
        mock_subnet.network_id = MEMBER_NETWORK_ID
        # One plugged NIC to delete, one (boot net) to keep.
        nic1_delete_mock = mock.MagicMock()
        nic1_delete_mock.network_id = DELETE_NETWORK_ID
        nic2_keep_mock = mock.MagicMock()
        nic2_keep_mock.network_id = self.boot_net_id
        mock_lb_repo_get.return_value = lb_mock
        mock_driver.get_port.return_value = vrrp_port_mock
        mock_driver.get_subnet.return_value = mock_subnet
        mock_driver.get_plugged_networks.return_value = [nic1_delete_mock,
                                                         nic2_keep_mock]
        calc_amp_delta = network_tasks.CalculateAmphoraDelta()
        # Test vrrp_port_id is None
        result = calc_amp_delta.execute(lb_dict, amphora_dict, {})
        self.assertEqual(AMPHORA_ID, result[constants.AMPHORA_ID])
        self.assertEqual(COMPUTE_ID, result[constants.COMPUTE_ID])
        self.assertEqual(1, len(result[constants.ADD_NICS]))
        self.assertEqual(MEMBER_NETWORK_ID,
                         result[constants.ADD_NICS][0][constants.NETWORK_ID])
        self.assertEqual(1, len(result[constants.DELETE_NICS]))
        self.assertEqual(
            DELETE_NETWORK_ID,
            result[constants.DELETE_NICS][0][constants.NETWORK_ID])
        mock_driver.get_port.assert_called_once_with(VRRP_PORT_ID)
        mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID)
        mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
        # Test with vrrp_port_id
        mock_driver.reset_mock()
        result = calc_amp_delta.execute(lb_dict, amphora_dict, {},
                                        vrrp_port=vrrp_port_dict)
        self.assertEqual(AMPHORA_ID, result[constants.AMPHORA_ID])
        self.assertEqual(COMPUTE_ID, result[constants.COMPUTE_ID])
        self.assertEqual(1, len(result[constants.ADD_NICS]))
        self.assertEqual(MEMBER_NETWORK_ID,
                         result[constants.ADD_NICS][0][constants.NETWORK_ID])
        self.assertEqual(1, len(result[constants.DELETE_NICS]))
        self.assertEqual(
            DELETE_NETWORK_ID,
            result[constants.DELETE_NICS][0][constants.NETWORK_ID])
        # Passing vrrp_port skips the port lookup entirely.
        mock_driver.get_port.assert_not_called()
        mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID)
        mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
    @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
    def test_calculate_delta(self, mock_get_session, mock_get_lb,
                             mock_get_net_driver):
        """CalculateDelta produces per-amphora add/delete NIC deltas for
        each combination of pools, members and plugged networks."""
        mock_driver = mock.MagicMock()
        mock_get_lb.return_value = self.db_load_balancer_mock
        self.db_amphora_mock.to_dict.return_value = {
            constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID,
            constants.VRRP_PORT_ID: PORT_ID}
        mock_get_net_driver.return_value = mock_driver
        mock_driver.get_plugged_networks.return_value = [
            data_models.Interface(network_id=self.boot_net_id)]
        mock_driver.get_port.return_value = data_models.Port(
            network_id=self.boot_net_id)
        EMPTY = {}
        empty_deltas = {self.db_amphora_mock.id: data_models.Delta(
            amphora_id=AMPHORA_ID,
            compute_id=COMPUTE_ID,
            add_nics=[],
            delete_nics=[]).to_dict(recurse=True)}
        calc_delta = network_tasks.CalculateDelta()
        self.assertEqual(EMPTY,
                         calc_delta.execute(self.load_balancer_mock, {}))
        # Test with one amp and no pools, nothing plugged
        # Delta should be empty
        mock_driver.reset_mock()
        self.db_amphora_mock.load_balancer = self.db_load_balancer_mock
        self.db_load_balancer_mock.amphorae = [self.db_amphora_mock]
        self.db_load_balancer_mock.pools = []
        self.assertEqual(empty_deltas,
                         calc_delta.execute(self.load_balancer_mock, {}))
        mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
        # Pool mock should be configured explicitly for each test
        pool_mock = mock.MagicMock()
        self.db_load_balancer_mock.pools = [pool_mock]
        # Test with one amp and one pool but no members, nothing plugged
        # Delta should be empty
        pool_mock.members = []
        self.assertEqual(empty_deltas,
                         calc_delta.execute(self.load_balancer_mock, {}))
        # Test with one amp and one pool and one member, nothing plugged
        # Delta should be one additional subnet to plug
        mock_driver.reset_mock()
        member_mock = mock.MagicMock()
        member_mock.subnet_id = 1
        pool_mock.members = [member_mock]
        mock_driver.get_subnet.return_value = data_models.Subnet(id=2,
                                                                 network_id=3)
        ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                compute_id=self.db_amphora_mock.compute_id,
                                add_nics=[
                                    data_models.Interface(network_id=3)],
                                delete_nics=[]).to_dict(recurse=True)
        self.assertEqual({self.db_amphora_mock.id: ndm},
                         calc_delta.execute(self.load_balancer_mock, {}))
        vrrp_port_call = mock.call(PORT_ID)
        mock_driver.get_port.assert_has_calls([vrrp_port_call])
        self.assertEqual(1, mock_driver.get_port.call_count)
        member_subnet_call = mock.call(member_mock.subnet_id)
        mock_driver.get_subnet.assert_has_calls([member_subnet_call])
        self.assertEqual(1, mock_driver.get_subnet.call_count)
        # Test with one amp and one pool and one member, already plugged
        # Delta should be empty
        mock_driver.reset_mock()
        member_mock = mock.MagicMock()
        member_mock.subnet_id = 1
        pool_mock.members = [member_mock]
        mock_driver.get_plugged_networks.return_value = [
            data_models.Interface(network_id=3),
            data_models.Interface(network_id=self.boot_net_id)]
        self.assertEqual(empty_deltas,
                         calc_delta.execute(self.load_balancer_mock, {}))
        # Test with one amp and one pool and one member, wrong network plugged
        # Delta should be one network to add and one to remove
        mock_driver.reset_mock()
        member_mock = mock.MagicMock()
        member_mock.subnet_id = 1
        pool_mock.members = [member_mock]
        mock_driver.get_plugged_networks.return_value = [
            data_models.Interface(network_id=2),
            data_models.Interface(network_id=self.boot_net_id)]
        ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                compute_id=self.db_amphora_mock.compute_id,
                                add_nics=[
                                    data_models.Interface(network_id=3)],
                                delete_nics=[
                                    data_models.Interface(network_id=2)]
                                ).to_dict(recurse=True)
        self.assertEqual({self.db_amphora_mock.id: ndm},
                         calc_delta.execute(self.load_balancer_mock, {}))
        # Test with one amp and one pool and no members, one network plugged
        # Delta should be one network to remove
        mock_driver.reset_mock()
        pool_mock.members = []
        mock_driver.get_plugged_networks.return_value = [
            data_models.Interface(network_id=2),
            data_models.Interface(network_id=self.boot_net_id)
        ]
        ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                compute_id=self.db_amphora_mock.compute_id,
                                add_nics=[],
                                delete_nics=[
                                    data_models.Interface(network_id=2)]
                                ).to_dict(recurse=True)
        self.assertEqual({self.db_amphora_mock.id: ndm},
                         calc_delta.execute(self.load_balancer_mock, {}))
    def test_get_plumbed_networks(self, mock_get_net_driver):
        """GetPlumbedNetworks returns whatever the driver reports as
        plugged for the amphora's compute instance."""
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        mock_driver.get_plugged_networks.side_effect = [['blah']]
        net = network_tasks.GetPlumbedNetworks()
        self.assertEqual(['blah'], net.execute(self.amphora_mock))
        mock_driver.get_plugged_networks.assert_called_once_with(
            COMPUTE_ID)
    def test_plug_networks(self, mock_get_net_driver):
        """PlugNetworks plugs each NIC in the delta's add list; revert
        unplugs them, tolerating NetworkNotFound but re-raising others."""
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        def _interface(network_id):
            # Helper: single-entry NIC list for building deltas.
            return [data_models.Interface(network_id=network_id)]
        net = network_tasks.PlugNetworks()
        net.execute(self.amphora_mock, None)
        self.assertFalse(mock_driver.plug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute(self.amphora_mock, delta)
        self.assertFalse(mock_driver.plug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=_interface(1),
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute(self.amphora_mock, delta)
        mock_driver.plug_network.assert_called_once_with(COMPUTE_ID, 1)
        # revert
        net.revert(self.amphora_mock, None)
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=[]).to_dict(recurse=True)
        net.revert(self.amphora_mock, delta)
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=_interface(1),
                                  delete_nics=[]).to_dict(recurse=True)
        net.revert(self.amphora_mock, delta)
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = net_base.NetworkNotFound
        net.revert(self.amphora_mock, delta)  # No exception
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = TestException('test')
        self.assertRaises(TestException,
                          net.revert,
                          self.amphora_mock,
                          delta)
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
    def test_unplug_networks(self, mock_get_net_driver):
        """UnPlugNetworks unplugs each NIC in the delta's delete list and
        swallows both NetworkNotFound and generic driver failures."""
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        def _interface(network_id):
            # Helper: single-entry NIC list for building deltas.
            return [data_models.Interface(network_id=network_id)]
        net = network_tasks.UnPlugNetworks()
        net.execute(self.db_amphora_mock, None)
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute(self.amphora_mock, delta)
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=_interface(1)
                                  ).to_dict(recurse=True)
        net.execute(self.amphora_mock, delta)
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = net_base.NetworkNotFound
        net.execute(self.amphora_mock, delta)  # No exception
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        # Do a test with a general exception in case behavior changes
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = Exception()
        net.execute(self.amphora_mock, delta)  # No exception
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
    def test_get_member_ports(self, mock_get_net_driver):
        """GetMemberPorts inspects the amphora's plugged networks/ports and
        returns the member-facing ports, resolving their subnets."""
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        def _interface(port_id):
            # Helper: one-element interface list for the given port id.
            return [data_models.Interface(port_id=port_id)]
        # Default mocks: the VIP port and the plugged networks are queried.
        net_task = network_tasks.GetMemberPorts()
        net_task.execute(self.load_balancer_mock, self.amphora_mock)
        mock_driver.get_port.assert_called_once_with(t_constants.MOCK_PORT_ID)
        mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
        # Both returned ports are on NETWORK_ID; get_network is never called.
        mock_driver.reset_mock()
        net_task = network_tasks.GetMemberPorts()
        mock_driver.get_plugged_networks.return_value = _interface(1)
        mock_driver.get_port.side_effect = [
            data_models.Port(network_id=NETWORK_ID),
            data_models.Port(network_id=NETWORK_ID)]
        net_task.execute(self.load_balancer_mock, self.amphora_mock)
        self.assertEqual(2, mock_driver.get_port.call_count)
        self.assertFalse(mock_driver.get_network.called)
        # A port with fixed IPs is returned and its subnet is looked up.
        mock_driver.reset_mock()
        port_mock = mock.MagicMock()
        fixed_ip_mock = mock.MagicMock()
        fixed_ip_mock.subnet_id = 1
        port_mock.fixed_ips = [fixed_ip_mock]
        net_task = network_tasks.GetMemberPorts()
        mock_driver.get_plugged_networks.return_value = _interface(1)
        mock_driver.get_port.side_effect = [
            data_models.Port(network_id=NETWORK_ID), port_mock]
        ports = net_task.execute(self.load_balancer_mock, self.amphora_mock)
        mock_driver.get_subnet.assert_called_once_with(1)
        self.assertEqual([port_mock], ports)
    def test_handle_network_delta(self, mock_get_net_driver):
        """HandleNetworkDelta plugs added NICs (returning new port data
        keyed by amphora id) and unplugs deleted NICs, tolerating
        NetworkNotFound and generic unplug errors."""
        mock_net_driver = mock.MagicMock()
        self.db_amphora_mock.to_dict.return_value = {
            constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID}
        mock_get_net_driver.return_value = mock_net_driver
        nic1 = data_models.Interface()
        nic1.network_id = uuidutils.generate_uuid()
        nic2 = data_models.Interface()
        nic2.network_id = uuidutils.generate_uuid()
        interface1 = mock.MagicMock()
        interface1.port_id = uuidutils.generate_uuid()
        port1 = mock.MagicMock()
        port1.network_id = uuidutils.generate_uuid()
        fixed_ip = mock.MagicMock()
        fixed_ip.subnet_id = uuidutils.generate_uuid()
        port1.fixed_ips = [fixed_ip]
        subnet = mock.MagicMock()
        network = mock.MagicMock()
        # One NIC to add; three deletes so each scripted unplug outcome
        # (success, NetworkNotFound, generic Exception) is consumed.
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[nic1],
                                  delete_nics=[nic2, nic2, nic2]
                                  ).to_dict(recurse=True)
        mock_net_driver.plug_network.return_value = interface1
        mock_net_driver.get_port.return_value = port1
        mock_net_driver.get_network.return_value = network
        mock_net_driver.get_subnet.return_value = subnet
        mock_net_driver.unplug_network.side_effect = [
            None, net_base.NetworkNotFound, Exception]
        handle_net_delta_obj = network_tasks.HandleNetworkDelta()
        result = handle_net_delta_obj.execute(self.amphora_mock,
                                              delta)
        # The added NIC is plugged and fully resolved (port/network/subnet).
        mock_net_driver.plug_network.assert_called_once_with(
            self.db_amphora_mock.compute_id, nic1.network_id)
        mock_net_driver.get_port.assert_called_once_with(interface1.port_id)
        mock_net_driver.get_network.assert_called_once_with(port1.network_id)
        mock_net_driver.get_subnet.assert_called_once_with(fixed_ip.subnet_id)
        self.assertEqual({self.db_amphora_mock.id: [port1.to_dict()]}, result)
        mock_net_driver.unplug_network.assert_called_with(
            self.db_amphora_mock.compute_id, nic2.network_id)
        # Revert
        delta2 = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                   compute_id=self.db_amphora_mock.compute_id,
                                   add_nics=[nic1, nic1],
                                   delete_nics=[nic2, nic2, nic2]
                                   ).to_dict(recurse=True)
        # Revert of a failed flow is a no-op.
        mock_net_driver.unplug_network.reset_mock()
        handle_net_delta_obj.revert(
            failure.Failure.from_exception(Exception('boom')), None, None)
        mock_net_driver.unplug_network.assert_not_called()
        # Revert with no delta is also a no-op.
        mock_net_driver.unplug_network.reset_mock()
        handle_net_delta_obj.revert(None, None, None)
        mock_net_driver.unplug_network.assert_not_called()
        # Revert with a delta must not raise — presumably unplug failures
        # (the side_effect list is already exhausted here) are swallowed by
        # revert; confirm against the task implementation.
        mock_net_driver.unplug_network.reset_mock()
        handle_net_delta_obj.revert(None, None, delta2)
    def test_handle_network_deltas(self, mock_get_net_driver):
        """HandleNetworkDeltas processes a dict of per-amphora deltas:
        added NICs are plugged, deleted NICs are unplugged, and unplug
        errors are tolerated on execute; revert re-raises non-NotFound
        errors."""
        mock_driver = mock.MagicMock()
        self.db_amphora_mock.to_dict.return_value = {
            constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID}
        mock_get_net_driver.return_value = mock_driver
        def _interface(network_id):
            # Helper: one-element interface list for the given network id.
            return [data_models.Interface(network_id=network_id)]
        net = network_tasks.HandleNetworkDeltas()
        # Empty deltas dict: nothing plugged.
        net.execute({})
        self.assertFalse(mock_driver.plug_network.called)
        # Delta with no NICs: still nothing plugged.
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute({self.db_amphora_mock.id: delta})
        self.assertFalse(mock_driver.plug_network.called)
        # One NIC to add: exactly one plug_network call.
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=_interface(1),
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute({self.db_amphora_mock.id: delta})
        mock_driver.plug_network.assert_called_once_with(COMPUTE_ID, 1)
        # revert
        # NOTE(review): this "revert" section still calls execute(); the
        # revert path itself is only exercised via assertRaises below —
        # confirm that is intentional.
        net.execute({self.db_amphora_mock.id: delta})
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute({self.db_amphora_mock.id: delta})
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=_interface(1),
                                  delete_nics=[]).to_dict(recurse=True)
        # Revert with a non-NetworkNotFound error must re-raise it.
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = net_base.NetworkNotFound
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = TestException('test')
        self.assertRaises(TestException, net.revert, mock.ANY,
                          {self.db_amphora_mock.id: delta})
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        # Execute-side unplug handling: empty deltas / empty delete list.
        mock_driver.reset_mock()
        net.execute({})
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute({self.db_amphora_mock.id: delta})
        self.assertFalse(mock_driver.unplug_network.called)
        # One NIC to delete: exactly one unplug call.
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=_interface(1)
                                  ).to_dict(recurse=True)
        net.execute({self.db_amphora_mock.id: delta})
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        # NetworkNotFound on unplug is ignored.
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = net_base.NetworkNotFound
        net.execute({self.db_amphora_mock.id: delta})
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        # Do a test with a general exception in case behavior changes
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = Exception()
        net.execute({self.db_amphora_mock.id: delta})
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_plug_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
LB.amphorae = AMPS_DATA
mock_get_lb.return_value = LB
LB.amphorae = AMPS_DATA
net = network_tasks.PlugVIP()
amp = mock.MagicMock()
amp.to_dict.return_value = 'vip'
mock_driver.plug_vip.return_value = [amp]
data = net.execute(self.load_balancer_mock)
mock_driver.plug_vip.assert_called_once_with(LB, LB.vip)
self.assertEqual(["vip"], data)
# revert
net.revert([o_data_models.Amphora().to_dict()],
self.load_balancer_mock)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
# revert with exception
mock_driver.reset_mock()
mock_driver.unplug_vip.side_effect = Exception('UnplugVipException')
net.revert([o_data_models.Amphora().to_dict()],
self.load_balancer_mock)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
    @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
                'get_current_loadbalancer_from_db')
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
    @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
    def test_apply_qos_on_creation(self, mock_get_session, mock_get_lb,
                                   mock_get_lb_db, mock_get_net_driver):
        """ApplyQos at LB creation applies the VIP QoS policy to each
        amphora's VRRP port (once for SINGLE, twice for ACTIVE_STANDBY);
        revert makes no QoS calls when no QoS change was requested."""
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        net = network_tasks.ApplyQos()
        mock_get_lb_db.return_value = LB
        mock_get_lb.return_value = LB
        # execute
        # SINGLE topology: one amphora, one apply_qos_on_port call.
        UPDATE_DICT[constants.TOPOLOGY] = constants.TOPOLOGY_SINGLE
        update_dict = UPDATE_DICT
        net.execute(self.load_balancer_mock, [AMPS_DATA[0]], update_dict)
        mock_driver.apply_qos_on_port.assert_called_once_with(
            VIP.qos_policy_id, AMPS_DATA[0].vrrp_port_id)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
        # ACTIVE_STANDBY topology: one call per amphora.
        standby_topology = constants.TOPOLOGY_ACTIVE_STANDBY
        mock_driver.reset_mock()
        update_dict[constants.TOPOLOGY] = standby_topology
        net.execute(self.load_balancer_mock, AMPS_DATA, update_dict)
        mock_driver.apply_qos_on_port.assert_called_with(
            t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
        self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
        # revert
        # No QoS change requested -> revert applies nothing (SINGLE).
        mock_driver.reset_mock()
        update_dict = UPDATE_DICT
        net.revert(None, self.load_balancer_mock, [AMPS_DATA[0]], update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
        # ... and nothing for ACTIVE_STANDBY either.
        mock_driver.reset_mock()
        update_dict[constants.TOPOLOGY] = standby_topology
        net.revert(None, self.load_balancer_mock, AMPS_DATA, update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
    @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
                'get_current_loadbalancer_from_db')
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
    @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
    def test_apply_qos_on_update(self, mock_get_session, mock_get_lb,
                                 mock_get_lb_db, mock_get_net_driver):
        """ApplyQos on LB update applies (or clears) the VIP QoS policy on
        the amphora VRRP ports; revert restores the original LB's policy."""
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        net = network_tasks.ApplyQos()
        # Fixtures: an LB without a QoS policy and one with POLICY_ID1.
        null_qos_vip = o_data_models.Vip(qos_policy_id=None)
        null_qos_lb = o_data_models.LoadBalancer(
            vip=null_qos_vip, topology=constants.TOPOLOGY_SINGLE,
            amphorae=[AMPS_DATA[0]])
        null_qos_lb_dict = (
            provider_utils.db_loadbalancer_to_provider_loadbalancer(
                null_qos_lb).to_dict())
        tmp_vip_object = o_data_models.Vip(
            qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1)
        tmp_lb = o_data_models.LoadBalancer(
            vip=tmp_vip_object, topology=constants.TOPOLOGY_SINGLE,
            amphorae=[AMPS_DATA[0]])
        pr_tm_dict = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            tmp_lb).to_dict()
        mock_get_lb.return_value = tmp_lb
        # execute
        # Non-QoS update on a QoS-enabled LB still applies the policy once.
        update_dict = {'description': 'fool'}
        net.execute(pr_tm_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_once_with(
            t_constants.MOCK_QOS_POLICY_ID1, AMPS_DATA[0].vrrp_port_id)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
        # Explicitly clearing the policy applies None to the port.
        mock_driver.reset_mock()
        mock_get_lb.reset_mock()
        mock_get_lb.return_value = null_qos_lb
        update_dict = {'vip': {'qos_policy_id': None}}
        net.execute(null_qos_lb_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_once_with(
            None, AMPS_DATA[0].vrrp_port_id)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
        # Non-QoS update on a QoS-less LB touches nothing.
        mock_driver.reset_mock()
        update_dict = {'name': '123'}
        net.execute(null_qos_lb_dict, update_dict=update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
        # ACTIVE_STANDBY: the policy is applied to both amphorae.
        mock_driver.reset_mock()
        mock_get_lb.reset_mock()
        update_dict = {'description': 'fool'}
        tmp_lb.amphorae = AMPS_DATA
        tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
        mock_get_lb.return_value = tmp_lb
        net.execute(pr_tm_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_with(
            t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
        self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
        # Same, with the QoS id explicitly present in the update dict.
        mock_driver.reset_mock()
        update_dict = {'description': 'fool',
                       'vip': {
                           'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID1}}
        tmp_lb.amphorae = AMPS_DATA
        tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
        net.execute(pr_tm_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_with(
            t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
        self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
        # Empty update dict on a QoS-less LB: no calls at all.
        mock_get_lb.return_value = null_qos_lb
        mock_driver.reset_mock()
        update_dict = {}
        net.execute(null_qos_lb_dict, update_dict=update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
        # revert
        # Non-QoS update: revert has nothing to undo.
        mock_driver.reset_mock()
        mock_get_lb.reset_mock()
        tmp_lb.amphorae = [AMPS_DATA[0]]
        tmp_lb.topology = constants.TOPOLOGY_SINGLE
        update_dict = {'description': 'fool'}
        mock_get_lb_db.return_value = tmp_lb
        net.revert(None, pr_tm_dict, update_dict=update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
        # QoS was cleared: revert restores the original LB's (LB2) policy.
        mock_driver.reset_mock()
        update_dict = {'vip': {'qos_policy_id': None}}
        ori_lb_db = LB2
        ori_lb_db.amphorae = [AMPS_DATA[0]]
        mock_get_lb_db.return_value = ori_lb_db
        net.revert(None, null_qos_lb_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_once_with(
            t_constants.MOCK_QOS_POLICY_ID2, AMPS_DATA[0].vrrp_port_id)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
        # QoS was changed: revert re-applies the original policy once (the
        # original LB only has one amphora).
        mock_driver.reset_mock()
        mock_get_lb.reset_mock()
        update_dict = {'vip': {
            'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID2}}
        tmp_lb.amphorae = AMPS_DATA
        tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
        ori_lb_db = LB2
        ori_lb_db.amphorae = [AMPS_DATA[0]]
        mock_get_lb_db.return_value = ori_lb_db
        net.revert(None, pr_tm_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_with(
            t_constants.MOCK_QOS_POLICY_ID2, mock.ANY)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_unplug_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_lb.return_value = LB
mock_get_net_driver.return_value = mock_driver
net = network_tasks.UnplugVIP()
net.execute(self.load_balancer_mock)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_allocate_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_lb.return_value = LB
mock_get_net_driver.return_value = mock_driver
net = network_tasks.AllocateVIP()
mock_driver.allocate_vip.return_value = LB.vip
mock_driver.reset_mock()
self.assertEqual(LB.vip.to_dict(),
net.execute(self.load_balancer_mock))
mock_driver.allocate_vip.assert_called_once_with(LB)
# revert
vip_mock = VIP.to_dict()
net.revert(vip_mock, self.load_balancer_mock)
mock_driver.deallocate_vip.assert_called_once_with(
o_data_models.Vip(**vip_mock))
# revert exception
mock_driver.reset_mock()
mock_driver.deallocate_vip.side_effect = Exception('DeallVipException')
vip_mock = VIP.to_dict()
net.revert(vip_mock, self.load_balancer_mock)
mock_driver.deallocate_vip.assert_called_once_with(o_data_models.Vip(
**vip_mock))
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_deallocate_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
net = network_tasks.DeallocateVIP()
vip = o_data_models.Vip()
lb = o_data_models.LoadBalancer(vip=vip)
mock_get_lb.return_value = lb
net.execute(self.load_balancer_mock)
mock_driver.deallocate_vip.assert_called_once_with(lb.vip)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_update_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
vip = o_data_models.Vip()
lb = o_data_models.LoadBalancer(vip=vip)
mock_get_lb.return_value = lb
listeners = [{constants.LOADBALANCER_ID: lb.id}]
net_task = network_tasks.UpdateVIP()
net_task.execute(listeners)
mock_driver.update_vip.assert_called_once_with(lb)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_update_vip_for_delete(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
vip = o_data_models.Vip()
lb = o_data_models.LoadBalancer(vip=vip)
mock_get_lb.return_value = lb
listener = {constants.LOADBALANCER_ID: lb.id}
net_task = network_tasks.UpdateVIPForDelete()
net_task.execute(listener)
mock_driver.update_vip.assert_called_once_with(lb, for_delete=True)
@mock.patch('octavia.db.api.get_session', return_value='TEST')
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_get_amphora_network_configs_by_id(
self, mock_lb_get, mock_amp_get,
mock_get_session, mock_get_net_driver):
LB_ID = uuidutils.generate_uuid()
AMP_ID = uuidutils.generate_uuid()
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_amp_get.return_value = 'mock amphora'
mock_lb_get.return_value = 'mock load balancer'
net_task = network_tasks.GetAmphoraNetworkConfigsByID()
net_task.execute(LB_ID, AMP_ID)
mock_driver.get_network_configs.assert_called_once_with(
'mock load balancer', amphora='mock amphora')
mock_amp_get.assert_called_once_with('TEST', id=AMP_ID)
mock_lb_get.assert_called_once_with('TEST', id=LB_ID)
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
    @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
    def test_get_amphorae_network_configs(self, mock_session, mock_lb_get,
                                          mock_get_net_driver):
        """GetAmphoraeNetworkConfigs fetches the LB's network configs."""
        mock_driver = mock.MagicMock()
        mock_lb_get.return_value = LB
        mock_get_net_driver.return_value = mock_driver
        # NOTE(review): the assertion compares against a freshly built empty
        # LoadBalancer rather than LB (the repo mock's return value) —
        # presumably data-model equality makes them interchangeable here;
        # confirm against o_data_models.LoadBalancer.__eq__.
        lb = o_data_models.LoadBalancer()
        net_task = network_tasks.GetAmphoraeNetworkConfigs()
        net_task.execute(self.load_balancer_mock)
        mock_driver.get_network_configs.assert_called_once_with(lb)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
def test_failover_preparation_for_amphora(self, mock_session, mock_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
failover = network_tasks.FailoverPreparationForAmphora()
failover.execute(self.amphora_mock)
mock_driver.failover_preparation.assert_called_once_with(
self.db_amphora_mock)
    def test_retrieve_portids_on_amphora_except_lb_network(
            self, mock_get_net_driver):
        """RetrievePortIDsOnAmphoraExceptLBNetwork lists the amphora's
        plugged ports, excluding the one whose fixed IP matches IP_ADDRESS
        (presumably the amphora's LB-network IP — see setUp)."""
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        def _interface(port_id):
            # Helper: one-element interface list for the given port id.
            return [data_models.Interface(port_id=port_id)]
        # No plugged networks: no port lookups happen at all.
        net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork()
        mock_driver.get_plugged_networks.return_value = []
        net_task.execute(self.amphora_mock)
        mock_driver.get_plugged_networks.assert_called_once_with(
            compute_id=COMPUTE_ID)
        self.assertFalse(mock_driver.get_port.called)
        # One plugged interface: its port is fetched by id.
        mock_driver.reset_mock()
        net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork()
        mock_driver.get_plugged_networks.return_value = _interface(1)
        net_task.execute(self.amphora_mock)
        mock_driver.get_port.assert_called_once_with(port_id=1)
        # A port whose fixed IP equals IP_ADDRESS is filtered out.
        mock_driver.reset_mock()
        net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork()
        port_mock = mock.MagicMock()
        fixed_ip_mock = mock.MagicMock()
        fixed_ip_mock.ip_address = IP_ADDRESS
        port_mock.fixed_ips = [fixed_ip_mock]
        mock_driver.get_plugged_networks.return_value = _interface(1)
        mock_driver.get_port.return_value = port_mock
        ports = net_task.execute(self.amphora_mock)
        self.assertEqual([], ports)
        # Any other fixed IP keeps the port in the result.
        mock_driver.reset_mock()
        net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork()
        port_mock = mock.MagicMock()
        fixed_ip_mock = mock.MagicMock()
        fixed_ip_mock.ip_address = "172.17.17.17"
        port_mock.fixed_ips = [fixed_ip_mock]
        mock_driver.get_plugged_networks.return_value = _interface(1)
        mock_driver.get_port.return_value = port_mock
        ports = net_task.execute(self.amphora_mock)
        self.assertEqual(1, len(ports))
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
def test_plug_ports(self, mock_session, mock_get, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
port1 = mock.MagicMock()
port2 = mock.MagicMock()
amp = {constants.ID: AMPHORA_ID,
constants.COMPUTE_ID: '1234'}
plugports = network_tasks.PlugPorts()
plugports.execute(amp, [port1, port2])
mock_driver.plug_port.assert_any_call(self.db_amphora_mock, port1)
mock_driver.plug_port.assert_any_call(self.db_amphora_mock, port2)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_update_vip_sg(self, mock_session, mock_lb_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_lb_get.return_value = LB
mock_get_net_driver.return_value = mock_driver
net = network_tasks.UpdateVIPSecurityGroup()
net.execute(self.load_balancer_mock)
mock_driver.update_vip_sg.assert_called_once_with(LB, LB.vip)
def test_get_subnet_from_vip(self, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
net = network_tasks.GetSubnetFromVIP()
net.execute(self.load_balancer_mock)
mock_driver.get_subnet.assert_called_once_with(LB.vip.subnet_id)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_plug_vip_amphora(self, mock_session, mock_lb_get, mock_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
amphora = {constants.ID: AMPHORA_ID,
constants.LB_NETWORK_IP: IP_ADDRESS}
mock_lb_get.return_value = LB
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
net = network_tasks.PlugVIPAmphora()
mockSubnet = mock_driver.get_subnet()
net.execute(self.load_balancer_mock, amphora, mockSubnet)
mock_driver.plug_aap_port.assert_called_once_with(
LB, LB.vip, self.db_amphora_mock, mockSubnet)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_revert_plug_vip_amphora(self, mock_session, mock_lb_get, mock_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_lb_get.return_value = LB
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
net = network_tasks.PlugVIPAmphora()
mockSubnet = mock.MagicMock()
amphora = {constants.ID: AMPHORA_ID,
constants.LB_NETWORK_IP: IP_ADDRESS}
net.revert(AMPS_DATA[0].to_dict(), self.load_balancer_mock,
amphora, mockSubnet)
mock_driver.unplug_aap_port.assert_called_once_with(
LB.vip, self.db_amphora_mock, mockSubnet)
    @mock.patch('octavia.controller.worker.v2.tasks.network_tasks.DeletePort.'
                'update_progress')
    def test_delete_port(self, mock_update_progress, mock_get_net_driver):
        """DeletePort retries failed deletes; with passive_failure it falls
        back to admin-downing the port instead of failing the flow."""
        PORT_ID = uuidutils.generate_uuid()
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        # Scripted per-call outcomes, consumed in order by the sub-tests
        # below: success / fail-then-retry-success / two failing pairs for
        # the passive-failure cases / a failing pair for the final
        # non-passive case.
        mock_driver.delete_port.side_effect = [
            mock.DEFAULT, exceptions.OctaviaException('boom'), mock.DEFAULT,
            exceptions.OctaviaException('boom'),
            exceptions.OctaviaException('boom'),
            exceptions.OctaviaException('boom'),
            exceptions.OctaviaException('boom'),
            exceptions.OctaviaException('boom'),
            exceptions.OctaviaException('boom')]
        mock_driver.admin_down_port.side_effect = [
            mock.DEFAULT, exceptions.OctaviaException('boom')]
        net_task = network_tasks.DeletePort()
        # Limit the retry attempts for the test run to save time
        net_task.execute.retry.stop = tenacity.stop_after_attempt(2)
        # Test port ID is None (no-op)
        net_task.execute(None)
        mock_update_progress.assert_not_called()
        mock_driver.delete_port.assert_not_called()
        # Test successful delete
        mock_update_progress.reset_mock()
        mock_driver.reset_mock()
        net_task.execute(PORT_ID)
        mock_update_progress.assert_called_once_with(0.5)
        mock_driver.delete_port.assert_called_once_with(PORT_ID)
        # Test exception and successful retry
        mock_update_progress.reset_mock()
        mock_driver.reset_mock()
        net_task.execute(PORT_ID)
        mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
        mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
                                                  mock.call(PORT_ID)])
        # Test passive failure: both deletes fail, port is admin-downed.
        mock_update_progress.reset_mock()
        mock_driver.reset_mock()
        net_task.execute(PORT_ID, passive_failure=True)
        mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
        mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
                                                  mock.call(PORT_ID)])
        mock_driver.admin_down_port.assert_called_once_with(PORT_ID)
        # Test passive failure where admin-down also fails (still no raise).
        mock_update_progress.reset_mock()
        mock_driver.reset_mock()
        mock_driver.admin_down_port.reset_mock()
        net_task.execute(PORT_ID, passive_failure=True)
        mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
        mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
                                                  mock.call(PORT_ID)])
        mock_driver.admin_down_port.assert_called_once_with(PORT_ID)
        # Test non-passive failure: the exception propagates and no
        # admin-down fallback is attempted.
        mock_update_progress.reset_mock()
        mock_driver.reset_mock()
        mock_driver.admin_down_port.reset_mock()
        mock_driver.admin_down_port.side_effect = [
            exceptions.OctaviaException('boom')]
        self.assertRaises(exceptions.OctaviaException, net_task.execute,
                          PORT_ID)
        mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
        mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
                                                  mock.call(PORT_ID)])
        mock_driver.admin_down_port.assert_not_called()
    def test_create_vip_base_port(self, mock_get_net_driver):
        """CreateVIPBasePort creates the amphora base port (with retries);
        revert deletes the created port unless the task itself failed, and
        swallows delete errors."""
        AMP_ID = uuidutils.generate_uuid()
        PORT_ID = uuidutils.generate_uuid()
        VIP_NETWORK_ID = uuidutils.generate_uuid()
        VIP_QOS_ID = uuidutils.generate_uuid()
        VIP_SG_ID = uuidutils.generate_uuid()
        VIP_SUBNET_ID = uuidutils.generate_uuid()
        VIP_IP_ADDRESS = '203.0.113.81'
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        vip_dict = {constants.IP_ADDRESS: VIP_IP_ADDRESS,
                    constants.NETWORK_ID: VIP_NETWORK_ID,
                    constants.QOS_POLICY_ID: VIP_QOS_ID,
                    constants.SUBNET_ID: VIP_SUBNET_ID}
        port_mock = mock.MagicMock()
        port_mock.id = PORT_ID
        # First create succeeds; subsequent ones fail so the exception path
        # (and its retry) below gets exercised.
        mock_driver.create_port.side_effect = [
            port_mock, exceptions.OctaviaException('boom'),
            exceptions.OctaviaException('boom'),
            exceptions.OctaviaException('boom')]
        mock_driver.delete_port.side_effect = [mock.DEFAULT, Exception('boom')]
        net_task = network_tasks.CreateVIPBasePort()
        # Limit the retry attempts for the test run to save time
        net_task.execute.retry.stop = tenacity.stop_after_attempt(2)
        # Test execute: the created port is returned as a dict and the
        # driver receives the expected port attributes.
        result = net_task.execute(vip_dict, VIP_SG_ID, AMP_ID)
        self.assertEqual(port_mock.to_dict(), result)
        mock_driver.create_port.assert_called_once_with(
            VIP_NETWORK_ID, name=constants.AMP_BASE_PORT_PREFIX + AMP_ID,
            fixed_ips=[{constants.SUBNET_ID: VIP_SUBNET_ID}],
            secondary_ips=[VIP_IP_ADDRESS], security_group_ids=[VIP_SG_ID],
            qos_policy_id=VIP_QOS_ID)
        # Test execute exception
        mock_driver.reset_mock()
        self.assertRaises(exceptions.OctaviaException, net_task.execute,
                          vip_dict, None, AMP_ID)
        # Test revert when this task failed: nothing to clean up.
        mock_driver.reset_mock()
        net_task.revert(failure.Failure.from_exception(Exception('boom')),
                        vip_dict, VIP_SG_ID, AMP_ID)
        mock_driver.delete_port.assert_not_called()
        # Test revert: the created port is deleted.
        mock_driver.reset_mock()
        net_task.revert([port_mock], vip_dict, VIP_SG_ID, AMP_ID)
        mock_driver.delete_port.assert_called_once_with(PORT_ID)
        # Test revert exception: delete_port raises, revert must swallow it.
        mock_driver.reset_mock()
        net_task.revert([port_mock], vip_dict, VIP_SG_ID, AMP_ID)
        mock_driver.delete_port.assert_called_once_with(PORT_ID)
    @mock.patch('time.sleep')
    def test_admin_down_port(self, mock_sleep, mock_get_net_driver):
        """AdminDownPort sets the port admin-down and checks its status,
        passively tolerating PortNotFound / a port that stays UP; revert
        re-enables the port and swallows failures."""
        PORT_ID = uuidutils.generate_uuid()
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        port_down_mock = mock.MagicMock()
        port_down_mock.status = constants.DOWN
        port_up_mock = mock.MagicMock()
        port_up_mock.status = constants.UP
        # Scripted outcomes: execute success / PortNotFound / success with
        # the port staying UP, then two revert calls (success, Exception).
        mock_driver.set_port_admin_state_up.side_effect = [
            mock.DEFAULT, net_base.PortNotFound, mock.DEFAULT, mock.DEFAULT,
            Exception('boom')]
        mock_driver.get_port.side_effect = [port_down_mock, port_up_mock]
        net_task = network_tasks.AdminDownPort()
        # Test execute: the port goes DOWN after one status check.
        net_task.execute(PORT_ID)
        mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
                                                                    False)
        mock_driver.get_port.assert_called_once_with(PORT_ID)
        # Test passive fail on port not found (no status check needed).
        mock_driver.reset_mock()
        net_task.execute(PORT_ID)
        mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
                                                                    False)
        mock_driver.get_port.assert_not_called()
        # Test passive fail on port stays up (no exception raised).
        mock_driver.reset_mock()
        net_task.execute(PORT_ID)
        mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
                                                                    False)
        mock_driver.get_port.assert_called_once_with(PORT_ID)
        # Test revert when this task failed: nothing to undo.
        mock_driver.reset_mock()
        net_task.revert(failure.Failure.from_exception(Exception('boom')),
                        PORT_ID)
        mock_driver.set_port_admin_state_up.assert_not_called()
        # Test revert: the port is set admin-up again.
        mock_driver.reset_mock()
        net_task.revert(None, PORT_ID)
        mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
                                                                    True)
        # Test revert exception passive failure (driver raises, no re-raise).
        mock_driver.reset_mock()
        net_task.revert(None, PORT_ID)
        mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
                                                                    True)
@mock.patch('octavia.common.utils.get_vip_security_group_name')
def test_get_vip_security_group_id(self, mock_get_sg_name,
mock_get_net_driver):
LB_ID = uuidutils.generate_uuid()
SG_ID = uuidutils.generate_uuid()
SG_NAME = 'fake_SG_name'
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_get_sg_name.return_value = SG_NAME
sg_mock = mock.MagicMock()
sg_mock.id = SG_ID
mock_driver.get_security_group.side_effect = [
sg_mock, None, net_base.SecurityGroupNotFound,
net_base.SecurityGroupNotFound]
net_task = network_tasks.GetVIPSecurityGroupID()
# Test execute
result = net_task.execute(LB_ID)
mock_driver.get_security_group.assert_called_once_with(SG_NAME)
mock_get_sg_name.assert_called_once_with(LB_ID)
# Test execute with empty get subnet response
mock_driver.reset_mock()
mock_get_sg_name.reset_mock()
result = net_task.execute(LB_ID)
self.assertIsNone(result)
mock_get_sg_name.assert_called_once_with(LB_ID)
# Test execute no security group found, security groups enabled
mock_driver.reset_mock()
mock_get_sg_name.reset_mock()
mock_driver.sec_grp_enabled = True
self.assertRaises(net_base.SecurityGroupNotFound, net_task.execute,
LB_ID)
mock_driver.get_security_group.assert_called_once_with(SG_NAME)
mock_get_sg_name.assert_called_once_with(LB_ID)
# Test execute no security group found, security groups disabled
mock_driver.reset_mock()
mock_get_sg_name.reset_mock()
mock_driver.sec_grp_enabled = False
result = net_task.execute(LB_ID)
self.assertIsNone(result)
mock_driver.get_security_group.assert_called_once_with(SG_NAME)
mock_get_sg_name.assert_called_once_with(LB_ID)
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from taskflow.types import failure
import tenacity
from octavia.api.drivers import utils as provider_utils
from octavia.common import constants
from octavia.common import data_models as o_data_models
from octavia.common import exceptions
from octavia.controller.worker.v2.tasks import network_tasks
from octavia.network import base as net_base
from octavia.network import data_models
from octavia.tests.common import constants as t_constants
import octavia.tests.unit.base as base
# Shared fixture data used throughout the network-task tests below.
AMPHORA_ID = 7
COMPUTE_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
SUBNET_ID = uuidutils.generate_uuid()
NETWORK_ID = uuidutils.generate_uuid()
IP_ADDRESS = "172.24.41.1"
# Two VIP/LB pairs so tests can compare "original" vs. "updated" objects.
VIP = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID,
                        subnet_id=t_constants.MOCK_SUBNET_ID,
                        qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1)
VIP2 = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID2,
                         subnet_id=t_constants.MOCK_SUBNET_ID2,
                         qos_policy_id=t_constants.MOCK_QOS_POLICY_ID2)
LB = o_data_models.LoadBalancer(vip=VIP)
LB2 = o_data_models.LoadBalancer(vip=VIP2)
FIRST_IP = {"ip_address": IP_ADDRESS, "subnet_id": SUBNET_ID}
FIXED_IPS = [FIRST_IP]
INTERFACE = data_models.Interface(id=uuidutils.generate_uuid(),
                                  compute_id=COMPUTE_ID, fixed_ips=FIXED_IPS,
                                  port_id=PORT_ID)
# Pair of amphorae used by the ACTIVE/STANDBY topology scenarios.
AMPS_DATA = [o_data_models.Amphora(id=t_constants.MOCK_AMP_ID1,
                                   vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID1,
                                   vrrp_ip=t_constants.MOCK_VRRP_IP1),
             o_data_models.Amphora(id=t_constants.MOCK_AMP_ID2,
                                   vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID2,
                                   vrrp_ip=t_constants.MOCK_VRRP_IP2)
             ]
# NOTE: mutated in place by the ApplyQos tests (TOPOLOGY key is reassigned).
UPDATE_DICT = {constants.TOPOLOGY: None}
# Single session mock shared by every mocked get_session patch below.
_session_mock = mock.MagicMock()
class TestException(Exception):
    """Exception type raised by test doubles to simulate driver failures."""

    def __init__(self, value):
        # Keep the payload around; __str__ reports its repr so assertion
        # output shows exactly what was raised.
        self.value = value

    def __str__(self):
        return "{!r}".format(self.value)
@mock.patch('octavia.common.utils.get_network_driver')
class TestNetworkTasks(base.TestCase):
    def setUp(self):
        """Build the mocks and config fixtures shared by all tests."""
        network_tasks.LOG = mock.MagicMock()
        self.db_amphora_mock = mock.MagicMock()
        self.db_load_balancer_mock = mock.MagicMock()
        self.vip_mock = mock.MagicMock()
        self.vip_mock.subnet_id = SUBNET_ID
        self.db_load_balancer_mock.vip = self.vip_mock
        self.db_load_balancer_mock.amphorae = []
        self.db_amphora_mock.id = AMPHORA_ID
        self.db_amphora_mock.compute_id = COMPUTE_ID
        self.db_amphora_mock.status = constants.AMPHORA_ALLOCATED
        self.boot_net_id = NETWORK_ID
        # Registered fixture: overrides apply (and are cleaned up) per test.
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group="controller_worker",
                    amp_boot_network_list=[self.boot_net_id])
        conf.config(group="networking", max_retries=1)
        # Provider (dict) representations consumed by the v2 task interfaces.
        self.amphora_mock = {constants.ID: AMPHORA_ID,
                             constants.COMPUTE_ID: COMPUTE_ID,
                             constants.LB_NETWORK_IP: IP_ADDRESS,
                             }
        self.load_balancer_mock = {
            constants.LOADBALANCER_ID: uuidutils.generate_uuid(),
            constants.VIP_SUBNET_ID: VIP.subnet_id,
            constants.VIP_PORT_ID: VIP.port_id,
            constants.VIP_ADDRESS: VIP.ip_address,
            constants.VIP_QOS_POLICY_ID: t_constants.MOCK_QOS_POLICY_ID1
        }
        # NOTE(review): this second Config is not registered via useFixture,
        # so it appears redundant with the fixture above -- confirm intent.
        conf = oslo_fixture.Config(cfg.CONF)
        conf.config(group="controller_worker",
                    amp_boot_network_list=[self.boot_net_id])
        super().setUp()
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
    @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
    def test_calculate_amphora_delta(self, mock_get_session, mock_lb_repo_get,
                                     mock_get_net_driver):
        """CalculateAmphoraDelta adds the member net, deletes the stale one.

        Covers both paths: the VRRP port resolved through the network
        driver (get_port) and the VRRP port supplied by the caller.
        """
        LB_ID = uuidutils.generate_uuid()
        DELETE_NETWORK_ID = uuidutils.generate_uuid()
        MEMBER_NETWORK_ID = uuidutils.generate_uuid()
        MEMBER_SUBNET_ID = uuidutils.generate_uuid()
        VRRP_PORT_ID = uuidutils.generate_uuid()
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        # One pool with a single member on MEMBER_SUBNET_ID.
        member_mock = mock.MagicMock()
        member_mock.subnet_id = MEMBER_SUBNET_ID
        pool_mock = mock.MagicMock()
        pool_mock.members = [member_mock]
        lb_mock = mock.MagicMock()
        lb_mock.pools = [pool_mock]
        lb_dict = {constants.LOADBALANCER_ID: LB_ID}
        amphora_dict = {constants.ID: AMPHORA_ID,
                        constants.COMPUTE_ID: COMPUTE_ID,
                        constants.VRRP_PORT_ID: VRRP_PORT_ID}
        vrrp_port_mock = mock.MagicMock()
        vrrp_port_mock.network_id = self.boot_net_id
        vrrp_port_dict = {constants.NETWORK_ID: self.boot_net_id}
        mock_subnet = mock.MagicMock()
        mock_subnet.network_id = MEMBER_NETWORK_ID
        # Currently plugged NICs: one stale network plus the boot network.
        nic1_delete_mock = mock.MagicMock()
        nic1_delete_mock.network_id = DELETE_NETWORK_ID
        nic2_keep_mock = mock.MagicMock()
        nic2_keep_mock.network_id = self.boot_net_id
        mock_lb_repo_get.return_value = lb_mock
        mock_driver.get_port.return_value = vrrp_port_mock
        mock_driver.get_subnet.return_value = mock_subnet
        mock_driver.get_plugged_networks.return_value = [nic1_delete_mock,
                                                         nic2_keep_mock]
        calc_amp_delta = network_tasks.CalculateAmphoraDelta()
        # Test vrrp_port_id is None
        result = calc_amp_delta.execute(lb_dict, amphora_dict, {})
        self.assertEqual(AMPHORA_ID, result[constants.AMPHORA_ID])
        self.assertEqual(COMPUTE_ID, result[constants.COMPUTE_ID])
        self.assertEqual(1, len(result[constants.ADD_NICS]))
        self.assertEqual(MEMBER_NETWORK_ID,
                         result[constants.ADD_NICS][0][constants.NETWORK_ID])
        self.assertEqual(1, len(result[constants.DELETE_NICS]))
        self.assertEqual(
            DELETE_NETWORK_ID,
            result[constants.DELETE_NICS][0][constants.NETWORK_ID])
        mock_driver.get_port.assert_called_once_with(VRRP_PORT_ID)
        mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID)
        mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
        # Test with vrrp_port_id
        mock_driver.reset_mock()
        result = calc_amp_delta.execute(lb_dict, amphora_dict, {},
                                        vrrp_port=vrrp_port_dict)
        self.assertEqual(AMPHORA_ID, result[constants.AMPHORA_ID])
        self.assertEqual(COMPUTE_ID, result[constants.COMPUTE_ID])
        self.assertEqual(1, len(result[constants.ADD_NICS]))
        self.assertEqual(MEMBER_NETWORK_ID,
                         result[constants.ADD_NICS][0][constants.NETWORK_ID])
        self.assertEqual(1, len(result[constants.DELETE_NICS]))
        self.assertEqual(
            DELETE_NETWORK_ID,
            result[constants.DELETE_NICS][0][constants.NETWORK_ID])
        # When the caller supplies the VRRP port, no driver lookup happens.
        mock_driver.get_port.assert_not_called()
        mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID)
        mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
    @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
    def test_calculate_delta(self, mock_get_session, mock_get_lb,
                             mock_get_net_driver):
        """CalculateDelta over several amp/pool/member combinations.

        Each scenario below tweaks the shared pool/plugged-network mocks
        and asserts the resulting per-amphora Delta dict.
        """
        mock_driver = mock.MagicMock()
        mock_get_lb.return_value = self.db_load_balancer_mock
        self.db_amphora_mock.to_dict.return_value = {
            constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID,
            constants.VRRP_PORT_ID: PORT_ID}
        mock_get_net_driver.return_value = mock_driver
        # Only the boot network is plugged initially.
        mock_driver.get_plugged_networks.return_value = [
            data_models.Interface(network_id=self.boot_net_id)]
        mock_driver.get_port.return_value = data_models.Port(
            network_id=self.boot_net_id)
        EMPTY = {}
        empty_deltas = {self.db_amphora_mock.id: data_models.Delta(
            amphora_id=AMPHORA_ID,
            compute_id=COMPUTE_ID,
            add_nics=[],
            delete_nics=[]).to_dict(recurse=True)}
        calc_delta = network_tasks.CalculateDelta()
        self.assertEqual(EMPTY,
                         calc_delta.execute(self.load_balancer_mock, {}))
        # Test with one amp and no pools, nothing plugged
        # Delta should be empty
        mock_driver.reset_mock()
        self.db_amphora_mock.load_balancer = self.db_load_balancer_mock
        self.db_load_balancer_mock.amphorae = [self.db_amphora_mock]
        self.db_load_balancer_mock.pools = []
        self.assertEqual(empty_deltas,
                         calc_delta.execute(self.load_balancer_mock, {}))
        mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
        # Pool mock should be configured explicitly for each test
        pool_mock = mock.MagicMock()
        self.db_load_balancer_mock.pools = [pool_mock]
        # Test with one amp and one pool but no members, nothing plugged
        # Delta should be empty
        pool_mock.members = []
        self.assertEqual(empty_deltas,
                         calc_delta.execute(self.load_balancer_mock, {}))
        # Test with one amp and one pool and one member, nothing plugged
        # Delta should be one additional subnet to plug
        mock_driver.reset_mock()
        member_mock = mock.MagicMock()
        member_mock.subnet_id = 1
        pool_mock.members = [member_mock]
        mock_driver.get_subnet.return_value = data_models.Subnet(id=2,
                                                                 network_id=3)
        ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                compute_id=self.db_amphora_mock.compute_id,
                                add_nics=[
                                    data_models.Interface(network_id=3)],
                                delete_nics=[]).to_dict(recurse=True)
        self.assertEqual({self.db_amphora_mock.id: ndm},
                         calc_delta.execute(self.load_balancer_mock, {}))
        vrrp_port_call = mock.call(PORT_ID)
        mock_driver.get_port.assert_has_calls([vrrp_port_call])
        self.assertEqual(1, mock_driver.get_port.call_count)
        member_subnet_call = mock.call(member_mock.subnet_id)
        mock_driver.get_subnet.assert_has_calls([member_subnet_call])
        self.assertEqual(1, mock_driver.get_subnet.call_count)
        # Test with one amp and one pool and one member, already plugged
        # Delta should be empty
        mock_driver.reset_mock()
        member_mock = mock.MagicMock()
        member_mock.subnet_id = 1
        pool_mock.members = [member_mock]
        mock_driver.get_plugged_networks.return_value = [
            data_models.Interface(network_id=3),
            data_models.Interface(network_id=self.boot_net_id)]
        self.assertEqual(empty_deltas,
                         calc_delta.execute(self.load_balancer_mock, {}))
        # Test with one amp and one pool and one member, wrong network plugged
        # Delta should be one network to add and one to remove
        mock_driver.reset_mock()
        member_mock = mock.MagicMock()
        member_mock.subnet_id = 1
        pool_mock.members = [member_mock]
        mock_driver.get_plugged_networks.return_value = [
            data_models.Interface(network_id=2),
            data_models.Interface(network_id=self.boot_net_id)]
        ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                compute_id=self.db_amphora_mock.compute_id,
                                add_nics=[
                                    data_models.Interface(network_id=3)],
                                delete_nics=[
                                    data_models.Interface(network_id=2)]
                                ).to_dict(recurse=True)
        self.assertEqual({self.db_amphora_mock.id: ndm},
                         calc_delta.execute(self.load_balancer_mock, {}))
        # Test with one amp and one pool and no members, one network plugged
        # Delta should be one network to remove
        mock_driver.reset_mock()
        pool_mock.members = []
        mock_driver.get_plugged_networks.return_value = [
            data_models.Interface(network_id=2),
            data_models.Interface(network_id=self.boot_net_id)
        ]
        ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                compute_id=self.db_amphora_mock.compute_id,
                                add_nics=[],
                                delete_nics=[
                                    data_models.Interface(network_id=2)]
                                ).to_dict(recurse=True)
        self.assertEqual({self.db_amphora_mock.id: ndm},
                         calc_delta.execute(self.load_balancer_mock, {}))
def test_get_plumbed_networks(self, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_driver.get_plugged_networks.side_effect = [['blah']]
net = network_tasks.GetPlumbedNetworks()
self.assertEqual(['blah'], net.execute(self.amphora_mock))
mock_driver.get_plugged_networks.assert_called_once_with(
COMPUTE_ID)
    def test_plug_networks(self, mock_get_net_driver):
        """PlugNetworks: execute plugs add_nics; revert unplugs them.

        Revert tolerates NetworkNotFound but propagates other errors.
        """
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver

        # Helper: a one-entry add/delete NIC list for the given network.
        def _interface(network_id):
            return [data_models.Interface(network_id=network_id)]
        net = network_tasks.PlugNetworks()
        # No delta at all -> nothing plugged.
        net.execute(self.amphora_mock, None)
        self.assertFalse(mock_driver.plug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute(self.amphora_mock, delta)
        self.assertFalse(mock_driver.plug_network.called)
        # One NIC to add -> one plug_network call.
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=_interface(1),
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute(self.amphora_mock, delta)
        mock_driver.plug_network.assert_called_once_with(COMPUTE_ID, 1)
        # revert
        net.revert(self.amphora_mock, None)
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=[]).to_dict(recurse=True)
        net.revert(self.amphora_mock, delta)
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=_interface(1),
                                  delete_nics=[]).to_dict(recurse=True)
        net.revert(self.amphora_mock, delta)
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        # NetworkNotFound during revert is swallowed (already gone).
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = net_base.NetworkNotFound
        net.revert(self.amphora_mock, delta)  # No exception
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        # Any other exception propagates out of revert.
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = TestException('test')
        self.assertRaises(TestException,
                          net.revert,
                          self.amphora_mock,
                          delta)
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
    def test_unplug_networks(self, mock_get_net_driver):
        """UnPlugNetworks: execute unplugs delete_nics, swallowing errors."""
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver

        # Helper: a one-entry NIC list for the given network.
        def _interface(network_id):
            return [data_models.Interface(network_id=network_id)]
        net = network_tasks.UnPlugNetworks()
        # No delta -> nothing unplugged.
        net.execute(self.db_amphora_mock, None)
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute(self.amphora_mock, delta)
        self.assertFalse(mock_driver.unplug_network.called)
        # One NIC to delete -> one unplug_network call.
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=_interface(1)
                                  ).to_dict(recurse=True)
        net.execute(self.amphora_mock, delta)
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        # NetworkNotFound is tolerated (network already gone).
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = net_base.NetworkNotFound
        net.execute(self.amphora_mock, delta)  # No exception
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        # Do a test with a general exception in case behavior changes
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = Exception()
        net.execute(self.amphora_mock, delta)  # No exception
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
    def test_get_member_ports(self, mock_get_net_driver):
        """GetMemberPorts returns only non-VIP ports, with subnet info."""
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver

        # Helper: a one-entry interface list for the given port.
        def _interface(port_id):
            return [data_models.Interface(port_id=port_id)]
        net_task = network_tasks.GetMemberPorts()
        net_task.execute(self.load_balancer_mock, self.amphora_mock)
        mock_driver.get_port.assert_called_once_with(t_constants.MOCK_PORT_ID)
        mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
        # Plugged port on the same network as the VIP -> filtered out,
        # no network lookup needed.
        mock_driver.reset_mock()
        net_task = network_tasks.GetMemberPorts()
        mock_driver.get_plugged_networks.return_value = _interface(1)
        mock_driver.get_port.side_effect = [
            data_models.Port(network_id=NETWORK_ID),
            data_models.Port(network_id=NETWORK_ID)]
        net_task.execute(self.load_balancer_mock, self.amphora_mock)
        self.assertEqual(2, mock_driver.get_port.call_count)
        self.assertFalse(mock_driver.get_network.called)
        # Member port on a different network -> returned with its subnet
        # resolved.
        mock_driver.reset_mock()
        port_mock = mock.MagicMock()
        fixed_ip_mock = mock.MagicMock()
        fixed_ip_mock.subnet_id = 1
        port_mock.fixed_ips = [fixed_ip_mock]
        net_task = network_tasks.GetMemberPorts()
        mock_driver.get_plugged_networks.return_value = _interface(1)
        mock_driver.get_port.side_effect = [
            data_models.Port(network_id=NETWORK_ID), port_mock]
        ports = net_task.execute(self.load_balancer_mock, self.amphora_mock)
        mock_driver.get_subnet.assert_called_once_with(1)
        self.assertEqual([port_mock], ports)
    def test_handle_network_delta(self, mock_get_net_driver):
        """HandleNetworkDelta plugs add_nics, unplugs delete_nics.

        The unplug side_effect sequence [None, NetworkNotFound, Exception]
        exercises the success, not-found, and error branches in one
        execute() call -- the statement order below is load-bearing.
        """
        mock_net_driver = mock.MagicMock()
        self.db_amphora_mock.to_dict.return_value = {
            constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID}
        mock_get_net_driver.return_value = mock_net_driver
        nic1 = data_models.Interface()
        nic1.network_id = uuidutils.generate_uuid()
        nic2 = data_models.Interface()
        nic2.network_id = uuidutils.generate_uuid()
        interface1 = mock.MagicMock()
        interface1.port_id = uuidutils.generate_uuid()
        port1 = mock.MagicMock()
        port1.network_id = uuidutils.generate_uuid()
        fixed_ip = mock.MagicMock()
        fixed_ip.subnet_id = uuidutils.generate_uuid()
        port1.fixed_ips = [fixed_ip]
        subnet = mock.MagicMock()
        network = mock.MagicMock()
        # One NIC to add; nic2 repeated to hit all three unplug branches.
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[nic1],
                                  delete_nics=[nic2, nic2, nic2]
                                  ).to_dict(recurse=True)
        mock_net_driver.plug_network.return_value = interface1
        mock_net_driver.get_port.return_value = port1
        mock_net_driver.get_network.return_value = network
        mock_net_driver.get_subnet.return_value = subnet
        mock_net_driver.unplug_network.side_effect = [
            None, net_base.NetworkNotFound, Exception]
        handle_net_delta_obj = network_tasks.HandleNetworkDelta()
        result = handle_net_delta_obj.execute(self.amphora_mock,
                                              delta)
        mock_net_driver.plug_network.assert_called_once_with(
            self.db_amphora_mock.compute_id, nic1.network_id)
        mock_net_driver.get_port.assert_called_once_with(interface1.port_id)
        mock_net_driver.get_network.assert_called_once_with(port1.network_id)
        mock_net_driver.get_subnet.assert_called_once_with(fixed_ip.subnet_id)
        self.assertEqual({self.db_amphora_mock.id: [port1.to_dict()]}, result)
        mock_net_driver.unplug_network.assert_called_with(
            self.db_amphora_mock.compute_id, nic2.network_id)
        # Revert
        delta2 = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                   compute_id=self.db_amphora_mock.compute_id,
                                   add_nics=[nic1, nic1],
                                   delete_nics=[nic2, nic2, nic2]
                                   ).to_dict(recurse=True)
        # A failed flow result or a missing delta means nothing to undo.
        mock_net_driver.unplug_network.reset_mock()
        handle_net_delta_obj.revert(
            failure.Failure.from_exception(Exception('boom')), None, None)
        mock_net_driver.unplug_network.assert_not_called()
        mock_net_driver.unplug_network.reset_mock()
        handle_net_delta_obj.revert(None, None, None)
        mock_net_driver.unplug_network.assert_not_called()
        mock_net_driver.unplug_network.reset_mock()
        handle_net_delta_obj.revert(None, None, delta2)
    def test_handle_network_deltas(self, mock_get_net_driver):
        """HandleNetworkDeltas: plug/unplug per delta dict entry."""
        mock_driver = mock.MagicMock()
        self.db_amphora_mock.to_dict.return_value = {
            constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID}
        mock_get_net_driver.return_value = mock_driver

        # Helper: a one-entry NIC list for the given network.
        def _interface(network_id):
            return [data_models.Interface(network_id=network_id)]
        net = network_tasks.HandleNetworkDeltas()
        net.execute({})
        self.assertFalse(mock_driver.plug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute({self.db_amphora_mock.id: delta})
        self.assertFalse(mock_driver.plug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=_interface(1),
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute({self.db_amphora_mock.id: delta})
        mock_driver.plug_network.assert_called_once_with(COMPUTE_ID, 1)
        # revert
        # NOTE(review): this section mostly re-invokes execute() rather
        # than revert(); looks like copy/paste from test_plug_networks --
        # confirm the intended revert coverage.
        net.execute({self.db_amphora_mock.id: delta})
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute({self.db_amphora_mock.id: delta})
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=_interface(1),
                                  delete_nics=[]).to_dict(recurse=True)
        mock_driver.reset_mock()
        # NOTE(review): this NetworkNotFound side_effect is immediately
        # overwritten below and never exercised here -- confirm.
        mock_driver.unplug_network.side_effect = net_base.NetworkNotFound
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = TestException('test')
        self.assertRaises(TestException, net.revert, mock.ANY,
                          {self.db_amphora_mock.id: delta})
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        mock_driver.reset_mock()
        net.execute({})
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=[]).to_dict(recurse=True)
        net.execute({self.db_amphora_mock.id: delta})
        self.assertFalse(mock_driver.unplug_network.called)
        delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
                                  compute_id=self.db_amphora_mock.compute_id,
                                  add_nics=[],
                                  delete_nics=_interface(1)
                                  ).to_dict(recurse=True)
        net.execute({self.db_amphora_mock.id: delta})
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        # NetworkNotFound is tolerated during execute (already unplugged).
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = net_base.NetworkNotFound
        net.execute({self.db_amphora_mock.id: delta})
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
        # Do a test with a general exception in case behavior changes
        mock_driver.reset_mock()
        mock_driver.unplug_network.side_effect = Exception()
        net.execute({self.db_amphora_mock.id: delta})
        mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_plug_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
LB.amphorae = AMPS_DATA
mock_get_lb.return_value = LB
LB.amphorae = AMPS_DATA
net = network_tasks.PlugVIP()
amp = mock.MagicMock()
amp.to_dict.return_value = 'vip'
mock_driver.plug_vip.return_value = [amp]
data = net.execute(self.load_balancer_mock)
mock_driver.plug_vip.assert_called_once_with(LB, LB.vip)
self.assertEqual(["vip"], data)
# revert
net.revert([o_data_models.Amphora().to_dict()],
self.load_balancer_mock)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
# revert with exception
mock_driver.reset_mock()
mock_driver.unplug_vip.side_effect = Exception('UnplugVipException')
net.revert([o_data_models.Amphora().to_dict()],
self.load_balancer_mock)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
    @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
                'get_current_loadbalancer_from_db')
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
    @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
    def test_apply_qos_on_creation(self, mock_get_session, mock_get_lb,
                                   mock_get_lb_db, mock_get_net_driver):
        """ApplyQos at create time: one call per amphora; revert is a no-op."""
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        net = network_tasks.ApplyQos()
        mock_get_lb_db.return_value = LB
        mock_get_lb.return_value = LB
        # execute
        # SINGLE topology: QoS applied to the one amphora's VRRP port.
        UPDATE_DICT[constants.TOPOLOGY] = constants.TOPOLOGY_SINGLE
        update_dict = UPDATE_DICT
        net.execute(self.load_balancer_mock, [AMPS_DATA[0]], update_dict)
        mock_driver.apply_qos_on_port.assert_called_once_with(
            VIP.qos_policy_id, AMPS_DATA[0].vrrp_port_id)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
        # ACTIVE/STANDBY topology: one call per amphora (two total).
        standby_topology = constants.TOPOLOGY_ACTIVE_STANDBY
        mock_driver.reset_mock()
        update_dict[constants.TOPOLOGY] = standby_topology
        net.execute(self.load_balancer_mock, AMPS_DATA, update_dict)
        mock_driver.apply_qos_on_port.assert_called_with(
            t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
        self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
        # revert
        # At creation time there is nothing to roll back.
        mock_driver.reset_mock()
        update_dict = UPDATE_DICT
        net.revert(None, self.load_balancer_mock, [AMPS_DATA[0]], update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
        mock_driver.reset_mock()
        update_dict[constants.TOPOLOGY] = standby_topology
        net.revert(None, self.load_balancer_mock, AMPS_DATA, update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
    @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
                'get_current_loadbalancer_from_db')
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
    @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
    def test_apply_qos_on_update(self, mock_get_session, mock_get_lb,
                                 mock_get_lb_db, mock_get_net_driver):
        """ApplyQos on LB update: QoS (re)applied only when relevant.

        Exercises update_dicts with and without a 'vip.qos_policy_id'
        key, in SINGLE and ACTIVE/STANDBY topologies, then the revert
        path restoring the original LB's QoS policy.
        """
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        net = network_tasks.ApplyQos()
        null_qos_vip = o_data_models.Vip(qos_policy_id=None)
        null_qos_lb = o_data_models.LoadBalancer(
            vip=null_qos_vip, topology=constants.TOPOLOGY_SINGLE,
            amphorae=[AMPS_DATA[0]])
        null_qos_lb_dict = (
            provider_utils.db_loadbalancer_to_provider_loadbalancer(
                null_qos_lb).to_dict())
        tmp_vip_object = o_data_models.Vip(
            qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1)
        tmp_lb = o_data_models.LoadBalancer(
            vip=tmp_vip_object, topology=constants.TOPOLOGY_SINGLE,
            amphorae=[AMPS_DATA[0]])
        pr_tm_dict = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            tmp_lb).to_dict()
        mock_get_lb.return_value = tmp_lb
        # execute
        # Non-QoS update on an LB with a QoS policy -> policy re-applied.
        update_dict = {'description': 'fool'}
        net.execute(pr_tm_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_once_with(
            t_constants.MOCK_QOS_POLICY_ID1, AMPS_DATA[0].vrrp_port_id)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
        # Explicitly clearing the policy -> applied with None.
        mock_driver.reset_mock()
        mock_get_lb.reset_mock()
        mock_get_lb.return_value = null_qos_lb
        update_dict = {'vip': {'qos_policy_id': None}}
        net.execute(null_qos_lb_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_once_with(
            None, AMPS_DATA[0].vrrp_port_id)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
        # Unrelated update on an LB without a policy -> no calls.
        mock_driver.reset_mock()
        update_dict = {'name': '123'}
        net.execute(null_qos_lb_dict, update_dict=update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
        # ACTIVE/STANDBY: policy applied to both amphorae.
        mock_driver.reset_mock()
        mock_get_lb.reset_mock()
        update_dict = {'description': 'fool'}
        tmp_lb.amphorae = AMPS_DATA
        tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
        mock_get_lb.return_value = tmp_lb
        net.execute(pr_tm_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_with(
            t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
        self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
        mock_driver.reset_mock()
        update_dict = {'description': 'fool',
                       'vip': {
                           'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID1}}
        tmp_lb.amphorae = AMPS_DATA
        tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
        net.execute(pr_tm_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_with(
            t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
        self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
        # Empty update on a policy-less LB -> no calls.
        mock_get_lb.return_value = null_qos_lb
        mock_driver.reset_mock()
        update_dict = {}
        net.execute(null_qos_lb_dict, update_dict=update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
        # revert
        # Non-QoS update -> nothing to restore.
        mock_driver.reset_mock()
        mock_get_lb.reset_mock()
        tmp_lb.amphorae = [AMPS_DATA[0]]
        tmp_lb.topology = constants.TOPOLOGY_SINGLE
        update_dict = {'description': 'fool'}
        mock_get_lb_db.return_value = tmp_lb
        net.revert(None, pr_tm_dict, update_dict=update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
        # QoS was cleared -> revert restores the original LB's policy.
        mock_driver.reset_mock()
        update_dict = {'vip': {'qos_policy_id': None}}
        ori_lb_db = LB2
        ori_lb_db.amphorae = [AMPS_DATA[0]]
        mock_get_lb_db.return_value = ori_lb_db
        net.revert(None, null_qos_lb_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_once_with(
            t_constants.MOCK_QOS_POLICY_ID2, AMPS_DATA[0].vrrp_port_id)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
        # QoS was changed -> revert restores the original policy.
        mock_driver.reset_mock()
        mock_get_lb.reset_mock()
        update_dict = {'vip': {
            'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID2}}
        tmp_lb.amphorae = AMPS_DATA
        tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
        ori_lb_db = LB2
        ori_lb_db.amphorae = [AMPS_DATA[0]]
        mock_get_lb_db.return_value = ori_lb_db
        net.revert(None, pr_tm_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_with(
            t_constants.MOCK_QOS_POLICY_ID2, mock.ANY)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_unplug_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_lb.return_value = LB
mock_get_net_driver.return_value = mock_driver
net = network_tasks.UnplugVIP()
net.execute(self.load_balancer_mock)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
    @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
    def test_allocate_vip(self, mock_get_session, mock_get_lb,
                          mock_get_net_driver):
        """AllocateVIP: execute allocates; revert deallocates, even on error."""
        mock_driver = mock.MagicMock()
        mock_get_lb.return_value = LB
        mock_get_net_driver.return_value = mock_driver
        net = network_tasks.AllocateVIP()
        mock_driver.allocate_vip.return_value = LB.vip
        # reset_mock() clears call records but keeps return_value set above.
        mock_driver.reset_mock()
        self.assertEqual(LB.vip.to_dict(),
                         net.execute(self.load_balancer_mock))
        mock_driver.allocate_vip.assert_called_once_with(LB)
        # revert
        vip_mock = VIP.to_dict()
        net.revert(vip_mock, self.load_balancer_mock)
        mock_driver.deallocate_vip.assert_called_once_with(
            o_data_models.Vip(**vip_mock))
        # revert exception
        # Deallocate failures are swallowed; the call is still attempted.
        mock_driver.reset_mock()
        mock_driver.deallocate_vip.side_effect = Exception('DeallVipException')
        vip_mock = VIP.to_dict()
        net.revert(vip_mock, self.load_balancer_mock)
        mock_driver.deallocate_vip.assert_called_once_with(o_data_models.Vip(
            **vip_mock))
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_deallocate_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
net = network_tasks.DeallocateVIP()
vip = o_data_models.Vip()
lb = o_data_models.LoadBalancer(vip=vip)
mock_get_lb.return_value = lb
net.execute(self.load_balancer_mock)
mock_driver.deallocate_vip.assert_called_once_with(lb.vip)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_update_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
vip = o_data_models.Vip()
lb = o_data_models.LoadBalancer(vip=vip)
mock_get_lb.return_value = lb
listeners = [{constants.LOADBALANCER_ID: lb.id}]
net_task = network_tasks.UpdateVIP()
net_task.execute(listeners)
mock_driver.update_vip.assert_called_once_with(lb)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_update_vip_for_delete(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
vip = o_data_models.Vip()
lb = o_data_models.LoadBalancer(vip=vip)
mock_get_lb.return_value = lb
listener = {constants.LOADBALANCER_ID: lb.id}
net_task = network_tasks.UpdateVIPForDelete()
net_task.execute(listener)
mock_driver.update_vip.assert_called_once_with(lb, for_delete=True)
@mock.patch('octavia.db.api.get_session', return_value='TEST')
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_get_amphora_network_configs_by_id(
self, mock_lb_get, mock_amp_get,
mock_get_session, mock_get_net_driver):
LB_ID = uuidutils.generate_uuid()
AMP_ID = uuidutils.generate_uuid()
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_amp_get.return_value = 'mock amphora'
mock_lb_get.return_value = 'mock load balancer'
net_task = network_tasks.GetAmphoraNetworkConfigsByID()
net_task.execute(LB_ID, AMP_ID)
mock_driver.get_network_configs.assert_called_once_with(
'mock load balancer', amphora='mock amphora')
mock_amp_get.assert_called_once_with('TEST', id=AMP_ID)
mock_lb_get.assert_called_once_with('TEST', id=LB_ID)
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
    @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
    def test_get_amphorae_network_configs(self, mock_session, mock_lb_get,
                                          mock_get_net_driver):
        """GetAmphoraeNetworkConfigs queries the driver for the DB LB."""
        mock_driver = mock.MagicMock()
        mock_lb_get.return_value = LB
        mock_get_net_driver.return_value = mock_driver
        lb = o_data_models.LoadBalancer()
        net_task = network_tasks.GetAmphoraeNetworkConfigs()
        net_task.execute(self.load_balancer_mock)
        # NOTE(review): the assertion compares against a fresh, empty
        # LoadBalancer while the repo mock returns LB -- this relies on
        # the data model equality semantics; confirm it is intended.
        mock_driver.get_network_configs.assert_called_once_with(lb)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
def test_failover_preparation_for_amphora(self, mock_session, mock_get,
                                          mock_get_net_driver):
    """FailoverPreparationForAmphora hands the DB amphora to the driver."""
    network_driver = mock.MagicMock()
    mock_get_net_driver.return_value = network_driver
    mock_get.return_value = self.db_amphora_mock

    task = network_tasks.FailoverPreparationForAmphora()
    task.execute(self.amphora_mock)

    network_driver.failover_preparation.assert_called_once_with(
        self.db_amphora_mock)
def test_retrieve_portids_on_amphora_except_lb_network(
self, mock_get_net_driver):
"""RetrievePortIDsOnAmphoraExceptLBNetwork returns only ports whose
fixed IPs do not include the amphora's LB-network address."""
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
def _interface(port_id):
return [data_models.Interface(port_id=port_id)]
# Case 1: no plugged networks -> no port lookups at all.
net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork()
mock_driver.get_plugged_networks.return_value = []
net_task.execute(self.amphora_mock)
mock_driver.get_plugged_networks.assert_called_once_with(
compute_id=COMPUTE_ID)
self.assertFalse(mock_driver.get_port.called)
mock_driver.reset_mock()
# Case 2: one plugged interface -> its port is fetched by id.
net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork()
mock_driver.get_plugged_networks.return_value = _interface(1)
net_task.execute(self.amphora_mock)
mock_driver.get_port.assert_called_once_with(port_id=1)
mock_driver.reset_mock()
# Case 3: port carries the amphora's LB-network IP -> filtered out.
net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork()
port_mock = mock.MagicMock()
fixed_ip_mock = mock.MagicMock()
fixed_ip_mock.ip_address = IP_ADDRESS
port_mock.fixed_ips = [fixed_ip_mock]
mock_driver.get_plugged_networks.return_value = _interface(1)
mock_driver.get_port.return_value = port_mock
ports = net_task.execute(self.amphora_mock)
self.assertEqual([], ports)
mock_driver.reset_mock()
# Case 4: port has an unrelated IP -> it is kept in the result.
net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork()
port_mock = mock.MagicMock()
fixed_ip_mock = mock.MagicMock()
fixed_ip_mock.ip_address = "172.17.17.17"
port_mock.fixed_ips = [fixed_ip_mock]
mock_driver.get_plugged_networks.return_value = _interface(1)
mock_driver.get_port.return_value = port_mock
ports = net_task.execute(self.amphora_mock)
self.assertEqual(1, len(ports))
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
def test_plug_ports(self, mock_session, mock_get, mock_get_net_driver):
    """PlugPorts plugs every supplied port into the DB amphora."""
    network_driver = mock.MagicMock()
    mock_get_net_driver.return_value = network_driver
    mock_get.return_value = self.db_amphora_mock

    ports = [mock.MagicMock(), mock.MagicMock()]
    amp = {constants.ID: AMPHORA_ID,
           constants.COMPUTE_ID: '1234'}

    network_tasks.PlugPorts().execute(amp, ports)

    for port in ports:
        network_driver.plug_port.assert_any_call(self.db_amphora_mock, port)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_update_vip_sg(self, mock_session, mock_lb_get,
                       mock_get_net_driver):
    """UpdateVIPSecurityGroup delegates to the driver with the DB LB and VIP."""
    network_driver = mock.MagicMock()
    mock_get_net_driver.return_value = network_driver
    mock_lb_get.return_value = LB

    task = network_tasks.UpdateVIPSecurityGroup()
    task.execute(self.load_balancer_mock)

    network_driver.update_vip_sg.assert_called_once_with(LB, LB.vip)
def test_get_subnet_from_vip(self, mock_get_net_driver):
    """GetSubnetFromVIP looks up the subnet referenced by the VIP."""
    network_driver = mock.MagicMock()
    mock_get_net_driver.return_value = network_driver

    network_tasks.GetSubnetFromVIP().execute(self.load_balancer_mock)

    network_driver.get_subnet.assert_called_once_with(LB.vip.subnet_id)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_plug_vip_amphora(self, mock_session, mock_lb_get, mock_get,
mock_get_net_driver):
"""PlugVIPAmphora plugs the AAP port for the DB LB/amphora on the subnet."""
mock_driver = mock.MagicMock()
# The task consumes a dict-form amphora (id + lb network ip).
amphora = {constants.ID: AMPHORA_ID,
constants.LB_NETWORK_IP: IP_ADDRESS}
mock_lb_get.return_value = LB
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
net = network_tasks.PlugVIPAmphora()
mockSubnet = mock_driver.get_subnet()
net.execute(self.load_balancer_mock, amphora, mockSubnet)
mock_driver.plug_aap_port.assert_called_once_with(
LB, LB.vip, self.db_amphora_mock, mockSubnet)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_revert_plug_vip_amphora(self, mock_session, mock_lb_get, mock_get,
mock_get_net_driver):
"""Reverting PlugVIPAmphora unplugs the AAP port that execute plugged."""
mock_driver = mock.MagicMock()
mock_lb_get.return_value = LB
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
net = network_tasks.PlugVIPAmphora()
mockSubnet = mock.MagicMock()
amphora = {constants.ID: AMPHORA_ID,
constants.LB_NETWORK_IP: IP_ADDRESS}
# revert() receives the result of execute() (here AMPS_DATA[0] as a dict)
# plus the original inputs.
net.revert(AMPS_DATA[0].to_dict(), self.load_balancer_mock,
amphora, mockSubnet)
mock_driver.unplug_aap_port.assert_called_once_with(
LB.vip, self.db_amphora_mock, mockSubnet)
@mock.patch('octavia.controller.worker.v2.tasks.network_tasks.DeletePort.'
'update_progress')
def test_delete_port(self, mock_update_progress, mock_get_net_driver):
"""DeletePort: no-op on None, retries on failure, and in passive-failure
mode falls back to admin-downing the port instead of raising.

The side_effect sequences below are consumed in order across the five
scenarios, so the scenario order is load-bearing — do not reorder.
"""
PORT_ID = uuidutils.generate_uuid()
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_driver.delete_port.side_effect = [
mock.DEFAULT, exceptions.OctaviaException('boom'), mock.DEFAULT,
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom')]
mock_driver.admin_down_port.side_effect = [
mock.DEFAULT, exceptions.OctaviaException('boom')]
net_task = network_tasks.DeletePort()
# Limit the retry attempts for the test run to save time
net_task.execute.retry.stop = tenacity.stop_after_attempt(2)
# Test port ID is None (no-op)
net_task.execute(None)
mock_update_progress.assert_not_called()
mock_driver.delete_port.assert_not_called()
# Test successful delete
mock_update_progress.reset_mock()
mock_driver.reset_mock()
net_task.execute(PORT_ID)
mock_update_progress.assert_called_once_with(0.5)
mock_driver.delete_port.assert_called_once_with(PORT_ID)
# Test exception and successful retry
mock_update_progress.reset_mock()
mock_driver.reset_mock()
net_task.execute(PORT_ID)
mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
mock.call(PORT_ID)])
# Test passive failure
mock_update_progress.reset_mock()
mock_driver.reset_mock()
net_task.execute(PORT_ID, passive_failure=True)
mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
mock.call(PORT_ID)])
mock_driver.admin_down_port.assert_called_once_with(PORT_ID)
# Test passive failure admin down failure
mock_update_progress.reset_mock()
mock_driver.reset_mock()
mock_driver.admin_down_port.reset_mock()
net_task.execute(PORT_ID, passive_failure=True)
mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
mock.call(PORT_ID)])
mock_driver.admin_down_port.assert_called_once_with(PORT_ID)
# Test non-passive failure
mock_update_progress.reset_mock()
mock_driver.reset_mock()
mock_driver.admin_down_port.reset_mock()
mock_driver.admin_down_port.side_effect = [
exceptions.OctaviaException('boom')]
self.assertRaises(exceptions.OctaviaException, net_task.execute,
PORT_ID)
mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
mock.call(PORT_ID)])
mock_driver.admin_down_port.assert_not_called()
def test_create_vip_base_port(self, mock_get_net_driver):
"""CreateVIPBasePort: execute creates the AAP base port; revert deletes it
(and swallows delete failures) unless this task itself failed.

The create_port/delete_port side_effect lists are consumed in order
across the scenarios below — do not reorder them.
"""
AMP_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
VIP_NETWORK_ID = uuidutils.generate_uuid()
VIP_QOS_ID = uuidutils.generate_uuid()
VIP_SG_ID = uuidutils.generate_uuid()
VIP_SUBNET_ID = uuidutils.generate_uuid()
VIP_IP_ADDRESS = '203.0.113.81'
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
vip_dict = {constants.IP_ADDRESS: VIP_IP_ADDRESS,
constants.NETWORK_ID: VIP_NETWORK_ID,
constants.QOS_POLICY_ID: VIP_QOS_ID,
constants.SUBNET_ID: VIP_SUBNET_ID}
port_mock = mock.MagicMock()
port_mock.id = PORT_ID
mock_driver.create_port.side_effect = [
port_mock, exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom')]
mock_driver.delete_port.side_effect = [mock.DEFAULT, Exception('boom')]
net_task = network_tasks.CreateVIPBasePort()
# Limit the retry attempts for the test run to save time
net_task.execute.retry.stop = tenacity.stop_after_attempt(2)
# Test execute
result = net_task.execute(vip_dict, VIP_SG_ID, AMP_ID)
self.assertEqual(port_mock.to_dict(), result)
mock_driver.create_port.assert_called_once_with(
VIP_NETWORK_ID, name=constants.AMP_BASE_PORT_PREFIX + AMP_ID,
fixed_ips=[{constants.SUBNET_ID: VIP_SUBNET_ID}],
secondary_ips=[VIP_IP_ADDRESS], security_group_ids=[VIP_SG_ID],
qos_policy_id=VIP_QOS_ID)
# Test execute exception
mock_driver.reset_mock()
self.assertRaises(exceptions.OctaviaException, net_task.execute,
vip_dict, None, AMP_ID)
# Test revert when this task failed
mock_driver.reset_mock()
net_task.revert(failure.Failure.from_exception(Exception('boom')),
vip_dict, VIP_SG_ID, AMP_ID)
mock_driver.delete_port.assert_not_called()
# Test revert
mock_driver.reset_mock()
net_task.revert([port_mock], vip_dict, VIP_SG_ID, AMP_ID)
mock_driver.delete_port.assert_called_once_with(PORT_ID)
# Test revert exception
mock_driver.reset_mock()
net_task.revert([port_mock], vip_dict, VIP_SG_ID, AMP_ID)
mock_driver.delete_port.assert_called_once_with(PORT_ID)
@mock.patch('time.sleep')
def test_admin_down_port(self, mock_sleep, mock_get_net_driver):
"""AdminDownPort: execute admin-downs the port and tolerates lookup
failures; revert re-enables the port unless this task itself failed.

set_port_admin_state_up/get_port side_effect sequences are consumed in
order across the scenarios below — do not reorder them.
"""
PORT_ID = uuidutils.generate_uuid()
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
port_down_mock = mock.MagicMock()
port_down_mock.status = constants.DOWN
port_up_mock = mock.MagicMock()
port_up_mock.status = constants.UP
mock_driver.set_port_admin_state_up.side_effect = [
mock.DEFAULT, net_base.PortNotFound, mock.DEFAULT, mock.DEFAULT,
Exception('boom')]
mock_driver.get_port.side_effect = [port_down_mock, port_up_mock]
net_task = network_tasks.AdminDownPort()
# Test execute
net_task.execute(PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
False)
mock_driver.get_port.assert_called_once_with(PORT_ID)
# Test passive fail on port not found
mock_driver.reset_mock()
net_task.execute(PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
False)
mock_driver.get_port.assert_not_called()
# Test passive fail on port stays up
mock_driver.reset_mock()
net_task.execute(PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
False)
mock_driver.get_port.assert_called_once_with(PORT_ID)
# Test revert when this task failed
mock_driver.reset_mock()
net_task.revert(failure.Failure.from_exception(Exception('boom')),
PORT_ID)
mock_driver.set_port_admin_state_up.assert_not_called()
# Test revert
mock_driver.reset_mock()
net_task.revert(None, PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
True)
# Test revert exception passive failure
mock_driver.reset_mock()
net_task.revert(None, PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
True)
@mock.patch('octavia.common.utils.get_vip_security_group_name')
def test_get_vip_security_group_id(self, mock_get_sg_name,
mock_get_net_driver):
"""GetVIPSecurityGroupID resolves the LB's SG by conventional name;
returns None (or raises) when missing, depending on sec_grp_enabled.

get_security_group side effects are consumed in order across the four
scenarios below — do not reorder them.
"""
LB_ID = uuidutils.generate_uuid()
SG_ID = uuidutils.generate_uuid()
SG_NAME = 'fake_SG_name'
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_get_sg_name.return_value = SG_NAME
sg_mock = mock.MagicMock()
sg_mock.id = SG_ID
mock_driver.get_security_group.side_effect = [
sg_mock, None, net_base.SecurityGroupNotFound,
net_base.SecurityGroupNotFound]
net_task = network_tasks.GetVIPSecurityGroupID()
# Test execute
result = net_task.execute(LB_ID)
mock_driver.get_security_group.assert_called_once_with(SG_NAME)
mock_get_sg_name.assert_called_once_with(LB_ID)
# Test execute with empty get subnet response
mock_driver.reset_mock()
mock_get_sg_name.reset_mock()
result = net_task.execute(LB_ID)
self.assertIsNone(result)
mock_get_sg_name.assert_called_once_with(LB_ID)
# Test execute no security group found, security groups enabled
mock_driver.reset_mock()
mock_get_sg_name.reset_mock()
mock_driver.sec_grp_enabled = True
self.assertRaises(net_base.SecurityGroupNotFound, net_task.execute,
LB_ID)
mock_driver.get_security_group.assert_called_once_with(SG_NAME)
mock_get_sg_name.assert_called_once_with(LB_ID)
# Test execute no security group found, security groups disabled
mock_driver.reset_mock()
mock_get_sg_name.reset_mock()
mock_driver.sec_grp_enabled = False
result = net_task.execute(LB_ID)
self.assertIsNone(result)
mock_driver.get_security_group.assert_called_once_with(SG_NAME)
mock_get_sg_name.assert_called_once_with(LB_ID)
|
en
| 0.856245
|
# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Test vrrp_port_id is None # Test with vrrp_port_id # Test with one amp and no pools, nothing plugged # Delta should be empty # Pool mock should be configured explicitly for each test # Test with one amp and one pool but no members, nothing plugged # Delta should be empty # Test with one amp and one pool and one member, nothing plugged # Delta should be one additional subnet to plug # Test with one amp and one pool and one member, already plugged # Delta should be empty # Test with one amp and one pool and one member, wrong network plugged # Delta should be one network to add and one to remove # Test with one amp and one pool and no members, one network plugged # Delta should be one network to remove # revert # No exception # No exception # Do a test with a general exception in case behavior changes # No exception # Revert # revert # Do a test with a general exception in case behavior changes # revert # revert with exception # execute # revert # execute # revert # revert # revert exception # Limit the retry attempts for the test run to save time # Test port ID is None (no-op) # Test successful delete # Test exception and successful retry # Test passive failure # Test passive failure admin down failure # Test non-passive failure # Limit the retry attempts for the test run to save time # Test execute # Test execute exception # Test revert when this task failed # Test revert 
# Test revert exception # Test execute # Test passive fail on port not found # Test passive fail on port stays up # Test revert when this task failed # Test revert # Test revert exception passive failure # Test execute # Test execute with empty get subnet response # Test execute no security group found, security groups enabled # Test execute no security group found, security groups disabled
| 1.398157
| 1
|
modules/models/example_loader.py
|
vwegmayr/entrack
| 1
|
6628241
|
<gh_stars>1-10
"""This module contains functionality to load tractography training data.
Credit goes to <NAME>
Todo:
Update doc
"""
import os
import numpy as np
import nibabel as nib
import functools
print = functools.partial(print, flush=True)
def aff_to_rot(aff):
    """Extract the pure rotation from a 4x4 affine matrix.

    The upper-left 3x3 of the affine is a rotation composed with per-axis
    scaling; dividing each column by its norm strips the scaling.

    Args:
        aff: The affine matrix (4, 4).

    Returns:
        rotation: The (3, 3) rotation part of the affine.
    """
    linear = aff[:3, :3]
    column_scales = np.linalg.norm(linear, axis=0)
    rotation = linear / column_scales
    # A proper (or improper) rotation must have |det| == 1.
    assert np.isclose(abs(np.linalg.det(rotation)), 1.0)
    return rotation
class Examples(object):
"""Base Class for loading tractography training samples.
This class provides functionality to create blocks of diffusion-data and
the associated fiber information. The diffusion data block represents the
input to any learning algorithm, whereas the fiber information serves as
label.
Classes derived from this base class handle different forms of input and
labels. For instance, the input can be raw diffusion measurements or
derived representations such as diffusion tensor or spherical harmonics.
Labels describe the local fiber flow which is the subject of prediction.
Subclasses:
PointExamples
Attributes:
fibers: List of streamlines. Each streamline is a list
with shape (fiber_length,3) which contains the x,y,z coordinates of
each point in the fiber.
fiber_header: Struct array with info about the loaded track file. See
http://trackvis.org/docs/?subsect=fileformat for more information.
brain_file: Proxy to the diffusion data file, which is assumed to be of
nifti format.
brain_data: MemMap to the diffusion data stored in the nifti file.
brain_header: Struct array with information about the loaded diffusion
data file. See https://brainder.org/2012/09/23/the-nifti-file-format/
for more information.
voxel_size: List which contains the voxel spacing in x, y, z directions.
Units are Millimeter.
block_size: Integer which indicates the entire length of the diffusion
data block in one dimension. E.g. if 7x7x7 blocks are considered,
then the block_size is 7. Should be odd.
train_labels: List which contains all training fiber labels which are
parsed from the track file. Each label is a dictionary whose keys depend
on the subclass.
eval_labels: List which contains all evaluation fiber labels which are
parsed from the track file. Each label is a dictionary whose keys depend
on the subclass.
block_length: Integer which indicates half the block_size minus one.
E.g. if 7x7x7 blocks are considered, the block_length is 3, i.e. the
distance from the center in each direction in voxels.
voxel_dimension: List as x,y,z dimensions of brain data.
"""
def __init__(self,
             nii_file,
             trk_file,
             block_size,
             num_eval_examples,
             load_only_n_samples=False):
    """Load the input files and initialize fields.

    Args:
        nii_file: List of paths to nifti files used as diffusion data input.
        trk_file: List of paths to trackvis files used for the labels; each
            should be derived from the data in the corresponding nii_file.
        block_size: Integer (odd) which indicates the desired data block
            size.
        num_eval_examples: Integer which indicates approximate number of
            evaluation examples (and therefore labels) loaded from the track
            file. Actual amount can vary slightly because whole fibers are
            added at a time.
        load_only_n_samples: Optional cap on the number of labels parsed
            per track file.
    """
    # NOTE(review): the asserts below require lists, yet the branches guard
    # against None inputs — the None branches look unreachable as written;
    # confirm intended call pattern before tightening.
    assert isinstance(nii_file, list)
    assert isinstance(trk_file, list)
    assert len(nii_file) == len(trk_file)
    print("Loading {} brains".format(len(nii_file)))
    self.voxel_size = []
    if nii_file is not None:
        self.brain_file = [nib.load(file) for file in nii_file]
        self.brain_data = [brain_file.get_data() for brain_file in self.brain_file]
        self.brain_header = [brain_file.header.structarr for brain_file in self.brain_file]
        self.voxel_size = [brain_header["pixdim"][1:4] for brain_header in self.brain_header]
        self.voxel_dimension = [np.shape(brain_data)[3] for brain_data in self.brain_data]
        nii_aff = [brain_file.affine for brain_file in self.brain_file]
        # All brains must share the same number of diffusion channels.
        assert all([brain.shape[-1] == self.brain_data[0].shape[-1] for
                    brain in self.brain_data])
    if block_size is not None:
        self.block_size = block_size
    if trk_file is not None:
        self.fiber_header = []
        self.n_labels = []
        self.train_labels = []
        # BUG FIX: trk_aff was previously computed from self.fiber_header
        # *before* any header had been appended, so it was always an empty
        # list (and the later trk_aff.diagonal() call could never work).
        # Build the affine list as each header is read instead.
        trk_aff = []
        for i, file in enumerate(trk_file):
            fibers, fiber_header = nib.trackvis.read(file,
                                                     points_space="voxel")
            fibers = [fiber[0] for fiber in fibers]
            self.fiber_header.append(fiber_header)
            aff = nib.trackvis.aff_from_hdr(fiber_header)
            trk_aff.append(aff)
            if nii_file is None:
                # No nifti available: take the voxel spacing from the
                # track-file affine's diagonal.
                self.voxel_size.append(aff.diagonal()[:3])
            train_labels, eval_labels = self.initialize_labels(
                fibers,
                voxel_size=self.voxel_size[i],
                num_eval_examples=0,
                load_only_n_samples=load_only_n_samples)
            self.train_labels.append(train_labels)
            self.n_labels.append(len(train_labels))
    else:
        self.fibers, self.fiber_header = None, None
        self.train_labels, self.eval_labels = [], []
    self.eval_set = None
    # Keep one affine per brain; when both sources are present they must
    # agree.
    if nii_file is not None and trk_file is not None:
        assert all([np.allclose(na, ta) for na, ta in zip(nii_aff, trk_aff)])
        self.affine = nii_aff
    elif trk_file is not None:
        self.affine = trk_aff
    elif nii_file is not None:
        self.affine = nii_aff
def get_train_batch(self, requested_num_examples):
"""Return a dictionary of examples.
Main method for external applications. Abstract here; subclasses
(e.g. PointExamples) provide the implementation.
Args:
requested_num_examples: Integer which indicates desired number of
examples. Should be smaller or equal to num_train_examples else
warning is raised and num_train_examples are returned.
Returns:
A dictionary with keys "center", "incoming", "outgoing" and
"data_block". Each value is a list of length requested_num_examples.
The i-th element of e.g. list "dataBlock" contains the data_block for
the i-th example:
examples["center"][i] = [x,y,z] or one_hot code
examples["incoming"][i] = [x,y,z] or one_hot code
examples["outgoing"][i] = [x,y,z] or one_hot code
examples["data_block"][i] = np.array
"""
pass
def get_eval_set(self):
"""Return the evaluation set.
Abstract placeholder: the base implementation does nothing and returns
None; subclasses are expected to override it.
Returns:
A dictionary of evaluation examples. The structure is the same as
for a training batch. The total number of evaluation samples is
given by num_eval_examples.
"""
def initialize_labels(self, fibers, num_eval_examples, load_only_n_samples=False):
"""Parse labels from track file.
For internal use. Abstract here; overridden by subclasses.
NOTE(review): __init__ calls this with a voxel_size keyword argument,
which only the PointExamples override accepts — this base signature is
out of sync with the call site; confirm before relying on the base class
directly.
Returns:
Tuple of two lists of training and evaluation labels. Each label is
a dictionary which contains information about fiber flow. The keys of
a label depend on the subclass.
"""
pass
@staticmethod
def points_to_one_hot(center, point):
"""Calculate one-hot code for neighbor voxels.
For internal use.
Args:
center: List [x,y,z] which contains the coordinates of the voxel
approached or left by a fiber.
point: List [x,y,z] which contains the coordinates of the neighbor
voxel from where the center voxel is approached or left.
Returns:
Numpy array of shape (27). It encodes either from which neighbor
voxel the a fiber entered the center voxel or to which neighbor
voxel the fiber left the center voxel.
"""
center_voxel = np.round(center).astype(int)
if not np.array_equal(point, np.zeros(3)):
point_voxel = np.round(point).astype(int)
relative = point_voxel - center_voxel
else:
relative = np.zeros(3, dtype=np.int64)
num = 13 + np.dot([1, -3, -9], relative)
one_hot = np.zeros(27)
one_hot[num] = 1
return one_hot
@staticmethod
def points_to_relative(_from, to):
"""Calculate relative direction from global coordinates.
For internal use.
Args:
_from: List [x,y,z] which contains the coordinates of the voxel
starting point of a fiber segment.
to: List [x,y,z] which contains the coordinates of the voxel
starting point of a fiber segment
Returns:
Numpy array of shape (3) of the relative direction from "_from" to "to".
"""
if not np.array_equal(_from, np.zeros(3)) and not np.array_equal(to, np.zeros(3)):
relative = np.asarray(to) - np.asarray(_from)
if np.linalg.norm(relative) < 1e-9:
raise ValueError("Norm of relative vector is vanishingly small.")
return relative / np.linalg.norm(relative)
else:
return np.zeros(3)
@staticmethod
def build_datablock(
data,
block_size,
center_point,
incoming_point,
outgoing_point,
label_type,
affine):
"""Creates an example with all the label information and data added.
Args:
data: MemMap to the diffusion data stored in the nifti file.
block_size: Integer which indicates the entire length of the diffusion
data block in one dimension. E.g. if 7x7x7 blocks are considered,
then the block_size is 7. Should be odd.
center_point: List of [x,y,z] of coordinate where fiber goes though.
incoming_point: List of [x,y,z] of coordinate where fiber comes from.
For label_type "point" this is a sequence of previous points.
outgoing_point: List of [x,y,z] of coordinate where fiber goes to.
label_type: String which indicates the desired label type which are
described in the docstring of PointExamples.
affine: (4, 4) affine of the volume; its rotation part is applied to
relative directions for label_type "point".
Returns: A dictionary with keys "center", "incoming", "outgoing" and
"data_block". Each value is a list of length requested_num_examples.
example["center"] = np.array [x,y,z] or one_hot code
example["incoming"] = np.array [x,y,z] or one_hot code
example["outgoing"] = np.array [x,y,z] or one_hot code
example["data_block"] = np.array
For an out-of-bounds center voxel the "data_block" key is omitted and
a warning is printed.
"""
example = {}
voxel = np.round(center_point).astype(int)
rot = aff_to_rot(affine)
if label_type == "one_hot":
example["center"] = np.round(center_point).astype(int)
example["incoming"] = Examples.points_to_one_hot(
center_point,
incoming_point)
example["outgoing"] = Examples.points_to_one_hot(
center_point,
outgoing_point)
elif label_type == "point":
example["center"] = np.array(center_point)
# First incoming direction: from the most recent previous point to
# the center; further history is appended as consecutive directions.
example["incoming"] = Examples.points_to_relative(
incoming_point[0],
center_point)
example["incoming"] = rot.dot(example["incoming"])
for i in range(len(incoming_point) - 1):
next_incoming = Examples.points_to_relative(
incoming_point[i + 1],
incoming_point[i])
next_incoming = rot.dot(next_incoming)
example["incoming"] = np.append(example["incoming"],
next_incoming)
example["outgoing"] = Examples.points_to_relative(
center_point,
outgoing_point)
example["outgoing"] = rot.dot(example["outgoing"])
data_shape = np.shape(data)
example["data_block"] = np.zeros((block_size,
block_size,
block_size,
data_shape[3]))
if (voxel[0] < 0 or voxel[0] >= data_shape[0] or
voxel[1] < 0 or voxel[1] >= data_shape[1] or
voxel[2] < 0 or voxel[2] >= data_shape[2]):
print("Warning: voxel out of bounds: ({}, {}, {}), data: (0:{}, 0:{}, 0:{})".format(
voxel[0], voxel[1], voxel[2], data_shape[0], data_shape[1], data_shape[2]))
return example
block_length = int(np.floor(block_size / 2))
# Pad data if block is out of bounds
start = [voxel[0] - block_length,
voxel[1] - block_length,
voxel[2] - block_length]
end = [voxel[0] + block_length + 1,
voxel[1] + block_length + 1,
voxel[2] + block_length + 1]
# Copy the in-bounds part of the cube into the matching region of the
# zero-initialized block, leaving zero padding where the cube falls
# outside the volume.
example["data_block"][
max(-(start[0]), 0):(block_size - max(end[0] - data_shape[0], 0)),
max(-(start[1]), 0):(block_size - max(end[1] - data_shape[1], 0)),
max(-(start[2]), 0):(block_size - max(end[2] - data_shape[2], 0)),
:] = np.array(data[
max(start[0], 0): min(end[0], data_shape[0]),
max(start[1], 0): min(end[1], data_shape[1]),
max(start[2], 0): min(end[2], data_shape[2]),
:])
return example
@staticmethod
def get_block(nii_file,
              block_size,
              point):
    """Extract a cubic data block of side block_size centered on *point*.

    Args:
        nii_file: Path to a nifti file, or an already-loaded
            Nifti1Image/Nifti2Image.
        block_size: Integer (odd) side length of the returned cube.
        point: [x, y, z] voxel coordinates of the block center.

    Returns:
        Numpy array of shape (block_size, block_size, block_size, channels),
        zero-padded where the cube reaches outside the volume; all zeros
        (with a printed warning) when the center voxel is out of bounds.
    """
    # TODO: Reduce code duplication in get_datablock
    if isinstance(nii_file, str):
        nii_file = nib.load(nii_file)
    assert isinstance(nii_file,
                      (nib.nifti1.Nifti1Image, nib.nifti2.Nifti2Image))
    # BUG FIX: `data` was referenced below without ever being assigned,
    # raising NameError on every call. Read it from the image.
    data = nii_file.get_data()
    voxel = np.round(point).astype(int)
    data_shape = np.shape(data)
    block = np.zeros((block_size,
                      block_size,
                      block_size,
                      data_shape[3]))
    if (voxel[0] < 0 or voxel[0] >= data_shape[0] or
            voxel[1] < 0 or voxel[1] >= data_shape[1] or
            voxel[2] < 0 or voxel[2] >= data_shape[2]):
        print("Warning: voxel out of bounds: ({}, {}, {}), "
              "data: (0:{}, 0:{}, 0:{})".format(
                  voxel[0], voxel[1], voxel[2], data_shape[0],
                  data_shape[1], data_shape[2]))
        return block
    block_length = int(np.floor(block_size / 2))
    # Pad data if block is out of bounds
    start = [voxel[0] - block_length,
             voxel[1] - block_length,
             voxel[2] - block_length]
    end = [voxel[0] + block_length + 1,
           voxel[1] + block_length + 1,
           voxel[2] + block_length + 1]
    # Copy the in-bounds part of the cube; out-of-bounds regions stay zero.
    block[
        max(-(start[0]), 0):(block_size - max(end[0] - data_shape[0], 0)),
        max(-(start[1]), 0):(block_size - max(end[1] - data_shape[1], 0)),
        max(-(start[2]), 0):(block_size - max(end[2] - data_shape[2], 0)),
        :] = np.array(data[
            max(start[0], 0): min(end[0], data_shape[0]),
            max(start[1], 0): min(end[1], data_shape[1]),
            max(start[2], 0): min(end[2], data_shape[2]),
            :])
    return block
class PointExamples(Examples):
"""Class which represents fiber point examples.
Labels are fiber points: each example carries a "center" voxel, one or
more "incoming" direction vectors and an "outgoing" target point.
Todo:
Update doc
"""
def __init__(self,
nii_file=None,
trk_file=None,
block_size=None,
n_incoming=None,
every_n_fibers=None,
load_only_n_fibers=False,
load_only_n_samples=False,
num_eval_examples=0,
data_corrupt_percent=0.0,
example_percent=1.0,
min_fiber_length=0,
ignore_start_point=False,
ignore_stop_point=True,
cache_examples=False,
V1=None):
"""Load the input files and initialize fields.
n_incoming is the number of previous fiber points kept as the incoming
direction history; every_n_fibers subsamples fibers; min_fiber_length
(mm) filters short fibers; data_corrupt_percent randomizes that fraction
of outgoing labels (see initialize_labels). Remaining arguments are
forwarded to Examples.__init__.
"""
self.min_length = min_fiber_length
self.ignore_start_point = ignore_start_point
self.ignore_stop_point = ignore_stop_point
self.n_incoming = n_incoming
self.every_n_fibers = every_n_fibers
self.eval_fibers = []
self.train_generator = None
self.eval_generator = None
self.cache_examples = cache_examples
self.data_corrupt_percent = data_corrupt_percent
self.example_percent = example_percent
self.V1 = V1
# NOTE(review): load_only_n_fibers is accepted but never stored or used
# in this chunk — confirm whether it is dead.
Examples.__init__(self,
nii_file,
trk_file,
block_size,
num_eval_examples,
load_only_n_samples=load_only_n_samples)
# self.check_empty_data(warning_only=True)
def initialize_labels(self,
fibers,
voxel_size,
num_eval_examples,
augment_reverse_fibers=True,
load_only_n_samples=False):
"""Build per-point training/eval labels from the given fibers.
Filters fibers shorter than self.min_length (in mm, using voxel_size),
optionally subsamples every n-th fiber, then emits one label per fiber
point: {"center": point, "incoming": up to n_incoming previous points
(zero-padded), "outgoing": next point or zeros at the fiber end}. With
augment_reverse_fibers each point also yields a reversed-direction label.
Optionally corrupts a fraction of outgoing labels with random directions.
Returns:
(train_labels, eval_labels) — both shuffled lists of label dicts.
"""
print("Filtering Fibers...")
if self.min_length > 0:
fibers_filtered = []
for fiber in fibers:
fiber_length_mm = 0
for j in range(1, len(fiber)):
fiber_length_mm += np.linalg.norm(
(fiber[j] - fiber[j - 1]) * voxel_size)
# Stop summing as soon as the fiber qualifies.
if fiber_length_mm > self.min_length:
fibers_filtered.append(fiber)
break
else:
fibers_filtered = fibers
if self.every_n_fibers is not None:
fibers_filtered = [fiber for i, fiber in enumerate(fibers_filtered) if i % self.every_n_fibers == 0]
np.random.shuffle(fibers_filtered)
print("Using {}/{} fibers longer than {}mm".format(len(fibers_filtered), len(fibers),
self.min_length))
label_list = []
eval_labels = []
for fiber in fibers_filtered:
for j in range(self.ignore_start_point, len(fiber) - self.ignore_stop_point):
label = {"center": fiber[j]}
# Incoming history: the previous n_incoming points, most recent
# first, zero-padded near the fiber start.
start = max(j - self.n_incoming, 0)
end = max(j, 0)
label["incoming"] = fiber[start:end][::-1]
label["incoming"] = np.append(
label["incoming"],
np.zeros((self.n_incoming - len(label["incoming"]), 3)),
0)
if j == len(fiber) - 1:
label["outgoing"] = np.zeros(3)
else:
label["outgoing"] = fiber[j + 1]
label_list.append(label)
if augment_reverse_fibers:
# TODO: consider ignoring start and end
# Mirror the label as if the fiber were traversed backwards.
start = min(j + 1, len(fiber))
end = min(j + 1 + self.n_incoming, len(fiber))
incoming = fiber[start:end]
incoming = np.append(incoming,
np.zeros((self.n_incoming - len(incoming), 3)),
0)
reverse_label = {"center": label["center"], "incoming": incoming,
"outgoing": label["incoming"][0]}
label_list.append(reverse_label)
if load_only_n_samples and len(label_list) >= load_only_n_samples:
break
# Whole fibers are reserved for evaluation until enough labels exist.
if not eval_labels and num_eval_examples > 0:
self.eval_fibers.append(fiber)
if len(label_list) >= num_eval_examples and not eval_labels \
and num_eval_examples > 0:
eval_labels = label_list
label_list = []
if len(eval_labels) < num_eval_examples:
raise ValueError("PointExamples: Requested more evaluation examples than available")
print("finished loading, now shuffle")
train_labels = label_list
np.random.shuffle(eval_labels)
np.random.shuffle(train_labels)
#if self.example_percent < 1.0:
# # Subsample the labels
# n_old = len(train_labels)
# n_wanted = np.round(n_old * self.example_percent).astype(int)
# train_labels = train_labels[0:n_wanted] # Subsample
# n_new = len(train_labels)
# print("Training labels are {} / {}, i.e. {:3.2f} %".format(n_new,
# n_old,
# n_new / n_old * 100))
print("Generated {} train and {} eval fiber labels\n".format(len(train_labels),
len(eval_labels)))
# NOTE: Here is the corruption of the training labels.
# First, we calculate how many labels have to be corrupted. Then, this number of labels is
# corrupted by removing the outgoing label and in its place putting a new random one that
# has been obtained by adding to the 'center' a random unit vector in R3.
# NOTE: Labels have already been shuffled, so this can be carried on in sequential order.
if self.data_corrupt_percent > 0.0:
n_to_corrupt = int(np.floor(len(train_labels) * self.data_corrupt_percent))
print("DEBUG: Corrupting data. Corruption number is ",
n_to_corrupt,
"on a total of",
len(train_labels))
for idx in range(n_to_corrupt):
cur_label = train_labels[idx]
cur_center = cur_label['center']
random_v = np.random.normal(size=3)
random_v = np.divide(random_v, np.linalg.norm(random_v))
new_outgoing = cur_center + random_v
cur_label['outgoing'] = new_outgoing
train_labels[idx] = cur_label # QUESTION: is this really necessary?
# Done with the corruption
return train_labels, eval_labels
def example_generator(self, labels, label_type):
if label_type not in ["one_hot", "point"]:
print("ERROR: PointExamples: build_batch: Unknown label_type")
for label in labels:
example = Examples.build_datablock(self.brain_data, self.block_size,
label["center"], label["incoming"],
label["outgoing"], label_type, self.affine)
yield example
def get_generator(self):
n_labels_min = min(self.n_labels)
n_brains = len(self.n_labels)
print("n_labels: {}".format(self.n_labels))
def generator():
for i in range(n_labels_min):
for j in range(n_brains):
example = Examples.build_datablock(self.brain_data[j],
self.block_size,
self.train_labels[j][i]["center"],
self.train_labels[j][i]["incoming"],
self.train_labels[j][i]["outgoing"],
"point",
self.affine[j])
yield ({"blocks": example["data_block"],
"incoming": example["incoming"].reshape(-1, 3)},
example["outgoing"])
return generator
def get_batch(self, generator, requested_num_examples=0):
""" Return a dictionary of examples.
Args:
requested_num_examples: Integer which indicates desired number of
examples. Should be smaller or equal to num_train_examples else
warning is raised and num_train_examples are returned.
generator: Generator from which to pull examples from.
Returns:
A dictionary with keys "center", "incoming", "outgoing" and
"data_block". Each value is a list of length requested_num_examples.
The i-th element of e.g. list "dataBlock" contains the data_block
array for the i-th example:
examples["center"][i] = [x,y,z] or one_hot code
examples["incoming"][i] = [x,y,z] or one_hot code
examples["outgoing"][i] = [x,y,z] or one_hot code
examples["data_block"][i] = np.array
"""
batch = {
"center": [],
"incoming": [],
"outgoing": [],
"blocks": []
}
for i in range(requested_num_examples):
example = next(generator)
# Add example to examples by appending individual lists
batch["incoming"].append(example["incoming"])
batch["blocks"].append(example["data_block"])
batch["outgoing"].append(example["outgoing"])
return batch
def get_train_batch(self, requested_num_examples, label_type="point"):
if self.train_generator is None:
self.train_generator = self.example_generator(self.train_labels, label_type)
return self.get_batch(self.train_generator, requested_num_examples)
def get_eval_batch(self, requested_num_examples, label_type="point"):
if self.eval_generator is None:
self.eval_generator = self.example_generator(self.eval_labels, label_type)
return self.get_batch(self.eval_generator, requested_num_examples)
def get_eval_set(self, label_type="point"):
# only calculate once
if self.eval_set is None:
eval_generator = self.example_generator(self.eval_labels, label_type)
self.eval_set = self.get_batch(eval_generator, len(self.eval_labels))
return self.eval_set
def print_statistics(self):
print("Statistics for evalution set:")
eval_set = self.get_eval_set()
incoming = np.array(eval_set["incoming"])[:, 0:3]
outgoing = np.array(eval_set["outgoing"])
dot_prod = np.sum(incoming * outgoing, axis=1)
dot_loss = 1 - np.average(dot_prod)
print("Average Dot Loss (1-<incoming, outgoing>): %f" % dot_loss)
avg_angle = np.average(np.arccos(np.clip(dot_prod, -1, 1))) * 180 / np.pi
print("Average Angle: %f" % avg_angle)
if not self.ignore_start_point:
filter = [not np.array_equal(vec, [0, 0, 0]) for vec in incoming]
dot_loss_filtered = 1 - np.average(dot_prod[filter])
print("Loss without starting fibers: %f" % dot_loss_filtered)
avg_angle = np.average(np.arccos(np.clip(dot_prod[filter], -1, 1))) * 180 / np.pi
print("Angle without starting fibers: %f" % avg_angle)
print("-----------------------------")
    def check_alignment(self):
        """Print how well eval outgoing directions align with the principal
        eigenvector at each center voxel (from tensor data or the V1 file)."""
        print("Statistics for eigenvectors of tensors:")
        eval_set = self.get_eval_set()
        outgoing = np.array(eval_set["outgoing"])
        # NOTE(review): PointExamples.get_batch never fills "center", so this
        # appears to rely on a subclass' get_batch — confirm before calling.
        center = np.array(eval_set["center"])
        voxels = np.round(center).astype(int)
        if self.V1 is None:
            # NOTE(review): self.voxel_dimension is a per-brain list, so
            # `!= 6` is always True for lists — verify the intended check.
            if self.voxel_dimension != 6:
                print("Data has wrong dimension to be tensor, skip check")
                return
            tensor = np.array([self.brain_data[voxel[0]][voxel[1]][voxel[2]] for voxel in voxels])
            # NOTE(review): extract_direction is not defined in this module —
            # confirm it is imported elsewhere in the file.
            eigenvec = extract_direction(tensor)
        else:
            # Principal eigenvectors were precomputed and stored in the V1 file.
            eigenvec_data = nib.load(self.V1).get_data()
            eigenvec = [eigenvec_data[voxel[0]][voxel[1]][voxel[2]] for voxel in voxels]
        # take absolute of dot product to ignore ambiguous direction
        dot_prod = np.abs(np.sum(eigenvec * outgoing, axis=1))
        dot_loss = 1 - np.average(dot_prod)
        avg_angle = np.average(np.arccos(np.clip(dot_prod, -1, 1))) * 180 / np.pi
        print("Average Dot Loss (1-<eigenvector, outgoing>): %f" % dot_loss)
        print("Average Angle: %f" % avg_angle)
        print("-----------------------------")
def check_empty_data(self, warning_only=False, threshold=0.05):
empty = 0
data_blocks = self.get_eval_set()["data_block"]
if len(data_blocks) == 0:
return
for data_block in data_blocks:
if np.isclose(data_block, 0.0).all():
empty += 1
percentage = empty / len(data_blocks)
if warning_only:
if percentage > threshold:
print("WARNING: Blocks with empty data: %f" % percentage)
else:
print("Blocks with empty data: %f" % percentage)
class UnsupervisedExamples(PointExamples):
    """PointExamples variant serving unlabeled, flattened data blocks for
    unsupervised training."""

    def __init__(self, nii_file, trk_file, block_size, num_eval_examples):
        """Load input files; all heavy lifting happens in PointExamples."""
        # NOTE(review): the fourth positional parameter of
        # PointExamples.__init__ is `n_incoming`, so `num_eval_examples` is
        # actually forwarded as the number of incoming directions and the
        # evaluation-set size keeps its default. Passing it by keyword would
        # leave n_incoming=None and crash initialize_labels, so the call is
        # kept as-is — confirm the intended mapping before changing it.
        PointExamples.__init__(self, nii_file, trk_file, block_size,
                               num_eval_examples)

    def get_batch(self, generator, requested_num_examples=0):
        """Collate examples from *generator*, flattening the data blocks.

        Args:
            generator: Iterator yielding example dicts with keys "center",
                "incoming", "outgoing" and "data_block".
            requested_num_examples: Number of examples to draw.

        Returns:
            Dict with keys "center", "incoming", "outgoing" and
            "data_block"; each value is a list with one entry per example.
            Data blocks are flattened to 1-D arrays.
        """
        batch = {
            "center": [],
            "incoming": [],
            "outgoing": [],
            "data_block": []
        }
        for _ in range(requested_num_examples):
            example = next(generator)
            # Loop variable renamed (was `list`, shadowing the builtin).
            for key, values in batch.items():
                if key == 'data_block':
                    # Blocks are stored flattened for unsupervised models.
                    values.append(example[key].flatten())
                else:
                    values.append(example[key])
        return batch

    def get_unlabeled_batch(self, generator, requested_num_examples=0):
        """Return a 2-D array of flattened data blocks, without labels."""
        examples = []
        for _ in range(requested_num_examples):
            example = next(generator)
            examples.append(example["data_block"].flatten())
        return np.array(examples)

    def get_train_batches(self, requested_num_examples):
        """Return an array of flattened training examples.

        Args:
            requested_num_examples: Desired number of examples.

        Returns:
            Array with one flattened data block per row; each voxel is
            represented by its channel values (e.g. the 6 upper-diagonal
            tensor entries).
        """
        if self.train_generator is None:
            self.train_generator = self.example_generator(self.train_labels,
                                                          "point")
        return self.get_unlabeled_batch(self.train_generator,
                                        requested_num_examples)

    def get_eval_set(self, label_type="point", unlabeled=False):
        """Return the evaluation examples (computed once, then cached).

        Args:
            label_type: Label type as described in PointExamples.
            unlabeled: If True, return only flattened data blocks (no
                labels), cached separately from the labeled eval set.

        Returns:
            Either the labeled batch dict (see get_batch) or, when
            `unlabeled` is set, a 2-D array of flattened data blocks.
        """
        if unlabeled:
            if (not hasattr(self, 'unlabeled_eval_set')) or \
                    self.unlabeled_eval_set is None:
                eval_generator = self.example_generator(self.eval_labels,
                                                        "point")
                self.unlabeled_eval_set = self.get_unlabeled_batch(
                    eval_generator, len(self.eval_labels))
            ret = self.unlabeled_eval_set
        else:
            if self.eval_set is None:
                eval_generator = self.example_generator(self.eval_labels,
                                                        label_type)
                self.eval_set = self.get_batch(eval_generator,
                                               len(self.eval_labels))
            ret = self.eval_set
        return ret
class TestExamples(object):
    """Usage Demonstration of the Examples class
    Make sure you put valid "tensor.nii" and "fibers.trk" files in the same
    directory as this module.
    """
    def __init__(self):
        # TODO: rework
        # NOTE(review): this demo appears out of date — PointExamples expects
        # lists of paths, and pt_ex.num_train_examples / num_fibers /
        # example_state are not defined anywhere visible in this module.
        # Confirm against the current PointExamples API before running.
        # Create a new PointExamples instance
        path = str(os.path.dirname(
            os.path.abspath(__file__)).split("example_loader")[0]) + "data/"
        pt_ex = PointExamples(
            path + "tensor.nii",
            path + "fibers.trk",
            block_size=3,
            num_eval_examples=1)
        print("Created PointExamples instance with blocksize 3!")
        # Access interesting attributes
        print("num_train_examples: {}".format(pt_ex.num_train_examples))
        print("num_fibers: {}".format(pt_ex.num_fibers))
        # Check that the initial exampleState is indeed zero
        print("Initial example_state: {}".format(pt_ex.example_state))
        # Get a first one-hot point example
        ex1 = pt_ex.get_train_batch(1, label_type="one_hot")
        print("Got one example!")
        # Now the exampleState is one
        print("Now the exampleState is: {}".format(pt_ex.example_state))
        print("Content of the first example:")
        print("center: {}".format(ex1["center"]))
        print("incoming: {}".format(ex1["incoming"]))
        print("outgoing: {}".format(ex1["outgoing"]))
        print("data_block type: {}".format(type(ex1["data_block"][0])))
        print("data_block shape: {}".format(ex1["data_block"][0].shape))
if __name__ == "__main__":
    # Intentionally a no-op; instantiate TestExamples() manually for a demo.
    pass
# --- NOTE(review): a second, duplicated copy of this module follows below; the stray "|" that separated the copies was removed because it is a Python syntax error. ---
"""This module contains functionality to load tractography training data.
Credit goes to the original author (name redacted in this export).
Todo:
Update doc
"""
import os
import numpy as np
import nibabel as nib
import functools
print = functools.partial(print, flush=True)
def aff_to_rot(aff):
    """Extract the pure rotation part of an affine transform.

    The upper-left 3x3 of the affine combines rotation and scaling;
    dividing each column by its norm removes the scaling and leaves the
    rotation.

    Args:
        aff: The affine matrix (4, 4).

    Returns:
        rotation: The (3, 3) matrix corresponding to the rotation in the
            affine.
    """
    linear = aff[0:3, 0:3]
    column_norms = np.linalg.norm(linear, axis=0)
    rotation = np.divide(linear, column_norms)
    # A proper rotation matrix has |det| == 1; anything else means the
    # affine contained shear, not just rotation and scaling.
    assert np.isclose(abs(np.linalg.det(rotation)), 1.0)
    return rotation
class Examples(object):
"""Base Class for loading tractography training samples.
This class provides functionality to create blocks of diffusion-data and
the associated fiber information. The diffusion data block represents the
input to any learning algorithm, whereas the fiber information serves as
label.
Classes derived from this base class handle different forms of input and
labels. For instance, the input can be raw diffusion measurements or
derived representations such as diffusion tensor or spherical harmonics.
Labels describe the local fiber flow which is the subject of prediction.
Subclasses:
PointExamples
Attributes:
fibers: List of streamlines. Each streamline is a list
with shape (fiber_length,3) which contains the x,y,z coordinates of
each point in the fiber.
fiber_header: Struct array with info about the loaded track file. See
http://trackvis.org/docs/?subsect=fileformat for more information.
brain_file: Proxy to the diffusion data file, which is assumed to be of
nifti format.
brain_data: MemMap to the diffusion data stored in the nifti file.
brain_header: Struct array with information about the loaded diffusion
data file. See https://brainder.org/2012/09/23/the-nifti-file-format/
for more information.
voxel_size: List which contains the voxel spacing in x, y, z directions.
Units are Millimeter.
block_size: Integer which indicates the entire length of the diffusion
data block in one dimension. E.g. if 7x7x7 blocks are considered,
then the block_size is 7. Should be odd.
train_labels: List which contains all training fiber labels which are
parsed from the track file. Each label is a dictionary which keys depend
on the subclass.
eval_labels: List which contains all evaluation fiber labels which are
parsed from the track file. Each label is a dictionary which keys depend
on the subclass.
block_length: Integer which indicates half the block_size minus one.
E.g. if 7x7x7 blocks are considered, the block_length is 3, i.e. the
distance from the center in each direction in voxels.
voxel_dimension: List as x,y,z dimensions of brain data.
"""
def __init__(self,
nii_file,
trk_file,
block_size,
num_eval_examples,
load_only_n_samples=False):
"""Load the input files and initialize fields.
Args:
nii_file: Path to the nifti file which is used as diffusion data
input.
trk_file: Path to the trackvis file which is used for the labels,
should be derived from the data represented in the niiFile.
block_size: Integer (odd) which indicates the desired data block
size.
num_eval_examples: Integer which indicates approximate number of
evaluation examples (and therefore labels) loaded from the track
file. Actual amount of evaluation examples can vary slightly
because of adding whole fibers at a time.
"""
assert isinstance(nii_file, list)
assert isinstance(trk_file, list)
assert len(nii_file) == len(trk_file)
print("Loading {} brains".format(len(nii_file)))
self.voxel_size = []
if nii_file is not None:
self.brain_file = [nib.load(file) for file in nii_file]
self.brain_data = [brain_file.get_data() for brain_file in self.brain_file]
self.brain_header = [brain_file.header.structarr for brain_file in self.brain_file]
self.voxel_size = [brain_header["pixdim"][1:4] for brain_header in self.brain_header]
self.voxel_dimension = [np.shape(brain_data)[3] for brain_data in self.brain_data]
nii_aff = [brain_file.affine for brain_file in self.brain_file]
assert all([brain.shape[-1] == self.brain_data[0].shape[-1] for
brain in self.brain_data])
if block_size is not None:
self.block_size = block_size
if trk_file is not None:
# self.fibers = []
self.fiber_header = []
self.n_labels = []
self.train_labels = []
trk_aff = [nib.trackvis.aff_from_hdr(fiber_header) for
fiber_header in self.fiber_header]
for i, file in enumerate(trk_file):
fibers, fiber_header = nib.trackvis.read(file,
points_space="voxel")
fibers = [fiber[0] for fiber in fibers]
# self.fibers.append(fibers)
self.fiber_header.append(fiber_header)
if nii_file is None:
self.voxel_size.append(trk_aff.diagonal()[:3])
train_labels, eval_labels = self.initialize_labels(
fibers,
voxel_size=self.voxel_size[i],
num_eval_examples=0,
load_only_n_samples=load_only_n_samples)
self.train_labels.append(train_labels)
self.n_labels.append(len(train_labels))
else:
self.fibers, self.fiber_header = None, None
self.train_labels, self.eval_labels = [], []
self.eval_set = None
if nii_file is not None and trk_file is not None:
assert all([np.allclose(nii_aff, trk_aff) for nii_aff, trk_aff in zip(nii_aff, trk_aff)])
self.affine = nii_aff
elif trk_file is not None:
self.affine = trk_aff
elif nii_file is not None:
self.affine = nii_aff
def get_train_batch(self, requested_num_examples):
"""Return a dictionary of examples.
Main method for external applications.
Args:
requested_num_examples: Integer which indicates desired number of
examples. Should be smaller or equal to num_train_examples else
warning is raised and num_train_examples are returned.
Returns:
A dictionary with keys "center", "incoming", "outgoing" and
"data_block". Each value is a list of length requested_num_examples.
The i-th element of e.g. list "dataBlock" contains the data_block for
the i-th example:
examples["center"][i] = [x,y,z] or one_hot code
examples["incoming"][i] = [x,y,z] or one_hot code
examples["outgoing"][i] = [x,y,z] or one_hot code
examples["data_block"][i] = np.array
"""
pass
def get_eval_set(self):
"""Return the evaluation set.
Returns:
A dictionary of evaluation examples. The structure is the same as
for a training batch. The total number of evaluation samples is
given by num_eval_examples.
"""
def initialize_labels(self, fibers, num_eval_examples, load_only_n_samples=False):
"""Parse labels from track file.
For internal use.
Returns:
Tuple of two lists of training and evaluation labels. Each label is
a dictionary which contains information about fiber flow. The keys of
a label depend on the subclass.
"""
pass
@staticmethod
def points_to_one_hot(center, point):
"""Calculate one-hot code for neighbor voxels.
For internal use.
Args:
center: List [x,y,z] which contains the coordinates of the voxel
approached or left by a fiber.
point: List [x,y,z] which contains the coordinates of the neighbor
voxel from where the center voxel is approached or left.
Returns:
Numpy array of shape (27). It encodes either from which neighbor
voxel the a fiber entered the center voxel or to which neighbor
voxel the fiber left the center voxel.
"""
center_voxel = np.round(center).astype(int)
if not np.array_equal(point, np.zeros(3)):
point_voxel = np.round(point).astype(int)
relative = point_voxel - center_voxel
else:
relative = np.zeros(3, dtype=np.int64)
num = 13 + np.dot([1, -3, -9], relative)
one_hot = np.zeros(27)
one_hot[num] = 1
return one_hot
@staticmethod
def points_to_relative(_from, to):
"""Calculate relative direction from global coordinates.
For internal use.
Args:
_from: List [x,y,z] which contains the coordinates of the voxel
starting point of a fiber segment.
to: List [x,y,z] which contains the coordinates of the voxel
starting point of a fiber segment
Returns:
Numpy array of shape (3) of the relative direction from "_from" to "to".
"""
if not np.array_equal(_from, np.zeros(3)) and not np.array_equal(to, np.zeros(3)):
relative = np.asarray(to) - np.asarray(_from)
if np.linalg.norm(relative) < 1e-9:
raise ValueError("Norm of relative vector is vanishingly small.")
return relative / np.linalg.norm(relative)
else:
return np.zeros(3)
@staticmethod
def build_datablock(
data,
block_size,
center_point,
incoming_point,
outgoing_point,
label_type,
affine):
"""Creates an example with all the label information and data added.
Args:
data: MemMap to the diffusion data stored in the nifti file.
block_size: Integer which indicates the entire length of the diffusion
data block in one dimension. E.g. if 7x7x7 blocks are considered,
then the block_size is 7. Should be odd.
center_point: List of [x,y,z] of coordinate where fiber goes though.
incoming_point: List of [x,y,z] of coordinate where fiber comes from.
outgoing_point: List of [x,y,z] of coordinate where fiber goes to.
label_type: String which indicates the desired label type which are
described in the docstring of PointExamples.
Returns: A dictionary with keys "center", "incoming", "outgoing" and
"data_block". Each value is a list of length requested_num_examples.
example["center"] = np.array [x,y,z] or one_hot code
example["incoming"] = np.array [x,y,z] or one_hot code
example["outgoing"] = np.array [x,y,z] or one_hot code
example["data_block"] = np.array
"""
example = {}
voxel = np.round(center_point).astype(int)
rot = aff_to_rot(affine)
if label_type == "one_hot":
example["center"] = np.round(center_point).astype(int)
example["incoming"] = Examples.points_to_one_hot(
center_point,
incoming_point)
example["outgoing"] = Examples.points_to_one_hot(
center_point,
outgoing_point)
elif label_type == "point":
example["center"] = np.array(center_point)
example["incoming"] = Examples.points_to_relative(
incoming_point[0],
center_point)
example["incoming"] = rot.dot(example["incoming"])
for i in range(len(incoming_point) - 1):
next_incoming = Examples.points_to_relative(
incoming_point[i + 1],
incoming_point[i])
next_incoming = rot.dot(next_incoming)
example["incoming"] = np.append(example["incoming"],
next_incoming)
example["outgoing"] = Examples.points_to_relative(
center_point,
outgoing_point)
example["outgoing"] = rot.dot(example["outgoing"])
data_shape = np.shape(data)
example["data_block"] = np.zeros((block_size,
block_size,
block_size,
data_shape[3]))
if (voxel[0] < 0 or voxel[0] >= data_shape[0] or
voxel[1] < 0 or voxel[1] >= data_shape[1] or
voxel[2] < 0 or voxel[2] >= data_shape[2]):
print("Warning: voxel out of bounds: ({}, {}, {}), data: (0:{}, 0:{}, 0:{})".format(
voxel[0], voxel[1], voxel[2], data_shape[0], data_shape[1], data_shape[2]))
return example
block_length = int(np.floor(block_size / 2))
# Pad data if block is out of bounds
start = [voxel[0] - block_length,
voxel[1] - block_length,
voxel[2] - block_length]
end = [voxel[0] + block_length + 1,
voxel[1] + block_length + 1,
voxel[2] + block_length + 1]
example["data_block"][
max(-(start[0]), 0):(block_size - max(end[0] - data_shape[0], 0)),
max(-(start[1]), 0):(block_size - max(end[1] - data_shape[1], 0)),
max(-(start[2]), 0):(block_size - max(end[2] - data_shape[2], 0)),
:] = np.array(data[
max(start[0], 0): min(end[0], data_shape[0]),
max(start[1], 0): min(end[1], data_shape[1]),
max(start[2], 0): min(end[2], data_shape[2]),
:])
return example
@staticmethod
def get_block(nii_file,
block_size,
point):
# TODO: Reduce code duplication in get_datablock
if isinstance(nii_file, str):
nii_file = nib.load(nii_file)
assert isinstance(nii_file,
(nib.nifti1.Nifti1Image, nib.nifti2.Nifti2Image))
voxel = np.round(point).astype(int)
data_shape = np.shape(data)
block = np.zeros((block_size,
block_size,
block_size,
data_shape[3]))
if (voxel[0] < 0 or voxel[0] >= data_shape[0] or
voxel[1] < 0 or voxel[1] >= data_shape[1] or
voxel[2] < 0 or voxel[2] >= data_shape[2]):
print("Warning: voxel out of bounds: ({}, {}, {}), "
"data: (0:{}, 0:{}, 0:{})".format(
voxel[0], voxel[1], voxel[2], data_shape[0],
data_shape[1], data_shape[2]))
return block
block_length = int(np.floor(block_size / 2))
# Pad data if block is out of bounds
start = [voxel[0] - block_length,
voxel[1] - block_length,
voxel[2] - block_length]
end = [voxel[0] + block_length + 1,
voxel[1] + block_length + 1,
voxel[2] + block_length + 1]
block[
max(-(start[0]), 0):(block_size - max(end[0] - data_shape[0], 0)),
max(-(start[1]), 0):(block_size - max(end[1] - data_shape[1], 0)),
max(-(start[2]), 0):(block_size - max(end[2] - data_shape[2], 0)),
:] = np.array(data[
max(start[0], 0): min(end[0], data_shape[0]),
max(start[1], 0): min(end[1], data_shape[1]),
max(start[2], 0): min(end[2], data_shape[2]),
:])
return block
class PointExamples(Examples):
    """Examples whose labels are fiber points with direction vectors.

    Each label is a dict with keys "center" (a point on a fiber),
    "incoming" (the previous n_incoming points/directions, zero-padded to a
    fixed shape) and "outgoing" (the next point on the fiber, or a zero
    vector at the fiber end).

    Todo:
        Update doc
    """

    def __init__(self,
                 nii_file=None,
                 trk_file=None,
                 block_size=None,
                 n_incoming=None,
                 every_n_fibers=None,
                 load_only_n_fibers=False,
                 load_only_n_samples=False,
                 num_eval_examples=0,
                 data_corrupt_percent=0.0,
                 example_percent=1.0,
                 min_fiber_length=0,
                 ignore_start_point=False,
                 ignore_stop_point=True,
                 cache_examples=False,
                 V1=None):
        """Load the input files and initialize fields.

        Args:
            nii_file: List of nifti diffusion-data paths (or None).
            trk_file: List of trackvis fiber-track paths (or None).
            block_size: Odd integer edge length of the data blocks.
            n_incoming: Number of incoming points stored per label.
            every_n_fibers: If set, keep only every n-th fiber.
            load_only_n_fibers: Accepted but not used in this class.
            load_only_n_samples: If set, stop label generation per file once
                this many labels exist.
            num_eval_examples: Approximate number of evaluation labels.
            data_corrupt_percent: Fraction of training labels whose outgoing
                direction is replaced by a random unit vector.
            example_percent: Kept for a currently disabled subsampling
                feature.
            min_fiber_length: Minimum fiber length in mm; shorter fibers are
                dropped.
            ignore_start_point: Skip the first point of each fiber.
            ignore_stop_point: Skip the last point of each fiber.
            cache_examples: Stored but not used in this class.
            V1: Optional path to a principal-eigenvector nifti, used by
                check_alignment.
        """
        self.min_length = min_fiber_length
        self.ignore_start_point = ignore_start_point
        self.ignore_stop_point = ignore_stop_point
        self.n_incoming = n_incoming
        self.every_n_fibers = every_n_fibers
        self.eval_fibers = []
        self.train_generator = None
        self.eval_generator = None
        self.cache_examples = cache_examples
        self.data_corrupt_percent = data_corrupt_percent
        self.example_percent = example_percent
        self.V1 = V1
        Examples.__init__(self,
                          nii_file,
                          trk_file,
                          block_size,
                          num_eval_examples,
                          load_only_n_samples=load_only_n_samples)
        # self.check_empty_data(warning_only=True)

    def initialize_labels(self,
                          fibers,
                          voxel_size,
                          num_eval_examples,
                          augment_reverse_fibers=True,
                          load_only_n_samples=False):
        """Build training and evaluation labels from a list of fibers.

        Fibers shorter than self.min_length (mm) are dropped, the rest are
        shuffled and turned into per-point labels; optionally each fiber is
        also added in reverse direction. Whole fibers are assigned to the
        evaluation set until num_eval_examples labels exist.

        Args:
            fibers: List of (n_points, 3) fiber point arrays (voxel space).
            voxel_size: Voxel spacing (x, y, z) in mm for the length filter.
            num_eval_examples: Approximate number of evaluation labels.
            augment_reverse_fibers: Also create labels for reversed fibers.
            load_only_n_samples: Stop after this many labels if set.

        Returns:
            Tuple (train_labels, eval_labels) of shuffled label lists.

        Raises:
            ValueError: If fewer labels than num_eval_examples exist.
        """
        print("Filtering Fibers...")
        if self.min_length > 0:
            fibers_filtered = []
            for fiber in fibers:
                fiber_length_mm = 0
                for j in range(1, len(fiber)):
                    fiber_length_mm += np.linalg.norm(
                        (fiber[j] - fiber[j - 1]) * voxel_size)
                    # Stop summing as soon as the threshold is crossed.
                    if fiber_length_mm > self.min_length:
                        fibers_filtered.append(fiber)
                        break
        else:
            fibers_filtered = fibers
        if self.every_n_fibers is not None:
            fibers_filtered = [fiber for i, fiber in enumerate(fibers_filtered) if i % self.every_n_fibers == 0]
        np.random.shuffle(fibers_filtered)
        print("Using {}/{} fibers longer than {}mm".format(len(fibers_filtered), len(fibers),
                                                           self.min_length))
        label_list = []
        eval_labels = []
        for fiber in fibers_filtered:
            # The bools act as 0/1: optionally skip the first/last point.
            for j in range(self.ignore_start_point, len(fiber) - self.ignore_stop_point):
                label = {"center": fiber[j]}
                # Last n_incoming points before j, newest first, zero-padded
                # to a fixed (n_incoming, 3) shape.
                start = max(j - self.n_incoming, 0)
                end = max(j, 0)
                label["incoming"] = fiber[start:end][::-1]
                label["incoming"] = np.append(
                    label["incoming"],
                    np.zeros((self.n_incoming - len(label["incoming"]), 3)),
                    0)
                if j == len(fiber) - 1:
                    # Fiber end: zero vector marks "no outgoing direction".
                    label["outgoing"] = np.zeros(3)
                else:
                    label["outgoing"] = fiber[j + 1]
                label_list.append(label)
                if augment_reverse_fibers:
                    # TODO: consider ignoring start and end
                    start = min(j + 1, len(fiber))
                    end = min(j + 1 + self.n_incoming, len(fiber))
                    incoming = fiber[start:end]
                    incoming = np.append(incoming,
                                         np.zeros((self.n_incoming - len(incoming), 3)),
                                         0)
                    reverse_label = {"center": label["center"], "incoming": incoming,
                                     "outgoing": label["incoming"][0]}
                    label_list.append(reverse_label)
                if load_only_n_samples and len(label_list) >= load_only_n_samples:
                    break
            if not eval_labels and num_eval_examples > 0:
                self.eval_fibers.append(fiber)
            if len(label_list) >= num_eval_examples and not eval_labels \
                    and num_eval_examples > 0:
                # Enough labels collected: everything so far becomes the
                # evaluation set; training labels restart from scratch.
                eval_labels = label_list
                label_list = []
        if len(eval_labels) < num_eval_examples:
            raise ValueError("PointExamples: Requested more evaluation examples than available")
        print("finished loading, now shuffle")
        train_labels = label_list
        np.random.shuffle(eval_labels)
        np.random.shuffle(train_labels)
        print("Generated {} train and {} eval fiber labels\n".format(len(train_labels),
                                                                     len(eval_labels)))
        # Corrupt a prefix of the training labels by replacing the outgoing
        # direction with a random unit vector around the center. The labels
        # were just shuffled, so corrupting a prefix corrupts a uniformly
        # random subset.
        if self.data_corrupt_percent > 0.0:
            n_to_corrupt = int(np.floor(len(train_labels) * self.data_corrupt_percent))
            print("DEBUG: Corrupting data. Corruption number is ",
                  n_to_corrupt,
                  "on a total of",
                  len(train_labels))
            for idx in range(n_to_corrupt):
                cur_label = train_labels[idx]
                random_v = np.random.normal(size=3)
                random_v = np.divide(random_v, np.linalg.norm(random_v))
                # In-place mutation suffices; no write-back needed.
                cur_label['outgoing'] = cur_label['center'] + random_v
        return train_labels, eval_labels

    def example_generator(self, labels, label_type):
        """Yield one datablock example per label (see build_datablock)."""
        if label_type not in ["one_hot", "point"]:
            print("ERROR: PointExamples: build_batch: Unknown label_type")
        for label in labels:
            # NOTE(review): self.brain_data is a per-brain list here while
            # build_datablock expects a single volume — confirm upstream.
            example = Examples.build_datablock(self.brain_data, self.block_size,
                                               label["center"], label["incoming"],
                                               label["outgoing"], label_type, self.affine)
            yield example

    def get_generator(self):
        """Return a factory for a generator interleaving all brains, yielding
        ({"blocks": ..., "incoming": ...}, outgoing) tuples."""
        n_labels_min = min(self.n_labels)
        n_brains = len(self.n_labels)
        print("n_labels: {}".format(self.n_labels))

        def generator():
            for i in range(n_labels_min):
                for j in range(n_brains):
                    example = Examples.build_datablock(self.brain_data[j],
                                                       self.block_size,
                                                       self.train_labels[j][i]["center"],
                                                       self.train_labels[j][i]["incoming"],
                                                       self.train_labels[j][i]["outgoing"],
                                                       "point",
                                                       self.affine[j])
                    yield ({"blocks": example["data_block"],
                            "incoming": example["incoming"].reshape(-1, 3)},
                           example["outgoing"])
        return generator

    def get_batch(self, generator, requested_num_examples=0):
        """Pull examples from *generator* and collate them into lists.

        Args:
            generator: Generator from which to pull examples.
            requested_num_examples: Number of examples to draw.

        Returns:
            Dict with keys "center", "incoming", "outgoing" and "blocks";
            "center" stays empty, the others get one entry per example.
        """
        batch = {
            "center": [],
            "incoming": [],
            "outgoing": [],
            "blocks": []
        }
        for i in range(requested_num_examples):
            example = next(generator)
            batch["incoming"].append(example["incoming"])
            batch["blocks"].append(example["data_block"])
            batch["outgoing"].append(example["outgoing"])
        return batch

    def get_train_batch(self, requested_num_examples, label_type="point"):
        """Return the next training batch; the generator is created lazily."""
        if self.train_generator is None:
            self.train_generator = self.example_generator(self.train_labels, label_type)
        return self.get_batch(self.train_generator, requested_num_examples)

    def get_eval_batch(self, requested_num_examples, label_type="point"):
        """Return the next evaluation batch; the generator is created lazily."""
        if self.eval_generator is None:
            self.eval_generator = self.example_generator(self.eval_labels, label_type)
        return self.get_batch(self.eval_generator, requested_num_examples)

    def get_eval_set(self, label_type="point"):
        """Build the full evaluation batch once, cache it, and return it."""
        if self.eval_set is None:
            eval_generator = self.example_generator(self.eval_labels, label_type)
            self.eval_set = self.get_batch(eval_generator, len(self.eval_labels))
        return self.eval_set

    def print_statistics(self):
        """Print dot-product loss and angle statistics of the eval set,
        comparing the first incoming direction against the outgoing one."""
        print("Statistics for evalution set:")
        eval_set = self.get_eval_set()
        incoming = np.array(eval_set["incoming"])[:, 0:3]
        outgoing = np.array(eval_set["outgoing"])
        dot_prod = np.sum(incoming * outgoing, axis=1)
        dot_loss = 1 - np.average(dot_prod)
        print("Average Dot Loss (1-<incoming, outgoing>): %f" % dot_loss)
        avg_angle = np.average(np.arccos(np.clip(dot_prod, -1, 1))) * 180 / np.pi
        print("Average Angle: %f" % avg_angle)
        if not self.ignore_start_point:
            # Renamed from `filter` — the old name shadowed the builtin.
            mask = [not np.array_equal(vec, [0, 0, 0]) for vec in incoming]
            dot_loss_filtered = 1 - np.average(dot_prod[mask])
            print("Loss without starting fibers: %f" % dot_loss_filtered)
            avg_angle = np.average(np.arccos(np.clip(dot_prod[mask], -1, 1))) * 180 / np.pi
            print("Angle without starting fibers: %f" % avg_angle)
        print("-----------------------------")

    def check_alignment(self):
        """Print how well eval outgoing directions align with the principal
        eigenvector at each center voxel (from tensor data or the V1 file)."""
        print("Statistics for eigenvectors of tensors:")
        eval_set = self.get_eval_set()
        outgoing = np.array(eval_set["outgoing"])
        center = np.array(eval_set["center"])
        voxels = np.round(center).astype(int)
        if self.V1 is None:
            # NOTE(review): self.voxel_dimension is a per-brain list, so
            # `!= 6` is always True for lists — verify the intended check.
            if self.voxel_dimension != 6:
                print("Data has wrong dimension to be tensor, skip check")
                return
            tensor = np.array([self.brain_data[voxel[0]][voxel[1]][voxel[2]] for voxel in voxels])
            # NOTE(review): extract_direction is not defined in this module —
            # confirm it is imported elsewhere in the file.
            eigenvec = extract_direction(tensor)
        else:
            eigenvec_data = nib.load(self.V1).get_data()
            eigenvec = [eigenvec_data[voxel[0]][voxel[1]][voxel[2]] for voxel in voxels]
        # take absolute of dot product to ignore ambiguous direction
        dot_prod = np.abs(np.sum(eigenvec * outgoing, axis=1))
        dot_loss = 1 - np.average(dot_prod)
        avg_angle = np.average(np.arccos(np.clip(dot_prod, -1, 1))) * 180 / np.pi
        print("Average Dot Loss (1-<eigenvector, outgoing>): %f" % dot_loss)
        print("Average Angle: %f" % avg_angle)
        print("-----------------------------")

    def check_empty_data(self, warning_only=False, threshold=0.05):
        """Report the fraction of evaluation data blocks that are all-zero.

        Args:
            warning_only: If True, print only when the fraction exceeds
                `threshold`; otherwise always print it.
            threshold: Fraction above which the warning is emitted.
        """
        empty = 0
        eval_set = self.get_eval_set()
        # BUGFIX: this class' get_batch stores blocks under "blocks" (the
        # UnsupervisedExamples subclass uses "data_block"); accept either
        # key instead of raising a KeyError here.
        data_blocks = eval_set.get("data_block", eval_set.get("blocks", []))
        if len(data_blocks) == 0:
            return
        for data_block in data_blocks:
            if np.isclose(data_block, 0.0).all():
                empty += 1
        percentage = empty / len(data_blocks)
        if warning_only:
            if percentage > threshold:
                print("WARNING: Blocks with empty data: %f" % percentage)
        else:
            print("Blocks with empty data: %f" % percentage)
class UnsupervisedExamples(PointExamples):
    """PointExamples variant that serves unlabeled batches for unsupervised training."""

    def __init__(self, nii_file, trk_file, block_size, num_eval_examples):
        """See PointExamples.__init__ for the argument documentation."""
        PointExamples.__init__(self, nii_file, trk_file, block_size,
                               num_eval_examples)

    def get_batch(self, generator, requested_num_examples=0):
        """Return a dictionary of examples drawn from ``generator``.

        Args:
            generator: Example generator yielding dictionaries with keys
                "center", "incoming", "outgoing" and "data_block".
            requested_num_examples: Integer which indicates the desired
                number of examples to draw from the generator.

        Returns:
            A dictionary with keys "center", "incoming", "outgoing" and
            "data_block". Each value is a list of length
            requested_num_examples; the i-th element of each list belongs to
            the i-th example, and "data_block" entries are flattened arrays.
        """
        batch = {
            "center": [],
            "incoming": [],
            "outgoing": [],
            "data_block": []
        }
        for _ in range(requested_num_examples):
            example = next(generator)
            # Append each field of the example to the matching batch list.
            # (Renamed from `list` to avoid shadowing the builtin.)
            for key, values in batch.items():
                if key == 'data_block':
                    # Data blocks are flattened into 1-D feature vectors.
                    values.append(example[key].flatten())
                else:
                    values.append(example[key])
        return batch

    def get_unlabeled_batch(self, generator, requested_num_examples=0):
        """Return a (num_examples, features) array of flattened data blocks."""
        return np.array([next(generator)["data_block"].flatten()
                         for _ in range(requested_num_examples)])

    def get_train_batches(self, requested_num_examples):
        """Return an array of unlabeled training examples.

        Args:
            requested_num_examples: Integer which indicates the desired
                number of examples.

        Returns:
            An array with the requested number of examples. Each example is a
            flattened array covering the whole data block.
        """
        # Create the training generator lazily on first use.
        if self.train_generator is None:
            self.train_generator = self.example_generator(self.train_labels,
                                                          "point")
        return self.get_unlabeled_batch(self.train_generator,
                                        requested_num_examples)

    def get_eval_set(self, label_type="point", unlabeled=False):
        """Return the evaluation examples, optionally without labels.

        Args:
            label_type: String which indicates the desired label type, as
                described in the docstring of PointExamples. Only used when
                ``unlabeled`` is False.
            unlabeled: If True, return a plain array of flattened data
                blocks; otherwise return a labeled batch dictionary.

        Returns:
            Either the unlabeled array or the labeled batch dictionary (same
            structure as get_batch), covering all evaluation labels.
        """
        # Each variant of the evaluation set is computed once and cached.
        if unlabeled:
            if getattr(self, 'unlabeled_eval_set', None) is None:
                eval_generator = self.example_generator(self.eval_labels,
                                                        "point")
                self.unlabeled_eval_set = self.get_unlabeled_batch(
                    eval_generator, len(self.eval_labels))
            ret = self.unlabeled_eval_set
        else:
            if self.eval_set is None:
                eval_generator = self.example_generator(self.eval_labels,
                                                        label_type)
                self.eval_set = self.get_batch(eval_generator,
                                               len(self.eval_labels))
            ret = self.eval_set
        return ret
class TestExamples(object):
    """Usage demonstration of the Examples class.

    Make sure you put valid "tensor.nii" and "fibers.trk" files in the same
    directory as this module.
    """

    def __init__(self):
        # TODO: rework
        # Locate the bundled data directory relative to this module.
        data_dir = str(os.path.dirname(
            os.path.abspath(__file__)).split("example_loader")[0]) + "data/"
        # Build a loader with a small block size and one evaluation example.
        loader = PointExamples(
            data_dir + "tensor.nii",
            data_dir + "fibers.trk",
            block_size=3,
            num_eval_examples=1)
        print("Created PointExamples instance with blocksize 3!")
        # Show a few interesting attributes.
        print("num_train_examples: {}".format(loader.num_train_examples))
        print("num_fibers: {}".format(loader.num_fibers))
        # The example counter starts out at zero.
        print("Initial example_state: {}".format(loader.example_state))
        # Draw a single one-hot encoded point example.
        first = loader.get_train_batch(1, label_type="one_hot")
        print("Got one example!")
        # The counter has now advanced by one.
        print("Now the exampleState is: {}".format(loader.example_state))
        print("Content of the first example:")
        print("center: {}".format(first["center"]))
        print("incoming: {}".format(first["incoming"]))
        print("outgoing: {}".format(first["outgoing"]))
        print("data_block type: {}".format(type(first["data_block"][0])))
        print("data_block shape: {}".format(first["data_block"][0].shape))
if __name__ == "__main__":
    # Intentionally a no-op; instantiate TestExamples manually for a demo run.
    pass
|
en
| 0.791549
|
This module contains functionality to load tractography training data. Credit goes to <NAME> Todo: Update doc Computes the rotation matrix corresponding to the given affine matrix. Args: aff: The affine matrix (4, 4). Returns: rotation: The (3, 3) matrix corresponding to the rotation in the affine. Base Class for loading tractography training samples. This class provides functionality to create blocks of diffusion-data and the associated fiber information. The diffusion data block represents the input to any learning algorithm, whereas the fiber information serves as label. Classes derived from this base class handle different forms of input and labels. For instance, the input can be raw diffusion measurements or derived representations such as diffusion tensor or spherical harmonics. Labels describe the local fiber flow which is the subject of prediction. Subclasses: PointExamples Attributes: fibers: List of streamlines. Each streamline is a list with shape (fiber_length,3) which contains the x,y,z coordinates of each point in the fiber. fiber_header: Struct array with info about the loaded track file. See http://trackvis.org/docs/?subsect=fileformat for more information. brain_file: Proxy to the diffusion data file, which is assumed to be of nifti format. brain_data: MemMap to the diffusion data stored in the nifti file. brain_header: Struct array with information about the loaded diffusion data file. See https://brainder.org/2012/09/23/the-nifti-file-format/ for more information. voxel_size: List which contains the voxel spacing in x, y, z directions. Units are Millimeter. block_size: Integer which indicates the entire length of the diffusion data block in one dimension. E.g. if 7x7x7 blocks are considered, then the block_size is 7. Should be odd. train_labels: List which contains all training fiber labels which are parsed from the track file. Each label is a dictionary which keys depend on the subclass. 
eval_labels: List which contains all evaluation fiber labels which are parsed from the track file. Each label is a dictionary which keys depend on the subclass. block_length: Integer which indicates half the block_size minus one. E.g. if 7x7x7 blocks are considered, the block_length is 3, i.e. the distance from the center in each direction in voxels. voxel_dimension: List as x,y,z dimensions of brain data. Load the input files and initialize fields. Args: nii_file: Path to the nifti file which is used as diffusion data input. trk_file: Path to the trackvis file which is used for the labels, should be derived from the data represented in the niiFile. block_size: Integer (odd) which indicates the desired data block size. num_eval_examples: Integer which indicates approximate number of evaluation examples (and therefore labels) loaded from the track file. Actual amount of evaluation examples can vary slightly because of adding whole fibers at a time. # self.fibers = [] # self.fibers.append(fibers) Return a dictionary of examples. Main method for external applications. Args: requested_num_examples: Integer which indicates desired number of examples. Should be smaller or equal to num_train_examples else warning is raised and num_train_examples are returned. Returns: A dictionary with keys "center", "incoming", "outgoing" and "data_block". Each value is a list of length requested_num_examples. The i-th element of e.g. list "dataBlock" contains the data_block for the i-th example: examples["center"][i] = [x,y,z] or one_hot code examples["incoming"][i] = [x,y,z] or one_hot code examples["outgoing"][i] = [x,y,z] or one_hot code examples["data_block"][i] = np.array Return the evaluation set. Returns: A dictionary of evaluation examples. The structure is the same as for a training batch. The total number of evaluation samples is given by num_eval_examples. Parse labels from track file. For internal use. Returns: Tuple of two lists of training and evaluation labels. 
Each label is a dictionary which contains information about fiber flow. The keys of a label depend on the subclass. Calculate one-hot code for neighbor voxels. For internal use. Args: center: List [x,y,z] which contains the coordinates of the voxel approached or left by a fiber. point: List [x,y,z] which contains the coordinates of the neighbor voxel from where the center voxel is approached or left. Returns: Numpy array of shape (27). It encodes either from which neighbor voxel the a fiber entered the center voxel or to which neighbor voxel the fiber left the center voxel. Calculate relative direction from global coordinates. For internal use. Args: _from: List [x,y,z] which contains the coordinates of the voxel starting point of a fiber segment. to: List [x,y,z] which contains the coordinates of the voxel starting point of a fiber segment Returns: Numpy array of shape (3) of the relative direction from "_from" to "to". Creates an example with all the label information and data added. Args: data: MemMap to the diffusion data stored in the nifti file. block_size: Integer which indicates the entire length of the diffusion data block in one dimension. E.g. if 7x7x7 blocks are considered, then the block_size is 7. Should be odd. center_point: List of [x,y,z] of coordinate where fiber goes though. incoming_point: List of [x,y,z] of coordinate where fiber comes from. outgoing_point: List of [x,y,z] of coordinate where fiber goes to. label_type: String which indicates the desired label type which are described in the docstring of PointExamples. Returns: A dictionary with keys "center", "incoming", "outgoing" and "data_block". Each value is a list of length requested_num_examples. 
example["center"] = np.array [x,y,z] or one_hot code example["incoming"] = np.array [x,y,z] or one_hot code example["outgoing"] = np.array [x,y,z] or one_hot code example["data_block"] = np.array # Pad data if block is out of bounds # TODO: Reduce code duplication in get_datablock # Pad data if block is out of bounds Class which represents fiber point examples. Todo: Update doc Load the input files and initialize fields. # self.check_empty_data(warning_only=True) # TODO: consider ignoring start and end #if self.example_percent < 1.0: # # Subsample the labels # n_old = len(train_labels) # n_wanted = np.round(n_old * self.example_percent).astype(int) # train_labels = train_labels[0:n_wanted] # Subsample # n_new = len(train_labels) # print("Training labels are {} / {}, i.e. {:3.2f} %".format(n_new, # n_old, # n_new / n_old * 100)) # NOTE: Here is the corruption of the training labels. # First, we calculate how many labels have to be corrupted. Then, this number of labels is # corrupted by removing the outgoing label and in its place putting a new random one that # has been obtained by adding to the 'center' a random unit vector in R3. # NOTE: Labels have already been shuffled, so this can be carried on in sequential order. # QUESTION: is this really necessary? # Done with the corruption Return a dictionary of examples. Args: requested_num_examples: Integer which indicates desired number of examples. Should be smaller or equal to num_train_examples else warning is raised and num_train_examples are returned. generator: Generator from which to pull examples from. Returns: A dictionary with keys "center", "incoming", "outgoing" and "data_block". Each value is a list of length requested_num_examples. The i-th element of e.g. 
list "dataBlock" contains the data_block array for the i-th example: examples["center"][i] = [x,y,z] or one_hot code examples["incoming"][i] = [x,y,z] or one_hot code examples["outgoing"][i] = [x,y,z] or one_hot code examples["data_block"][i] = np.array # Add example to examples by appending individual lists # only calculate once # take absolute of dot product to ignore ambiguous direction PointExamples for unsupervised training. Return a dictionary of examples. Args: requested_num_examples: Integer which indicates desired number of examples. Should be smaller or equal to num_train_examples else warning is raised and num_train_examples are returned. label_type: String which indicates the desired label type which are described in the docstring of PointExamples. Returns: A dictionary with keys "center", "incoming", "outgoing" and "data_block". Each value is a list of length requested_num_examples. The i-th element of e.g. list "dataBlock" contains the data_block array for the i-th example: examples["center"][i] = [x,y,z] or one_hot code examples["incoming"][i] = [x,y,z] or one_hot code examples["outgoing"][i] = [x,y,z] or one_hot code examples["data_block"][i] = np.array # Add example to examples by appending individual lists # still flatten the data blocks Return an array of examples. Args: requested_num_examples: Integer which indicates desired number of examples. Should be smaller or equal to num_train_examples else warning is raised and num_train_examples are returned. Returns: An array with the requested number of examples. Each example is a flattened array as a list of tensors for the whole cube size, where each tensor is represented by the 6 values in it's upper diagonal. Return evaluation examples including labels for ground truth. Args: num: number of examples. If left to None, all evaluation examples are returned label_type: String which indicates the desired label type which are described in the docstring of PointExamples. 
Returns: A dictionary with keys "center", "incoming", "outgoing" and "data_block". Each value is a list of length requested_num_examples. The i-th element of e.g. list "dataBlock" contains the data_block array for the i-th example: examples["center"][i] = [x,y,z] or one_hot code examples["incoming"][i] = [x,y,z] or one_hot code examples["outgoing"][i] = [x,y,z] or one_hot code examples["data_block"][i] = np.array # only calculate once Usage Demonstration of the Examples class Make sure you put valid "tensor.nii" and "fibers.trk" files in the same directory as this module. # TODO: rework # Create a new PointExamples instance # Access interesting attributes # Check that the initial exampleState is indeed zero # Get a first one-hot point example # Now the exampleState is one
| 3.110762
| 3
|
third_party/webrtc/src/chromium/src/tools/perf/measurements/page_cycler.py
|
bopopescu/webrtc-streaming-node
| 8
|
6628242
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The page cycler measurement.
This measurement registers a window load handler in which is forces a layout and
then records the value of performance.now(). This call to now() measures the
time from navigationStart (immediately after the previous page's beforeunload
event) until after the layout in the page's load event. In addition, two garbage
collections are performed in between the page loads (in the beforeunload event).
This extra garbage collection time is not included in the measurement times.
Finally, various memory and IO statistics are gathered at the very end of
cycling all pages.
"""
import collections
import os
from telemetry.core import util
from telemetry.page import page_test
from telemetry.value import scalar
from metrics import cpu
from metrics import keychain_metric
from metrics import memory
from metrics import power
from metrics import speedindex
class PageCycler(page_test.PageTest):
  """Page measurement that cycles pages, recording load times and metrics.

  Warm runs are scheduled before cold runs so any initial profile cache is
  preserved for as long as possible (see ShouldRunCold).
  """

  def __init__(self, page_repeat, pageset_repeat, cold_load_percent=50,
               report_speed_index=False, clear_cache_before_each_run=False):
    """Set up the measurement.

    Args:
      page_repeat: Number of times each page is loaded per pageset run.
      pageset_repeat: Number of times the whole pageset is cycled.
      cold_load_percent: Percentage (0-100) of runs that should be cold
          loads, or None to make every run cold.
      report_speed_index: Whether to also collect the Speed Index metric.
      clear_cache_before_each_run: Forwarded to page_test.PageTest.
    """
    super(PageCycler, self).__init__(
        clear_cache_before_each_run=clear_cache_before_each_run)
    with open(os.path.join(os.path.dirname(__file__),
                           'page_cycler.js'), 'r') as f:
      self._page_cycler_js = f.read()
    self._report_speed_index = report_speed_index
    self._speedindex_metric = speedindex.SpeedIndexMetric()
    self._memory_metric = None
    self._power_metric = None
    self._cpu_metric = None
    self._has_loaded_page = collections.defaultdict(int)
    self._initial_renderer_url = None  # to avoid cross-renderer navigation
    # Identity comparison: cold_load_percent=0 is a valid setting and must
    # not be conflated with None (which means "all runs cold").
    cold_runs_percent_set = (cold_load_percent is not None)
    # Handle requests for cold cache runs
    if (cold_runs_percent_set and
        (cold_load_percent < 0 or cold_load_percent > 100)):
      raise Exception('cold-load-percent must be in the range [0-100]')
    # Make sure _cold_run_start_index is an integer multiple of page_repeat.
    # Without this, --pageset_shuffle + --page_repeat could lead to
    # assertion failures on _started_warm in WillNavigateToPage.
    if cold_runs_percent_set:
      number_warm_pageset_runs = int(
          (int(pageset_repeat) - 1) * (100 - cold_load_percent) / 100)
      number_warm_runs = number_warm_pageset_runs * page_repeat
      self._cold_run_start_index = number_warm_runs + page_repeat
    else:
      self._cold_run_start_index = pageset_repeat * page_repeat

  def WillStartBrowser(self, platform):
    """Initialize metrics once right before the browser has been launched."""
    self._power_metric = power.PowerMetric(platform)

  def DidStartBrowser(self, browser):
    """Initialize metrics once right after the browser has been launched."""
    self._memory_metric = memory.MemoryMetric(browser)
    self._cpu_metric = cpu.CpuMetric(browser)

  def WillNavigateToPage(self, page, tab):
    if page.is_file:
      # For legacy page cyclers which use the filesystem, do an initial
      # navigate to avoid paying for a cross-renderer navigation.
      initial_url = tab.browser.platform.http_server.UrlOf('nonexistent.html')
      if self._initial_renderer_url != initial_url:
        self._initial_renderer_url = initial_url
        tab.Navigate(self._initial_renderer_url)
    page.script_to_evaluate_on_commit = self._page_cycler_js
    if self.ShouldRunCold(page.url):
      tab.ClearCache(force=True)
    if self._report_speed_index:
      self._speedindex_metric.Start(page, tab)
    self._cpu_metric.Start(page, tab)
    self._power_metric.Start(page, tab)

  def DidNavigateToPage(self, page, tab):
    self._memory_metric.Start(page, tab)

  def CustomizeBrowserOptions(self, options):
    memory.MemoryMetric.CustomizeBrowserOptions(options)
    power.PowerMetric.CustomizeBrowserOptions(options)
    # Expose gc() to JavaScript so page_cycler.js can force collections
    # between page loads (see module docstring).
    options.AppendExtraBrowserArgs('--js-flags=--expose_gc')
    if self._report_speed_index:
      self._speedindex_metric.CustomizeBrowserOptions(options)
    keychain_metric.KeychainMetric.CustomizeBrowserOptions(options)

  def ValidateAndMeasurePage(self, page, tab, results):
    # page_cycler.js sets __pc_load_time after the post-load layout.
    tab.WaitForJavaScriptExpression('__pc_load_time', 60)
    chart_name_prefix = ('cold_' if self.IsRunCold(page.url) else
                         'warm_')
    results.AddValue(scalar.ScalarValue(
        results.current_page, '%stimes.page_load_time' % chart_name_prefix,
        'ms', tab.EvaluateJavaScript('__pc_load_time'),
        description='Average page load time. Measured from '
                    'performance.timing.navigationStart until the completion '
                    'time of a layout after the window.load event. Cold times '
                    'are the times when the page is loaded cold, i.e. without '
                    'loading it before, and warm times are times when the '
                    'page is loaded after being loaded previously.'))
    self._has_loaded_page[page.url] += 1
    self._power_metric.Stop(page, tab)
    self._memory_metric.Stop(page, tab)
    self._memory_metric.AddResults(tab, results)
    self._power_metric.AddResults(tab, results)
    self._cpu_metric.Stop(page, tab)
    self._cpu_metric.AddResults(tab, results)
    if self._report_speed_index:
      def SpeedIndexIsFinished():
        return self._speedindex_metric.IsFinished(tab)
      util.WaitFor(SpeedIndexIsFinished, 60)
      self._speedindex_metric.Stop(page, tab)
      self._speedindex_metric.AddResults(
          tab, results, chart_name=chart_name_prefix+'speed_index')
    keychain_metric.KeychainMetric().AddResults(tab, results)

  def IsRunCold(self, url):
    """Return True if the current load of |url| is a cold load."""
    return self.ShouldRunCold(url) or self._has_loaded_page[url] == 0

  def ShouldRunCold(self, url):
    # We do the warm runs first for two reasons. The first is so we can
    # preserve any initial profile cache for as long as possible.
    # The second is that, if we did cold runs first, we'd have a transition
    # page set during which we wanted the run for each URL to both
    # contribute to the cold data and warm the cache for the following
    # warm run, and clearing the cache before the load of the following
    # URL would eliminate the intended warmup for the previous URL.
    return self._has_loaded_page[url] >= self._cold_run_start_index
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The page cycler measurement.
This measurement registers a window load handler in which is forces a layout and
then records the value of performance.now(). This call to now() measures the
time from navigationStart (immediately after the previous page's beforeunload
event) until after the layout in the page's load event. In addition, two garbage
collections are performed in between the page loads (in the beforeunload event).
This extra garbage collection time is not included in the measurement times.
Finally, various memory and IO statistics are gathered at the very end of
cycling all pages.
"""
import collections
import os
from telemetry.core import util
from telemetry.page import page_test
from telemetry.value import scalar
from metrics import cpu
from metrics import keychain_metric
from metrics import memory
from metrics import power
from metrics import speedindex
class PageCycler(page_test.PageTest):
  """Page measurement that cycles pages, recording load times and metrics.

  Warm runs are scheduled before cold runs so any initial profile cache is
  preserved for as long as possible (see ShouldRunCold).
  """

  def __init__(self, page_repeat, pageset_repeat, cold_load_percent=50,
               report_speed_index=False, clear_cache_before_each_run=False):
    """Set up the measurement.

    Args:
      page_repeat: Number of times each page is loaded per pageset run.
      pageset_repeat: Number of times the whole pageset is cycled.
      cold_load_percent: Percentage (0-100) of runs that should be cold
          loads, or None to make every run cold.
      report_speed_index: Whether to also collect the Speed Index metric.
      clear_cache_before_each_run: Forwarded to page_test.PageTest.
    """
    super(PageCycler, self).__init__(
        clear_cache_before_each_run=clear_cache_before_each_run)
    with open(os.path.join(os.path.dirname(__file__),
                           'page_cycler.js'), 'r') as f:
      self._page_cycler_js = f.read()
    self._report_speed_index = report_speed_index
    self._speedindex_metric = speedindex.SpeedIndexMetric()
    self._memory_metric = None
    self._power_metric = None
    self._cpu_metric = None
    self._has_loaded_page = collections.defaultdict(int)
    self._initial_renderer_url = None  # to avoid cross-renderer navigation
    # Identity comparison: cold_load_percent=0 is a valid setting and must
    # not be conflated with None (which means "all runs cold").
    cold_runs_percent_set = (cold_load_percent is not None)
    # Handle requests for cold cache runs
    if (cold_runs_percent_set and
        (cold_load_percent < 0 or cold_load_percent > 100)):
      raise Exception('cold-load-percent must be in the range [0-100]')
    # Make sure _cold_run_start_index is an integer multiple of page_repeat.
    # Without this, --pageset_shuffle + --page_repeat could lead to
    # assertion failures on _started_warm in WillNavigateToPage.
    if cold_runs_percent_set:
      number_warm_pageset_runs = int(
          (int(pageset_repeat) - 1) * (100 - cold_load_percent) / 100)
      number_warm_runs = number_warm_pageset_runs * page_repeat
      self._cold_run_start_index = number_warm_runs + page_repeat
    else:
      self._cold_run_start_index = pageset_repeat * page_repeat

  def WillStartBrowser(self, platform):
    """Initialize metrics once right before the browser has been launched."""
    self._power_metric = power.PowerMetric(platform)

  def DidStartBrowser(self, browser):
    """Initialize metrics once right after the browser has been launched."""
    self._memory_metric = memory.MemoryMetric(browser)
    self._cpu_metric = cpu.CpuMetric(browser)

  def WillNavigateToPage(self, page, tab):
    if page.is_file:
      # For legacy page cyclers which use the filesystem, do an initial
      # navigate to avoid paying for a cross-renderer navigation.
      initial_url = tab.browser.platform.http_server.UrlOf('nonexistent.html')
      if self._initial_renderer_url != initial_url:
        self._initial_renderer_url = initial_url
        tab.Navigate(self._initial_renderer_url)
    page.script_to_evaluate_on_commit = self._page_cycler_js
    if self.ShouldRunCold(page.url):
      tab.ClearCache(force=True)
    if self._report_speed_index:
      self._speedindex_metric.Start(page, tab)
    self._cpu_metric.Start(page, tab)
    self._power_metric.Start(page, tab)

  def DidNavigateToPage(self, page, tab):
    self._memory_metric.Start(page, tab)

  def CustomizeBrowserOptions(self, options):
    memory.MemoryMetric.CustomizeBrowserOptions(options)
    power.PowerMetric.CustomizeBrowserOptions(options)
    # Expose gc() to JavaScript so page_cycler.js can force collections
    # between page loads (see module docstring).
    options.AppendExtraBrowserArgs('--js-flags=--expose_gc')
    if self._report_speed_index:
      self._speedindex_metric.CustomizeBrowserOptions(options)
    keychain_metric.KeychainMetric.CustomizeBrowserOptions(options)

  def ValidateAndMeasurePage(self, page, tab, results):
    # page_cycler.js sets __pc_load_time after the post-load layout.
    tab.WaitForJavaScriptExpression('__pc_load_time', 60)
    chart_name_prefix = ('cold_' if self.IsRunCold(page.url) else
                         'warm_')
    results.AddValue(scalar.ScalarValue(
        results.current_page, '%stimes.page_load_time' % chart_name_prefix,
        'ms', tab.EvaluateJavaScript('__pc_load_time'),
        description='Average page load time. Measured from '
                    'performance.timing.navigationStart until the completion '
                    'time of a layout after the window.load event. Cold times '
                    'are the times when the page is loaded cold, i.e. without '
                    'loading it before, and warm times are times when the '
                    'page is loaded after being loaded previously.'))
    self._has_loaded_page[page.url] += 1
    self._power_metric.Stop(page, tab)
    self._memory_metric.Stop(page, tab)
    self._memory_metric.AddResults(tab, results)
    self._power_metric.AddResults(tab, results)
    self._cpu_metric.Stop(page, tab)
    self._cpu_metric.AddResults(tab, results)
    if self._report_speed_index:
      def SpeedIndexIsFinished():
        return self._speedindex_metric.IsFinished(tab)
      util.WaitFor(SpeedIndexIsFinished, 60)
      self._speedindex_metric.Stop(page, tab)
      self._speedindex_metric.AddResults(
          tab, results, chart_name=chart_name_prefix+'speed_index')
    keychain_metric.KeychainMetric().AddResults(tab, results)

  def IsRunCold(self, url):
    """Return True if the current load of |url| is a cold load."""
    return self.ShouldRunCold(url) or self._has_loaded_page[url] == 0

  def ShouldRunCold(self, url):
    # We do the warm runs first for two reasons. The first is so we can
    # preserve any initial profile cache for as long as possible.
    # The second is that, if we did cold runs first, we'd have a transition
    # page set during which we wanted the run for each URL to both
    # contribute to the cold data and warm the cache for the following
    # warm run, and clearing the cache before the load of the following
    # URL would eliminate the intended warmup for the previous URL.
    return self._has_loaded_page[url] >= self._cold_run_start_index
|
en
| 0.915817
|
# Copyright 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. The page cycler measurement. This measurement registers a window load handler in which is forces a layout and then records the value of performance.now(). This call to now() measures the time from navigationStart (immediately after the previous page's beforeunload event) until after the layout in the page's load event. In addition, two garbage collections are performed in between the page loads (in the beforeunload event). This extra garbage collection time is not included in the measurement times. Finally, various memory and IO statistics are gathered at the very end of cycling all pages. # to avoid cross-renderer navigation # Handle requests for cold cache runs # Make sure _cold_run_start_index is an integer multiple of page_repeat. # Without this, --pageset_shuffle + --page_repeat could lead to # assertion failures on _started_warm in WillNavigateToPage. Initialize metrics once right before the browser has been launched. Initialize metrics once right after the browser has been launched. # For legacy page cyclers which use the filesystem, do an initial # navigate to avoid paying for a cross-renderer navigation. # We do the warm runs first for two reasons. The first is so we can # preserve any initial profile cache for as long as possible. # The second is that, if we did cold runs first, we'd have a transition # page set during which we wanted the run for each URL to both # contribute to the cold data and warm the catch for the following # warm run, and clearing the cache before the load of the following # URL would eliminate the intended warmup for the previous URL.
| 2.352242
| 2
|
provision/management/commands/checkipzmarkers.py
|
NOAA-GSD/qrba_os
| 1
|
6628243
|
<filename>provision/management/commands/checkipzmarkers.py
# https://stackoverflow.com/questions/19475955/using-django-models-in-external-python-script
from django.core.management.base import BaseCommand, CommandError
from provision.models import IPzone, NfsExport, Restriction
class Command(BaseCommand):
    """Audit IPzone markers and repair zones whose marker is missing.

    Running standalone Django model code from a management command:
    https://stackoverflow.com/questions/19475955/using-django-models-in-external-python-script
    """
    help = "checks the ipzmarkers in all ipzones"

    def handle(self, *args, **options):
        """For each zone (skipping '#None' placeholders):

        * report markers that are still 'unset';
        * if the marker is absent from the zone's ipaddrs, append it,
          persist the zone, and print the NFS exports whose restrictions
          reference the zone;
        * finally print summary counts of the affected zones.
        """
        zones = IPzone.objects.all()
        numzones = len(zones)
        print("found " + str(numzones) + " zones")
        unset = set()  # zones whose marker is still 'unset'
        niipa = set()  # zones whose marker was missing from ipaddrs
        for z in zones:
            # Skip placeholder zones.
            if '#None' in str(z):
                continue
            ipzmarker = z.get_ipzone_marker()
            if 'unset' in str(ipzmarker):
                print("ipzmarker unset for z " + str(z))
                unset.add(str(z))
            ipaddrs = z.get_ipaddrs()
            if str(ipzmarker) not in str(ipaddrs):
                print("ipzmarker " + str(ipzmarker) + " not found in ipaddrs for z " + str(z))
                niipa.add(str(z))
                # Repair the zone: append the marker and persist.
                ipaddrs.append(ipzmarker)
                z.set_ipaddrs(ipaddrs)
                # Restrictions that reference this zone. Use == rather than
                # calling __eq__ directly: a raw __eq__ may return the truthy
                # NotImplemented for foreign types.
                rpset = {r for r in Restriction.objects.all()
                         if any(ipz == z for ipz in r.get_ipzones())}
                # NFS exports that use any of those restrictions.
                nsfxparents = set()
                for x in NfsExport.objects.all():
                    if any(xr == r for xr in x.restrictions.get_queryset()
                           for r in rpset):
                        nsfxparents.add(x)
                if len(nsfxparents) > 0:
                    print("    nsfparents:")
                    for x in nsfxparents:
                        msg = "        " + str(x)
                        print(msg)
                    print("\n")
        # Sorted summaries of the affected zones.
        unlist = sorted(unset)
        nilist = sorted(niipa)
        print("num unset: " + str(len(unlist)))
        print("num niipa: " + str(len(nilist)))
        print("unset: " + str(unset))
        print("niipa: " + str(niipa))
|
<filename>provision/management/commands/checkipzmarkers.py
# https://stackoverflow.com/questions/19475955/using-django-models-in-external-python-script
from django.core.management.base import BaseCommand, CommandError
from provision.models import IPzone, NfsExport, Restriction
class Command(BaseCommand):
    """Audit IPzone markers and repair zones whose marker is missing.

    Running standalone Django model code from a management command:
    https://stackoverflow.com/questions/19475955/using-django-models-in-external-python-script
    """
    help = "checks the ipzmarkers in all ipzones"

    def handle(self, *args, **options):
        """For each zone (skipping '#None' placeholders):

        * report markers that are still 'unset';
        * if the marker is absent from the zone's ipaddrs, append it,
          persist the zone, and print the NFS exports whose restrictions
          reference the zone;
        * finally print summary counts of the affected zones.
        """
        zones = IPzone.objects.all()
        numzones = len(zones)
        print("found " + str(numzones) + " zones")
        unset = set()  # zones whose marker is still 'unset'
        niipa = set()  # zones whose marker was missing from ipaddrs
        for z in zones:
            # Skip placeholder zones.
            if '#None' in str(z):
                continue
            ipzmarker = z.get_ipzone_marker()
            if 'unset' in str(ipzmarker):
                print("ipzmarker unset for z " + str(z))
                unset.add(str(z))
            ipaddrs = z.get_ipaddrs()
            if str(ipzmarker) not in str(ipaddrs):
                print("ipzmarker " + str(ipzmarker) + " not found in ipaddrs for z " + str(z))
                niipa.add(str(z))
                # Repair the zone: append the marker and persist.
                ipaddrs.append(ipzmarker)
                z.set_ipaddrs(ipaddrs)
                # Restrictions that reference this zone. Use == rather than
                # calling __eq__ directly: a raw __eq__ may return the truthy
                # NotImplemented for foreign types.
                rpset = {r for r in Restriction.objects.all()
                         if any(ipz == z for ipz in r.get_ipzones())}
                # NFS exports that use any of those restrictions.
                nsfxparents = set()
                for x in NfsExport.objects.all():
                    if any(xr == r for xr in x.restrictions.get_queryset()
                           for r in rpset):
                        nsfxparents.add(x)
                if len(nsfxparents) > 0:
                    print("    nsfparents:")
                    for x in nsfxparents:
                        msg = "        " + str(x)
                        print(msg)
                    print("\n")
        # Sorted summaries of the affected zones.
        unlist = sorted(unset)
        nilist = sorted(niipa)
        print("num unset: " + str(len(unlist)))
        print("num niipa: " + str(len(nilist)))
        print("unset: " + str(unset))
        print("niipa: " + str(niipa))
|
en
| 0.426733
|
# https://stackoverflow.com/questions/19475955/using-django-models-in-external-python-script # z.set_ipaddrs(ipzmarker) # else: # print( "ipzmarker " + str(ipzmarker) + " found for z " + str(z) ) # ipzmarker = z.get_ipzone_marker() # ipaddrs = z.get_ipaddrs() # if str(ipzmarker) not in str(ipaddrs): # print( " unset ipzmarker for z " + str(z))
| 2.24996
| 2
|
great_expectations/rule_based_profiler/domain_builder/simple_semantic_type_domain_builder.py
|
victorcouste/great_expectations
| 2
|
6628244
|
from typing import Any, Dict, List, Optional, Union
import great_expectations.exceptions as ge_exceptions
from great_expectations import DataContext
from great_expectations.core.batch import BatchRequest
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.profile.base import ProfilerTypeMapping
from great_expectations.rule_based_profiler.domain_builder import (
Domain,
DomainBuilder,
InferredSemanticDomainType,
SemanticDomainTypes,
)
from great_expectations.rule_based_profiler.parameter_builder import ParameterContainer
from great_expectations.validator.validator import MetricConfiguration
class SimpleSemanticTypeColumnDomainBuilder(DomainBuilder):
    """
    This DomainBuilder utilizes a "best-effort" semantic interpretation of ("storage") columns of a table.
    """
    def __init__(
        self,
        data_context: DataContext,
        batch_request: Optional[Union[BatchRequest, dict]] = None,
        semantic_types: Optional[
            Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
        ] = None,
    ):
        """
        Args:
            data_context: DataContext
            batch_request: specified in DomainBuilder configuration to get Batch objects for domain computation.
            semantic_types: semantic type directive -- a single type (as a str name or a
                SemanticDomainTypes member) or a list of such types; only columns whose
                inferred semantic type is in this collection yield domains.  None is
                normalized to the empty list (so no columns will match).
        """
        super().__init__(
            data_context=data_context,
            batch_request=batch_request,
        )
        if semantic_types is None:
            semantic_types = []
        # Stored unparsed; normalized on demand in _get_domains().
        self._semantic_types = semantic_types
    def _get_domains(
        self,
        variables: Optional[ParameterContainer] = None,
    ) -> List[Domain]:
        """
        Find the semantic column type for each column and return all domains matching the specified type or types.
        """
        # Normalize the constructor directive into List[SemanticDomainTypes].
        semantic_types: List[
            SemanticDomainTypes
        ] = _parse_semantic_domain_type_argument(semantic_types=self._semantic_types)
        batch_id: str = self.get_batch_id(variables=variables)
        # Storage-level type information for every column of the table.
        column_types_dict_list: List[Dict[str, Any]] = self.get_validator(
            variables=variables
        ).get_metric(
            metric=MetricConfiguration(
                metric_name="table.column_types",
                metric_domain_kwargs={
                    "batch_id": batch_id,
                },
                metric_value_kwargs={
                    "include_nested": True,
                },
                metric_dependencies=None,
            )
        )
        table_column_names: List[str] = self.get_validator(
            variables=variables
        ).get_metric(
            metric=MetricConfiguration(
                metric_name="table.columns",
                metric_domain_kwargs={
                    "batch_id": batch_id,
                },
                metric_value_kwargs=None,
                metric_dependencies=None,
            )
        )
        column_name: str
        # A semantic type is distinguished from the structured column type;
        # An example structured column type would be "integer". The inferred semantic type would be "id".
        table_column_name_to_inferred_semantic_domain_type_mapping: Dict[
            str, SemanticDomainTypes
        ] = {
            column_name: self.infer_semantic_domain_type_from_table_column_type(
                column_types_dict_list=column_types_dict_list,
                column_name=column_name,
            ).semantic_domain_type
            for column_name in table_column_names
        }
        # Keep only the columns whose inferred semantic type was requested.
        candidate_column_names: List[str] = list(
            filter(
                lambda candidate_column_name: table_column_name_to_inferred_semantic_domain_type_mapping[
                    candidate_column_name
                ]
                in semantic_types,
                table_column_names,
            )
        )
        # Emit one COLUMN-type Domain per matching column; the inferred
        # semantic type is preserved in the "details" payload for downstream use.
        domains: List[Domain] = [
            Domain(
                domain_type=MetricDomainTypes.COLUMN,
                domain_kwargs={
                    "column": column_name,
                },
                details={
                    "inferred_semantic_domain_type": table_column_name_to_inferred_semantic_domain_type_mapping[
                        column_name
                    ],
                },
            )
            for column_name in candidate_column_names
        ]
        return domains
    # This method (default implementation) can be overwritten (with different implementation mechanisms) by subclasses.
    # noinspection PyMethodMayBeStatic
    def infer_semantic_domain_type_from_table_column_type(
        self,
        column_types_dict_list: List[Dict[str, Any]],
        column_name: str,
    ) -> InferredSemanticDomainType:
        """
        Look up column_name in column_types_dict_list and map its ("storage")
        type onto a SemanticDomainTypes member via the ProfilerTypeMapping
        lookup tables (NUMERIC, TEXT, LOGIC, DATETIME, BINARY, CURRENCY,
        IDENTIFIER, MISCELLANEOUS); anything unrecognized becomes UNKNOWN.

        Raises:
            ge_exceptions.ProfilerExecutionError: if column_name does not match
                exactly one entry in column_types_dict_list.
        """
        # Note: As of Python 3.8, specifying argument type in Lambda functions is not supported by Lambda syntax.
        column_types_dict_list = list(
            filter(
                lambda column_type_dict: column_name == column_type_dict["name"],
                column_types_dict_list,
            )
        )
        if len(column_types_dict_list) != 1:
            raise ge_exceptions.ProfilerExecutionError(
                message=f"""Error: {len(column_types_dict_list)} columns were found while obtaining semantic type \
information. Please ensure that the specified column name refers to exactly one column.
"""
            )
        column_type: str = str(column_types_dict_list[0]["type"]).upper()
        semantic_column_type: SemanticDomainTypes
        if column_type in (
            {type_name.upper() for type_name in ProfilerTypeMapping.INT_TYPE_NAMES}
            | {type_name.upper() for type_name in ProfilerTypeMapping.FLOAT_TYPE_NAMES}
        ):
            semantic_column_type = SemanticDomainTypes.NUMERIC
        elif column_type in {
            type_name.upper() for type_name in ProfilerTypeMapping.STRING_TYPE_NAMES
        }:
            semantic_column_type = SemanticDomainTypes.TEXT
        elif column_type in {
            type_name.upper() for type_name in ProfilerTypeMapping.BOOLEAN_TYPE_NAMES
        }:
            semantic_column_type = SemanticDomainTypes.LOGIC
        elif column_type in {
            type_name.upper() for type_name in ProfilerTypeMapping.DATETIME_TYPE_NAMES
        }:
            semantic_column_type = SemanticDomainTypes.DATETIME
        elif column_type in {
            type_name.upper() for type_name in ProfilerTypeMapping.BINARY_TYPE_NAMES
        }:
            semantic_column_type = SemanticDomainTypes.BINARY
        elif column_type in {
            type_name.upper() for type_name in ProfilerTypeMapping.CURRENCY_TYPE_NAMES
        }:
            semantic_column_type = SemanticDomainTypes.CURRENCY
        elif column_type in {
            type_name.upper() for type_name in ProfilerTypeMapping.IDENTIFIER_TYPE_NAMES
        }:
            semantic_column_type = SemanticDomainTypes.IDENTIFIER
        elif column_type in (
            {
                type_name.upper()
                for type_name in ProfilerTypeMapping.MISCELLANEOUS_TYPE_NAMES
            }
            | {type_name.upper() for type_name in ProfilerTypeMapping.RECORD_TYPE_NAMES}
        ):
            semantic_column_type = SemanticDomainTypes.MISCELLANEOUS
        else:
            semantic_column_type = SemanticDomainTypes.UNKNOWN
        inferred_semantic_column_type: InferredSemanticDomainType = (
            InferredSemanticDomainType(
                semantic_domain_type=semantic_column_type,
                details={
                    "algorithm_type": "deterministic",
                    "mechanism": "lookup_table",
                    "source": "great_expectations.profile.base.ProfilerTypeMapping",
                },
            )
        )
        return inferred_semantic_column_type
def _parse_semantic_domain_type_argument(
    semantic_types: Optional[
        Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
    ] = None
) -> List[SemanticDomainTypes]:
    """Normalize a semantic-type directive into a list of SemanticDomainTypes.

    Accepts a single type name (str, case-insensitive), a single
    SemanticDomainTypes member, or a homogeneous list of either form.
    None yields the empty list.

    Raises:
        ValueError: if a list mixes strings with enum members, or if the
            argument is of an unsupported type.
    """
    if semantic_types is None:
        return []
    if isinstance(semantic_types, str):
        # Enum lookup is by (upper-case) member name.
        return [SemanticDomainTypes[semantic_types.upper()]]
    if isinstance(semantic_types, SemanticDomainTypes):
        return [semantic_types]
    if isinstance(semantic_types, list):
        if all(isinstance(semantic_type, str) for semantic_type in semantic_types):
            return [
                SemanticDomainTypes[semantic_type.upper()]
                for semantic_type in semantic_types
            ]
        if all(
            isinstance(semantic_type, SemanticDomainTypes)
            for semantic_type in semantic_types
        ):
            return list(semantic_types)
        raise ValueError(
            "All elements in semantic_types list must be either of str or SemanticDomainTypes type."
        )
    raise ValueError("Unrecognized semantic_types directive.")
|
from typing import Any, Dict, List, Optional, Union
import great_expectations.exceptions as ge_exceptions
from great_expectations import DataContext
from great_expectations.core.batch import BatchRequest
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.profile.base import ProfilerTypeMapping
from great_expectations.rule_based_profiler.domain_builder import (
Domain,
DomainBuilder,
InferredSemanticDomainType,
SemanticDomainTypes,
)
from great_expectations.rule_based_profiler.parameter_builder import ParameterContainer
from great_expectations.validator.validator import MetricConfiguration
class SimpleSemanticTypeColumnDomainBuilder(DomainBuilder):
    """
    This DomainBuilder utilizes a "best-effort" semantic interpretation of ("storage") columns of a table.
    """
    def __init__(
        self,
        data_context: DataContext,
        batch_request: Optional[Union[BatchRequest, dict]] = None,
        semantic_types: Optional[
            Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
        ] = None,
    ):
        """
        Args:
            data_context: DataContext
            batch_request: specified in DomainBuilder configuration to get Batch objects for domain computation.
            semantic_types: semantic type directive -- a single type (as a str name or a
                SemanticDomainTypes member) or a list of such types; only columns whose
                inferred semantic type is in this collection yield domains.  None is
                normalized to the empty list (so no columns will match).
        """
        super().__init__(
            data_context=data_context,
            batch_request=batch_request,
        )
        if semantic_types is None:
            semantic_types = []
        # Stored unparsed; normalized on demand in _get_domains().
        self._semantic_types = semantic_types
    def _get_domains(
        self,
        variables: Optional[ParameterContainer] = None,
    ) -> List[Domain]:
        """
        Find the semantic column type for each column and return all domains matching the specified type or types.
        """
        # Normalize the constructor directive into List[SemanticDomainTypes].
        semantic_types: List[
            SemanticDomainTypes
        ] = _parse_semantic_domain_type_argument(semantic_types=self._semantic_types)
        batch_id: str = self.get_batch_id(variables=variables)
        # Storage-level type information for every column of the table.
        column_types_dict_list: List[Dict[str, Any]] = self.get_validator(
            variables=variables
        ).get_metric(
            metric=MetricConfiguration(
                metric_name="table.column_types",
                metric_domain_kwargs={
                    "batch_id": batch_id,
                },
                metric_value_kwargs={
                    "include_nested": True,
                },
                metric_dependencies=None,
            )
        )
        table_column_names: List[str] = self.get_validator(
            variables=variables
        ).get_metric(
            metric=MetricConfiguration(
                metric_name="table.columns",
                metric_domain_kwargs={
                    "batch_id": batch_id,
                },
                metric_value_kwargs=None,
                metric_dependencies=None,
            )
        )
        column_name: str
        # A semantic type is distinguished from the structured column type;
        # An example structured column type would be "integer". The inferred semantic type would be "id".
        table_column_name_to_inferred_semantic_domain_type_mapping: Dict[
            str, SemanticDomainTypes
        ] = {
            column_name: self.infer_semantic_domain_type_from_table_column_type(
                column_types_dict_list=column_types_dict_list,
                column_name=column_name,
            ).semantic_domain_type
            for column_name in table_column_names
        }
        # Keep only the columns whose inferred semantic type was requested.
        candidate_column_names: List[str] = list(
            filter(
                lambda candidate_column_name: table_column_name_to_inferred_semantic_domain_type_mapping[
                    candidate_column_name
                ]
                in semantic_types,
                table_column_names,
            )
        )
        # Emit one COLUMN-type Domain per matching column; the inferred
        # semantic type is preserved in the "details" payload for downstream use.
        domains: List[Domain] = [
            Domain(
                domain_type=MetricDomainTypes.COLUMN,
                domain_kwargs={
                    "column": column_name,
                },
                details={
                    "inferred_semantic_domain_type": table_column_name_to_inferred_semantic_domain_type_mapping[
                        column_name
                    ],
                },
            )
            for column_name in candidate_column_names
        ]
        return domains
    # This method (default implementation) can be overwritten (with different implementation mechanisms) by subclasses.
    # noinspection PyMethodMayBeStatic
    def infer_semantic_domain_type_from_table_column_type(
        self,
        column_types_dict_list: List[Dict[str, Any]],
        column_name: str,
    ) -> InferredSemanticDomainType:
        """
        Look up column_name in column_types_dict_list and map its ("storage")
        type onto a SemanticDomainTypes member via the ProfilerTypeMapping
        lookup tables (NUMERIC, TEXT, LOGIC, DATETIME, BINARY, CURRENCY,
        IDENTIFIER, MISCELLANEOUS); anything unrecognized becomes UNKNOWN.

        Raises:
            ge_exceptions.ProfilerExecutionError: if column_name does not match
                exactly one entry in column_types_dict_list.
        """
        # Note: As of Python 3.8, specifying argument type in Lambda functions is not supported by Lambda syntax.
        column_types_dict_list = list(
            filter(
                lambda column_type_dict: column_name == column_type_dict["name"],
                column_types_dict_list,
            )
        )
        if len(column_types_dict_list) != 1:
            raise ge_exceptions.ProfilerExecutionError(
                message=f"""Error: {len(column_types_dict_list)} columns were found while obtaining semantic type \
information. Please ensure that the specified column name refers to exactly one column.
"""
            )
        column_type: str = str(column_types_dict_list[0]["type"]).upper()
        semantic_column_type: SemanticDomainTypes
        if column_type in (
            {type_name.upper() for type_name in ProfilerTypeMapping.INT_TYPE_NAMES}
            | {type_name.upper() for type_name in ProfilerTypeMapping.FLOAT_TYPE_NAMES}
        ):
            semantic_column_type = SemanticDomainTypes.NUMERIC
        elif column_type in {
            type_name.upper() for type_name in ProfilerTypeMapping.STRING_TYPE_NAMES
        }:
            semantic_column_type = SemanticDomainTypes.TEXT
        elif column_type in {
            type_name.upper() for type_name in ProfilerTypeMapping.BOOLEAN_TYPE_NAMES
        }:
            semantic_column_type = SemanticDomainTypes.LOGIC
        elif column_type in {
            type_name.upper() for type_name in ProfilerTypeMapping.DATETIME_TYPE_NAMES
        }:
            semantic_column_type = SemanticDomainTypes.DATETIME
        elif column_type in {
            type_name.upper() for type_name in ProfilerTypeMapping.BINARY_TYPE_NAMES
        }:
            semantic_column_type = SemanticDomainTypes.BINARY
        elif column_type in {
            type_name.upper() for type_name in ProfilerTypeMapping.CURRENCY_TYPE_NAMES
        }:
            semantic_column_type = SemanticDomainTypes.CURRENCY
        elif column_type in {
            type_name.upper() for type_name in ProfilerTypeMapping.IDENTIFIER_TYPE_NAMES
        }:
            semantic_column_type = SemanticDomainTypes.IDENTIFIER
        elif column_type in (
            {
                type_name.upper()
                for type_name in ProfilerTypeMapping.MISCELLANEOUS_TYPE_NAMES
            }
            | {type_name.upper() for type_name in ProfilerTypeMapping.RECORD_TYPE_NAMES}
        ):
            semantic_column_type = SemanticDomainTypes.MISCELLANEOUS
        else:
            semantic_column_type = SemanticDomainTypes.UNKNOWN
        inferred_semantic_column_type: InferredSemanticDomainType = (
            InferredSemanticDomainType(
                semantic_domain_type=semantic_column_type,
                details={
                    "algorithm_type": "deterministic",
                    "mechanism": "lookup_table",
                    "source": "great_expectations.profile.base.ProfilerTypeMapping",
                },
            )
        )
        return inferred_semantic_column_type
def _parse_semantic_domain_type_argument(
    semantic_types: Optional[
        Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
    ] = None
) -> List[SemanticDomainTypes]:
    """Normalize a semantic-type directive into a list of SemanticDomainTypes.

    Accepts a single type name (str, case-insensitive), a single
    SemanticDomainTypes member, or a homogeneous list of either form.
    None yields the empty list.

    Raises:
        ValueError: if a list mixes strings with enum members, or if the
            argument is of an unsupported type.
    """
    if semantic_types is None:
        return []
    if isinstance(semantic_types, str):
        # Enum lookup is by (upper-case) member name.
        return [SemanticDomainTypes[semantic_types.upper()]]
    if isinstance(semantic_types, SemanticDomainTypes):
        return [semantic_types]
    if isinstance(semantic_types, list):
        if all(isinstance(semantic_type, str) for semantic_type in semantic_types):
            return [
                SemanticDomainTypes[semantic_type.upper()]
                for semantic_type in semantic_types
            ]
        if all(
            isinstance(semantic_type, SemanticDomainTypes)
            for semantic_type in semantic_types
        ):
            return list(semantic_types)
        raise ValueError(
            "All elements in semantic_types list must be either of str or SemanticDomainTypes type."
        )
    raise ValueError("Unrecognized semantic_types directive.")
|
en
| 0.708816
|
This DomainBuilder utilizes a "best-effort" semantic interpretation of ("storage") columns of a table. Args: data_context: DataContext batch_request: specified in DomainBuilder configuration to get Batch objects for domain computation. Find the semantic column type for each column and return all domains matching the specified type or types. # A semantic type is distinguished from the structured column type; # An example structured column type would be "integer". The inferred semantic type would be "id". # This method (default implementation) can be overwritten (with different implementation mechanisms) by subclasses. # noinspection PyMethodMayBeStatic # Note: As of Python 3.8, specifying argument type in Lambda functions is not supported by Lambda syntax. Error: {len(column_types_dict_list)} columns were found while obtaining semantic type \ information. Please ensure that the specified column name refers to exactly one column.
| 2.037777
| 2
|
opportunities/migrations/0013_auto_20210103_0116.py
|
MrEscape54/CRM
| 0
|
6628245
|
<reponame>MrEscape54/CRM
# Generated by Django 3.1.4 on 2021-01-03 04:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.4): re-point Opportunity.account at the
    accounts.account model with CASCADE delete and related_name 'opportunities'."""
    dependencies = [
        ('accounts', '0018_auto_20201230_1302'),
        ('opportunities', '0012_auto_20210103_0100'),
    ]
    operations = [
        migrations.AlterField(
            model_name='opportunity',
            name='account',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='opportunities', to='accounts.account'),
        ),
    ]
|
# Generated by Django 3.1.4 on 2021-01-03 04:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.4): re-point Opportunity.account at the
    accounts.account model with CASCADE delete and related_name 'opportunities'."""
    dependencies = [
        ('accounts', '0018_auto_20201230_1302'),
        ('opportunities', '0012_auto_20210103_0100'),
    ]
    operations = [
        migrations.AlterField(
            model_name='opportunity',
            name='account',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='opportunities', to='accounts.account'),
        ),
    ]
|
en
| 0.799879
|
# Generated by Django 3.1.4 on 2021-01-03 04:16
| 1.499081
| 1
|
antelope_core/entities/entities.py
|
AntelopeLCA/core
| 1
|
6628246
|
<filename>antelope_core/entities/entities.py
from __future__ import print_function, unicode_literals
import uuid
from itertools import chain
from numbers import Number
from antelope import CatalogRef, BaseEntity, PropertyExists
from synonym_dict import LowerDict
# Recognized entity types and, for each, the entity type of its reference.
entity_types = ('process', 'flow', 'quantity', 'fragment')
entity_refs = {
    'process': 'exchange',
    'flow': 'quantity',
    'quantity': 'unit',
    'fragment': 'fragment'
}
def concatenate(*lists):
    """Lazily chain the given iterables together, in order."""
    return chain.from_iterable(lists)
class EntityInitializationError(Exception):
    """Raised when an LcEntity cannot be constructed (e.g. neither external_ref nor entity_uuid supplied)."""
    pass
class EntityMergeError(Exception):
    """Raised when LcEntity.merge() is given an incompatible entity (type or external_ref mismatch)."""
    pass
class LcEntity(BaseEntity):
    """
    All LC entities behave like dicts, but they all have some common properties, defined here.
    """
    # Field-name lists that together define the entity's "signature"
    # (see signature_fields()); subclasses override _new_fields / _ref_field.
    _pre_fields = ['Name']
    _new_fields = []
    _ref_field = ''
    _post_fields = ['Comment']
    _origin = None
    def __init__(self, entity_type, external_ref, origin=None, entity_uuid=None, **kwargs):
        """
        :param entity_type: entity type string (e.g. 'process', 'flow', 'quantity', 'fragment')
        :param external_ref: external identifier; if None, str(entity_uuid) is used instead
        :param origin: optional origin (write-once; see origin.setter)
        :param entity_uuid: optional UUID, as a string or uuid.UUID (write-once)
        :param kwargs: initial properties; entries whose value is None are skipped
        :raises EntityInitializationError: if both external_ref and entity_uuid are None
        """
        if external_ref is None:
            if entity_uuid is None:
                raise EntityInitializationError('At least one of entity_uuid, external_ref must be provided')
            external_ref = str(entity_uuid)
        self._external_ref = str(external_ref)
        self._uuid = None
        if entity_uuid is not None:
            self.uuid = entity_uuid
        self._d = LowerDict()  # case-insensitive property store
        self._entity_type = entity_type
        self._reference_entity = None
        if origin is not None:
            self.origin = origin
        self._d['Name'] = self._external_ref
        self._d['Comment'] = ''
        self._query_ref = None  # memoize this
        for k, v in kwargs.items():
            if v is None:
                continue
            self[k] = v
    @property
    def reference_entity(self):
        # The entity this one references (type given by entity_refs[entity_type]).
        return self._reference_entity
    def make_ref(self, query):
        """
        Build (and memoize) a CatalogRef for this entity, carrying over all
        signature fields except the reference field itself.
        """
        if self._query_ref is None:
            d = dict()
            for k in self.signature_fields():
                if k == self._ref_field:
                    continue
                if k in self._d:
                    d[k] = self._d[k]
            self._query_ref = CatalogRef.from_query(self.external_ref, query, self.entity_type,
                                                    uuid=self.uuid, **d)
        return self._query_ref
    @property
    def entity_type(self):
        return self._entity_type
    @property
    def origin(self):
        return self._origin
    @property
    def is_entity(self):
        """
        Used to distinguish between entities and catalog refs (which answer False)
        :return: True for LcEntity subclasses
        """
        return True
    def map_origin(self, omap, fallback=None):
        """
        This is used to propagate a change in origin semantics. Provide a dict that maps old origins to new origins.
        External ref should remain the same with respect to the new origin.
        :param omap: dict mapping old origin to new origin
        :param fallback: if present, use in cases where old origin not found
        :return:
        """
        if self._origin in omap:
            self._origin = omap[self._origin]
        elif fallback is not None:
            self._origin = fallback
    @origin.setter
    def origin(self, value):
        # Write-once: origin may only be assigned while still unset.
        if self._origin is None:
            self._origin = value
        else:
            raise PropertyExists('Origin already set to %s' % self._origin)
    def signature_fields(self):
        """
        Iterate the names of the fields that make up this entity's signature.
        """
        # NOTE(review): "is not []" is always True (identity test against a
        # fresh list), so the reference field is always included -- presumably
        # a non-empty check was intended; confirm before changing.
        return concatenate(self._pre_fields, self._new_fields,
                           [self._ref_field] if self._ref_field is not [] else [], self._post_fields)
    @property
    def reference_field(self):
        return self._ref_field
    @property
    def external_ref(self):
        return self._external_ref
    def get_signature(self):
        """
        :return: dict mapping each signature field name to its current value
        """
        k = dict()
        for i in self.signature_fields():
            k[i] = self[i]
        return k
    @property
    def uuid(self):
        return self._uuid
    @uuid.setter
    def uuid(self, key):
        """
        Write-once UUID; accepts a uuid.UUID or any string uuid.UUID() accepts.
        """
        if self._uuid is not None:
            raise PropertyExists('UUID has already been specified! %s' % self._uuid)
        if isinstance(key, uuid.UUID):
            self._uuid = str(key)
        else:
            self._uuid = str(uuid.UUID(key))
    def _validate_reference(self, ref_entity):
        """
        :return: False for a None reference (allowed); True if the reference's
            entity type matches entity_refs[self.entity_type]
        :raises TypeError: on an entity-type mismatch
        """
        if ref_entity is None:
            # raise ValueError('Null reference')
            return False  # allow none references
        if ref_entity.entity_type != entity_refs[self.entity_type]:
            raise TypeError("Type Mismatch on reference entity: expected %s, found %s" % (entity_refs[self.entity_type],
                                                                                          ref_entity.entity_type))
        return True
    def _set_reference(self, ref_entity):
        """
        set the entity's reference value. Can be overridden
        :param ref_entity:
        :return:
        """
        self._validate_reference(ref_entity)
        self._reference_entity = ref_entity
    def has_property(self, prop):
        return prop in self._d
    def properties(self):
        # Generator over property names (keys of the case-insensitive store).
        for i in self._d.keys():
            yield i
    def get_properties(self):
        """
        dict of properties and values for a given entity
        :return:
        """
        d = dict()
        for i in self.properties():
            d[i] = self._d[i]
        return d
    def update(self, d):
        self._d.update(d)
    def validate(self):
        """
        Check the reference entity's type and the presence of every signature
        field, printing a message for each problem found.
        :return: True if no problems were found
        """
        valid = True
        if self.reference_entity is not None:
            try:
                self._validate_reference(self.reference_entity)
            except TypeError:
                print("Reference entity type %s is wrong for %s (%s)" %
                      (self.reference_entity.entity_type,
                       self.entity_type,
                       entity_refs[self.entity_type]))
                valid = False
        for i in self.signature_fields():
            try:
                self[i]
            except KeyError:
                print("Required field %s does not exist" % i)
                valid = False
        return valid
    def _print_ref_field(self):
        # Serialized form of the reference: its external_ref, or None.
        if self.reference_entity is None:
            return None
        else:
            return '%s' % self.reference_entity.external_ref
    def serialize(self, domesticate=False, drop_fields=()):
        """
        Build a JSON-ready dict of the entity.
        :param domesticate: if True, omit the 'origin' key
        :param drop_fields: property names to exclude
        :return: dict of serializable values
        """
        j = {
            'entityType': self.entity_type,
            'externalId': self.external_ref,
            'origin': self.origin,
            self._ref_field: self._print_ref_field(),
        }
        if domesticate or self._origin is None:
            j.pop('origin')
        for k, v in self._d.items():
            if k in drop_fields:
                continue
            if v is None:
                continue
            elif isinstance(v, list):
                j[k] = v
            elif isinstance(v, set):
                j[k] = sorted(list(v))
            elif isinstance(v, Number):
                j[k] = v
            elif isinstance(v, bool):
                j[k] = v
            elif isinstance(v, LcEntity):
                j[k] = {"origin": v.origin,
                        "externalId": v.external_ref,
                        "entity_type": v.entity_type}
            elif isinstance(v, dict):
                j[k] = v
            else:
                j[k] = str(v)
        return j
    def __getitem__(self, item):
        # The reference-field name (case-insensitive) and 'EntityType' are
        # virtual keys; everything else comes from the property store.
        if item.lower() == self._ref_field.lower():
            return self.reference_entity
        elif item == 'EntityType':
            return self.entity_type
        else:
            # don't catch KeyErrors here-- leave that to subclasses
            return self._d[item]
    def get(self, item, default=None):
        # dict-style access with a default instead of KeyError.
        try:
            return self.__getitem__(item)
        except KeyError:
            return default
    def __setitem__(self, key, value):
        # Reference-field aliases route to _set_reference(); identity keys
        # are immutable and therefore rejected.
        if key.lower() in (self._ref_field.lower(), 'reference', 'referenceentity', 'reference_entity'):
            self._set_reference(value)
        elif key.lower() in ('entityid', 'entitytype', 'externalid', 'origin'):
            raise KeyError('Disallowed Keyname %s' % key)
        else:
            self._d[key] = value
    def merge(self, other):
        """
        Copy properties from other (same entity type and external_ref) that
        this entity does not already have.
        :raises EntityMergeError: on entity-type or external-ref mismatch
        """
        # First branch is deliberately disabled (see the inline comment).
        if False:  # not isinstance(other, LcEntity): ## This is not a requirement! cf. EntityRefs, Disclosure objs
            raise EntityMergeError('Incoming is not an LcEntity: %s' % other)
        elif self.entity_type != other.entity_type:
            raise EntityMergeError('Incoming entity type %s mismatch with %s' % (other.entity_type, self.entity_type))
        elif self.external_ref != other.external_ref:
            raise EntityMergeError('Incoming External ref %s conflicts with existing %s' % (other.external_ref,
                                                                                            self.external_ref))
        else:
            # if self.origin != other.origin:
            #     print('Merging entities with differing origin: \nnew: %s\nexisting: %s'% (other.origin, self.origin))
            for k in other.properties():
                if k not in self._d.keys():
                    print('Merge: Adding key %s: %s' % (k, other[k]))
                    self[k] = other[k]
    def show(self):
        """
        Print a human-readable summary: type, origin, reference(s), then all
        properties (Name and Comment first).
        """
        print('%s Entity (ref %s)' % (self.entity_type.title(), self.external_ref))
        print('origin: %s' % self.origin)
        if self.entity_type == 'process':
            for i in self.reference_entity:
                print('reference: %s' % i)
        else:
            print('reference: %s' % self.reference_entity)
        fix = ['Name', 'Comment']
        postfix = set(str(k) for k in self._d.keys()).difference(fix)
        ml = len(max(self._d.keys(), key=len))
        for k in fix:
            print('%*s: %s' % (ml, k, self._d[k]))
        for k in postfix:
            print('%*s: %s' % (ml, k, self._d[k]))
    def __str__(self):
        return 'LC %s: %s' % (self.entity_type, self._d['Name'])
    @property
    def _name(self):
        # Convenience alias: just the str() form of the entity.
        return str(self)
    def __hash__(self):
        """
        External ref is set by the end of __init__ and is immutable (except for fragments-- which use uuid for hash)
        :return:
        """
        if self._origin is None:
            raise AttributeError('Origin not set!')
        return hash(self.link)
    def __eq__(self, other):
        """
        two entities are equal if their types, origins, and external references are the same.
        internal refs do not need to be equal; reference entities do not need to be equal
        :return:
        """
        if other is None:
            return False
        # if not isinstance(other, LcEntity): # taking this out so that CatalogRefs and entities can be compared
        #     return False
        try:
            is_eq = (self.external_ref == other.external_ref
                     and self.origin == other.origin
                     and self.entity_type == other.entity_type)
        except AttributeError:
            is_eq = False
        return is_eq
|
<filename>antelope_core/entities/entities.py
from __future__ import print_function, unicode_literals
import uuid
from itertools import chain
from numbers import Number
from antelope import CatalogRef, BaseEntity, PropertyExists
from synonym_dict import LowerDict
# Recognized entity types and, for each, the entity type of its reference.
entity_types = ('process', 'flow', 'quantity', 'fragment')
entity_refs = {
    'process': 'exchange',
    'flow': 'quantity',
    'quantity': 'unit',
    'fragment': 'fragment'
}
def concatenate(*lists):
    """Lazily chain the given iterables together, in order."""
    return chain.from_iterable(lists)
class EntityInitializationError(Exception):
    """Raised when an LcEntity cannot be constructed (e.g. neither external_ref nor entity_uuid supplied)."""
    pass
class EntityMergeError(Exception):
    """Raised when LcEntity.merge() is given an incompatible entity (type or external_ref mismatch)."""
    pass
class LcEntity(BaseEntity):
"""
All LC entities behave like dicts, but they all have some common properties, defined here.
"""
_pre_fields = ['Name']
_new_fields = []
_ref_field = ''
_post_fields = ['Comment']
_origin = None
def __init__(self, entity_type, external_ref, origin=None, entity_uuid=None, **kwargs):
if external_ref is None:
if entity_uuid is None:
raise EntityInitializationError('At least one of entity_uuid, external_ref must be provided')
external_ref = str(entity_uuid)
self._external_ref = str(external_ref)
self._uuid = None
if entity_uuid is not None:
self.uuid = entity_uuid
self._d = LowerDict()
self._entity_type = entity_type
self._reference_entity = None
if origin is not None:
self.origin = origin
self._d['Name'] = self._external_ref
self._d['Comment'] = ''
self._query_ref = None # memoize this
for k, v in kwargs.items():
if v is None:
continue
self[k] = v
@property
def reference_entity(self):
return self._reference_entity
def make_ref(self, query):
if self._query_ref is None:
d = dict()
for k in self.signature_fields():
if k == self._ref_field:
continue
if k in self._d:
d[k] = self._d[k]
self._query_ref = CatalogRef.from_query(self.external_ref, query, self.entity_type,
uuid=self.uuid, **d)
return self._query_ref
@property
def entity_type(self):
return self._entity_type
@property
def origin(self):
return self._origin
@property
def is_entity(self):
"""
Used to distinguish between entities and catalog refs (which answer False)
:return: True for LcEntity subclasses
"""
return True
def map_origin(self, omap, fallback=None):
"""
This is used to propagate a change in origin semantics. Provide a dict that maps old origins to new origins.
External ref should remain the same with respect to the new origin.
:param omap: dict mapping old origin to new origin
:param fallback: if present, use in cases where old origin not found
:return:
"""
if self._origin in omap:
self._origin = omap[self._origin]
elif fallback is not None:
self._origin = fallback
@origin.setter
def origin(self, value):
if self._origin is None:
self._origin = value
else:
raise PropertyExists('Origin already set to %s' % self._origin)
def signature_fields(self):
return concatenate(self._pre_fields, self._new_fields,
[self._ref_field] if self._ref_field is not [] else [], self._post_fields)
@property
def reference_field(self):
return self._ref_field
@property
def external_ref(self):
return self._external_ref
def get_signature(self):
k = dict()
for i in self.signature_fields():
k[i] = self[i]
return k
@property
def uuid(self):
return self._uuid
@uuid.setter
def uuid(self, key):
if self._uuid is not None:
raise PropertyExists('UUID has already been specified! %s' % self._uuid)
if isinstance(key, uuid.UUID):
self._uuid = str(key)
else:
self._uuid = str(uuid.UUID(key))
def _validate_reference(self, ref_entity):
if ref_entity is None:
# raise ValueError('Null reference')
return False # allow none references
if ref_entity.entity_type != entity_refs[self.entity_type]:
raise TypeError("Type Mismatch on reference entity: expected %s, found %s" % (entity_refs[self.entity_type],
ref_entity.entity_type))
return True
def _set_reference(self, ref_entity):
"""
set the entity's reference value. Can be overridden
:param ref_entity:
:return:
"""
self._validate_reference(ref_entity)
self._reference_entity = ref_entity
def has_property(self, prop):
return prop in self._d
def properties(self):
for i in self._d.keys():
yield i
def get_properties(self):
"""
dict of properties and values for a given entity
:return:
"""
d = dict()
for i in self.properties():
d[i] = self._d[i]
return d
def update(self, d):
self._d.update(d)
def validate(self):
valid = True
if self.reference_entity is not None:
try:
self._validate_reference(self.reference_entity)
except TypeError:
print("Reference entity type %s is wrong for %s (%s)" %
(self.reference_entity.entity_type,
self.entity_type,
entity_refs[self.entity_type]))
valid = False
for i in self.signature_fields():
try:
self[i]
except KeyError:
print("Required field %s does not exist" % i)
valid = False
return valid
def _print_ref_field(self):
if self.reference_entity is None:
return None
else:
return '%s' % self.reference_entity.external_ref
def serialize(self, domesticate=False, drop_fields=()):
j = {
'entityType': self.entity_type,
'externalId': self.external_ref,
'origin': self.origin,
self._ref_field: self._print_ref_field(),
}
if domesticate or self._origin is None:
j.pop('origin')
for k, v in self._d.items():
if k in drop_fields:
continue
if v is None:
continue
elif isinstance(v, list):
j[k] = v
elif isinstance(v, set):
j[k] = sorted(list(v))
elif isinstance(v, Number):
j[k] = v
elif isinstance(v, bool):
j[k] = v
elif isinstance(v, LcEntity):
j[k] = {"origin": v.origin,
"externalId": v.external_ref,
"entity_type": v.entity_type}
elif isinstance(v, dict):
j[k] = v
else:
j[k] = str(v)
return j
def __getitem__(self, item):
if item.lower() == self._ref_field.lower():
return self.reference_entity
elif item == 'EntityType':
return self.entity_type
else:
# don't catch KeyErrors here-- leave that to subclasses
return self._d[item]
def get(self, item, default=None):
try:
return self.__getitem__(item)
except KeyError:
return default
def __setitem__(self, key, value):
if key.lower() in (self._ref_field.lower(), 'reference', 'referenceentity', 'reference_entity'):
self._set_reference(value)
elif key.lower() in ('entityid', 'entitytype', 'externalid', 'origin'):
raise KeyError('Disallowed Keyname %s' % key)
else:
self._d[key] = value
def merge(self, other):
if False: # not isinstance(other, LcEntity): ## This is not a requirement! cf. EntityRefs, Disclosure objs
raise EntityMergeError('Incoming is not an LcEntity: %s' % other)
elif self.entity_type != other.entity_type:
raise EntityMergeError('Incoming entity type %s mismatch with %s' % (other.entity_type, self.entity_type))
elif self.external_ref != other.external_ref:
raise EntityMergeError('Incoming External ref %s conflicts with existing %s' % (other.external_ref,
self.external_ref))
else:
# if self.origin != other.origin:
# print('Merging entities with differing origin: \nnew: %s\nexisting: %s'% (other.origin, self.origin))
for k in other.properties():
if k not in self._d.keys():
print('Merge: Adding key %s: %s' % (k, other[k]))
self[k] = other[k]
def show(self):
print('%s Entity (ref %s)' % (self.entity_type.title(), self.external_ref))
print('origin: %s' % self.origin)
if self.entity_type == 'process':
for i in self.reference_entity:
print('reference: %s' % i)
else:
print('reference: %s' % self.reference_entity)
fix = ['Name', 'Comment']
postfix = set(str(k) for k in self._d.keys()).difference(fix)
ml = len(max(self._d.keys(), key=len))
for k in fix:
print('%*s: %s' % (ml, k, self._d[k]))
for k in postfix:
print('%*s: %s' % (ml, k, self._d[k]))
def __str__(self):
return 'LC %s: %s' % (self.entity_type, self._d['Name'])
@property
def _name(self):
    # Convenience alias: the entity's display name is its str() form.
    return str(self)
def __hash__(self):
    """Hash on the entity's link.

    External ref is set by the end of __init__ and is immutable (except for
    fragments, which use uuid for hash).  Raises AttributeError when the
    origin was never set, since the link is not meaningful without it.
    """
    if self._origin is not None:
        return hash(self.link)
    raise AttributeError('Origin not set!')
def __eq__(self, other):
"""
two entities are equal if their types, origins, and external references are the same.
internal refs do not need to be equal; reference entities do not need to be equal
:return:
"""
if other is None:
return False
# if not isinstance(other, LcEntity): # taking this out so that CatalogRefs and entities can be compared
# return False
try:
is_eq = (self.external_ref == other.external_ref
and self.origin == other.origin
and self.entity_type == other.entity_type)
except AttributeError:
is_eq = False
return is_eq
|
en
| 0.81832
|
All LC entities behave like dicts, but they all have some common properties, defined here. # memoize this Used to distinguish between entities and catalog refs (which answer False) :return: True for LcEntity subclasses This is used to propagate a change in origin semantics. Provide a dict that maps old origins to new origins. External ref should remain the same with respect to the new origin. :param omap: dict mapping old origin to new origin :param fallback: if present, use in cases where old origin not found :return: # raise ValueError('Null reference') # allow none references set the entity's reference value. Can be overridden :param ref_entity: :return: dict of properties and values for a given entity :return: # don't catch KeyErrors here-- leave that to subclasses # not isinstance(other, LcEntity): ## This is not a requirement! cf. EntityRefs, Disclosure objs # if self.origin != other.origin: # print('Merging entities with differing origin: \nnew: %s\nexisting: %s'% (other.origin, self.origin)) External ref is set by the end of __init__ and is immutable (except for fragments-- which use uuid for hash) :return: two entities are equal if their types, origins, and external references are the same. internal refs do not need to be equal; reference entities do not need to be equal :return: # if not isinstance(other, LcEntity): # taking this out so that CatalogRefs and entities can be compared # return False
| 2.149227
| 2
|
tests/_util.py
|
Bouke/invoke
| 0
|
6628247
|
import os
import sys
try:
import termios
except ImportError:
# Not available on Windows
termios = None
from contextlib import contextmanager
from invoke.vendor.six import BytesIO, b, iteritems, wraps
from mock import patch, Mock
from spec import trap, Spec, eq_, ok_, skip
from invoke import Program, Runner
from invoke.platform import WINDOWS
# Absolute path to the tests/_support fixture directory (sibling of this file).
support = os.path.join(os.path.dirname(__file__), '_support')
def skip_if_windows(fn):
    """Decorator: skip the wrapped test when running on Windows."""
    @wraps(fn)
    def inner(*args, **kwargs):
        if WINDOWS:
            skip()
        return fn(*args, **kwargs)
    return inner
@contextmanager
def support_path():
    # Temporarily put tests/_support at the front of sys.path so fixture
    # task modules can be imported; the entry is always removed on exit.
    sys.path.insert(0, support)
    try:
        yield
    finally:
        sys.path.pop(0)
def load(name):
    # Import (and return) a module from tests/_support by name.
    with support_path():
        return __import__(name)
class IntegrationSpec(Spec):
    """Spec base class for integration tests.

    Runs each test inside tests/_support with a pristine environment, and
    scrubs any _support modules out of sys.modules afterwards.
    """

    def setup(self):
        # Snapshot the environment so teardown can restore it
        self.old_environ = os.environ.copy()
        # All integration work happens relative to tests/_support
        os.chdir(support)

    def teardown(self):
        # Back to the project root so later tests aren't stranded
        os.chdir(os.path.join(os.path.dirname(__file__), '..'))
        # Restore the environment snapshot
        os.environ.clear()
        os.environ.update(self.old_environ)
        # Drop any test-support task collections from sys.modules so state
        # doesn't bleed between tests; otherwise tests could pass only
        # because an earlier test already loaded/cd'd to their tasks.
        for modname, module in iteritems(sys.modules.copy()):
            if module and support in getattr(module, '__file__', ''):
                del sys.modules[modname]
@trap
def expect(invocation, out=None, err=None, program=None, invoke=True,
           test=None):
    """
    Run ``invocation`` via ``program`` and assert on the captured output.

    Give ``out`` and/or ``err`` (but not neither) to compare against
    stdout/stderr.  ``program`` defaults to a fresh ``Program()``.  Unless
    ``invoke=False``, the invocation is prefixed with ``"invoke "``.
    ``test`` replaces the default equality assertion with a custom
    assertion wrapper.
    """
    prog = Program() if program is None else program
    argv = "invoke {0}".format(invocation) if invoke else invocation
    prog.run(argv, exit=False)
    # Compare whichever streams the caller asked about
    checker = test or eq_
    if out is not None:
        checker(sys.stdout.getvalue(), out)
    if err is not None:
        checker(sys.stderr.getvalue(), err)
class MockSubprocess(object):
    """Patch Popen / os.read / sys.stdin so runner code sees a fake
    subprocess with canned stdout/stderr and exit code.

    Call ``stop()`` to undo the patches (the ``mock_subprocess`` decorator
    does this in a ``finally``).
    """
    def __init__(self, out='', err='', exit=0, isatty=None, autostart=True):
        # Canned output streams, consumed by the fake os.read installed below
        self.out_file = BytesIO(b(out))
        self.err_file = BytesIO(b(err))
        self.exit = exit
        self.isatty = isatty
        if autostart:
            self.start()
    def start(self):
        """Activate the three patches; returns the Popen mock."""
        # Start patchin'
        self.popen = patch('invoke.runners.Popen')
        Popen = self.popen.start()
        self.read = patch('os.read')
        read = self.read.start()
        self.sys_stdin = patch('sys.stdin', new_callable=BytesIO)
        sys_stdin = self.sys_stdin.start()
        # Setup mocks
        process = Popen.return_value
        process.returncode = self.exit
        # Fds 1/2 map onto the canned out/err streams inside fakeread
        process.stdout.fileno.return_value = 1
        process.stderr.fileno.return_value = 2
        # If requested, mock isatty to fake out pty detection
        if self.isatty is not None:
            sys_stdin.isatty = Mock(return_value=self.isatty)
        def fakeread(fileno, count):
            # Serve os.read calls from the canned stream for that fd
            fd = {1: self.out_file, 2: self.err_file}[fileno]
            return fd.read(count)
        read.side_effect = fakeread
        # Return the Popen mock as it's sometimes wanted inside tests
        return Popen
    def stop(self):
        # Undo all three patches installed by start()
        self.popen.stop()
        self.read.stop()
        self.sys_stdin.stop()
def mock_subprocess(out='', err='', exit=0, isatty=None, insert_Popen=False):
    """Decorator form of MockSubprocess.

    Patches are active for the duration of the decorated test; with
    ``insert_Popen=True`` the Popen mock is appended to the test's
    positional arguments.
    """
    def decorator(f):
        @wraps(f)
        def inner(*args, **kwargs):
            mocker = MockSubprocess(
                out=out, err=err, exit=exit, isatty=isatty, autostart=False,
            )
            popen = mocker.start()
            posargs = list(args)
            if insert_Popen:
                posargs.append(popen)
            try:
                f(*posargs, **kwargs)
            finally:
                # Always unpatch, even when the test raises
                mocker.stop()
        return inner
    return decorator
def mock_pty(out='', err='', exit=0, isatty=None, trailing_error=None,
             skip_asserts=False, insert_os=False):
    """Decorator faking the pty/os layer used by pty-mode subprocess runs.

    Canned ``out``/``err`` are served through a fake ``os.read``;
    ``trailing_error`` is raised once the canned data is exhausted, to
    simulate platform-specific EOF errors.  After the test runs, sanity
    assertions confirm the mocked calls actually happened (unless
    ``skip_asserts`` is set).
    """
    # Windows doesn't have ptys, so all the pty tests should be
    # skipped anyway...
    if WINDOWS:
        return skip_if_windows
    def decorator(f):
        import fcntl
        ioctl_patch = patch('invoke.runners.fcntl.ioctl', wraps=fcntl.ioctl)
        @wraps(f)
        @patch('invoke.runners.pty')
        @patch('invoke.runners.os')
        @ioctl_patch
        def wrapper(*args, **kwargs):
            args = list(args)
            # patch decorators append their mocks bottom-up (ioctl, os,
            # pty), so successive pop() calls yield pty, os, ioctl.
            pty, os, ioctl = args.pop(), args.pop(), args.pop()
            # Don't actually fork, but pretend we did & that main thread is
            # also the child (pid 0) to trigger execve call; & give 'parent fd'
            # of 1 (stdout).
            pty.fork.return_value = 0, 1
            # We don't really need to care about waiting since not truly
            # forking/etc, so here we just return a nonzero "pid" + sentinel
            # wait-status value (used in some tests about WIFEXITED etc)
            os.waitpid.return_value = None, Mock(name='exitstatus')
            # Either or both of these may get called, depending...
            os.WEXITSTATUS.return_value = exit
            os.WTERMSIG.return_value = exit
            # If requested, mock isatty to fake out pty detection
            if isatty is not None:
                os.isatty.return_value = isatty
            out_file = BytesIO(b(out))
            err_file = BytesIO(b(err))
            def fakeread(fileno, count):
                # Serve reads from the canned stream matching the fd
                fd = {1: out_file, 2: err_file}[fileno]
                ret = fd.read(count)
                # If asked, fake a Linux-platform trailing I/O error.
                if not ret and trailing_error:
                    raise trailing_error
                return ret
            os.read.side_effect = fakeread
            if insert_os:
                args.append(os)
            f(*args, **kwargs)
            # Short-circuit if we raised an error in fakeread()
            if trailing_error:
                return
            # Sanity checks to make sure the stuff we mocked, actually got ran!
            # TODO: inject our mocks back into the tests so they can make their
            # own assertions if desired
            pty.fork.assert_called_with()
            # Expect a get, and then later set, of terminal window size
            eq_(ioctl.call_args_list[0][0][1], termios.TIOCGWINSZ)
            eq_(ioctl.call_args_list[1][0][1], termios.TIOCSWINSZ)
            if not skip_asserts:
                for name in ('execve', 'waitpid'):
                    ok_(getattr(os, name).called)
                # Ensure at least one of the exit status getters was called
                ok_(os.WEXITSTATUS.called or os.WTERMSIG.called)
        return wrapper
    return decorator
class _Dummy(Runner):
    """
    Dummy runner subclass that does minimum work required to execute run().
    It also serves as a convenient basic API checker; failure to update it to
    match the current Runner API will cause TypeErrors, NotImplementedErrors,
    and similar.
    """
    # Neuter the input loop sleep, so tests aren't slow (at the expense of CPU,
    # which isn't a problem for testing).
    input_sleep = 0
    def start(self, command, shell, env):
        # No real subprocess is ever spawned
        pass
    def read_proc_stdout(self, num_bytes):
        # Empty stdout -> caller sees immediate EOF
        return ""
    def read_proc_stderr(self, num_bytes):
        # Empty stderr -> caller sees immediate EOF
        return ""
    def _write_proc_stdin(self, data):
        # Written stdin is silently discarded
        pass
    @property
    def process_is_finished(self):
        # The fake process is always already done
        return True
    def returncode(self):
        # Fake processes always "succeed"
        return 0
    def stop(self):
        pass
    # Dummy command that will blow up if it ever truly hits a real shell.
    _ = "nope"
# Runner that fakes ^C during subprocess exec
class _KeyboardInterruptingRunner(_Dummy):
    """Dummy runner that raises KeyboardInterrupt exactly once during wait()."""
    def __init__(self, *args, **kwargs):
        super(_KeyboardInterruptingRunner, self).__init__(*args, **kwargs)
        # Set True the first time wait() fires the fake ^C
        self._interrupted = False
    # Trigger KeyboardInterrupt during wait()
    def wait(self):
        if not self._interrupted:
            self._interrupted = True
            raise KeyboardInterrupt
    # But also, after that has been done, pretend subprocess shutdown happened
    # (or we will loop forever).
    # FIX: the base class defines process_is_finished as a @property; the
    # original override here was a plain method, so attribute access returned
    # an always-truthy bound method instead of the _interrupted flag.  The
    # @property decorator restores the base-class interface.
    @property
    def process_is_finished(self):
        return self._interrupted
class OhNoz(Exception):
    """Distinctive exception type for tests that need a recognizable error."""
    pass
|
import os
import sys
try:
import termios
except ImportError:
# Not available on Windows
termios = None
from contextlib import contextmanager
from invoke.vendor.six import BytesIO, b, iteritems, wraps
from mock import patch, Mock
from spec import trap, Spec, eq_, ok_, skip
from invoke import Program, Runner
from invoke.platform import WINDOWS
support = os.path.join(os.path.dirname(__file__), '_support')
def skip_if_windows(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if WINDOWS:
skip()
return fn(*args, **kwargs)
return wrapper
@contextmanager
def support_path():
sys.path.insert(0, support)
try:
yield
finally:
sys.path.pop(0)
def load(name):
with support_path():
return __import__(name)
class IntegrationSpec(Spec):
def setup(self):
# Preserve environment for later restore
self.old_environ = os.environ.copy()
# Always do things relative to tests/_support
os.chdir(support)
def teardown(self):
# Chdir back to project root to avoid problems
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
# Nuke changes to environ
os.environ.clear()
os.environ.update(self.old_environ)
# Strip any test-support task collections from sys.modules to prevent
# state bleed between tests; otherwise tests can incorrectly pass
# despite not explicitly loading/cd'ing to get the tasks they call
# loaded.
for name, module in iteritems(sys.modules.copy()):
if module and support in getattr(module, '__file__', ''):
del sys.modules[name]
@trap
def expect(invocation, out=None, err=None, program=None, invoke=True,
test=None):
"""
Run ``invocation`` via ``program`` and expect resulting output to match.
May give one or both of ``out``/``err`` (but not neither).
``program`` defaults to ``Program()``.
To skip automatically assuming the argv under test starts with ``"invoke
"``, say ``invoke=False``.
To customize the operator used for testing (default: equality), use
``test`` (which should be an assertion wrapper of some kind).
"""
if program is None:
program = Program()
if invoke:
invocation = "invoke {0}".format(invocation)
program.run(invocation, exit=False)
# Perform tests
if out is not None:
(test or eq_)(sys.stdout.getvalue(), out)
if err is not None:
(test or eq_)(sys.stderr.getvalue(), err)
class MockSubprocess(object):
def __init__(self, out='', err='', exit=0, isatty=None, autostart=True):
self.out_file = BytesIO(b(out))
self.err_file = BytesIO(b(err))
self.exit = exit
self.isatty = isatty
if autostart:
self.start()
def start(self):
# Start patchin'
self.popen = patch('invoke.runners.Popen')
Popen = self.popen.start()
self.read = patch('os.read')
read = self.read.start()
self.sys_stdin = patch('sys.stdin', new_callable=BytesIO)
sys_stdin = self.sys_stdin.start()
# Setup mocks
process = Popen.return_value
process.returncode = self.exit
process.stdout.fileno.return_value = 1
process.stderr.fileno.return_value = 2
# If requested, mock isatty to fake out pty detection
if self.isatty is not None:
sys_stdin.isatty = Mock(return_value=self.isatty)
def fakeread(fileno, count):
fd = {1: self.out_file, 2: self.err_file}[fileno]
return fd.read(count)
read.side_effect = fakeread
# Return the Popen mock as it's sometimes wanted inside tests
return Popen
def stop(self):
self.popen.stop()
self.read.stop()
self.sys_stdin.stop()
def mock_subprocess(out='', err='', exit=0, isatty=None, insert_Popen=False):
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
proc = MockSubprocess(
out=out, err=err, exit=exit, isatty=isatty, autostart=False,
)
Popen = proc.start()
args = list(args)
if insert_Popen:
args.append(Popen)
try:
f(*args, **kwargs)
finally:
proc.stop()
return wrapper
return decorator
def mock_pty(out='', err='', exit=0, isatty=None, trailing_error=None,
skip_asserts=False, insert_os=False):
# Windows doesn't have ptys, so all the pty tests should be
# skipped anyway...
if WINDOWS:
return skip_if_windows
def decorator(f):
import fcntl
ioctl_patch = patch('invoke.runners.fcntl.ioctl', wraps=fcntl.ioctl)
@wraps(f)
@patch('invoke.runners.pty')
@patch('invoke.runners.os')
@ioctl_patch
def wrapper(*args, **kwargs):
args = list(args)
pty, os, ioctl = args.pop(), args.pop(), args.pop()
# Don't actually fork, but pretend we did & that main thread is
# also the child (pid 0) to trigger execve call; & give 'parent fd'
# of 1 (stdout).
pty.fork.return_value = 0, 1
# We don't really need to care about waiting since not truly
# forking/etc, so here we just return a nonzero "pid" + sentinel
# wait-status value (used in some tests about WIFEXITED etc)
os.waitpid.return_value = None, Mock(name='exitstatus')
# Either or both of these may get called, depending...
os.WEXITSTATUS.return_value = exit
os.WTERMSIG.return_value = exit
# If requested, mock isatty to fake out pty detection
if isatty is not None:
os.isatty.return_value = isatty
out_file = BytesIO(b(out))
err_file = BytesIO(b(err))
def fakeread(fileno, count):
fd = {1: out_file, 2: err_file}[fileno]
ret = fd.read(count)
# If asked, fake a Linux-platform trailing I/O error.
if not ret and trailing_error:
raise trailing_error
return ret
os.read.side_effect = fakeread
if insert_os:
args.append(os)
f(*args, **kwargs)
# Short-circuit if we raised an error in fakeread()
if trailing_error:
return
# Sanity checks to make sure the stuff we mocked, actually got ran!
# TODO: inject our mocks back into the tests so they can make their
# own assertions if desired
pty.fork.assert_called_with()
# Expect a get, and then later set, of terminal window size
eq_(ioctl.call_args_list[0][0][1], termios.TIOCGWINSZ)
eq_(ioctl.call_args_list[1][0][1], termios.TIOCSWINSZ)
if not skip_asserts:
for name in ('execve', 'waitpid'):
ok_(getattr(os, name).called)
# Ensure at least one of the exit status getters was called
ok_(os.WEXITSTATUS.called or os.WTERMSIG.called)
return wrapper
return decorator
class _Dummy(Runner):
"""
Dummy runner subclass that does minimum work required to execute run().
It also serves as a convenient basic API checker; failure to update it to
match the current Runner API will cause TypeErrors, NotImplementedErrors,
and similar.
"""
# Neuter the input loop sleep, so tests aren't slow (at the expense of CPU,
# which isn't a problem for testing).
input_sleep = 0
def start(self, command, shell, env):
pass
def read_proc_stdout(self, num_bytes):
return ""
def read_proc_stderr(self, num_bytes):
return ""
def _write_proc_stdin(self, data):
pass
@property
def process_is_finished(self):
return True
def returncode(self):
return 0
def stop(self):
pass
# Dummy command that will blow up if it ever truly hits a real shell.
_ = "nope"
# Runner that fakes ^C during subprocess exec
class _KeyboardInterruptingRunner(_Dummy):
def __init__(self, *args, **kwargs):
super(_KeyboardInterruptingRunner, self).__init__(*args, **kwargs)
self._interrupted = False
# Trigger KeyboardInterrupt during wait()
def wait(self):
if not self._interrupted:
self._interrupted = True
raise KeyboardInterrupt
# But also, after that has been done, pretend subprocess shutdown happened
# (or we will loop forever).
def process_is_finished(self):
return self._interrupted
class OhNoz(Exception):
pass
|
en
| 0.876372
|
# Not available on Windows # Preserve environment for later restore # Always do things relative to tests/_support # Chdir back to project root to avoid problems # Nuke changes to environ # Strip any test-support task collections from sys.modules to prevent # state bleed between tests; otherwise tests can incorrectly pass # despite not explicitly loading/cd'ing to get the tasks they call # loaded. Run ``invocation`` via ``program`` and expect resulting output to match. May give one or both of ``out``/``err`` (but not neither). ``program`` defaults to ``Program()``. To skip automatically assuming the argv under test starts with ``"invoke "``, say ``invoke=False``. To customize the operator used for testing (default: equality), use ``test`` (which should be an assertion wrapper of some kind). # Perform tests # Start patchin' # Setup mocks # If requested, mock isatty to fake out pty detection # Return the Popen mock as it's sometimes wanted inside tests # Windows doesn't have ptys, so all the pty tests should be # skipped anyway... # Don't actually fork, but pretend we did & that main thread is # also the child (pid 0) to trigger execve call; & give 'parent fd' # of 1 (stdout). # We don't really need to care about waiting since not truly # forking/etc, so here we just return a nonzero "pid" + sentinel # wait-status value (used in some tests about WIFEXITED etc) # Either or both of these may get called, depending... # If requested, mock isatty to fake out pty detection # If asked, fake a Linux-platform trailing I/O error. # Short-circuit if we raised an error in fakeread() # Sanity checks to make sure the stuff we mocked, actually got ran! # TODO: inject our mocks back into the tests so they can make their # own assertions if desired # Expect a get, and then later set, of terminal window size # Ensure at least one of the exit status getters was called Dummy runner subclass that does minimum work required to execute run(). 
It also serves as a convenient basic API checker; failure to update it to match the current Runner API will cause TypeErrors, NotImplementedErrors, and similar. # Neuter the input loop sleep, so tests aren't slow (at the expense of CPU, # which isn't a problem for testing). # Dummy command that will blow up if it ever truly hits a real shell. # Runner that fakes ^C during subprocess exec # Trigger KeyboardInterrupt during wait() # But also, after that has been done, pretend subprocess shutdown happened # (or we will loop forever).
| 1.891033
| 2
|
trinity-eth2/components/eth2/discv5/component.py
|
vapory-testing/trinity-vap2
| 14
|
6628248
|
<reponame>vapory-testing/trinity-vap2<gh_stars>10-100
from argparse import ArgumentParser, _SubParsersAction
import logging
import pathlib
import async_service
from eth.db.backends.level import LevelDB
from eth_keys.datatypes import PrivateKey
from eth_utils import decode_hex, encode_hex
from eth_utils.toolz import merge
from lahja import EndpointAPI
from p2p.abc import NodeDBAPI
from p2p.constants import NUM_ROUTING_TABLE_BUCKETS
from p2p.discv5.channel_services import (
DatagramReceiver,
DatagramSender,
IncomingDatagram,
IncomingMessage,
IncomingPacket,
OutgoingDatagram,
OutgoingMessage,
OutgoingPacket,
PacketDecoder,
PacketEncoder,
)
from p2p.discv5.endpoint_tracker import EndpointTracker, EndpointVote
from p2p.discv5.message_dispatcher import MessageDispatcher
from p2p.discv5.messages import default_message_type_registry
from p2p.discv5.packer import Packer
from p2p.discv5.routing_table_manager import RoutingTableManager
from p2p.enr import ENR, UnsignedENR
from p2p.identity_schemes import default_identity_scheme_registry
from p2p.kademlia import KademliaRoutingTable
from p2p.node_db import NodeDB
from trinity.boot_info import BootInfo
from trinity.constants import NODE_DB_DIR as DEFAULT_NODEDB_DIR_NAME
from trinity.extensibility import TrioIsolatedComponent
import trio
logger = logging.getLogger("trinity.components.eth2.discv5.DiscV5Component")
def get_nodedb_dir(boot_info: BootInfo) -> pathlib.Path:
    """Return the node-DB directory: ``--nodedb-dir`` when given, otherwise
    the default directory under the Trinity data dir."""
    custom_dir = boot_info.args.nodedb_dir
    if custom_dir is not None:
        return pathlib.Path(custom_dir)
    return boot_info.trinity_config.data_dir / DEFAULT_NODEDB_DIR_NAME
def get_local_private_key(boot_info: BootInfo) -> PrivateKey:
    """Return the discovery identity key: ``--discovery-private-key`` (hex)
    when supplied, otherwise the node key from the Trinity config."""
    hex_key = boot_info.args.discovery_private_key
    if hex_key:
        return PrivateKey(decode_hex(hex_key))
    return boot_info.trinity_config.nodekey
async def get_local_enr(
    boot_info: BootInfo, node_db: NodeDBAPI, local_private_key: PrivateKey
) -> ENR:
    """Build (or refresh) this node's signed ENR.

    A minimal ENR is derived from the private key and discovery port.  If
    the node DB already holds an ENR for this node id and any key/value
    pair differs, the records are merged (minimal values win) under a
    bumped sequence number; otherwise the stored record is returned as-is.
    A node id with no stored record simply gets the minimal ENR.
    """
    minimal_enr = UnsignedENR(
        sequence_number=1,
        kv_pairs={
            b"id": b"v4",
            b"secp256k1": local_private_key.public_key.to_compressed_bytes(),
            b"udp": boot_info.args.discovery_port,
        },
        identity_scheme_registry=default_identity_scheme_registry,
    ).to_signed_enr(local_private_key.to_bytes())
    node_id = minimal_enr.node_id
    try:
        base_enr = node_db.get_enr(node_id)
    except KeyError:
        logger.info(f"No Node for {encode_hex(node_id)} found, creating new one")
        return minimal_enr
    else:
        # NOTE(review): assumes every key of the minimal ENR is present in the
        # stored one; a stored ENR missing e.g. b"udp" would raise KeyError
        # here -- confirm against the ENR mapping semantics.
        if any(base_enr[key] != value for key, value in minimal_enr.items()):
            # FIX: was logger.debug(f"Updating local ENR") -- an f-string with
            # no placeholders (flake8 F541); plain string emits the same text.
            logger.debug("Updating local ENR")
            return UnsignedENR(
                sequence_number=base_enr.sequence_number + 1,
                kv_pairs=merge(dict(base_enr), dict(minimal_enr)),
                identity_scheme_registry=default_identity_scheme_registry,
            ).to_signed_enr(local_private_key.to_bytes())
        else:
            return base_enr
class DiscV5Component(TrioIsolatedComponent):
    """Trinity component running the discv5 peer-discovery stack under trio.

    ``do_run`` wires one UDP socket into a pipeline of services
    (datagrams <-> packets <-> messages) plus the routing-table machinery,
    and runs them all concurrently inside a single nursery.
    """
    name = "DiscV5"
    @classmethod
    def configure_parser(
        cls, arg_parser: ArgumentParser, subparser: _SubParsersAction
    ) -> None:
        # Register the discovery-related command line options.
        discovery_parser = arg_parser.add_argument_group("discovery")
        discovery_parser.add_argument(
            "--nodedb-dir", help="Path to the directory in which our NodeDB is stored"
        )
        arg_parser.add_argument(
            "--bootstrap_nodes",
            help="/ip4/127.0.0.1/tcp/1234/p2p/node1_peer_id,/ip4/127.0.0.1/tcp/5678/p2p/node2_peer_id", # noqa: E501
        )
        arg_parser.add_argument(
            "--preferred_nodes",
            help="/ip4/127.0.0.1/tcp/1234/p2p/node1_peer_id,/ip4/127.0.0.1/tcp/5678/p2p/node2_peer_id", # noqa: E501
        )
        discovery_parser.add_argument(
            "--discovery-boot-enrs",
            nargs="+",
            help="An arbitrary number of ENRs to populate the initial routing table with",
        )
        discovery_parser.add_argument(
            "--discovery-port",
            help="UDP port on which to listen for discovery messages",
            type=int,
            default=9000,
        )
        discovery_parser.add_argument(
            "--discovery-private-key",
            help="hex encoded 32 byte private key representing the discovery network identity",
        )
    @property
    def is_enabled(self) -> bool:
        # NOTE(review): the component is unconditionally disabled here --
        # confirm whether this is intentional or a temporary switch-off.
        return False
    async def do_run(self, event_bus: EndpointAPI) -> None:
        """Build the discv5 service pipeline and run it until cancelled."""
        boot_info = self._boot_info
        identity_scheme_registry = default_identity_scheme_registry
        message_type_registry = default_message_type_registry
        # Open (creating if needed) the on-disk node database.
        nodedb_dir = get_nodedb_dir(boot_info)
        nodedb_dir.mkdir(exist_ok=True)
        node_db = NodeDB(default_identity_scheme_registry, LevelDB(nodedb_dir))
        # Derive our own identity/ENR and store it.
        local_private_key = get_local_private_key(boot_info)
        local_enr = await get_local_enr(boot_info, node_db, local_private_key)
        local_node_id = local_enr.node_id
        routing_table = KademliaRoutingTable(local_node_id, NUM_ROUTING_TABLE_BUCKETS)
        node_db.set_enr(local_enr)
        # Seed the routing table from any --discovery-boot-enrs.
        for enr_repr in boot_info.args.discovery_boot_enrs or ():
            enr = ENR.from_repr(enr_repr)
            node_db.set_enr(enr)
            routing_table.update(enr.node_id)
        port = boot_info.args.discovery_port
        socket = trio.socket.socket(
            family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM
        )
        # trio.open_memory_channel returns a (send, receive) pair, so below
        # index [0] is the send end and [1] the receive end of each stage.
        outgoing_datagram_channels = trio.open_memory_channel[OutgoingDatagram](0)
        incoming_datagram_channels = trio.open_memory_channel[IncomingDatagram](0)
        outgoing_packet_channels = trio.open_memory_channel[OutgoingPacket](0)
        incoming_packet_channels = trio.open_memory_channel[IncomingPacket](0)
        outgoing_message_channels = trio.open_memory_channel[OutgoingMessage](0)
        incoming_message_channels = trio.open_memory_channel[IncomingMessage](0)
        endpoint_vote_channels = trio.open_memory_channel[EndpointVote](0)
        # Datagram <-> packet <-> message pipeline around the UDP socket.
        datagram_sender = DatagramSender(outgoing_datagram_channels[1], socket)
        datagram_receiver = DatagramReceiver(socket, incoming_datagram_channels[0])
        packet_encoder = PacketEncoder(
            outgoing_packet_channels[1], outgoing_datagram_channels[0]
        )
        packet_decoder = PacketDecoder(
            incoming_datagram_channels[1], incoming_packet_channels[0]
        )
        packer = Packer(
            local_private_key=local_private_key.to_bytes(),
            local_node_id=local_node_id,
            node_db=node_db,
            message_type_registry=message_type_registry,
            incoming_packet_receive_channel=incoming_packet_channels[1],
            incoming_message_send_channel=incoming_message_channels[0],
            outgoing_message_receive_channel=outgoing_message_channels[1],
            outgoing_packet_send_channel=outgoing_packet_channels[0],
        )
        message_dispatcher = MessageDispatcher(
            node_db=node_db,
            incoming_message_receive_channel=incoming_message_channels[1],
            outgoing_message_send_channel=outgoing_message_channels[0],
        )
        endpoint_tracker = EndpointTracker(
            local_private_key=local_private_key.to_bytes(),
            local_node_id=local_node_id,
            node_db=node_db,
            identity_scheme_registry=identity_scheme_registry,
            vote_receive_channel=endpoint_vote_channels[1],
        )
        routing_table_manager = RoutingTableManager(
            local_node_id=local_node_id,
            routing_table=routing_table,
            message_dispatcher=message_dispatcher,
            node_db=node_db,
            outgoing_message_send_channel=outgoing_message_channels[0],
            endpoint_vote_send_channel=endpoint_vote_channels[0],
        )
        logger.info(f"Starting discovery, listening on port {port}")
        logger.info(f"Local Node ID: {encode_hex(local_enr.node_id)}")
        logger.info(f"Local ENR: {local_enr}")
        services = (
            datagram_sender,
            datagram_receiver,
            packet_encoder,
            packet_decoder,
            packer,
            message_dispatcher,
            endpoint_tracker,
            routing_table_manager,
        )
        await socket.bind(("0.0.0.0", port))
        with socket:
            # Run every service concurrently; the nursery keeps them alive
            # until cancellation propagates.
            async with trio.open_nursery() as nursery:
                for service in services:
                    nursery.start_soon(async_service.TrioManager.run_service, service)
if __name__ == "__main__":
    # Allow running this component standalone for manual testing.
    from trinity.extensibility.component import run_trio_eth1_component
    run_trio_eth1_component(DiscV5Component)
|
from argparse import ArgumentParser, _SubParsersAction
import logging
import pathlib
import async_service
from eth.db.backends.level import LevelDB
from eth_keys.datatypes import PrivateKey
from eth_utils import decode_hex, encode_hex
from eth_utils.toolz import merge
from lahja import EndpointAPI
from p2p.abc import NodeDBAPI
from p2p.constants import NUM_ROUTING_TABLE_BUCKETS
from p2p.discv5.channel_services import (
DatagramReceiver,
DatagramSender,
IncomingDatagram,
IncomingMessage,
IncomingPacket,
OutgoingDatagram,
OutgoingMessage,
OutgoingPacket,
PacketDecoder,
PacketEncoder,
)
from p2p.discv5.endpoint_tracker import EndpointTracker, EndpointVote
from p2p.discv5.message_dispatcher import MessageDispatcher
from p2p.discv5.messages import default_message_type_registry
from p2p.discv5.packer import Packer
from p2p.discv5.routing_table_manager import RoutingTableManager
from p2p.enr import ENR, UnsignedENR
from p2p.identity_schemes import default_identity_scheme_registry
from p2p.kademlia import KademliaRoutingTable
from p2p.node_db import NodeDB
from trinity.boot_info import BootInfo
from trinity.constants import NODE_DB_DIR as DEFAULT_NODEDB_DIR_NAME
from trinity.extensibility import TrioIsolatedComponent
import trio
logger = logging.getLogger("trinity.components.eth2.discv5.DiscV5Component")
def get_nodedb_dir(boot_info: BootInfo) -> pathlib.Path:
if boot_info.args.nodedb_dir is None:
return boot_info.trinity_config.data_dir / DEFAULT_NODEDB_DIR_NAME
else:
return pathlib.Path(boot_info.args.nodedb_dir)
def get_local_private_key(boot_info: BootInfo) -> PrivateKey:
if boot_info.args.discovery_private_key:
local_private_key_bytes = decode_hex(boot_info.args.discovery_private_key)
return PrivateKey(local_private_key_bytes)
else:
return boot_info.trinity_config.nodekey
async def get_local_enr(
boot_info: BootInfo, node_db: NodeDBAPI, local_private_key: PrivateKey
) -> ENR:
minimal_enr = UnsignedENR(
sequence_number=1,
kv_pairs={
b"id": b"v4",
b"secp256k1": local_private_key.public_key.to_compressed_bytes(),
b"udp": boot_info.args.discovery_port,
},
identity_scheme_registry=default_identity_scheme_registry,
).to_signed_enr(local_private_key.to_bytes())
node_id = minimal_enr.node_id
try:
base_enr = node_db.get_enr(node_id)
except KeyError:
logger.info(f"No Node for {encode_hex(node_id)} found, creating new one")
return minimal_enr
else:
if any(base_enr[key] != value for key, value in minimal_enr.items()):
logger.debug(f"Updating local ENR")
return UnsignedENR(
sequence_number=base_enr.sequence_number + 1,
kv_pairs=merge(dict(base_enr), dict(minimal_enr)),
identity_scheme_registry=default_identity_scheme_registry,
).to_signed_enr(local_private_key.to_bytes())
else:
return base_enr
class DiscV5Component(TrioIsolatedComponent):
    """Trinity component that runs the Discovery v5 (discv5) peer-discovery stack.

    Builds a UDP datagram -> packet -> message pipeline out of trio memory
    channels and runs each stage as an async service under one nursery.
    """
    name = "DiscV5"
    @classmethod
    def configure_parser(
        cls, arg_parser: ArgumentParser, subparser: _SubParsersAction
    ) -> None:
        """Register discovery-related CLI arguments.

        NOTE(review): ``--bootstrap_nodes`` / ``--preferred_nodes`` are added to
        the top-level parser while the other options go into the "discovery"
        group — presumably intentional (they may be shared with other
        components); confirm before regrouping.
        """
        discovery_parser = arg_parser.add_argument_group("discovery")
        discovery_parser.add_argument(
            "--nodedb-dir", help="Path to the directory in which our NodeDB is stored"
        )
        arg_parser.add_argument(
            "--bootstrap_nodes",
            help="/ip4/127.0.0.1/tcp/1234/p2p/node1_peer_id,/ip4/127.0.0.1/tcp/5678/p2p/node2_peer_id",  # noqa: E501
        )
        arg_parser.add_argument(
            "--preferred_nodes",
            help="/ip4/127.0.0.1/tcp/1234/p2p/node1_peer_id,/ip4/127.0.0.1/tcp/5678/p2p/node2_peer_id",  # noqa: E501
        )
        discovery_parser.add_argument(
            "--discovery-boot-enrs",
            nargs="+",
            help="An arbitrary number of ENRs to populate the initial routing table with",
        )
        discovery_parser.add_argument(
            "--discovery-port",
            help="UDP port on which to listen for discovery messages",
            type=int,
            default=9000,
        )
        discovery_parser.add_argument(
            "--discovery-private-key",
            help="hex encoded 32 byte private key representing the discovery network identity",
        )
    @property
    def is_enabled(self) -> bool:
        # Hard-coded off: this component is currently disabled and never runs
        # unless the flag below is flipped.
        return False
    async def do_run(self, event_bus: EndpointAPI) -> None:
        """Wire up and supervise the full discv5 pipeline until cancelled."""
        boot_info = self._boot_info
        identity_scheme_registry = default_identity_scheme_registry
        message_type_registry = default_message_type_registry
        # Persistent ENR store on disk; directory is created on first run.
        nodedb_dir = get_nodedb_dir(boot_info)
        nodedb_dir.mkdir(exist_ok=True)
        node_db = NodeDB(default_identity_scheme_registry, LevelDB(nodedb_dir))
        # Derive (or update) our own ENR from the configured private key.
        local_private_key = get_local_private_key(boot_info)
        local_enr = await get_local_enr(boot_info, node_db, local_private_key)
        local_node_id = local_enr.node_id
        routing_table = KademliaRoutingTable(local_node_id, NUM_ROUTING_TABLE_BUCKETS)
        node_db.set_enr(local_enr)
        # Seed the routing table with any boot ENRs given on the command line.
        for enr_repr in boot_info.args.discovery_boot_enrs or ():
            enr = ENR.from_repr(enr_repr)
            node_db.set_enr(enr)
            routing_table.update(enr.node_id)
        port = boot_info.args.discovery_port
        socket = trio.socket.socket(
            family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM
        )
        # Each open_memory_channel returns (send_end, receive_end); index 0 is
        # the send end and index 1 the receive end.  Capacity 0 makes every
        # hand-off between pipeline stages synchronous (no buffering).
        outgoing_datagram_channels = trio.open_memory_channel[OutgoingDatagram](0)
        incoming_datagram_channels = trio.open_memory_channel[IncomingDatagram](0)
        outgoing_packet_channels = trio.open_memory_channel[OutgoingPacket](0)
        incoming_packet_channels = trio.open_memory_channel[IncomingPacket](0)
        outgoing_message_channels = trio.open_memory_channel[OutgoingMessage](0)
        incoming_message_channels = trio.open_memory_channel[IncomingMessage](0)
        endpoint_vote_channels = trio.open_memory_channel[EndpointVote](0)
        # Lowest layer: raw UDP datagrams to/from the socket.
        datagram_sender = DatagramSender(outgoing_datagram_channels[1], socket)
        datagram_receiver = DatagramReceiver(socket, incoming_datagram_channels[0])
        # Packet layer: (de)serialize discv5 packets to/from datagrams.
        packet_encoder = PacketEncoder(
            outgoing_packet_channels[1], outgoing_datagram_channels[0]
        )
        packet_decoder = PacketDecoder(
            incoming_datagram_channels[1], incoming_packet_channels[0]
        )
        # Message layer: handshakes plus encryption between packets and messages.
        packer = Packer(
            local_private_key=local_private_key.to_bytes(),
            local_node_id=local_node_id,
            node_db=node_db,
            message_type_registry=message_type_registry,
            incoming_packet_receive_channel=incoming_packet_channels[1],
            incoming_message_send_channel=incoming_message_channels[0],
            outgoing_message_receive_channel=outgoing_message_channels[1],
            outgoing_packet_send_channel=outgoing_packet_channels[0],
        )
        message_dispatcher = MessageDispatcher(
            node_db=node_db,
            incoming_message_receive_channel=incoming_message_channels[1],
            outgoing_message_send_channel=outgoing_message_channels[0],
        )
        # Tracks externally-observed endpoint votes to keep our ENR up to date.
        endpoint_tracker = EndpointTracker(
            local_private_key=local_private_key.to_bytes(),
            local_node_id=local_node_id,
            node_db=node_db,
            identity_scheme_registry=identity_scheme_registry,
            vote_receive_channel=endpoint_vote_channels[1],
        )
        routing_table_manager = RoutingTableManager(
            local_node_id=local_node_id,
            routing_table=routing_table,
            message_dispatcher=message_dispatcher,
            node_db=node_db,
            outgoing_message_send_channel=outgoing_message_channels[0],
            endpoint_vote_send_channel=endpoint_vote_channels[0],
        )
        logger.info(f"Starting discovery, listening on port {port}")
        logger.info(f"Local Node ID: {encode_hex(local_enr.node_id)}")
        logger.info(f"Local ENR: {local_enr}")
        services = (
            datagram_sender,
            datagram_receiver,
            packet_encoder,
            packet_decoder,
            packer,
            message_dispatcher,
            endpoint_tracker,
            routing_table_manager,
        )
        await socket.bind(("0.0.0.0", port))
        # Run every stage under a single nursery so cancelling the component
        # tears down the whole pipeline together.
        with socket:
            async with trio.open_nursery() as nursery:
                for service in services:
                    nursery.start_soon(async_service.TrioManager.run_service, service)
if __name__ == "__main__":
    # Allow running this component standalone as a trio-based eth1 process.
    from trinity.extensibility.component import run_trio_eth1_component
    run_trio_eth1_component(DiscV5Component)
|
it
| 0.364061
|
# noqa: E501 # noqa: E501
| 1.460134
| 1
|
adlmagics/adlmagics/test/testcases/session_service_test.py
|
Azure/Azure-Data-Service-Notebook
| 6
|
6628249
|
import unittest
from adlmagics.services.session_service import SessionService
from adlmagics.session_consts import *
from adlmagics.test.mocks.mock_json_persister import MockJsonPersister
class SessionServiceTest(unittest.TestCase):
    """Unit tests for ``SessionService`` backed by ``MockJsonPersister``.

    A fresh service instance is created in ``setUp`` so no state leaks
    between tests.
    """

    def setUp(self):
        # New service per test, persisted through an in-memory mock.
        self.__session_service = SessionService(MockJsonPersister())

    def tearDown(self):
        self.__session_service = None

    def test_get_session_item_post_initialization(self):
        """Every known session item starts at its declared default value."""
        self.assertEqual(self.__session_service.get_session_item(session_tenant.name), session_tenant.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_user.name), session_user.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_adla_account.name), session_adla_account.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_adls_account.name), session_adls_account.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_job_runtime.name), session_job_runtime.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_job_priority.name), session_job_priority.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_job_parallelism.name), session_job_parallelism.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_paging_numberperpage.name), session_paging_numberperpage.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_file_encoding.name), session_file_encoding.default_value)

    def test_get_session_item_exceptional(self):
        """Unknown, empty or ``None`` item names yield the null sentinel."""
        self.assertEqual(self.__session_service.get_session_item("nonexisted_session_item_name"), session_null_value)
        self.assertEqual(self.__session_service.get_session_item(""), session_null_value)
        self.assertEqual(self.__session_service.get_session_item(None), session_null_value)

    def test_set_session_item(self):
        """Setting a known item makes the new value readable back."""
        self.__session_service.set_session_item(session_tenant.name, "test tenant")
        self.assertEqual(self.__session_service.get_session_item(session_tenant.name), "test tenant")

    def test_set_session_item_exceptional(self):
        """Setting unknown/empty/None item names must not raise."""
        self.__session_service.set_session_item("nonexisted_session_item_name", "test value")
        self.__session_service.set_session_item("", "test value")
        self.__session_service.set_session_item(None, "test value")

    def test_session_item_names(self):
        """The service exposes exactly the known item names, in order."""
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(self.__session_service.session_item_names, [
            session_tenant.name,
            session_user.name,
            session_adla_account.name,
            session_adls_account.name,
            session_job_runtime.name,
            session_job_priority.name,
            session_job_parallelism.name,
            session_paging_numberperpage.name,
            session_file_encoding.name
        ])
|
import unittest
from adlmagics.services.session_service import SessionService
from adlmagics.session_consts import *
from adlmagics.test.mocks.mock_json_persister import MockJsonPersister
class SessionServiceTest(unittest.TestCase):
    """Unit tests for ``SessionService`` backed by ``MockJsonPersister``.

    A fresh service instance is created in ``setUp`` so no state leaks
    between tests.
    """

    def setUp(self):
        # New service per test, persisted through an in-memory mock.
        self.__session_service = SessionService(MockJsonPersister())

    def tearDown(self):
        self.__session_service = None

    def test_get_session_item_post_initialization(self):
        """Every known session item starts at its declared default value."""
        self.assertEqual(self.__session_service.get_session_item(session_tenant.name), session_tenant.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_user.name), session_user.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_adla_account.name), session_adla_account.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_adls_account.name), session_adls_account.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_job_runtime.name), session_job_runtime.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_job_priority.name), session_job_priority.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_job_parallelism.name), session_job_parallelism.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_paging_numberperpage.name), session_paging_numberperpage.default_value)
        self.assertEqual(self.__session_service.get_session_item(session_file_encoding.name), session_file_encoding.default_value)

    def test_get_session_item_exceptional(self):
        """Unknown, empty or ``None`` item names yield the null sentinel."""
        self.assertEqual(self.__session_service.get_session_item("nonexisted_session_item_name"), session_null_value)
        self.assertEqual(self.__session_service.get_session_item(""), session_null_value)
        self.assertEqual(self.__session_service.get_session_item(None), session_null_value)

    def test_set_session_item(self):
        """Setting a known item makes the new value readable back."""
        self.__session_service.set_session_item(session_tenant.name, "test tenant")
        self.assertEqual(self.__session_service.get_session_item(session_tenant.name), "test tenant")

    def test_set_session_item_exceptional(self):
        """Setting unknown/empty/None item names must not raise."""
        self.__session_service.set_session_item("nonexisted_session_item_name", "test value")
        self.__session_service.set_session_item("", "test value")
        self.__session_service.set_session_item(None, "test value")

    def test_session_item_names(self):
        """The service exposes exactly the known item names, in order."""
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(self.__session_service.session_item_names, [
            session_tenant.name,
            session_user.name,
            session_adla_account.name,
            session_adls_account.name,
            session_job_runtime.name,
            session_job_priority.name,
            session_job_parallelism.name,
            session_paging_numberperpage.name,
            session_file_encoding.name
        ])
|
none
| 1
| 2.339111
| 2
|
|
device_tree_overlays/dtogen/__init__.py
|
fpga-open-speech-tools/simulink_codegen
| 2
|
6628250
|
# repository: fpga-open-speech-tools/simulink_codegen
# Package metadata for the dtogen (device tree overlay generator) package.
__title__ = 'dtogen'
__author__ = '<NAME> <<EMAIL>>, <NAME> <<EMAIL>>'
__license__ = 'GPL v3'
__copyright__ = 'Copyright 2020 Audio Logic'
|
# Package metadata for the dtogen (device tree overlay generator) package.
__title__ = 'dtogen'
__author__ = '<NAME> <<EMAIL>>, <NAME> <<EMAIL>>'
__license__ = 'GPL v3'
__copyright__ = 'Copyright 2020 Audio Logic'
|
none
| 1
| 0.923102
| 1
|